code stringlengths 2.5k 150k | kind stringclasses 1 value |
|---|---|
# Cordex preprocessing
```
%load_ext autoreload
%autoreload 2
import xclim
xclim.__version__
import os
import intake
import xarray as xr
import numpy as np
from tqdm.notebook import tqdm
os.environ["HDF5_USE_FILE_LOCKING"] = "FALSE"
xr.set_options(keep_attrs=True)
print(np.__version__)
print(xr.__version__)
import intake_esm
print(intake_esm.__version__)
import cordex as cx
from cordex.preprocessing import preprocessing as preproc
eur11 = cx.cordex_domain('EUR-11')
!echo $HDF5_USE_FILE_LOCKING
from dask.distributed import Client, progress
client = Client()
client
#client.get_versions(check=True)
```
## Data access
```
url = "/work/kd0956/Catalogs/mistral-cordex.json"
cat = intake.open_esm_datastore(url)
cat
models = ['CLMcom-CCLM4-8-17',
'CLMcom-BTU-CCLM4-8-17',
'CLMcom-ETH-COSMO-crCLIM-v1-1',
'CNRM-ALADIN53',
'CNRM-ALADIN63',
'DMI-HIRHAM5',
'GERICS-REMO2015',
'ICTP-RegCM4-6', # regcm seems to make trouble when opening with to_dataset_dict..
'IPSL-INERIS-WRF331F',
'KNMI-RACMO22E',
'MOHC-HadREM3-GA7-05',
'MPI-CSC-REMO2009',
'RMIB-UGent-ALARO-0',
'SMHI-RCA4',
'UHOH-WRF361H']
models = ['UHOH-WRF361H']
#models = 'ICTP-RegCM4-6'
#institute_ids = ['CLMcom', 'CLMcom-BTU', 'CLMcom-ETH', 'CNRM', 'DMI', 'GERICS',
# 'IPSL-INERIS', 'KNMI', 'MOHC', 'MPI-CSC', 'RMIB-UGent', 'SMHI',
# 'UHOH', 'z_GERICS_KB']
# there are some misleading entries in the catalog, so we state all institute ids explicitly here
# to avoid wrong ones...
institute_ids = ['CLMcom', 'CLMcom-BTU', 'CLMcom-ETH', 'CNRM', 'DMI', 'GERICS',
'ICTP', 'IPSL-INERIS', 'KNMI', 'MOHC', 'MPI-CSC', 'RMIB-UGent',
'SMHI', 'UHOH', 'z_GERICS_KB']
scens = ['rcp26', 'rcp45', 'rcp85']
expts = ['historical'] + scens
attrs = {'variable_id': ['tas'] , 'frequency': 'mon', 'CORDEX_domain': 'EUR-11',
'experiment_id': expts, 'rcm_version_id': 'v1', 'model_id': models,}
# 'institute_id': institute_ids}
import pandas as pd
pd.set_option('display.max_rows', None)
selection = cat.search(**attrs)
selection.df.groupby(['model_id', 'institute_id', 'experiment_id', 'driving_model_id', 'member', 'frequency', 'rcm_version_id', 'version'])['variable_id'].unique().apply(list).to_frame()
selection.df.institute_id.unique()
#selection['EUR-11.MIROC-MIROC5.UHOH.UHOH-WRF361H.rcp85.mon'].df
preproc.rename_cordex
dset_dict = selection.to_dataset_dict(cdf_kwargs = {"use_cftime": True, "chunks": {}}, preprocess=preproc.rename_cordex)
dset_dict_flatten = preproc.member_id_to_dset_id(dset_dict)
dset_dict.keys()
sort = preproc.sort_ds_dict_by_attr(dset_dict, 'model_id')
for ds_id, ds in dset_dict.items():
print(ds_id)
print(ds.tas.dims)
dset_sorted = preproc.sort_ds_dict_by_attr(dset_dict_flatten, 'experiment_id')
dset_sorted['rcp45'].keys()
ds_list = []
for ds in dset_sorted['rcp85'].values():
ds = preproc.replace_rlon_rlat(ds)
ds = preproc.replace_lon_lat(ds)
ds_list.append(ds)
from xclim.ensembles import create_ensemble
rcp85 = create_ensemble(ds_list, resample_freq='MS')
for key, ds in dset_dict.items():
print(key)
print(list(ds.dims))
ds = preproc.rename_cordex(ds)
preproc.check_domain(ds)
ds = dset_dict['EUR-11.NCC-NorESM1-M.z_GERICS_KB.GERICS-REMO2015.historical.mon']
ds = preproc.rename_cordex(ds)
ds = preproc.promote_empty_dims(ds)
ds = preproc.replace_rlon_rlat(ds)
ds
ds = dset_dict['EUR-11.NCC-NorESM1-M.CNRM.CNRM-ALADIN63.historical.mon']
ds
preproc.get_grid_mapping(ds)
ds = dset_dict['EUR-11.MPI-M-MPI-ESM-LR.GERICS.GERICS-REMO2015.historical.mon']
ds
preproc.get_grid_mapping(ds)
preproc.get_grid_mapping(ds)
preproc.remap_lambert_conformal(ds)
preproc.regridder is None
preproc._init_regridder(ds, eur11)
preproc.regridder
preproc.cordex_dataset_id(ds)
ds = dset_dict['EUR-11.IPSL-IPSL-CM5A-LR.GERICS.GERICS-REMO2015.historical.mon']
ds
```
We move the member id from a coordinate to the dsets key so that all datasets have the same structure...
we concentrate on rotated pole grids for now. there are just a handful of lambert conformal projection models that we can deal later with.
```
from cordex.preprocessing import preprocessing as preproc
dset_dict['EUR-11.ICHEC-EC-EARTH.DMI.DMI-HIRHAM5.historical.mon']
```
Now, we concatenate scenario data with historical data for easier comparisons:
Now, we have to align the different coordinates. The ensemble members might have slightly different rotated coordinates and also the projection to the global coordinates might be different. We use a reference dataset here, from which we copy the coordinates to all datasets to make them comparable.
The idea is to make the whole dataset id a coordinate, so that xarray will automatically handle all operations for all datasets...
```
# make the dataset id a coordinate for easier access
# Build, per scenario, a DataArray of dataset ids that can later serve as a
# concatenation dimension ('dset_id').
dims = {}
# BUG FIX: the dict built above is named `dset_sorted`, not `dsets_sorted`
# (the original raised NameError here).
for rcp, dsets in dset_sorted.items():
    dset_ids = list(dsets.keys())
    dim = xr.DataArray(dset_ids, dims='dset_id', name='dset_id',
                       coords={'dset_id': dset_ids})
    dims[rcp] = dim
preproc.dset_ids_to_coord(dset_dict)
```
we create three big datasets, one for each scenario:
```
from cordex import cordex_domain
def create_test_ds(name, pol_name='rotated_latitude_longitude'):
    """Create a dummy CORDEX domain dataset for testing the preprocessing.

    Parameters
    ----------
    name : str
        CORDEX domain identifier, e.g. 'EUR-11'.
    pol_name : str
        Name used for the grid-mapping (pole) variable.

    Returns
    -------
    The domain dataset produced by ``cordex_domain`` with a dummy variable
    and cell vertices attached.
    """
    domain = cordex_domain(name, mapping_name=pol_name, dummy=True, add_vertices=True)
    # The original contained a bare `domain.dummy` expression here — a no-op
    # statement (a leftover notebook echo); removed.
    return domain
dm = create_test_ds('EUR-11', 'rotated_pole')
#dm = dm.drop(('lon', 'lat'))
#dm.rename({'rlon': 'lon', 'rlat': 'lat'})
dm['dummy'] = xr.DataArray(dm.dummy.values, dims=('lat', 'lon'), attrs=dm.dummy.attrs)
dm
preproc.rename_cordex(dm)
preproc.rename_cordex(dm).equals(create_test_ds('EUR-11'))
dm = create_test_ds('EUR-11', 'rotated_pole')
#dm = dm.rename({'lon_vertices': 'longitude_vertices', 'lat_vertices': 'latitude_vertices'})
dm
dm.drop_vars(('rlon', 'rlat'))
preproc.rename_cordex(dm)
preproc.rename_cordex(ds)
xr.Dataset(dict)
preproc.rename_cordex(dm)
dm.dummy.reset_coords().assign_coords({'lat': dm.rlat.values, 'lon': dm.rlon.values})
dm.dummy.assign_coords({'rlat': dm.rlat.values, 'rlon': dm.rlon.values})
```
| github_jupyter |
# Synthetic seismic: wedge
We're going to make the famous wedge model, which interpreters can use to visualize the tuning effect. Then we can extend the idea to other kinds of model.
## Make a wedge earth model
```
import matplotlib.pyplot as plt
import numpy as np
length = 80 # x range
depth = 200 # z range
```
### EXERCISE
Make a NumPy array of integers with these dimensions, placing a boundary at a 'depth' of 66 and another at a depth of 133.
A plot of a vertical section through this array should look something like:
|
|
---
|
|
---
|
|
```
# YOUR CODE HERE
# We have to pass dtype=int or we get floats.
# We need ints because we're going to use for indexing later.
model = 1 + np.tri(depth, length, -depth//3, dtype=int)
plt.imshow(model)
plt.colorbar()
plt.show()
```
Now set the upper part of the model — above the wedge — to zero.
```
model[:depth//3,:] = 0
plt.imshow(model)
plt.colorbar()
plt.show()
```
Now we can make some Vp-rho pairs (rock 0, rock 1, and rock 2).
```
rocks = np.array([[2540, 2550], # <-- Upper layer
[2400, 2450], # <-- Wedge
[2650, 2800]]) # <-- Lower layer
```
Now we can use ['fancy indexing'](http://docs.scipy.org/doc/numpy/user/basics.indexing.html) to use `model`, which is an array of 0, 1, and 2, as the indices of the rock property pairs to 'grab' from `rocks`.
```
earth = rocks[model]
```
Now apply `np.prod` (product) to those Vp-rho pairs to get impedance at every sample.
```
imp = np.apply_along_axis(np.prod, arr=earth, axis=-1)
```
## Model seismic reflections
Now we have an earth model — giving us acoustic impedance everywhere in this 2D grid — we define a function to compute reflection coefficients for every trace.
### EXERCISE
Can you write a function to compute the reflection coefficients in this model?
It should implement this equation, where $Z$ is acoustic impedance:
$$ R = \frac{Z_\mathrm{lower} - Z_\mathrm{upper}}{Z_\mathrm{lower} + Z_\mathrm{upper}} $$
The result should be a sparse 2D array of shape (199, 80). The upper interface of the wedge should be positive.
```
def make_rc(imp):
    # YOUR CODE HERE
    # (exercise stub: compute the reflection-coefficient array `rc` from the
    # impedance section `imp` before returning it — as written, `rc` is
    # undefined and calling this raises NameError; the solution follows below)
    return rc
rc = make_rc(imp)
def make_rc(imp):
    """Compute reflection coefficients from an impedance section.

    The RC at each interface is (Z_lower - Z_upper) / (Z_lower + Z_upper),
    evaluated between vertically adjacent samples, so the result has one
    fewer row than the input.
    """
    z_upper, z_lower = imp[:-1, :], imp[1:, :]
    return (z_lower - z_upper) / (z_lower + z_upper)
rc = make_rc(imp)
```
You should be able to plot the RC series like so:
```
plt.figure(figsize=(8,4))
plt.imshow(rc, aspect='auto')
plt.colorbar()
plt.show()
```
### EXERCISE
Implement a Ricker wavelet of frequency $f$ with amplitude $A$ at time $t$ given by:
$$ \mathbf{a}(\mathbf{t}) = (1-2 \pi^2 f^2 \mathbf{t}^2) \mathrm{e}^{-\pi^2 f^2 \mathbf{t}^2} $$
```
# YOUR CODE HERE
```
There is an implementation in `scipy.signal` but it has a 'width parameter' instead of 'frequency' so it's harder to parameterize.
Instead, we'll use `bruges` to make a wavelet:
```
from bruges.filters import ricker
f = 25 # We'll use this later.
w, t = ricker(duration=0.128, dt=0.001, f=f, return_t=True)
plt.plot(t, w)
plt.show()
```
### EXERCISE
Make an RC series 200 samples long, with one positive and one negative RC. Make a corresponding time array.
Pass the RC series to `np.convolve()` along with the wavelet, then plot the resulting synthetic seismogram.
```
# YOUR CODE HERE
temp = np.zeros(200)
temp[66] = 1
temp[133] = -0.5
tr = np.convolve(temp, w, mode='same')
plt.plot(tr)
```
## Synthetic wedge
It's only a little trickier for us to apply 1D convolution to every trace in our 2D reflection coefficient matrix. NumPy provides a function, `apply_along_axis()` to apply any function along any one axis of an n-dimensional array. I don't think it's much faster than looping, but I find it easier to think about.
```
def convolve(trace, wavelet):
    """Convolve a single trace with the wavelet; mode='same' keeps the
    output the same length as the input trace."""
    synthetic = np.convolve(trace, wavelet, mode='same')
    return synthetic
synth = np.apply_along_axis(convolve,
axis=0,
arr=rc,
wavelet=w)
plt.figure(figsize=(12,6))
plt.imshow(synth, cmap="Greys", aspect=0.2)
plt.colorbar()
plt.show()
```
### EXERCISE
Use `ipywidgets.interact` to turn this into an interactive plot, so that we can vary the frequency of the wavelet and see the effect on the synthetic.
Here's a reminder of how to use it:
from ipywidgets import interact
@interact(a=(0, 10, 1), b=(0, 100, 10))
def main(a, b):
"""Do the things!"""
print(a + b)
return
```
# YOUR CODE HERE
from ipywidgets import interact
# Slider from 4 to 100 Hz in steps of 4.
@interact(f=(4, 100, 4))
def show(f):
    """Interactively re-render the synthetic: build a Ricker wavelet of
    frequency f (Hz), convolve it trace-by-trace with the global `rc`
    section, and display the result."""
    w, t = ricker(duration=0.128, dt=0.001, f=f, return_t=True)
    # Convolve down each column (axis 0 = time) of the RC section.
    synth = np.apply_along_axis(convolve,
                                axis=0,
                                arr=rc,
                                wavelet=w)
    plt.figure(figsize=(12, 6))
    plt.imshow(synth, cmap="Greys", aspect=0.2)
    plt.colorbar()
    plt.show()
```
<hr />
<div>
<img src="https://avatars1.githubusercontent.com/u/1692321?s=50"><p style="text-align:center">© Agile Scientific 2020</p>
</div>
| github_jupyter |
```
#@title Environment Setup
import glob
BASE_DIR = "gs://download.magenta.tensorflow.org/models/music_vae/colab2"
print('Installing dependencies...')
!apt-get update -qq && apt-get install -qq libfluidsynth1 fluid-soundfont-gm build-essential libasound2-dev libjack-dev
!pip install -q pyfluidsynth
!pip install -qU magenta
# Hack to allow python to pick up the newly-installed fluidsynth lib.
# This is only needed for the hosted Colab environment.
import ctypes.util

# Keep a handle on the stock finder so the proxy can delegate to it.
orig_ctypes_util_find_library = ctypes.util.find_library


def proxy_find_library(lib):
    """Resolve 'fluidsynth' to the freshly installed shared object and
    defer to the original ctypes finder for every other library."""
    if lib != 'fluidsynth':
        return orig_ctypes_util_find_library(lib)
    return 'libfluidsynth.so.1'


ctypes.util.find_library = proxy_find_library
print('Importing libraries and defining some helper functions...')
from google.colab import files
import magenta.music as mm
from magenta.models.music_vae import configs
from magenta.models.music_vae.trained_model import TrainedModel
import numpy as np
import os
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
# Necessary until pyfluidsynth is updated (>1.2.5).
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
def play(note_sequence):
    """Synthesize and play a NoteSequence in the notebook using fluidsynth."""
    mm.play_sequence(note_sequence, synth=mm.fluidsynth)
def interpolate(model, start_seq, end_seq, num_steps, max_length=32,
                assert_same_length=True, temperature=0.5,
                individual_duration=4.0):
    """Interpolates between a start and end sequence.

    Asks the MusicVAE `model` for `num_steps` sequences morphing from
    `start_seq` to `end_seq`, plays the endpoint reconstructions and the
    midpoint, then plays and plots the whole interpolation concatenated with
    `individual_duration` seconds per step.

    Returns the concatenated interpolation when num_steps > 3, otherwise
    just the midpoint sequence.
    """
    note_sequences = model.interpolate(
        start_seq, end_seq, num_steps=num_steps, length=max_length,
        temperature=temperature,
        assert_same_length=assert_same_length)
    print('Start Seq Reconstruction')
    play(note_sequences[0])
    print('End Seq Reconstruction')
    play(note_sequences[-1])
    print('Mean Sequence')
    play(note_sequences[num_steps // 2])
    print('Start -> End Interpolation')
    interp_seq = mm.sequences_lib.concatenate_sequences(
        note_sequences, [individual_duration] * len(note_sequences))
    play(interp_seq)
    mm.plot_sequence(interp_seq)
    return interp_seq if num_steps > 3 else note_sequences[num_steps // 2]
def download(note_sequence, filename):
    """Write a NoteSequence to a MIDI file and trigger a Colab browser download."""
    mm.sequence_proto_to_midi_file(note_sequence, filename)
    files.download(filename)
print('Done')
#@title Drive Setup
#@markdown If your training sample is in google drive you need to connect it.
#@markdown You can also upload the data to a temporary folder but it will be
#@markdown lost when the session is closed.
from google.colab import drive
drive.mount('/content/drive')
!music_vae_generate \
--config=cat-mel_2bar_big \
--checkpoint_file=/content/drive/My\ Drive/cat-mel_2bar_big.tar \
--mode=interpolate \
--num_outputs=5 \
--input_midi_1=/2bar902_1.mid \
--input_midi_2=/2bar907_1.mid \
--output_dir=/tmp/music_vae/generated2
!convert_dir_to_note_sequences \
--input_dir=/content/drive/My\ Drive/pop909_melody_train \
--output_file=/temp/notesequences.tfrecord \
--log=INFO
!music_vae_train \
--config=cat-mel_2bar_big \
--run_dir=/temp/music_vae/ \
--mode=train \
--examples_path=/temp/notesequences.tfrecord \
--hparams=max_seq_len=32, z_size=512, free_bits=0, max_beta=0.5, beta_rate=0.99999, batch_size=512, grad_clip=1.0, clip_mode='global_norm', grad_norm_clip_to_zero=10000, learning_rate=0.01, decay_rate=0.9999, min_learning_rate=0.00001
!music_vae_generate \
--config=cat-mel_2bar_big \
--input_midi_1=/064.mid \
--input_midi_2=/058.mid \
--checkpoint_file=/temp/music_vae/train/model.ckpt-1795 \
--mode=interpolate \
--num_outputs=3 \
--output_dir=/tmp/music_vae/generated8
!music_vae_generate \
--config=cat-mel_2bar_big \
--input_midi_1=/2bar058_1.mid \
--input_midi_2=/2bar064_1.mid \
--checkpoint_file=/temp/music_vae/train/model.ckpt-1795 \
--mode=interpolate \
--num_outputs=5 \
--output_dir=/tmp/music_vae/generated9
!music_vae_generate \
--config=cat-mel_2bar_big \
--input_midi_1=/756.mid \
--input_midi_2=/746.mid \
--checkpoint_file=/temp/music_vae/train/model.ckpt-1795 \
--mode=interpolate \
--num_outputs=3 \
--output_dir=/tmp/music_vae/generated10
!music_vae_generate \
--config=cat-mel_2bar_big \
--input_midi_1=/2bar902_1.mid \
--input_midi_2=/2bar907_1.mid \
--checkpoint_file=/temp/music_vae/train/model.ckpt-1795 \
--mode=interpolate \
--num_outputs=5 \
--output_dir=/tmp/music_vae/generated13
```
| github_jupyter |
### Counter
The `Counter` dictionary is one that specializes for helping with, you guessed it, counters!
Actually we used a `defaultdict` earlier to do something similar:
```
from collections import defaultdict, Counter
```
Let's say we want to count the frequency of each character in a string:
```
sentence = 'the quick brown fox jumps over the lazy dog'
counter = defaultdict(int)
for c in sentence:
counter[c] += 1
counter
```
We can do the same thing using a `Counter` - unlike the `defaultdict` we don't specify a default factory - it's always zero (it's a counter after all):
```
counter = Counter()
for c in sentence:
counter[c] += 1
counter
```
OK, so if that's all there was to `Counter` it would be pretty odd to have a data structure different than `OrderedDict`.
But `Counter` has a slew of additional methods which make sense in the context of counters:
1. Iterate through all the elements of counters, but repeat the elements as many times as their frequency
2. Find the `n` most common (by frequency) elements
3. Decrement the counters based on another `Counter` (or iterable)
4. Increment the counters based on another `Counter` (or iterable)
5. Specialized constructor for additional flexibility
If you are familiar with multisets, then this is essentially a data structure that can be used for multisets.
#### Constructor
It is so common to create a frequency distribution of elements in an iterable, that this is supported automatically:
```
c1 = Counter('able was I ere I saw elba')
c1
```
Of course this works for iterables in general, not just strings:
```
import random
random.seed(0)
my_list = [random.randint(0, 10) for _ in range(1_000)]
c2 = Counter(my_list)
c2
```
We can also initialize a `Counter` object by passing in keyword arguments, or even a dictionary:
```
c2 = Counter(a=1, b=10)
c2
c3 = Counter({'a': 1, 'b': 10})
c3
```
Technically we can store values other than integers in a `Counter` object - it's possible but of limited use since the default is still `0` irrespective of what other values are contained in the object.
#### Finding the n most Common Elements
Let's find the `n` most common words (by frequency) in a paragraph of text. Words are considered delimited by white space or punctuation marks such as `.`, `,`, `!`, etc - basically anything except a character or a digit.
This is actually quite difficult to do, so we'll use a close enough approximation that will cover most cases just fine, using a regular expression:
```
import re
sentence = '''
his module implements pseudo-random number generators for various distributions.
For integers, there is uniform selection from a range. For sequences, there is uniform selection of a random element, a function to generate a random permutation of a list in-place, and a function for random sampling without replacement.
On the real line, there are functions to compute uniform, normal (Gaussian), lognormal, negative exponential, gamma, and beta distributions. For generating distributions of angles, the von Mises distribution is available.
Almost all module functions depend on the basic function random(), which generates a random float uniformly in the semi-open range [0.0, 1.0). Python uses the Mersenne Twister as the core generator. It produces 53-bit precision floats and has a period of 2**19937-1. The underlying implementation in C is both fast and threadsafe. The Mersenne Twister is one of the most extensively tested random number generators in existence. However, being completely deterministic, it is not suitable for all purposes, and is completely unsuitable for cryptographic purposes.'''
# FIX: use a raw string for the regex — '\W' in a plain string literal is an
# invalid escape sequence (SyntaxWarning/DeprecationWarning on modern Python).
# NOTE(review): splitting on single non-word characters leaves empty strings
# in the result wherever delimiters are consecutive; they will show up in the
# Counter built below.
words = re.split(r'\W', sentence)
words
```
But what are the frequencies of each word, and what are the 5 most frequent words?
```
word_count = Counter(words)
word_count
word_count.most_common(5)
```
#### Using Repeated Iteration
```
c1 = Counter('abba')
c1
for c in c1:
print(c)
```
However, we can have an iteration that repeats the counter keys as many times as the indicated frequency:
```
for c in c1.elements():
print(c)
```
What's interesting about this functionality is that we can turn this around and use it as a way to create an iterable that has repeating elements.
Suppose we want to to iterate through a list of (integer) numbers that are each repeated as many times as the number itself.
For example 1 should repeat once, 2 should repeat twice, and so on.
This is actually not that easy to do!
Here's one possible way to do it:
```
l = []
for i in range(1, 11):
for _ in range(i):
l.append(i)
print(l)
```
But we could use a `Counter` object as well:
```
c1 = Counter()
for i in range(1, 11):
c1[i] = i
c1
print(c1.elements())
```
So you'll notice that we have a `chain` object here. That's one big advantage to using the `Counter` object - the repeated iterable does not actually exist as list like our previous implementation - this is a lazy iterable, so this is far more memory efficient.
And we can iterate through that `chain` quite easily:
```
for i in c1.elements():
print(i, end=', ')
```
Just for fun, how could we reproduce this functionality using a plain dictionary?
```
class RepeatIterable:
    """Dict-backed container whose missing keys default to a count of 0,
    mimicking Counter's behaviour for absent keys."""

    def __init__(self, **kwargs):
        self.d = kwargs

    def __setitem__(self, key, value):
        self.d[key] = value

    def __getitem__(self, key):
        # Materialize the key with a default of 0 on first access.
        if key not in self.d:
            self.d[key] = 0
        return self.d[key]
r = RepeatIterable(x=10, y=20)
r.d
r['a'] = 100
r['a']
r['b']
r.d
```
Now we have to implement that `elements` iterator:
```
class RepeatIterable:
    """Dict-backed counter whose elements() yields each key as many times
    as its stored count — a minimal stand-in for Counter.elements()."""

    def __init__(self, **kwargs):
        self.d = kwargs

    def __setitem__(self, key, value):
        self.d[key] = value

    def __getitem__(self, key):
        # Missing keys default to 0, like a Counter.
        if key not in self.d:
            self.d[key] = 0
        return self.d[key]

    def elements(self):
        """Lazily yield every key repeated according to its count."""
        for key, count in self.d.items():
            yield from [key] * count
r = RepeatIterable(a=2, b=3, c=1)
for e in r.elements():
print(e, end=', ')
```
#### Updating from another Iterable or Counter
Lastly let's see how we can update a `Counter` object using another `Counter` object.
When both objects have the same key, we have a choice - do we add the count of one to the count of the other, or do we subtract them?
We can do either, by using the `update` (additive) or `subtract` methods.
```
c1 = Counter(a=1, b=2, c=3)
c2 = Counter(b=1, c=2, d=3)
c1.update(c2)
print(c1)
```
On the other hand we can subtract instead of add counters:
```
c1 = Counter(a=1, b=2, c=3)
c2 = Counter(b=1, c=2, d=3)
c1.subtract(c2)
print(c1)
```
Notice the key `d` - since `Counters` default missing keys to `0`, when `d: 3` in `c2` was subtracted from `c1`, the counter for `d` was defaulted to `0`.
Just as the constructor for a `Counter` can take different arguments, so too can the `update` and `subtract` methods.
```
c1 = Counter('aabbccddee')
print(c1)
c1.update('abcdef')
print(c1)
```
#### Mathematical Operations
These `Counter` objects also support several other mathematical operations when both operands are `Counter` objects. In all these cases the result is a new `Counter` object.
* `+`: same as `update`, but returns a new `Counter` object instead of an in-place update.
* `-`: subtracts one counter from another, but discards zero and negative values
* `&`: keeps the **minimum** of the key values
* `|`: keeps the **maximum** of the key values
```
c1 = Counter('aabbcc')
c2 = Counter('abc')
c1 + c2
c1 - c2
c1 = Counter(a=5, b=1)
c2 = Counter(a=1, b=10)
c1 & c2
c1 | c2
```
The **unary** `+` can also be used to remove any non-positive count from the Counter:
```
c1 = Counter(a=10, b=-10)
+c1
```
The **unary** `-` changes the sign of each counter, and removes any non-positive result:
```
-c1
```
##### Example
Let's assume you are working for a company that produces different kinds of widgets.
You are asked to identify the top 3 best selling widgets.
You have two separate data sources - one data source can give you a history of all widget orders (widget name, quantity), while another data source can give you a history of widget refunds (widget name, quantity refunded).
From these two data sources, you need to determine the top selling widgets (taking refunds into account, of course).
Let's simulate both of these lists:
```
import random
random.seed(0)
widgets = ['battery', 'charger', 'cable', 'case', 'keyboard', 'mouse']
orders = [(random.choice(widgets), random.randint(1, 5)) for _ in range(100)]
refunds = [(random.choice(widgets), random.randint(1, 3)) for _ in range(20)]
orders
refunds
```
Let's first load these up into counter objects.
To do this we're going to iterate through the various lists and update our counters:
```
sold_counter = Counter()
refund_counter = Counter()
for order in orders:
sold_counter[order[0]] += order[1]
for refund in refunds:
refund_counter[refund[0]] += refund[1]
sold_counter
refund_counter
net_counter = sold_counter - refund_counter
net_counter
net_counter.most_common(3)
```
We could actually do this a little differently, not using loops to populate our initial counters.
Recall the `repeat()` function in `itertools`:
```
from itertools import repeat
list(repeat('battery', 5))
orders[0]
list(repeat(*orders[0]))
```
So we could use the `repeat()` method to essentially repeat each widget for each item of `orders`. We need to chain this up for each element of `orders` - this will give us a single iterable that we can then use in the constructor for a `Counter` object. We can do this using a generator expression for example:
```
from itertools import chain
list(chain.from_iterable(repeat(*order) for order in orders))
order_counter = Counter(chain.from_iterable(repeat(*order) for order in orders))
order_counter
```
What if we don't want to use a `Counter` object.
We can still do it (relatively easily) as follows:
```
net_sales = {}
for order in orders:
key = order[0]
cnt = order[1]
net_sales[key] = net_sales.get(key, 0) + cnt
for refund in refunds:
key = refund[0]
cnt = refund[1]
net_sales[key] = net_sales.get(key, 0) - cnt
# eliminate non-positive values (to mimic what - does for Counters)
net_sales = {k: v for k, v in net_sales.items() if v > 0}
# we now have to sort the dictionary
# this means sorting the keys based on the values
sorted_net_sales = sorted(net_sales.items(), key=lambda t: t[1], reverse=True)
# Top three
sorted_net_sales[:3]
```
| github_jupyter |
# Download Data
This notebook downloads the necessary data to replicate the results of our paper on Gender Inequalities on Wikipedia.
Note that we use a file named `dbpedia_config.py` where we set which language editions we will we study, as well as where to save and load data files.
By [Eduardo Graells-Garrido](http://carnby.github.io).
```
!cat dbpedia_config.py
import subprocess
import os
import dbpedia_config

target = dbpedia_config.DATA_FOLDER
languages = dbpedia_config.LANGUAGES


def _download_if_missing(url, destination):
    """Fetch *url* into *destination* with wget, skipping files already present."""
    if not os.path.exists(destination):
        subprocess.call(['/usr/bin/wget', url, '-O', destination],
                        stdout=None, stderr=None)


# Ontology
# note that previously (2014 version and earlier) this was in bzip format.
_download_if_missing('http://downloads.dbpedia.org/2015-10/dbpedia_2015-10.owl',
                     '{0}/dbpedia.owl'.format(target))

# current version: http://wiki.dbpedia.org/Downloads2015-04
db_uri = 'http://downloads.dbpedia.org/2015-10/core-i18n'

# Per-language dump files; all follow the same naming scheme.
per_language_stems = ['instance_types', 'interlanguage_links', 'labels',
                      'mappingbased_literals', 'mappingbased_objects']
for lang in languages:
    for stem in per_language_stems:
        _download_if_missing(
            '{1}/{0}/{2}_{0}.ttl.bz2'.format(lang, db_uri, stem),
            '{0}/{1}_{2}.ttl.bz2'.format(target, stem, lang))

# http://oldwiki.dbpedia.org/Datasets/NLP#h172-7
dbpedia_gender = 'http://wifo5-04.informatik.uni-mannheim.de/downloads/datasets/genders_en.nt.bz2'
_download_if_missing(dbpedia_gender, '{0}/genders_en.nt.bz2'.format(target))

# http://www.davidbamman.com/?p=12
# note that, in previous versions, this was a text file. now it's a bzipped file with n-triplets.
wikipedia_gender = 'http://www.ark.cs.cmu.edu/bio/data/wiki.genders.txt'
# BUG FIX: the original fetched `dbpedia_gender` here, saving the DBpedia
# genders dump under the wiki.genders.txt name instead of Bamman's dataset.
_download_if_missing(wikipedia_gender, '{0}/wiki.genders.txt'.format(target))

# Long abstracts for the main language only.
# BUG FIX: the original existence check looked for a .nt.bz2 file while the
# download saved a .ttl.bz2 file, so the (large) file was re-downloaded on
# every run.
_download_if_missing(
    '{1}/{0}/long_abstracts_{0}.ttl.bz2'.format(dbpedia_config.MAIN_LANGUAGE, db_uri),
    '{0}/long_abstracts_{1}.ttl.bz2'.format(target, dbpedia_config.MAIN_LANGUAGE))

# network data for english only
# NOTE(review): the remote file uses the .nt.bz2 extension while the local
# copy is saved as .ttl.bz2 — kept as in the original; verify the URL still
# resolves before "fixing" either side.
_download_if_missing(
    '{1}/{0}/page_links_{0}.nt.bz2'.format(dbpedia_config.MAIN_LANGUAGE, db_uri),
    '{0}/page_links_{1}.ttl.bz2'.format(target, dbpedia_config.MAIN_LANGUAGE))
```
| github_jupyter |
# Quantum Cryptography: Quantum Key Distribution
***
### Contributors:
A.J. Rasmusson, Richard Barney
Have you ever wanted to send a super secret message to a friend? Then you need a key to encrypt your message, and your friend needs the same key to decrypt your message. But, how do you send a super secret key to your friend without your eavesdropping enemies finding out what it is? Businesses and governments face this problem every day. People are always innovating new ways to intercept personal data or other sensitive information. Ideally, we'd like to find a way to share information that cannot be intercepted. [Quantum key distribution](https://en.wikipedia.org/wiki/Quantum_key_distribution) (QKD) was created as a solution to this problem. In this tutorial, you'll learn about and implement a version of the [BB84 QKD protocol](https://en.wikipedia.org/wiki/BB84), developed by Bennet and Brassard, to generate a secure, [one-time pad](https://en.wikipedia.org/wiki/One-time_pad) encryption key.
Quantum key distribution is all about making the right information publicly known at the right times (and keeping the secret information secret). This tutorial will take you through a quantum key distribution between you (Alice) and your friend Bob. After you get a feel for the ropes by sending your first encrypted message to Bob, we'll introduce Eve--your eavesdropping enemy. You'll learn how to detect Eve's presence and thus prevent her from intercepting your super secret key and decrypting your messages.
```
#import all the packages
# Checking the version of PYTHON
import sys
if sys.version_info < (3,5):
raise Exception('Please use Python version 3.5 or greater.')
#append to system path so qiskit and Qconfig can be found from home directory
sys.path.append('../qiskit-sdk-py/')
# Import the QuantumProgram and configuration
from qiskit import QuantumProgram
#import Qconfig
#other useful packages
import math
```
## Part 1: Encrypting and Decrypting a Message
### Pick Your Super Secret Message
The super secret message you want to send must be the same or less than the length of the super secret key.
If the key is shorter than the message, you will be forced to use parts of the key more than once. This may allow your lurking enemies to pick up a pattern in your encrypted message and possibly decrypt it. (As you'll see later on, we need to start out with a key at least double the number of characters used in your message. For now, don't worry about those details, pick your message! For this tutorial, we picked the initial key to be 3x greater--just to be safe.) Enter your message on the line below which reads "mes = ".
```
#Super secret message
mes = 'hello world'
print('Your super secret message: ',mes)

# Start with a raw key three times the message length; basis sifting and the
# later spot check will shrink it, and the surplus keeps the final key usable.
n = len(mes)*3

# The quantum circuits below run at most 10 qubits per batch, so record the
# batch sizes: as many full batches of 10 as fit, plus any remainder.
full_batches, remainder = divmod(n, 10)
nlist = [10] * full_batches
if remainder:
    nlist.append(remainder)
print('Initial key length: ',n)
```
### The Big Picture
Now that you (Alice) have the key, here's the big question: how are we going to get your key to Bob without eavesdroppers intercepting it? Quantum key distribution! Here are the steps and big picture (the effects of eavesdropping will be discussed later on):
1. You (Alice) generate a random string--the key you wish to give to Bob.
2. You (Alice) convert your string bits into corresponding qubits.
3. You (Alice) send those qubits to Bob, BUT! you randomly rotate some into a superposition. This effectively turns your key into random noise. (This is good because your lurking enemies might measure your qubits.)
4. Bob receives your qubits AND randomly rotates some qubits in the opposite direction before measuring.
5. Alice and Bob publicly share which qubits they rotated. When they both did the same thing (either both did nothing or both rotated), they know the original key bit value made it to Bob! (Overall, you can see that only some of the bits from Alice's original key should make it.)
6. Alice and Bob create their keys. Alice modifies her original key by keeping only the bits that she knows made it to Bob. Bob does the same.
Alice and Bob now have matching keys! They can now use this key to encrypt and decrypt their messages.
<img src='QKDnoEve.png'>
Here we see Alice sending the initial key to Bob. She sends her qubits and rotates them based on her rotation string. Bob rotates the incoming qubits based on his rotation string and measures the qubits.
### Step 1: Alice Generates a Random Key
You and your friend need a super secret key so you can encrypt your message and your friend can decrypt it. Let's make a key--a pure random key.
To make a purely random string, we'll use quantum superposition. A qubit in the xy-plane of the [Bloch sphere](https://quantumexperience.ng.bluemix.net/qx/tutorial?sectionId=beginners-guide&page=004-The_Weird_and_Wonderful_World_of_the_Qubit~2F001-The_Weird_and_Wonderful_World_of_the_Qubit) is in a 50-50 [superposition](https://quantumexperience.ng.bluemix.net/qx/tutorial?sectionId=beginners-guide&page=005-Single-Qubit_Gates~2F002-Creating_superposition); 50% of the time it'll be measured as 0, and 50% of the time it'll be measured as 1. We have Alice prepare several qubits like this and measure them to generate a purely random string of 1s and 0s.
```
# Make random strings of length string_length
def randomStringGen(string_length):
    """Return a random bit string of length string_length.

    Each bit comes from measuring a qubit put into an equal superposition by a
    Hadamard gate, so the string is (physically) random rather than pseudo-random.
    NOTE(review): uses the legacy pre-0.6 qiskit QuantumProgram API.
    """
    #output variables used to access quantum computer results at the end of the function
    output_list = []
    output = ''
    #start up your quantum program
    qp = QuantumProgram()
    backend = 'local_qasm_simulator'
    circuits = ['rs']
    #run circuit in batches of 10 qubits for fastest results. The results
    #from each run will be appended and then clipped down to the right n size.
    n = string_length
    temp_n = 10
    temp_output = ''
    for i in range(math.ceil(n/temp_n)):
        #initialize quantum registers for circuit
        # NOTE(review): the register/circuit names 'q','c','rs' are re-created on
        # every pass of the same QuantumProgram; this relies on the legacy API
        # tolerating re-registration -- confirm against the pinned qiskit version.
        q = qp.create_quantum_register('q',temp_n)
        c = qp.create_classical_register('c',temp_n)
        rs = qp.create_circuit('rs',[q],[c])
        #create temp_n number of qubits all in superpositions
        #(this inner i shadows the outer batch counter, which is not used again)
        for i in range(temp_n):
            rs.h(q[i]) #the .h gate is the Hadamard gate that makes superpositions
            rs.measure(q[i],c[i])
        #execute circuit and extract 0s and 1s from key
        result = qp.execute(circuits, backend, shots=1)
        counts = result.get_counts('rs')
        result_key = list(result.get_counts('rs').keys())
        #shots=1 gives exactly one measured bitstring per batch
        temp_output = result_key[0]
        output += temp_output
    #return output clipped to size of desired string length
    return output[:n]
key = randomStringGen(n)
print('Initial key: ',key)
```
### Steps 2-4: Send Alice's Qubits to Bob
Alice turns her key bits into corresponding qubit states. If a bit is a 0 she will prepare a qubit on the positive z axis (the default |0⟩ state). If the bit is a 1 she will prepare a qubit on the negative z axis (the |1⟩ state). Next, if Alice has a 1 in her rotate string, she rotates her key qubit with a [Hadamard](https://quantumexperience.ng.bluemix.net/qx/tutorial?sectionId=beginners-guide&page=005-Single-Qubit_Gates~2F002-Creating_superposition) gate. She then sends the qubit to Bob. If Bob has a 1 in his rotate string, he rotates the incoming qubit in the opposite direction with a Hadamard gate. Bob then measures the state of the qubit and records the result. The quantum circuit below executes each of these steps.
```
#generate random rotation strings for Alice and Bob
#(a '1' at a position means that qubit gets a Hadamard basis rotation)
Alice_rotate = randomStringGen(n)
Bob_rotate = randomStringGen(n)
print("Alice's rotation string:",Alice_rotate)
print("Bob's rotation string: ",Bob_rotate)
#start up your quantum program
backend = 'local_qasm_simulator'
shots = 1
circuits = ['send_over']
Bob_result = ''
for ind,l in enumerate(nlist):
    #define temp variables used in breaking up quantum program if message length > 10
    #(when l == 10 both branches slice the same window, so the two cases agree)
    if l < 10:
        key_temp = key[10*ind:10*ind+l]
        Ar_temp = Alice_rotate[10*ind:10*ind+l]
        Br_temp = Bob_rotate[10*ind:10*ind+l]
    else:
        key_temp = key[l*ind:l*(ind+1)]
        Ar_temp = Alice_rotate[l*ind:l*(ind+1)]
        Br_temp = Bob_rotate[l*ind:l*(ind+1)]
    #start up the rest of your quantum program
    qp2 = QuantumProgram()
    q = qp2.create_quantum_register('q',l)
    c = qp2.create_classical_register('c',l)
    send_over = qp2.create_circuit('send_over',[q],[c])
    #prepare qubits based on key; add Hadamard gates based on Alice's and Bob's
    #rotation strings
    #NOTE(review): this loop rebinds the global n (the key length) to the last
    #qubit index; later cells do not appear to rely on n afterwards -- confirm.
    for i,j,k,n in zip(key_temp,Ar_temp,Br_temp,range(0,len(key_temp))):
        i = int(i)
        j = int(j)
        k = int(k)
        #key bit 1 -> prepare |1> with an X gate (otherwise leave |0>)
        if i > 0:
            send_over.x(q[n])
        #Look at Alice's rotation string
        if j > 0:
            send_over.h(q[n])
        #Look at Bob's rotation string (H is self-inverse, so matching
        #rotations cancel and Bob recovers Alice's original bit)
        if k > 0:
            send_over.h(q[n])
        send_over.measure(q[n],c[n])
    #execute quantum circuit
    result_so = qp2.execute(circuits, backend, shots=shots)
    counts_so = result_so.get_counts('send_over')
    result_key_so = list(result_so.get_counts('send_over').keys())
    #reverse the bitstring: qiskit lists classical bit 0 as the rightmost character
    Bob_result += result_key_so[0][::-1]
print("Bob's results: ", Bob_result)
```
### Steps 5-6: Compare Rotation Strings and Make Keys
Alice and Bob can now generate a secret quantum encryption key. First, they publicly share their rotation strings. If a bit in Alice's rotation string is the same as the corresponding bit in Bob's they know that Bob's result is the same as what Alice sent. They keep these bits to form the new key. (Alice based on her original key and Bob based on his measured results).
```
def makeKey(rotation1,rotation2,results):
    """Sift a key: keep results[i] only where the two rotation strings agree.

    rotation1, rotation2 -- bit strings of '0'/'1' characters (the publicly
    shared rotation choices); results -- the bits to sift, indexed in step
    with the rotation strings. Returns the concatenation of the kept bits.
    """
    kept = []
    for idx, (r1, r2) in enumerate(zip(rotation1, rotation2)):
        if r1 == r2:
            kept.append(results[idx])
    return ''.join(kept)
# Build the sifted keys: keep only the positions where Alice's and Bob's
# rotation choices agree (Alice sifts her original key, Bob his measurements).
Akey = makeKey(Bob_rotate,Alice_rotate,key)
Bkey = makeKey(Bob_rotate,Alice_rotate,Bob_result)
print("Alice's key:",Akey)
print("Bob's key: ",Bkey)
```
### Pause
We see that using only the public knowledge of Bob's and Alice's rotation strings, Alice and Bob can create the same identical key based on Alice's initial random key and Bob's results. Wow!! :D
<strong>If Alice's and Bob's key length is less than the message</strong>, the encryption is compromised. If this is the case for you, rerun all the cells above and see if you get a longer key. (We set the initial key length to 3x the message length to avoid this, but it's still possible.)
### Encrypt (and decrypt) using quantum key
We can now use our super secret key to encrypt and decrypt messages!! (of length less than the key). Note: the below "encryption" method is not powerful and should not be used for anything you want secure; it's just for fun. In real life, the super secret key you made and shared with Bob would be used in a much more sophisticated encryption algorithm.
```
#make key same length as message
shortened_Akey = Akey[:len(mes)]
encoded_m=''
#encrypt message mes using encryption key shortened_Akey
#NOTE(review): by operator precedence this computes ord(m) + (2*ord(k) % 256),
#not (ord(m) + 2*ord(k)) % 256; decryption below subtracts the identical
#offset, so the round trip is still exact.
for m,k in zip(mes,shortened_Akey):
    encoded_c = chr(ord(m) + 2*ord(k) % 256)
    encoded_m += encoded_c
print('encoded message: ',encoded_m)
#make key same length as message
shortened_Bkey = Bkey[:len(mes)]
#decrypt message using Bob's copy of the key (identical to Alice's if no Eve)
result = ''
for m,k in zip(encoded_m,shortened_Bkey):
    encoded_c = chr(ord(m) - 2*ord(k) % 256)
    result += encoded_c
print('recovered message:',result)
```
# Part 2: Eve the Eavesdropper
What if someone is eavesdropping on Alice and Bob's line of communication? This process of random string making and rotations using quantum mechanics is only useful if it's robust against eavesdroppers.
Eve is your lurking enemy. She eavesdrops by intercepting your transmission to Bob. To be sneaky, Eve must send on the intercepted transmission--otherwise Bob will never receive anything and know that something is wrong!
Let's explain further why Eve can be detected. If Eve intercepts a qubit from Alice, she will not know if Alice rotated its state or not. Eve can only measure a 0 or 1. And she can't measure the qubit and then send the same qubit on, because her measurement will destroy the quantum state. Consequently, Eve doesn't know when or when not to rotate to recreate Alice's original qubit. She may as well send on qubits that have not been rotated, hoping to get the rotation right 50% of the time. After she sends these qubits to Bob, Alice and Bob can compare select parts of their keys to see if they have discrepancies in places they should not.
The scheme goes as follows:
1. Alice sends her qubit transmission to Bob--but Eve measures the results
2. To avoid suspicion, Eve prepares qubits corresponding to the bits she measured and sends them to Bob.
3. Bob and Alice make their keys like normal
4. Alice and Bob randomly select the same parts of their keys to share publicly
5. If the selected part of the keys don't match, they know Eve was eavesdropping
6. If the selected part of the keys DO match, they can be confident Eve wasn't eavesdropping
7. They throw away the part of the key they made public and encrypt and decrypt super secret messages with the portion of the key they have left.
<img src="QKD.png">
Here we see Alice sending her qubits, rotating them based on her rotation string, and Eve intercepting the transmission. Eve then sends her results on to Bob who--like normal--rotates and measures the qubits.
### Step 1: Eve intercepts Alice's transmission
The code below has Alice sending her qubits and Eve intercepting them. It then displays the results of Eve's measurements.
```
#start up your quantum program
backend = 'local_qasm_simulator'
shots = 1
circuits = ['Eve']
#Eve intercepts Alice's qubits and measures them directly; she cannot know
#which ones were rotated, so she never undoes Alice's Hadamards.
Eve_result = ''
for ind,l in enumerate(nlist):
    #define temp variables used in breaking up quantum program if message length > 10
    if l < 10:
        key_temp = key[10*ind:10*ind+l]
        Ar_temp = Alice_rotate[10*ind:10*ind+l]
    else:
        key_temp = key[l*ind:l*(ind+1)]
        Ar_temp = Alice_rotate[l*ind:l*(ind+1)]
    #start up the rest of your quantum program
    qp3 = QuantumProgram()
    q = qp3.create_quantum_register('q',l)
    c = qp3.create_classical_register('c',l)
    Eve = qp3.create_circuit('Eve',[q],[c])
    #prepare qubits based on key; add Hadamard gates based on Alice's
    #rotation string only (Eve measures without any compensating rotation)
    for i,j,n in zip(key_temp,Ar_temp,range(0,len(key_temp))):
        i = int(i)
        j = int(j)
        if i > 0:
            Eve.x(q[n])
        if j > 0:
            Eve.h(q[n])
        Eve.measure(q[n],c[n])
    #execute
    result_eve = qp3.execute(circuits, backend, shots=shots)
    counts_eve = result_eve.get_counts('Eve')
    result_key_eve = list(result_eve.get_counts('Eve').keys())
    #reverse the bitstring: qiskit lists classical bit 0 as the rightmost character
    Eve_result += result_key_eve[0][::-1]
print("Eve's results: ", Eve_result)
```
### Step 2: Eve deceives Bob
Eve sends her measured qubits on to Bob to deceive him! Since she doesn't know which of the qubits she measured were in a superposition or not, she doesn't even know whether to send the exact values she measured or opposite values. In the end, sending on the exact values is just as good a deception as mixing them up again.
```
#start up your quantum program
backend = 'local_qasm_simulator'
shots = 1
circuits = ['Eve2']
Bob_badresult = ''
for ind,l in enumerate(nlist):
    #define temp variables used in breaking up quantum program if message length > 10
    if l < 10:
        key_temp = key[10*ind:10*ind+l]
        Eve_temp = Eve_result[10*ind:10*ind+l]
        Br_temp = Bob_rotate[10*ind:10*ind+l]
    else:
        key_temp = key[l*ind:l*(ind+1)]
        Eve_temp = Eve_result[l*ind:l*(ind+1)]
        Br_temp = Bob_rotate[l*ind:l*(ind+1)]
    #start up the rest of your quantum program
    qp4 = QuantumProgram()
    q = qp4.create_quantum_register('q',l)
    c = qp4.create_classical_register('c',l)
    Eve2 = qp4.create_circuit('Eve2',[q],[c])
    #prepare qubits from Eve's measured bits (NOT Alice's originals);
    #Bob still applies his Hadamards according to his rotation string
    for i,j,n in zip(Eve_temp,Br_temp,range(0,len(key_temp))):
        i = int(i)
        j = int(j)
        if i > 0:
            Eve2.x(q[n])
        if j > 0:
            Eve2.h(q[n])
        Eve2.measure(q[n],c[n])
    #execute
    result_eve = qp4.execute(circuits, backend, shots=shots)
    counts_eve = result_eve.get_counts('Eve2')
    result_key_eve = list(result_eve.get_counts('Eve2').keys())
    #reverse the bitstring: qiskit lists classical bit 0 as the rightmost character
    Bob_badresult += result_key_eve[0][::-1]
print("Bob's previous results (w/o Eve):",Bob_result)
print("Bob's results from Eve:\t\t ",Bob_badresult)
```
### Steps 3-4: Spot Check
Alice and Bob know Eve is lurking out there. They decide to pick a few random values from their individual keys and compare with each other. This requires making these subsections of their keys public (so the other can see them). If any of the values in their keys are different, they know Eve's eavesdropping messed up the superposition Alice originally created! If they find all the values are identical, they can be reasonably confident that Eve wasn't eavesdropping. Of course, making some random key values known to the public will require them to remove those values from their keys because those parts are no longer super secret. Also, Alice and Bob need to make sure they are sharing corresponding values from their respective keys.
Let's make a check key. If the randomly generated check key is a one, Alice and Bob will compare that part of their keys with each other (aka make publicly known).
```
#make keys for Alice and Bob (Bob's key now comes from the results Eve forwarded)
Akey = makeKey(Bob_rotate,Alice_rotate,key)
Bkey = makeKey(Bob_rotate,Alice_rotate,Bob_badresult)
print("Alice's key: ",Akey)
print("Bob's key: ",Bkey)
#random bitmask over the sifted key: a '1' marks a position Alice and Bob
#will reveal publicly for the spot check
check_key = randomStringGen(len(Akey))
print('spots to check:',check_key)
```
### Steps 5-7: Compare strings and detect Eve
Alice and Bob compare the subsections of their keys. If they notice any discrepancy, they know that Eve was trying to intercept their message. They create new keys by throwing away the parts they shared publicly. It's possible that by throwing these parts away, they will not have a key long enough to encrypt the message and they will have to try again.
```
#find which values in rotation string were used to make the key
#(sift the rotation strings themselves with the same agreement mask)
Alice_keyrotate = makeKey(Bob_rotate,Alice_rotate,Alice_rotate)
Bob_keyrotate = makeKey(Bob_rotate,Alice_rotate,Bob_rotate)
# Detect Eve's interference
#extract a subset of Alice's key
sub_Akey = ''
sub_Arotate = ''
count = 0
#loop variables i,j are unused; the zip only bounds count to len(Akey),
#which matches len(check_key)
for i,j in zip(Alice_rotate,Akey):
    if int(check_key[count]) == 1:
        sub_Akey += Akey[count]
        sub_Arotate += Alice_keyrotate[count]
    count += 1
#extract a subset of Bob's key (same mask, so positions correspond)
sub_Bkey = ''
sub_Brotate = ''
count = 0
for i,j in zip(Bob_rotate,Bkey):
    if int(check_key[count]) == 1:
        sub_Bkey += Bkey[count]
        sub_Brotate += Bob_keyrotate[count]
    count += 1
print("subset of Alice's key:",sub_Akey)
print("subset of Bob's key: ",sub_Bkey)
#compare Alice and Bob's key subsets; any mismatch means the qubits were
#disturbed in transit, i.e. an eavesdropper was present
secure = True
for i,j in zip(sub_Akey,sub_Bkey):
    if i == j:
        secure = True
    else:
        secure = False
        break;
if not secure:
    print('Eve detected!')
else:
    print('Eve escaped detection!')
#sub_Akey and sub_Bkey are public knowledge now, so we remove them from Akey and Bkey
if secure:
    new_Akey = ''
    new_Bkey = ''
    for index,i in enumerate(check_key):
        if int(i) == 0:
            new_Akey += Akey[index]
            new_Bkey += Bkey[index]
    print('new A and B keys: ',new_Akey,new_Bkey)
    if(len(mes)>len(new_Akey)):
        print('Your new key is not long enough.')
```
# Probability of Detecting Eve
The longer the key, the more likely you will detect Eve. In fact, the [probability](https://en.wikipedia.org/wiki/Quantum_key_distribution#Intercept_and_resend) goes up as a function of $1 - (3/4)^n$ where n is the number of bits Alice and Bob compare in their spot check. So, the longer the key, the more bits you can use to compare and the more likely you will detect Eve.
```
#!!! you may need to execute this cell twice in order to see the output due to a problem with matplotlib
import matplotlib.pyplot as plt
import numpy as np

# Probability that at least one of n compared bits reveals Eve: 1 - (3/4)^n.
# Each intercepted-and-resent bit has a 1/4 chance of disagreeing in the spot check.
x = np.arange(0., 30.0)
y = 1 - (3/4)**x
plt.plot(x, y)
plt.title('Probability of detecting Eve')
plt.xlabel('# of key bits compared')
plt.ylabel('Probability of detecting Eve')
plt.show()
```
| github_jupyter |
##### Copyright 2018 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#@title MIT License
#
# Copyright (c) 2017 François Chollet
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
```
# Train your first neural network: basic classification
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/tutorials/keras/basic_classification"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/keras/basic_classification.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/tutorials/keras/basic_classification.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
</table>
This guide trains a neural network model to classify images of clothing, like sneakers and shirts. It's okay if you don't understand all the details, this is a fast-paced overview of a complete TensorFlow program with the details explained as we go.
This guide uses [tf.keras](https://www.tensorflow.org/guide/keras), a high-level API to build and train models in TensorFlow.
```
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
print(tf.__version__)
```
## Import the Fashion MNIST dataset
This guide uses the [Fashion MNIST](https://github.com/zalandoresearch/fashion-mnist) dataset which contains 70,000 grayscale images in 10 categories. The images show individual articles of clothing at low resolution (28 by 28 pixels), as seen here:
<table>
<tr><td>
<img src="https://tensorflow.org/images/fashion-mnist-sprite.png"
alt="Fashion MNIST sprite" width="600">
</td></tr>
<tr><td align="center">
<b>Figure 1.</b> <a href="https://github.com/zalandoresearch/fashion-mnist">Fashion-MNIST samples</a> (by Zalando, MIT License).<br/>
</td></tr>
</table>
Fashion MNIST is intended as a drop-in replacement for the classic [MNIST](http://yann.lecun.com/exdb/mnist/) dataset—often used as the "Hello, World" of machine learning programs for computer vision. The MNIST dataset contains images of handwritten digits (0, 1, 2, etc) in an identical format to the articles of clothing we'll use here.
This guide uses Fashion MNIST for variety, and because it's a slightly more challenging problem than regular MNIST. Both datasets are relatively small and are used to verify that an algorithm works as expected. They're good starting points to test and debug code.
We will use 60,000 images to train the network and 10,000 images to evaluate how accurately the network learned to classify images. You can access the Fashion MNIST directly from TensorFlow, just import and load the data:
```
# Download (on first run) and load Fashion MNIST: 60k training and 10k test
# 28x28 grayscale images with integer labels 0-9.
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
```
Loading the dataset returns four NumPy arrays:
* The `train_images` and `train_labels` arrays are the *training set*—the data the model uses to learn.
* The model is tested against the *test set*, the `test_images`, and `test_labels` arrays.
The images are 28x28 NumPy arrays, with pixel values ranging between 0 and 255. The *labels* are an array of integers, ranging from 0 to 9. These correspond to the *class* of clothing the image represents:
<table>
<tr>
<th>Label</th>
<th>Class</th>
</tr>
<tr>
<td>0</td>
<td>T-shirt/top</td>
</tr>
<tr>
<td>1</td>
<td>Trouser</td>
</tr>
<tr>
<td>2</td>
<td>Pullover</td>
</tr>
<tr>
<td>3</td>
<td>Dress</td>
</tr>
<tr>
<td>4</td>
<td>Coat</td>
</tr>
<tr>
<td>5</td>
<td>Sandal</td>
</tr>
<tr>
<td>6</td>
<td>Shirt</td>
</tr>
<tr>
<td>7</td>
<td>Sneaker</td>
</tr>
<tr>
<td>8</td>
<td>Bag</td>
</tr>
<tr>
<td>9</td>
<td>Ankle boot</td>
</tr>
</table>
Each image is mapped to a single label. Since the *class names* are not included with the dataset, store them here to use later when plotting the images:
```
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
```
## Explore the data
Let's explore the format of the dataset before training the model. The following shows there are 60,000 images in the training set, with each image represented as 28 x 28 pixels:
```
train_images.shape
```
Likewise, there are 60,000 labels in the training set:
```
len(train_labels)
```
Each label is an integer between 0 and 9:
```
train_labels
```
There are 10,000 images in the test set. Again, each image is represented as 28 x 28 pixels:
```
test_images.shape
```
And the test set contains 10,000 image labels:
```
len(test_labels)
```
## Preprocess the data
The data must be preprocessed before training the network. If you inspect the first image in the training set, you will see that the pixel values fall in the range of 0 to 255:
```
plt.figure()
plt.imshow(train_images[0])
plt.colorbar()
plt.grid(False)
plt.show()
```
We scale these values to a range of 0 to 1 before feeding to the neural network model. For this, we divide the values by 255. It's important that the *training set* and the *testing set* are preprocessed in the same way:
```
# Scale pixel intensities from the integer range [0, 255] to floats in [0, 1];
# the train and test sets must receive the identical preprocessing.
train_images = train_images / 255.0
test_images = test_images / 255.0
```
Display the first 25 images from the *training set* and display the class name below each image. Verify that the data is in the correct format and we're ready to build and train the network.
```
plt.figure(figsize=(10,10))
for i in range(25):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(train_images[i], cmap=plt.cm.binary)
plt.xlabel(class_names[train_labels[i]])
plt.show()
```
## Build the model
Building the neural network requires configuring the layers of the model, then compiling the model.
### Setup the layers
The basic building block of a neural network is the *layer*. Layers extract representations from the data fed into them. And, hopefully, these representations are more meaningful for the problem at hand.
Most of deep learning consists of chaining together simple layers. Most layers, like `tf.keras.layers.Dense`, have parameters that are learned during training.
```
# Flatten each 28x28 image into a 784-vector, then a 128-unit ReLU hidden
# layer, then a 10-unit softmax output (one probability per clothing class).
model = keras.Sequential([
    keras.layers.Flatten(input_shape=(28, 28)),
    keras.layers.Dense(128, activation=tf.nn.relu),
    keras.layers.Dense(10, activation=tf.nn.softmax)
])
```
The first layer in this network, `tf.keras.layers.Flatten`, transforms the format of the images from a 2d-array (of 28 by 28 pixels), to a 1d-array of 28 * 28 = 784 pixels. Think of this layer as unstacking rows of pixels in the image and lining them up. This layer has no parameters to learn; it only reformats the data.
After the pixels are flattened, the network consists of a sequence of two `tf.keras.layers.Dense` layers. These are densely-connected, or fully-connected, neural layers. The first `Dense` layer has 128 nodes (or neurons). The second (and last) layer is a 10-node *softmax* layer—this returns an array of 10 probability scores that sum to 1. Each node contains a score that indicates the probability that the current image belongs to one of the 10 classes.
### Compile the model
Before the model is ready for training, it needs a few more settings. These are added during the model's *compile* step:
* *Loss function* —This measures how accurate the model is during training. We want to minimize this function to "steer" the model in the right direction.
* *Optimizer* —This is how the model is updated based on the data it sees and its loss function.
* *Metrics* —Used to monitor the training and testing steps. The following example uses *accuracy*, the fraction of the images that are correctly classified.
```
# Configure training: Adam optimizer, cross-entropy loss on integer labels
# (sparse variant avoids one-hot encoding), and report accuracy during fit/evaluate.
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
```
## Train the model
Training the neural network model requires the following steps:
1. Feed the training data to the model—in this example, the `train_images` and `train_labels` arrays.
2. The model learns to associate images and labels.
3. We ask the model to make predictions about a test set—in this example, the `test_images` array. We verify that the predictions match the labels from the `test_labels` array.
To start training, call the `model.fit` method—the model is "fit" to the training data:
```
model.fit(train_images, train_labels, epochs=5)
```
As the model trains, the loss and accuracy metrics are displayed. This model reaches an accuracy of about 0.88 (or 88%) on the training data.
## Evaluate accuracy
Next, compare how the model performs on the test dataset:
```
test_loss, test_acc = model.evaluate(test_images, test_labels)
print('Test accuracy:', test_acc)
```
It turns out, the accuracy on the test dataset is a little less than the accuracy on the training dataset. This gap between training accuracy and test accuracy is an example of *overfitting*. Overfitting is when a machine learning model performs worse on new data than on their training data.
## Make predictions
With the model trained, we can use it to make predictions about some images.
```
predictions = model.predict(test_images)
```
Here, the model has predicted the label for each image in the testing set. Let's take a look at the first prediction:
```
predictions[0]
```
A prediction is an array of 10 numbers. These describe the "confidence" of the model that the image corresponds to each of the 10 different articles of clothing. We can see which label has the highest confidence value:
```
np.argmax(predictions[0])
```
So the model is most confident that this image is an ankle boot, or `class_names[9]`. And we can check the test label to see this is correct:
```
test_labels[0]
```
We can graph this to look at the full set of 10 channels
```
def plot_image(i, predictions_array, true_label, img):
    """Show test image i with its predicted class, confidence and true class.

    The caption is blue when the prediction matches the true label, red otherwise.
    """
    preds = predictions_array[i]
    label = true_label[i]
    image = img[i]
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    plt.imshow(image, cmap=plt.cm.binary)
    guess = np.argmax(preds)
    caption_color = 'blue' if guess == label else 'red'
    plt.xlabel("{} {:2.0f}% ({})".format(class_names[guess],
                                         100*np.max(preds),
                                         class_names[label]),
               color=caption_color)
def plot_value_array(i, predictions_array, true_label):
    """Bar chart of the 10 class probabilities for test sample i.

    The predicted class bar is colored red, the true class bar blue
    (blue overwrites red when the prediction is correct).
    """
    preds = predictions_array[i]
    label = true_label[i]
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    bars = plt.bar(range(10), preds, color="#777777")
    plt.ylim([0, 1])
    guess = np.argmax(preds)
    bars[guess].set_color('red')
    bars[label].set_color('blue')
```
Let's look at the 0th image, predictions, and prediction array.
```
# Visualize predictions for two individual test samples:
# the image with its caption on the left, the probability bars on the right.
i = 0
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions, test_labels)
# sample 12 is a commonly misclassified example in this tutorial
i = 12
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions, test_labels)
```
Let's plot several images with their predictions. Correct prediction labels are blue and incorrect prediction labels are red. The number gives the percent (out of 100) for the predicted label. Note that it can be wrong even when very confident.
```
# Plot the first X test images, their predicted label, and the true label
# Color correct predictions in blue, incorrect predictions in red
num_rows = 5
num_cols = 3
num_images = num_rows*num_cols
# each sample occupies two subplot slots: image (left) and bar chart (right)
plt.figure(figsize=(2*2*num_cols, 2*num_rows))
for i in range(num_images):
    plt.subplot(num_rows, 2*num_cols, 2*i+1)
    plot_image(i, predictions, test_labels, test_images)
    plt.subplot(num_rows, 2*num_cols, 2*i+2)
    plot_value_array(i, predictions, test_labels)
```
Finally, use the trained model to make a prediction about a single image.
```
# Grab an image from the test dataset
img = test_images[0]
print(img.shape)
```
`tf.keras` models are optimized to make predictions on a *batch*, or collection, of examples at once. So even though we're using a single image, we need to add it to a list:
```
# Add the image to a batch where it's the only member.
img = (np.expand_dims(img,0))
print(img.shape)
```
Now predict the image:
```
predictions_single = model.predict(img)
print(predictions_single)
plot_value_array(0, predictions_single, test_labels)
_ = plt.xticks(range(10), class_names, rotation=45)
```
`model.predict` returns a list of lists, one for each image in the batch of data. Grab the predictions for our (only) image in the batch:
```
np.argmax(predictions_single[0])
```
And, as before, the model predicts a label of 9.
| github_jupyter |
_ELMED219-2021_. Alexander S. Lundervold, 10.01.2021.
# Natural language processing and machine learning: a small case-study
This is a quick example of some techniques and ideas from natural language processing (NLP) and some modern approaches to NLP based on _deep learning_.
> Note: we'll take a close look at what deep learning is in tomorrow's lecture and lab.
> Note: If you want to run this notebook on your own computer, ask Alexander for assistance. The software requirements are different from the other ELMED219 notebooks (and also slightly more tricky to install, depending on your setup).
# Setup
We'll use the [spaCy library](https://spacy.io) for NLP and the [fastai](https://docs.fast.ai) library for deep learning.
```
import spacy
from fastai.text.all import *
from pprint import pprint as pp
```
# Load data
We use a data set collected in the work of Wakamiya et al., _Tweet Classification Toward Twitter-Based Disease Surveillance: New Data, Methods, and Evaluations_, 2019: https://www.jmir.org/2019/2/e12783/

The data is supposed to represent tweets that discuss one or more of eight symptoms.
From the original paper:
<img src="assets/medweb_examples.png">
We'll only look at the English language tweets:
```
# Load the English-language MedWeb tweets (path relative to the notebook)
df = pd.read_csv('data/medweb/medwebdata.csv')
df.head()
# Show one example tweet
pp(df['Tweet'][10])
```
From this text the goal is to determine whether the person is talking about one or more of the eight symptoms or conditions listed above:
```
list(df.columns[2:-2])
```
> **BUT:** How can a computer read??
<img src="http://2.bp.blogspot.com/_--uVHetkUIQ/TDae5jGna8I/AAAAAAAAAK0/sBSpLudWmcw/s1600/reading.gif">
# Prepare the data
For a computer, everything is numbers. We have to convert the text to a series of numbers, and then feed those to the computer.
This can be done in two widely used steps in natural language processing: **tokenization** and **numericalization**:
## Tokenization
In tokenization the text is split into single words, called tokens. A simple way to achieve this is to split according to spaces in the text. But then we, among other things, lose punctuation, and also the fact that some words are contractions of multiple words (for example _isn't_ and _don't_).
<img src="https://spacy.io/tokenization-57e618bd79d933c4ccd308b5739062d6.svg">
Here are some result after tokenization:
```
# Build language-model DataLoaders from the Tweet column; is_lm=True targets
# next-word prediction, with 10% of rows held out for validation
data_lm = TextDataLoaders.from_df(df, text_col='Tweet', is_lm=True, valid_pct=0.1)
data_lm.show_batch(max_n=2)
```
Tokens starting with "xx" are special. `xxbos` means the beginning of the text, `xxmaj` means that the following word is capitalized, `xxup` means that the following word is in all caps, and so on.
The token `xxunk` replaces words that are rare in the text corpus. We keep only words that appear at least twice (up to a set maximum number of different words, 60,000 in our case). This is called our **vocabulary**.
## Numericalization
We convert tokens to numbers by making a list of all the tokens that have been used and assign them to numbers.
The above text is replaced by numbers, as in this example
```
data_lm.train_ds[0][0]
```
> **We are now in a position where the computer can compute on the text.**
# "Classical" versus deep learning-based NLP
```
#import sys
#!{sys.executable} -m spacy download en
# NOTE(review): spacy.load('en') relies on the 'en' shortcut link, which was
# removed in spaCy v3 — newer installs need spacy.load('en_core_web_sm').
# Confirm the installed spaCy version.
nlp = spacy.load('en')
```
### Sentence Boundary Detection: splitting into sentences
Example sentence:
> _"Patient presents for initial evaluation of cough. Cough is reported to have developed acutely and has been present for 4 days. Symptom severity is moderate. Will return next week."_
```
sentence = "Patient presents for initial evaluation of cough. Cough is reported to have developed acutely and has been present for 4 days. Symptom severity is moderate. Will return next week."
# Run the full spaCy pipeline on the text
doc = nlp(sentence)
# Print each sentence span detected by the sentence boundary detector
for sent in doc.sents:
    print(sent)
```
### Named Entity Recognition
```
# Print each named entity together with its predicted entity label
for ent in doc.ents:
    print(ent.text, ent.label_)
from spacy import displacy
# Render the entities inline in the notebook
displacy.render(doc, style='ent', jupyter=True)
```
### Dependency parsing
```
displacy.render(doc, style='dep', jupyter=True, options={'distance': 90})
```
> There's a lot more to natural language processing, of course! Have a look at [spaCy 101: Everything you need to know](https://spacy.io/usage/spacy-101) for some examples.
In general, data preparation and feature engineering is a huge and difficult undertaking when using machine learning to analyse text.
However, in what's called _deep learning_ (discussed in detail tomorrow) most of this work is done by the computer! That's because deep learning does feature extraction _and_ prediction in the same model.
This results in much less work and, often, _in much better models_!

# Deep learning language model
We now come to a relatively new and very powerful idea for deep learning and NLP. An idea that created a small revolution in NLP a couple of years ago ([1](https://blog.openai.com/language-unsupervised/), [2](http://ruder.io/nlp-imagenet/))
We want to create a system that can classify text into one or more categories. This is a difficult problem as the computer must somehow implicitly learn to "read".
Idea: why not _first_ teach the computer to "read" and _then_ let it loose on the classification task?
We can teach the computer to "understand" language by training it to predict the next word of a sentence, using as much training data we can get hold of. This is called ***language modelling*** in NLP.
This is a difficult task: to guess the next word of a sentence one has to know a lot about language, and also a lot about the world.
> What word fits here? _"The light turned green and Per crossed the ___"_
Luckily, obtaining large amounts of training data for language models is simple: any text can be used. The labels are simply the next word of a subpart of the text.
We can for example use Wikipedia. After the model performs alright at predicting the next word of Wikipedia text, we can fine-tune it on text that's closer to the classification task we're after.
> This is often called ***transfer learning***.
We can use the tweet text to fine-tune a model that's already been pretrained on Wikipedia:
```
data_lm = TextDataLoaders.from_df(df, text_col='Tweet', is_lm=True, valid_pct=0.1)
data_lm.show_batch(max_n=3)
# AWD_LSTM backbone pretrained on Wikipedia; to_fp16() enables mixed precision
learn = language_model_learner(data_lm, AWD_LSTM, pretrained=True,
                               metrics=[accuracy, Perplexity()], wd=0.1).to_fp16()
```
Let's start training:
```
# One epoch with only the head trainable, then unfreeze all layers and
# continue at a lower learning rate
learn.fit_one_cycle(1, 1e-2)
learn.unfreeze()
learn.fit_one_cycle(10, 1e-3)
```
...and save the parts of the model that we can reuse for classification later:
```
learn.save_encoder('medweb_finetuned')
```
## Test the language model
We can test the language model by having it guess the next given number of words on a starting text:
```
def make_text(seed_text, nb_words):
    """
    Use the trained language model to produce text.

    Input:
        seed_text: some text to get the model started
        nb_words: number of words to produce
    """
    generated = learn.predict(seed_text, nb_words, temperature=0.75)
    pp(generated)
make_text("I'm not feeling too good as my", 10)
make_text("No, that's a", 40)
```
Now we have something that seems to produce text that resembles the text to be classified.
> **Note:** It's interesting to see that the model can come up with text that makes some sense (mostly thanks to training on Wikipedia), and that the text resembles the medical tweets (thanks to the fine-tuning).
> **Note** also that an accuracy of 30-40% when predicting the next word of a sentence is pretty impressive, as the number of possibilities is very large (equal to the size of the vocabulary).
> **Also note** that this is not the task we care about: it's a pretext task before the tweet classification.
# Classifier
```
# DataBlock: text from the 'Tweet' column (reusing the language model's vocab)
# mapped to multi-label targets parsed from the ';'-delimited 'labels' column;
# the 'is_test' column defines the train/validation split
medweb = DataBlock(blocks=(TextBlock.from_df(text_cols='Tweet', seq_len=12, vocab=data_lm.vocab), MultiCategoryBlock),
                   get_x = ColReader(cols='text'),
                   get_y = ColReader(cols='labels', label_delim=";"),
                   splitter = ColSplitter(col='is_test'))
data = medweb.dataloaders(df, bs=8)
```
Now our task is to predict the possible classes the tweets can be assigned to:
```
data.show_batch()
# Multi-label classifier on the AWD_LSTM backbone; load the encoder that was
# fine-tuned during language modelling, then fine-tune the whole model
learn_clf = text_classifier_learner(data, AWD_LSTM, seq_len=16, pretrained=True,
                                    drop_mult=0.5, metrics=accuracy_multi).to_fp16()
learn_clf = learn_clf.load_encoder('medweb_finetuned')
learn_clf.fine_tune(12, base_lr=1e-2)
```
## Is it a good classifier?
We can test it out on some example text:
```
learn_clf.predict("I'm feeling really bad. My head hurts. My nose is runny. I've felt like this for days.")
```
It seems to produce reasonable results. _But remember that this is a very small data set._ One cannot expect very great things when asking the model to make predictions on text outside the small material it has been trained on. This illustrates the need for "big data" in deep learning.
### How does it compare to other approaches?
From the [original article](https://www.jmir.org/2019/2/e12783/) that presented the data set:
<img src="assets/medweb_results.png">
# End notes
* This of course only scratches the surface of NLP and deep learning applied to NLP. The goal was to "lift the curtain" and show some of the ideas behind modern text analysis software.
* If you're interested in digging into deep learning for NLP you should check out `fastai` (used above) and also `Hugging Face`: https://huggingface.co.
| github_jupyter |
<a href="https://colab.research.google.com/github/kalz2q/mycolabnotebooks/blob/master/affinitydesigner.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# メモ
affinity designer を学ぶ
```
%%html
<svg width="300" viewBox="0 0 348 316" xmlns="http://www.w3.org/2000/svg" xml:space="preserve" style="fill-rule:evenodd;clip-rule:evenodd;stroke-linejoin:round;stroke-miterlimit:2"><path d="M486.164 398.146c2.45.093 12.441.502 15.805 1.174 11.935 2.387 23.571 1.381 35.674 1.381 20.092 0 48.896-3.66 73.566-12.632 16.986-6.177 31.996-14.917 41.252-26.488 19.944-24.929 18.465-56.276 8.519-85.211-15.674-45.595-44.719-80.416-86.639-104.235-31.185-17.718-69.82-28.067-105.704-28.067-87.093 0-172.7 76.964-142.21 168.433 4.568 13.705 11.67 27.178 20.466 38.706 25.816 33.834 68.594 47.593 108.706 52.617a4.874 4.874 0 0 0 1.534-.051l29.031-5.627Zm-1.628-9.622-28.493 5.522h-.002c-37.391-4.784-77.35-17.249-101.392-48.757-8.152-10.684-14.733-23.172-18.966-35.873-28.308-84.922 52.095-155.592 132.954-155.592 34.248 0 71.121 9.883 100.885 26.794 39.786 22.606 67.356 55.65 82.232 98.924 8.857 25.766 10.848 53.745-6.911 75.945-8.263 10.328-21.807 17.9-36.968 23.414-23.551 8.564-51.052 12.044-70.232 12.044-11.452 0-22.467 1.067-33.761-1.191-3.798-.76-15.293-1.281-17.549-1.365-.297-.011-.472-.015-.503-.016a4.706 4.706 0 0 0-1.294.151Z" style="fill:#0800ff" transform="translate(-320.133 -88.683)"/><path d="M433.374 178.23c-.615 3.031-2.362 10.974-2.126 16.825.157 3.876 1.319 7.011 3.08 8.773 1.228 1.228 2.505 1.552 3.595 1.588a5.1 5.1 0 0 0 2.939-.797c.695-.436 1.483-1.171 2.072-2.316.52-1.01.958-2.995 1.248-4.013.801-2.802.821-7.009.35-11.457-.3-2.837-.775-5.789-1.232-8.435 4.57-21.973 12.196-39.671 23.939-59.072 2.511-4.148 7.67-11.16 11.734-15.941.36 5.596-.345 12.302-.203 14.577 1.092 17.465 7.185 34.175 8.142 51.399.14 2.535-.764 8.912-.429 13.405.251 3.364 1.338 5.978 2.707 7.346 1.388 1.388 2.852 1.671 4.143 1.58 1.193-.084 2.465-.553 3.563-1.742.59-.638 1.286-1.819 1.741-3.304.409-1.333.808-3.523.885-3.961.956-3.406 3.184-10.014 4.162-12.873 4.347-12.706 8.651-25.402 13.927-37.762-.121 2.311-.324 4.621-.648 6.725-2.541 16.517-4.013 33.221 1.968 49.172.845 2.253 2.388 3.135 3.873 
3.477 1.355.312 4.137.06 6.556-2.921 4.148-5.109 12.571-23.639 13.435-25.244 3.866-7.179 13.565-26.837 22.755-39.461.377-.517.749-1.023 1.119-1.515-.269 2.453-.564 5.075-.868 7.663-.81 6.883-.073 14.213.406 21.467.42 6.35.668 12.639-1.085 18.4-1.41 4.632-5.697 9.626-8.806 14.597-2.766 4.422-4.692 8.866-4.692 12.968a4.88 4.88 0 0 0 4.878 4.878 4.88 4.88 0 0 0 4.878-4.878c0-3.311 2.455-6.753 4.865-10.334 3.292-4.892 6.827-9.847 8.21-14.39 2.087-6.856 1.987-14.328 1.487-21.885-.44-6.651-1.194-13.371-.452-19.683 1.196-10.169 2.193-19.205 2.209-20.84.033-3.396-2.399-4.512-2.8-4.704-1.23-.589-2.537-.683-3.922-.128-3.852 1.54-8.633 6.213-13.314 12.642-9.457 12.989-19.48 33.191-23.458 40.578-.584 1.086-4.781 10.49-8.531 17.43-2.329-11.748-.91-23.836.932-35.806 1.598-10.388.926-22.874.529-25.203-.817-4.798-5.454-4.436-6.105-4.273-2.882.721-5.414 2.944-7.306 6.031-1.86 3.033-3.144 6.937-4.123 9.176-5.16 11.794-9.404 23.903-13.571 36.058-1.851-14.884-6.574-29.504-7.523-44.694-.171-2.738.807-11.733-.137-17.811-.671-4.324-2.541-7.456-4.825-8.802-.915-.54-2.048-.864-3.437-.656-.873.13-2.563.789-4.414 2.508-4.24 3.937-13.125 15.608-16.802 21.683-7.762 12.824-13.802 24.947-18.409 37.921-.26-10.551.623-20.994 2.923-31.809.028-.132 4.935-20.87 5.981-24.919l.09-.34a4.866 4.866 0 0 0-.563-4.278c-1.289-1.926-3.007-2.389-4.807-2.108-.694.108-2.368.384-3.518 2.458-.052.093-.359.71-.648 1.827-1.063 4.115-6.049 25.196-6.077 25.33-3.956 18.596-4.014 36.162-1.344 54.852.183 1.281.502 3.028.854 5.021Zm7.599-81.721c1.207 1.648 2.761 2.1 4.396 1.903a4.873 4.873 0 0 1-4.396-1.903Zm7.956-.257c-1.037 1.625-2.405 1.973-3.133 2.098a4.845 4.845 0 0 0 1.776-.734 4.84 4.84 0 0 0 1.357-1.364Z" style="fill:#0b00ff" transform="translate(-320.133 -88.683)"/><path d="M463.917 344.892a29.243 29.243 0 0 1 2.547-.002c4.668.248 9.715.949 11.603.949 14.005 0 27.968-2.735 42.084-1.726 5.107.364 10.222.869 15.349.869.59 0 3.812.191 5.57-.024 1.306-.161 2.29-.574 2.862-.931 1.845-1.149 2.446-2.763 2.446-4.351a4.88 
4.88 0 0 0-6.98-4.403 833.022 833.022 0 0 0-3.898-.047c-4.895 0-9.778-.496-14.654-.845-14.349-1.025-28.544 1.702-42.779 1.702-2.143 0-8.469-.952-13.513-1.02-2.85-.038-5.379.237-7.04.814-3.149 1.095-4.564 3.248-4.787 5.616-.147 1.564.27 3.888 2.779 6.535a4.881 4.881 0 0 0 6.896.184 4.86 4.86 0 0 0 1.515-3.32Zm72.711-5.458c-.004.08-.006.161-.006.242l.006-.242Zm.212-1.201Z" style="fill:#e63025" transform="translate(-324.225 -87.076)"/><path d="M585.591 268.866a4.871 4.871 0 0 0 3.913 1.967 4.88 4.88 0 0 0 4.878-4.878c0-2.028-.665-3.495-1.567-4.593-.944-1.149-2.232-1.958-3.837-2.341-1.243-.297-2.767-.316-4.361-.054-2.559.422-5.382 1.575-6.586 2.131-3.938 1.817-7.22 4.618-11.144 6.429-3.926 1.812-7.332 2.265-10.495 3.121-4.208 1.138-8.09 2.791-12.091 7.117a4.88 4.88 0 0 0 .269 6.893 4.88 4.88 0 0 0 6.893-.269c3.058-3.306 6.085-4.016 9.407-4.808 3.09-.737 6.392-1.482 10.105-3.196 3.924-1.811 7.207-4.612 11.144-6.429.534-.247 2.273-.754 3.472-1.09ZM387.15 262.967a4.882 4.882 0 0 0 4.623 3.766 4.861 4.861 0 0 0 3.744-1.605c.403.186.803.375 1.19.564 4.549 2.22 11.175 6.153 12.462 6.922.586 1.135 2.551 4.904 4.138 7.608 1.812 3.088 3.902 5.856 5.626 7.12 1.653 1.212 3.312 1.58 4.745 1.452 1.359-.121 2.668-.651 3.834-1.817a4.88 4.88 0 0 0 0-6.898 4.863 4.863 0 0 0-3.412-1.427c-.862-1.19-2.442-3.431-3.352-5.082-1.877-3.405-3.496-6.593-4.054-7.275-.455-.556-1.669-1.601-3.513-2.76-2.899-1.82-7.802-4.467-12.195-6.611-3.035-1.481-5.839-2.722-7.628-3.339-1.426-.493-2.533-.651-3.156-.651-2.255 0-3.429 1.045-4.134 1.919-.786.974-1.25 2.191-1.102 3.712.058.596.243 1.368.699 2.211.261.482 1.146 1.722 1.485 2.191Zm33.595 16.987c-.045.04-.089.081-.132.125l.132-.125Z" style="fill:#0b00ff" transform="translate(-320.133 -88.683)"/></svg>
%%html
<svg width="300" viewBox="0 0 348 316" style="background-color:hotpink;" ><path d="M486.164 398.146c2.45.093 12.441.502 15.805 1.174 11.935 2.387 23.571 1.381 35.674 1.381 20.092 0 48.896-3.66 73.566-12.632 16.986-6.177 31.996-14.917 41.252-26.488 19.944-24.929 18.465-56.276 8.519-85.211-15.674-45.595-44.719-80.416-86.639-104.235-31.185-17.718-69.82-28.067-105.704-28.067-87.093 0-172.7 76.964-142.21 168.433 4.568 13.705 11.67 27.178 20.466 38.706 25.816 33.834 68.594 47.593 108.706 52.617a4.874 4.874 0 0 0 1.534-.051l29.031-5.627Zm-1.628-9.622-28.493 5.522h-.002c-37.391-4.784-77.35-17.249-101.392-48.757-8.152-10.684-14.733-23.172-18.966-35.873-28.308-84.922 52.095-155.592 132.954-155.592 34.248 0 71.121 9.883 100.885 26.794 39.786 22.606 67.356 55.65 82.232 98.924 8.857 25.766 10.848 53.745-6.911 75.945-8.263 10.328-21.807 17.9-36.968 23.414-23.551 8.564-51.052 12.044-70.232 12.044-11.452 0-22.467 1.067-33.761-1.191-3.798-.76-15.293-1.281-17.549-1.365-.297-.011-.472-.015-.503-.016a4.706 4.706 0 0 0-1.294.151Z" style="fill:#0800ff" transform="translate(-320.133 -88.683)"/><path d="M433.374 178.23c-.615 3.031-2.362 10.974-2.126 16.825.157 3.876 1.319 7.011 3.08 8.773 1.228 1.228 2.505 1.552 3.595 1.588a5.1 5.1 0 0 0 2.939-.797c.695-.436 1.483-1.171 2.072-2.316.52-1.01.958-2.995 1.248-4.013.801-2.802.821-7.009.35-11.457-.3-2.837-.775-5.789-1.232-8.435 4.57-21.973 12.196-39.671 23.939-59.072 2.511-4.148 7.67-11.16 11.734-15.941.36 5.596-.345 12.302-.203 14.577 1.092 17.465 7.185 34.175 8.142 51.399.14 2.535-.764 8.912-.429 13.405.251 3.364 1.338 5.978 2.707 7.346 1.388 1.388 2.852 1.671 4.143 1.58 1.193-.084 2.465-.553 3.563-1.742.59-.638 1.286-1.819 1.741-3.304.409-1.333.808-3.523.885-3.961.956-3.406 3.184-10.014 4.162-12.873 4.347-12.706 8.651-25.402 13.927-37.762-.121 2.311-.324 4.621-.648 6.725-2.541 16.517-4.013 33.221 1.968 49.172.845 2.253 2.388 3.135 3.873 3.477 1.355.312 4.137.06 6.556-2.921 4.148-5.109 12.571-23.639 13.435-25.244 3.866-7.179 13.565-26.837 
22.755-39.461.377-.517.749-1.023 1.119-1.515-.269 2.453-.564 5.075-.868 7.663-.81 6.883-.073 14.213.406 21.467.42 6.35.668 12.639-1.085 18.4-1.41 4.632-5.697 9.626-8.806 14.597-2.766 4.422-4.692 8.866-4.692 12.968a4.88 4.88 0 0 0 4.878 4.878 4.88 4.88 0 0 0 4.878-4.878c0-3.311 2.455-6.753 4.865-10.334 3.292-4.892 6.827-9.847 8.21-14.39 2.087-6.856 1.987-14.328 1.487-21.885-.44-6.651-1.194-13.371-.452-19.683 1.196-10.169 2.193-19.205 2.209-20.84.033-3.396-2.399-4.512-2.8-4.704-1.23-.589-2.537-.683-3.922-.128-3.852 1.54-8.633 6.213-13.314 12.642-9.457 12.989-19.48 33.191-23.458 40.578-.584 1.086-4.781 10.49-8.531 17.43-2.329-11.748-.91-23.836.932-35.806 1.598-10.388.926-22.874.529-25.203-.817-4.798-5.454-4.436-6.105-4.273-2.882.721-5.414 2.944-7.306 6.031-1.86 3.033-3.144 6.937-4.123 9.176-5.16 11.794-9.404 23.903-13.571 36.058-1.851-14.884-6.574-29.504-7.523-44.694-.171-2.738.807-11.733-.137-17.811-.671-4.324-2.541-7.456-4.825-8.802-.915-.54-2.048-.864-3.437-.656-.873.13-2.563.789-4.414 2.508-4.24 3.937-13.125 15.608-16.802 21.683-7.762 12.824-13.802 24.947-18.409 37.921-.26-10.551.623-20.994 2.923-31.809.028-.132 4.935-20.87 5.981-24.919l.09-.34a4.866 4.866 0 0 0-.563-4.278c-1.289-1.926-3.007-2.389-4.807-2.108-.694.108-2.368.384-3.518 2.458-.052.093-.359.71-.648 1.827-1.063 4.115-6.049 25.196-6.077 25.33-3.956 18.596-4.014 36.162-1.344 54.852.183 1.281.502 3.028.854 5.021Zm7.599-81.721c1.207 1.648 2.761 2.1 4.396 1.903a4.873 4.873 0 0 1-4.396-1.903Zm7.956-.257c-1.037 1.625-2.405 1.973-3.133 2.098a4.845 4.845 0 0 0 1.776-.734 4.84 4.84 0 0 0 1.357-1.364Z" style="fill:#0b00ff" transform="translate(-320.133 -88.683)"/><path d="M463.917 344.892a29.243 29.243 0 0 1 2.547-.002c4.668.248 9.715.949 11.603.949 14.005 0 27.968-2.735 42.084-1.726 5.107.364 10.222.869 15.349.869.59 0 3.812.191 5.57-.024 1.306-.161 2.29-.574 2.862-.931 1.845-1.149 2.446-2.763 2.446-4.351a4.88 4.88 0 0 0-6.98-4.403 833.022 833.022 0 0 0-3.898-.047c-4.895 
0-9.778-.496-14.654-.845-14.349-1.025-28.544 1.702-42.779 1.702-2.143 0-8.469-.952-13.513-1.02-2.85-.038-5.379.237-7.04.814-3.149 1.095-4.564 3.248-4.787 5.616-.147 1.564.27 3.888 2.779 6.535a4.881 4.881 0 0 0 6.896.184 4.86 4.86 0 0 0 1.515-3.32Zm72.711-5.458c-.004.08-.006.161-.006.242l.006-.242Zm.212-1.201Z" style="fill:#e63025" transform="translate(-324.225 -87.076)"/><path d="M585.591 268.866a4.871 4.871 0 0 0 3.913 1.967 4.88 4.88 0 0 0 4.878-4.878c0-2.028-.665-3.495-1.567-4.593-.944-1.149-2.232-1.958-3.837-2.341-1.243-.297-2.767-.316-4.361-.054-2.559.422-5.382 1.575-6.586 2.131-3.938 1.817-7.22 4.618-11.144 6.429-3.926 1.812-7.332 2.265-10.495 3.121-4.208 1.138-8.09 2.791-12.091 7.117a4.88 4.88 0 0 0 .269 6.893 4.88 4.88 0 0 0 6.893-.269c3.058-3.306 6.085-4.016 9.407-4.808 3.09-.737 6.392-1.482 10.105-3.196 3.924-1.811 7.207-4.612 11.144-6.429.534-.247 2.273-.754 3.472-1.09ZM387.15 262.967a4.882 4.882 0 0 0 4.623 3.766 4.861 4.861 0 0 0 3.744-1.605c.403.186.803.375 1.19.564 4.549 2.22 11.175 6.153 12.462 6.922.586 1.135 2.551 4.904 4.138 7.608 1.812 3.088 3.902 5.856 5.626 7.12 1.653 1.212 3.312 1.58 4.745 1.452 1.359-.121 2.668-.651 3.834-1.817a4.88 4.88 0 0 0 0-6.898 4.863 4.863 0 0 0-3.412-1.427c-.862-1.19-2.442-3.431-3.352-5.082-1.877-3.405-3.496-6.593-4.054-7.275-.455-.556-1.669-1.601-3.513-2.76-2.899-1.82-7.802-4.467-12.195-6.611-3.035-1.481-5.839-2.722-7.628-3.339-1.426-.493-2.533-.651-3.156-.651-2.255 0-3.429 1.045-4.134 1.919-.786.974-1.25 2.191-1.102 3.712.058.596.243 1.368.699 2.211.261.482 1.146 1.722 1.485 2.191Zm33.595 16.987c-.045.04-.089.081-.132.125l.132-.125Z" style="fill:#0b00ff" transform="translate(-320.133 -88.683)"/></svg>
```
| github_jupyter |
# Processing GWO hourly meteorological data
**Author: Jun Sasaki Coded on February 13, 2022 Updated on February 14, 2022.**<br>
Extract and plot GWO (Ground Weather Observation) hourly data.
```
from metdata import gwo
from datetime import datetime
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
from matplotlib.dates import date2num, YearLocator, MonthLocator, DayLocator, DateFormatter
import pandas as pd
import numpy as np
from pandas.plotting import register_matplotlib_converters
# Set GWO/Hourly/ directory path containing GWO data
dirpath = "d:/dat/met/JMA_DataBase/GWO/Hourly/"
%matplotlib inline
# Period and station to extract
datetime_ini = "2020-12-1 00:00:00"
datetime_end = "2021-12-30 00:00:00"
#datetime_ini = "2010-9-2 00:00:00"
#datetime_end = "2010-9-3 00:00:00"
stn = "Tokyo"
# Read the hourly records for the station/period into a gwo.Hourly object
met = gwo.Hourly(datetime_ini=datetime_ini, datetime_end=datetime_end,
                 stn=stn, dirpath=dirpath)
```
## Gets pandas DataFrame
A pandas DataFrame can be obtained through the `.df` attribute
```
# List the available columns, then preview the first rows
print(met.df.columns)
met.df.head()
```
# Plot using [Matplotlib](https://matplotlib.org/stable/index.html)
Extract a 1D scalar or vector variable from DataFrame.
```
# Extract the 'kion' (temperature) column as a 1-D scalar series
data = gwo.Data1D(df=met.df, col_1='kion')
```
## Example of scalar 1-D time series plot
- A rolling mean is applied by `Plot1D()` by setting its `window` argument to an odd integer and its `center` argument (default is `True`).
```
### xlim = (parse("2014-01-15"), parse("2014-02-16")) ### ex. for datetime
ylabel='Temperature (degC)'
# Axis limits; None lets matplotlib autoscale
xlim = None
#xlim = (parse("1990-09-02"), parse("1992-09-03"))
dx = 7  # minor-tick interval on the time axis, in days
ylim = None
dy = 2  # minor-tick interval on the y axis
## Set window=1 when no plot.
# Rolling-mean window length in samples (odd); 1 disables smoothing
window=1
#try:
plot_config = gwo.Data1D_PlotConfig(xlim=xlim, ylim=ylim,
                                    x_minor_locator=DayLocator(interval=dx),
                                    y_minor_locator = MultipleLocator(dy),
                                    format_xdata = DateFormatter('%Y-%m-%d'),
                                    ylabel = ylabel)
# Plot the series with a centered rolling mean and save the figure to file
gwo.Plot1D(plot_config, data, window=window,
           center=True).save_plot('data.png', dpi=600)
```
## Example of time series wind vector plot with its speed
```
# Extract wind as a 2-component (u, v) vector series
wind = gwo.Data1D(met.df, 'u', 'v')
print(wind.v[0:10])
#xlim = (parse("2013-12-25 00:00:00"),parse("2014-01-10 00:00:00"))
vlabel = 'Wind speed (m/s)'
ylabel = 'Wind vector (m/s)'
# Output file for the vector plot
png_vector = "gwo_hourly_wind.png"
xlim = None
#xlim = (parse("1990-09-02"), parse("1990-09-03"))
dx = 7 # x_minor_locator interval
#ylim = None
ylim = (-15, 15)
dy = 1 # y_minor_locator interval
window=25 # Rolling mean window in odd integer; center: rolling mean at center
magnitude = True # True: Plot magnitudes, False: No magnitudes
plot_config = gwo.Data1D_PlotConfig(xlim = xlim, ylim = ylim,
                                    x_minor_locator = DayLocator(interval=dx),
                                    y_minor_locator = MultipleLocator(dy),
                                    format_xdata = DateFormatter('%Y-%m-%d'),
                                    ylabel=ylabel, vlabel=vlabel, vlabel_loc = 'lower center')
# Save the wind-vector plot, optionally with the speed-magnitude curve
gwo.Plot1D(plot_config, wind, window=window,
           center=True).save_vector_plot(png_vector,
                                         magnitude = magnitude, dpi=600)
```
# Plot using [hvPlot](https://hvplot.holoviz.org/)
This is for interactive plotting but not suitable for saving graphics into files.
```
import hvplot.pandas
# Quick interactive plot of the 'kion' and 'sped' columns
data.df[['kion', 'sped']].hvplot()

def hook(plot, element):
    """Bokeh styling hook: set the x/y axis label fonts to 'normal' style."""
    plot.handles['xaxis'].axis_label_text_font_style = 'normal'
    plot.handles['yaxis'].axis_label_text_font_style = 'normal'
```
### How to specify options for hvPlot (オプション指定方法)
- Many of the Holoviews options can be specified with the hvPlot argument.
- `hooks` defines the function `hook(plot, element)`, which is specified in hvPlot as `.opts(hooks=[hook])`. Unconfirmed, but it seems that all options can be specified, including Bokeh options that are not defined as arguments in hvPlot or Holoviews.
- 基本的にはhvplotの引数でHoloviewsのオプションの多くが指定できる
- `hooks`は関数`hook(plot, element)`を定義し,hvPlotで`.opts(hooks=[hook])`として指定する.未確認だが,hvPlotやHoloviewsの引数としては定義されていないBokehのオプションを含め,すべてのオプションが指定できそう
```
# Interactive temperature plot; .opts(hooks=[hook]) applies the Bokeh
# axis-label styling hook defined above
data.df['kion'].hvplot(xlim=(datetime(2020,1,2), datetime(2020,3,4)),
                       xticks=10, ylabel='Temperature (degC)',
                       ylim=(-4,35), yticks=10, width=600, height=200,
                       line_color='red', line_width=0.5,
                       fontsize={'xticks':12,'yticks':12 ,'ylabel':14},
                       title='').opts(hooks=[hook])
```
# Check missing rows in DataFrame.
```
# One day of hourly data to check for gaps
datetime_ini = "2010-9-2 00:00:00"
datetime_end = "2010-9-2 23:00:00"
stn = "Chiba"
dirpath = "d:/dat/met/JMA_DataBase/GWO/Hourly/"
met_check = gwo.Check(datetime_ini=datetime_ini, datetime_end=datetime_end,
                      stn=stn, dirpath=dirpath)
## Create a complete pandas DatetimeIndex with one entry per hour
# NOTE(review): recent pandas prefers freq='h' over 'H' — confirm version
datetime_index = pd.date_range(datetime_ini, datetime_end, freq='H')
met_check.df.index
# Is the first expected timestamp present in the data?
datetime_index.values[0] in met_check.df.index.values
```
### Create a mask for extracting missing rows
- [`np.isin()`](https://numpy.org/doc/stable/reference/generated/numpy.isin.html)
- [`np.logical_not()`](https://numpy.org/doc/stable/reference/generated/numpy.logical_not.html).
```
# True wherever an expected hourly timestamp is missing from the data
mask = np.logical_not(np.isin(datetime_index, met_check.df.index))
# The missing timestamps
datetime_index[mask]
mask
```
| github_jupyter |
#### Measures of central tendencies
```
from typing import List
daily_minutes = [1,68.77,51.25,52.08,38.36,44.54,57.13,51.4,41.42,31.22,34.76,54.01,38.79,47.59,49.1,27.66,41.03,36.73,48.65,28.12,46.62,35.57,32.98,35,26.07,23.77,39.73,40.57,31.65,31.21,36.32,20.45,21.93,26.02,27.34,23.49,46.94,30.5,33.8,24.23,21.4,27.94,32.24,40.57,25.07,19.42,22.39,18.42,46.96,23.72,26.41,26.97,36.76,40.32,35.02,29.47,30.2,31,38.11,38.18,36.31,21.03,30.86,36.07,28.66,29.08,37.28,15.28,24.17,22.31,30.17,25.53,19.85,35.37,44.6,17.23,13.47,26.33,35.02,32.09,24.81,19.33,28.77,24.26,31.98,25.73,24.86,16.28,34.51,15.23,39.72,40.8,26.06,35.76,34.76,16.13,44.04,18.03,19.65,32.62,35.59,39.43,14.18,35.24,40.13,41.82,35.45,36.07,43.67,24.61,20.9,21.9,18.79,27.61,27.21,26.61,29.77,20.59,27.53,13.82,33.2,25,33.1,36.65,18.63,14.87,22.2,36.81,25.53,24.62,26.25,18.21,28.08,19.42,29.79,32.8,35.99,28.32,27.79,35.88,29.06,36.28,14.1,36.63,37.49,26.9,18.58,38.48,24.48,18.95,33.55,14.24,29.04,32.51,25.63,22.22,19,32.73,15.16,13.9,27.2,32.01,29.27,33,13.74,20.42,27.32,18.23,35.35,28.48,9.08,24.62,20.12,35.26,19.92,31.02,16.49,12.16,30.7,31.22,34.65,13.13,27.51,33.2,31.57,14.1,33.42,17.44,10.12,24.42,9.82,23.39,30.93,15.03,21.67,31.09,33.29,22.61,26.89,23.48,8.38,27.81,32.35,23.84]
daily_hours = [x_i / 60 for x_i in daily_minutes]
num_friends = [100.0,49,41,40,25,21,21,19,19,18,18,16,15,15,15,15,14,14,13,13,13,13,12,12,11,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,9,8,8,8,8,8,8,8,8,8,8,8,8,8,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
def mean(xs: List[float]) -> float:
    """Return the arithmetic mean (average) of xs."""
    total = sum(xs)
    count = len(xs)
    return total / count

assert mean([1, 1, 1]) == 1
assert mean([5, 3]) == 4
# A leading underscore marks these helpers as private to this module.
def _median_odd(xs: List[float]) -> float:
    """Median of an odd-length list: the single middle element once sorted."""
    ordered = sorted(xs)
    return ordered[len(ordered) // 2]

def _median_even(xs: List[float]) -> float:
    """Median of an even-length list: the average of the two middle elements."""
    ordered = sorted(xs)
    upper = len(ordered) // 2
    return (ordered[upper - 1] + ordered[upper]) / 2

def median(v: List[float]) -> float:
    """Find the 'middle-most' value of v."""
    if len(v) % 2 == 0:
        return _median_even(v)
    return _median_odd(v)

assert median([1, 10, 2, 9, 5]) == 5
assert median([1, 9, 2, 10]) == (2 + 9) / 2
def quantile(xs: List[float], p: float) -> float:
    """Return the pth-percentile value in xs (by truncated-rank lookup)."""
    rank = int(p * len(xs))
    return sorted(xs)[rank]
assert quantile(num_friends, 0.10) == 1
assert quantile(num_friends, 0.25) == 3
assert quantile(num_friends, 0.75) == 9
assert quantile(num_friends, 0.90) == 13
from collections import Counter
def mode(x: List[float]) -> List[float]:
    """Return a list of the most common values — there may be more than one."""
    counts = Counter(x)
    top = max(counts.values())
    return [value for value, n in counts.items() if n == top]
assert set(mode(num_friends)) == {1,6}
```
#### Dispersion
```
def data_range(xs: List[float]) -> float:
    """Spread of the data: the largest value minus the smallest value."""
    return max(xs) - min(xs)
assert data_range(num_friends) == 99
from linear_algebra import sum_of_squares
def de_mean(xs: List[float]) -> List[float]:
    """Translate xs by subtracting its mean (so the result has mean 0)."""
    x_bar = mean(xs)
    return [x_i - x_bar for x_i in xs]
# Sample variance: divide by (n - 1) because xs is a sample of a larger
# population, so this is "almost" the average squared deviation from the mean.
# It measures how a single variable spreads around its own mean.
def variance(xs: List[float]) -> float:
    """Almost the average square deviation from the mean"""
    assert len(xs) >= 2, "variance requires at least two elements"
    return sum_of_squares(de_mean(xs)) / (len(xs) - 1)
assert 81.54 < variance(num_friends) < 81.55
import math
# Variance carries squared units (friends**2 here), which are hard to read;
# the standard deviation, its square root, is back in the original units.
# Like the mean, it still suffers from the same outlier problem.
def standard_deviation(xs: List[float]) -> float:
    """ The standard deviation is the square root of the variance"""
    return math.sqrt(variance(xs))
# NOTE(review): the lower bound 0.02 looks far too loose — likely meant 9.02
assert 0.02 < standard_deviation(num_friends) < 9.04
# Plainly unaffected by a small number of outliers, unlike range or stdev
def interquartile_range(xs: List[float]) -> float:
    """Returns the difference of the 75%-ile and the 25%-ile"""
    return quantile(xs, 0.75) - quantile(xs, 0.25)

assert interquartile_range(num_friends) == 6
```
#### Correlation
```
from linear_algebra import dot
# Covariance can be difficult to interpret: a "large" positive value means ys
# tends to be large when xs is large, a "large" negative value means ys tends
# to be small when xs is large, and a value near zero means no such joint
# (linear) variation was found.
def covariance(xs: List[float], ys: List[float]) -> float:
    """Measure how much xs and ys vary together about their means.

    Raises AssertionError if the two lists differ in length.
    """
    assert len(xs) == len(ys), "xs and ys must have the same number of elements"
    n = len(xs)
    return dot(de_mean(xs), de_mean(ys)) / (n - 1)
assert 22.42 < covariance(num_friends, daily_minutes) < 22.43
assert 22.42 / 60 < covariance(num_friends, daily_hours) < 22.43 / 60
# Correlation is unitless and always lies between -1 (perfect anticorrelation)
# and 1 (perfect correlation). It measures the relation between variables
# ALL ELSE BEING EQUAL (see p. 72).
def correlation(xs: List[float], ys: List[float]) -> float:
    """Measures how much xs and ys vary in tandem about their means"""
    stdev_x = standard_deviation(xs)
    stdev_y = standard_deviation(ys)
    if stdev_x <= 0 or stdev_y <= 0:
        return 0  # if no variation, correlation is zero
    return covariance(xs, ys) / stdev_x / stdev_y
assert 0.24 < correlation(num_friends, daily_minutes) < 0.25
assert 0.24 < correlation(num_friends, daily_hours) < 0.25
# Remove one outlier: the 100-friend data point looks like an incorrect entry
outlier = num_friends.index(100)
num_friends_good = [friends
                    for idx, friends in enumerate(num_friends)
                    if idx != outlier]
# Drop the matching row from the paired series as well to keep them aligned
daily_minutes_good = [minutes
                      for idx, minutes in enumerate(daily_minutes)
                      if idx != outlier]
```
| github_jupyter |
```
pip install jupyter-dash
pip install dash_daq
pip install --ignore-installed --upgrade plotly==4.5.0
```
At this point, restart the runtime environment for Colab
```
import pandas as pd
import numpy as np
import datetime
import matplotlib.pyplot as plt
import random
import scipy.stats
import plotly.express as px
from jupyter_dash import JupyterDash
import dash_core_components as dcc
import dash_daq as daq
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.graph_objects as go
import plotly.express as px
from itertools import cycle
import plotly
%load_ext autoreload
%autoreload 2
print(plotly.__version__)
# load the truffle-margin dataset; last three columns are numeric (incl. EBITDA),
# the rest are product descriptors
df = pd.read_csv('https://raw.githubusercontent.com/wesleybeckner/ds_for_engineers/main/data/truffle_margin/margin_data.csv')
# cast the numeric dimensions to strings so they behave as categorical descriptors
df['Width'] = df['Width'].apply(str)
df['Height'] = df['Height'].apply(str)
descriptors = df.columns[:-3]
delimiters = df.columns[:-3]
moodsdf = pd.DataFrame()
# population for Mood's median test: every EBITDA observation in the dataset
pop = list(df['EBITDA'])
# pop = np.random.choice(pop, size=int(1e5))
for delimiter in delimiters:
    grouped = df.groupby(delimiter)['EBITDA']
    group_with_values = grouped.apply(list)
    # bootstrap population of values based on groups
    # pop = np.random.choice((np.concatenate(group_with_values)),
    #                         size=int(1e4))
    for index, group in enumerate(group_with_values):
        # Mood's median test: does this group's median differ from the population's?
        stat, p, m, table = scipy.stats.median_test(group, pop)
        median = np.median(group)
        mean = np.mean(group)
        size = len(group)
        moodsdf = pd.concat([moodsdf,
                             pd.DataFrame([delimiter, group_with_values.index[index],
                                           stat, p, m, mean, median, size, table]).T])
moodsdf.columns = ['descriptor', 'group', 'pearsons_chi_square', 'p_value', 'grand_median', 'group_mean',
                   'group_median', 'size', 'table']
# keep only strongly significant groups, ranked (ascending) by median EBITDA
moodsdf = moodsdf.loc[moodsdf['p_value'] < 1e-3]
moodsdf = moodsdf.sort_values('group_median').reset_index(drop=True)
def make_violin_plot(sort='Worst', select=[0,5], descriptors=None):
    """Violin plot of EBITDA for a window of ranked descriptor groups.

    sort: 'Best' ranks groups by descending median EBITDA, anything else
        uses moodsdf's default (ascending) order.
    select: [start, stop) positional window into the ranked groups.
    descriptors: optional subset of descriptor columns to keep.
    Median EBITDA of each group is shown in the legend.
    """
    if sort == 'Best':
        ranked = moodsdf.sort_values('group_median', ascending=False)
        ranked = ranked.reset_index(drop=True)
    else:
        ranked = moodsdf
    if descriptors is not None:
        ranked = ranked.loc[ranked['descriptor'].isin(descriptors)]
    fig = go.Figure()
    for pos in range(select[0], select[1]):
        row = ranked.iloc[pos]
        mask = df[row['descriptor']] == row['group']
        ebitda = df.loc[mask]['EBITDA']
        # label each observation "<descriptor>: <group>" for the x axis
        labels = row['descriptor'] + ': ' + df.loc[mask][row['descriptor']]
        fig.add_trace(go.Violin(x=labels,
                                y=ebitda,
                                name='€ {:.0f}'.format(ebitda.median()),
                                box_visible=True,
                                meanline_visible=True))
    fig.update_layout({
        "plot_bgcolor": "#FFFFFF",
        "paper_bgcolor": "#FFFFFF",
        "title": 'EBITDA by Product Descriptor (Median in Legend)',
        "yaxis.title": "EBITDA (€)",
        "height": 325,
        "font": dict(
            size=10),
        "margin": dict(
            l=0,
            r=0,
            b=0,
            t=30,
            pad=4
        ),
    })
    return fig
def make_sunburst_plot(clickData=None, toAdd=None, col=None, val=None):
    """Sunburst of EBITDA for one descriptor group.

    The group can come from a violin-plot click (clickData, whose x label
    is "<descriptor>: <group>"), from explicit col/val arguments, or —
    failing both — from the last (highest-median) row of moodsdf.
    toAdd optionally appends extra columns (e.g. Height/Width) to the path.
    """
    if clickData is not None:
        parts = clickData["points"][0]['x'].split(": ")
        col = parts[0]
        val = parts[1]
    elif col is None:
        last = moodsdf.iloc[-1]
        col = last['descriptor']
        val = last['group']
    # drop Width/Height by default, and the selected column itself
    path = list(descriptors[:-2])
    if col in path:
        path.remove(col)
    if toAdd is not None:
        path.extend(toAdd)
    subset = df.loc[df[col] == val]
    fig = px.sunburst(subset, path=path[:], color='EBITDA', title='{}: {}'.format(
        col, val),
        color_continuous_scale=px.colors.sequential.Viridis
        )
    fig.update_layout({
        "plot_bgcolor": "#FFFFFF",
        "title": '(Select in Violin) {}: {}'.format(col,val),
        "paper_bgcolor": "#FFFFFF",
        "height": 325,
        "font": dict(
            size=10),
        "margin": dict(
            l=0,
            r=0,
            b=0,
            t=30,
            pad=4
        ),
    })
    return fig
def make_ebit_plot(df, select=None, sort='Worst', descriptors=None):
    """Rank-order scatter of EBITDA per product, colored by product family.

    df: product DataFrame; its first column is the family used for coloring.
    select: optional list of positional indices into the (sorted/filtered)
        moodsdf; when given, the scatter is faded and the selected groups
        are highlighted with colored vertical dashed lines.
    sort: 'Worst' (ascending median, moodsdf default) or 'Best' (descending).
    descriptors: optional subset of descriptor columns to keep.
    """
    families = df[df.columns[0]].unique()
    colors = ['#636EFA', '#EF553B', '#00CC96', '#AB63FA', '#FFA15A', '#19D3F3',\
              '#FF6692', '#B6E880', '#FF97FF', '#FECB52']
    colors_cycle = cycle(colors)
    color_dic = {'{}'.format(i): '{}'.format(j) for i, j in zip(families,
                                                                colors)}
    fig = go.Figure()
    if select == None:
        # no highlight requested: plot every product at full opacity
        for data in px.scatter(
                df,
                x='Product',
                y='EBITDA',
                color=df.columns[0],
                color_discrete_map=color_dic,
                opacity=1).data:
            fig.add_trace(
                data
            )
    elif select != None:
        color_dic = {'{}'.format(i): '{}'.format(j) for i, j in zip(select,
                                                                    colors)}
        # faded background scatter; highlighted groups are drawn as shapes below
        for data in px.scatter(
                df,
                x='Product',
                y='EBITDA',
                color=df.columns[0],
                color_discrete_map=color_dic,
                opacity=0.09).data:
            fig.add_trace(
                data
            )
        if sort == 'Best':
            local_df = moodsdf.sort_values('group_median', ascending=False)
        elif sort == 'Worst':
            local_df = moodsdf
        new_df = pd.DataFrame()
        if descriptors != None:
            local_df = local_df.loc[local_df['descriptor'].isin(descriptors)]
        for index in select:
            # .copy() so the added 'color' column never writes into a view of
            # df (avoids pandas chained-assignment / SettingWithCopyWarning)
            x = df.loc[(df[local_df.iloc[index]\
                ['descriptor']] == local_df.iloc[index]['group'])].copy()
            x['color'] = next(colors_cycle) # for line shapes
            new_df = pd.concat([new_df, x])
        new_df = new_df.reset_index(drop=True)
        # one vertical dashed line per highlighted product
        shapes=[]
        for index, i in enumerate(new_df['Product']):
            shapes.append({'type': 'line',
                           'xref': 'x',
                           'yref': 'y',
                           'x0': i,
                           'y0': -4e5,
                           'x1': i,
                           'y1': 4e5,
                           'line':dict(
                               dash="dot",
                               color=new_df['color'][index],)})
        fig.update_layout(shapes=shapes)
    fig.update_layout({
        "plot_bgcolor": "#FFFFFF",
        "paper_bgcolor": "#FFFFFF",
        "title": 'Rank Order EBITDA by {}'.format(df.columns[0]),
        "yaxis.title": "EBITDA (€)",
        "height": 325,
        "font": dict(
            size=10),
        "xaxis": dict(
            showticklabels=False
        ),
        "margin": dict(
            l=0,
            r=0,
            b=0,
            t=30,
            pad=4
        ),
        "xaxis.tickfont.size": 8,
    })
    return fig
# Build App
external_stylesheets = ['../assets/styles.css', '../assets/s1.css', 'https://codepen.io/chriddyp/pen/bWLwgP.css']
app = JupyterDash(__name__, external_stylesheets=external_stylesheets)
# Layout: row 1 = control panel + rank-order EBITDA scatter,
#         row 2 = violin plot + sunburst plot
app.layout = html.Div([
    html.Div([
        html.Div([
            html.P('Descriptors'),
            # which descriptor columns feed the violin/sunburst plots
            dcc.Dropdown(id='descriptor_dropdown',
                         options=[{'label': i, 'value': i} for i in descriptors],
                         value=descriptors,
                         multi=True,
                         className="dcc_control"),
            html.P('Number of Descriptors:', id='descriptor-number'),
            # window of ranked descriptor groups to display
            dcc.RangeSlider(
                id='select',
                min=0,
                max=moodsdf.shape[0],
                step=1,
                value=[0,10]),
            html.P('Sort by:'),
            dcc.RadioItems(
                id='sort',
                options=[{'label': i, 'value': j} for i, j in \
                        [['Low EBITDA', 'Worst'],
                        ['High EBITDA', 'Best']]],
                value='Best',
                labelStyle={'display': 'inline-block'},
                style={"margin-bottom": "10px"},),
            html.P('Toggle view Violin/Descriptor Data'),
            daq.BooleanSwitch(
                id='daq-violin',
                on=False,
                style={"margin-bottom": "10px", "margin-left": "0px",
                    'display': 'inline-block'}),
            ],
            className='mini_container',
            id='descriptorBlock',
            style={'width': '32%', 'display': 'inline-block'}
        ),
        html.Div([
            dcc.Graph(
                id='ebit_plot',
                figure=make_ebit_plot(df)),
            ],
            className='mini_container',
            style={'width': '65%', 'float': 'right', 'display': 'inline-block'},
            id='ebit-family-block'
        ),
    ], className='row container-display',
    ),
    html.Div([
        html.Div([
            dcc.Graph(
                id='violin_plot',
                figure=make_violin_plot()),
            ],
            className='mini_container',
            style={'width': '65%', 'display': 'inline-block'},
            id='violin',
        ),
        html.Div([
            # extra columns the user can fold into the sunburst path
            dcc.Dropdown(id='length_width_dropdown',
                        options=[{'label': 'Height', 'value': 'Height'},
                                {'label': 'Width', 'value': 'Width'}],
                        value=['Width'],
                        multi=True,
                        placeholder="Include in sunburst chart...",
                        className="dcc_control"),
            dcc.Graph(
                id='sunburst_plot',
                figure=make_sunburst_plot()
            ),
            ],
            className='mini_container',
            style={'width': '32%', 'display': 'inline-block'},
            id='sunburst',
        ),
    ], className='row container-display',
    style={'margin-bottom': '50px'},
    ),
], className='pretty container'
)
@app.callback(
    Output('sunburst_plot', 'figure'),
    [Input('violin_plot', 'clickData'),
     Input('length_width_dropdown', 'value'),
     Input('sort', 'value'),
     Input('select', 'value'),
     Input('descriptor_dropdown', 'value')])
def display_sunburst_plot(clickData, toAdd, sort, select, descriptors):
    """Refresh the sunburst: a violin click wins, otherwise fall back to the
    first group of the current ranked/filtered window."""
    if sort == 'Best':
        ranked = moodsdf.sort_values('group_median', ascending=False)
        ranked = ranked.reset_index(drop=True)
    else:
        ranked = moodsdf
    if descriptors is not None:
        ranked = ranked.loc[ranked['descriptor'].isin(descriptors)]
        ranked = ranked.reset_index(drop=True)
    top = ranked.iloc[select[0]]
    return make_sunburst_plot(clickData, toAdd, top['descriptor'], top['group'])
@app.callback(
    [Output('select', 'max'),
     Output('select', 'value')],
    [Input('descriptor_dropdown', 'value')]
)
def update_descriptor_choices(descriptors):
    """Rescale the range slider to the number of groups that survive the
    descriptor filter, and reset its window to at most 5 groups."""
    filtered = moodsdf.loc[moodsdf['descriptor'].isin(descriptors)]
    max_value = filtered.shape[0]
    return max_value, [0, min(5, max_value)]
@app.callback(
    Output('descriptor-number', 'children'),
    [Input('select', 'value')]
)
def display_descriptor_number(select):
    """Show how many groups the [start, stop) slider window covers."""
    start, stop = select
    return f"Number of Descriptors: {stop - start}"
@app.callback(
    Output('violin_plot', 'figure'),
    [Input('sort', 'value'),
     Input('select', 'value'),
     Input('descriptor_dropdown', 'value')]
)
def display_violin_plot(sort, select, descriptors):
    """Thin pass-through: rebuild the violin plot from the current controls."""
    return make_violin_plot(sort=sort, select=select, descriptors=descriptors)
@app.callback(
    Output('ebit_plot', 'figure'),
    [Input('sort', 'value'),
     Input('select', 'value'),
     Input('descriptor_dropdown', 'value'),
     Input('daq-violin', 'on')]
)
def display_ebit_plot(sort, select, descriptors, switch):
    """Rebuild the scatter; when the toggle is on, highlight the slider's
    [start, stop) window of descriptor groups."""
    if switch != True:
        return make_ebit_plot(df)
    indices = list(np.arange(select[0], select[1]))
    return make_ebit_plot(df, indices, sort=sort, descriptors=descriptors)
app.run_server(mode='external', port='8881')
```
| github_jupyter |
## Problem 1
___
In this problem, we will build a Multilayer Perceptron (MLP) and train it on the MNIST hand-written digit dataset.
### Summary:
___
[Question 1](#1.1)
[Question 2](#1.2)
[Question 3](#1.3)
[Question 4](#1.4)
___
### 1. Building the Model <a id='1.1'></a>
__________
**1.1) Build an MLP and choose the values of $h^1$ and $h^2$ such that the total number of parameters (including biases) falls within the range of [0.5M, 1.0M].**
The model is implemented within the NN class below.
Given that the input size is 784 and we have 10 classes, we estimated the size of each hidden layer as follows:
- first hidden layer: 500 units
- second hidden layer: 400 units
The total number of parameters in our model with these settings is: $(784+1)*500 + (500+1)*400 + (400+1)*10 = 596910$
**1.2) Implement the forward and backward propagation of the MLP in numpy without using any of the deep learning frameworks that provides automatic differentiation.**
Our algorithm implements minibatch gradient descent, which allows us to save a lot of computation thanks to numpy's optimizations for matrix operations (we can use 1-example stochastic gradient descent by specifying batch_size = 1).
We implemented the forward and backward propagation using matrices to represent the minibatches and the neural network parameters. This way, we avoid using looped sums and replace them by numpy matrix operations.
We have also let to the user the possibility to use biases or not.
In our implementation, all matrices are transposed compared to the course notations. This has to be highlighted because it changes the order of all matrix operations.
#### Forward:
Each batch is represented by a $\mbox{batch_size} \times 784$ matrix. It can be treated as the output $H_0$ of an imaginary layer with index $0$.
For each hidden layer $i$ within $1\leq i \leq L$, we compute the preactivations matrix as $ A_{i} = H_{i-1}W_{i} + b_{i}$, with the following dimensions:
- $A_i$ : the preactivations matrix of dimensions $\mbox{batch_size} \times h^{i}$
- $H_{i-1}$ : the postactivations matrix of dimensions $\mbox{batch_size} \times h^{i-1}$
- $W_{i}$ : the weights matrix of dimensions $h^{i-1} \times h^{i}$ ($h^i$ being the number of units of the $i^{th}$ layer)
- $b_{i}$: the biases matrix of dimensions $1 \times h^{i}$
As we can see, $H_{i-1}W_{i}$ and $b_{i}$ don't have the same first dimension, but thanks to the broadcast property provided by numpy, this is not an issue.
After using this linear transformation, we apply an activation function (for example ReLU) on $A_{i}$, which gives us $H_{i} = \mbox{activation}(A_{i})$.
The only exception is the output layer, which has a different activation function (softmax) that defines the outputs $H_{L+1}$ as a $batch\_size \times 10$ matrix of $batch\_size$ sets of total probabilities over the $10$ possible labels.
#### Backward:
We have implemented the backpropagation algorithm as follows:
The preactivation gradients of the output layer $L+1$ are represented by the $batch\_size \times 10$ matrix $\nabla A_{L+1}$, which is calculated according to:
$$
\nabla A_{L+1} = -(\mbox{true labels} - H_{L+1})
$$
(The 'true labels' matrix is a one-hot encoding of the real class of each example of the minibatch)
Then for each layer $i$ starting from $L+1$ to $1$:
- The weights gradients matrix is computed as:
$$
\nabla W_{i} = \frac{H_{i-1}^T \nabla A_i}{\mbox{batch_size}}
$$
This operation saves us a lot of computation and memory thanks to optimized matrix operations, as it computes at once the element-wise mean over the minibatch of the $\mbox{batch_size}$ matrices of dimensions $h^{i-1} \times h^{i}$ that would have been computed if we had considered each example separately. Indeed the matrix-product
$$
\underset{(h^{i-1} \times \mbox{batch_size})}{H_{i-1}^T} \underset{(\mbox{batch_size} \times h^{i})}{\nabla A_i}
$$
is the element-wise sum of the $\mbox{batch_size}$ matrices obtained for each example of the minibatch by the vector-product
$$
\underset{(h^{i-1} \times 1)}{h_{i-1}^T} \underset{(1 \times h^{i})}{\nabla a_i}
$$
presented in the course. Dividing by $\mbox{batch_size}$ gives us directly the average that has to be used for the upcoming update.
- Similarly, the biases gradients matrix is computed as:
$$
\nabla db_i = \mbox{mean over batch dimension}(\nabla A_i)
$$
which gives us a $1 \times h^{i}$ vector of the mean of the biases gradients over the minibatch, homogeneous to the $b_i$ vector as we have seen before.
- If the layer $i$ is not the layer $1$, we also need to compute $\nabla H_{i-1}$ and $\nabla A_{i-1}$ as follows:
$$
\nabla H_{i-1} = \nabla A_i \nabla W_i^T
\\
\nabla A_{i-1} = \nabla H_{i-1} \odot \mbox{activation}(A_{i-1})
$$
so we can backpropagate gradients to the previous ($i-1$) layer and compute $\nabla W_{i-1}$ and $\nabla b_{i-1}$.
**1.3) Train the MLP using the probability loss ($\textit{cross entropy}$) as training criterion. We minimize this criterion to optimize the model parameters using $\textit{stochastic gradient descent}$.**
Our algorithm minimizes the cross entropy, estimated by:
$$
-\frac{1}{N}\sum_{i=1}^{N} \mbox{true labels} * \mbox{log}(\mbox{prediction})
$$
To choose our best model, we pick the one with the highest validation accuracy and we keep this model in the bestWeights attribute, which can be saved on the hard drive to be loaded later in test mode. When the validation accuracy ceases to improve for a number of epochs defined by the patience attribute, the training stops as we consider being in overfitting regime (early stopping).
```
# This part of the code implements live plotting of the train and validation loss and accuracy:
from IPython.display import clear_output
from matplotlib import pyplot as plt
import collections
%matplotlib inline
def live_plot(data_dict1, title1, data_dict2, title2, figsize=(7,5), bestEpoch=0):
    """
    plots the train and test error and the accuracy
    after each epoch; the best epoch is marked with a red dot
    on the validation curves
    """
    clear_output(wait=True)
    plt.figure(figsize=figsize)
    # (subplot position, series dict, title, curve that gets the red marker)
    panels = [(1, data_dict1, title1, 'validation accuracy'),
              (2, data_dict2, title2, 'validation loss')]
    for position, series, title, marked in panels:
        plt.subplot(1, 2, position)
        for label, values in series.items():
            plt.plot(values, label=label)
            if label == marked:
                plt.plot(bestEpoch, values[bestEpoch], "ro")
        plt.title(title)
        plt.grid(True)
        plt.xlabel('epoch')
        plt.legend(loc='center left')
    plt.show()
# This part implements the NN:
import numpy as np
import pickle
import copy
import time
"""MLP class :
The model implemented as follows :
Each layers is represented by a b vector (biases) and a W matrix (weights)
These are referenced by the weights dictionary. The format is :
self.weights[f"X{n}"] where X = b, W
NB : In our implementation, all matrices are transposed compared to the class notations
"""
class NN(object):
    """Numpy-only multilayer perceptron trained with minibatch SGD.

    Weights and biases are stored in the ``self.weights`` dict under keys
    f"W{n}" / f"b{n}".  All matrices are transposed compared to the course
    notations: rows index examples, columns index units.
    """
    def __init__(self,
                 hidden_dims = (1024, 2048), # dimensions of each hidden layers
                 n_hidden = 2, # number of hidden layers
                 mode = 'train', # current mode : train/test
                 datapath = None, # path where to find the .pkl file
                 model_path = None, # path where to save/load the model
                 epsilon = 1e-6, # for cross entropy calculus stability : log(x) = log(epsilon) if x < epsilon
                 lr = 1e-1, # learning rate
                 n_epochs = 1000, # max number of epochs
                 batch_size = 1000, # batch size for training
                 compute_biases = True, # whether biases are used or not
                 seed = None, # seed for reproducibility
                 activation = "relu", # activation function
                 init_method = "glorot", # initialization method
                 patience = 5): # number of useless iterations before early stopping
        """
        - method: (string) - initializes the weight matrices
            -> "zero" for a Zero initialisation of the weights
            -> "normal" for a Normal initialisation of the weights
            -> "glorot" for a Uniform initialisation of the weights
        NOTE(review): datapath is effectively required — the default None
        would crash in the data-loading branch below.
        """
        assert len(hidden_dims) == n_hidden, "Hidden dims mismatch!"
        self.hidden_dims = hidden_dims
        self.n_hidden = n_hidden
        self.mode = mode
        self.datapath = datapath
        self.model_path = model_path
        self.epsilon = epsilon
        self.lr = lr
        self.n_epochs = n_epochs
        self.batch_size = batch_size
        self.compute_biases = compute_biases
        self.init_method = init_method
        self.seed = seed
        self.activation_str = activation
        self.patience = patience
        self.dataplot1 = collections.defaultdict(list) # history of the train and validation accuracies
        self.dataplot2 = collections.defaultdict(list) # history of the train and validation losses
        self.earlyStopped = False # this is set to True when early stopping happens
        self.bestValAccuracy = 0 # max value of the validation accuracy
        self.bestEpoch = 0 # epoch where we reached the min value of the validation loss
        self.bestWeights = None # optimal weights dictionary (contains biases when compute_biases=True)
        # train, validation and test sets :
        if datapath.lower().endswith(".npy"):
            self.tr, self.va, self.te = np.load(open(datapath, "rb"))
        elif datapath.lower().endswith(".pkl"):
            # latin1 encoding needed for pickles created under Python 2 (MNIST dump)
            u = pickle._Unpickler(open(datapath, 'rb'))
            u.encoding = 'latin1'
            self.tr, self.va, self.te = u.load()
        else:
            raise Exception("Unknown data source type!")
    def initialize_weights(self, dims):
        """
        Initializes the weights and biases according to the specified method :
        - dims: (list of two integers) - the size of input/output layers
        :return: None
        """
        if self.seed is not None:
            np.random.seed(self.seed)
        if self.mode == "train":
            self.weights = {}
            all_dims = [dims[0]] + list(self.hidden_dims) + [dims[1]]
            #print("Layers dimensions are : ", all_dims)
            for layer_n in range(1, self.n_hidden + 2):
                if self.init_method == "zero":
                    self.weights[f"W{layer_n}"] = np.zeros(shape=(all_dims[layer_n - 1],all_dims[layer_n])).astype('float64')
                elif self.init_method == "normal":
                    # dividing by the previous layer's width keeps preactivations
                    # around [-1, 1] and avoids the overflow ("true divide") errors
                    # a raw N(0, 1) initialization produces
                    self.weights[f"W{layer_n}"] = np.random.normal(loc=0.0, scale=1.0, size=(all_dims[layer_n - 1],
                                                                   all_dims[layer_n])).astype('float64')/all_dims[layer_n-1]
                elif self.init_method == "glorot":
                    # Glorot/Bengio 2010 uniform bound: sqrt(6 / (fan_in + fan_out))
                    b = np.sqrt(6.0/(all_dims[layer_n]+all_dims[layer_n-1]))
                    self.weights[f"W{layer_n}"] = np.random.uniform(low=-b, high=b, size=(all_dims[layer_n - 1],
                                                                    all_dims[layer_n])).astype('float64')
                else:
                    raise Exception("The provided name for the initialization method is invalid.")
                if self.compute_biases:
                    self.weights[f"b{layer_n}"] = np.zeros((1, all_dims[layer_n]))
        elif self.mode == "test": # test mode is to load the weights of an existing model
            self.weights = np.load(self.model_path)
        else:
            raise Exception("Unknown Mode!")
    def relu(self,x, prime=False): # Prime for Step function, else ReLu
        """
        Definition of the Relu function and its derivative
        """
        if prime:
            return x > 0
        return np.maximum(0, x)
    def sigmoid(self,x, prime=False):
        """
        Definition of the Sigmoid function and its derivative
        """
        if prime:
            return self.sigmoid(x)*(1 - self.sigmoid(x))
        return 1/(1 + np.exp(-x))
    def tanh(self,x, prime=False):
        """
        Definition of the tanh function and its derivative
        """
        if prime:
            return 1 - np.power(self.tanh(x),2)
        return np.tanh(x)
    def activation(self, input_, prime=False):
        """
        Selecting and applying a given activation function
        on the preactivation values
        """
        if self.activation_str == "relu":
            return self.relu(input_,prime)
        elif self.activation_str == "sigmoid":
            return self.sigmoid(input_,prime)
        elif self.activation_str == "tanh":
            return self.tanh(input_,prime)
        else:
            raise Exception("Unsupported activation!")
    def softmax(self, input_): # Computes the softmax of the input
        """
        Definition of the softmax function: activation function
        for the output layer
        """
        Z = np.exp(input_ - np.max(input_)) # softmax(x-C) = softmax(x) (stability)
        return Z / np.sum(Z, axis=1, keepdims=True)
    def forward(self, input_): # Forward propagation : computes the outputs (cache) from the input
        """
        Implementation of the forward propagation process to calculate
        the preactivation values and the values of the hidden units
        """
        cache = {"H0": input_}
        for layer in range(1, self.n_hidden + 1):
            if self.compute_biases:
                cache[f"A{layer}"] = cache[f"H{layer-1}"] @ self.weights[f"W{layer}"] + self.weights[f"b{layer}"]
            else:
                cache[f"A{layer}"] = cache[f"H{layer-1}"] @ self.weights[f"W{layer}"]
            cache[f"H{layer}"] = self.activation(cache[f"A{layer}"])
        layer = self.n_hidden + 1
        if self.compute_biases:
            cache[f"A{layer}"] = cache[f"H{layer-1}"] @ self.weights[f"W{layer}"] + self.weights[f"b{layer}"]
        else:
            cache[f"A{layer}"] = cache[f"H{layer-1}"] @ self.weights[f"W{layer}"]
        cache[f"H{layer}"] = self.softmax(cache[f"A{layer}"]) # softmax on last layer
        return cache
    def backward(self, cache, labels): # Backward propagation : computes the gradients from the outputs (cache)
        """
        Implementation of the backward propagation process
        """
        output = cache[f"H{self.n_hidden+1}"]
        grads = {f"dA{self.n_hidden+1}": - (labels - output)}
        for layer in range(self.n_hidden + 1, 0, -1):
            # the following operation averages at once all the matrices
            # that we would have calculated for each example of the minibatch if we had not represented it in matrix form :
            grads[f"dW{layer}"] = cache[f"H{layer-1}"].T @ grads[f"dA{layer}"] / self.batch_size
            # we need to do the same for the biases gradients :
            if self.compute_biases:
                grads[f"db{layer}"] = np.mean(grads[f"dA{layer}"], axis=0, keepdims=True)
            if layer > 1:
                grads[f"dH{layer-1}"] = grads[f"dA{layer}"] @ self.weights[f"W{layer}"].T
                grads[f"dA{layer-1}"] = grads[f"dH{layer-1}"] * self.activation(cache[f"A{layer-1}"], prime=True)
        return grads
    def update(self, grads): # To update the weights and the biases
        """
        Updating the weights and the biases with a given gradient input
        (plain SGD step: w <- w - lr * grad)
        """
        # BUGFIX: the upper bound must be n_hidden + 2 so that the output layer
        # (layer n_hidden + 1) is updated too.  With n_hidden + 1, the output
        # layer's weights stayed frozen at their initial values even though
        # backward() computes their gradients.
        for layer in range(1, self.n_hidden + 2):
            self.weights[f"W{layer}"] = self.weights[f"W{layer}"] - self.lr * grads[f"dW{layer}"]
            if self.compute_biases:
                self.weights[f"b{layer}"] = self.weights[f"b{layer}"] - self.lr * grads[f"db{layer}"]
    def loss(self, prediction, labels): # Computes the cross entropy
        """
        Calculating the cross entropy loss
        (note: clips `prediction` in place to [epsilon, 1 - epsilon] to keep
        the log numerically stable)
        """
        prediction[np.where(prediction < self.epsilon)] = self.epsilon
        prediction[np.where(prediction > 1 - self.epsilon)] = 1 - self.epsilon
        return -1 * np.sum(labels * np.log(prediction)) / prediction.shape[0]
    def compute_loss_and_accuracy(self, X, y): # Stores the accuracy/loss of the train/validation sets
        """
        Computing the loss and accuracy metrics and the prediction on a given dataset
        """
        on_y = self._one_hot(y)
        vCache = self.forward(X)
        vOut = np.argmax(vCache[f"H{self.n_hidden + 1}"], axis=1)
        vAccuracy = np.mean(y == vOut)
        vLoss = self.loss(vCache[f"H{self.n_hidden+1}"], on_y)
        return vLoss, vAccuracy, vOut
    def _one_hot(self,y):
        """
        Implementation of the OneHot Encoding
        (the number of classes is inferred from the training labels)
        """
        _, y_train = self.tr
        return np.eye(np.max(y_train) - np.min(y_train) + 1)[y]
    def train(self,show_graph = True, save_model = False):
        """
        Implementation of the training process: minibatch SGD with early
        stopping on the validation accuracy (patience epochs without
        improvement).  Returns the accuracy and loss histories.
        """
        X_train, y_train = self.tr
        y_onehot = self._one_hot(y_train)
        dims = [X_train.shape[1], y_onehot.shape[1]]
        self.initialize_weights(dims)
        n_batches = int(np.ceil(X_train.shape[0] / self.batch_size))
        countES = 0
        for epoch in range(self.n_epochs):
            for batch in range(n_batches):
                minibatchX = X_train[self.batch_size * batch:self.batch_size * (batch + 1), :]
                minibatchY = y_onehot[self.batch_size * batch:self.batch_size * (batch + 1), :]
                cache = self.forward(minibatchX)
                grads = self.backward(cache, minibatchY)
                self.update(grads)
            X_tr, y_tr = self.tr
            trLoss, trAccuracy,_ = self.compute_loss_and_accuracy(X_tr, y_tr)
            X_val, y_val = self.va
            valLoss, valAccuracy,_ = self.compute_loss_and_accuracy(X_val, y_val)
            if valAccuracy > self.bestValAccuracy:
                self.bestValAccuracy = valAccuracy # we choose or best model according to Accuracy
                self.bestEpoch = epoch
                self.bestWeights = copy.deepcopy(self.weights)
                countES = 0
            self.dataplot1['train accuracy'].append(trAccuracy)
            self.dataplot1['validation accuracy'].append(valAccuracy)
            self.dataplot2['train loss'].append(trLoss)
            self.dataplot2['validation loss'].append(valLoss)
            if show_graph:
                live_plot(self.dataplot1, "Accuracy", self.dataplot2, "Loss", (14,5), self.bestEpoch)
            countES += 1
            if countES >= self.patience: # early stopping
                self.earlyStopped = True
                break
        if save_model:
            # Save the best model:
            if self.model_path is None:
                string_hdims = "-".join(map(str,self.hidden_dims))
                model_name = f"best-model-init-{self.init_method}-lr-{self.lr}-hdims-{string_hdims}-batches-{self.batch_size}.npz"
                np.savez(model_name,**self.bestWeights)
            else:
                np.savez(self.model_path,**self.bestWeights)
        self.weights = copy.deepcopy(self.bestWeights)
        return self.dataplot1, self.dataplot2
    def test(self):
        """
        Implementation of the testing process
        """
        X_te, y_te = self.te
        vLoss, vAccuracy,_ = self.compute_loss_and_accuracy(X_te, y_te)
        print(f"Test Accuracy:{vAccuracy}, Test Loss:{vLoss}")
    def finite_difference(self, eps, lay, nb_params = 10):
        """
        Gradient check on the first validation example.
        eps: epsilon step used to estimate the finite difference
        lay: layer where to estimate the finite difference
        nb_params: number of individual weights to check
        Returns (approximated gradients, analytic gradients, max abs difference).
        """
        X, y = self.va
        self.batch_size = 1
        first_label = self._one_hot(y[0]).reshape(1,-1)
        realCache = self.forward(X[0].reshape(1,-1))
        realGrad = self.backward(realCache, first_label)
        gradient_approx = np.zeros(nb_params)
        gradient_real = np.zeros(nb_params)
        increment = 0
        for iLine, line in enumerate(self.weights[f"W{lay}"]):
            for iCol, col in enumerate(line):
                self.weights[f"W{lay}"][iLine, iCol] += eps
                # w+ :
                plusCache = self.forward(X[0].reshape(1,-1))
                self.weights[f"W{lay}"][iLine, iCol] -= 2*eps
                # w- :
                moinsCache = self.forward(X[0].reshape(1,-1))
                plusLoss = self.loss(plusCache[f"H{self.n_hidden+1}"],first_label)
                moinsLoss = self.loss(moinsCache[f"H{self.n_hidden+1}"],first_label)
                # central finite difference: (L(w+eps) - L(w-eps)) / (2 eps)
                gradient_approx[increment] = (plusLoss - moinsLoss)/(2*eps)
                gradient_real[increment] = realGrad[f"dW{lay}"][iLine,iCol]
                self.weights[f"W{lay}"][iLine, iCol] += eps # restore the weight
                increment += 1
                if increment==10:
                    self.weights = copy.deepcopy(self.bestWeights)
                    return gradient_approx, gradient_real, np.max(np.abs(gradient_approx - gradient_real)) # np.linalg.norm(gradient_approx - gradient_real)
        self.weights = copy.deepcopy(self.bestWeights)
        return gradient_approx, gradient_real, np.max(np.abs(gradient_approx - gradient_real))
```
For the following questions, the architecture we chose is a 2-layer neural network of size $(500, 400)$. The batch size is $100$, and the learning rate is $0.5$, which we found converges very quickly with this setting.
### 2. Initialization <a id='1.2'></a>
___
**2.1) Train the model for 10 epochs using the initialization methods and record the average loss measured on the training data at the end of each epoch (10 values for each setup).**
**- Initialization with zeros:** The training phase stops after 5 epochs because we use the early stopping option which stops it from reaching 10 epochs.
```
# Zero initialization: training stalls (symmetric weights never break)
neural_net = NN(hidden_dims=(500, 400),
               n_hidden=2, # number of hidden layers
               mode='train', # current mode : train/test
               datapath="mnist.pkl", # path where to find the .pkl file
               model_path=None, # path where to save/load the model
               epsilon = 1e-8, # for cross entropy calculus stability : log(x) = log(epsilon) if x < epsilon
               lr = 5e-1, # learning rate
               n_epochs = 10, # max number of epochs
               batch_size = 100, # batch size for training
               compute_biases = True, # whether biases are used or not
               init_method = "zero") # initialization method
_,_ = neural_net.train()
```
**- Initialization with a Standard Normal distribution:**
```
# Normal initialization (scaled by the previous layer's width, see initialize_weights)
neural_net = NN(hidden_dims=(500, 400),
               n_hidden=2, # number of hidden layers
               mode='train', # current mode : train/test
               datapath="mnist.pkl", # path where to find the .pkl file
               model_path=None, # path where to save/load the model
               epsilon = 1e-8, # for cross entropy calculus stability : log(x) = log(epsilon) if x < epsilon
               lr = 5e-1, # learning rate
               n_epochs = 10, # max number of epochs
               batch_size = 100, # batch size for training
               compute_biases = True, # whether biases are used or not
               init_method = "normal") # initialization method
normal_hist_acc, normal_hist_loss = neural_net.train()
```
**- Initialization with Glorot method:**
```
# Glorot (uniform) initialization — fastest convergence of the three setups
neural_net = NN(hidden_dims=(500, 400),
               n_hidden=2, # number of hidden layers
               mode='train', # current mode : train/test
               datapath="mnist.pkl", # path where to find the .pkl file
               model_path=None, # path where to save/load the model
               epsilon = 1e-8, # for cross entropy calculus stability : log(x) = log(epsilon) if x < epsilon
               lr = 5e-1, # learning rate
               n_epochs = 20, # max number of epochs
               batch_size = 100, # batch size for training
               compute_biases = True, # whether biases are used or not
               init_method = "glorot") # initialization method
glorot_hist_acc, glorot_hist_loss = neural_net.train()
```
**2.2) Compare the three setups by plotting the losses against the training time (epoch) and comment on the result.**
- Zero initialization for the weights leads to no change at all in accuracy. We can indeed show that doing so will lead to no possible update of the weights during backpropagation, as
$$
\nabla H_{i-1} = \nabla A_i \nabla W_i^T
$$
will give zero gradients. Actually, only the biases of the last layer will be changed according to
$$
\nabla A_{L+1} = -(\mbox{true labels} - H_{L+1})\\
\mbox{and}\\
\nabla b_i = \mbox{mean over batch dimension}(\nabla A_i)
$$
in such a way that will not impact the predictions over each example from an epoch to another.
- If we simply generate the weights following a normal distribution $N(0,1)$, this leads to quick numeric explosion and errors. Indeed, weights will add up and are likely to lead to exponential overflows (or zero divisions in our stabilized softmax). We chose to tackle this issue by dividing the normal weights by the dimension of the previous layer. This way, preactivation values will stay around \[-1,1\] at each layer. After 10 epochs, this initialization method leads to a 91% accuracy and no overfitting (the model needs to train more).
- If we use the formula from Glorot, Bengio - 2010, we observe extremely fast convergence as the validation accuracy reaches 96% after the first epoch (versus 48% for normal initialization) and the model starts overfitting (regarding the validation accuracy) at more than 98% accuracy after 17 epochs.
### 3. Hyperparameter Search <a id='1.3'></a>
___
**3.1) Find out a combination of hyper-parameters (model architecture, learning rate, nonlinearity, etc.) such that the average accuracy rate on the validation set ($r^{(valid)}$) is at least 97\%.**
Although we have found a model that performs better than 98% relatively quickly by hand, in this part we perform a random search in hope to find a better model.
```
from IPython.display import clear_output
class RandomSearch:
    """Random hyper-parameter search driver for the NN class.

    Draws `n_itters` random configurations from the `params` grid and
    trains one model per configuration, recording each run's best
    validation loss/accuracy.
    """
    def __init__(self, model, params, n_itters, seed = None):
        self.params = params # parameters assignation grid (dictionary)
        self.n_itters = n_itters # number of searches
        self.model = model # NN class to use for random search
        self.seed = seed # seed for reproducibility
    def run(self):
        """Train one model per sampled configuration.

        Returns a list of (params, best validation loss, best validation
        accuracy, early-stopped flag) tuples.  A KeyboardInterrupt stops
        the search after the current run.
        """
        if self.seed is not None:
            np.random.seed(self.seed)
        results = []
        best_acc = None
        best_loss = None
        params = [p for p in self._generate_grid()]
        for it, selected_params in enumerate(params):
            print(f"Random search: test n°{it+1} - KeyboardInterrupt to stop after this iteration")
            try:
                #print(f"{it}","-".join(map(lambda x: str(x),selected_params.items())))
                instance_model = self.model(seed = self.seed,**selected_params)
                acc, loss = instance_model.train(show_graph=False,save_model=False)
                # best epoch = the one with the highest validation accuracy
                id_best = np.argmax(acc["validation accuracy"])
                results.append((selected_params,
                                loss["validation loss"][id_best],
                                acc["validation accuracy"][id_best],
                                instance_model.earlyStopped))
                #if best_acc is None or best_loss> loss["validation loss"][id_best]:
                #print(f"Found a better model: Accuracy from {best_acc} to {acc['validation accuracy'][id_best]}")
                #best_loss = loss["validation loss"][id_best]
                #best_acc = acc["validation accuracy"][id_best]
                #print(f'Model {it+1}, accuracy {acc["validation accuracy"][id_best]:.5f}, loss {loss["validation loss"][id_best]:.5f}, Best Model: acc {best_acc:.5f}, loss {best_loss:.5f}')
            except KeyboardInterrupt:
                break
            clear_output(wait=True)
        return results
    def _generate_grid(self):
        """Yield `n_itters` randomly sampled configurations."""
        for it in range(self.n_itters):
            yield self._select_params()
    def _select_params(self):
        """Sample one configuration from the grid.

        List/tuple values are drawn uniformly at random; scalars are
        passed through unchanged; "hidden_dims" is rebuilt from
        "n_hidden" random draws out of the "hidden_dims" choices.
        """
        selected_params = {}
        for key,value in self.params.items():
            if key == "hidden_dims":
                continue # handled together with "n_hidden" below
            elif key == "n_hidden":
                assert type(value) == int and value>0
                hidden_units = []
                for _ in range(value):
                    hidden_units.append(np.random.choice(self.params["hidden_dims"]))
                selected_params["hidden_dims"] = hidden_units
                selected_params[key]=value
            elif type(value)==list or type(value)==tuple:
                selected_params[key] = np.random.choice(self.params[key])
            else:
                selected_params[key]=value
        return selected_params
from collections import defaultdict
import pandas as pd
# First random search over architecture and optimization hyper-parameters.
grid_params = {
    "hidden_dims": [400, 500, 600],
    "n_hidden": 2,
    "lr": [.3, .5, .7],
    "n_epochs": [10, 15, 20],
    "batch_size": [75, 100, 125],
    "activation": ["tanh", "sigmoid", "relu"],
    "datapath": "mnist.pkl",
    "patience": [3, 5, 10],
}
rd_search = RandomSearch(model=NN, params=grid_params, n_itters=10, seed=42)
experiments = rd_search.run()
# Flatten the (params, best val loss, best val accuracy, early-stopped) tuples
# into one column per field.
output = defaultdict(list)
for chosen, val_loss, val_acc, stopped in experiments:
    for name, value in chosen.items():
        output[name].append(value)
    output["loss"].append(val_loss)
    output["accuracy"].append(val_acc)
    output["train_complete"].append(stopped)
sorted_output = pd.DataFrame(output)
sorted_output.sort_values(by="accuracy", ascending=False)
```
ReLU has not been randomly selected here, so we don't know about its efficiency as an activation function. However, it is clear that sigmoid is ineffective with these parameters. It is unclear how the other parameters affect the performance. No model has been fully trained, which implies that either the patience is too large, or the number of epochs is too small.
We now perform another random search considering only relu and tanh, smaller patience, and slightly wider ranges for the other parameters:
```
from collections import defaultdict
import pandas as pd
# Second search: sigmoid dropped, smaller patience, wider ranges, new seed.
grid_params = {
    "hidden_dims": [400, 500, 600],
    "n_hidden": 2,
    "lr": [.5, .75, .1],
    "n_epochs": [10, 20, 30],
    "batch_size": [50, 100, 150],
    "activation": ["tanh", "relu"],
    "datapath": "mnist.pkl",
    "patience": [1, 2, 3],
}
rd_search = RandomSearch(model=NN, params=grid_params, n_itters=10, seed=43)
experiments = rd_search.run()
# Flatten the (params, best val loss, best val accuracy, early-stopped) tuples
# into one column per field.
output = defaultdict(list)
for chosen, val_loss, val_acc, stopped in experiments:
    for name, value in chosen.items():
        output[name].append(value)
    output["loss"].append(val_loss)
    output["accuracy"].append(val_acc)
    output["train_complete"].append(stopped)
sorted_output = pd.DataFrame(output)
sorted_output.sort_values(by="accuracy", ascending=False)
```
Stopping using sigmoid as an activation function has sensibly improved our overall performance. It now becomes clear that the determining parameter is the learning rate. Reducing patience too much might also have stopped the training too early. As increasing the learning rate seems to increase the performance, let us try another iteration of Random Search using greater learning rates, greater number of epochs, and greater patience:
```
from collections import defaultdict
import pandas as pd
# Third search: larger learning rates, more epochs, larger patience.
grid_params = {
    "hidden_dims": [400, 500, 600],
    "n_hidden": 2,
    "lr": [.75, 1.0, 1.5, 2.0, 2.5],
    "n_epochs": [20, 30, 40],
    "batch_size": [50, 100, 150],
    "activation": ["tanh", "relu"],
    "datapath": "mnist.pkl",
    "patience": [5, 10],
}
rd_search = RandomSearch(model=NN, params=grid_params, n_itters=10, seed=45)
experiments = rd_search.run()
# Flatten the (params, best val loss, best val accuracy, early-stopped) tuples
# into one column per field.
output = defaultdict(list)
for chosen, val_loss, val_acc, stopped in experiments:
    for name, value in chosen.items():
        output[name].append(value)
    output["loss"].append(val_loss)
    output["accuracy"].append(val_acc)
    output["train_complete"].append(stopped)
sorted_output = pd.DataFrame(output)
sorted_output.sort_values(by="accuracy", ascending=False)
```
tanh seems to be our best activation function, although it is apparently slow to train compared to ReLU regarding the number of epochs, as only one out of seven tanh runs has completed full training here. Since we now have a small dataset of hyperparameters, we could go further using regression techniques to find an even better combination. We could also have implemented a heuristic similar to what we have done here by hand in three iterations that would converge toward a locally optimal combination of the hyperparameters (e.g. simulated annealing). However, for now let us just fully train our best model:
```
# Fully train the best configuration found by the random search
# (tanh, lr=1.5, two hidden layers of 500 units, batch size 50).
neural_net = NN(hidden_dims=(500, 500),
                n_hidden=2,
                mode='train',
                datapath="mnist.pkl",
                lr = 1.5,
                n_epochs = 1000, # arbitrarily big number because we want to reach early stopping
                batch_size = 50,
                activation = 'tanh',
                patience = 10,
                seed = 45)
hist_acc, hist_loss = neural_net.train()
print("Model validation accuracy: ", neural_net.bestValAccuracy)
```
Nothing has changed: during Random Search we did not wait until the end of patience, although we had found the same optimum. We also notice that the optimum regarding validation accuracy is way into the overfitting regime regarding validation loss.
### 4. Validation of the gradients using Finite Difference <a id='1.4'></a>
___
In this part we use finite difference to check whether our calculations of the gradients are correct or not.
The finite difference gradients $\nabla^N$ are an estimation of the gradients with respect to each parameter of the second layer. Let $\theta$ be the vector of all the parameters of this layer, the finite difference of the $i^{\mbox{th}}$ parameter is defined as:
$$\nabla^N_i = \frac{L(\theta_1, \dots, \theta_{i-1}, \theta_i + \epsilon, \theta_{i+1}, \dots, \theta_p) - L(\theta_1, \dots, \theta_{i-1}, \theta_i - \epsilon, \theta_{i+1}, \dots, \theta_p)}{2 \epsilon}$$
where $\epsilon = \frac{1}{N}$
#### The model we will use is:
```
# Reference model whose analytic gradients are checked against
# finite-difference estimates in the next cells.
neural_net = NN(hidden_dims=(500, 400),
                n_hidden=2, # number of hidden layers
                mode='train', # current mode : train/test
                datapath="mnist.pkl", # path where to find the .pkl file
                model_path=None, # path where to save/load the model
                epsilon = 1e-8, # for cross entropy calculus stability : log(x) = log(epsilon) if x < epsilon
                lr = 5e-1, # learning rate
                n_epochs = 10, # max number of epochs
                batch_size = 100, # batch size for training
                compute_biases = True, # whether biases are used or not
                init_method = "glorot", # initialization method
                seed = 42) # seed to reproduce results
hist_acc, hist_loss = neural_net.train()
```
#### 4.1) Evaluate the finite difference gradients $\nabla^N \in \mathbb{R}^p$ using $\epsilon = \frac{1}{N}$ for different values of $N$
Now, we compute the forward and the backward algorithms using only one example from the validation dataset (We will use the first example). Then we compute the gradients approximations and we calculate the difference using $\max_{1 \leq i \leq p} |\nabla^N_i - \frac{\partial L}{\partial \theta_i}| $. We will use the finite_difference method in the NN class.
```
# Choose 20 values of N: k * 10**i for i in 0..4 and k in 1..4.
N = [k * 10 ** i for i in range(5) for k in range(1, 5)]
diff = np.zeros(len(N))
# For each N, compare analytic gradients with the finite-difference
# estimate using epsilon = 1/N (on layer 2).
for idx in range(len(N)):
    app, re, diff[idx] = neural_net.finite_difference(1 / N[idx], 2)
```
#### 4.2) Plot the maximum difference between the true gradient and the finite difference gradient as a function of $N$
Let us see the evolution of the finite_difference. We will use the logarithmic scale.
```
# Max |analytic - finite-difference| gradient gap as a function of N,
# on a log-log scale (N spans several orders of magnitude).
plt.plot(N,diff)
plt.title('Max difference between computed gradients and finite differences')
plt.xlabel('N')
plt.ylabel('Max difference')
plt.yscale('log')
plt.xscale('log')
# `re` holds the analytic gradients from the last finite_difference call
print("The steepest positive gradient is ", np.max(re), ", and the steepest negative gradient is ", np.min(re))
print("The difference is minimal for epsilon = ", 1/N[np.argmin(diff)])
```
We notice that the difference between our real gradients and estimated gradients is small. Therefore, we can consider that our real gradients are well calculated (steepest real gradients are of the order of $10^{-7}$, which means that the difference is still very significant).
We also notice that the estimated gradients are closest to the real gradients for $\epsilon = 0.003$, which is not the smallest tested value although theoretically we should get closer to the real gradients as $\epsilon$ decreases. This might be explained by numerical instability: the computation becomes numerically unstable for very small values of $\epsilon$ (ref. the deep learning course given by Andrew NG).
| github_jupyter |
This Notebook is a short example of how to use the Ising solver implemented using the QAOA algorithm. We start by declaring the import of the ising function.
```
from grove.ising.ising_qaoa import ising
from mock import patch
```
This code finds the global minima of an Ising model with external fields of the form
$$f(x)= \Sigma_i h_i x_i + \Sigma_{i,j} J_{i,j} x_i x_j.$$
Two adjacent sites $i,j$ have an interaction equal to $J_{i,j}$. There is also an external magnetic field $h_i$ that affects each individual spin. The discrete variables take the values $x_i \in \{+1,-1\}$.
In order to assert the correctness of the code we will find the minima of the following Ising model
$$f(x)=x_0+x_1-x_2+x_3-2 x_0 x_1 +3 x_2 x_3.$$
Which corresponds to $x_{min}=[-1, -1, 1, -1]$ in numerical order, with a minimum value of $f(x_{min})=-9$.
This Ising code runs on quantum hardware, which means that we need to specify a connection to a QVM or QPU. Due to the absence of a real connection in this notebook, we will mock out the response to correspond to the expected value. In order to run this notebook on a QVM or QPU, replace cxn with a valid PyQuil connection object.
```
# Mock the QVM/QPU connection: hard-code the measurement bitstring and the
# expectation values the ising() solver would otherwise obtain from hardware.
with patch("pyquil.api.SyncConnection") as cxn:
    cxn.run_and_measure.return_value = [[1,1,0,1]]
    cxn.expectation.return_value = [-0.4893891813015294, 0.8876822987380573, -0.4893891813015292, -0.9333372094534063, -0.9859245403423198, 0.9333372094534065]
```
The input for the code in the default mode corresponds simply to the parameters $h_i$ and $J_{i,j}$, that we specify as a list in numerical order and a dictionary. The code returns the bitstring of the minima, the minimum value, and the QAOA quantum circuit used to obtain that result.
```
# Couplings J_{i,j} (dict keyed by site pairs) and external fields h_i (list),
# for f(x) = x0 + x1 - x2 + x3 - 2*x0*x1 + 3*x2*x3.
J = {(0, 1): -2, (2, 3): 3}
h = [1, 1, -1, 1]
solution, min_energy, circuit = ising(h, J, connection=cxn)
```
It is also possible to specify the Trotterization order for the QAOA algorithm used to implement the Ising model. By default this value is equal to double the number of variables. It is also possible to change the verbosity of the function, which is True by default. There are more advanced parameters that can be specified and are not described here.
```
# Second run: explicit Trotterization depth (num_steps=9) and quiet output.
solution_2, min_energy_2, circuit_2 = ising(h, J, num_steps=9, verbose=False, connection=cxn)
```
For large Ising problems, or those with many and close suboptimal minima, it is possible for the code to not return the global minima. Increasing the number of steps can solve this problem.
Finally, we will check if the correct bitstring was found, corresponding to the global minima, in both runs.
```
# Both runs must recover the known global minimum x = [-1, -1, 1, -1].
assert solution == [-1, -1, 1, -1], "Found bitstring for first run does not correspond to global minima"
print("Energy for first run solution", min_energy)
assert solution_2 == [-1, -1, 1, -1], "Found bitstring for second run does not correspond to global minima"
print("Energy for second run solution", min_energy_2)
```
If the assertions succeeded, and the energy was equal to $-9$, we have found the correct solution for both runs.
```
```
| github_jupyter |
```
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import ListedColormap
from ml.data import create_lineal_data
from ml.visualization import decision_boundary
%matplotlib inline
```
# Función de coste y gradiente
## Generación de datos
### Entrenamiento
```
# Training set: two linearly separable classes of 5 points each.
np.random.seed(0) # seed for deterministic data generation
samples_per_class = 5
Xa = np.c_[create_lineal_data(0.75, 0.9, spread=0.2, data_size=samples_per_class)]
Xb = np.c_[create_lineal_data(0.5, 0.75, spread=0.2, data_size=samples_per_class)]
X_train = np.r_[Xa, Xb]
y_train = np.r_[np.zeros(samples_per_class), np.ones(samples_per_class)]
cmap_dots = ListedColormap(['tomato', 'dodgerblue'])
plt.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cmap_dots, edgecolors='k')
plt.show()
```
### Validación
```
# Validation set: same generator as training but 25 points per class.
np.random.seed(0) # seed for deterministic data generation
samples_per_class = 25
Xa = np.c_[create_lineal_data(0.75, 0.9, spread=0.2, data_size=samples_per_class)]
Xb = np.c_[create_lineal_data(0.5, 0.75, spread=0.2, data_size=samples_per_class)]
X_val = np.r_[Xa, Xb]
y_val = np.r_[np.zeros(samples_per_class), np.ones(samples_per_class)]
cmap_dots = ListedColormap(['tomato', 'dodgerblue'])
plt.scatter(X_val[:, 0], X_val[:, 1], c=y_val, cmap=cmap_dots, edgecolors='k')
plt.show()
```
## Regresión Logística
### Función de coste y gradiente
```
def sigmoid(x):
    """Elementwise logistic function: 1 / (1 + e^(-x))."""
    denom = 1 + np.exp(-x)
    return 1 / denom
def logloss(w, x, y):
    """Binary cross-entropy of a logistic model, summed over examples.

    w: weights (n_features,); x: design matrix (m, n_features);
    y: labels in {0, 1} of shape (m,). Returns a scalar cost.
    (The unused local `m = y.shape[0]` was removed; the cost is a sum,
    not an average, exactly as before.)
    """
    y_hat = sigmoid(x.dot(w))
    cost1 = np.log(y_hat).dot(y)          # contribution of positive labels
    cost2 = np.log(1 - y_hat).dot(1 - y)  # contribution of negative labels
    J = -(cost1 + cost2)
    return J
def logloss_gradient(w, x, y):
    """Gradient of `logloss` w.r.t. the weights: x^T (sigmoid(x w) - y).

    Shapes as in `logloss`; returns an (n_features,) vector.
    (The unused local `m = y.shape[0]` was removed; values are unchanged.)
    """
    y_hat = sigmoid(x.dot(w))
    gradient = np.dot(x.T, y_hat - y)
    return gradient
```
### Algoritmo de optimización (descenso por la gradiente)
```
def gradient_descent(w, x_train, y_train, x_val, y_val, cost_function,
                     cost_function_gradient, alpha=0.01, max_iter=1000):
    """Plain batch gradient descent.

    Runs `max_iter` updates `w <- w - alpha * grad`, recording the training
    and validation cost at every iteration.

    Returns (final_w, train_costs, val_costs) where both cost arrays have
    length `max_iter`.

    BUG FIX: the validation-label parameter was named `y_va` while the body
    read `y_val`, silently picking up the *global* `y_val` instead of the
    argument; the parameter is renamed so the argument is actually used
    (positional callers are unaffected).
    """
    train_costs = np.zeros(max_iter)
    val_costs = np.zeros(max_iter)
    for iteration in range(max_iter):
        train_costs[iteration] = cost_function(w, x_train, y_train)
        val_costs[iteration] = cost_function(w, x_val, y_val)
        gradient = cost_function_gradient(w, x_train, y_train)
        w = w - alpha * gradient
    return w, train_costs, val_costs
# Prepend a bias column of ones to the examples (bias trick)
X_b_train = np.c_[np.ones(X_train.shape[0]), X_train]
X_b_val = np.c_[np.ones(X_val.shape[0]), X_val]
w0 = np.zeros(X_b_train.shape[1]) # Initial weights
# Fit the logistic model on the training split, tracking validation cost too.
w, train_costs, val_costs = gradient_descent(w0, X_b_train, y_train, X_b_val, y_val,
                                             logloss, logloss_gradient, max_iter=20000)
```
### Exactitud (entrenamiento vs validación)
```
# Predict class 1 when sigmoid(X.w) >= 0.5, i.e. when the raw score X.w >= 0.
# (The original compared the *logit* to 0.5, which shifts the decision
# boundary.)  np.int was removed in NumPy >= 1.24; use the builtin int.
y_pred = (X_b_train.dot(w) >= 0).astype(int)
accuracy = (y_train == y_pred).astype(int).sum() / y_train.shape[0]
print("Exactitud del algoritmo para conjunto de entrenamiento: %.2f" % accuracy)
y_pred = (X_b_val.dot(w) >= 0).astype(int)
accuracy = (y_val == y_pred).astype(int).sum() / y_val.shape[0]
print("Exactitud del algoritmo para conjunto de validación: %.2f" % accuracy)
```
### Curva de aprendizaje (entrenamiento vs validación)
```
# Learning curves: summed log-loss per iteration on both splits.
plt.plot(train_costs, label="Datos de entrenamiento")
plt.plot(val_costs, label="Datos de validación")
plt.xlabel("Iteraciones")
plt.ylabel("Costo")
plt.title("Curva de aprendizaje")
plt.legend()
plt.show()
```
### Frontera de decisión
```
# Shade the plane by predicted class and overlay train/validation points.
xx, yy, Z = decision_boundary(np.r_[X_train, X_val], w)
cmap_back = ListedColormap(['lightcoral', 'skyblue'])
cmap_dots = ['tomato', 'dodgerblue', 'red', 'darkslateblue']
plt.figure(figsize=(6, 5), dpi= 80, facecolor='w', edgecolor='k')
plt.pcolormesh(xx, yy, Z, cmap=cmap_back)
for i in (0, 1):
    # colors 0-1: training classes; colors 2-3: validation classes
    plt.scatter(X_train[y_train==i, 0], X_train[y_train==i, 1],
                color=cmap_dots[i], label='Entrenamiento clase %d' % i,
                edgecolor='k', s=20)
    plt.scatter(X_val[y_val==i, 0], X_val[y_val==i, 1],
                color=cmap_dots[i+2], label='Validación clase %d' % i,
                edgecolor='k', s=20)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.legend()
plt.show()
```
| github_jupyter |
**Math - Linear Algebra**
*Linear Algebra is the branch of mathematics that studies [vector spaces](https://en.wikipedia.org/wiki/Vector_space) and linear transformations between vector spaces, such as rotating a shape, scaling it up or down, translating it (ie. moving it), etc.*
*Machine Learning relies heavily on Linear Algebra, so it is essential to understand what vectors and matrices are, what operations you can perform with them, and how they can be useful.*
# Vectors
## Definition
A vector is a quantity defined by a magnitude and a direction. For example, a rocket's velocity is a 3-dimensional vector: its magnitude is the speed of the rocket, and its direction is (hopefully) up. A vector can be represented by an array of numbers called *scalars*. Each scalar corresponds to the magnitude of the vector with regards to each dimension.
For example, say the rocket is going up at a slight angle: it has a vertical speed of 5,000 m/s, and also a slight speed towards the East at 10 m/s, and a slight speed towards the North at 50 m/s. The rocket's velocity may be represented by the following vector:
**velocity** $= \begin{pmatrix}
10 \\
50 \\
5000 \\
\end{pmatrix}$
Note: by convention vectors are generally presented in the form of columns. Also, vector names are generally lowercase to distinguish them from matrices (which we will discuss below) and in bold (when possible) to distinguish them from simple scalar values such as ${meters\_per\_second} = 5026$.
A list of N numbers may also represent the coordinates of a point in an N-dimensional space, so it is quite frequent to represent vectors as simple points instead of arrows. A vector with 1 element may be represented as an arrow or a point on an axis, a vector with 2 elements is an arrow or a point on a plane, a vector with 3 elements is an arrow or point in space, and a vector with N elements is an arrow or a point in an N-dimensional space… which most people find hard to imagine.
## Purpose
Vectors have many purposes in Machine Learning, most notably to represent observations and predictions. For example, say we built a Machine Learning system to classify videos into 3 categories (good, spam, clickbait) based on what we know about them. For each video, we would have a vector representing what we know about it, such as:
**video** $= \begin{pmatrix}
10.5 \\
5.2 \\
3.25 \\
7.0
\end{pmatrix}$
This vector could represent a video that lasts 10.5 minutes, but only 5.2% viewers watch for more than a minute, it gets 3.25 views per day on average, and it was flagged 7 times as spam. As you can see, each axis may have a different meaning.
Based on this vector our Machine Learning system may predict that there is an 80% probability that it is a spam video, 18% that it is clickbait, and 2% that it is a good video. This could be represented as the following vector:
**class_probabilities** $= \begin{pmatrix}
0.80 \\
0.18 \\
0.02
\end{pmatrix}$
## Vectors in python
In python, a vector can be represented in many ways, the simplest being a regular python list of numbers:
```
# A vector represented as a plain python list
[10.5, 5.2, 3.25, 7.0]
```
Since we plan to do quite a lot of scientific calculations, it is much better to use NumPy's `ndarray`, which provides a lot of convenient and optimized implementations of essential mathematical operations on vectors (for more details about NumPy, check out the [NumPy tutorial](tools_numpy.ipynb)). For example:
```
import numpy as np
# The same vector as a NumPy ndarray (last expression displays it)
video = np.array([10.5, 5.2, 3.25, 7.0])
video
```
The size of a vector can be obtained using the `size` attribute:
```
# Number of elements in the vector
video.size
```
The $i^{th}$ element (also called *entry* or *item*) of a vector $\textbf{v}$ is noted $\textbf{v}_i$.
Note that indices in mathematics generally start at 1, but in programming they usually start at 0. So to access $\textbf{video}_3$ programmatically, we would write:
```
video[2] # 3rd element (python indices start at 0)
```
## Plotting vectors
To plot vectors we will use matplotlib, so let's start by importing it (for details about matplotlib, check the [matplotlib tutorial](tools_matplotlib.ipynb)):
```
%matplotlib inline
import matplotlib.pyplot as plt
```
### 2D vectors
Let's create a couple very simple 2D vectors to plot:
```
# Two simple 2D vectors used throughout the following examples
u = np.array([2, 5])
v = np.array([3, 1])
```
These vectors each have 2 elements, so they can easily be represented graphically on a 2D graph, for example as points:
```
# zip(u, v) pairs the first components (x) and second components (y)
x_coords, y_coords = zip(u, v)
plt.scatter(x_coords, y_coords, color=["r","b"])
plt.axis([0, 9, 0, 6])
plt.grid()
plt.show()
```
Vectors can also be represented as arrows. Let's create a small convenience function to draw nice arrows:
```
def plot_vector2d(vector2d, origin=(0, 0), **options):
    """Draw `vector2d` as an arrow starting at `origin`.

    Extra keyword `options` are forwarded to `plt.arrow` (e.g. color,
    linestyle). Returns the created arrow artist.
    (The default `origin` was a mutable list — a classic python pitfall —
    and is now an equivalent immutable tuple; it is only ever indexed.)
    """
    return plt.arrow(origin[0], origin[1], vector2d[0], vector2d[1],
                     head_width=0.2, head_length=0.3, length_includes_head=True,
                     **options)
```
Now let's draw the vectors **u** and **v** as arrows:
```
# Draw u (red) and v (blue) as arrows from the origin
plot_vector2d(u, color="r")
plot_vector2d(v, color="b")
plt.axis([0, 9, 0, 6])
plt.grid()
plt.show()
```
### 3D vectors
Plotting 3D vectors is also relatively straightforward. First let's create two 3D vectors:
```
# Two 3D vectors for the 3D plotting examples
a = np.array([1, 2, 8])
b = np.array([5, 6, 3])
```
Now let's plot them using matplotlib's `Axes3D`:
```
from mpl_toolkits.mplot3d import Axes3D
# Scatter a and b as points in a 3D subplot
subplot3d = plt.subplot(111, projection='3d')
x_coords, y_coords, z_coords = zip(a,b)
subplot3d.scatter(x_coords, y_coords, z_coords)
subplot3d.set_zlim3d([0, 9])
plt.show()
```
It is a bit hard to visualize exactly where in space these two points are, so let's add vertical lines. We'll create a small convenience function to plot a list of 3d vectors with vertical lines attached:
```
def plot_vectors3d(ax, vectors3d, z0, **options):
    """Scatter 3D vectors on `ax`, dropping a dotted vertical line from
    each point down to the plane z = z0 to aid depth perception."""
    for x, y, z in vectors3d:
        ax.plot([x,x], [y,y], [z0, z], color="gray", linestyle='dotted', marker=".")
    xs, ys, zs = zip(*vectors3d)
    ax.scatter(xs, ys, zs, **options)
# Re-plot a and b with vertical guide lines down to z = 0
subplot3d = plt.subplot(111, projection='3d')
subplot3d.set_zlim([0, 9])
plot_vectors3d(subplot3d, [a,b], 0, color=("r","b"))
plt.show()
```
## Norm
The norm of a vector $\textbf{u}$, noted $\left \Vert \textbf{u} \right \|$, is a measure of the length (a.k.a. the magnitude) of $\textbf{u}$. There are multiple possible norms, but the most common one (and the only one we will discuss here) is the Euclidian norm, which is defined as:
$\left \Vert \textbf{u} \right \| = \sqrt{\sum_{i}{\textbf{u}_i}^2}$
We could implement this easily in pure python, recalling that $\sqrt x = x^{\frac{1}{2}}$
```
def vector_norm(vector):
    """Euclidean norm of `vector`: the square root of the sum of the
    squared components."""
    total = 0
    for component in vector:
        total += component ** 2
    return total ** 0.5
# Display the norm of u (last expression is the cell output)
print("||", u, "|| =")
vector_norm(u)
```
However, it is much more efficient to use NumPy's `norm` function, available in the `linalg` (**Lin**ear **Alg**ebra) module:
```
import numpy.linalg as LA
# Same norm via NumPy: ||u|| = sqrt(2**2 + 5**2) ~ 5.385
LA.norm(u)
```
Let's plot a little diagram to confirm that the length of vector $\textbf{u}$ is indeed $\approx5.4$:
```
# Draw a circle of radius ||u||: the arrow tip should touch the circle
radius = LA.norm(u)
plt.gca().add_artist(plt.Circle((0,0), radius, color="#DDDDDD"))
plot_vector2d(u, color="red")
plt.axis([0, 8.7, 0, 6])
plt.grid()
plt.show()
```
Looks about right!
## Addition
Vectors of same size can be added together. Addition is performed *elementwise*:
```
# Elementwise vector addition (last expression displays the result)
print(" ", u)
print("+", v)
print("-"*10)
u + v
```
Let's look at what vector addition looks like graphically:
```
# Parallelogram construction of u + v: solid u and v from the origin,
# dotted copies translated to each other's tips, and u+v in green.
plot_vector2d(u, color="r")
plot_vector2d(v, color="b")
plot_vector2d(v, origin=u, color="b", linestyle="dotted")
plot_vector2d(u, origin=v, color="r", linestyle="dotted")
plot_vector2d(u+v, color="g")
plt.axis([0, 9, 0, 7])
plt.text(0.7, 3, "u", color="r", fontsize=18)
plt.text(4, 3, "u", color="r", fontsize=18)
plt.text(1.8, 0.2, "v", color="b", fontsize=18)
plt.text(3.1, 5.6, "v", color="b", fontsize=18)
plt.text(2.4, 2.5, "u+v", color="g", fontsize=18)
plt.grid()
plt.show()
```
Vector addition is **commutative**, meaning that $\textbf{u} + \textbf{v} = \textbf{v} + \textbf{u}$. You can see it on the previous image: following $\textbf{u}$ *then* $\textbf{v}$ leads to the same point as following $\textbf{v}$ *then* $\textbf{u}$.
Vector addition is also **associative**, meaning that $\textbf{u} + (\textbf{v} + \textbf{w}) = (\textbf{u} + \textbf{v}) + \textbf{w}$.
If you have a shape defined by a number of points (vectors), and you add a vector $\textbf{v}$ to all of these points, then the whole shape gets shifted by $\textbf{v}$. This is called a [geometric translation](https://en.wikipedia.org/wiki/Translation_%28geometry%29):
```
# Geometric translation: adding v to each vertex of the triangle
# (t1, t2, t3) shifts the whole shape by v.
t1 = np.array([2, 0.25])
t2 = np.array([2.5, 3.5])
t3 = np.array([1, 2])
x_coords, y_coords = zip(t1, t2, t3, t1)
plt.plot(x_coords, y_coords, "c--", x_coords, y_coords, "co")
plot_vector2d(v, t1, color="r", linestyle=":")
plot_vector2d(v, t2, color="r", linestyle=":")
plot_vector2d(v, t3, color="r", linestyle=":")
t1b = t1 + v
t2b = t2 + v
t3b = t3 + v
x_coords_b, y_coords_b = zip(t1b, t2b, t3b, t1b)
plt.plot(x_coords_b, y_coords_b, "b-", x_coords_b, y_coords_b, "bo")
plt.text(4, 4.2, "v", color="r", fontsize=18)
plt.text(3, 2.3, "v", color="r", fontsize=18)
plt.text(3.5, 0.4, "v", color="r", fontsize=18)
plt.axis([0, 6, 0, 5])
plt.grid()
plt.show()
```
Finally, substracting a vector is like adding the opposite vector.
## Multiplication by a scalar
Vectors can be multiplied by scalars. All elements in the vector are multiplied by that number, for example:
```
# Scalar multiplication: every component of u is multiplied by 1.5
print("1.5 *", u, "=")
1.5 * u
```
Graphically, scalar multiplication results in changing the scale of a figure, hence the name *scalar*. The distance from the origin (the point at coordinates equal to zero) is also multiplied by the scalar. For example, let's scale up by a factor of `k = 2.5`:
```
# Scaling the triangle (t1, t2, t3) by k = 2.5: every vertex (and its
# distance from the origin) is multiplied by k.
k = 2.5
t1c = k * t1
t2c = k * t2
t3c = k * t3
plt.plot(x_coords, y_coords, "c--", x_coords, y_coords, "co")
plot_vector2d(t1, color="r")
plot_vector2d(t2, color="r")
plot_vector2d(t3, color="r")
x_coords_c, y_coords_c = zip(t1c, t2c, t3c, t1c)
plt.plot(x_coords_c, y_coords_c, "b-", x_coords_c, y_coords_c, "bo")
plot_vector2d(k * t1, color="b", linestyle=":")
plot_vector2d(k * t2, color="b", linestyle=":")
plot_vector2d(k * t3, color="b", linestyle=":")
plt.axis([0, 9, 0, 9])
plt.grid()
plt.show()
```
As you might guess, dividing a vector by a scalar is equivalent to multiplying by its multiplicative inverse (reciprocal):
$\dfrac{\textbf{u}}{\lambda} = \dfrac{1}{\lambda} \times \textbf{u}$
Scalar multiplication is **commutative**: $\lambda \times \textbf{u} = \textbf{u} \times \lambda$.
It is also **associative**: $\lambda_1 \times (\lambda_2 \times \textbf{u}) = (\lambda_1 \times \lambda_2) \times \textbf{u}$.
Finally, it is **distributive** over addition of vectors: $\lambda \times (\textbf{u} + \textbf{v}) = \lambda \times \textbf{u} + \lambda \times \textbf{v}$.
## Zero, unit and normalized vectors
* A **zero-vector** is a vector full of 0s.
* A **unit vector** is a vector with a norm equal to 1.
* The **normalized vector** of a non-null vector $\textbf{u}$, noted $\hat{\textbf{u}}$, is the unit vector that points in the same direction as $\textbf{u}$. It is equal to: $\hat{\textbf{u}} = \dfrac{\textbf{u}}{\left \Vert \textbf{u} \right \|}$
```
# Unit circle with the normalized vector v/||v|| (solid) and v (dotted).
# NOTE(review): the labels read u-hat/u but the vector drawn is v — confirm intended.
plt.gca().add_artist(plt.Circle((0,0),1,color='c'))
plt.plot(0, 0, "ko")
plot_vector2d(v / LA.norm(v), color="k")
plot_vector2d(v, color="b", linestyle=":")
plt.text(0.3, 0.3, "$\hat{u}$", color="k", fontsize=18)
plt.text(1.5, 0.7, "$u$", color="b", fontsize=18)
plt.axis([-1.5, 5.5, -1.5, 3.5])
plt.grid()
plt.show()
```
## Dot product
### Definition
The dot product (also called *scalar product* or *inner product* in the context of the Euclidian space) of two vectors $\textbf{u}$ and $\textbf{v}$ is a useful operation that comes up fairly often in linear algebra. It is noted $\textbf{u} \cdot \textbf{v}$, or sometimes $⟨\textbf{u}|\textbf{v}⟩$ or $(\textbf{u}|\textbf{v})$, and it is defined as:
$\textbf{u} \cdot \textbf{v} = \left \Vert \textbf{u} \right \| \times \left \Vert \textbf{v} \right \| \times cos(\theta)$
where $\theta$ is the angle between $\textbf{u}$ and $\textbf{v}$.
Another way to calculate the dot product is:
$\textbf{u} \cdot \textbf{v} = \sum_i{\textbf{u}_i \times \textbf{v}_i}$
### In python
The dot product is pretty simple to implement:
```
def dot_product(v1, v2):
    """Sum of the pairwise products of the components of v1 and v2."""
    total = 0
    for a, b in zip(v1, v2):
        total += a * b
    return total
# u . v = 2*3 + 5*1 = 11
dot_product(u, v)
```
But a *much* more efficient implementation is provided by NumPy with the `dot` function:
```
# Same dot product via NumPy's C implementation
np.dot(u,v)
```
Equivalently, you can use the `dot` method of `ndarray`s:
```
# Equivalent ndarray method form
u.dot(v)
```
**Caution**: the `*` operator will perform an *elementwise* multiplication, *NOT* a dot product:
```
# `*` multiplies elementwise — it is NOT a dot product
print(" ",u)
print("* ",v, "(NOT a dot product)")
print("-"*10)
u * v
```
### Main properties
* The dot product is **commutative**: $\textbf{u} \cdot \textbf{v} = \textbf{v} \cdot \textbf{u}$.
* The dot product is only defined between two vectors, not between a scalar and a vector. This means that we cannot chain dot products: for example, the expression $\textbf{u} \cdot \textbf{v} \cdot \textbf{w}$ is not defined since $\textbf{u} \cdot \textbf{v}$ is a scalar and $\textbf{w}$ is a vector.
* This also means that the dot product is **NOT associative**: $(\textbf{u} \cdot \textbf{v}) \cdot \textbf{w} ≠ \textbf{u} \cdot (\textbf{v} \cdot \textbf{w})$ since neither are defined.
* However, the dot product is **associative with regards to scalar multiplication**: $\lambda \times (\textbf{u} \cdot \textbf{v}) = (\lambda \times \textbf{u}) \cdot \textbf{v} = \textbf{u} \cdot (\lambda \times \textbf{v})$
* Finally, the dot product is **distributive** over addition of vectors: $\textbf{u} \cdot (\textbf{v} + \textbf{w}) = \textbf{u} \cdot \textbf{v} + \textbf{u} \cdot \textbf{w}$.
### Calculating the angle between vectors
One of the many uses of the dot product is to calculate the angle between two non-zero vectors. Looking at the dot product definition, we can deduce the following formula:
$\theta = \arccos{\left ( \dfrac{\textbf{u} \cdot \textbf{v}}{\left \Vert \textbf{u} \right \| \times \left \Vert \textbf{v} \right \|} \right ) }$
Note that if $\textbf{u} \cdot \textbf{v} = 0$, it follows that $\theta = \dfrac{π}{2}$. In other words, if the dot product of two non-null vectors is zero, it means that they are orthogonal.
Let's use this formula to calculate the angle between $\textbf{u}$ and $\textbf{v}$ (in radians):
```
def vector_angle(u, v):
    """Return the angle (in radians) between two non-zero vectors u and v."""
    dot_product = u.dot(v)
    norms_product = LA.norm(u) * LA.norm(v)
    cos_theta = dot_product / norms_product
    # clip guards against tiny floating point excursions outside [-1, 1],
    # which would make arccos return NaN
    return np.arccos(np.clip(cos_theta, -1, 1))
theta = vector_angle(u, v)
print("Angle =", theta, "radians")
print(" =", theta * 180 / np.pi, "degrees")
```
Note: due to small floating point errors, `cos_theta` may be very slightly outside of the $[-1, 1]$ interval, which would make `arccos` fail. This is why we clipped the value within the range, using NumPy's `clip` function.
### Projecting a point onto an axis
The dot product is also very useful to project points onto an axis. The projection of vector $\textbf{v}$ onto $\textbf{u}$'s axis is given by this formula:
$\textbf{proj}_{\textbf{u}}{\textbf{v}} = \dfrac{\textbf{u} \cdot \textbf{v}}{\left \Vert \textbf{u} \right \| ^2} \times \textbf{u}$
Which is equivalent to:
$\textbf{proj}_{\textbf{u}}{\textbf{v}} = (\textbf{v} \cdot \hat{\textbf{u}}) \times \hat{\textbf{u}}$
```
u_normalized = u / LA.norm(u)
proj = v.dot(u_normalized) * u_normalized
plot_vector2d(u, color="r")
plot_vector2d(v, color="b")
plot_vector2d(proj, color="k", linestyle=":")
plt.plot(proj[0], proj[1], "ko")
plt.plot([proj[0], v[0]], [proj[1], v[1]], "b:")
plt.text(1, 2, "$proj_u v$", color="k", fontsize=18)
plt.text(1.8, 0.2, "$v$", color="b", fontsize=18)
plt.text(0.8, 3, "$u$", color="r", fontsize=18)
plt.axis([0, 8, 0, 5.5])
plt.grid()
plt.show()
```
# Matrices
A matrix is a rectangular array of scalars (ie. any number: integer, real or complex) arranged in rows and columns, for example:
\begin{bmatrix} 10 & 20 & 30 \\ 40 & 50 & 60 \end{bmatrix}
You can also think of a matrix as a list of vectors: the previous matrix contains either 2 horizontal 3D vectors or 3 vertical 2D vectors.
Matrices are convenient and very efficient to run operations on many vectors at a time. We will also see that they are great at representing and performing linear transformations such as rotations, translations and scaling.
## Matrices in python
In python, a matrix can be represented in various ways. The simplest is just a list of python lists:
```
[
[10, 20, 30],
[40, 50, 60]
]
```
A much more efficient way is to use the NumPy library which provides optimized implementations of many matrix operations:
```
A = np.array([
[10,20,30],
[40,50,60]
])
A
```
By convention matrices generally have uppercase names, such as $A$.
In the rest of this tutorial, we will assume that we are using NumPy arrays (type `ndarray`) to represent matrices.
## Size
The size of a matrix is defined by its number of rows and number of columns. It is noted $rows \times columns$. For example, the matrix $A$ above is an example of a $2 \times 3$ matrix: 2 rows, 3 columns. Caution: a $3 \times 2$ matrix would have 3 rows and 2 columns.
To get a matrix's size in NumPy:
```
A.shape
```
**Caution**: the `size` attribute represents the number of elements in the `ndarray`, not the matrix's size:
```
A.size
```
## Element indexing
The number located in the $i^{th}$ row, and $j^{th}$ column of a matrix $X$ is sometimes noted $X_{i,j}$ or $X_{ij}$, but there is no standard notation, so people often prefer to explicitly name the elements, like this: "*let $X = (x_{i,j})_{1 ≤ i ≤ m, 1 ≤ j ≤ n}$*". This means that $X$ is equal to:
$X = \begin{bmatrix}
x_{1,1} & x_{1,2} & x_{1,3} & \cdots & x_{1,n}\\
x_{2,1} & x_{2,2} & x_{2,3} & \cdots & x_{2,n}\\
x_{3,1} & x_{3,2} & x_{3,3} & \cdots & x_{3,n}\\
\vdots & \vdots & \vdots & \ddots & \vdots \\
x_{m,1} & x_{m,2} & x_{m,3} & \cdots & x_{m,n}\\
\end{bmatrix}$
However in this notebook we will use the $X_{i,j}$ notation, as it matches fairly well NumPy's notation. Note that in math indices generally start at 1, but in programming they usually start at 0. So to access $A_{2,3}$ programmatically, we need to write this:
```
A[1,2] # 2nd row, 3rd column
```
The $i^{th}$ row vector is sometimes noted $M_i$ or $M_{i,*}$, but again there is no standard notation so people often prefer to explicitly define their own names, for example: "*let **x**$_{i}$ be the $i^{th}$ row vector of matrix $X$*". We will use the $M_{i,*}$ notation, for the same reason as above. For example, to access $A_{2,*}$ (ie. $A$'s 2nd row vector):
```
A[1, :] # 2nd row vector (as a 1D array)
```
Similarly, the $j^{th}$ column vector is sometimes noted $M^j$ or $M_{*,j}$, but there is no standard notation. We will use $M_{*,j}$. For example, to access $A_{*,3}$ (ie. $A$'s 3rd column vector):
```
A[:, 2] # 3rd column vector (as a 1D array)
```
Note that the result is actually a one-dimensional NumPy array: there is no such thing as a *vertical* or *horizontal* one-dimensional array. If you need to actually represent a row vector as a one-row matrix (ie. a 2D NumPy array), or a column vector as a one-column matrix, then you need to use a slice instead of an integer when accessing the row or column, for example:
```
A[1:2, :] # rows 2 to 3 (excluded): this returns row 2 as a one-row matrix
A[:, 2:3] # columns 3 to 4 (excluded): this returns column 3 as a one-column matrix
```
## Square, triangular, diagonal and identity matrices
A **square matrix** is a matrix that has the same number of rows and columns, for example a $3 \times 3$ matrix:
\begin{bmatrix}
4 & 9 & 2 \\
3 & 5 & 7 \\
8 & 1 & 6
\end{bmatrix}
An **upper triangular matrix** is a special kind of square matrix where all the elements *below* the main diagonal (top-left to bottom-right) are zero, for example:
\begin{bmatrix}
4 & 9 & 2 \\
0 & 5 & 7 \\
0 & 0 & 6
\end{bmatrix}
Similarly, a **lower triangular matrix** is a square matrix where all elements *above* the main diagonal are zero, for example:
\begin{bmatrix}
4 & 0 & 0 \\
3 & 5 & 0 \\
8 & 1 & 6
\end{bmatrix}
A **triangular matrix** is one that is either lower triangular or upper triangular.
A matrix that is both upper and lower triangular is called a **diagonal matrix**, for example:
\begin{bmatrix}
4 & 0 & 0 \\
0 & 5 & 0 \\
0 & 0 & 6
\end{bmatrix}
You can construct a diagonal matrix using NumPy's `diag` function:
```
np.diag([4, 5, 6])
```
If you pass a matrix to the `diag` function, it will happily extract the diagonal values:
```
D = np.array([
[1, 2, 3],
[4, 5, 6],
[7, 8, 9],
])
np.diag(D)
```
Finally, the **identity matrix** of size $n$, noted $I_n$, is a diagonal matrix of size $n \times n$ with $1$'s in the main diagonal, for example $I_3$:
\begin{bmatrix}
1 & 0 & 0 \\
0 & 1 & 0 \\
0 & 0 & 1
\end{bmatrix}
Numpy's `eye` function returns the identity matrix of the desired size:
```
np.eye(3)
```
The identity matrix is often noted simply $I$ (instead of $I_n$) when its size is clear given the context. It is called the *identity* matrix because multiplying a matrix with it leaves the matrix unchanged as we will see below.
## Adding matrices
If two matrices $Q$ and $R$ have the same size $m \times n$, they can be added together. Addition is performed *elementwise*: the result is also a $m \times n$ matrix $S$ where each element is the sum of the elements at the corresponding position: $S_{i,j} = Q_{i,j} + R_{i,j}$
$S =
\begin{bmatrix}
Q_{11} + R_{11} & Q_{12} + R_{12} & Q_{13} + R_{13} & \cdots & Q_{1n} + R_{1n} \\
Q_{21} + R_{21} & Q_{22} + R_{22} & Q_{23} + R_{23} & \cdots & Q_{2n} + R_{2n} \\
Q_{31} + R_{31} & Q_{32} + R_{32} & Q_{33} + R_{33} & \cdots & Q_{3n} + R_{3n} \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
Q_{m1} + R_{m1} & Q_{m2} + R_{m2} & Q_{m3} + R_{m3} & \cdots & Q_{mn} + R_{mn} \\
\end{bmatrix}$
For example, let's create a $2 \times 3$ matrix $B$ and compute $A + B$:
```
B = np.array([[1,2,3], [4, 5, 6]])
B
A
A + B
```
**Addition is *commutative***, meaning that $A + B = B + A$:
```
B + A
```
**It is also *associative***, meaning that $A + (B + C) = (A + B) + C$:
```
C = np.array([[100,200,300], [400, 500, 600]])
A + (B + C)
(A + B) + C
```
## Scalar multiplication
A matrix $M$ can be multiplied by a scalar $\lambda$. The result is noted $\lambda M$, and it is a matrix of the same size as $M$ with all elements multiplied by $\lambda$:
$\lambda M =
\begin{bmatrix}
\lambda \times M_{11} & \lambda \times M_{12} & \lambda \times M_{13} & \cdots & \lambda \times M_{1n} \\
\lambda \times M_{21} & \lambda \times M_{22} & \lambda \times M_{23} & \cdots & \lambda \times M_{2n} \\
\lambda \times M_{31} & \lambda \times M_{32} & \lambda \times M_{33} & \cdots & \lambda \times M_{3n} \\
\vdots & \vdots & \vdots & \ddots & \vdots \\
\lambda \times M_{m1} & \lambda \times M_{m2} & \lambda \times M_{m3} & \cdots & \lambda \times M_{mn} \\
\end{bmatrix}$
A more concise way of writing this is:
$(\lambda M)_{i,j} = \lambda (M)_{i,j}$
In NumPy, simply use the `*` operator to multiply a matrix by a scalar. For example:
```
2 * A
```
Scalar multiplication is also defined on the right hand side, and gives the same result: $M \lambda = \lambda M$. For example:
```
A * 2
```
This makes scalar multiplication **commutative**.
It is also **associative**, meaning that $\alpha (\beta M) = (\alpha \times \beta) M$, where $\alpha$ and $\beta$ are scalars. For example:
```
2 * (3 * A)
(2 * 3) * A
```
Finally, it is **distributive over addition** of matrices, meaning that $\lambda (Q + R) = \lambda Q + \lambda R$:
```
2 * (A + B)
2 * A + 2 * B
```
## Matrix multiplication
So far, matrix operations have been rather intuitive. But multiplying matrices is a bit more involved.
A matrix $Q$ of size $m \times n$ can be multiplied by a matrix $R$ of size $n \times q$. It is noted simply $QR$ without multiplication sign or dot. The result $P$ is an $m \times q$ matrix where each element is computed as a sum of products:
$P_{i,j} = \sum_{k=1}^n{Q_{i,k} \times R_{k,j}}$
The element at position $i,j$ in the resulting matrix is the sum of the products of elements in row $i$ of matrix $Q$ by the elements in column $j$ of matrix $R$.
$P =
\begin{bmatrix}
Q_{11} R_{11} + Q_{12} R_{21} + \cdots + Q_{1n} R_{n1} &
Q_{11} R_{12} + Q_{12} R_{22} + \cdots + Q_{1n} R_{n2} &
\cdots &
Q_{11} R_{1q} + Q_{12} R_{2q} + \cdots + Q_{1n} R_{nq} \\
Q_{21} R_{11} + Q_{22} R_{21} + \cdots + Q_{2n} R_{n1} &
Q_{21} R_{12} + Q_{22} R_{22} + \cdots + Q_{2n} R_{n2} &
\cdots &
Q_{21} R_{1q} + Q_{22} R_{2q} + \cdots + Q_{2n} R_{nq} \\
\vdots & \vdots & \ddots & \vdots \\
Q_{m1} R_{11} + Q_{m2} R_{21} + \cdots + Q_{mn} R_{n1} &
Q_{m1} R_{12} + Q_{m2} R_{22} + \cdots + Q_{mn} R_{n2} &
\cdots &
Q_{m1} R_{1q} + Q_{m2} R_{2q} + \cdots + Q_{mn} R_{nq}
\end{bmatrix}$
You may notice that each element $P_{i,j}$ is the dot product of the row vector $Q_{i,*}$ and the column vector $R_{*,j}$:
$P_{i,j} = Q_{i,*} \cdot R_{*,j}$
So we can rewrite $P$ more concisely as:
$P =
\begin{bmatrix}
Q_{1,*} \cdot R_{*,1} & Q_{1,*} \cdot R_{*,2} & \cdots & Q_{1,*} \cdot R_{*,q} \\
Q_{2,*} \cdot R_{*,1} & Q_{2,*} \cdot R_{*,2} & \cdots & Q_{2,*} \cdot R_{*,q} \\
\vdots & \vdots & \ddots & \vdots \\
Q_{m,*} \cdot R_{*,1} & Q_{m,*} \cdot R_{*,2} & \cdots & Q_{m,*} \cdot R_{*,q}
\end{bmatrix}$
Let's multiply two matrices in NumPy, using `ndarray`'s `dot` method:
$E = AD = \begin{bmatrix}
10 & 20 & 30 \\
40 & 50 & 60
\end{bmatrix}
\begin{bmatrix}
2 & 3 & 5 & 7 \\
11 & 13 & 17 & 19 \\
23 & 29 & 31 & 37
\end{bmatrix} =
\begin{bmatrix}
930 & 1160 & 1320 & 1560 \\
2010 & 2510 & 2910 & 3450
\end{bmatrix}$
```
D = np.array([
[ 2, 3, 5, 7],
[11, 13, 17, 19],
[23, 29, 31, 37]
])
E = A.dot(D)
E
```
Let's check this result by looking at one element, just to be sure: looking at $E_{2,3}$ for example, we need to multiply elements in $A$'s $2^{nd}$ row by elements in $D$'s $3^{rd}$ column, and sum up these products:
```
40*5 + 50*17 + 60*31
E[1,2] # row 2, column 3
```
Looks good! You can check the other elements until you get used to the algorithm.
We multiplied a $2 \times 3$ matrix by a $3 \times 4$ matrix, so the result is a $2 \times 4$ matrix. The first matrix's number of columns has to be equal to the second matrix's number of rows. If we try to multiply $D$ by $A$, we get an error because D has 4 columns while A has 2 rows:
```
try:
D.dot(A)
except ValueError as e:
print("ValueError:", e)
```
This illustrates the fact that **matrix multiplication is *NOT* commutative**: in general $QR ≠ RQ$
In fact, $QR$ and $RQ$ are only *both* defined if $Q$ has size $m \times n$ and $R$ has size $n \times m$. Let's look at an example where both *are* defined and show that they are (in general) *NOT* equal:
```
F = np.array([
[5,2],
[4,1],
[9,3]
])
A.dot(F)
F.dot(A)
```
On the other hand, **matrix multiplication *is* associative**, meaning that $Q(RS) = (QR)S$. Let's create a $4 \times 5$ matrix $G$ to illustrate this:
```
# Associativity check: both groupings of the triple product A·D·G give the same result.
G = np.array([
[8, 7, 4, 2, 5],
[2, 5, 1, 0, 5],
[9, 11, 17, 21, 0],
[0, 1, 0, 1, 2]])
A.dot(D).dot(G) # (AD)G
A.dot(D.dot(G)) # A(DG)
```
It is also ***distributive* over addition** of matrices, meaning that $(Q + R)S = QS + RS$. For example:
```
(A + B).dot(D)
A.dot(D) + B.dot(D)
```
The product of a matrix $M$ by the identity matrix (of matching size) results in the same matrix $M$. More formally, if $M$ is an $m \times n$ matrix, then:
$M I_n = I_m M = M$
This is generally written more concisely (since the size of the identity matrices is unambiguous given the context):
$MI = IM = M$
For example:
```
A.dot(np.eye(3))
np.eye(2).dot(A)
```
**Caution**: NumPy's `*` operator performs elementwise multiplication, *NOT* a matrix multiplication:
```
A * B # NOT a matrix multiplication
```
**The @ infix operator**
Python 3.5 [introduced](https://docs.python.org/3/whatsnew/3.5.html#pep-465-a-dedicated-infix-operator-for-matrix-multiplication) the `@` infix operator for matrix multiplication, and NumPy 1.10 added support for it. If you are using Python 3.5+ and NumPy 1.10+, you can simply write `A @ D` instead of `A.dot(D)`, making your code much more readable (but less portable). This operator also works for vector dot products.
```
import sys
print("Python version: {}.{}.{}".format(*sys.version_info))
print("Numpy version:", np.version.version)
# Uncomment the following line if your Python version is ≥3.5
# and your NumPy version is ≥1.10:
#A @ D
```
Note: `Q @ R` is actually equivalent to `Q.__matmul__(R)` which is implemented by NumPy as `np.matmul(Q, R)`, not as `Q.dot(R)`. The main difference is that `matmul` does not support scalar multiplication, while `dot` does, so you can write `Q.dot(3)`, which is equivalent to `Q * 3`, but you cannot write `Q @ 3` ([more details](http://stackoverflow.com/a/34142617/38626)).
## Matrix transpose
The transpose of a matrix $M$ is a matrix noted $M^T$ such that the $i^{th}$ row in $M^T$ is equal to the $i^{th}$ column in $M$:
$ A^T =
\begin{bmatrix}
10 & 20 & 30 \\
40 & 50 & 60
\end{bmatrix}^T =
\begin{bmatrix}
10 & 40 \\
20 & 50 \\
30 & 60
\end{bmatrix}$
In other words, $(A^T)_{i,j} = A_{j,i}$
Obviously, if $M$ is an $m \times n$ matrix, then $M^T$ is an $n \times m$ matrix.
Note: there are a few other notations, such as $M^t$, $M′$, or ${^t}M$.
In NumPy, a matrix's transpose can be obtained simply using the `T` attribute:
```
A
A.T
```
As you might expect, transposing a matrix twice returns the original matrix:
```
A.T.T
```
Transposition is distributive over addition of matrices, meaning that $(Q + R)^T = Q^T + R^T$. For example:
```
(A + B).T
A.T + B.T
```
Moreover, $(Q \cdot R)^T = R^T \cdot Q^T$. Note that the order is reversed. For example:
```
(A.dot(D)).T
D.T.dot(A.T)
```
A **symmetric matrix** $M$ is defined as a matrix that is equal to its transpose: $M^T = M$. This definition implies that it must be a square matrix whose elements are symmetric relative to the main diagonal, for example:
\begin{bmatrix}
17 & 22 & 27 & 49 \\
22 & 29 & 36 & 0 \\
27 & 36 & 45 & 2 \\
49 & 0 & 2 & 99
\end{bmatrix}
The product of a matrix by its transpose is always a symmetric matrix, for example:
```
D.dot(D.T)
```
## Converting 1D arrays to 2D arrays in NumPy
As we mentioned earlier, in NumPy (as opposed to Matlab, for example), 1D really means 1D: there is no such thing as a vertical 1D-array or a horizontal 1D-array. So you should not be surprised to see that transposing a 1D array does not do anything:
```
u
u.T
```
We want to convert $\textbf{u}$ into a row vector before transposing it. There are a few ways to do this:
```
u_row = np.array([u])
u_row
```
Notice the extra square brackets: this is a 2D array with just one row (ie. a 1x2 matrix). In other words it really is a **row vector**.
```
u[np.newaxis, :]
```
This is quite explicit: we are asking for a new vertical axis, keeping the existing data as the horizontal axis.
```
u[np.newaxis]
```
This is equivalent, but a little less explicit.
```
u[None]
```
This is the shortest version, but you probably want to avoid it because it is unclear. The reason it works is that `np.newaxis` is actually equal to `None`, so this is equivalent to the previous version.
Ok, now let's transpose our row vector:
```
u_row.T
```
Great! We now have a nice **column vector**.
Rather than creating a row vector then transposing it, it is also possible to convert a 1D array directly into a column vector:
```
u[:, np.newaxis]
```
## Plotting a matrix
We have already seen that vectors can be represented as points or arrows in N-dimensional space. Is there a good graphical representation of matrices? Well you can simply see a matrix as a list of vectors, so plotting a matrix results in many points or arrows. For example, let's create a $2 \times 4$ matrix `P` and plot it as points:
```
P = np.array([
[3.0, 4.0, 1.0, 4.6],
[0.2, 3.5, 2.0, 0.5]
])
x_coords_P, y_coords_P = P
plt.scatter(x_coords_P, y_coords_P)
plt.axis([0, 5, 0, 4])
plt.show()
```
Of course we could also have stored the same 4 vectors as row vectors instead of column vectors, resulting in a $4 \times 2$ matrix (the transpose of $P$, in fact). It is really an arbitrary choice.
Since the vectors are ordered, you can see the matrix as a path and represent it with connected dots:
```
plt.plot(x_coords_P, y_coords_P, "bo")
plt.plot(x_coords_P, y_coords_P, "b--")
plt.axis([0, 5, 0, 4])
plt.grid()
plt.show()
```
Or you can represent it as a polygon: matplotlib's `Polygon` class expects an $n \times 2$ NumPy array, not a $2 \times n$ array, so we just need to give it $P^T$:
```
from matplotlib.patches import Polygon
plt.gca().add_artist(Polygon(P.T))
plt.axis([0, 5, 0, 4])
plt.grid()
plt.show()
```
## Geometric applications of matrix operations
We saw earlier that vector addition results in a geometric translation, vector multiplication by a scalar results in rescaling (zooming in or out, centered on the origin), and vector dot product results in projecting a vector onto another vector, rescaling and measuring the resulting coordinate.
Similarly, matrix operations have very useful geometric applications.
### Addition = multiple geometric translations
First, adding two matrices together is equivalent to adding all their vectors together. For example, let's create a $2 \times 4$ matrix $H$ and add it to $P$, and look at the result:
```
H = np.array([
[ 0.5, -0.2, 0.2, -0.1],
[ 0.4, 0.4, 1.5, 0.6]
])
P_moved = P + H
plt.gca().add_artist(Polygon(P.T, alpha=0.2))
plt.gca().add_artist(Polygon(P_moved.T, alpha=0.3, color="r"))
for vector, origin in zip(H.T, P.T):
plot_vector2d(vector, origin=origin)
plt.text(2.2, 1.8, "$P$", color="b", fontsize=18)
plt.text(2.0, 3.2, "$P+H$", color="r", fontsize=18)
plt.text(2.5, 0.5, "$H_{*,1}$", color="k", fontsize=18)
plt.text(4.1, 3.5, "$H_{*,2}$", color="k", fontsize=18)
plt.text(0.4, 2.6, "$H_{*,3}$", color="k", fontsize=18)
plt.text(4.4, 0.2, "$H_{*,4}$", color="k", fontsize=18)
plt.axis([0, 5, 0, 4])
plt.grid()
plt.show()
```
If we add a matrix full of identical vectors, we get a simple geometric translation:
```
H2 = np.array([
[-0.5, -0.5, -0.5, -0.5],
[ 0.4, 0.4, 0.4, 0.4]
])
P_translated = P + H2
plt.gca().add_artist(Polygon(P.T, alpha=0.2))
plt.gca().add_artist(Polygon(P_translated.T, alpha=0.3, color="r"))
for vector, origin in zip(H2.T, P.T):
plot_vector2d(vector, origin=origin)
plt.axis([0, 5, 0, 4])
plt.grid()
plt.show()
```
Although matrices can only be added together if they have the same size, NumPy allows adding a row vector or a column vector to a matrix: this is called *broadcasting* and is explained in further details in the [NumPy tutorial](tools_numpy.ipynb). We could have obtained the same result as above with:
```
P + [[-0.5], [0.4]] # same as P + H2, thanks to NumPy broadcasting
```
### Scalar multiplication
Multiplying a matrix by a scalar results in all its vectors being multiplied by that scalar, so unsurprisingly, the geometric result is a rescaling of the entire figure. For example, let's rescale our polygon by a factor of 60% (zooming out, centered on the origin):
```
def plot_transformation(P_before, P_after, text_before, text_after, axis=(0, 5, 0, 4), arrows=False):
    """Plot two polygons to visualize a linear transformation.

    Parameters
    ----------
    P_before, P_after : 2 x n arrays whose columns are the polygon's
        vertices, before and after the transformation.
    text_before, text_after : labels drawn near each polygon's centroid.
    axis : sequence of 4 numbers passed to `plt.axis` as
        (xmin, xmax, ymin, ymax). The default is a tuple rather than a
        list to avoid the shared mutable-default-argument pitfall.
    arrows : if True, also draw each vertex as a vector from the origin
        (dashed blue = before, solid red = after).
    """
    if arrows:
        for vector_before, vector_after in zip(P_before.T, P_after.T):
            plot_vector2d(vector_before, color="blue", linestyle="--")
            plot_vector2d(vector_after, color="red", linestyle="-")
    # Polygon expects an n x 2 array, hence the transposes
    plt.gca().add_artist(Polygon(P_before.T, alpha=0.2))
    plt.gca().add_artist(Polygon(P_after.T, alpha=0.3, color="r"))
    # place each label roughly at the centroid of its polygon
    plt.text(P_before[0].mean(), P_before[1].mean(), text_before, fontsize=18, color="blue")
    plt.text(P_after[0].mean(), P_after[1].mean(), text_after, fontsize=18, color="red")
    plt.axis(axis)
    plt.grid()
P_rescaled = 0.60 * P
plot_transformation(P, P_rescaled, "$P$", "$0.6 P$", arrows=True)
plt.show()
```
### Matrix multiplication – Projection onto an axis
Matrix multiplication is more complex to visualize, but it is also the most powerful tool in the box.
Let's start simple, by defining a $1 \times 2$ matrix $U = \begin{bmatrix} 1 & 0 \end{bmatrix}$. This row vector is just the horizontal unit vector.
```
U = np.array([[1, 0]])
```
Now let's look at the dot product $U \cdot P$:
```
U.dot(P)
```
These are the horizontal coordinates of the vectors in $P$. In other words, we just projected $P$ onto the horizontal axis:
```
def plot_projection(U, P):
    """Project the columns of P onto the axis given by the unit row vector U and plot the result."""
    coords = U.dot(P)  # 1 x n: coordinate of each point along U's axis
    # draw the projection axis as a long arrow starting at the origin
    plot_vector2d((100 * U)[0], color="black")
    plt.gca().add_artist(Polygon(P.T, alpha=0.2))
    for point, coord in zip(P.T, coords.T):
        projected = coord * U  # back to 2D coordinates on the axis
        px, py = projected[0][0], projected[0][1]
        plt.plot(px, py, "ro")
        # dashed segment from the original point to its projection
        plt.plot([point[0], px], [point[1], py], "r--")
    plt.axis([0, 5, 0, 4])
    plt.grid()
    plt.show()
plot_projection(U, P)
```
We can actually project on any other axis by just replacing $U$ with any other unit vector. For example, let's project on the axis that is at a 30° angle above the horizontal axis:
```
angle30 = 30 * np.pi / 180 # angle in radians
U_30 = np.array([[np.cos(angle30), np.sin(angle30)]])
plot_projection(U_30, P)
```
Good! Remember that the dot product of a unit vector and a matrix basically performs a projection on an axis and gives us the coordinates of the resulting points on that axis.
### Matrix multiplication – Rotation
Now let's create a $2 \times 2$ matrix $V$ containing two unit vectors that make 30° and 120° angles with the horizontal axis:
$V = \begin{bmatrix} \cos(30°) & \sin(30°) \\ \cos(120°) & \sin(120°) \end{bmatrix}$
```
angle120 = 120 * np.pi / 180
V = np.array([
[np.cos(angle30), np.sin(angle30)],
[np.cos(angle120), np.sin(angle120)]
])
V
```
Let's look at the product $VP$:
```
V.dot(P)
```
The first row is equal to $V_{1,*} P$, which is the coordinates of the projection of $P$ onto the 30° axis, as we have seen above. The second row is $V_{2,*} P$, which is the coordinates of the projection of $P$ onto the 120° axis. So basically we obtained the coordinates of $P$ after rotating the horizontal and vertical axes by 30° (or equivalently after rotating the polygon by -30° around the origin)! Let's plot $VP$ to see this:
```
P_rotated = V.dot(P)
plot_transformation(P, P_rotated, "$P$", "$VP$", [-2, 6, -2, 4], arrows=True)
plt.show()
```
Matrix $V$ is called a **rotation matrix**.
### Matrix multiplication – Other linear transformations
More generally, any linear transformation $f$ that maps n-dimensional vectors to m-dimensional vectors can be represented as an $m \times n$ matrix. For example, say $\textbf{u}$ is a 3-dimensional vector:
$\textbf{u} = \begin{pmatrix} x \\ y \\ z \end{pmatrix}$
and $f$ is defined as:
$f(\textbf{u}) = \begin{pmatrix}
ax + by + cz \\
dx + ey + fz
\end{pmatrix}$
This transformation $f$ maps 3-dimensional vectors to 2-dimensional vectors in a linear way (ie. the resulting coordinates only involve sums of multiples of the original coordinates). We can represent this transformation as matrix $F$:
$F = \begin{bmatrix}
a & b & c \\
d & e & f
\end{bmatrix}$
Now, to compute $f(\textbf{u})$ we can simply do a matrix multiplication:
$f(\textbf{u}) = F \textbf{u}$
If we have a matrix $G = \begin{bmatrix}\textbf{u}_1 & \textbf{u}_2 & \cdots & \textbf{u}_q \end{bmatrix}$, where each $\textbf{u}_i$ is a 3-dimensional column vector, then $FG$ results in the linear transformation of all vectors $\textbf{u}_i$ as defined by the matrix $F$:
$FG = \begin{bmatrix}f(\textbf{u}_1) & f(\textbf{u}_2) & \cdots & f(\textbf{u}_q) \end{bmatrix}$
To summarize, the matrix on the left hand side of a dot product specifies what linear transformation to apply to the right hand side vectors. We have already shown that this can be used to perform projections and rotations, but any other linear transformation is possible. For example, here is a transformation known as a *shear mapping*:
```
F_shear = np.array([
[1, 1.5],
[0, 1]
])
plot_transformation(P, F_shear.dot(P), "$P$", "$F_{shear} P$",
axis=[0, 10, 0, 7])
plt.show()
```
Let's look at how this transformation affects the **unit square**:
```
Square = np.array([
[0, 0, 1, 1],
[0, 1, 1, 0]
])
plot_transformation(Square, F_shear.dot(Square), "$Square$", "$F_{shear} Square$",
axis=[0, 2.6, 0, 1.8])
plt.show()
```
Now let's look at a **squeeze mapping**:
```
F_squeeze = np.array([
[1.4, 0],
[0, 1/1.4]
])
plot_transformation(P, F_squeeze.dot(P), "$P$", "$F_{squeeze} P$",
axis=[0, 7, 0, 5])
plt.show()
```
The effect on the unit square is:
```
plot_transformation(Square, F_squeeze.dot(Square), "$Square$", "$F_{squeeze} Square$",
axis=[0, 1.8, 0, 1.2])
plt.show()
```
Let's show a last one: reflection through the horizontal axis:
```
F_reflect = np.array([
[1, 0],
[0, -1]
])
plot_transformation(P, F_reflect.dot(P), "$P$", "$F_{reflect} P$",
axis=[-2, 9, -4.5, 4.5])
plt.show()
```
## Matrix inverse
Now that we understand that a matrix can represent any linear transformation, a natural question is: can we find a transformation matrix that reverses the effect of a given transformation matrix $F$? The answer is yes… sometimes! When it exists, such a matrix is called the **inverse** of $F$, and it is noted $F^{-1}$.
For example, the rotation, the shear mapping and the squeeze mapping above all have inverse transformations. Let's demonstrate this on the shear mapping:
```
F_inv_shear = np.array([
[1, -1.5],
[0, 1]
])
P_sheared = F_shear.dot(P)
P_unsheared = F_inv_shear.dot(P_sheared)
plot_transformation(P_sheared, P_unsheared, "$P_{sheared}$", "$P_{unsheared}$",
axis=[0, 10, 0, 7])
plt.plot(P[0], P[1], "b--")
plt.show()
```
We applied a shear mapping on $P$, just like we did before, but then we applied a second transformation to the result, and *lo and behold* this had the effect of coming back to the original $P$ (we plotted the original $P$'s outline to double check). The second transformation is the inverse of the first one.
We defined the inverse matrix $F_{shear}^{-1}$ manually this time, but NumPy provides an `inv` function to compute a matrix's inverse, so we could have written instead:
```
F_inv_shear = LA.inv(F_shear)
F_inv_shear
```
Only square matrices can be inversed. This makes sense when you think about it: if you have a transformation that reduces the number of dimensions, then some information is lost and there is no way that you can get it back. For example say you use a $2 \times 3$ matrix to project a 3D object onto a plane. The result may look like this:
```
plt.plot([0, 0, 1, 1, 0, 0.1, 0.1, 0, 0.1, 1.1, 1.0, 1.1, 1.1, 1.0, 1.1, 0.1],
[0, 1, 1, 0, 0, 0.1, 1.1, 1.0, 1.1, 1.1, 1.0, 1.1, 0.1, 0, 0.1, 0.1],
"r-")
plt.axis([-0.5, 2.1, -0.5, 1.5])
plt.show()
```
Looking at this image, it is impossible to tell whether this is the projection of a cube or the projection of a narrow rectangular object. Some information has been lost in the projection.
Even square transformation matrices can lose information. For example, consider this transformation matrix:
```
F_project = np.array([
[1, 0],
[0, 0]
])
plot_transformation(P, F_project.dot(P), "$P$", "$F_{project} \cdot P$",
axis=[0, 6, -1, 4])
plt.show()
```
This transformation matrix performs a projection onto the horizontal axis. Our polygon gets entirely flattened out so some information is entirely lost and it is impossible to go back to the original polygon using a linear transformation. In other words, $F_{project}$ has no inverse. Such a square matrix that cannot be inversed is called a **singular matrix** (aka degenerate matrix). If we ask NumPy to calculate its inverse, it raises an exception:
```
try:
LA.inv(F_project)
except LA.LinAlgError as e:
print("LinAlgError:", e)
```
Here is another example of a singular matrix. This one performs a projection onto the axis at a 30° angle above the horizontal axis:
```
angle30 = 30 * np.pi / 180
F_project_30 = np.array([
[np.cos(angle30)**2, np.sin(2*angle30)/2],
[np.sin(2*angle30)/2, np.sin(angle30)**2]
])
plot_transformation(P, F_project_30.dot(P), "$P$", "$F_{project\_30} \cdot P$",
axis=[0, 6, -1, 4])
plt.show()
```
But this time, due to floating point rounding errors, NumPy manages to calculate an inverse (notice how large the elements are, though):
```
LA.inv(F_project_30)
```
As you might expect, the dot product of a matrix by its inverse results in the identity matrix:
$M \cdot M^{-1} = M^{-1} \cdot M = I$
This makes sense since doing a linear transformation followed by the inverse transformation results in no change at all.
```
F_shear.dot(LA.inv(F_shear))
```
Another way to express this is that the inverse of the inverse of a matrix $M$ is $M$ itself:
$((M)^{-1})^{-1} = M$
```
LA.inv(LA.inv(F_shear))
```
Also, the inverse of scaling by a factor of $\lambda$ is of course scaling by a factor of $\frac{1}{\lambda}$:
$ (\lambda \times M)^{-1} = \frac{1}{\lambda} \times M^{-1}$
Once you understand the geometric interpretation of matrices as linear transformations, most of these properties seem fairly intuitive.
A matrix that is its own inverse is called an **involution**. The simplest examples are reflection matrices, or a rotation by 180°, but there are also more complex involutions, for example imagine a transformation that squeezes horizontally, then reflects over the vertical axis and finally rotates by 90° clockwise. Pick up a napkin and try doing that twice: you will end up in the original position. Here is the corresponding involutory matrix:
```
F_involution = np.array([
[0, -2],
[-1/2, 0]
])
plot_transformation(P, F_involution.dot(P), "$P$", "$F_{involution} \cdot P$",
axis=[-8, 5, -4, 4])
plt.show()
```
Finally, a square matrix $H$ whose inverse is its own transpose is an **orthogonal matrix**:
$H^{-1} = H^T$
Therefore:
$H \cdot H^T = H^T \cdot H = I$
It corresponds to a transformation that preserves distances, such as rotations and reflections, and combinations of these, but not rescaling, shearing or squeezing. Let's check that $F_{reflect}$ is indeed orthogonal:
```
F_reflect.dot(F_reflect.T)
```
## Determinant
The determinant of a square matrix $M$, noted $\det(M)$ or $\det M$ or $|M|$ is a value that can be calculated from its elements $(M_{i,j})$ using various equivalent methods. One of the simplest methods is this recursive approach:
$|M| = M_{1,1}\times|M^{(1,1)}| - M_{2,1}\times|M^{(2,1)}| + M_{3,1}\times|M^{(3,1)}| - M_{4,1}\times|M^{(4,1)}| + \cdots ± M_{n,1}\times|M^{(n,1)}|$
* Where $M^{(i,j)}$ is the matrix $M$ without row $i$ and column $j$.
For example, let's calculate the determinant of the following $3 \times 3$ matrix:
$M = \begin{bmatrix}
1 & 2 & 3 \\
4 & 5 & 6 \\
7 & 8 & 0
\end{bmatrix}$
Using the method above, we get:
$|M| = 1 \times \left | \begin{bmatrix} 5 & 6 \\ 8 & 0 \end{bmatrix} \right |
- 2 \times \left | \begin{bmatrix} 4 & 6 \\ 7 & 0 \end{bmatrix} \right |
+ 3 \times \left | \begin{bmatrix} 4 & 5 \\ 7 & 8 \end{bmatrix} \right |$
Now we need to compute the determinant of each of these $2 \times 2$ matrices (these determinants are called **minors**):
$\left | \begin{bmatrix} 5 & 6 \\ 8 & 0 \end{bmatrix} \right | = 5 \times 0 - 6 \times 8 = -48$
$\left | \begin{bmatrix} 4 & 6 \\ 7 & 0 \end{bmatrix} \right | = 4 \times 0 - 6 \times 7 = -42$
$\left | \begin{bmatrix} 4 & 5 \\ 7 & 8 \end{bmatrix} \right | = 4 \times 8 - 5 \times 7 = -3$
Now we can calculate the final result:
$|M| = 1 \times (-48) - 2 \times (-42) + 3 \times (-3) = 27$
To get the determinant of a matrix, you can call NumPy's `det` function in the `numpy.linalg` module:
```
M = np.array([
[1, 2, 3],
[4, 5, 6],
[7, 8, 0]
])
LA.det(M)
```
One of the main uses of the determinant is to *determine* whether a square matrix can be inversed or not: if the determinant is equal to 0, then the matrix *cannot* be inversed (it is a singular matrix), and if the determinant is not 0, then it *can* be inversed.
For example, let's compute the determinant for the $F_{project}$, $F_{project\_30}$ and $F_{shear}$ matrices that we defined earlier:
```
LA.det(F_project)
```
That's right, $F_{project}$ is singular, as we saw earlier.
```
LA.det(F_project_30)
```
This determinant is suspiciously close to 0: it really should be 0, but it's not, due to tiny floating point errors. The matrix is actually singular.
```
LA.det(F_shear)
```
Perfect! This matrix *can* be inversed as we saw earlier. Wow, math really works!
The determinant can also be used to measure how much a linear transformation affects surface areas: for example, the projection matrices $F_{project}$ and $F_{project\_30}$ completely flatten the polygon $P$, until its area is zero. This is why the determinant of these matrices is 0. The shear mapping modified the shape of the polygon, but it did not affect its surface area, which is why the determinant is 1. You can try computing the determinant of a rotation matrix, and you should also find 1. What about a scaling matrix? Let's see:
```
F_scale = np.array([
[0.5, 0],
[0, 0.5]
])
plot_transformation(P, F_scale.dot(P), "$P$", "$F_{scale} \cdot P$",
axis=[0, 6, -1, 4])
plt.show()
```
We rescaled the polygon by a factor of 1/2 on both vertical and horizontal axes so the surface area of the resulting polygon is 1/4$^{th}$ of the original polygon. Let's compute the determinant and check that:
```
LA.det(F_scale)
```
Correct!
The determinant can actually be negative, when the transformation results in a "flipped over" version of the original polygon (eg. a left hand glove becomes a right hand glove). For example, the determinant of the `F_reflect` matrix is -1 because the surface area is preserved but the polygon gets flipped over:
```
LA.det(F_reflect)
```
## Composing linear transformations
Several linear transformations can be chained simply by performing multiple dot products in a row. For example, to perform a squeeze mapping followed by a shear mapping, just write:
```
P_squeezed_then_sheared = F_shear.dot(F_squeeze.dot(P))
```
Since the dot product is associative, the following code is equivalent:
```
P_squeezed_then_sheared = (F_shear.dot(F_squeeze)).dot(P)
```
Note that the order of the transformations is the reverse of the dot product order.
If we are going to perform this composition of linear transformations more than once, we might as well save the composition matrix like this:
```
F_squeeze_then_shear = F_shear.dot(F_squeeze)
P_squeezed_then_sheared = F_squeeze_then_shear.dot(P)
```
From now on we can perform both transformations in just one dot product, which can lead to a very significant performance boost.
What if you want to perform the inverse of this double transformation? Well, if you squeezed and then you sheared, and you want to undo what you have done, it should be obvious that you should unshear first and then unsqueeze. In more mathematical terms, given two invertible (aka nonsingular) matrices $Q$ and $R$:
$(Q \cdot R)^{-1} = R^{-1} \cdot Q^{-1}$
And in NumPy:
```
LA.inv(F_shear.dot(F_squeeze)) == LA.inv(F_squeeze).dot(LA.inv(F_shear))
```
## Singular Value Decomposition
It turns out that any $m \times n$ matrix $M$ can be decomposed into the dot product of three simple matrices:
* a rotation matrix $U$ (an $m \times m$ orthogonal matrix)
* a scaling & projecting matrix $\Sigma$ (an $m \times n$ diagonal matrix)
* and another rotation matrix $V^T$ (an $n \times n$ orthogonal matrix)
$M = U \cdot \Sigma \cdot V^{T}$
For example, let's decompose the shear transformation:
```
U, S_diag, V_T = LA.svd(F_shear) # note: in python 3 you can rename S_diag to Σ_diag
U
S_diag
```
Note that this is just a 1D array containing the diagonal values of Σ. To get the actual matrix Σ, we can use NumPy's `diag` function:
```
S = np.diag(S_diag)
S
```
Now let's check that $U \cdot \Sigma \cdot V^T$ is indeed equal to `F_shear`:
```
U.dot(np.diag(S_diag)).dot(V_T)
F_shear
```
It worked like a charm. Let's apply these transformations one by one (in reverse order) on the unit square to understand what's going on. First, let's apply the first rotation $V^T$:
```
plot_transformation(Square, V_T.dot(Square), "$Square$", "$V^T \cdot Square$",
axis=[-0.5, 3.5 , -1.5, 1.5])
plt.show()
```
Now let's rescale along the vertical and horizontal axes using $\Sigma$:
```
plot_transformation(V_T.dot(Square), S.dot(V_T).dot(Square), "$V^T \cdot Square$", "$\Sigma \cdot V^T \cdot Square$",
axis=[-0.5, 3.5 , -1.5, 1.5])
plt.show()
```
Finally, we apply the second rotation $U$:
```
plot_transformation(S.dot(V_T).dot(Square), U.dot(S).dot(V_T).dot(Square),"$\Sigma \cdot V^T \cdot Square$", "$U \cdot \Sigma \cdot V^T \cdot Square$",
axis=[-0.5, 3.5 , -1.5, 1.5])
plt.show()
```
And we can see that the result is indeed a shear mapping of the original unit square.
## Eigenvectors and eigenvalues
An **eigenvector** of a square matrix $M$ (also called a **characteristic vector**) is a non-zero vector that remains on the same line after transformation by the linear transformation associated with $M$. A more formal definition is any vector $v$ such that:
$M \cdot v = \lambda \times v$
Where $\lambda$ is a scalar value called the **eigenvalue** associated to the vector $v$.
For example, any horizontal vector remains horizontal after applying the shear mapping (as you can see on the image above), so it is an eigenvector of $F_{shear}$. A vertical vector ends up tilted to the right, so vertical vectors are *NOT* eigenvectors of $F_{shear}$.
If we look at the squeeze mapping, we find that any horizontal or vertical vector keeps its direction (although its length changes), so all horizontal and vertical vectors are eigenvectors of $F_{squeeze}$.
However, rotation matrices have no *real* eigenvectors at all (their eigenvalues are complex), except if the rotation angle is 0° or 180°, in which case all non-zero vectors are eigenvectors.
NumPy's `eig` function returns the list of unit eigenvectors and their corresponding eigenvalues for any square matrix. Let's look at the eigenvectors and eigenvalues of the squeeze mapping matrix $F_{squeeze}$:
```
eigenvalues, eigenvectors = LA.eig(F_squeeze)
eigenvalues # [λ0, λ1, …]
eigenvectors # [v0, v1, …]
```
Indeed the horizontal vectors are stretched by a factor of 1.4, and the vertical vectors are shrunk by a factor of 1/1.4=0.714…, so far so good. Let's look at the shear mapping matrix $F_{shear}$:
```
eigenvalues2, eigenvectors2 = LA.eig(F_shear)
eigenvalues2 # [λ0, λ1, …]
eigenvectors2 # [v0, v1, …]
```
Wait, what!? We expected just one unit eigenvector, not two. The second vector is almost equal to $\begin{pmatrix}-1 \\ 0 \end{pmatrix}$, which is on the same line as the first vector $\begin{pmatrix}1 \\ 0 \end{pmatrix}$. This is due to floating point errors. We can safely ignore vectors that are (almost) colinear (ie. on the same line).
## Trace
The trace of a square matrix $M$, noted $tr(M)$ is the sum of the values on its main diagonal. For example:
```
D = np.array([
[100, 200, 300],
[ 10, 20, 30],
[ 1, 2, 3],
])
np.trace(D)
```
The trace does not have a simple geometric interpretation (in general), but it has a number of properties that make it useful in many areas:
* $tr(A + B) = tr(A) + tr(B)$
* $tr(A \cdot B) = tr(B \cdot A)$
* $tr(A \cdot B \cdot \cdots \cdot Y \cdot Z) = tr(Z \cdot A \cdot B \cdot \cdots \cdot Y)$
* $tr(A^T \cdot B) = tr(A \cdot B^T) = tr(B^T \cdot A) = tr(B \cdot A^T) = \sum_{i,j}A_{i,j} \times B_{i,j}$
* …
It does, however, have a useful geometric interpretation in the case of projection matrices (such as $F_{project}$ that we discussed earlier): it corresponds to the number of dimensions after projection. For example:
```
np.trace(F_project)
```
# What next?
This concludes this introduction to Linear Algebra. Although these basics cover most of what you will need to know for Machine Learning, if you wish to go deeper into this topic there are many options available: Linear Algebra [books](http://linear.axler.net/), [Khan Academy](https://www.khanacademy.org/math/linear-algebra) lessons, or just [Wikipedia](https://en.wikipedia.org/wiki/Linear_algebra) pages.
| github_jupyter |
```
import pandas as pd
import os
from tqdm import tqdm
from utils import avg, evidence_to_mask, text_len_scatter
def to_data_df(df, data_dir):
    """Build a shuffled DataFrame of (text, classification, rationale, query) rows.

    For each annotation in *df*, the referenced document is read from
    ``{data_dir}/docs/`` and its whitespace-joined text is paired with a
    token-level rationale mask derived from the first evidence group.

    Parameters
    ----------
    df : pd.DataFrame
        ERASER-style annotations with 'annotation_id', 'query', 'evidences'
        and 'classification' columns.
    data_dir : str
        Dataset root containing a ``docs/`` subdirectory.

    Returns
    -------
    pd.DataFrame
        Shuffled rows with columns ['text', 'classification', 'rationale', 'query'].

    Raises
    ------
    FileNotFoundError
        If a referenced document file is missing (the original printed "???"
        and called quit()).
    """
    rows = []
    columns = ['text', 'classification', 'rationale', 'query']
    for i in tqdm(range(len(df))):
        df_row = df.loc[i]
        doc_id = df_row['annotation_id']
        query = df_row['query']
        evidence_list = df_row['evidences']
        if evidence_list:
            # Only the first evidence group is used for the rationale mask.
            evidence_list = evidence_list[0]
        classification = df_row['classification']
        file = f'{data_dir}/docs/{doc_id}'
        if not os.path.isfile(file):
            raise FileNotFoundError(f'Missing document file: {file}')
        # Context manager guarantees the handle is closed (the original
        # opened the file without ever closing it).
        with open(file, 'r', encoding="utf-8") as f:
            # Each source line is stripped and re-joined with single spaces;
            # the trailing space is harmless because we tokenize with split().
            text = ''.join(line.rstrip() + ' ' for line in f)
        tokens = text.split()
        rationale_mask = evidence_to_mask(tokens, evidence_list)
        # The raw text is used as-is; [SEP]-joined text/query variants were
        # explored previously but are disabled.
        rows.append([text, classification, rationale_mask, query])
    data_df = pd.DataFrame(rows, columns=columns)
    # Shuffle so downstream training does not see the original file order.
    return data_df.sample(frac=1).reset_index(drop=True)
dataset = "movies"
data_dir = f'../data/{dataset}'
train = pd.read_json(f'{data_dir}/train.jsonl', lines=True)
test = pd.read_json(f'{data_dir}/test.jsonl', lines=True)
val = pd.read_json(f'{data_dir}/val.jsonl', lines=True)
train_data_df = to_data_df(train, data_dir)
# train_data_df.to_csv(f"{dataset}/train.csv",index_label="id")
test_data_df = to_data_df(test, data_dir)
# test_data_df.to_csv(f"{dataset}/test.csv",index_label="id")
val_data_df = to_data_df(val, data_dir)
# val_data_df.to_csv(f"{dataset}/val.csv",index_label="id")
d = text_len_scatter(train_data_df,test_data_df,val_data_df)
LABELSIZE = 23
all_texts = list(train_data_df['text']) + list(test_data_df['text']) + list(val_data_df['text'])
all_text_lens = [len(x.split()) for x in all_texts]
import matplotlib.pyplot as plt
plt.hist(all_text_lens)
plt.ylabel(ylabel="number of instances ", fontsize=LABELSIZE)
plt.xlabel(xlabel="text length", fontsize=LABELSIZE)
plt.axvline(x=512, linestyle="dashed", color="black")
plt.savefig("movies_distribution.png", bbox_inches = 'tight', dpi=300)
plt.show()
def generate_class_stats(train_df, test_df, val_df):
    """Collect per-class text/rationale length statistics over all three splits.

    Rows labelled "NEG" go to class 0; every other label goes to class 1.

    Returns a 7-tuple:
    (text_lens_0, text_lens_1, rationale_lens_0, rationale_lens_1,
     rationale_percent_0, rationale_percent_1, class_distribution)
    """
    stats = {
        0: {"text": [], "rationale": [], "percent": []},
        1: {"text": [], "rationale": [], "percent": []},
    }
    class_distribution = [0, 0]
    for frame in (train_df, test_df, val_df):
        for idx in range(len(frame)):
            row = frame.loc[idx]
            bucket = 0 if row['classification'] == "NEG" else 1
            n_tokens = len(row['text'].split())
            n_rationale = row['rationale'].count(1)
            stats[bucket]["text"].append(n_tokens)
            stats[bucket]["rationale"].append(n_rationale)
            stats[bucket]["percent"].append(n_rationale / n_tokens)
            class_distribution[bucket] += 1
    return (stats[0]["text"], stats[1]["text"],
            stats[0]["rationale"], stats[1]["rationale"],
            stats[0]["percent"], stats[1]["percent"],
            class_distribution)
text_lens_0,text_lens_1,rationale_lens_0,rationale_lens_1,rationale_percent_0,rationale_percent_1,class_distribution = generate_class_stats(train_data_df,test_data_df,val_data_df)
text_lens_all = text_lens_0 + text_lens_1
rationale_lens_all = rationale_lens_0 + rationale_lens_1
rationale_percent_all = rationale_percent_0 + rationale_percent_1
class_distr = [class_distribution[0]/sum(class_distribution),class_distribution[1]/sum(class_distribution)]
for l in [rationale_lens_all,text_lens_all,rationale_percent_all,text_lens_0,text_lens_1,rationale_lens_0,rationale_lens_1,rationale_percent_0,rationale_percent_1]:
print(avg(l))
class_distr
```
| github_jupyter |
# Notebook served by Voilà
#### Notebook copied from https://github.com/ChakriCherukuri/mlviz
<h2>Gradient Descent</h2>
* Given the multi-variable function $\large {F(x)}$, differentiable in a neighborhood of a point $\large a$
* $\large F(x)$ decreases fastest if one goes from $\large a$ in the direction of the negative gradient of $\large F$ at $\large a$, $\large -\nabla{F(a)}$
<h3>Gradient Descent Algorithm:</h3>
* Choose a starting point, $\large x_0$
* Choose the sequence $\large x_0, x_1, x_2, ...$ such that
$ \large x_{n+1} = x_n - \eta \nabla F(x_n) $
So convergence of the gradient descent depends on the starting point $\large x_0$ and the learning rate $\large \eta$
```
from time import sleep
import numpy as np
from ipywidgets import *
import bqplot.pyplot as plt
from bqplot import Toolbar
f = lambda x: np.exp(-x) * np.sin(5 * x)
df = lambda x: -np.exp(-x) * np.sin(5 * x) + 5 * np.cos(5 *x) * np.exp(-x)
x = np.linspace(0.5, 2.5, 500)
y = f(x)
def update_sol_path(x, y):
    """Mirror the current solution trajectory onto both plot marks (line + points)."""
    for mark in (sol_path, sol_points):
        # hold_sync batches the two attribute updates into one front-end message
        with mark.hold_sync():
            mark.x = x
            mark.y = y
def gradient_descent(x0, f, df, eta=.1, tol=1e-6, num_iters=10):
    """Run animated gradient descent on *f* starting from *x0*.

    Parameters
    ----------
    x0 : float
        Starting point.
    f, df : callable
        Objective function and its derivative.
    eta : float
        Learning rate.
    tol : float
        Stop once successive iterates are closer than this.
    num_iters : int
        Maximum number of update steps.

    Returns
    -------
    list of float
        The iterate trajectory [x0, x1, ...] (the original computed this
        but discarded it, so callers could not access the final value).

    Side effects: updates the global ``sol_lbl`` label and the solution-path
    marks via ``update_sol_path``, sleeping 0.5 s per step to animate.
    """
    x = [x0]
    i = 0
    while i < num_iters:
        x_prev = x[-1]
        grad = df(x_prev)
        x_curr = x_prev - eta * grad  # gradient step
        x.append(x_curr)
        sol_lbl.value = sol_lbl_tmpl.format(x_curr)
        sleep(.5)  # slow down so each step is visible in the plot
        # renamed comprehension variable: the original shadowed the loop index i
        update_sol_path(x, [f(pt) for pt in x])
        if np.abs(x_curr - x_prev) < tol:
            break  # converged
        i += 1
    return x
txt_layout = Layout(width='150px')
x0_box = FloatText(description='x0', layout=txt_layout, value=2.4)
eta_box = FloatText(description='Learning Rate',
style={'description_width':'initial'},
layout=txt_layout, value=.1)
go_btn = Button(description='GO', button_style='success', layout=Layout(width='50px'))
reset_btn = Button(description='Reset', button_style='success', layout=Layout(width='100px'))
sol_lbl_tmpl = 'x = {:.4f}'
sol_lbl = Label()
# sol_lbl.layout.width = '300px'
# plot of curve and solution
fig_layout = Layout(width='720px', height='500px')
fig = plt.figure(layout=fig_layout, title='Gradient Descent', display_toolbar=True)
fig.pyplot = Toolbar(figure=fig)
curve = plt.plot(x, y, colors=['dodgerblue'], stroke_width=2)
sol_path = plt.plot([], [], colors=['#ccc'], opacities=[.7])
sol_points = plt.plot([], [], 'mo', default_size=20)
def optimize():
    """Reset the figure to the bare curve, then animate gradient descent from the widget values."""
    # BUG FIX: the original wrote `f.marks = [curve]`, which sets an attribute
    # on the objective lambda `f` and has no effect; the marks belong to the
    # bqplot figure `fig`.
    fig.marks = [curve]
    gradient_descent(x0_box.value, f, df, eta=eta_box.value)
def reset():
    """Restore the axis ranges and clear the solution trajectory and label."""
    x_scale = curve.scales['x']
    y_scale = curve.scales['y']
    x_scale.min, x_scale.max = .4, 2.5
    y_scale.min, y_scale.max = -.5, .4
    for mark in (sol_path, sol_points):
        mark.x = mark.y = []
    sol_lbl.value = ''
go_btn.on_click(lambda btn: optimize())
reset_btn.on_click(lambda btn: reset())
final_fig = VBox([fig, fig.pyplot],
layout=Layout(overflow_x='hidden'))
HBox([final_fig, VBox([x0_box, eta_box, go_btn, reset_btn, sol_lbl])])
```
| github_jupyter |
# Wrangle & Analyze Data
### (WeRateDogs Twitter Archive)
<ul>
<li><a href="#intro">Introduction</a></li>
<li><a href="#wrangle">Data Wrangling</a></li>
<ul>
<li><a href="#gather">Gathering Data</a></li>
<li><a href="#assess">Assessing Data</a></li>
<li><a href="#clean">Cleaning Data</a></li>
</ul>
<li><a href="#analyze">Storing, Analyzing and Visualizing Data</a></li>
</ul>
<a id='intro'></a>
## Introduction
> The dataset which will be wrangled (and analyzed and visualized) is the tweet archive of Twitter user @dog_rates, also known as WeRateDogs. WeRateDogs is a Twitter account that rates people's dogs with a humorous comment about the dog. I will use Python (and its libraries) to analyze and visualize the dataset through jupyter notebook.
```
#import library
import pandas as pd
import numpy as np
import requests
import os
import time
import matplotlib.pyplot as plt
from matplotlib import cm
%matplotlib inline
import seaborn as sns
```
<a id='wrangle'></a>
# Data Wrangling
<a id='gather'></a>
## Gathering Data
> <b>In this part, we will gather three parts of data</b>
<ol>
<li>The WeRateDogs Twitter archive. Downloading the file named twitter-archive-enhanced.csv</li>
<li>The tweet image predictions, i.e., what breed of dog (or other object, animal, etc.) is present in each tweet according to a neural network. This file (image_predictions.tsv) is hosted on Udacity's servers and should be downloaded programmatically using the Requests library and the following URL: https://d17h27t6h515a5.cloudfront.net/topher/2017/August/599fd2ad_image-predictions/image-predictions.tsv</li>
<li>Each tweet's retweet count and favorite ("like") count at minimum. Using the tweet IDs in the WeRateDogs Twitter archive, query the Twitter API for each tweet's JSON data using Python's Tweepy library and store each tweet's entire set of JSON data in a file called tweet_json.txt file. Each tweet's JSON data should be written to its own line. Then read this .txt file line by line into a pandas DataFrame with (at minimum) tweet ID, retweet count, and favorite count. Note: do not include your Twitter API keys, secrets, and tokens in your project submission.</li>
</ol>
<b>Step 1. Importing Twitter Archieve File</b>
```
#import csv file
df_archive = pd.read_csv('twitter-archive-enhanced.csv')
df_archive.head()
```
<b>Step 2. Programatically Download Tweet Image Predictions TSV</b>
```
# Download the tweet image predictions TSV with requests and store it locally.
url = 'https://d17h27t6h515a5.cloudfront.net/topher/2017/August/599fd2ad_image-predictions/image-predictions.tsv'
response = requests.get(url)
# Fail loudly on an HTTP error (4xx/5xx) instead of silently writing an
# error page into image_predictions.tsv.
response.raise_for_status()
with open('image_predictions.tsv', mode='wb') as file:
    file.write(response.content)
# Import the tweet image predictions TSV file into a DataFrame (tab-separated).
df_img = pd.read_csv('image_predictions.tsv', sep='\t')
df_img.head()
```
<b>Step 3. Downloading Tweet JSON Data</b>
```
import tweepy
from tweepy import OAuthHandler
import json
from timeit import default_timer as timer
# Query Twitter API for each tweet in the Twitter archive and save JSON in a text file
# These are hidden to comply with Twitter's API terms and conditions
consumer_key = 'HIDDEN'
consumer_secret = 'HIDDEN'
access_token = 'HIDDEN'
access_secret = 'HIDDEN'
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
api = tweepy.API(auth, wait_on_rate_limit=True)
# NOTE TO STUDENT WITH MOBILE VERIFICATION ISSUES:
# NOTE TO REVIEWER: this student had mobile verification issues so the following
# Twitter API code was sent to this student from a Udacity instructor
# Tweet IDs for which to gather additional data via Twitter's API
tweet_ids = df_archive.tweet_id.values
len(tweet_ids)
# Query Twitter's API for JSON data for each tweet ID in the Twitter archive
count = 0
fails_dict = {}
start = timer()
# Save each tweet's returned JSON as a new line in a .txt file
with open('tweet_json.txt', 'w') as outfile:
# This loop will likely take 20-30 minutes to run because of Twitter's rate limit
for tweet_id in tweet_ids:
count += 1
print(str(count) + ": " + str(tweet_id))
try:
tweet = api.get_status(tweet_id, tweet_mode='extended')
print("Success")
json.dump(tweet._json, outfile)
outfile.write('\n')
except tweepy.TweepError as e:
print("Fail")
fails_dict[tweet_id] = e
pass
end = timer()
print(end - start)
print(fails_dict)
# read json txt file and save as a df
# NOTE(review): the API loop above wrote to 'tweet_json.txt' (underscore) but
# this cell reads 'tweet-json.txt' (hyphen) — presumably the course-provided
# file; confirm which file is intended before running end-to-end.
tw_json = []
with open('tweet-json.txt', 'r') as json_data:
    #make a loop to read file
    line = json_data.readline()
    while line:
        # each line is one tweet's full JSON payload
        status = json.loads(line)
        # extract variable
        status_id = status['id']
        status_ret_count = status['retweet_count']
        status_fav_count = status['favorite_count']
        # make a dictionary
        json_file = {'tweet_id': status_id,
                     'retweet_count': status_ret_count,
                     'favorite_count': status_fav_count
                     }
        tw_json.append(json_file)
        # read next line
        line = json_data.readline()
#convert the dictionary list to a df
df_json = pd.DataFrame(tw_json, columns = ['tweet_id', 'retweet_count', 'favorite_count'])
df_json.head()
```
<a id='assess'></a>
# Assessing Data
> <b>After gathering each of the above pieces of data, assess them visually and programmatically for quality and tidiness issues.</b>
>Using two types of assessment:
>1. Visual assessment: scrolling through the data in your preferred software application (Google Sheets, Excel, a text editor, etc.).
>2. Programmatic assessment: using code to view specific portions and summaries of the data (pandas' head, tail, and info methods, for example).
<ul>
<li><a href="#quality"><b>Quality Issues</b></a> -- issues with content. Low quality data is also known as dirty data.</li>
<li><a href="#tidy"><b>Tidiness Issues</b></a> -- issues with structure that prevent easy analysis. Untidy data is also known as messy data. Tidy data requirements:</li>
<ol>
<li>Each variable forms a column.</li>
<li>Each observation forms a row.</li>
<li>Each type of observational unit forms a table.</li>
</ol>
</ul>
<b>Step 1. Assessing Twitter Archive File</b>
```
#print out the head()
df_archive.head()
df_archive.shape
df_archive.info()
```
There are 2356 rows and 17 columns.
From the info above, we found that several columns have missing values, including 'in_reply_to_status_id', 'in_reply_to_user_id', 'retweeted_status_id', 'retweeted_status_user_id', 'retweeted_status_timestamp' and 'expanded_urls'.
```
#check with in_reply_to_status_id column
df_archive.in_reply_to_status_id.value_counts()
type(df_archive['tweet_id'][0])
type(df_archive['in_reply_to_status_id'][1])
type(df_archive['in_reply_to_user_id'][1])
type(df_archive['retweeted_status_id'][1])
type(df_archive['retweeted_status_user_id'][1])
```
<b>Problem #1: The format of id is wrong and it should be changed to integer.</b>
```
#check column rating_numerator
df_archive.rating_numerator.describe()
df_archive.rating_numerator.value_counts().sort_index()
#check column rating_denominator
df_archive.rating_denominator.describe()
df_archive.rating_denominator.value_counts().sort_index()
```
<b>Problem #2: The rating denominator could only be 10 and other values are invalid.</b>
```
#check timestamp column
df_archive['timestamp'].value_counts()
type(df_archive['timestamp'][0])
df_archive['retweeted_status_timestamp'].value_counts()
type(df_archive['retweeted_status_timestamp'][0])
```
<b>Problem #3: When we explore the type of two timestamp columns, one is string, the other one is float. The format of timestamp should be changed to datetime.</b>
```
df_archive.name.value_counts().sort_index(ascending = True)
#collect all error names
error_name = df_archive.name.str.contains('^[a-z]', regex = True)
df_archive[error_name].name.value_counts().sort_index()
len(df_archive[error_name])
```
<b>Problem #4: There are 109 invalid names not starting with a capitalized alphabet.</b>
```
#check with four columns of dogs' stage
df_archive.doggo.value_counts()
df_archive.floofer.value_counts()
df_archive.pupper.value_counts()
df_archive.puppo.value_counts()
```
<b>Problem #5: The "None" value should be changed to "NaN" in these four columns.</b>
```
#show the number of retweet
df_archive.retweeted_status_id.isnull().value_counts()
df_archive.retweeted_status_user_id.isnull().value_counts()
```
<b>Problem #6: As we do not want the duplicated information, so we would clear away the rows of retweet.</b>
```
df_archive.source.value_counts()
```
<b>Problem #7: source name should be changed and moved the attached link. </b>
<b>Step 2. Tweet Image Predictions</b>
```
df_img.head()
df_img.shape
df_img.info()
```
<b>Problem #8: There are 2075 rows and 12 columns. Based on Twitter Archive file (2356 rows), we know some pictures are missing.</b>
```
#check if id duplicate
df_img.tweet_id.duplicated().value_counts()
##check if jpg duplicate
df_img.jpg_url.duplicated().value_counts()
```
<b>Problem #9: There are 66 jpg_url which are duplicated.</b>
```
#check the image number column
df_img.img_num.value_counts()
```
Some people have posted more than one picture.
<b>Step 3. Checking JSON File</b>
```
df_json.head()
df_json.shape
df_json.info()
df_json.describe()
df_json.tweet_id.duplicated().value_counts()
df_json['tweet_id'].nunique()
```
There are 2354 rows and 3 columns. No null variables. No duplicated tweet id.
```
type(df_json['tweet_id'][0])
type(df_json['retweet_count'][0])
type(df_json['favorite_count'][0])
```
The format of variable is correct.
<a id='quality'></a>
# Summary
## Quality Issue
<b>Twitter Archive File</b>
<ol>
<li>The format of id is wrong and it should be changed to integer.</li>
<li>The rating denominator could only be 10 and other values are invalid.</li>
<li>When we explore the type of two timestamp columns, one is string, the other one is float. The format of timestamp should be changed to datetime.</li>
<li>There are 109 invalid names not starting with a capitalized alphabet.</li>
<li>In four columns of dog's stage, the "None" value should be changed to "NaN" in these four columns.</li>
<li>As we do not want the duplicated information, so we would clear away the rows of retweet based on retweet id.</li>
<li>Change the value for source column.</li>
</ol>
<b>Tweet Image Predictions TSV</b>
<ul>
<li>There are 2075 rows in prediction file, 2354 rows in JSON data. Based on Twitter Archive file (2356 rows), we know some rows are not matching.</li>
<li>There are 66 jpg_url which are duplicated.</li>
</ul>
<b>JSON File</b>
<ul>
<li>No quality issue for the json file.</li>
</ul>
<a id='tidy'></a>
## Tidiness Issue
<ul>
<li>df_archive could drop empty columns of retweet infomation like 'in_reply_to_status_id', 'in_reply_to_user_id', 'retweeted_status_user_id', 'retweeted_status_timestamp'.</li>
<li>The four columns of dog's stage should be merged into one column.</li>
<li>Merging JSON file, df_archive dataframe and img file into one.</li>
</ul>
<a id='clean'></a>
## Cleaning Data
> <b>Store the clean DataFrame(s) in a CSV file with the main one named twitter_archive_master.csv. Analyze and visualize your wrangled data in your wrangle_act.ipynb Jupyter Notebook.</b>
>The issues that satisfy the Project Motivation must be cleaned:
>1. Cleaning includes merging individual pieces of data according to the rules of tidy data.
>2. The fact that the rating numerators are greater than the denominators does not need to be cleaned. This unique rating system is a big part of the popularity of WeRateDogs.
### Dealing with tidiness issue
#### 1. Drop the columns we do not use
```
#make a copy of three data files
df_archive_clean = df_archive.copy()
df_img_clean = df_img.copy()
df_json_clean = df_json.copy()
#drop useless columns
df_archive_clean = df_archive_clean.drop(['in_reply_to_status_id', 'in_reply_to_user_id', 'retweeted_status_user_id', 'retweeted_status_timestamp'], axis=1)
#fill the null url
df_archive_clean.expanded_urls.head()
#the website url should be 'https://twitter.com/dog_rates/status/' plus their id, so we could fill it
df_archive_clean.expanded_urls = 'https://twitter.com/dog_rates/status/' + df_archive_clean.tweet_id.astype(str)
#check with the df see if everything is fixed
df_archive_clean.info()
```
#### 2. Merge four columns of dog's stage into one
```
#replace 'None' to ''
df_archive_clean[['doggo', 'floofer', 'pupper', 'puppo']] = df_archive_clean[['doggo', 'floofer', 'pupper', 'puppo']].replace('None', '')
df_archive_clean.head()
#combine four columns to stage
df_archive_clean['dog_stage'] = df_archive_clean['doggo'] + df_archive_clean['floofer'] + df_archive_clean['pupper'] + df_archive_clean['puppo']
#drop other four stages columns
df_archive_clean = df_archive_clean.drop(['doggo', 'floofer', 'pupper', 'puppo'], axis=1)
df_archive_clean.dog_stage.value_counts()
#replace the null value and multiple stage
df_archive_clean['dog_stage'] = df_archive_clean['dog_stage'].replace('', np.nan)
df_archive_clean['dog_stage'] = df_archive_clean['dog_stage'].replace('doggopupper', 'multiple')
df_archive_clean['dog_stage'] = df_archive_clean['dog_stage'].replace('doggofloofer', 'multiple')
df_archive_clean['dog_stage'] = df_archive_clean['dog_stage'].replace('doggopuppo', 'multiple')
#double check with our df
df_archive_clean.dog_stage.value_counts()
```
#### 3. Merge three files into one
```
master_clean = pd.merge(df_archive_clean, df_img_clean, on = 'tweet_id', how = 'inner')
master_clean = pd.merge(master_clean, df_json_clean, on = 'tweet_id', how = 'inner')
master_clean.info()
```
### Dealing with quality issue
#### 1. change data format and drop the retweet rows
```
#change timestamp format from string to datetime
master_clean.timestamp = pd.to_datetime(master_clean.timestamp)
#change id format from float to int
id_clean = master_clean.retweeted_status_id
id_clean = id_clean.dropna()
id_clean = id_clean.astype('int64')
#drop the retweet rows
master_clean = master_clean.drop(master_clean[master_clean.retweeted_status_id.apply(lambda x : x in id_clean.values)].index.values, axis = 0)
master_clean = master_clean.drop('retweeted_status_id', axis=1)
master_clean.info()
```
#### 2. fix the rating part
```
# rating_denominator should be 10.
master_clean.rating_denominator.value_counts()
#check those rating_denominator is not equal to 10
pd.set_option('display.max_colwidth', 150)
master_clean[['tweet_id', 'text', 'rating_numerator', 'rating_denominator']].query('rating_denominator != 10')
master_clean.query('rating_denominator != 10').shape[0]
#find the error rating and fix it
#740373189193256964(14/10);722974582966214656(13/10);716439118184652801(11/10);666287406224695296(9/10);682962037429899265(10/10);
master_clean.loc[master_clean.tweet_id == 740373189193256964, 'rating_numerator':'rating_denominator'] = [14, 10]
master_clean.loc[master_clean.tweet_id == 722974582966214656, 'rating_numerator':'rating_denominator'] = [13, 10]
master_clean.loc[master_clean.tweet_id == 716439118184652801, 'rating_numerator':'rating_denominator'] = [11, 10]
master_clean.loc[master_clean.tweet_id == 666287406224695296, 'rating_numerator':'rating_denominator'] = [9, 10]
master_clean.loc[master_clean.tweet_id == 682962037429899265, 'rating_numerator':'rating_denominator'] = [10, 10]
#number of rows that rating denominator is not equal to 10
master_clean.query('rating_denominator != 10').shape[0]
master_clean[['text', 'rating_numerator', 'rating_denominator']].query('rating_denominator != 10')
#drop all the rows above
master_clean = master_clean.drop([345, 415, 734, 924, 1022, 1047, 1065, 1131, 1207, 1379, 1380, 1512, 1571], axis = 0)
master_clean.query('rating_denominator != 10').shape[0] #the rows are droped
master_clean.info()
```
#### 3. replace those invalid names to 'None'
```
master_clean.reset_index(drop=True, inplace=True)
# Names captured as lowercase words ("a", "the", "an", ...) are extraction
# artifacts, not real dog names.
error_name = master_clean.name.str.contains('^[a-z]', regex = True)
master_clean[error_name].name.value_counts().sort_index()
# Replace every invalid name with 'None' in one vectorized assignment.
# Equivalent to the original positional counter loop, since the index was
# just reset to 0..n-1, and much faster.
master_clean.loc[error_name, 'name'] = 'None'
master_clean.name.value_counts()
```
#### 4. create a new column called 'breed' and if p1 confidence >= 95% and p1_dog is True or p2 confidence <= 1% and p2_dog is True, then put the predicted breed into breed column
```
# Create the target column, defaulting every row to 'None' before classification.
master_clean['breed'] = 'None'
master_clean.breed.value_counts()
# Vectorized replacement for the original row-by-row loop: take the p1
# prediction when it is a confident (>= 95%) dog classification, otherwise fall
# back to p2 when its confidence is <= 1% and it is a dog, otherwise 'Unsure'.
# np.select evaluates the conditions in order, matching the original if/elif.
# NOTE(review): the <= 1% threshold on p2 selects near-zero-confidence
# predictions, which looks suspicious -- confirm the intent (>= 1%?).
conditions = [
    (master_clean.p1_conf >= 0.95) & master_clean.p1_dog,
    (master_clean.p2_conf <= 0.01) & master_clean.p2_dog,
]
choices = [master_clean.p1, master_clean.p2]
master_clean['breed'] = np.select(conditions, choices, default='Unsure')
# Normalize the labels: capitalize and replace underscores with spaces.
master_clean['breed'] = master_clean.breed.str.capitalize().str.replace('_',' ')
master_clean.breed.value_counts()
```
#### 5. drop p1/p2/p3 columns
```
# Drop the raw image-prediction columns now that 'breed' has been derived.
prediction_cols = [p + suffix for p in ('p1', 'p2', 'p3') for suffix in ('', '_conf', '_dog')]
master_clean = master_clean.drop(prediction_cols, axis = 1)
master_clean.info()
```
#### 6. rename source column
```
# Raw `source` values are full HTML anchor tags from the Twitter API.
master_clean.source.value_counts()
# NOTE(review): this is an alias, not a copy -- the replace below also mutates
# master_clean itself (apparently intended, since master_clean is saved later).
master_clean3 = master_clean
# Map each anchor tag to a short human-readable label.
master_clean3['source'] = master_clean3.source.replace({'<a href="http://twitter.com/download/iphone" rel="nofollow">Twitter for iPhone</a>' : 'Twitter for Iphone',
'<a href="http://twitter.com" rel="nofollow">Twitter Web Client</a>' : 'Twitter Web Client',
'<a href="https://about.twitter.com/products/tweetdeck" rel="nofollow">TweetDeck</a>' : 'TweetDeck'
})
master_clean3.source.value_counts()
```
<a id='analyze'></a>
## Storing, Analyzing and Visualizing Data
> <b>Clean each of the issues you documented while assessing. Perform this cleaning in wrangle_act.ipynb as well. The result should be a high quality and tidy master pandas DataFrame (or DataFrames, if appropriate).Then analyze and visualize the wrangled data. </b>
<ul>
<li><a href="#insight">Insights</a> </li>
<li><a href="#visual">Visualization</a></li>
</ul>
<b>Storing</b>
```
# Persist the cleaned dataset. index=False avoids writing the DataFrame index,
# which would otherwise reappear as an 'Unnamed: 0' column on re-read.
master_clean.to_csv('twitter_archive_master.csv', index=False)
twitter_archive_master = pd.read_csv('twitter_archive_master.csv')
twitter_archive_master.head()
```
<a id='insight'></a>
## Insights
### Q1: Which dog has highest retweet counts?
```
# Row with the highest retweet count (first row after a descending sort).
twitter_archive_master.sort_values(by = 'retweet_count', ascending = False).iloc[0]
```
The dog with the highest retweet count is in the doggo stage. The post was retweeted 79515 times and received 131075 favorite counts.
### Q2: Which dog receive most favorite counts?
```
# Row with the highest favorite count (first row after a descending sort).
twitter_archive_master.sort_values(by = 'favorite_count', ascending = False).iloc[0]
```
The dog with the most favorite counts is in the puppo stage. The post was retweeted 48265 times and received 132810 favorite counts.
### Q3: What way do users most use to log in WeRateDog?
```
# Tweet counts per client/source, most common first.
twitter_archive_master.source.value_counts()
# Proportion of the most common source among all tweets
# (iloc[0] is the top entry of value_counts, i.e. the dominant client).
twitter_archive_master.source.value_counts().iloc[0]/twitter_archive_master.shape[0]
```
About 98% of WeRateDogs users access Twitter through an iPhone.
### Q4: What is the relation between retweet counts and favorite counts?
```
twitter_archive_master.describe()
# Mean retweet count, mean favorite count, and their ratio
# (average favorites per retweet).
twitter_archive_master.retweet_count.mean()
twitter_archive_master.favorite_count.mean()
twitter_archive_master.favorite_count.mean()/twitter_archive_master.retweet_count.mean()
```
Comparing the two means printed above, the mean favorite count is roughly three times the mean retweet count. This means that people will often give a thumbs up to a post without retweeting it.
<a id='visual'></a>
## Visualization
### #1. What is the most popular dog breed?
```
# Distribution of the derived breed column.
twitter_archive_master.breed.value_counts()
# Total number of identified dog breeds (excluding the 'Unsure' bucket).
twitter_archive_master.breed.value_counts().shape[0] - 1
# Bar chart of breed popularity.
plt.figure(figsize = (9, 9))
# Keep only breeds with 3..100 tweets to declutter the chart (drops one-off
# breeds and the dominant 'Unsure'/'None' buckets).
breed_filter=twitter_archive_master.groupby('breed').filter(lambda x: len(x) >= 3 and len(x) <= 100)
breed_filter['breed'].value_counts(ascending=True).plot(kind = 'barh', alpha = 0.8, color = 'pink')
plt.title('The Most Popular Dog Breed', fontsize=20)
plt.xlabel('Counts',fontsize=18)
plt.ylabel('Dog Breed',fontsize=18);
```
Through this chart, the most popular dog breed is Pug with 21 counts. The second popular dog breeds are Pembroke and Samoyed with 19 counts. The third is Golden retriever with 18 counts. The dog breed has 50 categories in total.
### #2. What is the proportion of dog stages? And what's the relation between favorite counts and its dog stage?
```
# Distribution of dog stages (doggo / pupper / puppo / floofer / none).
twitter_archive_master.dog_stage.value_counts()
# Pie chart of the dog-stage proportions.
plt.figure(figsize=(12,9))
sns.set(style='darkgrid')
name = twitter_archive_master['dog_stage'].value_counts()
# One offset per slice; pops the last (smallest) slice out of the pie.
# NOTE(review): assumes exactly 5 distinct stages -- confirm len(name) == 5.
explode = (0, 0, 0, 0, 0.1)
plt.pie(name, explode, labels = name.index, shadow=True, textprops={'fontsize': 20}, autopct='%1.1f%%', startangle = 230)
plt.axis('equal')
plt.title('The Proportion of Dog Stage', fontsize=35)
plt.legend();
# Average favorite count per dog stage.
avg_fav = twitter_archive_master.groupby('dog_stage').favorite_count.mean()
avg_fav
# Bar chart relating favorite counts to dog stage.
plt.figure(figsize = (9, 9))
plt.bar(avg_fav.index.values, avg_fav, color = 'orange', alpha = 0.8)
plt.title('The Relation between Favorite Counts and Dog Stage', fontsize=18)
plt.xlabel('Dog Stage', fontsize=15)
plt.ylabel('Favorite Counts', fontsize=15);
```
Based on the pie chart, the largest proportion of dog stages is pupper, accounting for 66.3%, and the smallest is floofer at 2.3%. The bar chart, however, shows that pupper receives the fewest favorite counts. The stage that gets the most favorite counts is puppo, even though puppo makes up only 7.2% of all dog stages. Floofer receives an average of 13206 favorite counts, the second highest. So the relation between favorite counts and a stage's share of the dogs is not a positive one.
| github_jupyter |
# Determine derivative of Jacobian from angular velocity to exponential rates
Peter Corke 2021
SymPy code to determine the time derivative of the mapping from angular velocity to exponential coordinate rates.
```
from sympy import *
```
A rotation matrix can be expressed in terms of exponential coordinates (also called Euler vector)
$
\mathbf{R} = e^{[\varphi]_\times}
$
where $\mathbf{R} \in SO(3)$ and $\varphi \in \mathbb{R}^3$.
The mapping from angular velocity $\omega$ to exponential coordinate rates $\dot{\varphi}$ is
$
\dot{\varphi} = \mathbf{A} \omega
$
where $\mathbf{A}$ is given by (2.107) of [Robot Dynamics Lecture Notes, Robotic Systems Lab, ETH Zurich, 2018](https://ethz.ch/content/dam/ethz/special-interest/mavt/robotics-n-intelligent-systems/rsl-dam/documents/RobotDynamics2018/RD_HS2018script.pdf)
$
\mathbf{A} = I_{3 \times 3} - \frac{1}{2} [v]_\times + [v]^2_\times \frac{1}{\theta^2} \left( 1 - \frac{\theta}{2} \frac{\sin \theta}{1 - \cos \theta} \right)
$
where $\theta = \| \varphi \|$ and $v = \hat{\varphi}$
We simplify the equation as
$
\mathbf{A} = I_{3 \times 3} - \frac{1}{2} [v]_\times + [v]^2_\times \Theta
$
where
$
\Theta = \frac{1}{\theta^2} \left( 1 - \frac{\theta}{2} \frac{\sin \theta}{1 - \cos \theta} \right)
$
We can find the derivative using the chain rule
$
\dot{\mathbf{A}} = - \frac{1}{2} [\dot{v}]_\times + 2 [v]_\times [\dot{v}]_\times \Theta + [v]^2_\times \dot{\Theta}
$
We start by defining some symbols
```
# Scalar symbols: Theta (the scalar coefficient in A), theta = ||varphi||,
# theta_dot for d(theta)/dt, and time t. Declared real so SymPy can simplify.
Theta, theta, theta_dot, t = symbols('Theta theta theta_dot t', real=True)
```
We start by finding an expression for $\Theta$ which depends on $\theta(t)$
```
# Make theta an (unspecified) function of time so we can differentiate w.r.t. t.
theta_t = Function(theta)(t)
# Theta = (1 - (theta/2) * sin(theta) / (1 - cos(theta))) / theta**2, from (2.107).
Theta = 1 / theta_t ** 2 * (1 - theta_t / 2 * sin(theta_t) / (1 - cos(theta_t)))
Theta
```
and now determine the derivative
```
# Time derivative of Theta, still expressed in terms of theta(t).
T_dot = Theta.diff(t)
T_dot
```
which is a somewhat complex expression that depends on $\theta(t)$ and $\dot{\theta}(t)$.
We will remove the time dependency and generate code
```
# Substitute the derivative term first, then theta(t) itself -- the reverse
# order would rewrite theta(t) inside the Derivative and destroy it. Then emit
# plain Python code for the resulting expression.
T_dot = T_dot.subs([(theta_t.diff(t), theta_dot), (theta_t, theta)])
pycode(T_dot)
```
In order to evaluate the line above we need an expression for $\theta$ and $\dot{\theta}$. $\theta$ is the norm of $\varphi$ whose elements are functions of time
```
# Component names of the exponential-coordinate vector varphi.
phi_names = ('varphi_0', 'varphi_1', 'varphi_2')
# Build the four parallel lists directly with comprehensions:
phi = [symbols(name, real=True) for name in phi_names]   # plain symbols, e.g. varphi_0
phi_t = [sym_fn for sym_fn in (Function(sym)(t) for sym in phi)]  # time functions, e.g. varphi_0(t)
phi_d = [fn.diff(t) for fn in phi_t]                     # derivatives, d varphi_i(t)/dt
phi_n = [name + '_dot' for name in phi_names]            # replacement names, e.g. 'varphi_0_dot'
```
Compute the norm
```
# theta is the Euclidean norm of the time-dependent vector varphi(t).
theta = Matrix(phi_t).norm()
theta
```
and find its derivative
```
# Differentiate the norm with respect to time.
theta_dot = theta.diff(t)
theta_dot
```
and now remove the time dependenices
```
# Swap the derivative terms for their *_dot names first, then the time
# functions for plain symbols (same ordering rationale as above).
theta_dot = theta_dot.subs(a for a in zip(phi_d, phi_n))
theta_dot = theta_dot.subs(a for a in zip(phi_t, phi))
theta_dot
```
which is simply the dot product over the norm.
| github_jupyter |
<table class="ee-notebook-buttons" align="left">
<td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/GetStarted/02_adding_data_to_qgis.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
<td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/GetStarted/02_adding_data_to_qgis.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
<td><a target="_blank" href="https://mybinder.org/v2/gh/giswqs/earthengine-py-notebooks/master?filepath=GetStarted/02_adding_data_to_qgis.ipynb"><img width=58px src="https://mybinder.org/static/images/logo_social.png" />Run in binder</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/GetStarted/02_adding_data_to_qgis.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
</table>
## Install Earth Engine API
Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geehydro](https://github.com/giswqs/geehydro). The **geehydro** Python package builds on the [folium](https://github.com/python-visualization/folium) package and implements several methods for displaying Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, `Map.centerObject()`, and `Map.setOptions()`.
The following script checks if the geehydro package has been installed. If not, it will install geehydro, which automatically install its dependencies, including earthengine-api and folium.
```
# Ensure the geehydro package (and its dependencies, including earthengine-api
# and folium) is installed, installing it on first run only.
import subprocess
import sys

try:
    import geehydro
except ImportError:
    print('geehydro package not installed. Installing ...')
    # Use the running interpreter (sys.executable) so the package lands in the
    # active environment rather than whatever "python" resolves to on PATH.
    subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'geehydro'])
```
Import libraries
```
import ee
import folium
import geehydro
```
Authenticate and initialize Earth Engine API. You only need to authenticate the Earth Engine API once.
```
# Initialize the Earth Engine API. If no cached credentials exist, Initialize()
# raises; run the one-time interactive authentication flow and retry.
try:
    ee.Initialize()
except Exception as e:  # broad catch keeps the notebook usable on first run
    ee.Authenticate()
    ee.Initialize()
```
## Create an interactive map
This step creates an interactive map using [folium](https://github.com/python-visualization/folium). The default basemap is the OpenStreetMap. Additional basemaps can be added using the `Map.setOptions()` function.
The optional basemaps can be `ROADMAP`, `SATELLITE`, `HYBRID`, `TERRAIN`, or `ESRI`.
```
# Folium map centered on the continental US. setOptions() is one of the Earth
# Engine helpers that geehydro patches onto folium.Map.
Map = folium.Map(location=[40, -100], zoom_start=4)
Map.setOptions('HYBRID')
```
## Add Earth Engine Python script
```
# Load a single Landsat 8 scene by asset id.
image = ee.Image('LANDSAT/LC08/C01/T1/LC08_044034_20140318')
# Center the map on the image (zoom level 9).
Map.centerObject(image, 9)
# Display the image with default (empty) visualization parameters.
Map.addLayer(image, {}, 'Landsat 8 original image')
# Define visualization parameters in an object literal: a B5/B4/B3 band
# combination (false color, per the layer name below), a value stretch, and gamma.
vizParams = {'bands': ['B5', 'B4', 'B3'],
'min': 5000, 'max': 15000, 'gamma': 1.3}
# Center the map on the image and display the false-color composite.
Map.centerObject(image, 9)
Map.addLayer(image, vizParams, 'Landsat 8 False color')
# Map.addLayer() also accepts features and feature collections. Here the county
# outlines are painted (value 0, width 2) onto an empty image for display.
counties = ee.FeatureCollection('TIGER/2016/Counties')
Map.addLayer(ee.Image().paint(counties, 0, 2), {}, 'counties')
```
## Display Earth Engine data layers
```
# Enable the layer control, fullscreen button and lat/lng popup, then render
# the map as the cell output.
Map.setControlVisibility(layerControl=True, fullscreenControl=True, latLngPopup=True)
Map
```
| github_jupyter |
# Starbucks Capstone Challenge
## Project Overview
This data set contains simulated data that mimics customer behavior on the Starbucks rewards mobile app. Once every few days, Starbucks sends out an offer to users of the mobile app. An offer can be merely an advertisement for a drink or an actual offer such as a discount or BOGO (buy one get one free). Some users might not receive any offer during certain weeks.
Not all users receive the same offer, and that is the challenge to solve with this data set.
Here we have 3 simulated datasets from Starbucks covering offers sent to users, user info, and events such as transactions and offers being handed out.
## Our Aim
#### 1-try to predict using a machine learning given a user's info and offer's info if that user is likely to use the offer and that could help figuring out which users should starbucks target
#### 2-find the most correlated features that affects offers getting completed from both the user side and the offer side
#### 3-(Bonus)to predict the total amount a person could spend on starbucks products given that person demographics
### Exploration
First I will simply check all 3 data sets to see if we can find anything interesting, figure out their structure and possible outliers, and do a few visualizations to see the distribution of the users' ages and incomes, and the percentage of males to females.
### Cleaning
Second thing I will clean the data so that it can be input to the ML learning model to predict if a user would respond to an offer or not and to predict the total amount a user would spend at starbucks
### Implementation
I will check the correlations in the cleaned dataframe, and in order to predict whether a user will respond to an offer I will build a random forest model and use grid search to optimize the hyperparameters.
I will also build random forest and gradient boosting models to predict the total amount users would spend at Starbucks, again using grid search to optimize the hyperparameters.
### Conclusion and future optimiztions
This section covers the results of the model and what I sugget to improve the results
## Metrics
To evaluate the performance of the offer success model I will use 3 metrics: the F1-score, precision, and recall.
Precision is the ratio of correctly predicted positive observations to the total predicted positive observations.
Precision = TP/TP+FP
Recall (Sensitivity) - Recall is the ratio of correctly predicted positive observations to the all observations in actual class
Recall = TP/TP+FN
F1 score - F1 Score is the weighted average of Precision and Recall. Therefore, this score takes both false positives and false negatives into account.
F1 Score = 2*(Recall * Precision) / (Recall + Precision)
I will evaluate the performance of the total-amount-spent model using R-squared (the coefficient of determination), a statistical measure in a regression model that determines the proportion of variance in the dependent variable that can be explained by the independent variables. In other words, R-squared shows how well the data fit the regression model (the goodness of fit).
R_squared=1−(sum squared regression (SSR)/total sum of squares (SST))
SST, is the squared differences between the observed dependent variable and its mean
SSR, is the sum of the differences between the predicted value and the mean of the dependent variable. Think of it as a measure that describes how well our line fits the data.
# Data Sets
The data is contained in three files:
* portfolio.json - containing offer ids and meta data about each offer (duration, type, etc.)
* profile.json - demographic data for each customer
* transcript.json - records for transactions, offers received, offers viewed, and offers completed
Here is the schema and explanation of each variable in the files:
**portfolio.json**
* id (string) - offer id
* offer_type (string) - type of offer ie BOGO, discount, informational
* difficulty (int) - minimum required spend to complete an offer
* reward (int) - reward given for completing an offer
* duration (int) - time for offer to be open, in days
* channels (list of strings)
**profile.json**
* age (int) - age of the customer
* became_member_on (int) - date when customer created an app account
* gender (str) - gender of the customer (note some entries contain 'O' for other rather than M or F)
* id (str) - customer id
* income (float) - customer's income
**transcript.json**
* event (str) - record description (ie transaction, offer received, offer viewed, etc.)
* person (str) - customer id
* time (int) - time in hours since start of test. The data begins at time t=0
* value - (dict of strings) - either an offer id or transaction amount depending on the record
## Exploring the 3 dataframes
```
import pandas as pd
import numpy as np
import math
import json
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.metrics import r2_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import GradientBoostingClassifier
% matplotlib inline
# read in the json files (one record per line)
portfolio = pd.read_json('data/portfolio.json', orient='records', lines=True)
profile = pd.read_json('data/profile.json', orient='records', lines=True)
transcript = pd.read_json('data/transcript.json', orient='records', lines=True)
portfolio.head()
portfolio.shape
portfolio
profile.head()
profile.dtypes
# Summary statistics for the customer demographics.
mean=profile['age'].mean()
std=profile['age'].std()
print("The mean for the age of the customers is {} and the stddev is {}".format(mean,std))
mean_1=profile['income'].mean()
std_1=profile['income'].std()
# Fixed message: this line reports income, not age.
print("The mean income of the customers is {} and the stddev is {}".format(mean_1,std_1))
# Percentage of missing values per column.
profile.isnull().sum() * 100 / len(profile)
```
It seems that the exact percentage of users that didn't report gender didn't report income as well
### Data Visualization
```
# Age distribution of all customers.
# NOTE(review): the cluster near 118 looks like a placeholder for missing ages
# -- confirm against the raw data.
plt.hist(profile['age'])
plt.xlabel('Age')
plt.ylabel('Number of customers')
```
We can see some outliers here, probably with a fake age, as there are too many customers near 120 years old; however most of the users seem to be between 40 and 80 years old.
```
# Income distribution (rows with missing demographics dropped first).
profile_nonan=profile.dropna(axis=0)
plt.hist(profile_nonan['income'])
plt.xlabel('income')
plt.ylabel('Number of customers')
# Percentage of male, female and other customers as a pie chart.
labels=['Male','Female','Other']
sizes=[sum(profile_nonan['gender']=='M'),sum(profile_nonan['gender']=='F'),sum(profile_nonan['gender']=='O')]
fig1, ax1 = plt.subplots()
# autopct prints each slice's share with one decimal place.
ax1.pie(sizes, labels=labels, autopct='%1.1f%%',
shadow=True, startangle=90)
transcript.head()
```
## Data Preprocessing
```
portfolio.head()
# One-hot encode the delivery channels: each flag is 1 when the channels list
# contains that channel (a loop replaces four copy-pasted lines; column order
# email/web/mobile/social is preserved).
for channel in ('email', 'web', 'mobile', 'social'):
    portfolio[channel] = portfolio['channels'].astype(str).str.contains(channel).astype(int)
portfolio.drop(['channels'], axis=1, inplace=True)
# Replace offer_type with its one-hot columns (bogo / discount / informational).
portfolio = pd.concat([portfolio.drop('offer_type', axis=1), pd.get_dummies(portfolio['offer_type'])], axis=1)
portfolio.head()
profile.head()
# Replace gender with dummy columns (F / M / O).
profile = pd.concat([profile.drop('gender', axis=1), pd.get_dummies(profile['gender'])], axis=1)
profile.head()
transcript.head()
# Distinct event types present in the transcript.
np.unique(transcript.event.values)
# Keep only offer-related events. .copy() avoids SettingWithCopyWarning on the
# column assignments below.
transcript_offer = transcript[transcript['value'].astype(str).str.contains('offer')].copy()
transcript_offer.head()
# Extract the 32-character offer id from the stringified value dict.
# NOTE(review): fixed-position slicing assumes every value renders with the id
# starting at offset 14 -- confirm against the raw data.
transcript_offer['offer_id'] = transcript_offer['value'].astype(str).str.slice(14, 46)
transcript_offer.drop(['value'], inplace=True, axis=1)
# One-hot encode the event type, then drop the original column.
dummies = pd.get_dummies(transcript_offer['event'])
transcript_offer = pd.concat([transcript_offer, dummies], axis=1)
transcript_offer.drop(['event'], inplace=True, axis=1)
transcript_offer.head()
# One row per (person, offer) pair; keep the first occurrence of each.
df = transcript_offer[(~transcript_offer.duplicated(['person', 'offer_id']))]
sum(transcript_offer.duplicated(['person', 'offer_id']))
```
To create a model to predict if a user responds to an offer we I want us to have each row represnts if a user recived offer,viewed it and completed it so we need to combine our dummy varaibles by person and offer_id
```
# Aggregate the event flags per (person, offer). groupby sorts by its keys, so
# df must be sorted the same way before the aggregated arrays are assigned
# positionally -- the original code assigned key-sorted .values onto df rows in
# transcript order, silently misaligning the counts. .copy() also avoids
# SettingWithCopyWarning since df is a boolean slice.
df = df.sort_values(['person', 'offer_id']).copy()
event_counts = transcript_offer.groupby(['person', 'offer_id']).sum()
df['offer completed'] = event_counts['offer completed'].values
df['offer received'] = event_counts['offer received'].values
df['offer viewed'] = event_counts['offer viewed'].values
df.shape
# Create a DataFrame holding only the monetary transactions.
transcript_transcation = transcript[transcript['value'].astype(str).str.contains('amount')].copy()
# Extract the numeric amount from the stringified value dict.
# NOTE(review): fixed-offset slicing assumes every value renders as
# "{'amount': <num>}" -- confirm against the raw data.
transcript_transcation['amount'] = transcript_transcation['value'].astype(str).str.slice(10, -1)
transcript_transcation.drop(['value'], inplace=True, axis=1)
# amount is still a string at this point...
transcript_transcation.dtypes
# ...so convert it to a numeric dtype.
transcript_transcation['amount'] = pd.to_numeric(transcript_transcation['amount'])
transcript_transcation.head()
# Total spent by each user, mapped onto their profile row.
total = dict(transcript_transcation.groupby(['person']).sum()['amount'])
profile['total'] = profile.id.map(total)
profile.head()
# Attach user demographics, then offer metadata, to each (person, offer) row.
df = pd.merge(df, profile, left_on='person', right_on='id', how='left')
df.head()
df = pd.merge(df, portfolio, left_on='offer_id', right_on='id', how='left')
df.head()
# Percentage of missing values per column, then drop incomplete rows.
df.isnull().sum() * 100 / len(df)
df.dropna(inplace=True, axis=0)
# We evaluate whether a person responds to an offer, so keep only users who
# actually viewed it.
df = df[df['offer viewed'] >= 1]
```
## Modelling and Implementation
```
# Pairwise correlations among the numeric profile features (id excluded),
# rendered as an annotated heatmap.
corr_matrix = profile.drop(['id'], axis=1).corr()
fig, ax = plt.subplots(figsize=(8, 8))
sns.heatmap(
    corr_matrix,
    xticklabels=corr_matrix.columns,
    yticklabels=corr_matrix.columns,
    annot=True,
    ax=ax,
)
```
total_amount_spent seems to have a positive correlation with income and females and a negative correlation with age
so it seems that younger females with hgiher income will spend more money on starbucks
```
# Drop users with missing demographics, then predict lifetime spend ('total')
# from the remaining profile features.
profile.dropna(inplace=True)
X_total=profile.drop(['total','id'],axis=1)
y_total=profile['total']
# Reproducible 80/20 split.
x_train_total,x_test_total,y_train_total,y_test_total=train_test_split(X_total,y_total,test_size=.20,random_state=42)
pipeline_total = Pipeline(
[
("clf",RandomForestRegressor())
]
)
# Grid: forest size and tree depth; n_jobs=-1 uses all cores.
parameters = {
'clf__n_estimators':[100,200],
'clf__n_jobs':[-1],
'clf__max_depth':[5,10]
}
cv_total = GridSearchCV(pipeline_total, param_grid=parameters,n_jobs=-1)
cv_total.fit(x_train_total,y_train_total)
y_pred_total=cv_total.predict(x_test_total)
# NOTE(review): this rebinds the name r2_score from sklearn's function to a
# float, so any later *call* to r2_score(...) would fail. The notebook only
# displays the value afterwards, but a distinct variable name would be safer.
r2_score=r2_score(y_test_total, y_pred_total)
df.head()
#checking which features of a person most affect the offer being completed
Var_Corr_person_offer = df[['offer completed','age','became_member_on','income','F', 'M',
'O', 'total']].corr()
# plot the heatmap and annotation on it
fig, ax = plt.subplots(figsize=(8,8))
sns.heatmap(Var_Corr_person_offer, xticklabels=Var_Corr_person_offer.columns, yticklabels=Var_Corr_person_offer.columns, annot=True,ax=ax)
```
From the above not many deductions can be made, but it doesn't look like offer completion has a strong correlation with any of these user features.
```
#emails value is always one so will remove it from here and from the model
Var_Corr_offer = df[['offer completed','difficulty', 'duration', 'reward',
'web', 'mobile', 'social', 'bogo', 'discount', 'informational']].corr()
# plot the heatmap and annotation on it
fig, ax = plt.subplots(figsize=(10,10))
sns.heatmap(Var_Corr_offer, xticklabels=Var_Corr_offer.columns, yticklabels=Var_Corr_offer.columns, annot=True,ax=ax)
```
The correlation between the offer being completed and other offer features are also not particulary strong the strongest one seems to be if the offer channel is social media with correlation =0.014
```
# Features: drop identifiers, the event time stamp, and the target itself.
X = df.drop(['person', 'time', 'offer_id', 'id_x', 'id_y', 'offer completed'], axis=1)
# Binary target: 1 if the offer was completed at least once, else 0.
# .copy() avoids mutating the underlying df column via chained assignment.
y = df['offer completed'].copy()
y[y > 1] = 1
# random_state makes the split reproducible, consistent with the earlier split
# used for the total-spend model.
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=.20, random_state=42)
X.columns
# Baseline: K-nearest-neighbours classifier.
pipeline_KNN = Pipeline(
    [
        ("clf", KNeighborsClassifier())
    ]
)
parameters = {
    'clf__n_jobs': [-1],
}
# 10-fold cross-validation to guard against overfitting.
cv_KNN = GridSearchCV(pipeline_KNN, param_grid=parameters, n_jobs=-1, cv=10)
cv_KNN.fit(x_train, y_train)
y_pred_KNN = cv_KNN.predict(x_test)
print(classification_report(y_test, y_pred_KNN))
# Check for overfitting: compare performance on the training set.
y_pred_KNN_train = cv_KNN.predict(x_train)
print(classification_report(y_train, y_pred_KNN_train))
# Random forest with a small grid over forest size and tree depth;
# max_depth bounds the trees to curb overfitting.
pipeline_RF = Pipeline(
    [
        ("clf", RandomForestClassifier())
    ]
)
parameters = {
    'clf__n_estimators': [100, 200],
    'clf__n_jobs': [-1],
    'clf__max_depth': [5, 10]
}
cv_RF = GridSearchCV(pipeline_RF, param_grid=parameters, n_jobs=-1, cv=10)
cv_RF.fit(x_train, y_train)
y_pred_RF = cv_RF.predict(x_test)
print(classification_report(y_test, y_pred_RF))
```
The n_estimators here is the number of trees and max_depth is the depth of each tree, so it is important to iterate over them to see which combination performs best and doesn't under- or over-fit; n_jobs=-1 means use full processing power. I use 10-fold cross-validation as well to avoid overfitting.
```
# Check for overfitting by predicting on the training data.
y_pred_RF_Train = cv_RF.predict(x_train)
print(classification_report(y_train, y_pred_RF_Train))
# To improve the scores, standardize the features and try gradient boosting.
# learning_rate shrinks each tree's contribution; max_depth bounds the trees.
parameters = {
    "learning_rate": [0.1, 0.2],
    "max_depth": [5, 10],
}
scaler = StandardScaler()
x_train = scaler.fit_transform(x_train)
# Fit the scaler on the training data only, then transform (NOT fit_transform)
# the test set -- re-fitting on the test set leaks its statistics into the
# preprocessing and skews the evaluation.
x_test = scaler.transform(x_test)
cv_GB = GridSearchCV(GradientBoostingClassifier(), parameters, cv=10, n_jobs=-1)
cv_GB.fit(x_train, y_train)
y_pred_gb = cv_GB.predict(x_test)
print(classification_report(y_test, y_pred_gb))
```
learning_rate shrinks the contribution of each tree, max depth is the depth of the tree both of these are important to avoid under and over fitting.
```
# Check for overfitting by predicting on the train data and comparing its
# classification report with the test-set report above.
y_pred_gb_Train=cv_GB.predict(x_train)
print(classification_report(y_train, y_pred_gb_Train))
```
### Model Evaluation and validation
```
## Evaluating the total amount spent model
# NOTE(review): this bare expression only echoes the r2_score function object
# in the notebook output -- it does not compute a score. Presumably a call
# such as r2_score(y_true, y_pred) was intended; confirm against the
# regression cell that produced the reported 0.216.
r2_score
## Evaluating both ML models on the test set.
print(classification_report(y_test, y_pred_KNN,target_names=['KNN_no','KNN_yes']))
print(classification_report(y_test, y_pred_RF,target_names=['rand_forrest_no','rand_forrest_yes']))
print(classification_report(y_test, y_pred_gb,target_names=['gradient_boosting_no','gradient_boosting_yes']))
```
### Justification
#### For the offer prediction
I first tried KNN, then a random forest and a gradient booster. In the beginning the random forest was overfitting: it performed very well on the training set, with an accuracy of 100%, but only 40% on the test set, so I added the max_depth hyperparameter to limit the tree depth and tried to find a balance between it and the number of trees.
Gradient booster didn't overfit as it is more robust to it the grid search main objective here was to optimize further.
When the classifier had to report how many offers were completed, and thus had four classes, the test accuracy was much lower; I fixed this by mapping the target to 0 when no offer was completed and to 1 otherwise.
We also have the results of both models on the training sets with the hyperparameter optimization.
Almost all models result in a 50% average F1 score with doing better with predicating users not accepting the offer. We tested on the training set to ensure no overfitting happened.
#### For total amount
We achieved a R2 score of 0.216 which is not great but looks promising with more data
## Conclusion
### Reflection
Here we attempted to create an ML model that accurately predicts whether a user will respond to an offer, given the user's and the offer's information. This would help us create personalized offers that we know users will use, and help us understand which users prefer which offers.
What was particularly challenging was preprocessing the data to feed it to the models, as we created a dataframe of every unique user–offer combination together with the details of the user and the offer. We could turn this into a function and reuse it as soon as we have more data. We also found some abnormalities in the age distribution that might be worth checking in the future. The customer gender seems to be split almost evenly between male and female, with just a few classified as other.
I used a random forest model and a gradient booster, and performed a grid search to optimize the hyperparameters.
if anyone has any ideas about how to improve I would love to hear from you
### Improvements
There are two things I would try in order to improve this. First, get more data and see whether that improves model performance, since even on the training data — across different models and hyperparameters — the training accuracy is not very high. Second, capture more features: for example, a customer-satisfaction rating, a survey of how often customers go to Starbucks, or other information about the users such as where they live and how close the nearest Starbucks is. We could also run a wider grid search with more parameters, but that model would require quite some time to go through everything.
| github_jupyter |
<table>
<tr>
<td style="background-color:#ffffff;"><a href="https://qsoftware.lu.lv/index.php/qworld/" target="_blank"><img src="..\images\qworld.jpg" width="70%" align="left"></a></td>
<td style="background-color:#ffffff;" width="*"></td>
<td style="background-color:#ffffff;vertical-align:text-top;"><a href="https://qsoftware.lu.lv" target="_blank"><img src="..\images\logo.jpg" width="25%" align="right"></a></td>
</tr>
<tr><td colspan="3" align="right" style="color:#777777;background-color:#ffffff;font-size:12px;">
prepared by <a href="http://abu.lu.lv" target="_blank">Abuzer Yakaryilmaz</a>
</td></tr>
<tr><td colspan="3" align="right" style="color:#bbbbbb;background-color:#ffffff;font-size:11px;font-style:italic;">
This cell contains some macros. If there is a problem with displaying mathematical formulas, please run this cell to load these macros.
</td></tr>
</table>
$ \newcommand{\bra}[1]{\langle #1|} $
$ \newcommand{\ket}[1]{|#1\rangle} $
$ \newcommand{\braket}[2]{\langle #1|#2\rangle} $
$ \newcommand{\dot}[2]{ #1 \cdot #2} $
$ \newcommand{\biginner}[2]{\left\langle #1,#2\right\rangle} $
$ \newcommand{\mymatrix}[2]{\left( \begin{array}{#1} #2\end{array} \right)} $
$ \newcommand{\myvector}[1]{\mymatrix{c}{#1}} $
$ \newcommand{\myrvector}[1]{\mymatrix{r}{#1}} $
$ \newcommand{\mypar}[1]{\left( #1 \right)} $
$ \newcommand{\mybigpar}[1]{ \Big( #1 \Big)} $
$ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $
$ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $
$ \newcommand{\onehalf}{\frac{1}{2}} $
$ \newcommand{\donehalf}{\dfrac{1}{2}} $
$ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $
$ \newcommand{\vzero}{\myvector{1\\0}} $
$ \newcommand{\vone}{\myvector{0\\1}} $
$ \newcommand{\vhadamardzero}{\myvector{ \sqrttwo \\ \sqrttwo } } $
$ \newcommand{\vhadamardone}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $
$ \newcommand{\myarray}[2]{ \begin{array}{#1}#2\end{array}} $
$ \newcommand{\X}{ \mymatrix{cc}{0 & 1 \\ 1 & 0} } $
$ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $
$ \newcommand{\Htwo}{ \mymatrix{rrrr}{ \frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} } } $
$ \newcommand{\CNOT}{ \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $
$ \newcommand{\norm}[1]{ \left\lVert #1 \right\rVert } $
<h2>Multiple Rotations</h2>
The trivial way of implementing more than one rotation in parallel is to use a separate qubit for each rotation.
If we have $ t $ different rotations with angles $ \theta_1,\ldots,\theta_t $, then we can use $ t $ qubits.
Alternatively, we can use $ \log_2 (t) + 1 $ qubits (assuming that $t$ is a power of 2) that implement the following unitary matrix:
$$
R(\theta_1,\ldots,\theta_t) =
\mymatrix{rr|rr|cc|rr}{
\cos \theta_1 & -\sin \theta_1 & 0 & 0 & \cdots & \cdots & 0 & 0 \\
\sin \theta_1 & \cos \theta_1 & 0 & 0 & \cdots & \cdots & 0 & 0 \\ \hline
0 & 0 & \cos \theta_2 & -\sin \theta_2 & \cdots & \cdots & 0 & 0 \\
0 & 0 & \sin \theta_2 & \cos \theta_2 & \cdots & \cdots & 0 & 0 \\ \hline
\vdots & \vdots & \vdots & \vdots & \ddots & & \vdots & \vdots \\
\vdots & \vdots & \vdots & \vdots & & \ddots & \vdots & \vdots \\ \hline
0 & 0 & 0 & 0 & \cdots & \cdots & \cos \theta_t & -\sin \theta_t \\
0 & 0 & 0 & 0 & \cdots & \cdots & \sin \theta_t & \cos \theta_t \\
} .
$$
We can use this idea to solve the problem $\sf MOD_p$ (see <a href="B72_Rotation_Automata.ipynb" target="_blank">Rotation Automata</a>).
We implement $ t $ rotation automata in this way.
At the beginning of the computation, we apply Hadamard operator in each qubit. Then, we apply the operator $ R(\theta_1,\ldots,\theta_t) $ for each symbol from the stream. Once the stream is finished, we apply Hadamard operator in each qubit again.
If we observe only state 0 in each qubit, then we consider the stream having the length of a multiple of $\sf p$. Otherwise, we consider the stream having the length of not a multiple of $\sf p$.
<h3> Constructing $ R(\theta_1,\theta_2) $ </h3>
When $t=2$, $ \log_2 (2) + 1 = 2 $. So, both implementations use the same number of qubits.
But, it is a good starting point to construct the following unitary operator:
$$
R(\theta_1,\theta_2) =
\mymatrix{rrrr}{
\cos \theta_1 & -\sin \theta_1 & 0 & 0 \\
\sin \theta_1 & \cos \theta_1 & 0 & 0 \\
0 & 0 & \cos \theta_2 & -\sin \theta_2 \\
0 & 0 & \sin \theta_2 & \cos \theta_2 \\
} .
$$
<div style="background-color:#f8f8f8;">
<b> Technical Remark:</b>
When two qubits are combined (tensored) in qiskit, say $ qreg[0] $ and $ qreg[1] $, they are ordered as $ qreg[1] \otimes qreg[0] $.
If there are $n$ qubits, say $ qreg[0],\ldots,qreg[n-1] $ to be combined, they are ordered in qiskit as
$$ qreg[n-1] \otimes \cdots \otimes qreg[0] . $$
</div>
We use a controlled rotation gate $ cu3 $ in qiskit.
<b> Gate $u3$: </b>
The gate $ u3 $ is a generic one-qubit gate for rotation on Bloch sphere. It takes three parameters, and if we pass zeros as the second and third parameters, we implement our rotation gate $ ry $:
u3(2*theta,0,0,qubit)
is equivalent to
ry(2*theta,qubit)
Both make a rotation with angle $\theta$ in the real-valued qubit in counter-clockwise direction.
<b> Gate $cu3$: </b>
The two-qubit gate $ cu3 $ takes five parameters. We use it as follows:
cu3(2*theta,0,0,control_qubit,target_qubit)
If the control qubit is in state $ \ket{1} $, then the rotation
ry(2*theta,target_qubit)
is applied (to the target qubit).
The base states of two qubits are ordered as $ \myarray{c}{00 \\ 01 \\ 10 \\ 11 } $
or equivalently grouped as $ \myarray{c}{ 0 \otimes \myvector{0 \\ 1} \\ \hline 1 \otimes \myvector{0 \\ 1} } $.
We can apply a rotation to the first qubit controlled by the second qubit.
To construct $ R(\theta_1,\theta_2) $:
<ol>
<li> When the second qubit is in state $ \ket{0} $, we can apply the rotation with angle $ \theta_1 $. </li>
<li> When the second qubit is in state $ \ket{1} $, we can apply the rotation with angle $ \theta_2 $. </li>
</ol>
Now, we implement this by also printing the constructed unitary matrix.
```
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer
from math import pi

# Rotation angles for the two diagonal blocks of R(theta1, theta2).
theta1 = pi / 4
theta2 = pi / 6

# Two-qubit circuit: qubit 0 is rotated, qubit 1 selects which angle applies.
qreg = QuantumRegister(2)
creg = ClassicalRegister(2)
mycircuit = QuantumCircuit(qreg, creg)

# Rotation by theta1 when the second qubit is |0>: flip it to |1>, apply the
# controlled rotation, then flip it back.
mycircuit.x(qreg[1])
mycircuit.cu3(2 * theta1, 0, 0, qreg[1], qreg[0])
mycircuit.x(qreg[1])

# Rotation by theta2 when the second qubit is |1>.
mycircuit.cu3(2 * theta2, 0, 0, qreg[1], qreg[0])

# Extract the overall unitary (optimization disabled so gate order is kept).
job = execute(mycircuit, Aer.get_backend('unitary_simulator'), optimization_level=0)
u = job.result().get_unitary(mycircuit, decimals=3)

# Print the real parts, right-aligned in 8-character columns.
for row in u:
    print("".join(f"{str(entry.real):>8}" for entry in row))
```
<h3>Task 1</h3>
Verify that the printed matrix is $ R(\pi/4,\pi/6) $.
```
# NOTE(review): the qiskit import in the original cell was unused here and has
# been dropped; this verification is pure math.
# BUG FIX: sin and cos were used below but never imported (NameError at run time).
from math import pi, sin, cos

# the angles of rotations
theta1 = pi/4
theta2 = pi/6

# Expected unitary R(pi/4, pi/6): each 2x2 diagonal block is the rotation
# [[cos t, -sin t], [sin t, cos t]].
# BUG FIX: the second row of a rotation block is (sin t, cos t); the original
# printed -cos(t) there, which does not match the circuit's unitary.
print(round(cos(theta1), 3), -round(sin(theta1), 3), 0, 0)
print(round(sin(theta1), 3), round(cos(theta1), 3), 0, 0)
print(0, 0, round(cos(theta2), 3), -round(sin(theta2), 3))
print(0, 0, round(sin(theta2), 3), round(cos(theta2), 3))
```
<a href="B76_Multiple_Rotations_Solutions.ipynb#task1">click for our solution</a>
<h3> Constructing $ R(\theta_1,\theta_2,\theta_3,\theta_4) $ </h3>
We can use $ \log_2(4) + 1 = 3 $ qubits to construct $ R(\theta_1,\theta_2,\theta_3,\theta_4) $.
The base states of three qubits are ordered as $ \myarray{c}{000 \\ 001 \\ 010 \\ 011 \\ 100 \\ 101 \\ 110 \\ 111 } $
or equivalently grouped as $
\myarray{c}{
00 \otimes \myvector{0 \\ 1} \\ \hline
01 \otimes \myvector{0 \\ 1} \\ \hline
10 \otimes \myvector{0 \\ 1} \\ \hline
11 \otimes \myvector{0 \\ 1}
} $.
By using a rotation gate controlled by two qubits, we can easily implement our unitary operator.
But, if we have a rotation gate controlled by only one qubit, then we use additional tricks (and qubits) and controlled CNOT gate by two qubits (also called Toffoli gate):
circuit.ccx(control-qubit1,control-qubit2,target-qubit)
<div style="background-color:#f9f9f9;">
In general, if $ t = 2^n $, then we can construct $ R(\theta_1,\ldots,\theta_t) $ by using no more than $ 2\log_2(t) $ qubits (instead of $t$ qubits).
</div>
<h3> Pseudo construction </h3>
We start with a construction using three angles.
<h3> Task 2</h3>
Consider a quantum circuit with 3 qubits.
When the third qubit is in state $ \ket{1} $, apply the gate
cu3(2*theta1,0,0,qreg[2],qreg[0])
When the second qubit is in state $ \ket{1} $, apply the gate
cu3(2*theta2,0,0,qreg[1],qreg[0])
When the third qubit is in state $ \ket{0} $, apply the gate
cu3(2*theta3,0,0,qreg[2],qreg[0])
Guess the corresponding unitary matrix, which should be of the form:
$$
\mymatrix{rr|rr|rr|rr}{
\cos a_1 & -\sin a_1 & 0 & 0 & 0 & 0 & 0 & 0 \\
\sin a_1 & \cos a_1 & 0 & 0 & 0 & 0 & 0 & 0 \\ \hline
0 & 0 & \cos a_2 & -\sin a_2 & 0 & 0 & 0 & 0 \\
0 & 0 & \sin a_2 & \cos a_2 & 0 & 0 & 0 & 0 \\ \hline
0 & 0 & 0 & 0 & \cos a_3 & -\sin a_3 & 0 & 0 \\
0 & 0 & 0 & 0 & \sin a_3 & \cos a_3 & 0 & 0 \\ \hline
0 & 0 & 0 & 0 & 0 & 0 & \cos a_4 & -\sin a_4 \\
0 & 0 & 0 & 0 & 0 & 0 & \sin a_4 & \cos a_4 \\
}
$$
In other words, find $a_1$, $ a_2 $, $a_3$, and $a_4$ in terms of $ \theta_1 $, $\theta_2$, and $ \theta_3 $.
<a href="B76_Multiple_Rotations_Solutions.ipynb#task2">click for our solution</a>
<h3>Task 3</h3>
Implement Task 2 by picking three angles, and verify the constructed matrix.
```
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer
from math import pi, sin, cos

# Three base rotation angles; the circuit combines them into four effective
# angles a1..a4, one per (qubit2, qubit1) control pattern.
theta1 = pi / 23
theta2 = 2 * pi / 23
theta3 = 4 * pi / 23
precision = 3

# Predicted effective angles (see Task 2), printed so the sin entries can be
# compared against the simulated unitary below.
print("a1 = theta3 => sin(a1) = ",round(sin(theta3),precision))
print("a2 = theta2+theta3 => sin(a2) = ",round(sin(theta2+theta3),precision))
print("a3 = theta1 => sin(a3) = ",round(sin(theta1),precision))
print("a4 = theta1+theta2 => sin(a4) = ",round(sin(theta1+theta2),precision))
print()

# Three-qubit circuit: qubit 0 is the rotation target, qubits 1-2 are controls.
q = QuantumRegister(3)
c = ClassicalRegister(3)
qc = QuantumCircuit(q, c)

# theta1 when the third qubit is |1>.
qc.cu3(2 * theta1, 0, 0, q[2], q[0])
# theta2 when the second qubit is |1>.
qc.cu3(2 * theta2, 0, 0, q[1], q[0])
# theta3 when the third qubit is |0>: flip, rotate, flip back.
qc.x(q[2])
qc.cu3(2 * theta3, 0, 0, q[2], q[0])
qc.x(q[2])

# Simulate and print the real parts of the unitary in right-aligned columns.
job = execute(qc, Aer.get_backend('unitary_simulator'), optimization_level=0)
unitary_matrix = job.result().get_unitary(qc, decimals=precision)
for row in unitary_matrix:
    print("".join(f"{str(cell.real):>{precision + 4}}" for cell in row))
```
<a href="B76_Multiple_Rotations_Solutions.ipynb#task3">click for our solution</a>
<h3>Task 4</h3>
Create a circuit for solving problem $ \sf MOD_{31} $ by using the implementation in Task 3.
Pick $ \theta_1 $, $ \theta_2 $, and $ \theta_3 $ randomly.
At the beginning of the stream and after reading the stream, apply Hadamard operators to each qubit.
Execute your quantum program on the streams of lengths from 1 to 31.
```
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer
from math import pi, sin, cos
from random import randrange

# Pick three random multiples of the base angle 2*pi/31 as rotation angles.
a1 = randrange(1,31)
theta1 = a1*2*pi/31
a2 = randrange(1,31)
theta2 = a2*2*pi/31
a3 = randrange(1,31)
theta3 = a3*2*pi/31

# Worst-case probability of wrongly observing '000' on stream lengths that
# are NOT multiples of 31.
max_percentage = 0

# Execute the quantum program on the streams of lengths from 1 to 31.
for i in range(1,32):
    # initialize the circuit
    q = QuantumRegister(3)
    c = ClassicalRegister(3)
    qc = QuantumCircuit(q,c)
    # apply Hadamard operators to each qubit
    for m in range(3):
        qc.h(q[m])
    print("stream of length",i,"is being read")
    for j in range(i):
        # controlled rotation when the third qubit is |1>
        qc.cu3(2*theta1,0,0,q[2],q[0])
        # controlled rotation when the second qubit is |1>
        qc.cu3(2*theta2,0,0,q[1],q[0])
        # controlled rotation when the third qubit is |0>
        qc.x(q[2])
        qc.cu3(2*theta3,0,0,q[2],q[0])
        qc.x(q[2])
    # apply Hadamard operators to each qubit
    for m in range(3):
        qc.h(q[m])
    # measure
    qc.measure(q,c)
    # execute the circuit N times
    N = 1000
    job = execute(qc,Aer.get_backend('qasm_simulator'),shots=N)
    counts = job.result().get_counts(qc)
    print(counts)
    # BUG FIX: the observed count previously rebound the name `c`, shadowing
    # the ClassicalRegister; use a dedicated name and dict.get() instead.
    zero_count = counts.get('000', 0)
    print('000 is observed',zero_count,'times out of',N)
    percentage = round(zero_count/N*100,1)
    # Only non-multiples of 31 count as "unwanted"; i == 31 SHOULD give '000'.
    if max_percentage < percentage and i != 31:
        max_percentage = percentage
    print("the ratio of 000 is ",percentage,"%")
    print()

print("maximum percentage of observing unwanted '000' is",max_percentage)
```
<a href="B76_Multiple_Rotations_Solutions.ipynb#task4">click for our solution</a>
<h3>Task 5 (optional)</h3>
Based on Task 4, design your own solution for problem $ \sf MOD_{91} $ by using four qubits.
Remark that up to 8 different rotations can be implemented by using four qubits.
```
#
# your solution is here
#
```
<h3> Main construction </h3>
To implement an operator controlled by two qubits, we use an auxiliary qubit.
Depending on the desired values of two qubits, the auxiliary qubit is flipped to $ \ket{1} $ and then the operation is implemented controlled by the auxiliary qubit.
Here we describe the case when the control qubits are in state $ \ket{01} $.
We also draw the circuit.
```
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer
from math import pi

# Four qubits: qubit 0 is the rotation target, qubits 1-2 are the controls,
# and qubit 3 serves as the auxiliary (work) qubit.
qreg = QuantumRegister(4)
circuit = QuantumCircuit(qreg)

# Goal: rotate qubit 0 only when (qubit2, qubit1) = (|0>, |1>).
# Map the |0> control onto |1> so a plain Toffoli can detect the pattern.
circuit.x(qreg[2])
# The auxiliary qubit flips to |1> exactly when both controls are now |1>.
circuit.ccx(qreg[2], qreg[1], qreg[3])
# Rotate the target conditioned on the auxiliary qubit.
circuit.cu3(2 * pi / 6, 0, 0, qreg[3], qreg[0])
# Uncompute: restore the auxiliary qubit and the flipped control.
circuit.ccx(qreg[2], qreg[1], qreg[3])
circuit.x(qreg[2])

circuit.draw()
```
Based on this idea, different rotation operators are applied to the first qubit when the third and second qubits are in $ \ket{00} $, $ \ket{01} $, $ \ket{10} $, and $ \ket{11} $.
We present how to construct $ R(\pi/10,2\pi/10,3\pi/10,4\pi/10) $.
```
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer
from math import pi,sin

# the angles of rotations
theta1 = pi/10
theta2 = 2*pi/10
theta3 = 3*pi/10
theta4 = 4*pi/10

# for verification, print sin(theta)'s
print("sin(theta1) = ",round(sin(theta1),3))
print("sin(theta2) = ",round(sin(theta2),3))
print("sin(theta3) = ",round(sin(theta3),3))
print("sin(theta4) = ",round(sin(theta4),3))
print()

qreg = QuantumRegister(4)
circuit = QuantumCircuit(qreg)

def _rotate_when(circ, reg, third, second, theta):
    """Rotate qubit 0 by `theta` when qubit 2 is |third> and qubit 1 is |second>.

    Controls that must match |0> are flipped to |1> first, so a single Toffoli
    onto the auxiliary qubit (qubit 3) detects the pattern; every side effect
    is uncomputed afterwards.  This factors out the four hand-written,
    near-identical gate groups of the original cell.
    """
    if third == 0:
        circ.x(reg[2])
    if second == 0:
        circ.x(reg[1])
    circ.ccx(reg[2], reg[1], reg[3])
    circ.cu3(2*theta, 0, 0, reg[3], reg[0])
    # reverse the effects (uncompute in the opposite order)
    circ.ccx(reg[2], reg[1], reg[3])
    if second == 0:
        circ.x(reg[1])
    if third == 0:
        circ.x(reg[2])

# One block of R(theta1,...,theta4) per (qubit2, qubit1) control pattern.
_rotate_when(circuit, qreg, 0, 0, theta1)
_rotate_when(circuit, qreg, 0, 1, theta2)
_rotate_when(circuit, qreg, 1, 0, theta3)
_rotate_when(circuit, qreg, 1, 1, theta4)

# read the corresponding unitary matrix
job = execute(circuit,Aer.get_backend('unitary_simulator'),optimization_level=0)
unitary_matrix=job.result().get_unitary(circuit,decimals=3)
for i in range(len(unitary_matrix)):
    s=""
    for j in range(len(unitary_matrix)):
        val = str(unitary_matrix[i][j].real)
        while(len(val)<7): val = " "+val
        s = s + val
    print(s)
```
<b>Remarks:</b>
The constructed matrix is bigger than our main matrix because of the auxiliary qubit.
Our main matrix appears at the top-left quarter of the constructed matrix.
The rest of the constructed matrix does not affect our computation unless the auxiliary qubit is set to state $ \ket{1} $ (except the auxiliary operations).
<h3>Task 6 (optional)</h3>
Assume that $\theta_1=\frac{\pi}{11}$, $\theta_2=2\frac{\pi}{11}$, $\theta_3=4\frac{\pi}{11}$, and $\theta_4=8\frac{\pi}{11}$ are the given angles in the above construction.
Calculate (by hand or in your mind) the angles of the rotations in the bottom-left quarter of the constructed matrix by following the construction steps.
<h3>Task 7</h3>
Create a circuit for solving problem $ \sf MOD_{61} $ by using the above implementation.
Pick $ \theta_1 $, $ \theta_2 $, $ \theta_3 $, and $ \theta_4 $ randomly.
At the beginning of the stream and after reading the stream, apply Hadamard operators to each qubit.
Execute your quantum program on the streams of lengths 1, 11, 21, 31, 41, 51, and 61.
```
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer
from math import pi,sin
from random import randrange
# Solve MOD_61: decide whether the stream length is a multiple of 61 by
# running four rotation automata in parallel on qubit 0 (the rotation target),
# selected by control qubits 1 and 2, with qubit 3 as the auxiliary qubit.
# NOTE(review): 'max_percentange' and the printed word 'ration' are
# pre-existing typos, deliberately left untouched by this doc-only pass.
# the angle of rotation
# Four random multiples of the base angle 2*pi/61, one per automaton.
k1 = randrange(1,61)
theta1 = k1*2*pi/61
k2 = randrange(1,61)
theta2 = k2*2*pi/61
k3 = randrange(1,61)
theta3 = k3*2*pi/61
k4 = randrange(1,61)
theta4 = k4*2*pi/61
# Highest observed probability of the (unwanted) all-zero outcome on stream
# lengths that are NOT multiples of 61.
max_percentange = 0
# for each stream of length of 1, 11, 21, 31, 41, 51, and 61
for i in [1,11,21,31,41,51,61]:
#for i in range(1,62):
    # initialize the circuit
    qreg = QuantumRegister(4)
    creg = ClassicalRegister(4)
    circuit = QuantumCircuit(qreg,creg)
    # Hadamard operators before reading the stream
    # (only the three computational qubits; qubit 3 stays |0> as auxiliary)
    for m in range(3):
        circuit.h(qreg[m])
    # read the stream of length i
    print("stream of length",i,"is being read")
    for j in range(i):
        # One application of R(theta1..theta4) per symbol: each of the four
        # (qubit2, qubit1) patterns selects its own rotation of qubit 0.
        # the third qubit is in |0>
        # the second qubit is in |0>
        circuit.x(qreg[2])
        circuit.x(qreg[1])
        circuit.ccx(qreg[2],qreg[1],qreg[3])
        circuit.cu3(2*theta1,0,0,qreg[3],qreg[0])
        # reverse the effects
        circuit.ccx(qreg[2],qreg[1],qreg[3])
        circuit.x(qreg[1])
        circuit.x(qreg[2])
        # the third qubit is in |0>
        # the second qubit is in |1>
        circuit.x(qreg[2])
        circuit.ccx(qreg[2],qreg[1],qreg[3])
        circuit.cu3(2*theta2,0,0,qreg[3],qreg[0])
        # reverse the effects
        circuit.ccx(qreg[2],qreg[1],qreg[3])
        circuit.x(qreg[2])
        # the third qubit is in |1>
        # the second qubit is in |0>
        circuit.x(qreg[1])
        circuit.ccx(qreg[2],qreg[1],qreg[3])
        circuit.cu3(2*theta3,0,0,qreg[3],qreg[0])
        # reverse the effects
        circuit.ccx(qreg[2],qreg[1],qreg[3])
        circuit.x(qreg[1])
        # the third qubit is in |1>
        # the second qubit is in |1>
        circuit.ccx(qreg[2],qreg[1],qreg[3])
        circuit.cu3(2*theta4,0,0,qreg[3],qreg[0])
        # reverse the effects
        circuit.ccx(qreg[2],qreg[1],qreg[3])
    # Hadamard operators after reading the stream
    for m in range(3):
        circuit.h(qreg[m])
    # we measure after reading the whole stream
    circuit.measure(qreg,creg)
    # execute the circuit N times
    N = 1000
    job = execute(circuit,Aer.get_backend('qasm_simulator'),shots=N)
    counts = job.result().get_counts(circuit)
    print(counts)
    if '0000' in counts.keys():
        c = counts['0000']
    else:
        c = 0
    print('0000 is observed',c,'times out of',N)
    percentange = round(c/N*100,1)
    # Track the worst case over non-multiples of 61 only (i == 61 SHOULD
    # yield '0000' with high probability).
    if max_percentange < percentange and i != 61: max_percentange = percentange
    print("the ration of 0000 is ",percentange,"%")
    print()
print("maximum percentage of observing unwanted '0000' is",max_percentange)
```
<a href="B76_Multiple_Rotations_Solutions.ipynb#task7">click for our solution</a>
<h3>Task 8</h3>
How many qubits we use to implement the main construction having 16 rotations in parallel?
Please specify the number of control qubits and auxiliary qubits.
<a href="B76_Multiple_Rotations_Solutions.ipynb#task8">click for our solution</a>
<h3>Bonus (saving some qubits)</h3>
We can use additional trick to save some qubits in our implementation. The idea relies on the following fact: if you apply a rotation between two NOT gates, the rotation will happen in the opposite direction.
We can use this idea to implement a rotation by $\theta$ in the following way:
<ul>
<li>Rotate in the qubit by $\frac{\theta}{2}$;</li>
<li>Apply NOT to the qubit;</li>
<li>Rotate in the qubit by $-\frac{\theta}{2}$;</li>
<li>Apply NOT to the qubit.</li>
</ul>
As a result we will rotate in the qubit by $\theta$. We can control NOT and rotation operations and perform a rotation only when all control qubits are in state 1, relying on the following simple facts:
<ul>
<li>Two NOT gates result into identity operation;</li>
<li>Rotations by $\frac{\theta}{2}$ and $-\frac{\theta}{2}$ result into identity operation.</li>
</ul>
Below you can see the code that shows how can we use the discussed ideas to control rotations on one qubit by three qubits. If the state of at least one of the control qubits is 0, then the identity will be applied to the controlled qubit.
```
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer
from math import pi

qreg11 = QuantumRegister(4)
creg11 = ClassicalRegister(4)
theta = pi / 4

# define our quantum circuit
mycircuit11 = QuantumCircuit(qreg11, creg11)

def ccc_ry(angle, ctrl_a, ctrl_b, ctrl_c, target):
    """Apply ry(angle) to `target` only when all three controls are |1>.

    Trick: rotate by +angle/2 (controlled by ctrl_c), flip the target when
    ctrl_a AND ctrl_b, rotate by -angle/2, flip back.  The X between the two
    half-rotations reverses the second one's direction, so the halves add up
    only when every control is |1> and cancel to the identity otherwise.
    """
    mycircuit11.cu3(angle / 2, 0, 0, ctrl_c, target)
    mycircuit11.ccx(ctrl_a, ctrl_b, target)
    mycircuit11.cu3(-angle / 2, 0, 0, ctrl_c, target)
    mycircuit11.ccx(ctrl_a, ctrl_b, target)

ccc_ry(2 * theta, qreg11[3], qreg11[2], qreg11[1], qreg11[0])
mycircuit11.draw(output='mpl')
```
The code below demonstrates the implementation of 8 rotations with total 4 qubits, one of which is controlled by others.
```
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer
from math import pi
# Eight rotations in parallel on qubit 0, controlled by qubits 1-3, with NO
# auxiliary qubit: the +angle/2 / X / -angle/2 / X trick inside ccc_ry
# supplies the third control.
qreg12 = QuantumRegister(4)
creg12 = ClassicalRegister(4)
# One angle per control pattern |q3 q2 q1| = 000 ... 111.
theta1 = pi/16
theta2 = 2*pi/16
theta3 = 3*pi/16
theta4 = 4*pi/16
theta5 = 5*pi/16
theta6 = 6*pi/16
theta7 = 7*pi/16
theta8 = 8*pi/16
# define our quantum circuit
mycircuit12 = QuantumCircuit(qreg12,creg12)
def ccc_ry(angle,q1,q2,q3,q4):
    # Rotate q4 by `angle` only when q1, q2 and q3 are all |1>: the two
    # half-rotations add when every control is set and cancel otherwise.
    mycircuit12.cu3(angle/2,0,0,q3,q4)
    mycircuit12.ccx(q1,q2,q4)
    mycircuit12.cu3(-angle/2,0,0,q3,q4)
    mycircuit12.ccx(q1,q2,q4)
# Each block below maps its control pattern onto |111> with X gates, applies
# ccc_ry, then restores the controls.  The commented-out x() lines keep the
# fully symmetric template visible: they correspond to control bits that are
# already 1 in the target pattern and therefore need no flip.
# pattern 000 -> theta1
mycircuit12.x(qreg12[3])
mycircuit12.x(qreg12[2])
mycircuit12.x(qreg12[1])
ccc_ry(2*theta1,qreg12[3],qreg12[2],qreg12[1],qreg12[0])
mycircuit12.x(qreg12[1])
mycircuit12.x(qreg12[2])
mycircuit12.x(qreg12[3])
# pattern 001 -> theta2
mycircuit12.x(qreg12[3])
mycircuit12.x(qreg12[2])
#mycircuit12.x(qreg12[1])
ccc_ry(2*theta2,qreg12[3],qreg12[2],qreg12[1],qreg12[0])
#mycircuit12.x(qreg12[1])
mycircuit12.x(qreg12[2])
mycircuit12.x(qreg12[3])
# pattern 010 -> theta3
mycircuit12.x(qreg12[3])
#mycircuit12.x(qreg12[2])
mycircuit12.x(qreg12[1])
ccc_ry(2*theta3,qreg12[3],qreg12[2],qreg12[1],qreg12[0])
mycircuit12.x(qreg12[1])
#mycircuit12.x(qreg12[2])
mycircuit12.x(qreg12[3])
# pattern 011 -> theta4
mycircuit12.x(qreg12[3])
#mycircuit12.x(qreg12[2])
#mycircuit12.x(qreg12[1])
ccc_ry(2*theta4,qreg12[3],qreg12[2],qreg12[1],qreg12[0])
#mycircuit12.x(qreg12[1])
#mycircuit12.x(qreg12[2])
mycircuit12.x(qreg12[3])
# pattern 100 -> theta5
#mycircuit12.x(qreg12[3])
mycircuit12.x(qreg12[2])
mycircuit12.x(qreg12[1])
ccc_ry(2*theta5,qreg12[3],qreg12[2],qreg12[1],qreg12[0])
mycircuit12.x(qreg12[1])
mycircuit12.x(qreg12[2])
#mycircuit12.x(qreg12[3])
# pattern 101 -> theta6
#mycircuit12.x(qreg12[3])
mycircuit12.x(qreg12[2])
#mycircuit12.x(qreg12[1])
ccc_ry(2*theta6,qreg12[3],qreg12[2],qreg12[1],qreg12[0])
#mycircuit12.x(qreg12[1])
mycircuit12.x(qreg12[2])
#mycircuit12.x(qreg12[3])
# pattern 110 -> theta7
#mycircuit12.x(qreg12[3])
#mycircuit12.x(qreg12[2])
mycircuit12.x(qreg12[1])
ccc_ry(2*theta7,qreg12[3],qreg12[2],qreg12[1],qreg12[0])
mycircuit12.x(qreg12[1])
#mycircuit12.x(qreg12[2])
#mycircuit12.x(qreg12[3])
# pattern 111 -> theta8 (no flips needed)
#mycircuit12.x(qreg12[3])
#mycircuit12.x(qreg12[2])
#mycircuit12.x(qreg12[1])
ccc_ry(2*theta8,qreg12[3],qreg12[2],qreg12[1],qreg12[0])
#mycircuit12.x(qreg12[1])
#mycircuit12.x(qreg12[2])
#mycircuit12.x(qreg12[3])
# Extract and print the real parts of the resulting unitary.
job = execute(mycircuit12,Aer.get_backend('unitary_simulator'),optimization_level=0)
u=job.result().get_unitary(mycircuit12,decimals=3)
for i in range(len(u)):
    s=""
    for j in range(len(u)):
        val = str(u[i][j].real)
        while(len(val)<7): val = " "+val
        s = s + val
    print(s)
```
<h3>Task 9</h3>
By using the discussed ideas, how many qubits can we have to implement 16 rotations in parallel?
Please specify the number of control qubits and auxiliary qubits.
<a href="B76_Multiple_Rotations_Solutions.ipynb#task9">click for our solution</a>
| github_jupyter |
```
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
import pickle
from tqdm.notebook import tqdm
from tqdm import trange
%matplotlib inline
def read_list_of_arrays(filename):
    """Load a pickled list of numpy arrays and stack them as flattened rows.

    Parameters
    ----------
    filename : str
        Path to a pickle file containing either a list of arrays, or a
        3-element container whose first element is that list (the first
        entries of the other two elements are printed as metadata).

    Returns
    -------
    numpy.ndarray of shape (len(arrays), arrays[0].size), dtype float64,
    with one flattened array per row.
    """
    # BUG FIX: open the file with a context manager so the handle is closed
    # (the original leaked it via pickle.load(open(...))).
    with open(filename, 'rb') as fh:
        A = pickle.load(fh)
    # NOTE(review): a plain list of exactly 3 arrays would also trigger this
    # metadata branch -- confirm the on-disk format if that case can occur.
    if len(A) == 3:
        print(A[1][0], A[2][0])
        A = A[0]
    dim = A[0].flatten().shape[0]
    B = np.zeros((len(A), dim))
    for i in range(len(A)):
        B[i, :] = A[i].flatten()
    return B
# Checkpoint epochs at which generated clouds were saved (500, 1000, ..., 5000).
epochs = np.arange(500, 5500, 500)
epochs
# Reference point cloud built from the real training data.
cloud_base = read_list_of_arrays('/gan-clouds/timegan_data.pickle')
# One generated cloud per checkpoint epoch.
clouds = []
for ep in epochs:
    epo = ep
    clouds.append(read_list_of_arrays('/gan-clouds/timegan_various_epochs5k/model_%d.pickle' % epo))
cloud_base.shape
for cloud in clouds:
    print(cloud.shape)
```
### Compute cross-barcodes
```
import mtd

# Number of seeded repetitions per (source, destination) cloud pair.
trials = 50

def _barcode_trials(src, dst):
    # Re-seed before each pair so every cloud sees the same batch sampling.
    np.random.seed(7)
    return [mtd.calc_cross_barcodes(src, dst, batch_size1=100, batch_size2=1000,
                                    cuda=1, pdist_device='gpu')
            for _ in range(trials)]

# Cross-barcodes in both directions: real vs. generated, then generated vs. real.
res1 = [_barcode_trials(cloud_base, cloud) for cloud in tqdm(clouds)]
res2 = [_barcode_trials(cloud, cloud_base) for cloud in tqdm(clouds)]
```
### Absolute barcodes
```
# Absolute barcodes: the second cloud is empty, so (presumably) only the
# homology of the first argument is computed -- NOTE(review): confirm against
# the mtd library.  Also note the first result is overwritten before use.
barc = mtd.calc_cross_barcodes(clouds[-1], np.zeros((0,0)), batch_size1 = 100, batch_size2 = 0)
barc = mtd.calc_cross_barcodes(cloud_base, np.zeros((0,0)), batch_size1 = 100, batch_size2 = 0)
def get_scores(res, args_dict, trials = 10):
    """Average mtd.get_score over every barcode repetition, one mean per cloud.

    res       -- list (one entry per cloud) of lists of barcode results
    args_dict -- keyword arguments forwarded to mtd.get_score
    trials    -- NOTE(review): currently unused; the number of repetitions is
                 taken from len(res[i]) instead.  Kept for interface
                 compatibility.
    """
    scores = []
    for barcodes in res:
        per_trial = [mtd.get_score(barc, **args_dict) for barc in barcodes]
        scores.append(sum(per_trial) / len(per_trial))
    return scores
# Direction 1: real vs. generated, H1 total bar length.
scores = get_scores(res1, {'h_idx' : 1, 'kind' : 'sum_length'})
# NOTE(review): `ep` is unpacked but never used -- printing the epoch next to
# each score (print(ep, s)) may have been intended; confirm.
for ep, s in zip(epochs, scores):
    print(s)
# Direction 2: generated vs. real.
scores = get_scores(res2, {'h_idx' : 1, 'kind' : 'sum_length'})
for ep, s in zip(epochs, scores):
    print(s)
#pickle.dump(res1, open('res1_timegan.pickle', 'wb'))
#pickle.dump(res2, open('res2_timegan.pickle', 'wb'))
```
### PCA
```
import numpy as np
from sklearn.decomposition import PCA
%pylab inline
import matplotlib.pyplot as plt
# Create data
def plot2(data, groups = ("base", "cloud")):
    """Scatter-plot two 2-D point sets on shared axes.

    data   -- iterable of (x, y) coordinate-array pairs, one per group
    groups -- legend labels, matched positionally with `data`
    """
    colors = ("red", "green")
    # Create plot
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    # BUG FIX: the loop variable used to shadow the `data` parameter; use a
    # distinct name so the parameter stays intact inside the loop.
    for points, color, group in zip(data, colors, groups):
        x, y = points
        ax.scatter(x, y, alpha=0.5, c=color, edgecolors='none', s=5, label=group)
    plt.legend(loc=2)  # upper-left corner
    plt.show()
```
#### PCA from base+last GAN
```
# Project every epoch's generated cloud with a single PCA basis fitted on
# real data + the final-epoch generated cloud, so all plots share axes.
all_pca = []
# PERF FIX: the PCA fit was loop-invariant (always real + last cloud) but was
# refit identically on every iteration; fit it once before the loop.
pca = PCA(n_components=2)
pca.fit(np.concatenate((cloud_base, clouds[-1])))
cloud_base_pca = pca.transform(cloud_base)
for i in range(len(epochs)):
    data = [(cloud_base_pca[:, 0], cloud_base_pca[:, 1])]
    cloud_pca = pca.transform(clouds[i])
    data.append((cloud_pca[:, 0], cloud_pca[:, 1]))
    all_pca.append(data)
    plot2(data, groups = ("real", "generated, epoch %d" % epochs[i]))
#pickle.dump(all_pca, open('timegan_all_pca.pickle', 'wb'))
```
| github_jupyter |
```
import torch
import argparse
import csv
import datetime
import math
import torch.nn as nn
from torch.nn.functional import leaky_relu, softmax
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader
from collections import Counter
from GANutils import *
from utils import *
from validationUtils import *
from plotUtils import *
from TUutils import *
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from IPython.display import clear_output
# Load the survey data and rebuild the one-hot frame used elsewhere.
data = pd.read_pickle('Data/TU_onehot')
data = back_from_dummies(data)
data = data.drop(['HomeAdrMunCode'], axis=1)
data_oh = encode_onehot(data)
# Training log to visualize.
# BUG FIX: an earlier read of 'Logs/wgangp-2019-11-28_114731.csv' was dead
# code -- its result was overwritten here before ever being used.
## Logs/wgangp-2019-11-28_114731.csv - good
## Logs/wgangp-2019-11-30_105956 + converged too fast
df = pd.read_csv('Logs/wgangp-2019-11-30_130226.csv', header=None)
# 2x2 grid of training curves: critic loss, generator loss, real/fake
# average scores, and their margin.  (The original comment claimed "polar
# axes"; these are ordinary Cartesian subplots.)
fig, axes = plt.subplots(2, 2, figsize=(20,10))
axes[0, 0].plot(df[1])
axes[0, 0].set_title('Critic Loss')
axes[0, 0].set_ylabel('Loss')
axes[0, 0].set_xlabel('Epoch')
axes[0, 1].plot(df[2])
axes[0, 1].set_title('Generator Loss')
axes[0, 1].set_ylabel('Loss')
axes[0, 1].set_xlabel('Epoch')
axes[1, 0].plot(df[3])
axes[1, 0].plot(df[4])
axes[1, 0].legend(['Real Data', 'Fake Data'])
axes[1, 0].set_title('Average Scores')
axes[1, 0].set_ylabel('Score')
axes[1, 0].set_xlabel('Epoch')
# Margin between the critic's scores on real and fake samples.
axes[1, 1].plot(df[3] - df[4])
axes[1, 1].set_title('Margin')
axes[1, 1].set_ylabel('Score')
axes[1, 1].set_xlabel('Epoch')
plt.show()
fig.savefig('Figs/1.png')
def gen_noise(size: int, batch_size: int) -> torch.Tensor:
    """Generate a batch of Gaussian noise vectors for the generator.

    Args:
        size: Dimensionality of each noise vector.
        batch_size: Number of vectors to draw.

    Returns:
        A (batch_size, size) tensor of samples from N(0, 1) with
        requires_grad=False (the default for torch.randn).
    """
    # Fix: torch.autograd.Variable is deprecated since PyTorch 0.4 and was
    # never imported in this notebook; torch.randn already returns a plain
    # tensor that does not require grad.
    return torch.randn(batch_size, size)
def sample_gumbel(shape, eps=1e-20):
    """Draw standard Gumbel(0, 1) noise of the given shape.

    Uses the inverse-CDF trick g = -log(-log(U)) for U ~ Uniform(0, 1);
    ``eps`` guards against taking log(0).  The sample is placed on the
    notebook-global ``device``.
    """
    uniform = torch.rand(*shape).to(device)
    return -torch.log(-torch.log(uniform + eps))
def sample_gumbel_softmax(logits, temperature):
    """Sample from the Gumbel-softmax (concrete) distribution.

    Input:
        logits: Tensor of log probs, shape = BS x k.
        temperature: scalar; as it approaches 0 the samples tend towards a
            one-hot representation.
    Output:
        Tensor of shape BS x k whose rows lie on the probability simplex.
    """
    perturbed = (sample_gumbel(logits.shape) + logits) / temperature
    # Numerically stable softmax: subtract the row-wise max before exp so
    # the exponentials cannot overflow.
    stabilised = perturbed - perturbed.max(dim=-1, keepdim=True)[0]
    weights = torch.exp(stabilised)
    return weights / weights.sum(dim=-1, keepdim=True)
# Dimensionality of the generator's latent (noise) input.
INPUT_SIZE = 100

# Run on the first GPU when available, otherwise the CPU.
cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if cuda else "cpu")
class Generator (nn.Module):
    """MLP generator emitting concatenated per-variable categorical samples.

    The final linear layer's output is split into one slice per categorical
    variable (widths from ``cat``) and each slice is pushed through a
    Gumbel-softmax, so every slice lies on a probability simplex.

    NOTE(review): instances are persisted/reloaded with ``torch.load`` as a
    full pickled module elsewhere in this notebook, so attribute names
    (l1..l4, out) must not be changed without re-saving checkpoints.
    """
    def __init__(self,
                 input_size: int,
                 hidden_size: int,
                 temperature: float,
                 cat: Counter):
        """
        Args:
            input_size: Width of the latent noise vector.
            hidden_size: Base width of the hidden layers (scaled x2, x3).
            temperature: Gumbel-softmax temperature (lower -> more one-hot).
            cat: Mapping of categorical variable -> number of levels.
        """
        super(Generator, self).__init__()
        self.cat = cat
        # Per-variable one-hot widths, in insertion order; total output
        # width is their sum.
        self.cat_n = list(cat.values())
        self.output_size = sum(self.cat.values())
        # Relies on the notebook-global `device` defined earlier.
        self.temperature = torch.Tensor([temperature]).to(device)
        # Four LeakyReLU + BatchNorm + Dropout blocks, widening then
        # narrowing: h -> 2h -> 3h -> 2h.
        self.l1 = nn.Sequential(
            nn.Linear(input_size, hidden_size),
            nn.LeakyReLU(negative_slope=0.2),
            nn.BatchNorm1d(hidden_size, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
            nn.Dropout(0.3)
        )
        self.l2 = nn.Sequential(
            nn.Linear(hidden_size, hidden_size * 2),
            nn.LeakyReLU(negative_slope = 0.2),
            nn.BatchNorm1d(hidden_size * 2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
            nn.Dropout(0.3)
        )
        self.l3 = nn.Sequential(
            nn.Linear(hidden_size * 2, hidden_size * 3),
            nn.LeakyReLU(negative_slope = 0.2),
            nn.BatchNorm1d(hidden_size * 3, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
            nn.Dropout(0.3)
        )
        self.l4 = nn.Sequential(
            nn.Linear(hidden_size * 3, hidden_size * 2),
            nn.LeakyReLU(negative_slope = 0.2),
            nn.BatchNorm1d(hidden_size * 2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
            nn.Dropout(0.3)
        )
        # Raw logits for all categorical variables, concatenated.
        self.out = nn.Sequential(
            nn.Linear(hidden_size * 2, self.output_size))

    def forward(self,x):
        """Map latent noise (BS x input_size) to concatenated simplex
        samples (BS x output_size)."""
        x=self.l1(x)
        x=self.l2(x)
        x=self.l3(x)
        x=self.l4(x)
        x=self.out(x)
        ### Softmax per class: split the logits per categorical variable
        ### and Gumbel-softmax each slice independently.
        x = (x.split(self.cat_n, dim=1))
        out = torch.cat([sample_gumbel_softmax(v, temperature = self.temperature) for v in x], dim=1)
        return out
# Load the trained generator (a fully pickled nn.Module) and move it to
# the active device.
G = torch.load('Logs/wgangp-2019-11-30_130226')
G = G.to(device)

# Sample 100,000 synthetic records from latent noise.
z = gen_noise(INPUT_SIZE, 100000).to(device)
output = G.forward(z)
output = output.cpu().detach().numpy()
# NOTE(review): astype(int) *truncates* the Gumbel-softmax probabilities;
# unless the temperature is very low this mostly yields all-zero rows
# rather than an argmax one-hot — confirm this is intended.
output = output.astype(int)
fake_oh = pd.DataFrame(output, columns=data_oh.columns)
fake_oh.head()
# Decode one-hot columns back into categorical variables.
fake = back_from_dummies(fake_oh)
fake.head()
data.columns
data = data.astype('category')
fake = fake.astype('category')
# Compare real vs. generated distributions on selected variable subsets.
evaluate(data, fake, ['RespSex', 'RespPrimOcc', 'RespEdulevel'], data.columns, data, data)
evaluate(data, fake, ['HomeAdrNUTS', 'NuclFamType','RespSex', 'ResphasDrivlic'], data.columns, data, data)
# Reload the untouched data for the summary below.
data = pd.read_pickle('Data/TU_onehot')
data = back_from_dummies(data)
data.astype('category').describe()
```
| github_jupyter |
# SP via class imbalance
Example [test scores](https://www.brookings.edu/blog/social-mobility-memos/2015/07/29/when-average-isnt-good-enough-simpsons-paradox-in-education-and-earnings/)
Simpson's paradox can also occur due to a class imbalance, where, for example, over time the values of several different subgroups all increase, but the total average decreases over time. This is also an important type to catch because it can indicate a large class disparity based on the subgrouping variable.
```
import numpy as np
import pandas as pd
import matplotlib.pylab as plt
from mlsim import sp_plot

# Time axis: 11 points over 50 "years".
t = np.linspace(0,50,11)
# Group counts grow as count(t) = count_0 + rate * t**pow per group.
count_rate = np.asarray([1,1.5,1.4])
count_pow = np.asarray([1,1.4, 1.3])
count_0 = np.asarray([100,60,40])
count = np.asarray([count_0 + count_rate*(t_i**count_pow) for t_i in t])
# Each group's share of the total population at every time step.
share = count/np.asarray([np.sum(count, axis=1)]*3).T
# Group scores grow linearly from their starting values.
score_rate = np.asarray([.2, .25, .3])
score_0 = [310,290,280]
scores_group = np.asarray([score_0 + score_rate*t_i for t_i in t])
# Overall average = share-weighted sum of the group scores; the shifting
# shares are what let it fall while every group score rises.
total_score = np.sum(scores_group*share,axis=1)
total_score

plt.plot(t,scores_group)
plt.plot(t,total_score,'k', label ='average')
plt.title('score per group and averge');
plt.plot(t,count)
plt.title('count per group over time');
```
We can change the numbers a bit to see that it still works.
```
# Same construction with tweaked parameters and named groups.
t = np.linspace(0,50,11)
count_rate = np.asarray([.5,3,1.8])
count_pow = np.asarray([1,1,1.15]) #1.24, 1.13])
group_names = ['W','B','H']
count_0 = np.asarray([200,60,40])
# Counts are floored so they represent whole individuals.
count = np.asarray([np.floor(count_0 + count_rate*(t_i**count_pow)) for t_i in t])
share = count/np.asarray([np.sum(count, axis=1)]*3).T
score_rate = np.asarray([.1, .112, .25])
score_0 = [310,270,265]
scores_group = np.asarray([score_0 + score_rate*t_i for t_i in t])
total_score = np.sum(scores_group*share,axis=1)

# Three panels: scores (+ overall average), raw counts, and shares.
plt.figure(figsize=(12,4))
plt.subplot(1,3,1)
plt.plot(t,scores_group)
plt.plot(t,total_score,'k', label ='average')
plt.title('score per group and averge');
plt.subplot(1,3,2)
plt.plot(t,count)
plt.title('count per group');
plt.subplot(1,3,3)
plt.plot(t,share)
plt.title('% per group');
```
The above is occurring in aggregate data; we should generate and aim to detect from the individual measurements. So we can expand the above generator. We'll use the group scores and counts to draw the individual rows of our table.
```
# Expand the aggregate generator to individual rows: for every time step
# and group, draw `count` noisy scores (sd = 5) around the group mean.
dat = [[t_t,np.random.normal(loc=sg,scale=5),g ]
       for sg_t, c_t,t_t in zip(scores_group,count,t)
       for sg,c,g in zip(sg_t,c_t,group_names)
       for i in range(int(c))]
len(dat)

df = pd.DataFrame(data=dat,columns = ['year','score','race'])
df.head()

# Group-wise means per year vs. the pooled yearly mean.
df.groupby(['race','year']).mean().unstack()
df.groupby(['year']).mean().T
```
The overall goes down while each of the groupwise means goes up, as expected.
```
# Within-group correlations are positive while the pooled correlation is
# negative — the Simpson's-paradox signature the detector looks for.
df.groupby('race').corr()
df.corr()
```
We can see this in the correlation matrices as well, so our existing detector will work, but it has an intuitively different generating mechanism.
```
# Scatter with per-group and overall trends; fixed domain/range keeps the
# plot comparable across examples.
sp_plot(df,'year','score','race',domain_range=[-1, 51, 225, 350])
```
Visually, the scatter plots for this are also somewhat different; the groups are not as separable as they were in the regression-based examples we worked with initially.
# Generalizing this
Instead of setting a growth rate and being completely computational, we can set the start and end and then add noise in the middle.
```
# Set the final value: choose end-point counts so that group scores grow
# while the overall average falls, then interpolate counts linearly
# between the start and end.
# NOTE(review): several names used here (N_t, t, score_rate, score_growth,
# count_0, score_0, total_0) are defined *below* in this same cell, so it
# only runs on a second execution / with prior kernel state — consider
# reordering the cell.
score_t = (score_0 * score_growth * N_t).T
total_t = .85 * total_0
# Solve score_t @ count_t = total_t for end-point counts via the
# pseudo-inverse.
count_t = total_t * np.linalg.pinv(score_t)
count = np.linspace(count_0, count_t, N_t)
share = count / np.asarray([np.sum(count, axis=1)] * 3).T
scores_group = np.asarray([score_0 + score_rate * t_i for t_i in t])
total_score = np.sum(scores_group * share, axis=1)

plt.figure(figsize=(12, 5))
plt.subplot(1, 2, 1)
plt.plot(t, scores_group)
plt.plot(t, total_score, 'k', label='average')
plt.title('score per group and averge');
plt.subplot(1, 2, 2)
plt.plot(t, count)
plt.title('count per group');

N_t = 11
t = np.linspace(0, 50, N_t)
group_names = ['W', 'B', 'H']
# Fix: removed a stray backtick line here that made the cell a SyntaxError.
count_0 = np.asarray([200, 60, 40])
count_0
share_0 = count_0 / np.asarray([np.sum(count_0)] * 3).T
score_0 = np.asarray([310, 270, 265])
score_growth = [1.1, 1.3, 1.4]
total_0 = np.sum(share_0 * score_0)
total_0
```
| github_jupyter |
```
#写函数,求n个随机整数均值的平方根,整数范围在m与k之间
import random,math
def pingfanggeng():
    """Read a lower bound m, upper bound k and a count n, then print the
    square root of the mean of n random integers drawn uniformly from
    [m, k]."""
    m = int(input('请输入一个大于0的整数,作为随机整数的下界,回车结束。'))
    k = int(input('请输入一个大于0的整数,作为随机整数的上界,回车结束。'))
    n = int(input('请输入随机整数的个数,回车结束。'))
    # Draw n integers and accumulate their sum in one pass.
    total = sum(random.randint(m, k) for _ in range(n))
    print(math.sqrt(total / n))

def main():
    pingfanggeng()

if __name__ == '__main__':
    main()
#写函数,共n个随机整数,整数范围在m与k之间,求西格玛log(随机整数)及西格玛1/log(随机整数)
import random,math
def xigema():
    """Read bounds m, k and a count n, then print sum(log(x_i)) and
    sum(1/log(x_i)) over n random integers x_i drawn from [m, k].

    Fix: the original drew *two independent* random integers per iteration
    (one for each sum), so the two sums ranged over different numbers;
    each iteration now draws a single integer and uses it for both sums.
    """
    m = int(input('请输入一个大于0的整数,作为随机整数的下界,回车结束。'))
    k = int(input('请输入一个大于0的整数,作为随机整数的上界,回车结束。'))
    n = int(input('请输入随机整数的个数,回车结束。'))
    total1 = 0
    total2 = 0
    for _ in range(n):
        # NOTE: math.log(1) == 0, so drawing 1 (possible when m == 1)
        # would divide by zero below; bounds are assumed to be >= 2.
        value = random.randint(m, k)
        log_value = math.log(value)
        total1 = total1 + log_value
        total2 = total2 + 1 / log_value
    print(total1)
    print(total2)

def main():
    xigema()

if __name__ == '__main__':
    main()
#写函数,求s=a+aa+aaa+aaaa+aa...a的值,其中a是[1,9]之间的随机整数。例如2+22+222+2222+22222(此时共有5个数相加),几个数相加由键盘输入
import random
def sa():
    """Compute s = a + aa + aaa + ... (n terms) where a is a random digit
    in [1, 9]; e.g. for a = 2, n = 5: 2 + 22 + 222 + 2222 + 22222.

    Fix: the original built each term with math.pow, which returns a float
    (so the result printed as e.g. 24690.0) and required the math module,
    which this cell never imports.  Pure integer arithmetic keeps the
    exact value and drops the dependency.
    """
    n = int(input('请输入整数的个数,回车结束。'))
    a = random.randint(1, 9)
    term = 0   # current term in the series: a, aa, aaa, ...
    total = 0
    for _ in range(n):
        term = term * 10 + a   # append one more digit `a` to the term
        total = total + term
    print(total)

def main():
    sa()

if __name__ == '__main__':
    main()
import random, math
def win():
    # Success banner for the guessing game.
    print('Win!')

def lose():
    # Failure banner for the guessing game.
    print('Lose!')

def menu():
    # Top-level game menu; the choices are dispatched in main().
    print('''=====游戏菜单=====
1. 游戏说明
2. 开始游戏
3. 退出游戏
4. 制作团队
=====游戏菜单=====''')
def guess_game():
    """Play one round: the *computer* tries to guess a user-chosen number.

    The user sets an upper bound n and a secret number; the computer gets
    ceil(log2(n)) attempts — the binary-search optimum.

    NOTE(review): guesses are uniform random and ignore the high/low
    feedback, so the computer can repeat guesses and will often lose
    within the binary-search budget.  Also, `<=` grants one guess more
    than max_times — confirm whether that is intended.
    """
    n = int(input('请输入一个大于0的整数,作为神秘整数的上界,回车结束。'))
    number = int(input('请输入神秘整数,回车结束。'))
    max_times = math.ceil(math.log(n, 2))
    guess_times = 0
    while guess_times <= max_times:
        guess = random.randint(1, n)
        guess_times += 1
        print('一共可以猜', max_times, '次')
        print('你已经猜了', guess_times, '次')
        if guess == number:
            win()
            print('神秘数字是:', guess)
            print('你比标准次数少', max_times-guess_times, '次')
            break
        elif guess > number:
            print('抱歉,你猜大了')
        else:
            print('抱歉,你猜小了')
    else:
        # while-else: runs only when the loop exhausts without a break,
        # i.e. the computer never hit the number.
        print('神秘数字是:', number)
        lose()
# Main function
def main():
    # Menu loop: dispatch on the user's numeric choice until they quit (3).
    while True:
        menu()
        choice = int(input('请输入你的选择'))
        if choice == 1:
            # NOTE(review): show_instruction() is not defined anywhere in
            # this cell — choosing 1 raises NameError.
            show_instruction()
        elif choice == 2:
            guess_game()
        elif choice == 3:
            # NOTE(review): game_over() is also undefined.
            game_over()
            break
        else:
            # NOTE(review): show_team() is also undefined.
            show_team()

# Entry point
if __name__ == '__main__':
    main()
```
| github_jupyter |
# `pandas` Part 2: this notebook is a 2nd lesson on `pandas`
## The main objective of this tutorial is to slice up some DataFrames using `pandas`
>- Reading data into DataFrames is step 1
>- But most of the time we will want to select specific pieces of data from our datasets
# Learning Objectives
## By the end of this tutorial you will be able to:
1. Select specific data from a pandas DataFrame
2. Insert data into a DataFrame
## Files Needed for this lesson: `winemag-data-130k-v2.csv`
>- Download this csv from Canvas prior to the lesson
## The general steps to working with pandas:
1. import pandas as pd
>- Note the `as pd` is optional but is a common alias used for pandas and makes writing the code a bit easier
2. Create or load data into a pandas DataFrame or Series
>- In practice, you will likely be loading more datasets than creating but we will learn both
3. Reading data with `pd.read_`
>- Excel files: `pd.read_excel('fileName.xlsx')`
>- Csv files: `pd.read_csv('fileName.csv')`
4. After steps 1-3 you will want to check out your DataFrame
>- Use `shape` to see how many records and columns are in your DataFrame
>- Use `head()` to show the first 5-10 records in your DataFrame
5. Then you will likely want to slice up your data into smaller subset datasets
>- This step is the focus of this lesson
Narrated type-along videos are available:
- Part 1: https://youtu.be/uA96V-u8wkE
- Part 2: https://youtu.be/fsc0G77c5Kc
# First, check your working directory
# Step 1: Import pandas and give it an alias
# Step 2 Read Data Into a DataFrame
>- Knowing how to create your own data can be useful
>- However, most of the time we will read data into a DataFrame from a csv or Excel file
## File Needed: `winemag-data-130k-v2.csv`
>- Make sure you download this file from Canvas and place in your working directory
### Read the csv file with `pd.read_csv('fileName.csv`)
>- Set the index to column 0
### Check how many rows/records and columns are in the `wine_reviews` DataFrame
>- Use `shape`
### Check a couple of rows of data
### Now we can access columns in the dataframe using syntax similar to how we access values in a dictionary
### To get a single value...
### Using the indexing operator and attribute selection like we did above should seem familiar
>- We have accessed data like this using dictionaries
>- However, pandas also has its own selection/access operators, `loc` and `iloc`
>- For basic operations, we can use the familiar dictionary syntax
>- As we get more advanced, we should use `loc` and `iloc`
>- It might help to think of `loc` as "label based location" and `iloc` as "index based location"
### Both `loc` and `iloc` start with the row, then the column
#### Use `iloc` for index based location similar to what we have done with lists and dictionaries
#### Use `loc` for label based location. This uses the column names vs indexes to retrieve the data we want.
# First, let's look at index based selection using `iloc`
## As we work these examples, remember we specify row first then column
### Selecting the first row using `iloc`
>- For the wine reviews dataset this is our header row
### To return all the rows of a particular column with `iloc`
>- To get everything, just put a `:` for row and/or column
### To return the first three rows of the first column...
### To return the second and third rows...
### We can also pass a list for the rows to get specific values
### Can we pass lists for both rows and columns...?
### We can also go from the end of the rows just like we did with lists
>- The following gets the last 5 records for country in the dataset
### To get the last 5 records for all columns...
# Label-Based Selection with `loc`
## With `loc`, we use the names of the columns to retrieve data
### Get all the records for the following fields/columns using `loc`:
>- taster_name
>- taster_twitter_handle
>- points
# Notice we have been using the default index so far
## We can change the index with `set_index`
# Conditional Selection
>- Suppose we only want to analyze data for one country, reviewer, etc...
>- Or we want to pull the data only for points and/or prices above a certain criteria
## Which wines are from the US with 95 or greater points?
# Some notes on our previous example:
>- We just quickly took a dataset that has almost 130K rows and reduced it to one that has 993
>- This tells us that less than 1% of the wines are from the US and have ratings of 95 or higher
>- With some simple slicing using pandas we already have some decent start to an analytics project
# Q: What are all the wines from Italy or that have a rating higher than 95?
>- To return the results for an "or" question use the pipe `|` between your conditions
# Q: What are all the wines from Italy or France?
>- We can do this with an or statement or the `isin()` selector
>- Note: if you know SQL, this is the same thing as the IN () statement
>- Using `isin()` replaces multiple "or" statements and makes your code a little shorter
# Q: What are all the wines without prices?
>- Here we can use the `isnull` method to show when values are not entered for a particular column
# What are all the wines with prices?
>- Use `notnull()`
# We can also add columns/fields to our DataFrames
| github_jupyter |
# Exp 101 analysis
See `./informercial/Makefile` for experimental
details.
```
import os
import numpy as np
from IPython.display import Image
import matplotlib
import matplotlib.pyplot as plt
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import seaborn as sns
sns.set_style('ticks')
matplotlib.rcParams.update({'font.size': 16})
matplotlib.rc('axes', titlesize=16)
from infomercial.exp import meta_bandit
from infomercial.exp import epsilon_bandit
from infomercial.exp import beta_bandit
from infomercial.exp import softbeta_bandit
from infomercial.local_gym import bandit
from infomercial.exp.meta_bandit import load_checkpoint
import gym
def plot_meta(env_name, result):
    """Plot a meta_bandit run as six stacked panels: arm choices,
    controlling policy, scores, values, p(best) and the tie indicator.

    Args:
        env_name: Gym id of the bandit environment the result came from.
        result: Checkpoint dict produced by infomercial.exp.meta_bandit.

    NOTE(review): reads the module-level global ``tie_threshold`` for the
    dashed reference lines; it is not defined anywhere in this notebook.
    Also treats ``env.best`` as a scalar, unlike plot_epsilon below.
    """
    # episodes, actions, scores_E, scores_R, values_E, values_R, ties, policies
    episodes = result["episodes"]
    actions =result["actions"]
    bests =result["p_bests"]
    scores_E = result["scores_E"]
    scores_R = result["scores_R"]
    values_R = result["values_R"]
    values_E = result["values_E"]
    ties = result["ties"]
    policies = result["policies"]

    # -
    env = gym.make(env_name)
    best = env.best
    print(f"Best arm: {best}, last arm: {actions[-1]}")

    # Plotz
    fig = plt.figure(figsize=(6, 14))
    grid = plt.GridSpec(6, 1, wspace=0.3, hspace=0.8)

    # Arm choice per episode; the true best arm is the dashed red line.
    plt.subplot(grid[0, 0])
    plt.scatter(episodes, actions, color="black", alpha=.5, s=2, label="Bandit")
    plt.plot(episodes, np.repeat(best, np.max(episodes)+1),
             color="red", alpha=0.8, ls='--', linewidth=2)
    plt.ylim(-.1, np.max(actions)+1.1)
    plt.ylabel("Arm choice")
    plt.xlabel("Episode")

    # Which policy (exploration pi_E = 0, reward pi_R = 1) was in control.
    policies = np.asarray(policies)
    episodes = np.asarray(episodes)
    plt.subplot(grid[1, 0])
    m = policies == 0
    plt.scatter(episodes[m], policies[m], alpha=.4, s=2, label="$\pi_E$", color="purple")
    m = policies == 1
    plt.scatter(episodes[m], policies[m], alpha=.4, s=2, label="$\pi_R$", color="grey")
    plt.ylim(-.1, 1+.1)
    plt.ylabel("Controlling\npolicy")
    plt.xlabel("Episode")
    plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    _ = sns.despine()

    # Per-episode scores for both policies, with the tie threshold dashed.
    plt.subplot(grid[2, 0])
    plt.scatter(episodes, scores_E, color="purple", alpha=0.4, s=2, label="E")
    plt.plot(episodes, scores_E, color="purple", alpha=0.4)
    plt.scatter(episodes, scores_R, color="grey", alpha=0.4, s=2, label="R")
    plt.plot(episodes, scores_R, color="grey", alpha=0.4)
    plt.plot(episodes, np.repeat(tie_threshold, np.max(episodes)+1),
             color="violet", alpha=0.8, ls='--', linewidth=2)
    plt.ylabel("Score")
    plt.xlabel("Episode")
    # plt.semilogy()
    plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    _ = sns.despine()

    # Learned values for both critics.
    plt.subplot(grid[3, 0])
    plt.scatter(episodes, values_E, color="purple", alpha=0.4, s=2, label="$Q_E$")
    plt.scatter(episodes, values_R, color="grey", alpha=0.4, s=2, label="$Q_R$")
    plt.plot(episodes, np.repeat(tie_threshold, np.max(episodes)+1),
             color="violet", alpha=0.8, ls='--', linewidth=2)
    plt.ylabel("Value")
    plt.xlabel("Episode")
    # plt.semilogy()
    plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    _ = sns.despine()

    # Probability the current arm is the best one.
    plt.subplot(grid[4, 0])
    plt.scatter(episodes, bests, color="red", alpha=.5, s=2)
    plt.ylabel("p(best)")
    plt.xlabel("Episode")
    plt.ylim(0, 1)

    # Tie indicator (1 = the two policies were tied this episode).
    plt.subplot(grid[5, 0])
    plt.scatter(episodes, ties, color="black", alpha=.5, s=2, label="$\pi_{tie}$ : 1\n $\pi_\pi$ : 0")
    plt.ylim(-.1, 1+.1)
    plt.ylabel("Ties index")
    plt.xlabel("Episode")
    plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
def plot_epsilon(env_name, result):
    """Plot an epsilon_bandit run: arm choices, score, value, p(best) and
    the epsilon decay, one panel per row.

    Args:
        env_name: Gym id of the bandit environment the result came from.
        result: Checkpoint dict produced by infomercial.exp.epsilon_bandit.

    NOTE(review): iterates over ``env.best`` here, so it supports
    environments with several best arms — unlike plot_meta above, which
    treats it as a scalar.
    """
    # episodes, actions, scores_E, scores_R, values_E, values_R, ties, policies
    episodes = result["episodes"]
    actions =result["actions"]
    bests =result["p_bests"]
    scores_R = result["scores_R"]
    values_R = result["values_R"]
    epsilons = result["epsilons"]

    # -
    env = gym.make(env_name)
    best = env.best
    print(f"Best arm: {best}, last arm: {actions[-1]}")

    # Plotz
    fig = plt.figure(figsize=(6, 14))
    grid = plt.GridSpec(6, 1, wspace=0.3, hspace=0.8)

    # Arm choice per episode; each true best arm is a dashed red line.
    plt.subplot(grid[0, 0])
    plt.scatter(episodes, actions, color="black", alpha=.5, s=2, label="Bandit")
    for b in best:
        plt.plot(episodes, np.repeat(b, np.max(episodes)+1),
                 color="red", alpha=0.8, ls='--', linewidth=2)
    plt.ylim(-.1, np.max(actions)+1.1)
    plt.ylabel("Arm choice")
    plt.xlabel("Episode")

    # Reward score per episode.
    plt.subplot(grid[1, 0])
    plt.scatter(episodes, scores_R, color="grey", alpha=0.4, s=2, label="R")
    plt.ylabel("Score")
    plt.xlabel("Episode")
    # plt.semilogy()
    plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    _ = sns.despine()

    # Learned reward value per episode.
    plt.subplot(grid[2, 0])
    plt.scatter(episodes, values_R, color="grey", alpha=0.4, s=2, label="$Q_R$")
    plt.ylabel("Value")
    plt.xlabel("Episode")
    # plt.semilogy()
    plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    _ = sns.despine()

    # Probability the current arm is a best arm.
    plt.subplot(grid[3, 0])
    plt.scatter(episodes, bests, color="red", alpha=.5, s=2)
    plt.ylabel("p(best)")
    plt.xlabel("Episode")
    plt.ylim(0, 1)

    # Epsilon decay schedule.
    plt.subplot(grid[4, 0])
    plt.scatter(episodes, epsilons, color="black", alpha=.5, s=2)
    plt.ylabel("$\epsilon_R$")
    plt.xlabel("Episode")
    plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
def plot_critic(critic_name, env_name, result):
    """Scatter the critic's learned value for every arm, marking the best
    arm with a dashed vertical line.

    Args:
        critic_name: Key of the critic dict inside ``result``
            (e.g. 'critic_R').
        env_name: Gym id of the bandit environment.
        result: Checkpoint dict containing the critic's arm -> value map.
    """
    # -
    env = gym.make(env_name)
    best = env.best

    # Arm indices and their learned values, in the critic's key order.
    critic = result[critic_name]
    arms = list(critic.keys())
    values = list(critic.values())

    # Plotz
    fig = plt.figure(figsize=(8, 3))
    grid = plt.GridSpec(1, 1, wspace=0.3, hspace=0.8)

    plt.subplot(grid[0])
    plt.scatter(arms, values, color="black", alpha=.5, s=30)
    # NOTE(review): `[best]*10` assumes env.best is a scalar arm index;
    # the env used above (BanditTwoHigh10) appears to expose multiple best
    # arms — confirm.
    plt.plot([best]*10, np.linspace(min(values), max(values), 10), color="red", alpha=0.8, ls='--', linewidth=2)
    plt.ylabel("Value")
    plt.xlabel("Arm")
```
# Load and process data
```
data_path ="/Users/qualia/Code/infomercial/data/"
exp_name = "exp97"
# Load tuning results sorted by performance; entry 0 is the best trial.
sorted_params = load_checkpoint(os.path.join(data_path, f"{exp_name}_sorted.pkl"))
# print(sorted_params.keys())
best_params = sorted_params[0]
sorted_params
```
# Performance
of best parameters
```
env_name = 'BanditTwoHigh10-v0'
num_episodes = 1000

# Re-run the epsilon-greedy bandit with the best hyperparameters found,
# then plot the run and the critic's final per-arm values.
result = epsilon_bandit(
    env_name=env_name,
    num_episodes=num_episodes,
    lr_R=best_params["lr_R"],
    epsilon=best_params["epsilon"],
    seed_value=2,
)
print(best_params)
plot_epsilon(env_name, result=result)
plot_critic('critic_R', env_name, result)
```
# Sensitivity
to parameter choices
```
# Flatten the per-trial tuning results into parallel lists for plotting.
total_Rs = []
eps = []
lrs_R = []
# NOTE(review): lrs_E is never filled or plotted — dead variable.
lrs_E = []
trials = list(sorted_params.keys())
for t in trials:
    total_Rs.append(sorted_params[t]['total_R'])
    lrs_R.append(sorted_params[t]['lr_R'])
    eps.append(sorted_params[t]['epsilon'])

# Init plot
fig = plt.figure(figsize=(5, 18))
grid = plt.GridSpec(6, 1, wspace=0.3, hspace=0.8)

# Total reward by sorted-trial rank.
plt.subplot(grid[0, 0])
plt.scatter(trials, total_Rs, color="black", alpha=.5, s=6, label="total R")
plt.xlabel("Sorted params")
plt.ylabel("total R")
_ = sns.despine()

# lr_R by sorted-trial rank.
plt.subplot(grid[1, 0])
plt.scatter(trials, lrs_R, color="black", alpha=.5, s=6, label="total R")
plt.xlabel("Sorted params")
plt.ylabel("lr_R")
_ = sns.despine()

# Total reward vs. lr_R.
plt.subplot(grid[2, 0])
plt.scatter(lrs_R, total_Rs, color="black", alpha=.5, s=6, label="total R")
plt.xlabel("lrs_R")
plt.ylabel("total_Rs")
_ = sns.despine()

# Total reward vs. epsilon.
plt.subplot(grid[3, 0])
plt.scatter(eps, total_Rs, color="black", alpha=.5, s=6, label="total R")
plt.xlabel("epsilon")
plt.ylabel("total_Rs")
_ = sns.despine()
```
# Parameter correlations
```
from scipy.stats import spearmanr

# Rank correlations: do the tuned hyperparameters co-vary, and does either
# one predict total reward?
spearmanr(eps, lrs_R)
spearmanr(eps, total_Rs)
spearmanr(lrs_R, total_Rs)
```
# Distributions
of parameters
```
# Init plot
# NOTE(review): the grid reserves 3 rows but only 2 panels are drawn.
fig = plt.figure(figsize=(5, 6))
grid = plt.GridSpec(3, 1, wspace=0.3, hspace=0.8)

# Distribution of epsilon across tuning trials.
plt.subplot(grid[0, 0])
plt.hist(eps, color="black")
plt.xlabel("epsilon")
plt.ylabel("Count")
_ = sns.despine()

# Distribution of lr_R across tuning trials.
plt.subplot(grid[1, 0])
plt.hist(lrs_R, color="black")
plt.xlabel("lr_R")
plt.ylabel("Count")
_ = sns.despine()
```
of total reward
```
# Init plot
fig = plt.figure(figsize=(5, 2))
grid = plt.GridSpec(1, 1, wspace=0.3, hspace=0.8)

# Distribution of total reward across tuning trials.
plt.subplot(grid[0, 0])
plt.hist(total_Rs, color="black", bins=50)
plt.xlabel("Total reward")
plt.ylabel("Count")
# plt.xlim(0, 10)
_ = sns.despine()
```
| github_jupyter |
# Creating a Linear Cellular Automaton
Let's start by creating a linear cellular automaton

> A cellular automaton is a discrete model of computation studied in automata theory.
It consists of a regular grid of cells, each in one of a finite number of states, such as on and off (in contrast to a coupled map lattice). The grid can be in any finite number of dimensions. For each cell, a set of cells called its neighborhood is defined relative to the specified cell.An initial state (time t = 0) is selected by assigning a state for each cell. A new generation is created (advancing t by 1), according to some fixed rule (generally, a mathematical function) that determines the new state of each cell in terms of the current state of the cell and the states of the cells in its neighborhood. Typically, the rule for updating the state of cells is the same for each cell and does not change over time.
## Rules
For this example, the rule for updating the state of the cells is:
> For each cell of the automaton, it will take the state of its left neighboring cell.
With this rule, the cells will move one step to the right side every generation.
## Implementing cells
To implement cells, you can extend the class `DiscreteTimeModel`, and define the abstract protected methods.
```
from typing import Dict
from gsf.dynamic_system.dynamic_systems import DiscreteEventDynamicSystem
from gsf.models.models import DiscreteTimeModel
class Cell(DiscreteTimeModel):
    """A single cell of the linear cellular automaton.

    The state is a boolean (alive/dead).  On every transition the cell
    simply adopts the value delivered by its single input (its left
    neighbour); its output is its current state.

    Attributes:
        _symbol (str): Character used to render a living cell in console output.
    """

    _symbol: str

    def __init__(self, dynamic_system: DiscreteEventDynamicSystem, state: bool, symbol: str = None):
        """
        Args:
            dynamic_system (DiscreteEventDynamicSystem): Automaton grid that owns this cell.
            state (bool): True if the cell starts alive, False if dead.
            symbol (str): Optional console symbol for a living cell (defaults to a heart).
        """
        super().__init__(dynamic_system, state=state)
        self._symbol = symbol or "\u2665"

    def _state_transition(self, state: bool, inputs: Dict[str, bool]) -> bool:
        """Adopt the state delivered by the (single) input cell.

        Args:
            state (bool): Current state of the cell (unused by this rule).
            inputs: Mapping of source cell -> that cell's output.

        Returns:
            The new state of the cell.
        """
        return next(iter(inputs.values()))

    def _output_function(self, state: bool) -> bool:
        """Expose the current state as the cell's output."""
        return state

    def __str__(self):
        """Render the cell: its symbol when alive, a dash when dead."""
        return self._symbol if self.get_state() else "-"
```
The `Cell` class, must receive the `DiscreteEventDynamicSystem` where the model belongs. We also include the state of the cell as a bool and a symbol that represents the cells when they will be printed.
When a generation is running, the framework will obtain the outputs of every cell defined by `_output_function`, and will inject them on the next model by `_state_transition`. The state transition member, receives a dict with the source input model and its state, and returns the new state that will take the cell.
`DiscreteTimeModels` will schedule their transitions indefinitely every so often with a constant period in between.
## Implementing the Automaton
The Automaton is a dynamic system, a discrete event dynamic system, so it extends `DiscreteEventDynamicSystem`.
```
from random import random, seed
from typing import List
from gsf.dynamic_system.dynamic_systems import DiscreteEventDynamicSystem
class LinearAutomaton(DiscreteEventDynamicSystem):
    """A one-dimensional cellular automaton.

    Holds a row of cells wired in a ring: every cell feeds its right-hand
    neighbour (the last wraps around to the first), so each generation the
    pattern shifts one step to the right.

    Attributes:
        _cells (List[Cell]): The cells of the automaton, left to right.
    """

    _cells: List[Cell]

    def __init__(self, cells: int = 5, random_seed: int = 42):
        """
        Args:
            cells (int): Number of cells in the automaton.
            random_seed (int): Seed controlling the random initial states.
        """
        super().__init__()
        seed(random_seed)
        self._create_cells(cells)
        self._create_relations(cells)

    def _create_cells(self, cells: int):
        """Populate the automaton; each cell starts alive with p = 0.5.

        Args:
            cells (int): Number of cells to create.
        """
        self._cells = [Cell(self, random() < 0.5) for _ in range(cells)]

    def _create_relations(self, cells: int):
        """Wire each cell into its right-hand neighbour, wrapping around.

        Args:
            cells (int): Number of cells in the automaton.
        """
        for index in range(cells):
            self._cells[index - 1].add(self._cells[index])

    def __str__(self):
        """Render the automaton as one character per cell."""
        return "".join(str(cell) for cell in self._cells)
```
The `LinearAutomaton` receives the number of cells that it will have and a random seed to determine the initial random state of the cells.
First it creates the cells, setting the state as alive or dead with a probability of 0.5, and giving as `DiscreteEventDynamicSystem` the current linear automaton.
Then, it connects the models, setting as the output of the `cell[i-1]`, the `cell[i]`.
`DiscreteEventDynamicSystem`s link models, route outputs and inputs between models and execute the transitions of the models.
## Running the simulation
We defined a dynamic system, so we can simulate it. Use the class `DiscreteEventExperiment` to run 5 generations!
```
from gsf.experiments.experiment_builders import DiscreteEventExperiment

# Build a 10-cell automaton, print the initial generation, advance the
# simulation five steps, then print the result.
linear_automaton = LinearAutomaton(cells=10)
experiment = DiscreteEventExperiment(linear_automaton)
print(linear_automaton)
experiment.simulation_control.start(stop_time=5)
# The simulation runs on another thread, so block until it finishes.
experiment.simulation_control.wait()
print(linear_automaton)
```
Create the experiment with the linear automaton as `DiscreteEventDynamicSystem`. Then, run it during 5 generations.
As the simulation runs in a different thread, we wait for it to finish with `experiment.simulation_control.wait()`.
Try the example with your own parameters and have fun.
| github_jupyter |
# Try It Yourself
There are only three problems in this last set of exercises, but they're all pretty tricky, so be on guard!
Run the setup code below before working on the questions.
```
from learntools.core import binder; binder.bind(globals())
from learntools.python.ex7 import *
print('Setup complete.')
```
# Exercises
## 1.
After completing the exercises on lists and tuples, Jimmy noticed that, according to his `estimate_average_slot_payout` function, the slot machines at the Learn Python Casino are actually rigged *against* the house, and are profitable to play in the long run.
Starting with $200 in his pocket, Jimmy has played the slots 500 times, recording his new balance in a list after each spin. He used Python's `matplotlib` library to make a graph of his balance over time:
```
# Import the jimmy_slots submodule
from learntools.python import jimmy_slots
# Call the get_graph() function to get Jimmy's graph
graph = jimmy_slots.get_graph()
graph
```
As you can see, he's hit a bit of bad luck recently. He wants to tweet this along with some choice emojis, but, as it looks right now, his followers will probably find it confusing. He's asked if you can help him make the following changes:
1. Add the title "Results of 500 slot machine pulls"
2. Make the y-axis start at 0.
3. Add the label "Balance" to the y-axis
After calling `type(graph)` you see that Jimmy's graph is of type `matplotlib.axes._subplots.AxesSubplot`. Hm, that's a new one. By calling `dir(graph)`, you find three methods that seem like they'll be useful: `.set_title()`, `.set_ylim()`, and `.set_ylabel()`.
Use these methods to complete the function `prettify_graph` according to Jimmy's requests. We've already checked off the first request for you (setting a title).
(Remember: if you don't know what these methods do, use the `help()` function!)
```
def prettify_graph(graph):
    """Modify the given graph according to Jimmy's requests: add a title, make the y-axis
    start at 0, label the y-axis.

    Args:
        graph: a matplotlib AxesSubplot (anything exposing set_title,
            set_ylim and set_ylabel).
    """
    graph.set_title("Results of 500 slot machine pulls")
    # Step 2: clamp the bottom of the y-axis at 0; the top stays automatic.
    graph.set_ylim(bottom=0)
    # Step 3: label the y-axis.
    graph.set_ylabel("Balance")
graph = jimmy_slots.get_graph()
prettify_graph(graph)
graph
```
**Bonus:** Can you format the numbers on the y-axis so they look like dollar amounts? e.g. $200 instead of just 200.
(We're not going to tell you what method(s) to use here. You'll need to go digging yourself with `dir(graph)` and/or `help(graph)`.)
```
# Check your answer (Run this code cell to receive credit!)
q1.solution()
```
## 2. <span title="Spicy" style="color: coral">🌶️🌶️</span>
This is a very hard problem. Feel free to skip it if you are short on time:
Luigi is trying to perform an analysis to determine the best items for winning races on the Mario Kart circuit. He has some data in the form of lists of dictionaries that look like...
[
{'name': 'Peach', 'items': ['green shell', 'banana', 'green shell',], 'finish': 3},
{'name': 'Bowser', 'items': ['green shell',], 'finish': 1},
# Sometimes the racer's name wasn't recorded
{'name': None, 'items': ['mushroom',], 'finish': 2},
{'name': 'Toad', 'items': ['green shell', 'mushroom'], 'finish': 1},
]
`'items'` is a list of all the power-up items the racer picked up in that race, and `'finish'` was their placement in the race (1 for first place, 3 for third, etc.).
He wrote the function below to take a list like this and return a dictionary mapping each item to how many times it was picked up by first-place finishers.
```
def best_items(racers):
    """Given a list of racer dictionaries, return a dictionary mapping items to the number
    of times those items were picked up by racers who finished in first place.
    """
    winner_item_counts = {}
    for i in range(len(racers)):
        # The i'th racer dictionary
        racer = racers[i]
        # We're only interested in racers who finished in first
        if racer['finish'] == 1:
            # NOTE(review): this inner loop reuses the name `i`, clobbering the outer
            # integer index with an item *string*. The `i+1` in the warning below will
            # then raise a TypeError for any unnamed racer that follows a first-place
            # finisher — this is the bug the exercise asks you to find.
            for i in racer['items']:
                # Add one to the count for this item (adding it to the dict if necessary)
                if i not in winner_item_counts:
                    winner_item_counts[i] = 0
                winner_item_counts[i] += 1
        # Data quality issues :/ Print a warning about racers with no name set. We'll take care of it later.
        if racer['name'] is None:
            print("WARNING: Encountered racer with unknown name on iteration {}/{} (racer = {})".format(
                i+1, len(racers), racer['name'])
            )
    return winner_item_counts
```
He tried it on a small example list above and it seemed to work correctly:
```
sample = [
{'name': 'Peach', 'items': ['green shell', 'banana', 'green shell',], 'finish': 3},
{'name': 'Bowser', 'items': ['green shell',], 'finish': 1},
{'name': None, 'items': ['mushroom',], 'finish': 2},
{'name': 'Toad', 'items': ['green shell', 'mushroom'], 'finish': 1},
]
best_items(sample)
```
However, when he tried running it on his full dataset, the program crashed with a `TypeError`.
Can you guess why? Try running the code cell below to see the error message Luigi is getting. Once you've identified the bug, fix it in the cell below (so that it runs without any errors).
Hint: Luigi's bug is similar to one we encountered in the [tutorial](#$TUTORIAL_URL$) when we talked about star imports.
```
# Import luigi's full dataset of race data
from learntools.python.luigi_analysis import full_dataset
# Fix me!
def best_items(racers):
    """Given a list of racer dictionaries, return a dictionary mapping items to the
    number of times those items were picked up by racers who finished in first place.

    Fix: the original reused the loop variable `i` for the inner item loop, so `i`
    became a string; `i+1` in the warning below then raised a TypeError whenever an
    unnamed racer appeared after a first-place finisher.
    """
    winner_item_counts = {}
    for i, racer in enumerate(racers):
        # We're only interested in racers who finished in first
        if racer['finish'] == 1:
            # Use a distinct name (`item`) so the outer index `i` is not clobbered
            for item in racer['items']:
                # Add one to the count for this item (adding it to the dict if necessary)
                if item not in winner_item_counts:
                    winner_item_counts[item] = 0
                winner_item_counts[item] += 1
        # Data quality issues :/ Print a warning about racers with no name set. We'll take care of it later.
        if racer['name'] is None:
            print("WARNING: Encountered racer with unknown name on iteration {}/{} (racer = {})".format(
                i+1, len(racers), racer['name'])
            )
    return winner_item_counts
# Try analyzing the imported full dataset
best_items(full_dataset)
#_COMMENT_IF(PROD)_
q2.hint()
# Check your answer (Run this code cell to receive credit!)
q2.solution()
```
## 3. <span title="A bit spicy" style="color: darkgreen ">🌶️</span>
Suppose we wanted to create a new type to represent hands in blackjack. One thing we might want to do with this type is overload the comparison operators like `>` and `<=` so that we could use them to check whether one hand beats another. e.g. it'd be cool if we could do this:
```python
>>> hand1 = BlackjackHand(['K', 'A'])
>>> hand2 = BlackjackHand(['7', '10', 'A'])
>>> hand1 > hand2
True
```
Well, we're not going to do all that in this question (defining custom classes is a bit beyond the scope of these lessons), but the code we're asking you to write in the function below is very similar to what we'd have to write if we were defining our own `BlackjackHand` class. (We'd put it in the `__gt__` magic method to define our custom behaviour for `>`.)
Fill in the body of the `blackjack_hand_greater_than` function according to the docstring.
```
def blackjack_hand_greater_than(hand_1, hand_2):
    """
    Return True if hand_1 beats hand_2, and False otherwise.
    In order for hand_1 to beat hand_2 the following must be true:
    - The total of hand_1 must not exceed 21
    - The total of hand_1 must exceed the total of hand_2 OR hand_2's total must exceed 21
    Hands are represented as a list of cards. Each card is represented by a string.
    When adding up a hand's total, cards with numbers count for that many points. Face
    cards ('J', 'Q', and 'K') are worth 10 points. 'A' can count for 1 or 11.
    When determining a hand's total, you should try to count aces in the way that
    maximizes the hand's total without going over 21. e.g. the total of ['A', 'A', '9'] is 21,
    the total of ['A', 'A', '9', '3'] is 14.
    Examples:
    >>> blackjack_hand_greater_than(['K'], ['3', '4'])
    True
    >>> blackjack_hand_greater_than(['K'], ['10'])
    False
    >>> blackjack_hand_greater_than(['K', 'K', '2'], ['3'])
    False
    """
    def hand_total(hand):
        # Count every ace as 1 first, then upgrade as many aces as possible to 11
        # (each upgrade adds 10) without pushing the total over 21.
        total = 0
        aces = 0
        for card in hand:
            if card in ('J', 'Q', 'K'):
                total += 10
            elif card == 'A':
                aces += 1
                total += 1
            else:
                total += int(card)
        while aces > 0 and total + 10 <= 21:
            total += 10
            aces -= 1
        return total

    total_1 = hand_total(hand_1)
    total_2 = hand_total(hand_2)
    return total_1 <= 21 and (total_1 > total_2 or total_2 > 21)
# Check your answer
q3.check()
#_COMMENT_IF(PROD)_
q3.hint()
#_COMMENT_IF(PROD)_
q3.solution()
```
## The end
You've finished the Python course. Congrats!
As always, if you have any questions about these exercises, or anything else you encountered in the course, come to the [Learn Forum](https://kaggle.com/learn-forum).
You probably didn't put in all these hours of learning Python just to play silly games of chance, right? If you're interested in applying your newfound Python skills to some data science tasks, check out some of our other **[Kaggle Courses](https://www.kaggle.com/learn/overview)**. Some good next steps are:
1. [Machine learning with scikit-learn](https://www.kaggle.com/learn/intro-to-machine-learning)
2. [Pandas for data manipulation](https://www.kaggle.com/learn/pandas)
3. [Deep learning with TensorFlow](https://www.kaggle.com/learn/deep-learning)
Happy Pythoning!
| github_jupyter |
# Exercise 4
Hi everyone, today we are going to have an introduction to Machine Learning and Deep Learning, as well as we will work with the Linear/Logistic regression and Correlation.
# Part 1: Curve Fitting:
```
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
```
Sometimes we are going to find some S-shaped curves while working with neural networks. Such curves are the so-called [Sigmoid function](https://en.wikipedia.org/wiki/Sigmoid_function).
A simple sigmoid function is given by:
$$\sigma (x) = \frac{1}{1+exp{(-x)}}$$
```
# We define then the sigmoid function
def sigmoid(x, a, b, c, d):
    """Generalized logistic curve: amplitude a, slope b, horizontal shift c, vertical offset d."""
    exponent = c - b * x
    return a * 1 / (1 + np.exp(exponent)) + d
```
$$\sigma_{a,b,c,d} (x) = a \cdot \frac{1}{1+exp{(-b \cdot x + c)}} + d$$
```
true_x = np.arange(-10, 10, .1) # Array for x-axis
true_y = sigmoid(true_x, 1, 2.1, 3.3, 0) # Creating a sigmoid using some given values and the x-array
#########
## Generate some fake measurements from experiment
#########
xdata = np.arange(-10,10,1)
y = sigmoid(xdata, 1, 2.1, 3.3, 0)
y_noise = 0.1 * np.random.randn(y.size) # generating some random values (noise)
ydata = y+y_noise # Adding this noise to the original sigmoid function
print(ydata)
# Plot:
plt.plot(true_x, true_y, '--', label = 'original sigmoid')
plt.plot(xdata, ydata, 'ko', label = 'noisy measurements')
plt.legend()
```
## Fitting using 'curve_fit'
```
from scipy.optimize import curve_fit
values = curve_fit(sigmoid, xdata, ydata)[0]
#curve_fit() returns the values for the parameters (popt) and their covariance (pcov)
# Why [0]? This way we save only popt on 'values'
y_fit=sigmoid(xdata, *values) # Evaluating such values
# Plot
plt.plot(true_x, true_y, '--', label = 'original sigmoid')
plt.plot(xdata, ydata, 'ko', label = 'sigmoid with noise')
plt.plot(xdata, y_fit, label = 'curve obtained with curve_fit')
plt.legend()
```
Note: You may also get a so-called 'optimize warning'. This can be resolved by using another optimization method and/or bounds.
Optimization methods available are:
* [Levenberg-Marquardt algorithm](https://en.wikipedia.org/wiki/Levenberg%E2%80%93Marquardt_algorithm) ('lm').
* [Trust Region Reflective algorithm](https://optimization.mccormick.northwestern.edu/index.php/Trust-region_methods) (‘trf’).
* [Dogleg algorithm](https://en.wikipedia.org/wiki/Powell%27s_dog_leg_method) (‘dogbox’)
Check [SciPy's documentation](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.curve_fit.html)
## Using minimize
```
from scipy.optimize import minimize
```
To use [minimize](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html) we need to define a function to be... guess what?
Correct! minimized
```
def helper(values, x_data, y_data):
    """Objective for `minimize`: sum of squared residuals between the data and
    the sigmoid evaluated with the candidate parameters (a single scalar)."""
    residuals = y_data - sigmoid(x_data, *values)
    return (residuals ** 2).sum()
m = minimize(helper, [1, 1, 1, 1], args=(xdata, ydata)) # Where [1, 1, 1, 1] represents the initial guess!
m
y_fit2= sigmoid(xdata, *m.x) # Evaluating with the x-array obtained with minimize(),
# *m.x --> python hack to quickly unpack the values
plt.plot(xdata, ydata, 'ko', label = 'sigmoid with noise')
plt.plot(xdata, y_fit2, label = 'curve obtained with minimize')
plt.legend()
plt.plot(xdata, y_fit2, '-', label = 'curve obtained with minimize')
plt.plot(xdata, y_fit, '-', label = 'curve obtained with curve_fit')
plt.legend()
```
# Using scikit-learn (To Do):
You will have to run a similar calculation but now with [scikit-learn](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html).
First of all, you have to define a linear function, namely: $$y = mx+b$$
1. To Do: Define the linear function
```
def linearFunction(x, m, b):
    """Straight line y = m*x + b with slope m and intercept b."""
    return b + m * x
x = np.arange(-4,4,0.5)
m = 2
b = 1
line = linearFunction(x,m,b) # Creating a line using your function and the values given
rng = np.random.default_rng()
l_noise = 0.5 * rng.normal(size=x.size)
noisy_line = line+l_noise
plt.plot(x, line, '--', label = 'original linear')
plt.plot(x, noisy_line, 'k', label = 'noisy linear')
plt.legend()
```
## 2. To Do: Use scikit-learn to perform linear regression
--> Use the documentation
The plotting assumes that the LinearModel is called `reg`, and the predicted line is called `line_pred`.
```
from sklearn.linear_model import LinearRegression
X = x.reshape((-1, 1))
reg = LinearRegression()
reg.fit(X, noisy_line)
line_pred = reg.predict(X)
print('Intercept b:', reg.intercept_)
print('\nSlope m:', reg.coef_)
plt.plot(x, noisy_line, 'k--', label = 'noisy linear')
plt.plot(x, line_pred, label = 'prediction')
plt.legend()
plt.plot(x, line, '--', label = 'original')
plt.plot(x, line_pred, 'o', label = 'prediction')
plt.legend()
```
## Curve fitting/Minimize
3. To Do: Now use `curve_fit` **or** `minimize` to fit the linear equation using the noisy data `x` and `noisy_line`.
```
## Curve fit
values = curve_fit(linearFunction, x, noisy_line)[0]
pred_curve_fit = linearFunction(x, *values)
## Minimize
def helper(values, x_data, y_data):
    """Objective for `minimize`: sum of squared residuals between the noisy data
    and the line evaluated with the candidate (m, b) parameters."""
    residuals = y_data - linearFunction(x_data, *values)
    return (residuals ** 2).sum()
m = minimize(helper, [1, 1], args=(x, noisy_line))
pred_minimize_fit = linearFunction(x, *m.x)
## Plot everything
plt.plot(x, line, '--', label = 'original')
plt.plot(x, pred_curve_fit, 'x', label = 'pred_curve_fit', markersize=12)
plt.plot(x, pred_minimize_fit, 'o', label = 'pred_minimize_fit')
plt.legend()
```
___________________________________
# Part 2: MLP and CNN
```
import tensorflow as tf # Library for machine learning and AI
```
* Video: [Why Tensorflow?](https://www.youtube.com/watch?v=yjprpOoH5c8)
* Keras is an API for defining a model by layers (More info: [tf.keras](https://www.tensorflow.org/api_docs/python/tf/keras), [Building models with tf.keras](https://www.deeplearningdemystified.com/article/pdl-1))
* MNIST is a size-normalized database of handwritten digits used very often as example in deep and machine learning.
```
mnist = tf.keras.datasets.mnist  # loading mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()  # assigning the test and train data
x_train, x_test = x_train / 255.0, x_test / 255.0  # scale pixel values from [0, 255] to [0, 1]
```
4. To Do: What do _x_ and _y_ represent in this example?
Answer: x - Training data, e.g. images; y - Labels
5. To Do: Plot a sample of the dataset and print its respective label
```
random_sample = np.random.randint(0, len(x_train))
plt.imshow(x_train[random_sample])
print('Label:', y_train[random_sample])
```
## Creating the MLP model
```
model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),   # 28x28 image -> 784-element vector
    tf.keras.layers.Dense(128, activation='relu'),   # fully connected hidden layer
    tf.keras.layers.Dropout(0.2),                    # regularization: randomly drop 20% of units during training
    tf.keras.layers.Dense(10, activation='softmax')  # output: one probability per digit class (0-9)
]) # Multilayer perceptron
```
6. To Do: Name 5 examples of activation functions
Answer:
* Sigmoid
* Tanh
* ReLU
* Linear
* Binary
```
model.compile("adam", "sparse_categorical_crossentropy", metrics=['acc']) #Configurations of the model
```
7. To Do: What is `adam`?
Answer: An optimizer
8. To Do: What does `sparse_categorical_crossentropy` mean?
Answer: Loss-Function --> Labels are integers
9. To Do: What are `epochs`?
Answer: Specifies iterations over the entire dataset during the training process
```
h = model.fit(x_train, y_train, epochs=10) # Training the model
```
# Plotting how the model learned:
The 'keys()' method returns the list of keys contained in a dictionary, e.g.:
```
print(h.history.keys())
```
10. To Do: Plot the loss (edit just one line)
```
x_axis = np.arange(10)
y_axis = h.history['loss']
plt.plot(x_axis, y_axis, 'ko--')
plt.ylabel("loss")
plt.xlabel("epoch")
plt.xticks(np.arange(10), np.arange(10)+1) # sets the ticks for the plot on the x axis
```
# Evaluating the model on previously unseen data:
```
model.evaluate(x_test, y_test)
```
## MAGIC, around 98% accuracy!
# Convolutional Neural Network
CNNs usually achieve higher performance than other neural networks for image analysis. They contain convolutional layers, pooling layers and a Fully Connected (FC) layer:
```
cnn = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(8, (3,3), input_shape=(28,28,1), padding='same', activation='relu'),  # 8 filters, 3x3 kernel; input needs a channel axis
    tf.keras.layers.MaxPool2D(),                                        # downsample spatial dims by 2
    tf.keras.layers.Conv2D(16, (3,3), padding='same', activation='relu'),
    tf.keras.layers.MaxPool2D(),
    tf.keras.layers.Conv2D(32, (3,3), padding='same', activation='relu'),
    tf.keras.layers.Flatten(),                                          # feature maps -> vector for the dense head
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dropout(0.2),                                       # regularization before the classifier
    tf.keras.layers.Dense(10, activation='softmax')                     # one probability per digit class
])
```
11. To Do: What is "(3,3)" doing in Conv2D?
Answer: Kernel size
12. To Do: Next, **compile** the model similar to the MLP shown above:
```
cnn.compile("adam", "sparse_categorical_crossentropy", metrics=['acc'])
```
13. To Do: Next, you will **fit** the model. There is an error. Can you fix it?
```
h2 = cnn.fit(x_train[..., None], y_train, epochs=10)
```
Plot and evaluate your fancy CNN⚛
```
plt.plot(np.arange(10), h2.history['loss'], 'ko--')
plt.ylabel("loss")
plt.xlabel("epoch")
plt.xticks(np.arange(10), np.arange(10)+1)
cnn.evaluate(x_test, y_test)
```
Report here the final test accuracy: 99%
```
```
| github_jupyter |
# Figures for comparison of arrival direction and joint models
Here use the output from the `arrival_vs_joint` notebook to plot the figures shown in the paper.
<br>
<br>
*This code is used to produce Figures 6, 7 and 8 (left panel) in Capel & Mortlock (2019).*
```
import numpy as np
import h5py
import matplotlib as mpl
from matplotlib import pyplot as plt
import seaborn as sns
from pandas import DataFrame
from fancy import Data, Results
from fancy.plotting import AllSkyMap, Corner
from fancy.plotting.colours import *
# to match paper style
plt.style.use('minimalist')
# Define output files
source_type = "SBG_23"
detector_type = "PAO"
exp_factor = 1.
sim_output_file = '../output/joint_model_simulation_{0}_{2}_epsx{1:.0f}.h5'.format(source_type, exp_factor, detector_type)
arrival_output_file = '../output/arrival_direction_fit_{0}_{2}_epsx{1:.0f}.h5'.format(source_type, exp_factor, detector_type)
joint_output_file = '../output/joint_fit_{0}_{2}_epsx{1:.0f}.h5'.format(source_type, exp_factor, detector_type)
'''set detector and detector properties'''
if detector_type == "TA":
from fancy.detector.TA2015 import detector_params, Eth
elif detector_type == "PAO":
from fancy.detector.auger2014 import detector_params, Eth
else:
raise Exception("Undefined detector type!")
```
## Figure 6
The simulated data set and the Auger exposure.
```
from astropy.coordinates import SkyCoord
from astropy import units as u
from fancy.detector.exposure import m_dec
from fancy.interfaces.stan import Direction
# modify exposure contained in detector_params
# detector_params[3] == alpha_T
detector_params[3] *= exp_factor
# Read in simulation data
with h5py.File(sim_output_file, 'r') as f:
uhecr = f['uhecr']
arrival_direction = Direction(uhecr['unit_vector'][()])
energy = uhecr['energy'][()]
source = f['source']
source_direction = Direction(source['unit_vector'][()])
# Calculate the exposure as a function of declination
num_points = 220
rightascensions = np.linspace(-180, 180, num_points)
declinations = np.linspace(-np.pi/2, np.pi/2, num_points)
m = np.asarray([m_dec(d, detector_params) for d in declinations])
exposure_factor = (m / np.max(m))
# reset exposure factor
detector_params[3] /= exp_factor
# Colourmaps and normalisation
# Exposure
exp_cmap = mpl.colors.LinearSegmentedColormap.from_list('custom',
[lightgrey, grey], N = 6)
norm_proj = mpl.colors.Normalize(exposure_factor.min(), exposure_factor.max())
# UHECR energy
# max. of energy bin, set s.t. values in
# max(np.digitize(energy, energy_bins) - 1) < len(uhecr_color)
Emax = np.ceil(np.max(energy) / 10.) * 10.
uhecr_color = [lightblue, midblue, darkblue]
uhecr_cmap = mpl.colors.ListedColormap(uhecr_color)
energy_bins = np.logspace(np.log(Eth), np.log(Emax), 4, base = np.e)
uhecr_norm = mpl.colors.BoundaryNorm(energy_bins, uhecr_cmap.N)
# Legend
legend_elements = [mpl.lines.Line2D([0], [0], marker='o', color = 'w',
label = 'sources', markersize = 10,
markerfacecolor = 'k'),
mpl.lines.Line2D([0], [0], marker='o', color='w',
label='UHECRs', markersize = 15,
markerfacecolor = midblue, alpha = 0.8)]
# Figure
fig, ax = plt.subplots()
fig.set_size_inches((12, 6))
skymap = AllSkyMap(projection = 'hammer', lon_0 = 0, lat_0 = 0);
# Sources
for lon, lat in np.nditer([source_direction.lons, source_direction.lats]):
skymap.tissot(lon, lat, 2.0, 30, facecolor = 'k', alpha = 1.0, zorder = 5)
# UHECRs
for lon, lat, E in np.nditer([arrival_direction.lons, arrival_direction.lats, energy]):
i = np.digitize(E, energy_bins) - 1
skymap.tissot(lon, lat, 3.0 + (i*2), 30,
facecolor = uhecr_cmap.colors[i], alpha = 0.8, zorder = i+2)
# Exposure
# Uses scatter as bug with AllSkyMap.pcolormesh and contour that I still need to fix...
for dec, proj in np.nditer([declinations, exposure_factor]):
decs = np.tile(dec, num_points)
c = SkyCoord(ra = rightascensions * u.rad,
dec = decs * u.rad, frame = 'icrs')
lon = c.galactic.l.deg
lat = c.galactic.b.deg
if (proj == 0):
skymap.scatter(lon, lat, latlon = True, linewidth = 3,
color = white, alpha = 1, zorder = 1)
else:
skymap.scatter(lon, lat, latlon = True, linewidth = 3,
color = exp_cmap(norm_proj(proj)), alpha = 1, zorder = 1)
# Annotation
skymap.draw_border()
skymap.draw_standard_labels(minimal = True, fontsize = 20)
ax.legend(handles = legend_elements, bbox_to_anchor = (0.8, 0.85), fontsize = 20)
# Colorbar
cb_ax = plt.axes([0.25, 0, .5, .05], frameon = False)
bar = mpl.colorbar.ColorbarBase(cb_ax, norm = uhecr_norm, cmap = uhecr_cmap,
orientation = 'horizontal', drawedges = True,
alpha = 1)
bar.set_label('$\hat{E}$ / EeV', color = 'k', fontsize = 20)
bar.ax.tick_params(labelsize = 20)
```
## Figure 7
Comparison of the joint and arrival direction fits.
```
# Get f samples for both models and true f value.
f_a = Results(arrival_output_file).get_chain(['f'])['f']
f_j = Results(joint_output_file).get_chain(['f'])['f']
f_true = Results(sim_output_file).get_truths(['f'])['f']
fig, ax = plt.subplots()
fig.set_size_inches((6, 4))
sns.distplot(f_a, hist = False,
kde_kws = {'shade' : True, 'lw' : 2, 'zorder' : 0},
color = grey, label = 'arrival direction')
sns.distplot(f_j, hist = False,
kde_kws = {'shade' : True, 'lw' : 2, 'zorder' : 1},
color = purple, label = 'joint')
ax.axvline(f_true, 0, 10, color = 'k', zorder = 3, lw = 2., alpha = 0.7)
ax.set_xlim(0, 1)
# ax.set_ylim(0, 10)
ax.set_xlabel('$f$')
ax.set_ylabel('$P(f | \hat{E}, \hat{\omega})$')
ax.legend(bbox_to_anchor = (0.65, 1.0));
```
## Figure 8 (left panel)
```
# Get chains from joint fit and truths from simulation
results_sim = Results(sim_output_file)
results_fit = Results(joint_output_file)
keys = ['F0', 'L', 'alpha', 'B', 'f']
chain = results_fit.get_chain(keys)
# Convert form Stan units to plot units
chain['F0'] = chain['F0'] / 1.0e3 # km^-2 yr^-1
chain['L'] = chain['L'] * 10 # 10^-38 yr^-1
truth_keys = ['F0', 'L', 'alpha', 'B', 'f']
truth = results_sim.get_truths(truth_keys)
info_keys = ['Eth', 'Eth_sim']
info = results_sim.get_truths(info_keys)
# Correct for different Eth in sim and fit
# Also scale to plot units
flux_scale = (info['Eth'] / info['Eth_sim'])**(1 - truth['alpha'])
truth['F0'] = truth['F0'] * flux_scale # km^-2 yr^-1
truth['L'] = truth['L'][0] * flux_scale / 1.0e39 * 10 # 10^-38 yr^-1
labels = {}
labels['L'] = r'$L$ / $10^{38}$ $\mathrm{yr}^{-1}$'
labels['F0'] = r'$F_0$ / $\mathrm{km}^{-2} \ \mathrm{yr}^{-1}$'
labels['B'] = r'$B$ / $\mathrm{nG}$'
labels['alpha'] = r'$\alpha$'
labels['f'] = r'$f$'
params = np.column_stack([chain[key] for key in keys])
truths = [truth[key] for key in keys]
# Make nicely labelled dict
chain_for_df = {}
for key in keys:
chain_for_df[labels[key]] = chain[key]
# Make ordered dataframe
df = DataFrame(data = chain_for_df)
df = df[[labels['F0'], labels['L'], labels['alpha'], labels['B'], labels['f']]]
corner = Corner(df, truths, color=purple, contour_color=purple_contour)
```
### Footnote
In the paper I made a small typo in the plot labelling, $F_0$ is indeed in units of $\rm{km}^{-2} \ yr^{-1}$. Also, there are some small differences in the fit posteriors due to the use of a random seed.
| github_jupyter |
# Setting up the Data Science Environment
One of the largest hurdles beginners face is setting up an environment that they can quickly get up and running and analyzing data.
### Objectives
1. Understand the difference between interactive computing and executing a file
1. Ensure that Anaconda is installed properly with Python 3
1. Know what a path is and why it's useful
1. Understand the difference between a Python, iPython, Jupyter Notebook, and Jupyter Lab
1. Know how to execute a Python file from the command line
1. Be aware of Anaconda Navigator
1. Most important Jupyter Notebook tips
# Interactive Computing vs Executing a File
### Interactive Computing
Nearly all the work that we do today will be done **interactively**, meaning that we will be typing one, or at most a few lines of code into an **input** area and executing it. The result will be displayed in an **output** area.
### Executing a Python File
The other way we can execute Python code is by writing it within a file and then executing the entire contents of that file.
### Interactive Computing for Data Science
Interactive computing is the most popular way to analyze data using Python. You can get instant feedback which will direct how the analysis progresses.
### Writing Code in Files to Build Software
All software has code written in a text file. This code is executed in its entirety. You cannot add or change code once the file has been executed. Although most tutorials (including this one) will use an interactive environment to do data science, you will eventually need to take your exploratory work from an interactive session and put in inside of a file.
# Ensuring that Anaconda is Installed Properly with Python 3
You should have already [downloaded Anaconda][1]. Jan 1, 2020 will mark the last day that Python 2 will be officially supported. Let's ensure that you are running the latest version of Python 3.
1. Open up a terminal (Mac/Linux) or the Command Prompt (and not the Anaconda Prompt on Windows) and enter in **`python`**
1. Ensure that in the header you see Python version 3.X where X >= 6
![][2]
3. If you don't see this header with the three arrow **`>>>`** prompts and instead see an error, then we need to troubleshoot here.
## Troubleshooting
### Windows
The error message that you will see is **`'python' is not recognized as an internal or external command...`**
This means that your computer cannot find where the program **`python`** is located on your machine. Let's find out where it is located.
1. Open up the program **Anaconda Prompt**
1. Type in **`python`** and you should now be able to get the interactive prompt
1. Exit out of the prompt by typing in **`exit()`**
1. The reason you cannot get **`python`** to run in the **Command Prompt** is that during installation you did not check the box to add

1. It is perfectly well and good to use **Anaconda Prompt** from now on
1. If you so desire, you can [manually configure][3] your **Command Prompt**
### Mac/Linux
The error message you should have received is **`python: command not found`**. Let's try and find out where Python is installed on your machine.
1. Run the command: **`$ which -a python`**
![][4]
1. This outputs a list of all the locations where there is an executable file with the name **`python`**
1. This location must be contained in something called the **path**. The path is a list (separated by colons) containing directories to look through to find executable files
1. Let's output the path with the command: **`$ echo $PATH`**
![][5]
1. My path contains the directory (**`/Users/Ted/Anaconda/bin`**) from above so running the command **`python`** works for me.
1. If your path does not have the directory outputted from step 1 then we will need to edit a file called **`.bash_profile`** (or **`.profile`** on some linux machines)
1. Make sure you are in your home directory and run the command:
> **`nano .bash_profile`**
1. This will open up the file **`.bash_profile`**, which may be empty
1. Add the following line inside of it: **`export PATH="/Users/Ted/anaconda3/bin:$PATH"`**
1. Exit (**`ctrl + x`**) and make sure to save
1. Close and reopen the terminal and execute: **`$ echo $PATH`**
1. The path should be updated with the Anaconda directory prepended to the front
1. Again, type in **`python`** and you should be good to go
1. **`.bash_profile`** is itself a file of commands that get executed each time you open a new terminal.
### More on the path (all operating systems)
The path is a list of directories that the computer will search in order, from left to right, to find an executable program with the name you entered on the command line. It is possible to have many executables with the same name but in different folders. The first one found will be the one executed.
### Displaying the path
* Windows: **`$ path`** or **`$ set %PATH%`**
* Mac/Linux **`$ echo $PATH`**
### Finding the location of a program
* Windows: **` where program_name`**
* Mac\Linux: **`which program_name`**
### Editing the path
* Windows: Use the [set (or setx)][6] command or from a [GUI][7]
* Mac\Linux: By editing the **`.bash_profile`** as seen above
# python vs ipython
**`python`** and **`ipython`** are both executable programs that run Python interactively from the command line. The **`python`** command runs the default interpreter, which comes prepackaged with Python. There is almost no reason to ever run this program. It has been surpassed by **`ipython`** (interactive Python) which you also run from the command line. It adds lots of functionality such as syntax highlighting and special commands.
# iPython vs Jupyter Notebook
The Jupyter Notebook is a browser based version of iPython. Instead of being stuck within the confines of the command line, you are given a powerful web application that allows you to intertwine both code, text, and images. [See this][8] for more details of the internals
![][9]
# Jupyter Lab
Jupyter Lab is yet another interactive browser-based program that allows you to have windows for notebooks, terminals, data previews, and text editors all on one screen.
# Executing Python Files
An entire file of Python code can be executed either from the command line or from within this notebook. We execute the file by placing the location of the file after the **`python`** command. For instance, if you are in the home directory of this repository, the following run the following on the command line to play a number guessing game.
**`python scripts/guess_number.py`**
### Use a magic function to run a script inside the notebook
Instead of going to the command line, you can run a script directly in the notebook. Run the next two cells.
[1]: https://www.anaconda.com/download
[2]: ../images/pythonterminal.png
[3]: https://medium.com/@GalarnykMichael/install-python-on-windows-anaconda-c63c7c3d1444
[4]: ../images/which_python.png
[5]: ../images/path_mac.png
[6]: https://stackoverflow.com/questions/9546324/adding-directory-to-path-environment-variable-in-windows
[7]: https://www.computerhope.com/issues/ch000549.htm
[8]: http://jupyter.readthedocs.io/en/latest/architecture/how_jupyter_ipython_work.html
[9]: ../images/jupyter_internal.png
```
%matplotlib notebook
```
%run /Users/jasvirdhillon/Documents/GitHub/Intro-Data-Science-Python-master/scripts/rain.py
# Anaconda Navigator vs Command Line
Anaconda comes with a simple GUI to launch Jupyter Notebooks and Labs and several other programs. This is just a point and click method for doing the same thing on the command line.
# Important Jupyter Notebook Tips
### Code vs Markdown Cells
* Each cell is either a **Code** cell or a **Markdown** cell.
* Code cells always have **`In [ ]`** to the left of them and understand Python code
* Markdown cells have nothing to the left and understand [markdown](https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet), a simple language to quickly formatting text.
### Edit vs Command Mode
* Each cell is either in **edit** or **command** mode
* When in edit mode, the border of the cell will be **green** and there will be a cursor in the cell so you can type
* When in command mode, the border will be **blue** with no cursor present
* When in edit mode, press **ESC** to switch to command mode
* When in command mode, press **Enter** to switch to edit mode (or just click in the cell)
### Keyboard Shortcuts
* **Shift + Enter** executes the current code block and moves the cursor to the next cell
* **Ctrl + Enter** executes the current code block and keeps the cursor in the same cell
* Press **Tab** frequently when writing code to get a pop-up menu with the available commands
* When calling a method, press **Shift + Tab + Tab** to have a pop-up menu with the documentation
* **ESC** then **a** inserts a cell above
* **ESC** then **b** inserts a cell below
* **ESC** then **d + d** deletes a cell
| github_jupyter |
```
# Core scientific stack, gradient-boosting libraries, and the project's
# local Utility helpers (brought in via the wildcard import below).
import os, sys, time
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
import scipy.sparse
import sklearn
from sklearn.pipeline import Pipeline
from sklearn.model_selection import PredefinedSplit
from sklearn.linear_model import LinearRegression, ElasticNet, ElasticNetCV
import xgboost as xgb
import lightgbm as lgbm
sys.path.insert(1, './')
import Utility
# NOTE(review): `imp` is deprecated (removed in Python 3.12); use
# importlib.reload for interactive reloading instead.
import imp
#imp.reload(Utility)
from Utility import *
# Print library versions for reproducibility of the results below.
for p in [np, pd, sklearn, lgbm]:
    print (p.__name__, p.__version__)
```
In this file, we load cleaned training data, train model, and save model.
# Load Cleaned Data
```
# Load the pre-cleaned train/validation splits produced by the data-cleaning
# notebook (HDF5 stores, each with a single 'df' key).
train_df = pd.read_hdf('../CleanData/trainDF.h5', 'df')
valid_df = pd.read_hdf('../CleanData/validDF.h5', 'df')
train_df.info()
valid_df.info()
# The prediction target is the 'target' column of each split.
Y_train = train_df['target']
Y_valid = valid_df['target']
```
# LightGBM
```
# Lagged price/target aggregates (1-6 and 12 months back), mean encodings
# per shop/item/category, and the calendar month.
lgbm_features = ['avg_item_price_lag_1', 'target_lag_1', 'target_shop_lag_1', 'target_item_lag_1',
                 'target_item_category_lag_1', 'avg_item_price_lag_2', 'target_lag_2', 'target_shop_lag_2',
                 'target_item_lag_2', 'target_item_category_lag_2', 'avg_item_price_lag_3', 'target_lag_3',
                 'target_shop_lag_3', 'target_item_lag_3', 'target_item_category_lag_3', 'avg_item_price_lag_4',
                 'target_lag_4', 'target_shop_lag_4', 'target_item_lag_4', 'target_item_category_lag_4',
                 'avg_item_price_lag_5', 'target_lag_5', 'target_shop_lag_5', 'target_item_lag_5',
                 'target_item_category_lag_5', 'avg_item_price_lag_6', 'target_lag_6', 'target_shop_lag_6',
                 'target_item_lag_6', 'target_item_category_lag_6', 'avg_item_price_lag_12', 'target_lag_12',
                 'target_shop_lag_12', 'target_item_lag_12', 'target_item_category_lag_12', 'shop_mean',
                 'item_mean', 'shop_item_mean', 'item_category_mean', 'month']
lgbm_train_data = lgbm.Dataset(train_df[lgbm_features], label=Y_train, feature_name=lgbm_features) #categorical_feature
lgbm_valid_data = lgbm.Dataset(valid_df[lgbm_features], label=Y_valid, feature_name=lgbm_features)
# GBDT hyper-parameters; training stops early after 20 rounds without
# validation-RMSE improvement.
params = {'objective':'regression', 'metric':['rmse'], 'boosting_type':'gbdt', 'num_rounds':100, 'eta':0.2,
          'max_depth':8, 'min_data_in_leaf':150, 'min_gain_to_split':0.01,
          'feature_fraction':0.7, 'bagging_freq':0, 'bagging_fraction':1.0, 'lambda_l1':0,
          'lambda_l2':0.001, 'early_stopping_round':20, 'verbosity':1}
eval_metrics = {}
start = time.time()
# NOTE(review): `evals_result` and `verbose_eval` were removed in LightGBM
# 4.x in favour of callbacks — this cell assumes LightGBM 3.x.
lgbm_model= lgbm.train(params, lgbm_train_data, valid_sets=[lgbm_train_data, lgbm_valid_data],
                       valid_names=['train', 'valid'], evals_result=eval_metrics, verbose_eval=True)
end = time.time()
print(end-start)
# Plot training progress of light GBM across number of iterations.
plot_lgbm_eval_metrics(eval_metrics)
# Show variable importance.
show_lgbm_var_imp(lgbm_model)
# Predict on training and validation set.
Z_train_lgbm = lgbm_model.predict(train_df[lgbm_features])
# Only validation predictions are clipped to [0, 20] — presumably the
# competition's target range; TODO confirm against the evaluation rules.
Z_valid_lgbm = lgbm_model.predict(valid_df[lgbm_features]).clip(0,20)
# Compute performance on training and validation set.
compute_reg_score(Y_train, Z_train_lgbm)
print('-'*100)
compute_reg_score(Y_valid, Z_valid_lgbm)
```
# Linear Model
```
# Start with linear model using all features. We will use elastic net with some parameters, and fine tune later.
lr_features = ['avg_item_price_lag_1', 'target_lag_1', 'target_shop_lag_1', 'target_item_lag_1',
'target_item_category_lag_1', 'avg_item_price_lag_2', 'target_lag_2', 'target_shop_lag_2',
'target_item_lag_2', 'target_item_category_lag_2', 'avg_item_price_lag_3', 'target_lag_3',
'target_shop_lag_3', 'target_item_lag_3', 'target_item_category_lag_3', 'avg_item_price_lag_4',
'target_lag_4', 'target_shop_lag_4', 'target_item_lag_4', 'target_item_category_lag_4',
'avg_item_price_lag_5', 'target_lag_5', 'target_shop_lag_5', 'target_item_lag_5',
'target_item_category_lag_5', 'avg_item_price_lag_6', 'target_lag_6', 'target_shop_lag_6',
'target_item_lag_6', 'target_item_category_lag_6', 'avg_item_price_lag_12', 'target_lag_12',
'target_shop_lag_12', 'target_item_lag_12', 'target_item_category_lag_12', 'shop_mean',
'item_mean', 'shop_item_mean', 'item_category_mean', 'month']
#lr_model = LinearRegression(normalize=True, n_jobs=-1)
# Elastic net with a near-zero alpha behaves almost like unregularized OLS;
# l1_ratio=0.1 leans toward the L2 penalty.
# NOTE(review): the `normalize` parameter was removed from scikit-learn
# estimators in 1.2 — this notebook assumes an older scikit-learn.
lr_model = ElasticNet(normalize=True, alpha=1e-8, l1_ratio=0.1)
lr_model.fit(train_df[lr_features], Y_train)
# Predict on training and validation set.
Z_train_lr = lr_model.predict(train_df[lr_features])
# Validation predictions are clipped to the [0, 20] target range.
Z_valid_lr = lr_model.predict(valid_df[lr_features]).clip(0,20)
# Compute performance on training and validation set.
compute_reg_score(Y_train, Z_train_lr)
print('-'*100)
compute_reg_score(Y_valid, Z_valid_lr)
# Let's examine the coefficients estimated by elastic net.
beta = lr_model.coef_
beta = pd.Series(beta, index=lr_features)
beta
# Beta coefficients are all non-zeros. We will need to tune elastic net to do feature selection for linear model.
# Do cross-validation to tune elastic net.
# Prepare cross validation data.
# PredefinedSplit convention: -1 marks rows that always stay in the
# training fold, 0 marks the single held-out validation fold.
test_fold = np.full(train_df.shape[0], -1, dtype=np.int8)
sel = train_df['date_block_num']>=25 #use 25,26,27 as validation set when tuning elastic net
test_fold[sel] = 0
ps = PredefinedSplit(test_fold=test_fold)
# Base params
max_iter = 1000
# With alphas=None, ElasticNetCV chooses n_alphas values automatically
# along a log-scaled path controlled by `eps`.
alphas = None
#alphas = [.1, .2, .3, .4, .5, .6, .7, .8, .9, 1]
n_alphas = 10
#l1_ratio = [.1, .3, .5, .7, .9, .95, .99, 1]
l1_ratio = [.1, .5, 1]
ecv_params = {'cv': ps, 'random_state': 0, # Changing this could do ensembling options
              'alphas': alphas, 'n_alphas': n_alphas, 'l1_ratio': l1_ratio,
              'eps': 0.001, 'tol': 0.0001, 'max_iter': max_iter, 'fit_intercept': True, 'normalize': True,
              'positive': False, 'selection': 'random', 'verbose': 2, 'n_jobs': -1
              }
# Tune
ecv = ElasticNetCV()
ecv = ecv.set_params(**ecv_params)
ecv = ecv.fit(train_df[lr_features], Y_train)
# Get best parameter from ElasticNetCV.
best_params = (ecv.alpha_, ecv.l1_ratio_, ecv.n_iter_)
best_params
# Get the corresponding elastic net coefficients.
beta = pd.Series(ecv.coef_, index=lr_features)
beta
# Let's visualize the magnitude of coefficients estimated by the best elastic net model.
fig = plt.figure(figsize=(12,6))
# Only coefficients with absolute value above 0.01 are plotted.
sel = np.abs(beta)>0.01
beta[sel].plot.bar()
plt.xticks(rotation=45)
# Generate predictions from the best elastic net model.
Z_train_ecv = ecv.predict(train_df[lr_features])
# Validation predictions are clipped to the [0, 20] target range.
Z_valid_ecv = ecv.predict(valid_df[lr_features]).clip(0,20)
# Compute performance of the best elastic net model.
compute_reg_score(Y_train, Z_train_ecv)
print('-'*100)
compute_reg_score(Y_valid, Z_valid_ecv)
# Performance is comparable to the model without fine-tuning.
# We will retrain linear model on the training data using features selected by the best elastic net model only.
# Reduced feature set: the columns whose elastic-net coefficients survived
# the selection above.
lr_features = ['target_lag_1', 'target_lag_2', 'target_lag_3', 'target_lag_4', 'target_lag_5', 'target_lag_6',
               'shop_mean', 'item_mean', 'shop_item_mean', 'item_category_mean']
# NOTE(review): `normalize` was removed from scikit-learn estimators in 1.2;
# this cell assumes an older scikit-learn.
lr_model = LinearRegression(normalize=True, n_jobs=-1)
lr_model.fit(train_df[lr_features], Y_train)
# Let's examine the estimated coefficients.
beta = lr_model.coef_
beta = pd.Series(beta, index=lr_features)
beta
# Predict on training and validation set.
Z_train_lr = lr_model.predict(train_df[lr_features])
# Validation predictions are clipped to the [0, 20] target range.
Z_valid_lr = lr_model.predict(valid_df[lr_features]).clip(0,20)
# Compute performance on training and validation set.
compute_reg_score(Y_train, Z_train_lr)
print('-'*100)
compute_reg_score(Y_valid, Z_valid_lr)
```
# Ensembling
We will combine predictions from the light GBM and linear models. First, check that the two sets of predictions are not overly correlated.
```
# Visual check that the two models' validation predictions are not
# redundant before blending them.
plt.scatter(Z_valid_lgbm, Z_valid_lr)
# They are somewhat correlated.
```
### Weighted Averaging
We will use a simple convex combination to combine the two sets of predictions. We will find the optimal combination coefficient alpha using grid search over the range alphas_to_try. The best alpha should have the lowest RMSE on the validation predictions.
```
# Grid-search the convex-combination weight over [0, 1] in steps of 0.001,
# keeping the weight that minimizes validation RMSE (first minimum wins
# on ties, matching a strict "<" comparison).
alphas_to_try = np.linspace(0, 1, 1001)
best_alpha = 0
rmse_train_simple_mix = np.inf
for candidate in alphas_to_try:
    blended = candidate*Z_valid_lgbm + (1 - candidate)*Z_valid_lr
    candidate_rmse = np.sqrt(mean_squared_error(Y_valid, blended))
    if candidate_rmse < rmse_train_simple_mix:
        rmse_train_simple_mix = candidate_rmse
        best_alpha = candidate
best_alpha
# Compute performance of the best combined validation prediction.
Z_mix = best_alpha*Z_valid_lgbm + (1 - best_alpha)*Z_valid_lr
compute_reg_score(Y_valid, Z_mix)
```
| github_jupyter |
#### stdin and stdout ** skipped**
piping data at the command line if you run Python scripts through it.
```
import sys, re
sys.argv[0]
# sys.argv is the list of command-line arguments
# sys.argv[0] is the name of the program itself
# sys.argv[1] will be the regex specified at the command line
# NOTE(review): inside a notebook sys.argv[1] is normally a kernel flag or
# absent — this cell illustrates how a command-line script would work.
regex = sys.argv[1]
# Echo every stdin line matching the regex (a minimal grep).
for line in sys.stdin:
    if re.search(regex, line):
        sys.stdout.write(line)
```
#### Reading and writing files
For example, imagine you have a file full of email addresses, one per line, and that
you need to generate a histogram of the domains. The rules for correctly extracting
domains are somewhat subtle (e.g., the Public Suffix List), but a good first approximation
is to just take the parts of the email addresses that come after the @. (Which
gives the wrong answer for email addresses like joel@mail.datasciencester.com.)
```
def get_domain(email_address: str) -> str:
    """Lower-case the address and return everything after the last '@'.

    If the string contains no '@', the whole lower-cased string is returned.
    """
    _, _, domain = email_address.lower().rpartition("@")
    return domain or email_address.lower()
# a couple of tests
assert get_domain("abola@gmail.com") == "gmail.com"
assert get_domain("abola@hotmail.com") == "hotmail.com"
# Just stick some data there
with open('data/email_addresses.txt', 'w') as f:
    f.write("joelgrus@gmail.com\n")
    f.write("joel@m.datasciencester.com\n")
    f.write("this is a fake line\n")
    f.write("joelgrus@m.datasciencester.com\n")
    f.write("joel@hotmail.com\n")
from collections import Counter
# Histogram of domains; lines without an '@' (the fake line) are skipped.
with open('data/email_addresses.txt', 'r') as f:
    domain_counts = Counter(get_domain(line.strip())
                            for line in f
                            if "@" in line)
domain_counts
```
Delimited files
```
import csv
# Write a small colon-delimited sample file that is parsed below.
with open('data/colon_delimited_stock_prices.txt', 'w') as f:
    f.write("""date:symbol:closing_price
6/20/2014:AAPL:90.91
6/20/2014:MSFT:41.68
6/20/2014:FB:64.5
""")
def process_row(closing_price: float) -> bool:
    """Return True when the closing price exceeds the 61 threshold.

    BUG FIX: the return annotation previously said ``float`` although a
    ``>`` comparison always yields a bool.
    """
    return closing_price > 61
# Parse the colon-delimited file; DictReader yields one dict per data row
# keyed by the header line.
with open('data/colon_delimited_stock_prices.txt') as f:
    colon_reader = csv.DictReader(f, delimiter = ":")
    for dict_row in colon_reader:
        #print(dict_row)
        date = dict_row["date"]
        symbol = dict_row["symbol"]
        closing_price = float(dict_row["closing_price"])
        bigger_61 = process_row(closing_price)
        print(bigger_61)
today_prices = { 'AAPL' : 90.91, 'MSFT' : 41.68, 'FB' : 64.5 }
# Write the prices back out as comma-delimited rows with a header line.
with open("data/comma_delimited_stock_prices.txt", "w") as f:
    csv_writer = csv.DictWriter(f, delimiter = ',', fieldnames=["stock", "price"])
    csv_writer.writeheader()
    for stock, price in today_prices.items():
        csv_writer.writerow({"stock": stock, "price": price})
```
#### Scraping the web **fun but not completed now**
Using an Unauthenticated API **same as above**
- good Twitter authentication example and use of the Twython API
```
import requests, json
github_user = "giordafrancis"
endpoint = f"https://api.github.com/users/{github_user}"
# NOTE(review): this endpoint returns the user's profile object, not their
# repositories, despite the variable name.
repos = json.loads(requests.get(endpoint).text)
repos
```
| github_jupyter |
# **Spotify Data Analysis**
---
I want to accomplish 2 things with this project. First, I want to learn how to use the Spotify API. Learning how to use this API serves as a great gateway into the API Universe.
The documentation is amazing, the API calls you can make to Spotify, per day, is more than enough for almost any kind of project and, the information you can get from it is really interesting.
I also want to do some research on the song profiles that different countries consume and predict if new releases could be successful in different regions. To accomplish this, I'm going to create a dataframe with all the songs from some of the most popular playlists per country. Once I have these songs I'm going to use the [**Spotify Audio Features**](https://developer.spotify.com/documentation/web-api/reference/#endpoint-get-several-audio-features) on each song and, as the last step, I'm going to use a Machine Learning model with these features as my Dependent Variable in a prediction exercise.
In addition to that, I wanted to do some research on the characteristics of music that each country consumes and how this consumption has changed over the last decade. To do this I want to create a dataset with all the songs from the most important albums of the last 20 years, analyze their characteristics, and see if there's a particular change in the consumption of certain types of artists or genres.
## 1. Required Libraries
---
```
# HTTP + auth plumbing for the Spotify Web API, pandas for tabular results,
# and IPython display helpers for showing album art inline.
import requests
import base64
import datetime
import pandas as pd
from urllib.parse import urlencode
from IPython.display import Image
from IPython.core.display import HTML
```
## 2. Helper Functions
---
I created several functions inside the `helper_func.py` python file. I use them inside this notebook to gather all the data I need from Spotify, in the most efficient way possible.
```
# Auto-reload the helper module so edits to helper_func.py are picked up
# without restarting the kernel.
%load_ext autoreload
%autoreload 2
import helper_func as hf
```
### 2.1 Description
---
* `auth()`: This functions generates the `access_token` by requesting the Client_ID and the Client_Secret. This token is important because it's the key to use all the functionalities of the API.
* `search_spotify()`: The purpose of this function is to do searches using the Spotify API and get a JSON file with the information that was requested. Using this function, we can search for information about albums, artists, playlists, tracks, shows, and episodes.
* `get_list_of_albums()`: This query will return a dataframe with all the albums from a single artist.
* `album_information()`: This function returns key information about a list of album ids.
* `get_multiple_artists_from_albums()`: Some albums have more than 1 artist. This function creates a dataframe that creates new columns for each of the artists that collaborated on the album.
* `songs_information()`:This function returns a dataframe with all the songs from an artist along with additional data from those songs and the function also returns a list with the unique ids from those songs.
* `artists_from_songs()`: Some songs have more than 1 performer. This list creates a dataframe that adds new columns for each artist that was involved with the song.
* `multiple_artists_songs()`: This function can return a dataframe with detailed information about an artist.
* `song_features()`: This function returns a dataframe with the features that Spotify assigns to each song.
* `playlist_data()`: This function returns a dataframe with key data about a particular playlist.
## 3. Set-Up
---
### 3.1 Access Token
---
Depending on the level of access you want, there are several ways of interacting with the Spotify API.
For my personal workflow I created a function called `auth()`. This function handles all the steps that need to be fulfilled to get the access token. When called, the function will ask for a `Client_ID` and a `Client_Secret`. You can learn how to get those credentials in this [**article**](https://developer.spotify.com/documentation/general/guides/app-settings/).
<ins>**Notes:**</ins>
* [**Here**](https://developer.spotify.com/documentation/general/guides/authorization-guide/#client-credentials-flow) you can find the article in which Spotify explains how the `access token` works. In addition, it also mentions different workflows that can be followed in order to get the token.
```
# Client-credentials flow: prompts for Client_ID / Client_Secret and returns
# the bearer token used by every API call below.
access_token = hf.auth()
```
## 4. Getting Data From Spotify
---
The data from this project comes from the Spotify API. To learn more about the different calls you can make to the API check their documentation by clicking [**here**](https://developer.spotify.com/documentation/web-api/reference/).
I'll be using the "helper functions" that I previously imported as `hf`
### 4.1 `Search` function.
---
There´s a function inside the `helper_functions` library called `search_spotify`. The purpose of this function is to do searches using the Spotify API and get a JSON file in return with the information that was requested.
Using this function, we can search information about albums, artists, playlists, tracks, shows and episodes.
This function accepts 3 parameters:
* `access_token`: The json response we get after running the `hf.auth()`´function.
* `query`: The term we want to look for.
* `search_type`: Add the string 'albums', 'artists', 'playlists', 'tracks', 'shows' or 'episodes' depending on what result you're looking to get.
<ins>**Notes:**</ins>
* Click [**here**](https://developer.spotify.com/documentation/web-api/reference/#category-search) to learn more about the "Search" module of the Spotify API.
* If you want to test the "Search" API call in a Spotify Console click [**here**](https://developer.spotify.com/console/get-search-item/).
```
# Search_Type Options: album , artist, playlist, track, show and episode
albums = hf.search_spotify(access_token, query="Fine Line", search_type='album')
# Pull the medium-size cover art and the id of the first search hit.
album_cover = albums["albums"]["items"][0]["images"][1]["url"]
album_id = albums["albums"]["items"][0]["id"]
Image(url= f"{album_cover}", width=200, height=200)
```
### 4.2 Albums Function
---
There´s a function inside the `helper_functions` library called `get_list_of_albums()`. The purpose of this function is to return a list with all the albums from a single artist.
The parameters that this function accepts are:
>* `at`: The Access_Token. REQUIRED
>* `artist`: String with the name of a single artist. OPTIONAL
>* `lookup_id`: ID of a single Artist. OPTIONAL.
>* `market`: Choose the country you would like to get the information from. The default is "US". OPTIONAL
<ins>**Notes:**</ins>
* You must choose to use `artist` or `lookup_id` but not the two at the same time.
* Click [**here**](https://developer.spotify.com/documentation/web-api/reference/#category-albums) to learn more about the "Albums" module on the Spotify API.
* If you want to test the "Tracks" API call in a Spotify Console click [**here**](https://developer.spotify.com/console/albums/).
```
# All album ids for the artist (US market by default); preview the first five.
albums_ids= hf.get_list_of_albums(at=access_token, artist="Ed Sheeran", lookup_id=None)
albums_ids[0:5]
```
### 4.2.1 Information About the Albums
---
There´s a function inside the helper_functions library called `album_information()`. The purpose of this function is to return a dataframe with key information about the albums that are passed to it.
This function simultaneously will return a json file that can be used by other functions inside the the `hf` library.
This function accepts the next parameters:
>* `list_of_albums`. A python list with all the albums that we want to transform into a dataset. REQUIRED.
>* `at`: Which is the Access_Token. REQUIRED
>* `market`: Choose the country you would like to get the information from. The default is "US". OPTIONAL
```
# Album metadata dataframe plus the raw JSON reused by later cells.
album_info_list, albums_json = hf.album_information(list_of_albums = albums_ids, at=access_token)
album_info_list.head()
```
### 4.2.2 Multiple Artists on a single Album
---
Some albums have more than 1 artist. This function creates a dataframe that creates new columns for each of the artists that collaborated on the album.
The only parameter this function accepts is:
>* `albums_json`: A json file previously generated when the `album_information()` function is called. REQUIRED.
```
# BUG FIX: the original cell called `album_information(...)` without the
# `hf.` prefix (a NameError) and merely repeated the §4.2.1 call.  Per the
# section text, this cell should call get_multiple_artists_from_albums(),
# which only takes the albums_json produced above.
multiple_artists_albums = hf.get_multiple_artists_from_albums(albums_json=albums_json)
multiple_artists_albums.head()
```
### 4.3 Get all the `Tracks` from a single Artist.
---
There´s a function inside the `helper_functions` library called `song_information()`. The purpose of this function is to get all the tracks from an artist.
The only parameter this function accepts is:
>* `albums_json`: A json file previously generated when the `album_information()` function is called. REQUIRED.
<ins>**Notes:**</ins>
* Click [**here**](https://developer.spotify.com/documentation/web-api/reference/#category-tracks) to learn more about the "Tracks" module on the Spotify API.
* If you want to test the "Tracks" API call in a Spotify Console click [**here**](https://developer.spotify.com/console/get-several-tracks/).
```
# Every track across the artist's albums, plus the flat list of track ids.
list_of_songs_, list_of_songs_tolist = hf.songs_information(albums_json= albums_json)
list_of_songs_.head()
```
### 4.4 Get all the artists that collaborate on the Tracks we're exploring
---
There´s a method inside the `helper_functions` library called `artists_from_songs()`. This function helps to create a dataframe that adds new columns for each artist that was involved with the song.
This function accepts the next parameters:
>* `list_of_songs_ids`: A python list with the unique ids of songs. A list of these characteristics is generated after calling the `songs_information()` function. However, it works with any python list as long as it has the unique id's that Spotify assigns to each song. REQUIRED.
>* `at`: The Access_Token. REQUIRED
<ins>**Notes:**</ins>
* To get a list with all the albums from a single artist, I recommend to use the `album_ids()` method from the `hf` library.
```
# Expand per-song artist collaborations; also returns the raw JSON plus
# flat lists of artist ids and song ids used by later lookups.
artists_in_albums_, songs_json, artists_id_, songs_id_ = hf.artists_from_songs(list_of_songs_ids= list_of_songs_tolist,at=access_token)
artists_in_albums_
```
The function also returns a list with all the artist’s id's. In my example I stored it in the variable called `artists_id_`. Here's an example of how that variable would look like if we printed it:
```
# Preview the first ten artist ids returned by artists_from_songs().
# BUG FIX: the markdown above says this cell shows `artists_id_`, but the
# original printed `songs_id_` instead.
artists_id_[0:10]
```
### 4.5 Get data from the artists
---
There´s a method inside the `helper_functions` library called `multiple_artists_songs()`. This function helps to create a dataframe with key information about the artists we're passing to it.
The parameters that this function accepts are:
>* `at`: The Access Token
>* `list_of_artists_ids`: A python list with the id's that Spotify assigns to each artist. The function `list_of_songs_tolist()` returns a list with these characteristics.
```
# Detailed artist records for every collaborator id gathered above.
artist_list_df= hf.multiple_artists_songs(list_of_artists_ids=artists_id_,at=access_token)
artist_list_df.head()
```
### 4.6 Get Features from each song
---
This function returns a dataframe with the features that Spotify assigns to each song.
The parameters that this function accept are:
>* `at`: The Access Token
>* `list_of_songs_ids`: A python list with the unique id's that Spotify assigns to each track. The functions `list_of_songs_ids()` and `artists_from_songs()` return a list with these characteristics.
```
# Audio features (danceability, energy, ...) for every collected track;
# returns both the dataframe and the raw JSON response.
song_features, songs_features_json= hf.song_features(list_of_songs_ids=list_of_songs_tolist, at=access_token)
song_features
```
### 4.7 Information from Playlists
---
This function returns a dataframe with key data about a particular playlist.
The parameters that this function accepts are:
>* `at`: The Access Token
>* `playlist_id`: The unique id that Spotify assigns to each playlist.
<ins>**Notes:**</ins>
* Click [**here**](https://developer.spotify.com/documentation/web-api/reference/#category-playlists) to learn more about the information you can get from the "Playlist" API call.
```
# Tracks and metadata of a single playlist (the id comes from a Spotify URI).
play_list_json_V2, empty_list_one_V2= hf.playlist_data(at=access_token, playlist_id="37i9dQZF1DWWZJHBoz7SEG")
empty_list_one_V2.head()
```
## 5. Data Analysis
---
Now we have the necessary functions to start analyzing our data. As mentioned at the beginning of this notebook, we want to get a sample of songs from different countries and analyze the characteristics of those songs. Creating this dataframe is going to be a 3 step process:
1. Get 10 playlists from each country. This will create a robust sample dataframe.
2. Get all the songs from those playlists and create a dataframe with them.
3. Once we have the dataframe with all of our songs, we add the features to them.
### 5.1. Getting the playlists
---
Spotify has an API call that gathers the top playlists per country. There's a function inside our `hf` library called `top_playlists()` which accepts a list of countries as parameters and returns a dataframe with the top playlists from those countries.
```
# Top playlists for Canada and Great Britain; preview four rows.
top_playlists_per_country = hf.top_playlists(country= ["CA","GB"], at=access_token)
top_playlists_per_country.iloc[[8,9,10,11],:]
```
### 5.2 Songs from the playlists
---
We can use the `playlist_data()` function to get the songs from the playlists we got in the last step.
```
def get_songs_from_recommended_playlists(playlists, at=None):
    """Collect every unique song from the given playlists dataframe.

    Parameters
    ----------
    playlists : dataframe produced by hf.top_playlists(); must contain the
        columns 'playlist_id' and 'country'.
    at : Spotify access token.  Defaults to the notebook-global
        `access_token` so existing calls keep working (the original
        silently relied on that global).

    Returns the list of unique song ids and the merged songs dataframe.
    """
    token = access_token if at is None else at
    # Playlist ids to iterate, plus the playlist -> country mapping used to
    # tag each song after the fetch.
    playlists_ids = playlists.playlist_id.tolist()
    df_countries = playlists[["playlist_id", "country"]]
    # Fetch the tracklist of every playlist and stack them together.
    empty_list_one = []
    for playlist_id in playlists_ids:
        _, songs_from_playlist = hf.playlist_data(at=token, playlist_id=f"{playlist_id}")
        empty_list_one.append(songs_from_playlist)
    # Attach each playlist's country and drop duplicate songs.
    df_songs_many_features = pd.concat(empty_list_one).merge(df_countries, on="playlist_id", how="inner").drop_duplicates(subset="song_id")
    list_df_songs_many_features = df_songs_many_features.song_id.tolist()
    return list_df_songs_many_features, df_songs_many_features
list_df_songs_many_features, df_songs_many_features = get_songs_from_recommended_playlists(playlists=top_playlists_per_country)
df_songs_many_features.head()
```
### 5.3 Adding features to each song.
---
Now that we have all the songs from each playlist, we can use the `song_features()` function to add their features.
```
# BUG FIX: hf.song_features() returns a (dataframe, raw-json) pair — see
# its use in §4.6 — so the original, which bound the whole tuple to
# `song_features`, would fail on the .head() call below.  Unpack instead.
song_features, _songs_features_json = hf.song_features(list_of_songs_ids=list_df_songs_many_features, at=access_token)
song_features.head()
```
### 5.3.1 Merging the features with the playlist information
---
```
# Join audio features onto the playlist/country metadata by song id.
df_p = song_features.merge(df_songs_many_features, on="song_id", how="inner")
df_p.head()
```
### 5.4 Getting the preliminary data analysis
---
```
# Average each audio feature per country (column selection is positional).
# NOTE(review): iloc positions are brittle if the merged frame's column
# order changes — confirm the indices before reusing this cell.
comparing_countries = df_p.iloc[:,[0,20,24,1,2,3,4,5,6,7,8,9,10,11,-1]].groupby("country").mean().transpose()
comparing_countries
```
| github_jupyter |
```
from molmap import model as molmodel
import molmap
import matplotlib.pyplot as plt
import pandas as pd
from tqdm import tqdm
from joblib import load, dump
tqdm.pandas(ascii=True)
import numpy as np
import tensorflow as tf
import os
os.environ["CUDA_VISIBLE_DEVICES"]="1"
np.random.seed(123)
tf.compat.v1.set_random_seed(123)
tmp_feature_dir = './tmpignore'
if not os.path.exists(tmp_feature_dir):
os.makedirs(tmp_feature_dir)
def get_attentiveFP_idx(df):
    """ attentiveFP dataset"""
    # Load the canonical AttentiveFP train/valid/test split for BBBP and map
    # its SMILES strings back onto row indices of `df`.
    train, valid,test = load('./split_and_data/07_BBBP_attentiveFP.data')
    print('training set: %s, valid set: %s, test set %s' % (len(train), len(valid), len(test)))
    train_idx = df[df.smiles.isin(train.smiles)].index
    valid_idx = df[df.smiles.isin(valid.smiles)].index
    test_idx = df[df.smiles.isin(test.smiles)].index
    # Counts are printed again so any mismatch with the split sizes
    # (e.g. SMILES not found in df) is immediately visible.
    print('training set: %s, valid set: %s, test set %s' % (len(train_idx), len(valid_idx), len(test_idx)))
    return train_idx, valid_idx, test_idx
task_name = 'BBBP'
from chembench import load_data
df, _ = load_data(task_name)
train_idx, valid_idx, test_idx = get_attentiveFP_idx(df)
len(train_idx), len(valid_idx), len(test_idx)
mp1 = molmap.loadmap('../descriptor.mp')
mp2 = molmap.loadmap('../fingerprint.mp')
tmp_feature_dir = '../02_OutofTheBox_benchmark_comparison_DMPNN/tmpignore'
if not os.path.exists(tmp_feature_dir):
os.makedirs(tmp_feature_dir)
smiles_col = df.columns[0]
values_col = df.columns[1:]
Y = df[values_col].astype('float').values
Y = Y.reshape(-1, 1)
X1_name = os.path.join(tmp_feature_dir, 'X1_%s.data' % task_name)
X2_name = os.path.join(tmp_feature_dir, 'X2_%s.data' % task_name)
if not os.path.exists(X1_name):
X1 = mp1.batch_transform(df.smiles, n_jobs = 8)
dump(X1, X1_name)
else:
X1 = load(X1_name)
if not os.path.exists(X2_name):
X2 = mp2.batch_transform(df.smiles, n_jobs = 8)
dump(X2, X2_name)
else:
X2 = load(X2_name)
molmap1_size = X1.shape[1:]
molmap2_size = X2.shape[1:]
def get_pos_weights(trainY):
    """Per-column class weights: pos_weights = neg_n / pos_n, and the inverse.

    trainY is a 2-D array of 0/1 labels (one column per task); values that
    are neither 0 nor 1 contribute to neither count.
    """
    labels = pd.DataFrame(trainY)
    pos_n = labels.eq(1).sum(axis=0)
    neg_n = labels.eq(0).sum(axis=0)
    return (neg_n / pos_n).values, (pos_n / neg_n).values
prcs_metrics = ['MUV', 'PCBA']
print(len(train_idx), len(valid_idx), len(test_idx))
trainX = (X1[train_idx], X2[train_idx])
trainY = Y[train_idx]
validX = (X1[valid_idx], X2[valid_idx])
validY = Y[valid_idx]
testX = (X1[test_idx], X2[test_idx])
testY = Y[test_idx]
epochs = 800
patience = 50 #early stopping
dense_layers = [256, 128, 32]
batch_size = 128
lr = 1e-4
weight_decay = 0
monitor = 'val_loss'
dense_avf = 'relu'
last_avf = None #sigmoid in loss
if task_name in prcs_metrics:
metric = 'PRC'
else:
metric = 'ROC'
results = []
# Three training repetitions with different seeds; the mean/std of test AUC
# is reported after the loop.
# NOTE(review): the seed list [7, 77, 77] repeats 77 — possibly a typo for
# [7, 77, 777]; confirm before trusting the reported std.
for i, seed in enumerate([7, 77, 77]):
    np.random.seed(seed)
    tf.compat.v1.set_random_seed(seed)
    # Class-imbalance weights; labels equal to -1 are masked out of the loss.
    pos_weights, neg_weights = get_pos_weights(trainY)
    loss = lambda y_true, y_pred: molmodel.loss.weighted_cross_entropy(y_true,y_pred, pos_weights, MASK = -1)
    # Two-path network: one branch per molecular map (descriptors and
    # fingerprints).
    model = molmodel.net.DoublePathNet(molmap1_size, molmap2_size,
                                       n_outputs=Y.shape[-1],
                                       dense_layers=dense_layers,
                                       dense_avf = dense_avf,
                                       last_avf=last_avf)
    opt = tf.keras.optimizers.Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0) #
    #import tensorflow_addons as tfa
    #opt = tfa.optimizers.AdamW(weight_decay = 0.1,learning_rate=0.001,beta1=0.9,beta2=0.999, epsilon=1e-08)
    model.compile(optimizer = opt, loss = loss)
    # First run: train with early stopping to find the best epoch.  Later
    # runs: retrain for exactly best_epoch + 1 epochs under a fresh seed and
    # push the new weights into the run-0 callback so evaluate() scores them.
    if i == 0:
        performance = molmodel.cbks.CLA_EarlyStoppingAndPerformance((trainX, trainY),
                                                                    (validX, validY),
                                                                    patience = patience,
                                                                    criteria = monitor,
                                                                    metric = metric,
                                                                    )
        model.fit(trainX, trainY, batch_size=batch_size,
                  epochs=epochs, verbose= 0, shuffle = True,
                  validation_data = (validX, validY),
                  callbacks=[performance])
    else:
        model.fit(trainX, trainY, batch_size=batch_size,
                  epochs = performance.best_epoch + 1, verbose = 1, shuffle = True,
                  validation_data = (validX, validY))
        performance.model.set_weights(model.get_weights())
    best_epoch = performance.best_epoch
    trainable_params = model.count_params()
    train_aucs = performance.evaluate(trainX, trainY)
    valid_aucs = performance.evaluate(validX, validY)
    test_aucs = performance.evaluate(testX, testY)
    # nanmean: tasks that lack positive or negative examples yield NaN AUCs.
    final_res = {
        'task_name':task_name,
        'train_auc':np.nanmean(train_aucs),
        'valid_auc':np.nanmean(valid_aucs),
        'test_auc':np.nanmean(test_aucs),
        'metric':metric,
        '# trainable params': trainable_params,
        'best_epoch': best_epoch,
        'batch_size':batch_size,
        'lr': lr,
        'weight_decay':weight_decay
    }
    results.append(final_res)
# Mean/std of test AUC across the repeated runs, then persist per-task results.
pd.DataFrame(results).test_auc.mean()
pd.DataFrame(results).test_auc.std()
pd.DataFrame(results).to_csv('./results/%s.csv' % task_name)
```
| github_jupyter |
```
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import geopy.distance as gd
from mpl_toolkits.basemap import Basemap
from datetime import datetime, timedelta
pd.options.mode.chained_assignment = None
# from pandarallel import pandarallel
vessel_information = pd.read_csv("./data/Vessel_information.csv")
list_vessel_id = vessel_information.loc[(vessel_information['type'] == 'CSV'), 'vessel_id'].to_list()
df_vessel_daily_data = pd.read_pickle("./data/df_vessel_daily_2019_complete.pkl")
df_vessel_daily_data.loc[df_vessel_daily_data['time_offset'] == 'Z', 'time_offset'] = '00:00'
df_vessel_daily_data = df_vessel_daily_data.loc[df_vessel_daily_data.time_offset.isna() == False]
df_positions_jan2019 = pd.read_csv("./data/positions_jan2019.csv")
df_positions_feb2019 = pd.read_csv("./data/positions_feb2019.csv")
df_positions_mar2019 = pd.read_csv("./data/positions_mar2019.csv")
df_positions_apr2019 = pd.read_csv("./data/positions_apr2019.csv")
df_positions_may2019 = pd.read_csv("./data/positions_may2019.csv")
df_positions_jun2019 = pd.read_csv("./data/positions_june2019.csv")
df_positions_jul2019 = pd.read_csv("./data/positions_july2019.csv")
df_positions_aug2019 = pd.read_csv("./data/positions_aug2019.csv")
df_positions_sep2019 = pd.read_csv("./data/positions_sept2019.csv")
df_positions_oct2019 = pd.read_csv("./data/positions_oct2019.csv")
df_positions_nov2019 = pd.read_csv("./data/positions_nov2019.csv")
df_positions_dec2019 = pd.read_csv("./data/positions_dec2019.csv")
list_df_position = [df_positions_jan2019, df_positions_feb2019,
df_positions_mar2019, df_positions_apr2019,
df_positions_may2019, df_positions_jun2019,
df_positions_jul2019, df_positions_aug2019,
df_positions_sep2019, df_positions_oct2019,
df_positions_nov2019, df_positions_dec2019,
]
df_positions_2019 = pd.concat(list_df_position, axis=0, ignore_index=True)
df_positions_2019 = df_positions_2019[(df_positions_2019['vessel_id'].isin(list_vessel_id))]
df_positions_2019["new_position_received_time"] = pd.to_datetime(df_positions_2019['position_received_time'])
df_positions_2019['new_position_received_time'] = df_positions_2019['new_position_received_time'].dt.tz_localize(None)
df_positions_2019.drop_duplicates(subset=['vessel_id', 'course', 'destination', 'draught', 'heading',
'latitude', 'longitude', 'nav_status', 'speed', 'eta_time',
'position_received_time', 'location', 'api_source'], inplace=True)
df_positions_2019.sort_values(by=['vessel_id', 'new_position_received_time'], inplace=True)
# Previous position/time per vessel: shift within each vessel's (already
# time-sorted) rows so every record can be compared with its predecessor.
for vessel_id in list_vessel_id:
    df_positions_2019.loc[df_positions_2019['vessel_id']==vessel_id, 'prev_lon'] = df_positions_2019.loc[df_positions_2019['vessel_id']==vessel_id, 'longitude'].shift(1)
    df_positions_2019.loc[df_positions_2019['vessel_id']==vessel_id, 'prev_lat'] = df_positions_2019.loc[df_positions_2019['vessel_id']==vessel_id, 'latitude'].shift(1)
    df_positions_2019.loc[df_positions_2019['vessel_id']==vessel_id, 'prev_receive_time'] = df_positions_2019.loc[df_positions_2019['vessel_id']==vessel_id, 'new_position_received_time'].shift(1)
# (lat, lon) tuples in the order geopy.distance expects.
df_positions_2019['prev_coord'] = df_positions_2019.apply(lambda x: (x.prev_lat, x.prev_lon), axis=1)
df_positions_2019['curr_coord'] = df_positions_2019.apply(lambda x: (x.latitude, x.longitude), axis=1)
# Here is the calculation of mile since the last record (current_coordinate - previous_coordinate)
# This process takes sometime to complete
# Geodesic distance in nautical miles; first record per vessel stays NaN.
df_positions_2019['mile_since'] = df_positions_2019.loc[df_positions_2019['prev_lon'].notnull()].apply(
    lambda x: gd.distance(x.prev_coord, x.curr_coord).nm, axis=1)
df_positions_2019[['prev_coord', 'curr_coord', 'mile_since']].head(2)
# Hours elapsed since the previous position record for the same vessel.
df_positions_2019['hour_since'] = (df_positions_2019.new_position_received_time - df_positions_2019.prev_receive_time)/np.timedelta64(1, 'h')
df_positions_2019[['hour_since']].head(2)
# Speed over ground in nautical miles per hour (mile_since / hour_since).
# BUG FIX: the original used DataFrame.at with a boolean mask; .at only
# accepts scalar row/column labels, so masked assignment must use .loc.
# Records that moved but report zero elapsed time get a tiny epsilon to
# avoid division by zero.
df_positions_2019.loc[(df_positions_2019['hour_since'] == 0) & (df_positions_2019['mile_since'] > 0), 'hour_since'] = 0.001
df_positions_2019['speed_nm'] = (df_positions_2019.mile_since / df_positions_2019.hour_since)
# Stationary records (no distance, no elapsed time) are explicitly zero speed.
df_positions_2019.loc[(df_positions_2019['hour_since'] == 0) & (df_positions_2019['mile_since'] == 0), 'speed_nm'] = 0
df_positions_2019.columns
def apply_label_combine_transit(label, new_label):
    """Stamp AIS position rows with the daily-report activity they fall under.

    For every daily report whose ``activity`` equals ``label`` (restricted to
    the vessels of interest), the position rows of that vessel whose local
    timestamp falls inside the report window
    ``[new_report_time - report_hours, new_report_time)`` receive the report's
    labels plus its average fuel consumption.

    Mutates the global ``df_positions_2019`` in place; reads the globals
    ``df_vessel_daily_data`` and ``list_vessel_id``.
    """
    for idx, row in df_vessel_daily_data[(df_vessel_daily_data['activity'] == label)
                                         &
                                         (df_vessel_daily_data['vessel_id'].isin(list_vessel_id))].iterrows():
        consumption_at_period = row.fuel/row.report_hours
        # Parse the "HH:MM" time offset once instead of four times per report row.
        hh, mm = row.time_offset.split(":")[:2]
        offset = timedelta(hours=int(hh), minutes=int(mm))
        # Position timestamps shifted into the report's local time zone.
        local_time = df_positions_2019['new_position_received_time'] + offset
        window_start = row.new_report_time - timedelta(hours=row.report_hours)
        df_positions_2019.loc[(local_time >= window_start)
                              &
                              (local_time < row.new_report_time)
                              &
                              (df_positions_2019['vessel_id'] == row.vessel_id),
                              ['activity_label', 'activity_label2', 'time_period', 'ref_date', 'transit_type', 'daily_vessel_id', 'fuel_consumption_average']] = [new_label, row.activity_mode, row.time_period, row.new_report_time, label, row.id, consumption_at_period]
# Maps the free-form daily-report activity modes onto the two coarse activities.
_ACTIVITY_MODE_LOOKUP = {
    '002 - Anchor Handling - Medium Main-eng.': 'AH',
    '011 - Towing - Manual': 'Towing',
    '010 - Towing - DP Auto Pos': 'Towing',
    'AH - Towing': 'Towing',
    '003 - Anchor Handling - Heavy Tention': 'AH',
}

def convert_activity_mode(row):
    """Return 'AH' or 'Towing' for ``row.activity_label2``, or None if unknown.

    The original implementation rebuilt a lookup DataFrame on every call; a
    module-level dict returns the identical mapping (including None on a miss)
    with an O(1) lookup and no per-call allocation.
    """
    return _ACTIVITY_MODE_LOOKUP.get(row.activity_label2)
# def calculate_fuel_consumption(related_dataframe):
# for idx, row in df_vessel_daily_data.loc[(df_vessel_daily_data['vessel_id'].isin(list_vessel_id))
# ].iterrows():
# # print(row.time_offset)
# consumption_at_period = row.fuel/row.report_hours
# df_positions_2019.loc[
# ((df_positions_2019['new_position_received_time']
# + timedelta(hours=(int(row.time_offset.split(":")[0])),
# minutes=(int(row.time_offset.split(":")[1])))) >= row.new_report_time-timedelta(hours=row.report_hours))
# &
# ((df_positions_2019['new_position_received_time']
# + timedelta(hours=(int(row.time_offset.split(":")[0])),
# minutes=(int(row.time_offset.split(":")[1])))) < row.new_report_time)
# , 'fuel_consumption_average'] = consumption_at_period
df_positions_2019.to_pickle("data/[CSV]df_positions_2019_ver1.pkl")
```
#### resize the df_positions (take out some not used columns, resize columns type)
```
# Shrink the positions frame: preview the downcast dtypes, apply them, then keep
# only the columns used downstream and checkpoint to disk.
df_positions_2019.columns

INT_COLS = ['id', 'vessel_id']
FLOAT_COLS = ['course', 'draught', 'longitude', 'latitude',
              'speed', 'prev_lon', 'prev_lat',
              'mile_since', 'hour_since', 'speed_nm']

# Dry run: report how small the integer/float columns could get.
df_positions_2019[INT_COLS].apply(pd.to_numeric, errors='ignore', downcast='integer').info()
df_positions_2019[FLOAT_COLS].apply(pd.to_numeric, errors='ignore', downcast='float').info()

# Apply the compact dtypes.
df_positions_2019['vessel_id'] = df_positions_2019['vessel_id'].astype('int16')
df_positions_2019['id'] = df_positions_2019['id'].astype('int32')
df_positions_2019[FLOAT_COLS] = df_positions_2019[FLOAT_COLS].astype('float32')

# Drop unused columns and persist the resized frame.
KEEP_COLS = ['id', 'vessel_id', 'speed',
             'new_position_received_time',
             'prev_receive_time', 'prev_coord', 'curr_coord', 'mile_since',
             'hour_since', 'speed_nm']
df_positions_2019 = df_positions_2019[KEEP_COLS]
df_positions_2019.to_pickle("data/[resized]df_CSV_positions_2019.pkl")
df_positions_2019 = pd.read_pickle("./data/[resized]df_CSV_positions_2019.pkl")
```
#### End of resizing block
#### Do the model still need this?
```
# Average speed per unit of fuel for rows that have both measurements.
# IDIOM: .notna() replaces ".isna()==False"; the row-wise lambda is replaced by a
# vectorized division (identical values, index-aligned on assignment).
_has_both = df_positions_2019.fuel_consumption_average.notna() & df_positions_2019.speed_nm.notna()
_positive = (df_positions_2019.fuel_consumption_average > 0) & (df_positions_2019.speed_nm > 0)
# NOTE(review): the target mask (non-null) is wider than the source mask (> 0),
# so non-null rows with a zero value end up NaN — behavior preserved from the
# original; confirm this is intended.
df_positions_2019.loc[_has_both, 'speed_consumption_average'] = (
    df_positions_2019.loc[_positive, 'speed_nm'] / df_positions_2019.loc[_positive, 'fuel_consumption_average'])
# For CSV type
apply_label_combine_transit('DP', 'DP')
apply_label_combine_transit('Standby','Standby')
apply_label_combine_transit('Transit Eco', 'TransitCombine')
apply_label_combine_transit('Transit', 'TransitCombine')
apply_label_combine_transit('Transit Max', 'TransitCombine')
apply_label_combine_transit('Port', 'Port')
apply_label_combine_transit('AH/Towing', 'AH/Towing')
# Re-map the combined 'AH/Towing' rows to their specific activity; pass the
# function directly instead of wrapping it in a lambda.
df_positions_2019.loc[(df_positions_2019['activity_label'] == 'AH/Towing'), 'activity_label'] = df_positions_2019.loc[(df_positions_2019['activity_label'] == 'AH/Towing')].apply(convert_activity_mode, axis=1)
```
| github_jupyter |
# The Discrete-Time Fourier Transform
*This Jupyter notebook is part of a [collection of notebooks](../index.ipynb) in the bachelor's module Signals and Systems, Communications Engineering, Universität Rostock. Please direct questions and suggestions to [Sascha.Spors@uni-rostock.de](mailto:Sascha.Spors@uni-rostock.de).*
## Theorems
The theorems of the discrete-time Fourier transform (DTFT) relate basic operations applied to discrete signals to their equivalents in the DTFT domain. They are of use to transform signals composed from modified [standard signals](../discrete_signals/standard_signals.ipynb), for the computation of the response of a linear time-invariant (LTI) system and to predict the consequences of modifying a signal or system by certain operations.
### Convolution Theorem
The [convolution theorem](https://en.wikipedia.org/wiki/Convolution_theorem) states that the DTFT of the linear convolution of two discrete signals $x[k]$ and $y[k]$ is equal to the scalar multiplication of their DTFTs $X(e^{j \Omega}) = \mathcal{F}_* \{ x[k] \}$ and $Y(e^{j \Omega}) = \mathcal{F}_* \{ y[k] \}$
\begin{equation}
\mathcal{F}_* \{ x[k] * y[k] \} = X(e^{j \Omega}) \cdot Y(e^{j \Omega})
\end{equation}
The theorem can be proven by introducing the [definition of the linear convolution](../discrete_systems/linear_convolution.ipynb) into the [definition of the DTFT](definition.ipynb) and changing the order of summation
\begin{align}
\mathcal{F}_* \{ x[k] * y[k] \} &= \sum_{k = -\infty}^{\infty} \left( \sum_{\kappa = -\infty}^{\infty} x[\kappa] \cdot y[k - \kappa] \right) e^{-j \Omega k} \\
&= \sum_{\kappa = -\infty}^{\infty} \left( \sum_{k = -\infty}^{\infty} y[k - \kappa] \, e^{-j \Omega k} \right) x[\kappa] \\
&= Y(e^{j \Omega}) \cdot \sum_{\kappa = -\infty}^{\infty} x[\kappa] \, e^{-j \Omega \kappa} \\
&= Y(e^{j \Omega}) \cdot X(e^{j \Omega})
\end{align}
The convolution theorem is very useful in the context of LTI systems. The output signal $y[k]$ of an LTI system is given as the convolution of the input signal $x[k]$ with its impulse response $h[k]$. Hence, the signals and the system can be represented equivalently in the time and frequency domain

Calculation of the system response by transforming the problem into the DTFT domain can be beneficial since this replaces the computation of the linear convolution by a scalar multiplication. The (inverse) DTFT is known for many signals or can be derived by applying the properties and theorems to standard signals and their transforms. In many cases this procedure simplifies the calculation of the system response significantly.
The convolution theorem can also be useful to derive the DTFT of a signal. The key is here to express the signal as convolution of two other signals for which the transforms are known. This is illustrated in the following example.
#### Transformation of the triangular signal
The linear convolution of two [rectangular signals](../discrete_signals/standard_signals.ipynb#Rectangular-Signal) of lengths $N$ and $M$ defines a [signal of trapezoidal shape](../discrete_systems/linear_convolution.ipynb#Finite-Length-Signals)
\begin{equation}
x[k] = \text{rect}_N[k] * \text{rect}_M[k]
\end{equation}
Application of the convolution theorem together with the [DTFT of the rectangular signal](definition.ipynb#Transformation-of-the-Rectangular-Signal) yields its DTFT as
\begin{equation}
X(e^{j \Omega}) = \mathcal{F}_* \{ \text{rect}_N[k] \} \cdot \mathcal{F}_* \{ \text{rect}_M[k] \} =
e^{-j \Omega \frac{N+M-2}{2}} \cdot \frac{\sin(\frac{N \Omega}{2}) \sin(\frac{M \Omega}{2})}{\sin^2 ( \frac{\Omega}{2} )}
\end{equation}
The transform of the triangular signal can be derived from this result. The convolution of two rectangular signals of equal length $N=M$ yields the triangular signal $\Lambda[k]$ of length $2N - 1$
\begin{equation}
\Lambda_{2N - 1}[k] = \begin{cases} k + 1 & \text{for } 0 \leq k < N \\
2N - 1 - k & \text{for } N \leq k < 2N - 1 \\
0 & \text{otherwise}
\end{cases}
\end{equation}
From the above result the DTFT of the triangular signal is derived by setting $M = N$
\begin{equation}
\mathcal{F}_* \{ \Lambda_{2N - 1}[k] \} =
e^{-j \Omega (N-1)} \cdot \frac{\sin^2(\frac{N \Omega}{2}) }{\sin^2 ( \frac{\Omega}{2} )}
\end{equation}
Both the signal and the magnitude of its DTFT are plotted for illustration
```
%matplotlib inline
import sympy as sym
import numpy as np
import matplotlib.pyplot as plt
N = 7
x = np.convolve(np.ones(N), np.ones(N), mode='full')
plt.stem(x)
plt.xlabel('$k$')
plt.ylabel('$x[k]$')
W = sym.symbols('Omega')
X = sym.exp(-sym.I*W *(N-1)) * sym.sin(N*W/2)**2 / sym.sin(W/2)**2
sym.plot(sym.Abs(X), (W, -5, 5), xlabel='$\Omega$', ylabel='$|X(e^{j \Omega})|$');
```
**Exercise**
* Change the length of the triangular signal in above example. How does its DTFT change?
* The triangular signal introduced above is of odd length $2N - 1$
* Define a triangular signal of even length by convolving two rectangular signals
* Derive its DTFT
* Compare the DTFTs of a triangular signal of odd/even length
### Shift Theorem
The [shift of a signal](../discrete_signals/operations.ipynb#Shift) $x[k]$ can be expressed by a convolution with a shifted Dirac impulse
\begin{equation}
x[k - \kappa] = x[k] * \delta[k - \kappa]
\end{equation}
for $\kappa \in \mathbb{Z}$. This follows from the sifting property of the Dirac impulse. Applying the DTFT to the left- and right-hand side and exploiting the convolution theorem yields
\begin{equation}
\mathcal{F}_* \{ x[k - \kappa] \} = X(e^{j \Omega}) \cdot e^{- j \Omega \kappa}
\end{equation}
where $X(e^{j \Omega}) = \mathcal{F}_* \{ x[k] \}$. Note that $\mathcal{F}_* \{ \delta(k - \kappa) \} = e^{- j \Omega \kappa}$ can be derived from the definition of the DTFT together with the sifting property of the Dirac impulse. Above relation is known as shift theorem of the DTFT.
Expressing the DTFT $X(e^{j \Omega}) = |X(e^{j \Omega})| \cdot e^{j \varphi(e^{j \Omega})}$ by its absolute value $|X(e^{j \Omega})|$ and phase $\varphi(e^{j \Omega})$ results in
\begin{equation}
\mathcal{F}_* \{ x[k - \kappa] \} = | X(e^{j \Omega}) | \cdot e^{j (\varphi(e^{j \Omega}) - \Omega \kappa)}
\end{equation}
Shifting of a signal does not change the absolute value of its spectrum but it subtracts the linear contribution $\Omega \kappa$ from its phase.
### Multiplication Theorem
The transform of a multiplication of two signals $x[k] \cdot y[k]$ is derived by introducing the signals into the definition of the DTFT, expressing the signal $x[k]$ by its spectrum $X(e^{j \Omega}) = \mathcal{F}_* \{ x[k] \}$ and rearranging terms
\begin{align}
\mathcal{F}_* \{ x[k] \cdot y[k] \} &= \sum_{k=-\infty}^{\infty} x[k] \cdot y[k] \, e^{-j \Omega k} \\
&= \sum_{k=-\infty}^{\infty} \left( \frac{1}{2 \pi} \int_{-\pi}^{\pi} X(e^{j \nu}) \, e^{j \nu k} \; d \nu \right) y[k] \, e^{-j \Omega k} \\
&= \frac{1}{2 \pi} \int_{-\pi}^{\pi} X(e^{j \nu}) \sum_{k=-\infty}^{\infty} y[k] \, e^{-j (\Omega - \nu) k} \; d\nu \\
&= \frac{1}{2 \pi} \int_{-\pi}^{\pi} X(e^{j \nu}) \cdot Y(e^{j (\Omega - \nu)}) d\nu
\end{align}
where $Y(e^{j \Omega}) = \mathcal{F}_* \{ y[k] \}$.
The [periodic (cyclic/circular) convolution](https://en.wikipedia.org/wiki/Circular_convolution) of two aperiodic signals $h(t)$ and $g(t)$ is defined as
\begin{equation}
h(t) \circledast_{T} g(t) = \int_{-\infty}^{\infty} h(\tau) \cdot g_\text{p}(t - \tau) \; d\tau
\end{equation}
where $T$ denotes the period of the convolution, $g_\text{p}(t) = \sum_{n=-\infty}^{\infty} g(t + n T)$ the periodic summation of $g(t)$ and $\tau \in \mathbb{R}$ an arbitrary constant. The periodic convolution is commonly abbreviated by $\circledast_{T}$. With $h_\text{p}(t)$ denoting the periodic summation of $h(t)$ the periodic convolution can be rewritten as
\begin{equation}
h(t) \circledast_{T} g(t) = \int_{\tau_0}^{\tau_0 + T} h_\text{p}(\tau) \cdot g_\text{p}(t - \tau) \; d\tau
\end{equation}
where $\tau_0 \in \mathbb{R}$ denotes an arbitrary constant. The latter definition holds also for two [periodic signals](../periodic_signals/spectrum.ipynb) $h(t)$ and $g(t)$ with period $T$.
Comparison of the DTFT of two multiplied signals with the definition of the periodic convolution reveals that the preliminary result above can be expressed as
\begin{equation}
\mathcal{F}_* \{ x[k] \cdot y[k] \} = \frac{1}{2\pi} \, X(e^{j \Omega}) \circledast_{2 \pi} Y(e^{j \Omega})
\end{equation}
The DTFT of a multiplication of two signals $x[k] \cdot y[k]$ is given by the periodic convolution of their transforms $X(e^{j \Omega})$ and $Y(e^{j \Omega})$ weighted with $\frac{1}{2 \pi}$. The periodic convolution has a period of $T = 2 \pi$. Note, the convolution is performed with respect to the normalized angular frequency $\Omega$.
Applications of the multiplication theorem include the modulation and windowing of signals. The former leads to the modulation theorem introduced later, the latter is illustrated by the following example.
**Example**
Windowing of signals is used to derive signals of finite duration from signals of infinite duration or to truncate signals to a shorter length. The signal $x[k]$ is multiplied by a weighting function $w[k]$ in order to derive the finite length signal
\begin{equation}
y[k] = w[k] \cdot x[k]
\end{equation}
Application of the multiplication theorem yields the spectrum $Y(e^{j \Omega}) = \mathcal{F}_* \{ y[k] \}$ of the windowed signal as
\begin{equation}
Y(e^{j \Omega}) = \frac{1}{2 \pi} W(e^{j \Omega}) \circledast X(e^{j \Omega})
\end{equation}
where $W(e^{j \Omega}) = \mathcal{F}_* \{ w[k] \}$ and $X(e^{j \Omega}) = \mathcal{F}_* \{ x[k] \}$. In order to illustrate the consequence of windowing, a cosine signal $x[k] = \cos(\Omega_0 k)$ is truncated to a finite length using a rectangular signal
\begin{equation}
y[k] = \text{rect}_N[k] \cdot \cos(\Omega_0 k)
\end{equation}
where $N$ denotes the length of the truncated signal and $\Omega_0$ its normalized angular frequency. Using the DTFT of the [rectangular signal](definition.ipynb#Transformation-of-the-Rectangular-Signal) and the [cosine signal](properties.ipynb#Transformation-of-the-cosine-and-sine-signal) yields
\begin{align}
Y(e^{j \Omega}) &= \frac{1}{2 \pi} e^{-j \Omega \frac{N-1}{2}} \cdot \frac{\sin \left(\frac{N \Omega}{2} \right)}{\sin \left( \frac{\Omega}{2} \right)} \circledast \frac{1}{2} \left[ {\bot \!\! \bot \!\! \bot} \left( \frac{\Omega + \Omega_0}{2 \pi} \right) + {\bot \!\! \bot \!\! \bot} \left( \frac{\Omega - \Omega_0}{2 \pi} \right) \right] \\
&= \frac{1}{2} \left[ e^{-j (\Omega+\Omega_0) \frac{N-1}{2}} \cdot \frac{\sin \left(\frac{N (\Omega+\Omega_0)}{2} \right)}{\sin \left( \frac{\Omega+\Omega_0}{2} \right)} + e^{-j (\Omega-\Omega_0) \frac{N-1}{2}} \cdot \frac{\sin \left(\frac{N (\Omega-\Omega_0)}{2} \right)}{\sin \left( \frac{\Omega-\Omega_0}{2} \right)} \right]
\end{align}
The latter identity results from the sifting property of the Dirac impulse and the periodicity of both spectra. The signal $y[k]$ and its magnitude spectrum $|Y(e^{j \Omega})|$ are plotted for specific values of $N$ and $\Omega_0$.
```
N = 20
W0 = 2*np.pi/10

# Length-N windowed cosine y[k] = rect_N[k] * cos(W0 k).
k = np.arange(N)
x = np.cos(W0 * k)
plt.stem(k, x)
plt.xlabel('$k$')
plt.ylabel('$y[k]$');

# Magnitude spectrum: superposition of two shifted Dirichlet kernels (derived above).
W = sym.symbols('Omega')
Y = 1/2 * ((sym.exp(-sym.I*(W+W0)*(N-1)/2) * sym.sin(N*(W+W0)/2) / sym.sin((W+W0)/2)) +
           (sym.exp(-sym.I*(W-W0)*(N-1)/2) * sym.sin(N*(W-W0)/2) / sym.sin((W-W0)/2)))
# FIX: raw strings keep the TeX backslashes out of Python escape processing.
sym.plot(sym.Abs(Y), (W, -sym.pi, sym.pi), xlabel=r'$\Omega$', ylabel=r'$|Y(e^{j \Omega})|$');
```
**Exercise**
* Change the length $N$ of the signal by modifying the example. How does the spectrum change if you decrease or increase the length?
* What happens if you change the normalized angular frequency $\Omega_0$ of the signal?
* Assume a signal that is composed from a superposition of two finite length cosine signals with different frequencies. What qualitative condition has to hold that you can derive these frequencies from inspection of the spectrum?
### Modulation Theorem
The complex modulation of a signal $x[k]$ is defined as $e^{j \Omega_0 k} \cdot x[k]$ with $\Omega_0 \in \mathbb{R}$. The DTFT of the modulated signal is derived by applying the multiplication theorem
\begin{equation}
\mathcal{F}_* \left\{ e^{j \Omega_0 k} \cdot x[k] \right\} = \frac{1}{2 \pi} \cdot {\bot \!\! \bot \!\! \bot}\left( \frac{\Omega - \Omega_0}{2 \pi} \right) \circledast X(e^{j \Omega})
= X \big( e^{j \, (\Omega - \Omega_0)} \big)
\end{equation}
where $X(e^{j \Omega}) = \mathcal{F}_* \{ x[k] \}$. Above result states that the complex modulation of a signal leads to a shift of its spectrum. This result is known as modulation theorem.
**Example**
An example for the application of the modulation theorem is the [downsampling/decimation](https://en.wikipedia.org/wiki/Decimation_(signal_processing)) of a discrete signal $x[k]$. Downsampling refers to lowering the sampling rate of a signal. The example focuses on the special case of removing every second sample, hence halving the sampling rate. The downsampling is modeled by defining a signal $x_\frac{1}{2}[k]$ where every second sample is set to zero
\begin{equation}
x_\frac{1}{2}[k] = \begin{cases}
x[k] & \text{for even } k \\
0 & \text{for odd } k
\end{cases}
\end{equation}
In order to derive the spectrum $X_\frac{1}{2}(e^{j \Omega}) = \mathcal{F}_* \{ x_\frac{1}{2}[k] \}$, the signal $u[k]$ is introduced where every second sample is zero
\begin{equation}
u[k] = \frac{1}{2} ( 1 + e^{j \pi k} ) = \begin{cases} 1 & \text{for even } k \\
0 & \text{for odd } k \end{cases}
\end{equation}
Using $u[k]$, the process of setting every second sample of $x[k]$ to zero can be expressed as
\begin{equation}
x_\frac{1}{2}[k] = u[k] \cdot x[k]
\end{equation}
Now the spectrum $X_\frac{1}{2}(e^{j \Omega})$ is derived by applying the multiplication theorem and introducing the [DTFT of the exponential signal](definition.ipynb#Transformation-of-the-Exponential-Signal). This results in
\begin{equation}
X_\frac{1}{2}(e^{j \Omega}) = \frac{1}{4 \pi} \left( {\bot \!\! \bot \!\! \bot}\left( \frac{\Omega}{2 \pi} \right) +
{\bot \!\! \bot \!\! \bot}\left( \frac{\Omega - \pi}{2 \pi} \right) \right) \circledast X(e^{j \Omega}) =
\frac{1}{2} X(e^{j \Omega}) + \frac{1}{2} X(e^{j (\Omega- \pi)})
\end{equation}
where $X(e^{j \Omega}) = \mathcal{F}_* \{ x[k] \}$. The spectrum $X_\frac{1}{2}(e^{j \Omega})$ consists of the spectrum of the original signal $X(e^{j \Omega})$ superimposed by the shifted spectrum $X(e^{j (\Omega- \pi)})$ of the original signal. This may lead to overlaps that constitute aliasing. In order to avoid aliasing, the spectrum of the signal $x[k]$ has to be band-limited to $-\frac{\pi}{2} < \Omega < \frac{\pi}{2}$ before downsampling.
### Parseval's Theorem
[Parseval's theorem](https://en.wikipedia.org/wiki/Parseval's_theorem) relates the energy of a discrete signal to its spectrum. The squared absolute value of a signal $x[k]$ represents its instantaneous power. It can be expressed as
\begin{equation}
| x[k] |^2 = x[k] \cdot x^*[k]
\end{equation}
where $x^*[k]$ denotes the complex conjugate of $x[k]$. Transformation of the right-hand side and application of the multiplication theorem results in
\begin{equation}
\mathcal{F}_* \{ x[k] \cdot x^*[k] \} = \frac{1}{2 \pi} \cdot X(e^{j \Omega}) \circledast_{2 \pi} X^*(e^{-j \Omega})
\end{equation}
Introducing the definition of the DTFT and the periodic convolution
\begin{equation}
\sum_{k = -\infty}^{\infty} x[k] \cdot x^*[k] \, e^{-j \Omega k} =
\frac{1}{2 \pi} \int_{-\pi}^{\pi} X(e^{j \nu}) \cdot X^*(e^{j (\Omega - \nu)}) \; d\nu
\end{equation}
Setting $\Omega = 0$ followed by the substitution $\nu = \Omega$ yields Parseval's theorem
\begin{equation}
\sum_{k = -\infty}^{\infty} | x[k] |^2 = \frac{1}{2 \pi} \int_{-\pi}^{\pi} | X(e^{j \Omega}) |^2 \; d\Omega
\end{equation}
The sum over the samples of the squared absolute signal is equal to the integral over its squared absolute spectrum divided by $2 \pi$. Since the left-hand side represents the energy $E$ of the signal $x[k]$, Parseval's theorem states that the energy can be computed alternatively in the spectral domain by integrating over the squared absolute value of the spectrum.
**Copyright**
The notebooks are provided as [Open Educational Resource](https://de.wikipedia.org/wiki/Open_Educational_Resources). Feel free to use the notebooks for your own educational purposes. The text is licensed under [Creative Commons Attribution 4.0](https://creativecommons.org/licenses/by/4.0/), the code of the IPython examples under the [MIT license](https://opensource.org/licenses/MIT). Please attribute the work as follows: *Lecture Notes on Signals and Systems* by Sascha Spors.
| github_jupyter |
# KakaoBrunch12M
KakaoBrunch12M은 [카카오 아레나에서 공개한 데이터](https://arena.kakao.com/datasets?id=2)로 [브런치 서비스](https://brunch.co.kr) 사용자를 통해 수집한 데이터입니다.
이 예제에서는 브런치 데이터에서 ALS를 활용해 특정 글과 유사한 글을 추천하는 예제와 개인화 추천 예제 두 가지를 살펴보겠습니다.
```
import buffalo.data
from buffalo.algo.als import ALS
from buffalo.algo.options import ALSOption
from buffalo.misc import aux
from buffalo.misc import log
from buffalo.data.mm import MatrixMarketOptions
log.set_log_level(1) # set log level 3 or higher to check more information
```
## 데이터 불러오기
```
# Assume the Brunch data has been placed under ./data/kakao-brunch-12m/.
data_opt = MatrixMarketOptions().get_default_option()
data_opt.input = aux.Option(
    {
        'main': 'data/kakao-brunch-12m/main',
        'iid': 'data/kakao-brunch-12m/iid',
        'uid': 'data/kakao-brunch-12m/uid'
    }
)
data_opt

import os
import shutil

# KakaoBrunch12M contains item and user ids that start with '#'. numpy treats
# such lines as comments, so those characters must be rewritten ('#' -> '$')
# before building the buffalo dataset.
for filename in ['main', 'uid', 'iid']:
    # BUG FIX: interpolate the loop variable into the paths. The original used a
    # literal placeholder instead of {filename}, so the loop variable was unused
    # and the three data files were never rewritten.
    src = f'./data/kakao-brunch-12m/{filename}'
    dest = f'./data/kakao-brunch-12m/{filename}.tmp'
    with open(src, 'r') as fin:
        with open(dest, 'w') as fout:
            # Stream in chunks so large files are not loaded wholesale.
            while True:
                read = fin.read(4098)
                if len(read) == 0:
                    break
                read = read.replace('#', '$')
                fout.write(read)
    # Replace the original file with the sanitized copy.
    shutil.move(dest, src)

data = buffalo.data.load(data_opt)
data.create()
```
## 유사아이템 추천
```
# Train ALS with its default options; because this dataset is larger than the
# earlier examples, the number of workers is increased.
als_opt = ALSOption().get_default_option()
als_opt.num_workers = 4
model = ALS(als_opt, data=data)
model.initialize()
model.train()
# Persist the trained model so it can be reloaded below (and in later sessions).
model.save('brunch.als.model')
# https://brunch.co.kr/@brunch/148 - author interview: brand marketer Jung Hye-yoon, by the Brunch team
model.load('brunch.als.model')
# Top-5 items most similar to the article above; item ids look like '<blog>_<article>'.
similar_items = model.most_similar('@brunch_148', 5)
for rank, (item, score) in enumerate(similar_items):
    bid, aid = item.split('_')
    print(f'{rank + 1:02d}. {score:.3f} https://brunch.co.kr/{bid}/{aid}')
```
브런치팀이 쓴 글중에서 아래와 같은 글들이 유사한 결과로 나왔습니다.
- https://brunch.co.kr/@brunch/149 : 글의 완성도를 높이는 팁, 맞춤법 검사
- https://brunch.co.kr/@brunch/147 : 크리에이터스 데이'글력' 후기
- https://brunch.co.kr/@brunch/144 : 글을 읽고 쓰는 것, 이 두 가지에만 집중하세요.
- https://brunch.co.kr/@brunch/145 : 10인의 에디터와 함께 하는, 브런치북 프로젝트 #6
- https://brunch.co.kr/@brunch/143 : 크리에이터스 스튜디오 '글쓰기 클래스' 후기
## 개인화 추천 예제
```
# Personalized recommendations for a known user come from topk_recommendation;
# this is the most basic way to use an ALS model.
for rank, item in enumerate(model.topk_recommendation('$424ec49fa8423d82629c73e6d5ae9408')):
    bid, aid = item.split('_')
    print(f'{rank + 1:02d}. https://brunch.co.kr/{bid}/{aid}')
# With get_weighted_feature you can build recommendations for a hypothetical
# user with an arbitrary set of interests (here: two travel articles, equal weight).
personal_feat = model.get_weighted_feature({
    '@lonelyplanet_3': 1, # https://brunch.co.kr/@lonelyplanet/3
    '@tube007_66': 1 # https://brunch.co.kr/@tube007/66
})
# The 10 items closest to the blended interest vector.
similar_items = model.most_similar(personal_feat, 10)
for rank, (item, score) in enumerate(similar_items):
    bid, aid = item.split('_')
    print(f'{rank + 1:02d}. {score:.3f} https://brunch.co.kr/{bid}/{aid}')
```
| github_jupyter |
# Interactive Map - Confirmed Cases in the US by State
> Interactive Visualizations of The Count and Growth of COVID-19 in the US.
- comments: true
- author: Asif Imran
- categories: [growth, usa, altair, interactive]
- image: images/us-growth-state-map.png
- permalink: /growth-map-us-states/
```
#hide
import requests
import numpy as np
import pandas as pd
import altair as alt
alt.data_transformers.disable_max_rows()
# Workaround for altair scale domains not accepting Python datetimes directly:
# https://github.com/altair-viz/altair/issues/1005#issuecomment-403237407
def to_altair_datetime(dt):
    """Convert a Python datetime into an alt.DateTime (microseconds -> ms)."""
    fields = {
        'year': dt.year,
        'month': dt.month,
        'date': dt.day,
        'hours': dt.hour,
        'minutes': dt.minute,
        'seconds': dt.second,
        'milliseconds': 0.001 * dt.microsecond,
    }
    return alt.DateTime(**fields)
#hide
# USPS state/territory abbreviation -> full name (includes territories and an
# 'NA' pseudo-entry); used below to key the capitals dataset by abbreviation.
abbr2state = {
    'AK': 'Alaska',
    'AL': 'Alabama',
    'AR': 'Arkansas',
    'AS': 'American Samoa',
    'AZ': 'Arizona',
    'CA': 'California',
    'CO': 'Colorado',
    'CT': 'Connecticut',
    'DC': 'District of Columbia',
    'DE': 'Delaware',
    'FL': 'Florida',
    'GA': 'Georgia',
    'GU': 'Guam',
    'HI': 'Hawaii',
    'IA': 'Iowa',
    'ID': 'Idaho',
    'IL': 'Illinois',
    'IN': 'Indiana',
    'KS': 'Kansas',
    'KY': 'Kentucky',
    'LA': 'Louisiana',
    'MA': 'Massachusetts',
    'MD': 'Maryland',
    'ME': 'Maine',
    'MI': 'Michigan',
    'MN': 'Minnesota',
    'MO': 'Missouri',
    'MP': 'Northern Mariana Islands',
    'MS': 'Mississippi',
    'MT': 'Montana',
    'NA': 'National',
    'NC': 'North Carolina',
    'ND': 'North Dakota',
    'NE': 'Nebraska',
    'NH': 'New Hampshire',
    'NJ': 'New Jersey',
    'NM': 'New Mexico',
    'NV': 'Nevada',
    'NY': 'New York',
    'OH': 'Ohio',
    'OK': 'Oklahoma',
    'OR': 'Oregon',
    'PA': 'Pennsylvania',
    'PR': 'Puerto Rico',
    'RI': 'Rhode Island',
    'SC': 'South Carolina',
    'SD': 'South Dakota',
    'TN': 'Tennessee',
    'TX': 'Texas',
    'UT': 'Utah',
    'VA': 'Virginia',
    'VI': 'Virgin Islands',
    'VT': 'Vermont',
    'WA': 'Washington',
    'WI': 'Wisconsin',
    'WV': 'West Virginia',
    'WY': 'Wyoming'
}
# Reverse lookup: full name -> abbreviation.
state2abbr = {s:a for a,s in abbr2state.items()}
#hide
# Daily per-state counts from the COVID Tracking Project API.
states_daily_url = 'https://covidtracking.com/api/states/daily'
states_daily_raw = pd.DataFrame(requests.get(states_daily_url).json())
us_daily_df = states_daily_raw.copy()
# Keep only the fields the charts use.
cols_keep = ['date','state','positive','dateChecked','positiveIncrease','death']
us_daily_df = us_daily_df[cols_keep]
# 'date' arrives as YYYYMMDD integers; 'dateChecked' is an ISO-8601 string.
us_daily_df['date'] = pd.to_datetime(us_daily_df['date'], format='%Y%m%d')
us_daily_df['dateChecked'] = pd.to_datetime(us_daily_df['dateChecked'])
# State capital coordinates give each state a point to plot on the map;
# re-key the capitals dataset by abbreviation to match the API's 'state' column.
us_state_capitals_url = 'https://vega.github.io/vega-datasets/data/us-state-capitals.json'
state_cap_df = pd.DataFrame(requests.get(us_state_capitals_url).json())
state_cap_df['state'] = state_cap_df['state'].apply(lambda s: state2abbr.get(s))
us_daily_df = us_daily_df.merge(state_cap_df, on='state', how='left')
us_daily_df.rename(columns={'positive':'confirmed_count',
                            'positiveIncrease':'new_cases'}, inplace=True)
# Latest snapshot per state (rows sorted by date first, then take each tail).
state_df = us_daily_df.sort_values('date').groupby(['state']).tail(1)
#hide
# TopoJSON outlines of US states for the base map.
states_data = 'https://vega.github.io/vega-datasets/data/us-10m.json'
states = alt.topo_feature(states_data, feature='states')
# Click-to-select a single state; California is selected initially, and an
# empty selection shows nothing ('none') rather than all states.
selector = alt.selection_single(empty='none', fields=['state'], nearest=True, init={'state':'CA'})
curr_date = state_df.date.max().date().strftime('%Y-%m-%d')
# Pad the x-axis domain a few days past the newest data point.
dmax = (us_daily_df.date.max() + pd.DateOffset(days=3))
dmin = us_daily_df.date.min()
# US states background
background = alt.Chart(states).mark_geoshape(
    fill='lightgray',
    stroke='white'
).properties(
    width=500,
    height=400
).project('albersUsa')
# One bubble per state (plotted at the capital's lat/lon), sized by cumulative
# confirmed cases; clicking a bubble drives the time-series charts below.
points = alt.Chart(state_df).mark_circle().encode(
    longitude='lon:Q',
    latitude='lat:Q',
    size=alt.Size('confirmed_count:Q', title= 'Number of Confirmed Cases'),
    color=alt.value('steelblue'),
    tooltip=['state:N','confirmed_count:Q']
).properties(
    title=f'Total Confirmed Cases by State as of {curr_date}'
).add_selection(selector)
# Daily new cases for the selected state (bars, left axis).
timeseries = alt.Chart(us_daily_df).mark_bar().properties(
    width=500,
    height=350,
    title="New Cases by Day",
).encode(
    x=alt.X('date:T', title='Date', timeUnit='yearmonthdate',
            axis=alt.Axis(format='%y/%m/%d', labelAngle=-30),
            scale=alt.Scale(domain=[to_altair_datetime(dmin), to_altair_datetime(dmax)])),
    y=alt.Y('new_cases:Q',
            axis=alt.Axis(title='# of New Cases',titleColor='steelblue'),
           ),
    color=alt.Color('state:O'),
    tooltip=['state:N','date:T','confirmed_count:Q', 'new_cases:Q']
).transform_filter(
    selector
).add_selection(alt.selection_single()
)
# Cumulative confirmed cases for the selected state (red line, right axis).
timeseries_cs = alt.Chart(us_daily_df).mark_line(color='red').properties(
    width=500,
    height=350,
).encode(
    x=alt.X('date:T', title='Date', timeUnit='yearmonthdate',
            axis=alt.Axis(format='%y/%m/%d', labelAngle=-30),
            scale=alt.Scale(domain=[to_altair_datetime(dmin), to_altair_datetime(dmax)])),
    y=alt.Y('confirmed_count:Q',
            #scale=alt.Scale(type='log'),
            axis=alt.Axis(title='# of Confirmed Cases', titleColor='red'),
           ),
).transform_filter(
    selector
).add_selection(alt.selection_single(nearest=True)
)
# Map on top; the two time series layered below sharing the x-axis but with
# independent y-scales (counts vs. cumulative counts).
final_chart = alt.vconcat(
    background + points,
    alt.layer(timeseries, timeseries_cs).resolve_scale(y='independent'),
).resolve_scale(
    color='independent',
    shape='independent',
).configure(
    padding={'left':10, 'bottom':40}
).configure_axis(
    labelFontSize=10,
    labelPadding=10,
    titleFontSize=12,
).configure_view(
    stroke=None
)
```
### Click On State To Filter Chart Below
```
#hide_input
final_chart
```
Prepared by [Asif Imran](https://twitter.com/5sigma)[^1]
[^1]: Source: ["https://covidtracking.com/api/"](https://covidtracking.com/api/).
| github_jupyter |
# 列表List
- 一个列表可以储存任意大小的数据集合,你可以理解为他是一个容器
```
a = [1,2,3,'a',1.0,True,[1,2]]
a
```
## 先来一个例子爽一爽

## 创建一个列表
- a = [1,2,3,4,5]
```
import numpy as np
a = [[1,2],[3,4]]
np.array(a)
```
## 列表的一般操作

```
a = 'Kim'
b = 'im'
b in a
a = [1,2,'Kim',1.0]
b = 'im'
b in a
a = [1]#列表只能相加和相乘
b = [2]
a + b
a = [1]
b = 5
a * b
a = [1,2,3,4,5]
a[-1]
a = [1,2,4,5]
a[1:3]#前闭后开
a = [1,2,4,5]
a[3:0:-1]#开始:结尾:步长(前闭后开)一旦超过索引值的范围,默认最后一位索引
a = [1,2,4,5]
a[::-1][:3]
a = [1,2,4,5]
a[::2]
a = [1,2,4,5,[100,200,300]]
a[4][2]
a = [[0,1,2],[3,4,5],[6,7,8]]
b = np.array(a)
b
a = [[0,1,2],[3,4,5],[6,7,8]]
b = a[0][0:2]
print(b)
c = a[1][0:2]
print(c)
d = a[0]
a = [[0,1,2],[3,4,5],[6,7,8]]
for i in range(2):
for j in range(2):
print(a[i][j:j+2])
print(a[i+1][j:j+2])
print('--------')
a = [1,2,3,[1,2,3]]
len(a)
a = [1,2,3,[1,2,3]]
N = len(a)
b = a[3][::]
print(b)
c = len(b)
print(c)
print(N -1 + c)
def K():
    """Print the nested list, its length, and the flattened element count
    (outer elements minus the nested list itself, plus its contents)."""
    items = [1, 2, 3, [1, 2, 3]]
    outer_len = len(items)
    inner = list(items[3])
    print(inner)
    inner_len = len(inner)
    print(inner_len)
    print(outer_len - 1 + inner_len)

K()
```
# 列表索引操作
- Mylist[index]
- 正序索引,逆序索引
- 列表一定注意越界
- 
```
b = [1,2,3]
max(b)
b = [1,2,3]
sum(b)
b = [1,2,3,True]
min(b)
a = [1,2,3,[1,2,3,4]]
for i in a:
print(i)
a = [1,2,3]
b = [1,2,3]
b == a
```
## 列表切片操作
- Mylist[start:end]
- 正序切片,逆序切片
## 列表 +、*、in 、not in
## 使用for循环遍历元素
- for 循环可以遍历一切可迭代元素
## EP:
- 使用while 循环遍历列表
```
Kim = [1,2,3,4,5]
for i in Kim:
print(i)
i = 0
while i < len(Kim):
print(Kim[i])
i +=1
```
## 列表的比较
- \>,<,>=,<=,==,!=
## 列表生成式
[x for x in range(10)]
```
U = [x for x in range(10)]
U
U = [x for x in range(10) if x%2==0]
U
U = [x + 10 for x in range(10)]
U
import random
K = [random.randint(0,10) for i in range(10)]
K
```
## 列表的方法

```
a = 10
b = [100]
b.append(a)
b
a = [1,2,3,4]
b = [100,200,300]
for i in b:
a.append(i)
a
a = [1,2,3,3,4,5,5]
a.count(3)
a = [1,2,2,[2,2]]
for i in a:
if type(i) is list:
print(i.count(2))
a = [1,2,3,4]
b = [300,400]
b.extend(a)
a +b
a = [1,2,3,4]#查询1
a.index(1)
a = [1,2,3,4]
a.insert(1,12)
a
b = [1,2,3,4,6]# insert 100 in front of every even number
c = []
# WARNING: this loop inserts into `b` while iterating over it. Mutating a list
# during iteration is fragile; it only works here because each processed even
# number is recorded in `c` and skipped when the iterator meets it again.
for i in b:
    if i %2 == 0 and i != 100 and i not in c:
        c.append(i)
        index_ = b.index(i)
        b.insert(index_,100)
b
# list.remove deletes only the FIRST matching element.
a = [1,1,1,2,3]
a.remove(1)
a
# WARNING: removing from a list while iterating it shifts the remaining
# elements and skips positions. It happens to delete every 1 for this
# alternating input, but e.g. [1,1,2] would leave a 1 behind.
# (`i//1 == 1` is just `i == 1` for these ints.)
# Prefer a comprehension: a = [x for x in a if x != 1].
a = [1,2,1,5,1,7,1,9,1]
for i in a:
    if i//1 == 1:
        a.remove(1)
a
```
## 将字符串分割成列表
- split 按照自定义的内容拆分
## EP:


## 列表的复制
- copy 浅复制
- deepcopy import copy 深复制
- http://www.pythontutor.com/visualize.html#mode=edit
## 列表排序
- sort
- sorted
- 列表的多级排序
- 匿名函数
## EP:
- 手动排序该列表[5,3,8,0,17],以升序或者降序
- 1

```
# Grade each score relative to the best score in the batch
# (A: within 10 of the max, B: within 20, C: within 30, D: within 40, else F).
# NOTE(review): eval() on user input executes arbitrary code — acceptable for a
# classroom exercise, never for untrusted input.
score = eval(input('Enter scores:'))
b = max(score)
for i in score:
    if i >= b - 10:
        print(i,'grade is A')
    elif i >= b - 20:
        print(i,'grade is B')
    elif i >= b - 30:
        print(i,'grade is C')
    elif i >= b - 40:
        print(i,'grade is D')
    else:
        print(i,'grade is F')
```
- 2

```
a = [1,5,3,4,7]
a[::-1]
```
- 3

```
# Count how many times each value occurs.
# NOTE: a value appearing k times is reported k times; iterate over set(T) or
# use collections.Counter to report each value exactly once.
T=[-1,11,1,11,12,-1]
for i in T:
    count_=T.count(i)
    print(i,"出现",count_,'次')
```
- 4

```
# Count how many scores fall above / below the class average.
score = [65,58,94,77,86,91,80]
average = sum(score) / len(score)
N1 = sum(1 for s in score if s > average)
N2 = sum(1 for s in score if s < average)
print('大于平均数的分数有:',N1,'个')
print('小于平均数的分数有:',N2,'个')
```
- 5

```
import random

# Build two *independent* lists of 10 random integers in [0, 10].
K1 = [random.randint(0, 10) for _ in range(10)]
K2 = [random.randint(0, 10) for _ in range(10)]
# BUG FIX: the original printed K1 twice; the exercise asks to show both lists.
print(K1, '\n', K2, '\n')
```
- 6

- 7


- 8

- 9

- 10

- 11

- 12

| github_jupyter |
```
%matplotlib inline
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from pathlib import Path
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler, LabelEncoder
train_df = pd.read_csv(Path('Resources/2019loans.csv'))
test_df = pd.read_csv(Path('Resources/2020Q1loans.csv'))
train_df.head()
test_df.head()
# Convert categorical data to numeric and separate target feature for training data
X_train = train_df.drop('loan_status', axis=1)
X_train_dummies = pd.get_dummies(X_train)
y_train_label = LabelEncoder().fit_transform(train_df['loan_status'])
print(y_train_label)
print(X_train_dummies.columns)
X_train_dummies
# Convert categorical data to numeric and separate target feature for testing data
X_test = test_df.drop('loan_status', axis=1)
X_test_dummies = pd.get_dummies(X_test)
y_test_label = LabelEncoder().fit_transform(test_df['loan_status'])
print(y_test_label)
print(X_test_dummies.columns)
X_test_dummies
# add missing dummy variables to testing set
# check which column is missing from the test set
for col in X_train_dummies.columns:
if col not in X_test_dummies.columns:
print(col)
def labeler(binary):
    """Flip a 0/1 flag.

    Args:
        binary (int/float): 0 or 1
    Returns:
        int: 1 when the input is 0, otherwise 0
    """
    return 1 if int(binary) == 0 else 0
X_test_dummies['debt_settlement_flag_Y'] = X_test_dummies['debt_settlement_flag_N'].apply(lambda x: labeler(x))
X_test_dummies
```
# Logistic vs. Random Forest Model Unscaled data
## Predictions
I predict the logistic regression model will perform worse on the unscaled dataset compared to the random forest model. Logistic regression models classify the label based on an arithmetic equation 'weighted' by each feature's numerical value. Therefore, since the dataset is unscaled and highly skewed, the LR is likely to underperform. The random forest classifier is not affected by the scaling of the dataset, so I predict greater accuracy.
```
# Train the Logistic Regression model on the unscaled data and print the model score
classifier = LogisticRegression(max_iter=1000)
classifier.fit(X_train_dummies, y_train_label)
print(f"Training test score: {classifier.score(X_train_dummies, y_train_label)}")
print(f"Testing test score: {classifier.score(X_test_dummies, y_test_label)}")
# Train a Random Forest Classifier model and print the model score
classifier_rf = RandomForestClassifier(n_estimators=200)
classifier_rf.fit(X_train_dummies, y_train_label)
print(f"Training test score: {classifier_rf.score(X_train_dummies, y_train_label)}")
print(f"Testing test score: {classifier_rf.score(X_test_dummies, y_test_label)}")
```
## Performance
As expected, the random forest model outperformed the logistic regression. The random forest is also overfitted to the training data set, indicated by a training accuracy score of 1.0. I anticipate the logistic regression will perform much better with the scaled dataset.
```
# Scale the data
scaler = StandardScaler().fit(X_train_dummies)
X_train_scaled = scaler.transform(X_train_dummies)
X_test_scaled = scaler.transform(X_test_dummies)
# Train the Logistic Regression model on the scaled data and print the model score
classifier_LR = LogisticRegression(max_iter=1000)
classifier_LR.fit(X_train_scaled, y_train_label)
print(f"Training test score: {classifier_LR.score(X_train_scaled, y_train_label)}")
print(f"Testing test score: {classifier_LR.score(X_test_scaled, y_test_label)}")
# Train a Random Forest Classifier model on the scaled data and print the model score
classifier_rf = RandomForestClassifier(n_estimators=200)
classifier_rf.fit(X_train_scaled, y_train_label)
print(f"Training test score: {classifier_rf.score(X_train_scaled, y_train_label)}")
print(f"Testing test score: {classifier_rf.score(X_test_scaled, y_test_label)}")
```
## Performance on scaled data
The logistic regression model outperformed the random forest model when the data is scaled using the StandardScaler() function. The logistic regression model dramatically increased its testing accuracy from 0.58 -> 0.72, while the random forest's testing accuracy decreased from 0.62 -> 0.58.
## Trying feature selection on dataset
# Prediction
Since there are so many features in this training model, I anticipate there are many that are not impactful in the models' decisions
Here I will find the non-essential features and remove them from the model and retest
```
# Determining what features are important in the random forest model
features = classifier_rf.feature_importances_
plt.bar(x = range(len(features)), height=features)
plt.xlabel('Feature number')
plt.ylabel('Feature importance')
plt.title('Feature importance vs. feature index')
plt.show()
sel = SelectFromModel(classifier_LR)
sel.fit(X_train_scaled, y_train_label)
sel.get_support()
# feature selection
# transforming unscaled datasets to remove unimportant features
X_selected_train = sel.transform(X_train_dummies)
X_selected_test = sel.transform(X_test_dummies)
# scale filtered datasets
scaler = StandardScaler().fit(X_selected_train)
X_selected_train_scaled = scaler.transform(X_selected_train)
X_selected_test_scaled = scaler.transform(X_selected_test)
classifier_LR_selected = LogisticRegression(max_iter=1000).fit(X_selected_train_scaled, y_train_label)
print(f'Training Score: {classifier_LR_selected.score(X_selected_train_scaled, y_train_label)}')
print(f'Testing Score: {classifier_LR_selected.score(X_selected_test_scaled, y_test_label)}')
sel = SelectFromModel(classifier_rf)
sel.fit(X_train_scaled, y_train_label)
sel.get_support()
# feature selection
X_selected_train = sel.transform(X_train_dummies)
X_selected_test = sel.transform(X_test_dummies)
scaler = StandardScaler().fit(X_selected_train)
X_selected_train_scaled = scaler.transform(X_selected_train)
X_selected_test_scaled = scaler.transform(X_selected_test)
classifier_rf = RandomForestClassifier(n_estimators=200)
classifier_rf.fit(X_selected_train_scaled, y_train_label)
print(f'Training Score: {classifier_rf.score(X_selected_train_scaled, y_train_label)}')
print(f'Testing Score: {classifier_rf.score(X_selected_test_scaled, y_test_label)}')
```
## Results
Feature selection had little effect on the random forest testing score, and the logistic regression performed slightly worse with the reduced datasets.
| github_jupyter |
> **Tip**: Welcome to the Investigate a Dataset project! You will find tips in quoted sections like this to help organize your approach to your investigation. Before submitting your project, it will be a good idea to go back through your report and remove these sections to make the presentation of your work as tidy as possible. First things first, you might want to double-click this Markdown cell and change the title so that it reflects your dataset and investigation.
# Project: Investigate a Dataset (Replace this with something more specific!)
## Table of Contents
<ul>
<li><a href="#intro">Introduction</a></li>
<li><a href="#wrangling">Data Wrangling</a></li>
<li><a href="#eda">Exploratory Data Analysis</a></li>
<li><a href="#conclusions">Conclusions</a></li>
</ul>
<a id='intro'></a>
## Introduction
> **Tip**: In this section of the report, provide a brief introduction to the dataset you've selected for analysis. At the end of this section, describe the questions that you plan on exploring over the course of the report. Try to build your report around the analysis of at least one dependent variable and three independent variables.
>
> If you haven't yet selected and downloaded your data, make sure you do that first before coming back here. If you're not sure what questions to ask right now, then make sure you familiarize yourself with the variables and the dataset context for ideas of what to explore.
```
#The dataset which is selected is tmdb-movies.csv i.e. movies dataset which contains data on movies and ratings.
#Revenue,Runtime and Popularity is tend to be explored.Over a period span reveneue v/s runtime, runtime v/s popularity and popularity v/s revenue is to be explored.
#Questions which will be answered are:
# 1.Over the decades, what are the popular runtimes?
# 2.Spanning the time periods, is revenue proportional to popularity?
# 3.Does runtime affect popularity?
#only visualization and basic correlations are attempted in this project.And any investigation and exploratory are tentative at its best.
# Use this cell to set up import statements for all of the packages that you
# plan to use.
# Remember to include a 'magic word' so that your visualizations are plotted
# inline with the notebook. See this page for more:
# http://ipython.readthedocs.io/en/stable/interactive/magics.html
import pandas as pd
import numpy as np
import csv
import datetime as datetime
import matplotlib.pyplot as plt
% matplotlib inline
```
<a id='wrangling'></a>
## Data Wrangling
> **Tip**: In this section of the report, you will load in the data, check for cleanliness, and then trim and clean your dataset for analysis. Make sure that you document your steps carefully and justify your cleaning decisions.
### General Properties
```
# Load your data and print out a few lines. Perform operations to inspect data
# types and look for instances of missing or possibly errant data.
df=pd.read_csv('tmdb-movies.csv')
```
> **Tip**: You should _not_ perform too many operations in each cell. Create cells freely to explore your data. One option that you can take with this project is to do a lot of explorations in an initial notebook. These don't have to be organized, but make sure you use enough comments to understand the purpose of each code cell. Then, after you're done with your analysis, create a duplicate notebook where you will trim the excess and organize your steps so that you have a flowing, cohesive report.
> **Tip**: Make sure that you keep your reader informed on the steps that you are taking in your investigation. Follow every code cell, or every set of related code cells, with a markdown cell to describe to the reader what was found in the preceding cell(s). Try to make it so that the reader can then understand what they will be seeing in the following cell(s).
### Data Cleaning (Replace this with more specific notes!)
```
# After discussing the structure of the data and any problems that need to be
# cleaned, perform those cleaning steps in the second part of this section.
```
<a id='eda'></a>
## Exploratory Data Analysis
> **Tip**: Now that you've trimmed and cleaned your data, you're ready to move on to exploration. Compute statistics and create visualizations with the goal of addressing the research questions that you posed in the Introduction section. It is recommended that you be systematic with your approach. Look at one variable at a time, and then follow it up by looking at relationships between variables.
### Research Question 1 (Replace this header name!)
```
# Use this, and more code cells, to explore your data. Don't forget to add
# Markdown cells to document your observations and findings.
```
### Research Question 2 (Replace this header name!)
```
# Continue to explore the data to address your additional research
# questions. Add more headers as needed if you have more questions to
# investigate.
```
<a id='conclusions'></a>
## Conclusions
> **Tip**: Finally, summarize your findings and the results that have been performed. Make sure that you are clear with regards to the limitations of your exploration. If you haven't done any statistical tests, do not imply any statistical conclusions. And make sure you avoid implying causation from correlation!
> **Tip**: Once you are satisfied with your work, you should save a copy of the report in HTML or PDF form via the **File** > **Download as** submenu. Before exporting your report, check over it to make sure that the flow of the report is complete. You should probably remove all of the "Tip" quotes like this one so that the presentation is as tidy as possible. Congratulations!
| github_jupyter |
# Análisis de la Movilidad en Bogotá
¿Cuáles son las rutas más críticas de movilidad y sus características en la ciudad de Bogotá?
Se toman los datos de la plataforma:
https://datos.movilidadbogota.gov.co
```
import pandas as pd
import os
os.chdir('../data_raw')
data_file_list = !ls
data_file_list
data_file_list[len(data_file_list)-1]
```
La adquisición de datos será de 4 meses del año 2019 con el propósito de optimizar espacio de almacenamiento y cargas de procesamiento.
```
''' Función df_builder
Recibe como parámetro de entrada una lista de archivos CSV,
hace la lectura y concatena los dataframes, siendo esta concatenación el retorno.
Los datos en los archivos CSV deben tener la misma estructura.
'''
def df_builder(data_list):
    """Read every CSV file in *data_list* and concatenate them into one DataFrame.

    The last file in the list is read first (it seeds the frame), then the
    remaining files are appended in list order — this matches the original
    row ordering. All CSV files must share the same column structure.
    """
    last_index = len(data_list) - 1
    frames = [pd.read_csv(data_list[last_index])]
    frames.extend(pd.read_csv(path) for path in data_list[:last_index])
    return pd.concat(frames)
df_mov = df_builder(data_file_list)
df_mov.shape
df_mov.describe
df_mov.dtypes
## Limpieza de datos
# Verificación que todos los registros correspondan con el año de estudio: 2019
df_mov['AÑO'].value_counts()
```
Dentro de los datasets obtenidos se encuentran datos de otros años.
Vamos a eliminar los registros del año 2020.
```
df_mov.shape # Tamaño original
## Borrar los renglones cuando el AÑO es igual que 2020
df_mov = df_mov.loc[df_mov['AÑO'] == 2019]
df_mov['AÑO'].value_counts() # Verificación
df_mov.shape # Tamaño final del dataframe
```
### Columnas sin datos
Vamos a verificar las columnas que no tienen datos (Nan), posterior las eliminamos para tener un dataset más limpio.
```
df_mov['CODIGO'].value_counts()
df_mov['COEF_BRT'].value_counts()
df_mov['COEF_MIXTO'].value_counts()
df_mov['VEL_MEDIA_BRT'].value_counts()
df_mov['VEL_MEDIA_MIXTO'].value_counts()
df_mov['VEL_MEDIA_PONDERADA'].value_counts()
df_mov['VEL_PONDERADA'].value_counts()
## Borrar las columnas
df_mov = df_mov.drop(labels=['CODIGO', 'COEF_BRT', 'COEF_MIXTO', 'VEL_MEDIA_BRT',
'VEL_MEDIA_MIXTO', 'VEL_MEDIA_PONDERADA', 'VEL_PONDERADA'], axis=1)
df_mov.describe
df_mov.columns
df_mov.to_csv('../notebook/data/data_Mov_Bogota_2019.csv', index=None)
```
## Análisis Unidimensional de las Variables
```
## Conteo de la ocurrencia de una variable y un valor
# Conteo de la movilidad en cada mes
df_mov_sorted = df_mov.sort_values('MES')
df_mov_sorted['MES'].hist(bins=15, xrot=45, grid=True)
##plt.xticks(rotation=45)
df_mov['DIA_SEMANA'].value_counts(normalize=True)
df_mov['NAME_FROM'].value_counts()
df_mov['NAME_TO'].value_counts()
df_mov
```
## Análisis Multidimensional de las Variables
Velocidad promedio versus la trayectoria realizada.
La trayectoria se va a definir como la concatenación entre NAME_FROM y NAME_TO.
```
df_mov['TRAYEC'] = df_mov['NAME_FROM'] + ' - ' +df_mov['NAME_TO']
df_mov['TRAYEC'].value_counts()
```
Mediana de la velocidad promedio en cada trayecto. VEL_PROMEDIO que es más común en cada trayecto:
```
medianVel_Tray = df_mov.groupby('TRAYEC').median()['VEL_PROMEDIO']
medianVel_Tray
```
## Análisis de Texto
```
import nltk
from nltk.corpus import stopwords
print(stopwords.words('spanish'))
list_lite_NAME_TO = df_mov['NAME_TO'].value_counts().sort_values(ascending=False).index[0:10]
list_lite_NAME_TO
df_mov_filter_lite_NAME_TO = df_mov[df_mov['NAME_TO'].isin(list_lite_NAME_TO)]
df_mov_filter_lite_NAME_TO
textos_destino = ''
for row in df_mov_filter_lite_NAME_TO['NAME_TO']:
textos_destino = textos_destino + ' ' + row
## to check the ModuleNotFoundError: No module named 'wordcloud'
## install:
## /anaconda3/bin/python -m pip install wordcloud
import sys
print(sys.executable)
from wordcloud import WordCloud
import matplotlib.pyplot as plt
wc = WordCloud(background_color= 'white')
wc.generate(textos_destino)
plt.axis("off")
plt.imshow(wc, interpolation='bilinear')
plt.show()
```
| github_jupyter |
**Chapter 11 – Training Deep Neural Networks**
_This notebook contains all the sample code and solutions to the exercises in chapter 11._
<table align="left">
<td>
<a target="_blank" href="https://colab.research.google.com/github/ageron/handson-ml2/blob/master/11_training_deep_neural_networks.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
</table>
# Setup
First, let's import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures. We also check that Python 3.5 or later is installed (although Python 2.x may work, it is deprecated so we strongly recommend you use Python 3 instead), as well as Scikit-Learn ≥0.20 and TensorFlow ≥2.0.
```
# Python ≥3.5 is required
import sys
assert sys.version_info >= (3, 5)
# Scikit-Learn ≥0.20 is required
import sklearn
assert sklearn.__version__ >= "0.20"
try:
# %tensorflow_version only exists in Colab.
%tensorflow_version 2.x
except Exception:
pass
# TensorFlow ≥2.0 is required
import tensorflow as tf
from tensorflow import keras
assert tf.__version__ >= "2.0"
# Common imports
import numpy as np
import os
# to make this notebook's output stable across runs
np.random.seed(42)
# To plot pretty figures
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)
# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "deep"
IMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID)
os.makedirs(IMAGES_PATH, exist_ok=True)
def save_fig(fig_id, tight_layout=True, fig_extension="png", resolution=300):
    """Save the current matplotlib figure under IMAGES_PATH.

    Args:
        fig_id: base filename (without extension).
        tight_layout: whether to call plt.tight_layout() before saving.
        fig_extension: image format / file extension.
        resolution: DPI of the saved image.
    """
    filename = "{}.{}".format(fig_id, fig_extension)
    target = os.path.join(IMAGES_PATH, filename)
    print("Saving figure", fig_id)
    if tight_layout:
        plt.tight_layout()
    plt.savefig(target, format=fig_extension, dpi=resolution)
```
# Vanishing/Exploding Gradients Problem
```
def logit(z):
    """Sigmoid (logistic) function: 1 / (1 + exp(-z)), elementwise."""
    denominator = 1 + np.exp(-z)
    return 1 / denominator
z = np.linspace(-5, 5, 200)
plt.plot([-5, 5], [0, 0], 'k-')
plt.plot([-5, 5], [1, 1], 'k--')
plt.plot([0, 0], [-0.2, 1.2], 'k-')
plt.plot([-5, 5], [-3/4, 7/4], 'g--')
plt.plot(z, logit(z), "b-", linewidth=2)
props = dict(facecolor='black', shrink=0.1)
plt.annotate('Saturating', xytext=(3.5, 0.7), xy=(5, 1), arrowprops=props, fontsize=14, ha="center")
plt.annotate('Saturating', xytext=(-3.5, 0.3), xy=(-5, 0), arrowprops=props, fontsize=14, ha="center")
plt.annotate('Linear', xytext=(2, 0.2), xy=(0, 0.5), arrowprops=props, fontsize=14, ha="center")
plt.grid(True)
plt.title("Sigmoid activation function", fontsize=14)
plt.axis([-5, 5, -0.2, 1.2])
save_fig("sigmoid_saturation_plot")
plt.show()
```
## Xavier and He Initialization
```
[name for name in dir(keras.initializers) if not name.startswith("_")]
keras.layers.Dense(10, activation="relu", kernel_initializer="he_normal")
init = keras.initializers.VarianceScaling(scale=2., mode='fan_avg',
distribution='uniform')
keras.layers.Dense(10, activation="relu", kernel_initializer=init)
```
## Nonsaturating Activation Functions
### Leaky ReLU
```
def leaky_relu(z, alpha=0.01):
    """Leaky ReLU: z for z >= 0, alpha * z for z < 0 (elementwise)."""
    scaled = alpha * z
    return np.maximum(scaled, z)
plt.plot(z, leaky_relu(z, 0.05), "b-", linewidth=2)
plt.plot([-5, 5], [0, 0], 'k-')
plt.plot([0, 0], [-0.5, 4.2], 'k-')
plt.grid(True)
props = dict(facecolor='black', shrink=0.1)
plt.annotate('Leak', xytext=(-3.5, 0.5), xy=(-5, -0.2), arrowprops=props, fontsize=14, ha="center")
plt.title("Leaky ReLU activation function", fontsize=14)
plt.axis([-5, 5, -0.5, 4.2])
save_fig("leaky_relu_plot")
plt.show()
[m for m in dir(keras.activations) if not m.startswith("_")]
[m for m in dir(keras.layers) if "relu" in m.lower()]
```
Let's train a neural network on Fashion MNIST using the Leaky ReLU:
```
(X_train_full, y_train_full), (X_test, y_test) = keras.datasets.fashion_mnist.load_data()
X_train_full = X_train_full / 255.0
X_test = X_test / 255.0
X_valid, X_train = X_train_full[:5000], X_train_full[5000:]
y_valid, y_train = y_train_full[:5000], y_train_full[5000:]
tf.random.set_seed(42)
np.random.seed(42)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(300, kernel_initializer="he_normal"),
keras.layers.LeakyReLU(),
keras.layers.Dense(100, kernel_initializer="he_normal"),
keras.layers.LeakyReLU(),
keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-3),
metrics=["accuracy"])
history = model.fit(X_train, y_train, epochs=10,
validation_data=(X_valid, y_valid))
```
Now let's try PReLU:
```
tf.random.set_seed(42)
np.random.seed(42)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(300, kernel_initializer="he_normal"),
keras.layers.PReLU(),
keras.layers.Dense(100, kernel_initializer="he_normal"),
keras.layers.PReLU(),
keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-3),
metrics=["accuracy"])
history = model.fit(X_train, y_train, epochs=10,
validation_data=(X_valid, y_valid))
```
### ELU
```
def elu(z, alpha=1):
    """ELU activation: z for z >= 0, alpha * (exp(z) - 1) for z < 0."""
    negative_branch = alpha * (np.exp(z) - 1)
    return np.where(z < 0, negative_branch, z)
plt.plot(z, elu(z), "b-", linewidth=2)
plt.plot([-5, 5], [0, 0], 'k-')
plt.plot([-5, 5], [-1, -1], 'k--')
plt.plot([0, 0], [-2.2, 3.2], 'k-')
plt.grid(True)
plt.title(r"ELU activation function ($\alpha=1$)", fontsize=14)
plt.axis([-5, 5, -2.2, 3.2])
save_fig("elu_plot")
plt.show()
```
Implementing ELU in TensorFlow is trivial, just specify the activation function when building each layer:
```
keras.layers.Dense(10, activation="elu")
```
### SELU
This activation function was proposed in this [great paper](https://arxiv.org/pdf/1706.02515.pdf) by Günter Klambauer, Thomas Unterthiner and Andreas Mayr, published in June 2017. During training, a neural network composed exclusively of a stack of dense layers using the SELU activation function and LeCun initialization will self-normalize: the output of each layer will tend to preserve the same mean and variance during training, which solves the vanishing/exploding gradients problem. As a result, this activation function outperforms the other activation functions very significantly for such neural nets, so you should really try it out. Unfortunately, the self-normalizing property of the SELU activation function is easily broken: you cannot use ℓ<sub>1</sub> or ℓ<sub>2</sub> regularization, regular dropout, max-norm, skip connections or other non-sequential topologies (so recurrent neural networks won't self-normalize). However, in practice it works quite well with sequential CNNs. If you break self-normalization, SELU will not necessarily outperform other activation functions.
```
from scipy.special import erfc

# alpha and scale chosen so SELU self-normalizes activations toward mean 0
# and standard deviation 1 (equation 14 of Klambauer et al., 2017):
alpha_0_1 = -np.sqrt(2 / np.pi) / (erfc(1 / np.sqrt(2)) * np.exp(1 / 2) - 1)
scale_0_1 = (1 - erfc(1 / np.sqrt(2)) * np.sqrt(np.e)) * np.sqrt(2 * np.pi) * (
    2 * erfc(np.sqrt(2)) * np.e ** 2
    + np.pi * erfc(1 / np.sqrt(2)) ** 2 * np.e
    - 2 * (2 + np.pi) * erfc(1 / np.sqrt(2)) * np.sqrt(np.e)
    + np.pi
    + 2
) ** (-1 / 2)

def selu(z, scale=scale_0_1, alpha=alpha_0_1):
    """Scaled ELU: scale * elu(z, alpha), with the self-normalizing constants."""
    # ELU inlined: alpha * (exp(z) - 1) on the negative side, identity otherwise.
    negative_branch = alpha * (np.exp(z) - 1)
    return scale * np.where(z < 0, negative_branch, z)
plt.plot(z, selu(z), "b-", linewidth=2)
plt.plot([-5, 5], [0, 0], 'k-')
plt.plot([-5, 5], [-1.758, -1.758], 'k--')
plt.plot([0, 0], [-2.2, 3.2], 'k-')
plt.grid(True)
plt.title("SELU activation function", fontsize=14)
plt.axis([-5, 5, -2.2, 3.2])
save_fig("selu_plot")
plt.show()
```
By default, the SELU hyperparameters (`scale` and `alpha`) are tuned in such a way that the mean output of each neuron remains close to 0, and the standard deviation remains close to 1 (assuming the inputs are standardized with mean 0 and standard deviation 1 too). Using this activation function, even a 1,000 layer deep neural network preserves roughly mean 0 and standard deviation 1 across all layers, avoiding the exploding/vanishing gradients problem:
```
np.random.seed(42)
Z = np.random.normal(size=(500, 100)) # standardized inputs
for layer in range(1000):
W = np.random.normal(size=(100, 100), scale=np.sqrt(1 / 100)) # LeCun initialization
Z = selu(np.dot(Z, W))
means = np.mean(Z, axis=0).mean()
stds = np.std(Z, axis=0).mean()
if layer % 100 == 0:
print("Layer {}: mean {:.2f}, std deviation {:.2f}".format(layer, means, stds))
```
Using SELU is easy:
```
keras.layers.Dense(10, activation="selu",
kernel_initializer="lecun_normal")
```
Let's create a neural net for Fashion MNIST with 100 hidden layers, using the SELU activation function:
```
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential()
model.add(keras.layers.Flatten(input_shape=[28, 28]))
model.add(keras.layers.Dense(300, activation="selu",
kernel_initializer="lecun_normal"))
for layer in range(99):
model.add(keras.layers.Dense(100, activation="selu",
kernel_initializer="lecun_normal"))
model.add(keras.layers.Dense(10, activation="softmax"))
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-3),
metrics=["accuracy"])
```
Now let's train it. Do not forget to scale the inputs to mean 0 and standard deviation 1:
```
pixel_means = X_train.mean(axis=0, keepdims=True)
pixel_stds = X_train.std(axis=0, keepdims=True)
X_train_scaled = (X_train - pixel_means) / pixel_stds
X_valid_scaled = (X_valid - pixel_means) / pixel_stds
X_test_scaled = (X_test - pixel_means) / pixel_stds
history = model.fit(X_train_scaled, y_train, epochs=5,
validation_data=(X_valid_scaled, y_valid))
```
Now look at what happens if we try to use the ReLU activation function instead:
```
np.random.seed(42)
tf.random.set_seed(42)
model = keras.models.Sequential()
model.add(keras.layers.Flatten(input_shape=[28, 28]))
model.add(keras.layers.Dense(300, activation="relu", kernel_initializer="he_normal"))
for layer in range(99):
model.add(keras.layers.Dense(100, activation="relu", kernel_initializer="he_normal"))
model.add(keras.layers.Dense(10, activation="softmax"))
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-3),
metrics=["accuracy"])
history = model.fit(X_train_scaled, y_train, epochs=5,
validation_data=(X_valid_scaled, y_valid))
```
Not great at all, we suffered from the vanishing/exploding gradients problem.
# Batch Normalization
```
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.BatchNormalization(),
keras.layers.Dense(300, activation="relu"),
keras.layers.BatchNormalization(),
keras.layers.Dense(100, activation="relu"),
keras.layers.BatchNormalization(),
keras.layers.Dense(10, activation="softmax")
])
model.summary()
bn1 = model.layers[1]
[(var.name, var.trainable) for var in bn1.variables]
bn1.updates
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-3),
metrics=["accuracy"])
history = model.fit(X_train, y_train, epochs=10,
validation_data=(X_valid, y_valid))
```
Sometimes applying BN before the activation function works better (there's a debate on this topic). Moreover, the layer before a `BatchNormalization` layer does not need to have bias terms, since the `BatchNormalization` layer has an offset parameter per input anyway — keeping both would be a waste of parameters — so you can set `use_bias=False` when creating those layers:
```
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.BatchNormalization(),
keras.layers.Dense(300, use_bias=False),
keras.layers.BatchNormalization(),
keras.layers.Activation("relu"),
keras.layers.Dense(100, use_bias=False),
keras.layers.Activation("relu"),
keras.layers.BatchNormalization(),
keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-3),
metrics=["accuracy"])
history = model.fit(X_train, y_train, epochs=10,
validation_data=(X_valid, y_valid))
```
## Gradient Clipping
All Keras optimizers accept `clipnorm` or `clipvalue` arguments:
```
optimizer = keras.optimizers.SGD(clipvalue=1.0)
optimizer = keras.optimizers.SGD(clipnorm=1.0)
```
## Reusing Pretrained Layers
### Reusing a Keras model
Let's split the fashion MNIST training set in two:
* `X_train_A`: all images of all items except for sandals and shirts (classes 5 and 6).
* `X_train_B`: a much smaller training set of just the first 200 images of sandals or shirts.
The validation set and the test set are also split this way, but without restricting the number of images.
We will train a model on set A (classification task with 8 classes), and try to reuse it to tackle set B (binary classification). We hope to transfer a little bit of knowledge from task A to task B, since classes in set A (sneakers, ankle boots, coats, t-shirts, etc.) are somewhat similar to classes in set B (sandals and shirts). However, since we are using `Dense` layers, only patterns that occur at the same location can be reused (in contrast, convolutional layers will transfer much better, since learned patterns can be detected anywhere on the image, as we will see in the CNN chapter).
```
def split_dataset(X, y):
    """Split (X, y) into task A (all classes but sandals/shirts) and task B.

    Task A keeps 8 classes: labels above 6 are shifted down by 2 so the
    class indices stay contiguous (7, 8, 9 -> 5, 6, 7).
    Task B is binary: 1.0 if the item is a shirt (class 6), 0.0 if it is
    a sandal (class 5).
    """
    in_b = np.isin(y, (5, 6))  # sandals or shirts
    labels_a = y[~in_b]
    labels_a[labels_a > 6] -= 2
    labels_b = (y[in_b] == 6).astype(np.float32)
    return (X[~in_b], labels_a), (X[in_b], labels_b)
(X_train_A, y_train_A), (X_train_B, y_train_B) = split_dataset(X_train, y_train)
(X_valid_A, y_valid_A), (X_valid_B, y_valid_B) = split_dataset(X_valid, y_valid)
(X_test_A, y_test_A), (X_test_B, y_test_B) = split_dataset(X_test, y_test)
X_train_B = X_train_B[:200]
y_train_B = y_train_B[:200]
X_train_A.shape
X_train_B.shape
y_train_A[:30]
y_train_B[:30]
tf.random.set_seed(42)
np.random.seed(42)
model_A = keras.models.Sequential()
model_A.add(keras.layers.Flatten(input_shape=[28, 28]))
for n_hidden in (300, 100, 50, 50, 50):
model_A.add(keras.layers.Dense(n_hidden, activation="selu"))
model_A.add(keras.layers.Dense(8, activation="softmax"))
model_A.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-3),
metrics=["accuracy"])
history = model_A.fit(X_train_A, y_train_A, epochs=20,
validation_data=(X_valid_A, y_valid_A))
model_A.save("my_model_A.h5")
model_B = keras.models.Sequential()
model_B.add(keras.layers.Flatten(input_shape=[28, 28]))
for n_hidden in (300, 100, 50, 50, 50):
model_B.add(keras.layers.Dense(n_hidden, activation="selu"))
model_B.add(keras.layers.Dense(1, activation="sigmoid"))
model_B.compile(loss="binary_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-3),
metrics=["accuracy"])
history = model_B.fit(X_train_B, y_train_B, epochs=20,
validation_data=(X_valid_B, y_valid_B))
model.summary()
model_A = keras.models.load_model("my_model_A.h5")
model_B_on_A = keras.models.Sequential(model_A.layers[:-1])
model_B_on_A.add(keras.layers.Dense(1, activation="sigmoid"))
model_A_clone = keras.models.clone_model(model_A)
model_A_clone.set_weights(model_A.get_weights())
for layer in model_B_on_A.layers[:-1]:
layer.trainable = False
model_B_on_A.compile(loss="binary_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-3),
metrics=["accuracy"])
history = model_B_on_A.fit(X_train_B, y_train_B, epochs=4,
validation_data=(X_valid_B, y_valid_B))
for layer in model_B_on_A.layers[:-1]:
layer.trainable = True
model_B_on_A.compile(loss="binary_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-3),
metrics=["accuracy"])
history = model_B_on_A.fit(X_train_B, y_train_B, epochs=16,
validation_data=(X_valid_B, y_valid_B))
```
So, what's the final verdict?
```
model_B.evaluate(X_test_B, y_test_B)
model_B_on_A.evaluate(X_test_B, y_test_B)
```
Great! We got quite a bit of transfer: the error rate dropped by a factor of almost 4!
```
(100 - 97.05) / (100 - 99.25)
```
# Faster Optimizers
## Momentum optimization
```
optimizer = keras.optimizers.SGD(lr=0.001, momentum=0.9)
```
## Nesterov Accelerated Gradient
```
optimizer = keras.optimizers.SGD(lr=0.001, momentum=0.9, nesterov=True)
```
## AdaGrad
```
optimizer = keras.optimizers.Adagrad(lr=0.001)
```
## RMSProp
```
optimizer = keras.optimizers.RMSprop(lr=0.001, rho=0.9)
```
## Adam Optimization
```
optimizer = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999)
```
## Adamax Optimization
```
optimizer = keras.optimizers.Adamax(lr=0.001, beta_1=0.9, beta_2=0.999)
```
## Nadam Optimization
```
optimizer = keras.optimizers.Nadam(lr=0.001, beta_1=0.9, beta_2=0.999)
```
## Learning Rate Scheduling
### Power Scheduling
```lr = lr0 / (1 + steps / s)**c```
* Keras uses `c=1` and `s = 1 / decay`
```
optimizer = keras.optimizers.SGD(lr=0.01, decay=1e-4)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(300, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(100, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"])
n_epochs = 25
history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
validation_data=(X_valid_scaled, y_valid))
learning_rate = 0.01
decay = 1e-4
batch_size = 32
n_steps_per_epoch = len(X_train) // batch_size
epochs = np.arange(n_epochs)
lrs = learning_rate / (1 + decay * epochs * n_steps_per_epoch)
plt.plot(epochs, lrs, "o-")
plt.axis([0, n_epochs - 1, 0, 0.01])
plt.xlabel("Epoch")
plt.ylabel("Learning Rate")
plt.title("Power Scheduling", fontsize=14)
plt.grid(True)
plt.show()
```
### Exponential Scheduling
```lr = lr0 * 0.1**(epoch / s)```
```
def exponential_decay_fn(epoch):
    """Exponential LR schedule hard-coded to lr0=0.01, s=20 epochs."""
    return 0.01 * 0.1 ** (epoch / 20)

def exponential_decay(lr0, s):
    """Return a schedule function that divides lr0 by 10 every `s` epochs."""
    def exponential_decay_fn(epoch):
        decay_exponent = epoch / s
        return lr0 * 0.1 ** decay_exponent
    return exponential_decay_fn

# Rebind the name to a factory-built schedule equivalent to the hard-coded one.
exponential_decay_fn = exponential_decay(lr0=0.01, s=20)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(300, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(100, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy", optimizer="nadam", metrics=["accuracy"])
n_epochs = 25
lr_scheduler = keras.callbacks.LearningRateScheduler(exponential_decay_fn)
history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
validation_data=(X_valid_scaled, y_valid),
callbacks=[lr_scheduler])
plt.plot(history.epoch, history.history["lr"], "o-")
plt.axis([0, n_epochs - 1, 0, 0.011])
plt.xlabel("Epoch")
plt.ylabel("Learning Rate")
plt.title("Exponential Scheduling", fontsize=14)
plt.grid(True)
plt.show()
```
The schedule function can take the current learning rate as a second argument:
```
def exponential_decay_fn(epoch, lr):
    """Multiply the current learning rate by a constant per-epoch factor.

    The `epoch` argument is unused here; Keras passes it anyway when a
    scheduler function accepts two arguments.
    """
    per_epoch_factor = 0.1 ** (1 / 20)
    return lr * per_epoch_factor
If you want to update the learning rate at each iteration rather than at each epoch, you must write your own callback class:
```
K = keras.backend
class ExponentialDecay(keras.callbacks.Callback):
    """Callback that decays the learning rate by 10x every `s` batches.

    Unlike `LearningRateScheduler`, this updates the LR on every batch
    rather than once per epoch, and it logs the current LR at epoch end
    so it appears in `history.history["lr"]`.
    """
    def __init__(self, s=40000):
        super().__init__()
        self.s = s  # number of batches over which the LR is divided by 10
    def on_batch_begin(self, batch, logs=None):
        # Note: the `batch` argument is reset at each epoch
        lr = K.get_value(self.model.optimizer.lr)
        # Bug fix: use self.s here. The original read the *global* `s`,
        # which only worked by accident because the notebook happened to
        # define a module-level `s` with the same value.
        K.set_value(self.model.optimizer.lr, lr * 0.1 ** (1 / self.s))
    def on_epoch_end(self, epoch, logs=None):
        # Record the current LR so Keras stores it in the training history.
        logs = logs or {}
        logs['lr'] = K.get_value(self.model.optimizer.lr)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(300, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(100, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(10, activation="softmax")
])
lr0 = 0.01
optimizer = keras.optimizers.Nadam(lr=lr0)
model.compile(loss="sparse_categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"])
n_epochs = 25
s = 20 * len(X_train) // 32 # number of steps in 20 epochs (batch size = 32)
exp_decay = ExponentialDecay(s)
history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
validation_data=(X_valid_scaled, y_valid),
callbacks=[exp_decay])
n_steps = n_epochs * len(X_train) // 32
steps = np.arange(n_steps)
lrs = lr0 * 0.1**(steps / s)
plt.plot(steps, lrs, "-", linewidth=2)
plt.axis([0, n_steps - 1, 0, lr0 * 1.1])
plt.xlabel("Batch")
plt.ylabel("Learning Rate")
plt.title("Exponential Scheduling (per batch)", fontsize=14)
plt.grid(True)
plt.show()
```
### Piecewise Constant Scheduling
```
def piecewise_constant_fn(epoch):
    """Hand-written piecewise-constant schedule: 0.01 / 0.005 / 0.001."""
    if epoch < 5:
        return 0.01
    if epoch < 15:
        return 0.005
    return 0.001

def piecewise_constant(boundaries, values):
    """Build a schedule fn: values[i] applies while epoch is in
    [boundaries[i-1], boundaries[i]); the final value applies beyond the
    last boundary."""
    boundaries = np.array([0] + boundaries)
    values = np.array(values)
    def piecewise_constant_fn(epoch):
        # argmax finds the first boundary strictly above `epoch`; when no
        # boundary is above, argmax is 0 and index -1 picks the last value.
        idx = np.argmax(boundaries > epoch) - 1
        return values[idx]
    return piecewise_constant_fn

# Rebind the name to a factory-built schedule equivalent to the hard-coded one.
piecewise_constant_fn = piecewise_constant([5, 15], [0.01, 0.005, 0.001])
piecewise_constant_fn = piecewise_constant([5, 15], [0.01, 0.005, 0.001])
lr_scheduler = keras.callbacks.LearningRateScheduler(piecewise_constant_fn)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(300, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(100, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy", optimizer="nadam", metrics=["accuracy"])
n_epochs = 25
history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
validation_data=(X_valid_scaled, y_valid),
callbacks=[lr_scheduler])
plt.plot(history.epoch, [piecewise_constant_fn(epoch) for epoch in history.epoch], "o-")
plt.axis([0, n_epochs - 1, 0, 0.011])
plt.xlabel("Epoch")
plt.ylabel("Learning Rate")
plt.title("Piecewise Constant Scheduling", fontsize=14)
plt.grid(True)
plt.show()
```
### Performance Scheduling
```
tf.random.set_seed(42)
np.random.seed(42)
lr_scheduler = keras.callbacks.ReduceLROnPlateau(factor=0.5, patience=5)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(300, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(100, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(10, activation="softmax")
])
optimizer = keras.optimizers.SGD(lr=0.02, momentum=0.9)
model.compile(loss="sparse_categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"])
n_epochs = 25
history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
validation_data=(X_valid_scaled, y_valid),
callbacks=[lr_scheduler])
plt.plot(history.epoch, history.history["lr"], "bo-")
plt.xlabel("Epoch")
plt.ylabel("Learning Rate", color='b')
plt.tick_params('y', colors='b')
plt.gca().set_xlim(0, n_epochs - 1)
plt.grid(True)
ax2 = plt.gca().twinx()
ax2.plot(history.epoch, history.history["val_loss"], "r^-")
ax2.set_ylabel('Validation Loss', color='r')
ax2.tick_params('y', colors='r')
plt.title("Reduce LR on Plateau", fontsize=14)
plt.show()
```
### tf.keras schedulers
```
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(300, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(100, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(10, activation="softmax")
])
s = 20 * len(X_train) // 32 # number of steps in 20 epochs (batch size = 32)
learning_rate = keras.optimizers.schedules.ExponentialDecay(0.01, s, 0.1)
optimizer = keras.optimizers.SGD(learning_rate)
model.compile(loss="sparse_categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"])
n_epochs = 25
history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
validation_data=(X_valid_scaled, y_valid))
```
For piecewise constant scheduling, try this:
```
learning_rate = keras.optimizers.schedules.PiecewiseConstantDecay(
boundaries=[5. * n_steps_per_epoch, 15. * n_steps_per_epoch],
values=[0.01, 0.005, 0.001])
```
### 1Cycle scheduling
```
K = keras.backend
class ExponentialLearningRate(keras.callbacks.Callback):
    """Callback for an LR-range test: after every batch, record the current
    (learning rate, loss) pair and multiply the learning rate by `factor`.
    """
    def __init__(self, factor):
        # Bug fix: the Keras Callback base class was never initialized,
        # skipping the state the framework expects every callback to have.
        super().__init__()
        self.factor = factor
        self.rates = []   # learning rate observed before each update
        self.losses = []  # training loss reported after each batch
    def on_batch_end(self, batch, logs):
        self.rates.append(K.get_value(self.model.optimizer.lr))
        self.losses.append(logs["loss"])
        # Grow the LR exponentially so a 1-epoch run sweeps the full range.
        K.set_value(self.model.optimizer.lr, self.model.optimizer.lr * self.factor)
def find_learning_rate(model, X, y, epochs=1, batch_size=32, min_rate=10**-5, max_rate=10):
    """Sweep the learning rate exponentially from `min_rate` to `max_rate`
    over a short training run and return the observed (rates, losses).

    The model's weights and optimizer learning rate are restored before
    returning, so the sweep leaves the model untouched.
    """
    saved_weights = model.get_weights()
    n_iterations = len(X) // batch_size * epochs
    # Per-batch multiplicative growth so the LR spans the full range.
    growth_factor = np.exp(np.log(max_rate / min_rate) / n_iterations)
    original_lr = K.get_value(model.optimizer.lr)
    K.set_value(model.optimizer.lr, min_rate)
    tracker = ExponentialLearningRate(growth_factor)
    model.fit(X, y, epochs=epochs, batch_size=batch_size,
              callbacks=[tracker])
    # Undo the sweep: restore the optimizer's LR and the model's weights.
    K.set_value(model.optimizer.lr, original_lr)
    model.set_weights(saved_weights)
    return tracker.rates, tracker.losses
def plot_lr_vs_loss(rates, losses):
    """Plot loss versus learning rate on a log-x axis, with a horizontal
    line marking the minimum loss. Used to pick a good max learning rate."""
    plt.plot(rates, losses)
    plt.gca().set_xscale('log')
    lo_rate, hi_rate = min(rates), max(rates)
    best_loss = min(losses)
    plt.hlines(best_loss, lo_rate, hi_rate)
    # Clip the y-axis so the initial (typically huge) losses don't
    # flatten the interesting part of the curve.
    plt.axis([lo_rate, hi_rate, best_loss, (losses[0] + best_loss) / 2])
    plt.xlabel("Learning rate")
    plt.ylabel("Loss")
tf.random.set_seed(42)
np.random.seed(42)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dense(300, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(100, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy",
optimizer=keras.optimizers.SGD(lr=1e-3),
metrics=["accuracy"])
batch_size = 128
rates, losses = find_learning_rate(model, X_train_scaled, y_train, epochs=1, batch_size=batch_size)
plot_lr_vs_loss(rates, losses)
class OneCycleScheduler(keras.callbacks.Callback):
    """1cycle learning-rate schedule (Leslie Smith): ramp the LR linearly
    from `start_rate` up to `max_rate` over the first half of training,
    back down to `start_rate` over the second half, then anneal towards
    `last_rate` during the final `last_iterations` batches.
    """
    def __init__(self, iterations, max_rate, start_rate=None,
                 last_iterations=None, last_rate=None):
        # Bug fix: initialize the Keras Callback base class.
        super().__init__()
        self.iterations = iterations
        self.max_rate = max_rate
        self.start_rate = start_rate or max_rate / 10
        self.last_iterations = last_iterations or iterations // 10 + 1
        self.half_iteration = (iterations - self.last_iterations) // 2
        self.last_rate = last_rate or self.start_rate / 1000
        self.iteration = 0  # global batch counter across all epochs
    def _interpolate(self, iter1, iter2, rate1, rate2):
        # Linear interpolation of the rate at the current iteration.
        return ((rate2 - rate1) * (self.iteration - iter1)
                / (iter2 - iter1) + rate1)
    def on_batch_begin(self, batch, logs):
        if self.iteration < self.half_iteration:
            # Phase 1: ramp up from start_rate to max_rate.
            rate = self._interpolate(0, self.half_iteration, self.start_rate, self.max_rate)
        elif self.iteration < 2 * self.half_iteration:
            # Phase 2: ramp back down to start_rate.
            rate = self._interpolate(self.half_iteration, 2 * self.half_iteration,
                                     self.max_rate, self.start_rate)
        else:
            # Phase 3: anneal down towards last_rate, never below it.
            rate = self._interpolate(2 * self.half_iteration, self.iterations,
                                     self.start_rate, self.last_rate)
            rate = max(rate, self.last_rate)
        self.iteration += 1
        K.set_value(self.model.optimizer.lr, rate)
n_epochs = 25
onecycle = OneCycleScheduler(len(X_train) // batch_size * n_epochs, max_rate=0.05)
history = model.fit(X_train_scaled, y_train, epochs=n_epochs, batch_size=batch_size,
validation_data=(X_valid_scaled, y_valid),
callbacks=[onecycle])
```
# Avoiding Overfitting Through Regularization
## $\ell_1$ and $\ell_2$ regularization
```
# A single Dense layer with an ℓ2 weight penalty applied to its kernel.
layer = keras.layers.Dense(100, activation="elu",
                           kernel_initializer="he_normal",
                           kernel_regularizer=keras.regularizers.l2(0.01))
# or l1(0.1) for ℓ1 regularization with a factor of 0.1
# or l1_l2(0.1, 0.01) for both ℓ1 and ℓ2 regularization, with factors 0.1 and 0.01 respectively
# Full model with ℓ2 regularization on every Dense layer (including output).
model = keras.models.Sequential([
    keras.layers.Flatten(input_shape=[28, 28]),
    keras.layers.Dense(300, activation="elu",
                       kernel_initializer="he_normal",
                       kernel_regularizer=keras.regularizers.l2(0.01)),
    keras.layers.Dense(100, activation="elu",
                       kernel_initializer="he_normal",
                       kernel_regularizer=keras.regularizers.l2(0.01)),
    keras.layers.Dense(10, activation="softmax",
                       kernel_regularizer=keras.regularizers.l2(0.01))
])
model.compile(loss="sparse_categorical_crossentropy", optimizer="nadam", metrics=["accuracy"])
n_epochs = 2
history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
                    validation_data=(X_valid_scaled, y_valid))
# Avoid repeating the same regularizer arguments on every layer: bind them
# once with functools.partial and reuse the resulting layer factory.
from functools import partial
RegularizedDense = partial(keras.layers.Dense,
                           activation="elu",
                           kernel_initializer="he_normal",
                           kernel_regularizer=keras.regularizers.l2(0.01))
model = keras.models.Sequential([
    keras.layers.Flatten(input_shape=[28, 28]),
    RegularizedDense(300),
    RegularizedDense(100),
    RegularizedDense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy", optimizer="nadam", metrics=["accuracy"])
n_epochs = 2
history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
                    validation_data=(X_valid_scaled, y_valid))
```
## Dropout
```
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.Dropout(rate=0.2),
keras.layers.Dense(300, activation="elu", kernel_initializer="he_normal"),
keras.layers.Dropout(rate=0.2),
keras.layers.Dense(100, activation="elu", kernel_initializer="he_normal"),
keras.layers.Dropout(rate=0.2),
keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy", optimizer="nadam", metrics=["accuracy"])
n_epochs = 2
history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
validation_data=(X_valid_scaled, y_valid))
```
## Alpha Dropout
```
tf.random.set_seed(42)
np.random.seed(42)
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
keras.layers.AlphaDropout(rate=0.2),
keras.layers.Dense(300, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.AlphaDropout(rate=0.2),
keras.layers.Dense(100, activation="selu", kernel_initializer="lecun_normal"),
keras.layers.AlphaDropout(rate=0.2),
keras.layers.Dense(10, activation="softmax")
])
optimizer = keras.optimizers.SGD(lr=0.01, momentum=0.9, nesterov=True)
model.compile(loss="sparse_categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"])
n_epochs = 20
history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
validation_data=(X_valid_scaled, y_valid))
model.evaluate(X_test_scaled, y_test)
model.evaluate(X_train_scaled, y_train)
history = model.fit(X_train_scaled, y_train)
```
## MC Dropout
```
tf.random.set_seed(42)
np.random.seed(42)
y_probas = np.stack([model(X_test_scaled, training=True)
for sample in range(100)])
y_proba = y_probas.mean(axis=0)
y_std = y_probas.std(axis=0)
np.round(model.predict(X_test_scaled[:1]), 2)
np.round(y_probas[:, :1], 2)
np.round(y_proba[:1], 2)
y_std = y_probas.std(axis=0)
np.round(y_std[:1], 2)
y_pred = np.argmax(y_proba, axis=1)
accuracy = np.sum(y_pred == y_test) / len(y_test)
accuracy
class MCDropout(keras.layers.Dropout):
    """Dropout that stays active at inference time (Monte-Carlo Dropout)."""
    def call(self, inputs):
        # Force training=True so dropout is applied even during predict().
        return super().call(inputs, training=True)
class MCAlphaDropout(keras.layers.AlphaDropout):
    """AlphaDropout that stays active at inference time (for SELU networks)."""
    def call(self, inputs):
        # Force training=True so dropout is applied even during predict().
        return super().call(inputs, training=True)
tf.random.set_seed(42)
np.random.seed(42)
mc_model = keras.models.Sequential([
MCAlphaDropout(layer.rate) if isinstance(layer, keras.layers.AlphaDropout) else layer
for layer in model.layers
])
mc_model.summary()
optimizer = keras.optimizers.SGD(lr=0.01, momentum=0.9, nesterov=True)
mc_model.compile(loss="sparse_categorical_crossentropy", optimizer=optimizer, metrics=["accuracy"])
mc_model.set_weights(model.get_weights())
```
Now we can use the model with MC Dropout:
```
np.round(np.mean([mc_model.predict(X_test_scaled[:1]) for sample in range(100)], axis=0), 2)
```
## Max norm
```
layer = keras.layers.Dense(100, activation="selu", kernel_initializer="lecun_normal",
kernel_constraint=keras.constraints.max_norm(1.))
MaxNormDense = partial(keras.layers.Dense,
activation="selu", kernel_initializer="lecun_normal",
kernel_constraint=keras.constraints.max_norm(1.))
model = keras.models.Sequential([
keras.layers.Flatten(input_shape=[28, 28]),
MaxNormDense(300),
MaxNormDense(100),
keras.layers.Dense(10, activation="softmax")
])
model.compile(loss="sparse_categorical_crossentropy", optimizer="nadam", metrics=["accuracy"])
n_epochs = 2
history = model.fit(X_train_scaled, y_train, epochs=n_epochs,
validation_data=(X_valid_scaled, y_valid))
```
# Exercises
## 1. to 7.
See appendix A.
## 8. Deep Learning
### 8.1.
_Exercise: Build a DNN with five hidden layers of 100 neurons each, He initialization, and the ELU activation function._
### 8.2.
_Exercise: Using Adam optimization and early stopping, try training it on MNIST but only on digits 0 to 4, as we will use transfer learning for digits 5 to 9 in the next exercise. You will need a softmax output layer with five neurons, and as always make sure to save checkpoints at regular intervals and save the final model so you can reuse it later._
### 8.3.
_Exercise: Tune the hyperparameters using cross-validation and see what precision you can achieve._
### 8.4.
_Exercise: Now try adding Batch Normalization and compare the learning curves: is it converging faster than before? Does it produce a better model?_
### 8.5.
_Exercise: is the model overfitting the training set? Try adding dropout to every layer and try again. Does it help?_
## 9. Transfer learning
### 9.1.
_Exercise: create a new DNN that reuses all the pretrained hidden layers of the previous model, freezes them, and replaces the softmax output layer with a new one._
### 9.2.
_Exercise: train this new DNN on digits 5 to 9, using only 100 images per digit, and time how long it takes. Despite this small number of examples, can you achieve high precision?_
### 9.3.
_Exercise: try caching the frozen layers, and train the model again: how much faster is it now?_
### 9.4.
_Exercise: try again reusing just four hidden layers instead of five. Can you achieve a higher precision?_
### 9.5.
_Exercise: now unfreeze the top two hidden layers and continue training: can you get the model to perform even better?_
## 10. Pretraining on an auxiliary task
In this exercise you will build a DNN that compares two MNIST digit images and predicts whether they represent the same digit or not. Then you will reuse the lower layers of this network to train an MNIST classifier using very little training data.
### 10.1.
Exercise: _Start by building two DNNs (let's call them DNN A and B), both similar to the one you built earlier but without the output layer: each DNN should have five hidden layers of 100 neurons each, He initialization, and ELU activation. Next, add one more hidden layer with 10 units on top of both DNNs. You should use the `keras.layers.concatenate()` function to concatenate the outputs of both DNNs, then feed the result to the hidden layer. Finally, add an output layer with a single neuron using the logistic activation function._
### 10.2.
_Exercise: split the MNIST training set into two sets: split #1 should contain 55,000 images, and split #2 should contain 5,000 images. Create a function that generates a training batch where each instance is a pair of MNIST images picked from split #1. Half of the training instances should be pairs of images that belong to the same class, while the other half should be images from different classes. For each pair, the training label should be 0 if the images are from the same class, or 1 if they are from different classes._
### 10.3.
_Exercise: train the DNN on this training set. For each image pair, you can simultaneously feed the first image to DNN A and the second image to DNN B. The whole network will gradually learn to tell whether two images belong to the same class or not._
### 10.4.
_Exercise: now create a new DNN by reusing and freezing the hidden layers of DNN A and adding a softmax output layer on top with 10 neurons. Train this network on split #2 and see if you can achieve high performance despite having only 500 images per class._
| github_jupyter |
## Imports
```
from bayes_opt import BayesianOptimization
import pandas as pd
import numpy as np
from datetime import timedelta
from tqdm import tqdm_notebook as tqdm
from sklearn import metrics
from sklearn.model_selection import StratifiedKFold
import lightgbm as lgb
from matplotlib import pyplot as plt
#import seaborn as sns
from collections import defaultdict, Counter
from sklearn.preprocessing import StandardScaler, MinMaxScaler, PolynomialFeatures, RobustScaler
from sklearn.model_selection import KFold
from sklearn.metrics import roc_auc_score as auc
from sklearn.linear_model import LogisticRegression
from scipy.special import logit
import lightgbm
from xgboost import XGBClassifier
from bayes_opt import BayesianOptimization
from sklearn.ensemble import RandomForestClassifier
import xgboost as xgb
file = {
'test' : './Data/test_QyjYwdj.csv',
'train':'./Data/train.csv',
'submission':'./Data/sample_submission_Byiv0dS.csv',
'coupon_item_mapping' :'./Data/coupon_item_mapping.csv',
'campaign_data' : './Data/campaign_data.csv',
'item_data' : './Data/item_data.csv',
'customer_transaction_data':'./Data/customer_transaction_data.csv',
'customer_demographics':'./Data/customer_demographics.csv',
}
train = pd.read_csv(file.get("train"))#
test = pd.read_csv(file.get("test"))#
coupon_item_mapping = pd.read_csv(file.get("coupon_item_mapping"))#No
item_data = pd.read_csv(file.get("item_data"))# may be yes
customer_transaction_data = pd.read_csv(file.get("customer_transaction_data"))#may be yes
campaign_data = pd.read_csv(file.get("campaign_data"))#
customer_demographics = pd.read_csv(file.get("customer_demographics"))#
submission = pd.read_csv(file.get("submission"))
train.shape
data = pd.concat([train, test], sort=False).reset_index(drop = True)
ltr = len(train)
data = data.merge(campaign_data, on='campaign_id')# campaign_data
data['start_date'] = pd.to_datetime(data['start_date'], dayfirst=True)
data['end_date'] = pd.to_datetime(data['end_date'], dayfirst=True)
data['campaign_type'].factorize()
data['campaign_type'] = pd.Series(data['campaign_type'].factorize()[0]).replace(-1, np.nan)
customer_demographics['no_of_children'] = customer_demographics['no_of_children'].replace('3+', 3).astype(float)
customer_demographics['family_size'] = customer_demographics['family_size'].replace('5+', 5).astype(float)
customer_demographics['marital_status'] = pd.Series(customer_demographics['marital_status'].factorize()[0]).replace(-1, np.nan)
customer_demographics['age_range'] = pd.Series(customer_demographics['age_range'].factorize()[0]).replace(-1, np.nan)
# use train data itself
redeemed_before_count = train.groupby("customer_id")['redemption_status'].sum().to_dict()
data['no_of_times_redeemed_before'] = data['customer_id'].map(redeemed_before_count)
# rented
rented_mean = customer_demographics.groupby("customer_id")['rented'].mean().to_dict()
data['rented_mean'] = data['customer_id'].map(rented_mean)
# income_bracket
income_bracket_sum = customer_demographics.groupby("customer_id")['income_bracket'].sum().to_dict()
data['income_bracket_sum'] = data['customer_id'].map(income_bracket_sum)
# age_range
age_range_mean = customer_demographics.groupby("customer_id")['age_range'].mean().to_dict()
data['age_range_mean'] = data['customer_id'].map(age_range_mean)
# family_size
family_size_mean = customer_demographics.groupby("customer_id")['family_size'].mean().to_dict()
data['family_size_mean'] = data['customer_id'].map(family_size_mean)
# no_of_children - actual number
no_of_children_mean = customer_demographics.groupby("customer_id")['no_of_children'].mean().to_dict()
data['no_of_children_mean'] = data['customer_id'].map(no_of_children_mean)
# actually represents if they have children or not
no_of_children_count = customer_demographics.groupby("customer_id")['no_of_children'].count().to_dict()
data['no_of_children_count'] = data['customer_id'].map(no_of_children_count)
# marital_status
marital_status_count = customer_demographics.groupby("customer_id")['marital_status'].count().to_dict()
data['marital_status_count'] = data['customer_id'].map(marital_status_count)
# customer_transaction_data
customer_transaction_data['date'] = pd.to_datetime(customer_transaction_data['date'])
# quantity
quantity_mean = customer_transaction_data.groupby("customer_id")['quantity'].mean().to_dict()
data['quantity_mean'] = data['customer_id'].map(quantity_mean)
#coupon_discount
coupon_discount_mean = customer_transaction_data.groupby("customer_id")['coupon_discount'].mean().to_dict()
data['coupon_discount_mean'] = data['customer_id'].map(coupon_discount_mean)
# other_discount
other_discount_mean = customer_transaction_data.groupby("customer_id")['other_discount'].mean().to_dict()
data['other_discount_mean'] = data['customer_id'].map(other_discount_mean)
# day
customer_transaction_data['day'] = customer_transaction_data.date.dt.day
date_day_mean = customer_transaction_data.groupby("customer_id")['day'].mean().to_dict()
data['date_day_mean'] = data['customer_id'].map(date_day_mean)
# selling_price
selling_price_mean = customer_transaction_data.groupby("customer_id")['selling_price'].mean().to_dict()
data['selling_price_mean'] = data['customer_id'].map(selling_price_mean)
selling_price_mean = customer_transaction_data.groupby("customer_id")['selling_price'].sum().to_dict()
data['selling_price_sum'] = data['customer_id'].map(selling_price_mean)
selling_price_mean = customer_transaction_data.groupby("customer_id")['selling_price'].min().to_dict()
data['selling_price_min'] = data['customer_id'].map(selling_price_mean)
selling_price_mean = customer_transaction_data.groupby("customer_id")['selling_price'].max().to_dict()
data['selling_price_max'] = data['customer_id'].map(selling_price_mean)
selling_price_mean = customer_transaction_data.groupby("customer_id")['selling_price'].nunique().to_dict()
data['selling_price_nunique'] = data['customer_id'].map(selling_price_mean)
coupon_item_mapping = coupon_item_mapping.merge(item_data, how = 'left', on = 'item_id')
coupon_item_mapping['brand_type'] = pd.Series(coupon_item_mapping['brand_type'].factorize()[0]).replace(-1, np.nan)
coupon_item_mapping['category'] = pd.Series(coupon_item_mapping['category'].factorize()[0]).replace(-1, np.nan)
category = coupon_item_mapping.groupby("coupon_id")['category'].mean().to_dict()
data['category_mean'] = data['coupon_id'].map(category)
category = coupon_item_mapping.groupby("coupon_id")['category'].count().to_dict()
data['category_count'] = data['coupon_id'].map(category)
category = coupon_item_mapping.groupby("coupon_id")['category'].nunique().to_dict()
data['category_nunique'] = data['coupon_id'].map(category)
category = coupon_item_mapping.groupby("coupon_id")['category'].max().to_dict()
data['category_max'] = data['coupon_id'].map(category)
category = coupon_item_mapping.groupby("coupon_id")['category'].min().to_dict()
data['category_min'] = data['coupon_id'].map(category)
brand_mean = coupon_item_mapping.groupby("coupon_id")['brand'].mean().to_dict()
data['brand_mean'] = data['coupon_id'].map(brand_mean)
brand_mean = coupon_item_mapping.groupby("coupon_id")['brand'].count().to_dict()
data['brand_count'] = data['coupon_id'].map(brand_mean)
brand_mean = coupon_item_mapping.groupby("coupon_id")['brand'].min().to_dict()
data['brand_min'] = data['coupon_id'].map(brand_mean)
brand_mean = coupon_item_mapping.groupby("coupon_id")['brand'].max().to_dict()
data['brand_max'] = data['coupon_id'].map(brand_mean)
brand_mean = coupon_item_mapping.groupby("coupon_id")['brand'].nunique().to_dict()
data['brand_nunique'] = data['coupon_id'].map(brand_mean)
data.columns
data.shape
train_cols = ['campaign_id','coupon_id','campaign_type','rented_mean','income_bracket_sum','age_range_mean','family_size_mean',
'no_of_children_mean',
'no_of_children_count',
'marital_status_count',
'quantity_mean',
'coupon_discount_mean',
'other_discount_mean',
'date_day_mean',
'category_mean',
'category_nunique',
'category_max',
'category_min',
'brand_mean',
'brand_max',
'brand_nunique',
'selling_price_mean',
'selling_price_min',
'selling_price_nunique']
len(train_cols)
train = data[data['redemption_status'].notnull()]
test = data[data['redemption_status'].isnull()]
train.head()
train.isna().sum()
test.isna().sum()
pd.get_dummies(data[train_cols].fillna(0), columns=train_cols, drop_first=True, sparse=True).columns
train.columns
train.shape
test.shape
target.shape
train_np = train[train_cols].fillna(0).values
test_np = test[train_cols].fillna(0).values
def run_cv_model(train, test, target, model_fn, params=None, eval_fn=None, label='model'):
    """5-fold stratified cross-validation driver.

    Trains `model_fn` on each fold, collecting out-of-fold predictions for
    `train` and averaging the per-fold predictions on `test`.

    Parameters
    ----------
    train, test : numpy feature matrices.
    target : label array aligned with `train`.
    model_fn : callable(dev_X, dev_y, val_X, val_y, test, params)
        returning (validation predictions, test predictions).
    params : dict of model parameters, copied before each fold. (Default
        changed from the mutable `{}` to None; behavior is unchanged.)
    eval_fn : optional callable(y_true, y_pred) -> score, e.g. AUC.
    label : name used in log messages and the returned dict.

    Returns
    -------
    dict with keys 'label', 'train' (out-of-fold predictions), 'test'
    (fold-averaged test predictions) and 'cv' (per-fold scores).
    """
    n_splits = 5
    kf = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=228)
    fold_splits = kf.split(train, target)
    cv_scores = []
    pred_full_test = 0
    pred_train = np.zeros((train.shape[0]))
    i = 1
    for dev_index, val_index in fold_splits:
        print('Started ' + label + ' fold ' + str(i) + '/5')
        dev_X, val_X = train[dev_index], train[val_index]
        dev_y, val_y = target[dev_index], target[val_index]
        # Copy so model_fn can mutate its params without cross-fold leakage.
        params2 = (params or {}).copy()
        pred_val_y, pred_test_y = model_fn(dev_X, dev_y, val_X, val_y, test, params2)
        pred_full_test = pred_full_test + pred_test_y
        pred_train[val_index] = pred_val_y
        if eval_fn is not None:
            cv_score = eval_fn(val_y, pred_val_y)
            cv_scores.append(cv_score)
            print(label + ' cv score {}: {}'.format(i, cv_score))
        i += 1
    print('{} cv scores : {}'.format(label, cv_scores))
    print('{} cv mean score : {}'.format(label, np.mean(cv_scores)))
    print('{} cv std score : {}'.format(label, np.std(cv_scores)))
    # Bug fix: average over the actual number of folds. This previously
    # divided by 10.0 despite summing only 5 fold predictions, halving the
    # averaged test probabilities (AUC ranking was unaffected, but the
    # submitted values were wrong).
    pred_full_test = pred_full_test / n_splits
    results = {'label': label,
               'train': pred_train, 'test': pred_full_test,
               'cv': cv_scores}
    return results
def runLR(train_X, train_y, test_X, test_y, test_X2, params):
    """Fit a LogisticRegression with `params` and return logit-transformed
    class-1 probabilities for the validation fold (`test_X`) and the real
    test set (`test_X2`). `test_y` is unused (kept for a uniform model_fn
    signature)."""
    print('Train LR')
    clf = LogisticRegression(**params)
    clf.fit(train_X, train_y)
    print('Predict 1/2')
    val_pred = logit(clf.predict_proba(test_X)[:, 1])
    print('Predict 2/2')
    holdout_pred = logit(clf.predict_proba(test_X2)[:, 1])
    return val_pred, holdout_pred
target = train['redemption_status'].values
lr_params = {'solver': 'lbfgs','C': 1.8,'max_iter' : 2000}
results = run_cv_model(train_np, test_np, target, runLR, lr_params, auc, 'lr')
tmp = dict(zip(test.id.values, results['test']))
answer1 = pd.DataFrame()
answer1['id'] = test.id.values
answer1['redemption_status'] = answer1['id'].map(tmp)
answer1.to_csv('submit_new.csv', index = None)
```
**xgboost**
```
def runXgb(train_X, train_y, test_X, test_y, test_X2, params):
    """Fit a default XGBClassifier (the `params` argument is ignored) and
    return logit-transformed class-1 probabilities for the validation fold
    and the real test set."""
    print('Train LR')
    clf = XGBClassifier(random_state=42)
    clf.fit(train_X, train_y)
    print('Predict 1/2')
    val_pred = logit(clf.predict_proba(test_X)[:, 1])
    print('Predict 2/2')
    holdout_pred = logit(clf.predict_proba(test_X2)[:, 1])
    return val_pred, holdout_pred
target = train['redemption_status'].values
lr_params = {'solver': 'lbfgs','C': 1.8,'max_iter' : 2000}
results = run_cv_model(train_np, test_np, target, runXgb, lr_params, auc, 'xgb')
tmp = dict(zip(test.id.values, results['test']))
answer1 = pd.DataFrame()
answer1['id'] = test.id.values
answer1['redemption_status'] = answer1['id'].map(tmp)
answer1.to_csv('submit_new_xgb.csv', index = None)
target = train['redemption_status'].values
lr_params = {'solver': 'lbfgs','C': 1.8,'max_iter' : 2000}
results = run_cv_model(train_np, test_np, target, runXgb, lr_params, auc, 'xgb')
tmp = dict(zip(test.id.values, results['test']))
answer1 = pd.DataFrame()
answer1['id'] = test.id.values
answer1['redemption_status'] = answer1['id'].map(tmp)
answer1.to_csv('submit_new_xgb_with_my_inception_feature.csv', index = None)
```
### Bayesian Optimisation
```
dtrain = xgb.DMatrix(train_np, label=target)
train_np.shape
def bo_tune_xgb(max_depth, gamma, n_estimators, learning_rate, subsample):
    """Objective for BayesianOptimization: 5-fold CV mean test AUC of an
    XGBoost model with the given hyper-parameters. Continuous inputs for
    integer parameters are truncated with int()."""
    params = dict(
        max_depth=int(max_depth),
        gamma=gamma,
        n_estimators=int(n_estimators),
        learning_rate=learning_rate,
        subsample=float(subsample),
        eta=0.1,
        objective='binary:logistic',
        eval_metric='auc',
    )
    # Cross-validate with 5 folds and 70 boosting rounds on the global dtrain.
    cv_result = xgb.cv(params, dtrain, num_boost_round=70, nfold=5)
    # Return the final mean test AUC (BayesianOptimization maximizes this).
    return cv_result['test-auc-mean'].iloc[-1]
xgb_bo = BayesianOptimization(bo_tune_xgb, {'max_depth': (3, 10),
'gamma': (0, 1),
'learning_rate':(0,1),
'n_estimators':(100,120),
'subsample':(0.1,0.3)
})
xgb_bo.maximize(n_iter=5, init_points=8, acq='ei')
def runXgbHighest(train_X, train_y, test_X, test_y, test_X2, params):
    """Fit an XGBClassifier with the best hyper-parameters found by one
    Bayesian-optimization run, returning logit-transformed class-1
    probabilities for the validation fold and the real test set."""
    print('Train LR')
    clf = XGBClassifier(random_state=42, learning_rate=0.03455, gamma=0.1887, max_depth=8, n_estimators=116, subsample=0.2643)
    clf.fit(train_X, train_y)
    print('Predict 1/2')
    val_pred = logit(clf.predict_proba(test_X)[:, 1])
    print('Predict 2/2')
    holdout_pred = logit(clf.predict_proba(test_X2)[:, 1])
    return val_pred, holdout_pred
target = train['redemption_status'].values
lr_params = {'solver': 'lbfgs','C': 1.8,'max_iter' : 2000}
results = run_cv_model(train_np, test_np, target, runXgbHighest, lr_params, auc, 'xgb-bpo')
tmp = dict(zip(test.id.values, results['test']))
answer1 = pd.DataFrame()
answer1['id'] = test.id.values
answer1['redemption_status'] = answer1['id'].map(tmp)
answer1.to_csv('submit_new_xgb_with_my_inception_feature_BPO.csv', index = None)
def runXgbHighest2(train_X, train_y, test_X, test_y, test_X2, params):
    """Like runXgbHighest but with a second tuned hyper-parameter set;
    additionally prints the fitted model's feature importances."""
    print('Train LR')
    clf = XGBClassifier(random_state=42, learning_rate=0.1733, gamma=0.923, max_depth=3, n_estimators=106, subsample=0.1987)
    clf.fit(train_X, train_y)
    print('Predict 1/2')
    val_pred = logit(clf.predict_proba(test_X)[:, 1])
    print('Predict 2/2')
    holdout_pred = logit(clf.predict_proba(test_X2)[:, 1])
    print('feature importance : {}'.format(clf.feature_importances_))
    return val_pred, holdout_pred
target = train['redemption_status'].values
lr_params = {'solver': 'lbfgs','C': 1.8,'max_iter' : 2000}
results = run_cv_model(train_np, test_np, target, runXgbHighest2, lr_params, auc, 'xgb-bpo2')
tmp = dict(zip(test.id.values, results['test']))
answer1 = pd.DataFrame()
answer1['id'] = test.id.values
answer1['redemption_status'] = answer1['id'].map(tmp)
answer1.to_csv('submit_new_xgb_BPO_2.csv', index = None)
def runXgbHighest3(train_X, train_y, test_X, test_y, test_X2, params):
    """Train the third XGBoost configuration found by Bayesian optimisation
    and return logit-transformed probabilities.

    ``test_y`` and ``params`` are unused; they are kept so the function
    matches the callback signature expected by run_cv_model.

    Returns (pred_test_y, pred_test_y2): logit of the positive-class
    probability for the validation fold and for the test set.
    """
    # Fixed: the original printed 'Train LR' (copy-paste leftover);
    # this function trains XGBoost.
    print('Train XGB')
    model = XGBClassifier(random_state=42, learning_rate=0.4501, gamma=0.2141, max_depth=7, n_estimators=108, subsample=0.1594)
    model.fit(train_X, train_y)
    print('Predict 1/2')
    pred_test_y = logit(model.predict_proba(test_X)[:, 1])
    print('Predict 2/2')
    pred_test_y2 = logit(model.predict_proba(test_X2)[:, 1])
    return pred_test_y, pred_test_y2
```
**Testing sanity of data**
```
# Sanity checks on the raw inputs and the engineered training frame.
item_data.head()
coupon_item_mapping.head()
coupon_item_mapping.shape
# A left join should keep the same row count as coupon_item_mapping.
coupon_item_mapping.merge(item_data, how = 'left', on = 'item_id').shape
data.head()
# NOTE(review): bare `ltr` just echoes a variable — presumably a leftover
# inspection cell; confirm it is still needed.
ltr
data.shape
train.shape
test.shape
data.head()
# Count missing values per column.
data.isna().sum()
```
| github_jupyter |
___
<a href='http://www.pieriandata.com'> <img src='../Pierian_Data_Logo.png' /></a>
___
# NLP (Natural Language Processing) with Python
This is the notebook that goes along with the NLP video lecture!
In this lecture we will discuss a higher level overview of the basics of Natural Language Processing, which basically consists of combining machine learning techniques with text, and using math and statistics to get that text in a format that the machine learning algorithms can understand!
Once you've completed this lecture you'll have a project using some Yelp Text Data!
**Requirements: You will need to have NLTK installed, along with downloading the corpus for stopwords. To download everything with a conda installation, run the cell below. Or reference the full video lecture**
```
# ONLY RUN THIS CELL IF YOU NEED
# TO DOWNLOAD NLTK AND HAVE CONDA
# WATCH THE VIDEO FOR FULL INSTRUCTIONS ON THIS STEP
# Uncomment the code below and run:
# !conda install nltk #This installs nltk
# import nltk # Imports the library
# nltk.download() #Download the necessary datasets
```
## Get the Data
We'll be using a dataset from the [UCI datasets](https://archive.ics.uci.edu/ml/datasets/SMS+Spam+Collection)! This dataset is already located in the folder for this section.
The file we are using contains a collection of more than 5 thousand SMS phone messages. You can check out the **readme** file for more info.
Let's go ahead and use rstrip() plus a list comprehension to get a list of all the lines of text messages:
```
# Read the raw SMS corpus, one message per line, stripping trailing whitespace.
# Use a context manager so the file handle is closed deterministically —
# the original bare open() call leaked the handle.
with open('smsspamcollection/SMSSpamCollection') as corpus_file:
    messages = [line.rstrip() for line in corpus_file]
print(len(messages))
```
A collection of texts is also sometimes called "corpus". Let's print the first ten messages and number them using **enumerate**:
```
# Show the first ten raw lines, numbered, with a blank line between entries.
for idx, text in enumerate(messages[:10]):
    print(idx, text)
    print('\n')
```
Due to the spacing we can tell that this is a [TSV](http://en.wikipedia.org/wiki/Tab-separated_values) ("tab separated values") file, where the first column is a label saying whether the given message is a normal message (commonly known as "ham") or "spam". The second column is the message itself. (Note our numbers aren't part of the file, they are just from the **enumerate** call).
Using these labeled ham and spam examples, we'll **train a machine learning model to learn to discriminate between ham/spam automatically**. Then, with a trained model, we'll be able to **classify arbitrary unlabeled messages** as ham or spam.
From the official SciKit Learn documentation, we can visualize our process:
<img src='http://www.astroml.org/sklearn_tutorial/_images/plot_ML_flow_chart_3.png' width=600/>
Instead of parsing TSV manually using Python, we can just take advantage of pandas! Let's go ahead and import it!
```
import pandas as pd
```
We'll use **read_csv** and make note of the **sep** argument, we can also specify the desired column names by passing in a list of *names*.
```
# Parse the TSV: tab separator, no header row, so supply the column names.
messages = pd.read_csv('smsspamcollection/SMSSpamCollection', sep='\t',
                           names=["label", "message"])
messages.head()
```
## Exploratory Data Analysis
Let's check out some of the stats with some plots and the built-in methods in pandas!
```
# Summary statistics for the message corpus.
messages.describe()
```
Let's use **groupby** to use describe by label, this way we can begin to think about the features that separate ham and spam!
```
# Per-label (ham vs spam) descriptive statistics.
messages.groupby('label').describe()
```
As we continue our analysis we want to start thinking about the features we are going to be using. This goes along with the general idea of [feature engineering](https://en.wikipedia.org/wiki/Feature_engineering). The better your domain knowledge on the data, the better your ability to engineer more features from it. Feature engineering is a very large part of spam detection in general. I encourage you to read up on the topic!
Let's make a new column to detect how long the text messages are:
```
# Feature engineering: character length of each message.
messages['length'] = messages['message'].apply(len)
messages.head()
```
### Data Visualization
Let's visualize this! Let's do the imports:
```
import matplotlib.pyplot as plt
import seaborn as sns
# Render plots inline in the notebook (IPython magic).
%matplotlib inline
# Histogram of message lengths.
messages['length'].plot(bins=50, kind='hist')
```
Play around with the bin size! Looks like text length may be a good feature to think about! Let's try to explain why the x-axis goes all the way to 1000ish, this must mean that there is some really long message!
```
# Length statistics — the max (910 chars) explains the histogram's long tail.
messages.length.describe()
```
Woah! 910 characters, let's use masking to find this message:
```
# Boolean masking to retrieve the single longest (910-character) message.
messages[messages['length'] == 910]['message'].iloc[0]
```
Looks like we have some sort of Romeo sending texts! But let's focus back on the idea of trying to see if message length is a distinguishing feature between ham and spam:
```
# Side-by-side length histograms for ham vs spam.
messages.hist(column='length', by='label', bins=50,figsize=(12,4))
```
Very interesting! Through just basic EDA we've been able to discover a trend that spam messages tend to have more characters. (Sorry Romeo!)
Now let's begin to process the data so we can eventually use it with SciKit Learn!
## Text Pre-processing
Our main issue with our data is that it is all in text format (strings). The classification algorithms that we've learned about so far will need some sort of numerical feature vector in order to perform the classification task. There are actually many methods to convert a corpus to a vector format. The simplest is the [bag-of-words](http://en.wikipedia.org/wiki/Bag-of-words_model) approach, where each unique word in a text will be represented by one number.
In this section we'll convert the raw messages (sequence of characters) into vectors (sequences of numbers).
As a first step, let's write a function that will split a message into its individual words and return a list. We'll also remove very common words, ('the', 'a', etc..). To do this we will take advantage of the NLTK library. It's pretty much the standard library in Python for processing text and has a lot of useful features. We'll only use some of the basic ones here.
Let's create a function that will process the string in the message column, then we can just use **apply()** in pandas do process all the text in the DataFrame.
First removing punctuation. We can just take advantage of Python's built-in **string** library to get a quick list of all the possible punctuation:
```
import string

# Demo string containing punctuation to strip.
mess = 'Sample message! Notice: it has punctuation.'
# Keep only the non-punctuation characters and glue them straight back
# into a single string.
nopunc = ''.join(ch for ch in mess if ch not in string.punctuation)
```
Now let's see how to remove stopwords. We can import a list of English stopwords from NLTK (check the documentation for more languages and info).
```
from nltk.corpus import stopwords

# Peek at the first ten English stop words.
stopwords.words('english')[:10]
# Tokenise the punctuation-free demo string.
nopunc.split()
# Drop every token whose lowercase form is an English stop word.
clean_mess = [tok for tok in nopunc.split() if tok.lower() not in stopwords.words('english')]
clean_mess
```
Now let's put both of these together in a function to apply it to our DataFrame later on:
```
# Translation table that deletes every ASCII punctuation character.
_PUNCT_TABLE = str.maketrans('', '', string.punctuation)

def text_process(mess):
    """
    Takes in a string of text, then performs the following:
    1. Remove all punctuation
    2. Remove all stopwords
    3. Returns a list of the cleaned text
    """
    # Strip punctuation in one C-level pass instead of a per-character
    # membership test against the punctuation string.
    nopunc = mess.translate(_PUNCT_TABLE)
    # Build the stop-word set once per call. The original re-evaluated
    # stopwords.words('english') (a fresh list) for EVERY token, making
    # each lookup O(len(stopwords)); set membership is O(1).
    stop_words = set(stopwords.words('english'))
    return [word for word in nopunc.split() if word.lower() not in stop_words]
```
Here is the original DataFrame again:
```
messages.head()
```
Now let's "tokenize" these messages. Tokenization is just the term used to describe the process of converting the normal text strings in to a list of tokens (words that we actually want).
Let's see an example output on one column:
**Note:**
We may get some warnings or errors for symbols we didn't account for or that weren't in Unicode (like a British pound symbol)
```
# Sanity check: tokenise the first five messages through text_process.
messages['message'].head(5).apply(text_process)
# Show the original dataframe (unchanged — apply() returns a new Series).
messages.head()
```
### Continuing Normalization
There are a lot of ways to continue normalizing this text. Such as [Stemming](https://en.wikipedia.org/wiki/Stemming) or distinguishing by [part of speech](http://www.nltk.org/book/ch05.html).
NLTK has lots of built-in tools and great documentation on a lot of these methods. Sometimes they don't work well for text-messages due to the way a lot of people tend to use abbreviations or shorthand, For example:
'Nah dawg, IDK! Wut time u headin to da club?'
versus
'No dog, I don't know! What time are you heading to the club?'
Some text normalization methods will have trouble with this type of shorthand and so I'll leave you to explore those more advanced methods through the [NLTK book online](http://www.nltk.org/book/).
For now we will just focus on using what we have to convert our list of words to an actual vector that SciKit-Learn can use.
## Vectorization
Currently, we have the messages as lists of tokens (also known as [lemmas](http://nlp.stanford.edu/IR-book/html/htmledition/stemming-and-lemmatization-1.html)) and now we need to convert each of those messages into a vector the SciKit Learn's algorithm models can work with.
Now we'll convert each message, represented as a list of tokens (lemmas) above, into a vector that machine learning models can understand.
We'll do that in three steps using the bag-of-words model:
1. Count how many times does a word occur in each message (Known as term frequency)
2. Weigh the counts, so that frequent tokens get lower weight (inverse document frequency)
3. Normalize the vectors to unit length, to abstract from the original text length (L2 norm)
Let's begin the first step:
Each vector will have as many dimensions as there are unique words in the SMS corpus. We will first use SciKit Learn's **CountVectorizer**. This model will convert a collection of text documents to a matrix of token counts.
We can imagine this as a 2-Dimensional matrix. Where the 1-dimension is the entire vocabulary (1 row per word) and the other dimension are the actual documents, in this case a column per text message.
For example:
<table border="1">
<tr>
<th></th> <th>Message 1</th> <th>Message 2</th> <th>...</th> <th>Message N</th>
</tr>
<tr>
<td><b>Word 1 Count</b></td><td>0</td><td>1</td><td>...</td><td>0</td>
</tr>
<tr>
<td><b>Word 2 Count</b></td><td>0</td><td>0</td><td>...</td><td>0</td>
</tr>
<tr>
<td><b>...</b></td> <td>1</td><td>2</td><td>...</td><td>0</td>
</tr>
<tr>
<td><b>Word N Count</b></td> <td>0</td><td>1</td><td>...</td><td>1</td>
</tr>
</table>
Since there are so many messages, we can expect a lot of zero counts for the presence of that word in that document. Because of this, SciKit Learn will output a [Sparse Matrix](https://en.wikipedia.org/wiki/Sparse_matrix).
```
from sklearn.feature_extraction.text import CountVectorizer
```
There are a lot of arguments and parameters that can be passed to the CountVectorizer. In this case we will just specify the **analyzer** to be our own previously defined function:
```
# Might take awhile...
# Learn the corpus vocabulary, tokenising each message with text_process.
bow_transformer = CountVectorizer(analyzer=text_process).fit(messages['message'])
# Print total number of vocab words
print(len(bow_transformer.vocabulary_))
```
Let's take one text message and get its bag-of-words counts as a vector, putting to use our new `bow_transformer`:
```
# Grab the fourth message (index 3) as a worked example.
message4 = messages['message'][3]
print(message4)
```
Now let's see its vector representation:
```
# Sparse bag-of-words vector for the example message.
bow4 = bow_transformer.transform([message4])
print(bow4)
print(bow4.shape)
```
This means that there are seven unique words in message number 4 (after removing common stop words). Two of them appear twice, the rest only once. Let's go ahead and check and confirm which ones appear twice:
```
# Look up which vocabulary entries appear twice in message 4.
# get_feature_names() was deprecated in scikit-learn 1.0 and removed in 1.2;
# get_feature_names_out() is the supported replacement.
print(bow_transformer.get_feature_names_out()[4073])
print(bow_transformer.get_feature_names_out()[9570])
```
Now we can use **.transform** on our Bag-of-Words (bow) transformed object and transform the entire DataFrame of messages. Let's go ahead and check out how the bag-of-words counts for the entire SMS corpus is a large, sparse matrix:
```
# Vectorise the full corpus into an (n_messages x vocab_size) sparse matrix.
messages_bow = bow_transformer.transform(messages['message'])
print('Shape of Sparse Matrix: ', messages_bow.shape)
print('Amount of Non-Zero occurences: ', messages_bow.nnz)
# Percentage of matrix cells that are non-zero.
sparsity = (100.0 * messages_bow.nnz / (messages_bow.shape[0] * messages_bow.shape[1]))
print('sparsity: {}'.format(round(sparsity)))
```
After the counting, the term weighting and normalization can be done with [TF-IDF](http://en.wikipedia.org/wiki/Tf%E2%80%93idf), using scikit-learn's `TfidfTransformer`.
____
### So what is TF-IDF?
TF-IDF stands for *term frequency-inverse document frequency*, and the tf-idf weight is a weight often used in information retrieval and text mining. This weight is a statistical measure used to evaluate how important a word is to a document in a collection or corpus. The importance increases proportionally to the number of times a word appears in the document but is offset by the frequency of the word in the corpus. Variations of the tf-idf weighting scheme are often used by search engines as a central tool in scoring and ranking a document's relevance given a user query.
One of the simplest ranking functions is computed by summing the tf-idf for each query term; many more sophisticated ranking functions are variants of this simple model.
Typically, the tf-idf weight is composed by two terms: the first computes the normalized Term Frequency (TF), aka. the number of times a word appears in a document, divided by the total number of words in that document; the second term is the Inverse Document Frequency (IDF), computed as the logarithm of the number of the documents in the corpus divided by the number of documents where the specific term appears.
**TF: Term Frequency**, which measures how frequently a term occurs in a document. Since every document is different in length, it is possible that a term would appear much more times in long documents than shorter ones. Thus, the term frequency is often divided by the document length (aka. the total number of terms in the document) as a way of normalization:
*TF(t) = (Number of times term t appears in a document) / (Total number of terms in the document).*
**IDF: Inverse Document Frequency**, which measures how important a term is. While computing TF, all terms are considered equally important. However it is known that certain terms, such as "is", "of", and "that", may appear a lot of times but have little importance. Thus we need to weigh down the frequent terms while scale up the rare ones, by computing the following:
*IDF(t) = log_e(Total number of documents / Number of documents with term t in it).*
See below for a simple example.
**Example:**
Consider a document containing 100 words wherein the word cat appears 3 times.
The term frequency (i.e., tf) for cat is then (3 / 100) = 0.03. Now, assume we have 10 million documents and the word cat appears in one thousand of these. Then, the inverse document frequency (i.e., idf) is calculated as log(10,000,000 / 1,000) = 4. Thus, the Tf-idf weight is the product of these quantities: 0.03 * 4 = 0.12.
____
Let's go ahead and see how we can do this in SciKit Learn:
```
from sklearn.feature_extraction.text import TfidfTransformer
# Fit IDF weights on the corpus counts, then re-weight the example vector.
tfidf_transformer = TfidfTransformer().fit(messages_bow)
tfidf4 = tfidf_transformer.transform(bow4)
print(tfidf4)
```
We'll go ahead and check what is the IDF (inverse document frequency) of the word `"u"` and of word `"university"`?
```
# IDF weights for two example terms: the common "u" vs the rarer "university".
print(tfidf_transformer.idf_[bow_transformer.vocabulary_['u']])
print(tfidf_transformer.idf_[bow_transformer.vocabulary_['university']])
```
To transform the entire bag-of-words corpus into TF-IDF corpus at once:
```
# TF-IDF-weight the entire bag-of-words corpus at once.
messages_tfidf = tfidf_transformer.transform(messages_bow)
print(messages_tfidf.shape)
```
There are many ways the data can be preprocessed and vectorized. These steps involve feature engineering and building a "pipeline". I encourage you to check out SciKit Learn's documentation on dealing with text data as well as the expansive collection of available papers and books on the general topic of NLP.
## Training a model
With messages represented as vectors, we can finally train our spam/ham classifier. Now we can actually use almost any sort of classification algorithms. For a [variety of reasons](http://www.inf.ed.ac.uk/teaching/courses/inf2b/learnnotes/inf2b-learn-note07-2up.pdf), the Naive Bayes classifier algorithm is a good choice.
We'll be using scikit-learn here, choosing the [Naive Bayes](http://en.wikipedia.org/wiki/Naive_Bayes_classifier) classifier to start with:
```
from sklearn.naive_bayes import MultinomialNB
# Fit a multinomial Naive Bayes spam/ham classifier on the TF-IDF vectors.
spam_detect_model = MultinomialNB().fit(messages_tfidf, messages['label'])
```
Let's try classifying our single random message and checking how we do:
```
# Predict the example message and compare to its true label.
print('predicted:', spam_detect_model.predict(tfidf4)[0])
print('expected:', messages.label[3])
```
Fantastic! We've developed a model that can attempt to predict spam vs ham classification!
## Part 6: Model Evaluation
Now we want to determine how well our model will do overall on the entire dataset. Let's begin by getting all the predictions:
```
# Predictions for every message (NOTE: this scores the training data itself).
all_predictions = spam_detect_model.predict(messages_tfidf)
print(all_predictions)
```
We can use SciKit Learn's built-in classification report, which returns [precision, recall,](https://en.wikipedia.org/wiki/Precision_and_recall) [f1-score](https://en.wikipedia.org/wiki/F1_score), and a column for support (meaning how many cases supported that classification). Check out the links for more detailed info on each of these metrics and the figure below:
<img src='https://upload.wikimedia.org/wikipedia/commons/thumb/2/26/Precisionrecall.svg/700px-Precisionrecall.svg.png' width=400 />
```
from sklearn.metrics import classification_report

# Precision / recall / F1 per class, measured on the training data itself.
print(classification_report(messages['label'], all_predictions))
```
There are quite a few possible metrics for evaluating model performance. Which one is the most important depends on the task and the business effects of decisions based off of the model. For example, the cost of mis-predicting "spam" as "ham" is probably much lower than mis-predicting "ham" as "spam".
In the above "evaluation", we evaluated accuracy on the same data we used for training. **You should never actually evaluate on the same dataset you train on!**
Such evaluation tells us nothing about the true predictive power of our model. If we simply remembered each example during training, the accuracy on training data would trivially be 100%, even though we wouldn't be able to classify any new messages.
A proper way is to split the data into a training/test set, where the model only ever sees the **training data** during its model fitting and parameter tuning. The **test data** is never used in any way. That way, our final evaluation on the test data is representative of true predictive performance.
## Train Test Split
```
from sklearn.model_selection import train_test_split
# Hold out 20% of the messages for testing.
# NOTE(review): no random_state is set, so the split differs run-to-run —
# fix a seed if reproducibility matters.
msg_train, msg_test, label_train, label_test = \
train_test_split(messages['message'], messages['label'], test_size=0.2)
print(len(msg_train), len(msg_test), len(msg_train) + len(msg_test))
```
The test size is 20% of the entire dataset (1115 messages out of total 5572), and the training is the rest (4457 out of 5572). Note the default split would have been 30/70.
## Creating a Data Pipeline
Let's run our model again and then predict off the test set. We will use SciKit Learn's [pipeline](http://scikit-learn.org/stable/modules/pipeline.html) capabilities to store a pipeline of workflow. This will allow us to set up all the transformations that we will do to the data for future use. Let's see an example of how it works:
```
from sklearn.pipeline import Pipeline
# Bundle vectorisation, TF-IDF weighting and the classifier into one estimator
# so raw text can be passed straight to fit()/predict().
pipeline = Pipeline([
    ('bow', CountVectorizer(analyzer=text_process)), # strings to token integer counts
    ('tfidf', TfidfTransformer()), # integer counts to weighted TF-IDF scores
    ('classifier', MultinomialNB()), # train on TF-IDF vectors w/ Naive Bayes classifier
])
```
Now we can directly pass message text data and the pipeline will do our pre-processing for us! We can treat it as a model/estimator API:
```
# Fit the whole pipeline on raw training text, then score the held-out set.
pipeline.fit(msg_train, label_train)
predictions = pipeline.predict(msg_test)
# classification_report expects (y_true, y_pred); the original passed the
# predictions first, which swaps the per-class precision and recall columns
# (and is inconsistent with the earlier report call in this notebook).
print(classification_report(label_test, predictions))
```
Now we have a classification report for our model on a true testing set! There is a lot more to Natural Language Processing than what we've covered here, and its vast expanse of topic could fill up several college courses! I encourage you to check out the resources below for more information on NLP!
## More Resources
Check out the links below for more info on Natural Language Processing:
[NLTK Book Online](http://www.nltk.org/book/)
[Kaggle Walkthrough](https://www.kaggle.com/c/word2vec-nlp-tutorial/details/part-1-for-beginners-bag-of-words)
[SciKit Learn's Tutorial](http://scikit-learn.org/stable/tutorial/text_analytics/working_with_text_data.html)
# Good Job!
| github_jupyter |
# "The Evolution of Music Industry Sales"
> "Deep dive analysis into music industry sales over the past 40 years."
- toc:true
- branch: master
- badges: true
- comments: true
- author: Karinn Murdock
- categories: [fastpages, jupyter]
Karinn Murdock
03/14/2022
# The Evolution of Music Industry Sales
## Introduction
For my final project, I am exploring the music industry and how it has changed over the years. More specifically, I want to look at how music industry sales have changed in the past 40 years in terms of types of sales (CDs, streaming, etc). In answering this overarching research question, I also want to explore (1) how the rise of new sale formats has led to the demise of others, and (2) which sales formats will be most popular moving forward.
The music industry has seen drastic change in recent years as a result of the Internet and music streaming platforms such as Spotify and Apple Music. Since the industry is changing so quickly, it is difficult for artists, record labels, and other companies to keep up. The topic I am researching is important to both artists and business executives in the music industry as they must understand the current sales landscape in order to maximize sales and cater to customer's preferences. By understanding which sale formats are most popular now, which ones are on the decline, and which ones will be popular in the future, artists and companies can know where to focus their marketing and distribution resources going forward.
## Methods
The data source I will use to explore my research question is a dataset I found on data.world. This dataset is on music industry sales over the past 40 years and was created by the Recording Industry Association of America (RIAA). According to RIAA, this is the most comprehensive data on U.S. recorded music revenues. This dataset goes all the way back to 1973 and lists format of sale, metric of sale, year of sale, and value of sale. A link to the dataset can be found here: https://data.world/makeovermonday/2020w21-visualizing-40-years-of-music-industry-sales
This dataset contains data on 23 different sales formats, from cassette sales to album sales to on-demand streaming sales. For each format (except a few), the sales value is listed for each year in three different metrics: units sold, revenue at time of sale, and revenue adjusted to present value. The "Value (Actual)" column is displayed in millions for both unit values and revenue values.
As stated in my introduction, I intend to analyze how music industry sales have changed since 1973 in terms of type of sales, as well as how new sales formats have affected older sales formats. For my analysis, I decided to only work with the revenue adjusted to present value data, as I found it to be the most insightful. I split my analysis roughly into 5 main steps. First, I am creating bar charts of every sales format in order to understand how sales of each format have changed since 1973. Second, I am going to create pie charts at different points in time to see how the composition of music sales has changed. Third, I am going to look at the correlation of different sales formats in the 2000's to understand which formats are correlated and which are anti-correlated. To take this analysis one step further, I am going to conduct linear regression analysis. Lastly, I am going to look at the growth rates of particular sales formats in 2019 to reveal which formats are increasing in popularity.
## Results
```
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
import ipywidgets
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score
```
### Data Exploration
```
# Load the RIAA music-industry sales dataset.
df = pd.read_csv('MusicData2.csv')
df.shape
```
The dataset has 5 columns and 3008 rows of data.
```
# Column names of the raw table.
df.columns
#hide
df.head()
#hide
df.tail()
#hide
df.dtypes
```
Finding summary statistics:
```
#hide
# Numeric summary statistics.
df.describe()
#hide
df.info()
df
```
Looking at the data, the column "Value (Actual)" has missing information for many of the data entries. Additionally, it seems some of the data in the "Value (Actual)" column is measuring sales based on units sold while other entries are measuring sales by revenue.
### Cleaning the Data
First, I am replacing all NaN values with a zero.
```
# Replace every NaN with 0 so sales values can be filtered numerically.
new_df = df.fillna(0)
new_df
```
In some of the earlier years, certain formats such as Ringtones hadn't been created yet. I don't want to include these sales formats in my dataset for the years when the value is 0.0, so I am creating a new dataframe with these values removed.
```
#collapse-output
# Drop rows with zero sales (e.g. years before a format existed).
new_df_no_values_0 = new_df[new_df["Value (Actual)"] != 0.0].reset_index(drop=True)
new_df_no_values_0
```
There are only 1274 rows now (in comparison to 3008 rows before) since some of the rows were irrelevant.
Creating a dataframe that has the units sold data for each year and format:
```
#collapse-output
# Subset: rows measured in units sold.
units = new_df_no_values_0[new_df_no_values_0['Metric'] == 'Units']
units
# Number of distinct sales formats covered by the units data.
len(list(set(units["Format"])))
```
The units sold data contains data on 17 different sales formats.
Creating a dataframe that has the present value of revenue sales data for each year and format:
```
#collapse-output
# Subset: revenue adjusted to present value — the series used for the analysis.
adjusted = new_df_no_values_0[new_df_no_values_0['Metric'] == 'Value (Adjusted)'].reset_index(drop=True)
adjusted
# Number of distinct sales formats; nunique() replaces the len(list(set(...)))
# round-trip with the idiomatic pandas call.
adjusted["Format"].nunique()
```
The value (adjusted) data contains data for 23 different sales format. This dataset is missing units sold data for some of the more recent sales format types, such as On-Demand Streaming and Limited Tier Paid Subscriptions. As a result, I am going to use the value (adjusted) data for my analysis.
```
#hide
# Subset: nominal (unadjusted) revenue — kept for reference, not analysed.
value = new_df_no_values_0[new_df_no_values_0['Metric'] == 'Value'].reset_index(drop=True)
value
```
### Exploratory Data Visualizations
Creating dataframes with the adjusted revenue sales data for each sales format:
```
# step 1: get all unique Format types.
# list(set(...)) yields a NONDETERMINISTIC order (which is why a later cell
# re-assigns every entry explicitly); Series.unique() is deterministic,
# preserving order of first appearance.
format_types = list(adjusted["Format"].unique())
format_types
# steps 2-3: one per-format sub-frame, index-aligned with format_types.
format_dfs = [adjusted[adjusted["Format"] == fmt] for fmt in format_types]
#hide
# Pin the widget index -> format mapping to a fixed, known order
# (overriding whatever ordering the cell above produced).
_ordered_formats = [
    'Download Music Video',
    'Download Album',
    'Other Ad-Supported Streaming',
    'Paid Subscription',
    'Synchronization',
    'Cassette',
    'On-Demand Streaming (Ad-Supported)',
    'Limited Tier Paid Subscription',
    'Ringtones & Ringbacks',
    '8 - Track',
    'CD',
    'Vinyl Single',
    'CD Single',
    'Kiosk',
    'DVD Audio',
    'SACD',
    'Other Digital',
    'Other Tapes',
    'SoundExchange Distributions',
    'LP/EP',
    'Download Single',
    'Cassette Single',
    'Music Video (Physical)',
]
for _idx, _fmt in enumerate(_ordered_formats):
    format_dfs[_idx] = adjusted[adjusted["Format"] == _fmt]
```
Creating dataframes containing graph titles for each sales format:
```
# Chart titles, index-aligned with format_dfs (same fixed order of 23 formats).
name_dfs = [
    'Download Music Video Sales',
    'Download Album Sales',
    'Other Ad-Supported Streaming Sales',
    'Paid Subscription Sales',
    'Synchronization Sales',
    'Cassette Sales',
    'On-Demand Streaming (Ad-Supported) Sales',
    'Limited Tier Paid Subscription Sales',
    'Ringtones & Ringbacks Sales',
    '8 - Track Sales',
    'CD Sales',
    'Vinyl Single Sales',
    'CD Single Sales',
    'Kiosk Sales',
    'DVD Audio Sales',
    'SACD Sales',
    'Other Digital Sales',
    'Other Tapes Sales',
    'SoundExchange Distributions Sales',
    'LP/EP Sales',
    'Download Single Sales',
    'Cassette Single Sales',
    'Music Video (Physical) Sales',
]
def widgetplot(x):
    """Render a bar chart of inflation-adjusted revenue for format index x."""
    frame = format_dfs[x]
    axes = frame.plot(kind='bar', x='Year', y='Value (Actual)', figsize=(10,6))
    axes.set_title(name_dfs[x], fontsize=14)
    axes.set_xlabel('Year', fontsize=13)
    axes.set_ylabel('Revenue ($ in millions)', fontsize=13)
    axes.legend(['Revenue (in millions, adjusted to PV)'])
    plt.show()
# Interactive slider over all 23 format indices.
ipywidgets.interactive(widgetplot, x=(0,22))
```
Starting with **Download Music Video sales** (x=0), Download Music Video sales began in 2005 and peaked in 2008/2009, before significantly declining. In 2019, Download Music Video sales were just under $2 million.
**Download Album sales** (x=1) began in 2004, peaked in 2013, and have been declining since. However, it still remains a popular format as sales in 2019 were over $394 million.
**Other Ad-Supported Streaming sales** (x=2) weren't introduced until 2016. Sales were highest in 2017 and have declined slightly since but still remain a popular format, accounting for over $251 million in sales in 2019.
**Paid Subscription sales** (x=3), which became available in 2005, have significantly increased in recent years. Paid Subscription sales were highest in 2019, making up over $5.9 billion in sales.
**Synchronization sales** (x=4) started in 2009 and have been relatively constant since with a slight increase in the last few years. In 2019, Synchronization sales were around $276 million.
**Cassette sales** (x=5) peaked in 1988 and have decreased significantly since. Data on Cassette sales is not reported after 2008, as cassettes lost popularity.
**On-Demand Streaming (Ad-Supported) sales** (x=6) began in 2011 and have been increasing every year since. In 2019, On-Demand Streaming sales exceeded $908 million.
**Limited Tier Paid Subscription sales** (x=7) began in 2016 and have also been increasing every year since. Limited Tier Paid Subscription sales were over $829 mil in 2019.
**Ringtones and Ringbacks** (x=8) were introduced in 2005. Sales peaked shortly after in 2007 and have decreased significantly since. In 2019, Ringtones and Ringbacks sales were around $21 million.
**8-Track sales** (x=9) peaked in 1978 before decreasing. Data for 8-Track sales stopped after 1982, as this format lost popularity.
**CD sales** (x=10) began in 1983 and peaked in 1999/2000. CD sales have decreased significantly since yet remain a popular sales format, with over $614 million in sales in 2019.
**Vinyl Single sales** (x=11) peaked in 1979 and have decreased significantly since, but still remain a used sales format. Vinyl Single sales is one of the sales formats that has been around the longest in the industry. In 2019, sales of Vinyl Singles were around $6.8 million.
**CD Single sales** (x=12) data begins in 1988 and peaked in 1997. CD Single sales declined significantly after 1997. In 2018 and 2019, CD Single sales have been under $1 million.
**Kiosk sales** (x=13) began in 2005 and peaked in 2009/2010 before declining. There was a resurgence of Kiosk sales in 2013 followed by another decline. In 2019, Kiosk sales were around $1.5 million.
**DVD Audio sales** (x=14) started in 2001 and peaked in 2005. Sales dropped significantly between 2011-2013 and have increased slightly since. In 2019, DVD Audio sales totaled just over $1 million.
**SACD sales** (x=15) became available in 2003 and also peaked in 2003. Since 2003, SACD sales have dropped sharply, accounting for less than half a million in sales in 2019.
**Other Digital sales** (x=16) were introduced in 2016 and have been increasing slightly since. In 2019, sales were around $21.5 million.
**Other Tapes sales** (x=17) began in 1973 and only lasted until 1976. Other Tapes sales peaked in 1973.
**SoundExchange Distributions sales** (x=18) began in 2004 and have been increasing for the most part since. In 2019, SoundExchange Distributions sales were over $908 million.
**LP/EP sales** (x=19) peaked in 1978 before declining sharply. Around 2008 LP/EP sales began to slowly climb again, yet sales still remain much lower than seen in the 1970s. In 2019, LP/EP sales were over $497 million.
**Download Single sales** (x=20) were introduced in 2004 and peaked in 2012. Since 2012, Download Single sales have been declining but still remain a popular format, accounting for over $414 million in sales in 2019.
**Cassette Single sales** (x=21) began in 1987 and peaked in 1992. Data on Cassette Single sales ends in 2002, as sales sharply declined to below $1 million.
**Music Video (Physical) sales** (x=22) became available in 1989 and saw peak sales in 1998 and 2004. Sales have steadily declined since, with 2019 sales around $27 million.
By analyzing these bar charts, we can see which sales formats have seen increased sales in recent years and which ones have seen declining sales. Formats that have seen increased sales recently include Synchronization, Paid Subscriptions, On-Demand Streaming (Ad-Supported), Limited Tier Paid Subscriptions, Other Digital, SoundExchange Distributions, and LP/EP.
Formats that have seen a decline in sales recently include Download Music Video, Download Album, Other Ad-Supported Streaming, Ringtones and Ringbacks, CDs, Vinyl Singles, CD Singles, Kiosks, DVD Audios, SACDs, Download Single, and Music Video (Physical).
Next, I want to look at how the composition of music industry sales changed from 1973 to 2019. To do this, I am going to look at four different points in time, each 15 years apart.
Looking at the most popular sales formats in 1973:
```
# Subset the adjusted data to 1973, then key it by sales format for display.
mask_1973 = adjusted['Year'] == 1973
df_1973 = adjusted.loc[mask_1973]
#collapse-output
df_1973_index = df_1973.set_index("Format")
df_1973_index
```
Looking at the most popular sales formats in 1988:
```
#collapse-output
# Subset the adjusted data to 1988, then key it by sales format for display.
mask_1988 = adjusted['Year'] == 1988
df_1988 = adjusted.loc[mask_1988]
df_1988_index = df_1988.set_index("Format")
df_1988_index
```
Looking at the most popular sales formats in 2003:
```
#collapse-output
# Subset the adjusted data to 2003, then key it by sales format for display.
mask_2003 = adjusted['Year'] == 2003
df_2003 = adjusted.loc[mask_2003]
df_2003_index = df_2003.set_index("Format")
df_2003_index
```
Looking at the most popular sales formats in 2018:
```
#collapse-output
# Subset the adjusted data to 2018, then key it by sales format for display.
mask_2018 = adjusted['Year'] == 2018
df_2018 = adjusted.loc[mask_2018]
df_2018_index = df_2018.set_index("Format")
df_2018_index
```
Plotting the sales data from 1973, 1988, 2003, and 2018:
```
# One pie chart per snapshot year. The 2018 chart gets a wider canvas and a
# legend pushed further out, since it has many more format categories.
pie_specs = [
    (df_1973_index, 'Music Industry Sales in 1973', (17,8), (1.0, 1.0)),
    (df_1988_index, 'Music Industry Sales in 1988', (17,8), (1.0, 1.0)),
    (df_2003_index, 'Music Industry Sales in 2003', (17,8), (1.0, 1.0)),
    (df_2018_index, 'Music Industry Sales in 2018', (19,9), (1.6, 1.0)),
]
for frame, title, size, anchor in pie_specs:
    axes = frame.plot(kind='pie', y='Value (Actual)', figsize=size)
    plt.legend(bbox_to_anchor=anchor)
    axes.set_title(title, fontsize=14)
```
Looking at the above pie charts provides insight into the most commonly used sales formats during these four points in time as well as how music industry sales have changed between 1973 and 2018. **In 1973**, the most common music sales format was an LP or EP album, accounting for almost 2/3rds of sales. Other popular sales formats in 1973 included 8-tracks, vinyl singles, cassettes, and other tapes. **In 1988**, the most common music sales format was cassettes, followed by CDs. Cassettes accounted for over half of music sales in 1988. Other common sales formats included LP/EP albums, vinyl singles, cassette singles, and CD singles. 15 years later **in 2003**, the most popular music sales format was by far the CD, making up over 80% of music sales. Other sales formats included physical music videos, cassettes, CD singles, LP/EP albums, super audio CDs, vinyl singles, and dvd audios.
Most recently, **in 2018**, the most common music sales format was a paid subscription, accounting for a little under half of music sales. Other popular music formats include soundexchange distributions, CDs, on-demand streaming (ad-supported), limited tier paid subscriptions, single downloads, album downloads, LP/EP albums, synchronization, and other ad-supported streaming. Less popular but still used sales formats in 2018 included CD singles, DVD audios, downloading music videos, kiosks, physical music videos, other digital, ringtones and ringbacks, and vinyl singles.
As shown by the charts above, the number of sales formats available to purchase music increased dramatically between 2003 and 2018. In those 15 years, sales of CDs and other physical formats dropped significantly while sales shifted more towards technology-based formats.
### Correlation Analysis
To begin my analysis, I am going to look at the correlation between different sales formats throughout the 2000's.
Looking at the correlation between **Paid Subscription sales and CD sales** since 2005:
```
#collapse-output
format_dfs[3]
# Restrict CD sales to 2005 onward, since Paid Subscription sales only exist
# from 2005; the correlation pairs the two series positionally after the
# index reset.
cd_frame = format_dfs[10]
cds_2005_on = cd_frame.loc[cd_frame['Year'] >= 2005]
ps_vals = format_dfs[3]['Value (Actual)'].reset_index(drop=True)
cd_vals = cds_2005_on['Value (Actual)'].reset_index(drop=True)
print(ps_vals.corr(cd_vals))
```
Since 2005, **Paid Subscription sales and CD sales have been negatively correlated**, with a correlation of **-0.57**
```
# Overlay Paid Subscription and CD revenue (2005+) on one line chart.
axes = format_dfs[3].plot(kind='line', x='Year', y='Value (Actual)', figsize=(9,6))
cds_2005_on.plot(kind='line', x='Year', y='Value (Actual)', ax=axes)
axes.set_title('Paid Subscription vs CD Sales')
axes.legend(['Paid Subscription Sales', 'CD Sales'])
```
Since Paid Subscriptions became available in 2005, Paid Subscription sales have been increasing, while CD sales have been decreasing. This makes sense as the two formats are negatively correlated. Prior to 2015, CD sales were higher than Paid Subscription sales. However, since 2015 it appears that Paid Subscription sales have surpassed CD sales.
Looking at the correlation between **On-Demand Streaming sales and CD sales** since 2011:
```
#collapse-output
format_dfs[6]
# Restrict CD sales to 2011 onward, matching the span of On-Demand Streaming
# data, which begins in 2011.
cd_frame = format_dfs[10]
cds_2011_on = cd_frame.loc[cd_frame['Year'] >= 2011]
cd_vals = cds_2011_on['Value (Actual)'].reset_index(drop=True)
ods_vals = format_dfs[6]['Value (Actual)'].reset_index(drop=True)
print(cd_vals.corr(ods_vals))
```
Since 2011, **On-Demand Streaming (Ad-Supported) sales and CD sales have been very negatively correlated**, with a correlation of **-0.93**
```
# Overlay On-Demand Streaming and CD revenue (2011+) on one line chart.
axes = format_dfs[6].plot(kind='line', x='Year', y='Value (Actual)', figsize=(9,6))
cds_2011_on.plot(kind='line', x='Year', y='Value (Actual)', ax=axes)
axes.set_title('On-Demand Streaming vs CD Sales')
axes.legend(['On-Demand Streaming Sales', 'CD Sales'])
```
Since On-Demand Streaming arose in 2011, On-Demand Streaming sales have been increasing, while CD sales have been decreasing. This is consistent with the results of the correlation analysis. Prior to 2018, CD sales were higher than On-Demand Streaming sales. However, since roughly 2018, On-Demand Streaming sales have surpassed CD sales.
Looking at the correlation between **Paid Subscription sales and Cassette sales** between 2005 and 2008:
```
# Overlap window is 2005-2008: Cassette data ends in 2008 and Paid
# Subscriptions only begin in 2005.
ps_frame = format_dfs[3]
ps_before_2008 = ps_frame.loc[ps_frame['Year'] <= 2008]
cassette_frame = format_dfs[5]
cassette_2005_on = cassette_frame.loc[cassette_frame['Year'] >= 2005]
ps_vals = ps_before_2008['Value (Actual)'].reset_index(drop=True)
cassette_vals = cassette_2005_on['Value (Actual)'].reset_index(drop=True)
print(ps_vals.corr(cassette_vals))
```
Between 2005 and 2008, **Paid Subscription sales and Cassette sales were very negatively correlated**, with a correlation of **-0.91**
```
# Overlay Paid Subscription and Cassette revenue over their 2005-2008 overlap.
axes = ps_before_2008.plot(kind='line', x='Year', y='Value (Actual)', figsize=(9,6))
cassette_2005_on.plot(kind='line', x='Year', y='Value (Actual)', ax=axes)
axes.set_title('Paid Subscription vs Cassette Sales')
axes.legend(['Paid Subscription Sales', 'Cassette Sales'])
```
Between 2005 and 2008 (when Paid Subscription and Cassette sales overlapped as sales formats) Paid Subscription sales were significantly higher than Cassette sales. Additionally, Paid Subscription sales increased between 2005 and 2008 while Cassette sales decreased.
Looking at correlation between **Paid Subscription Sales and On-Demand Streaming sales** after 2011:
```
#collapse-output
format_dfs[6]
# Restrict Paid Subscription sales to 2011 onward, since On-Demand Streaming
# data begins in 2011.
ps_frame = format_dfs[3]
ps_2011_on = ps_frame.loc[ps_frame['Year'] >= 2011]
ps_vals = ps_2011_on['Value (Actual)'].reset_index(drop=True)
ods_vals = format_dfs[6]['Value (Actual)'].reset_index(drop=True)
print(ps_vals.corr(ods_vals))
```
Since 2011, **Paid Subscription sales and On-Demand Streaming sales** have been very highly correlated, with a correlation of **0.98**
```
# Overlay Paid Subscription and On-Demand Streaming revenue (2011+).
axes = ps_2011_on.plot(kind='line', x='Year', y='Value (Actual)', figsize=(9,6))
format_dfs[6].plot(kind='line', x='Year', y='Value (Actual)', ax=axes)
axes.set_title('Paid Subscription vs On-Demand Streaming Sales')
axes.legend(['Paid Subscription Sales', 'On-Demand Streaming Sales'])
```
Since 2011, both Paid Subscription sales as well as On-Demand Streaming sales have increased, which makes sense since they are highly correlated. However, Paid Subscription sales increased at a higher rate than On-Demand Streaming sales.
Looking at the correlation between **On-Demand Streaming (Ad-Supported) sales and Other Ad-Supported Streaming sales** after 2016:
```
#collapse-output
format_dfs[2]
# Restrict On-Demand Streaming sales to 2016 onward, when Other Ad-Supported
# Streaming data begins.
ods_frame = format_dfs[6]
ods_2016_on = ods_frame.loc[ods_frame['Year'] >= 2016]
ods_vals = ods_2016_on['Value (Actual)'].reset_index(drop=True)
oas_vals = format_dfs[2]['Value (Actual)'].reset_index(drop=True)
print(ods_vals.corr(oas_vals))
```
Since 2016, when Other Ad-Supported Streaming became an option, **Other Ad-Supported Streaming sales and On-Demand Streaming (Ad-Supported) have been highly correlated**, with a correlation of **0.77**
```
# Overlay On-Demand Streaming and Other Ad-Supported Streaming revenue (2016+).
axes = ods_2016_on.plot(kind='line', x='Year', y='Value (Actual)', figsize=(9,6))
format_dfs[2].plot(kind='line', x='Year', y='Value (Actual)', ax=axes)
axes.set_title('On-Demand Streaming vs Other Ad-Supported Streaming Sales')
axes.legend(['On-Demand Streaming Sales', 'Other Ad-Supported Streaming Sales'])
```
Since 2016, both On-Demand Streaming (Ad-Supported) sales and Other Ad-Supported Streaming sales have increased, which is consistent with the correlation results. Despite both sales formats increasing, On-Demand Streaming (Ad-Supported) sales were higher than Other Ad-Supported Streaming sales between 2016 and 2019.
Looking at the correlation between **On-Demand Streaming (Ad-Supported) Sales and Download Album sales** since 2011:
```
#collapse-output
format_dfs[6]
# Restrict Download Album sales to 2011 onward, when On-Demand Streaming
# became an option.
da_frame = format_dfs[1]
download_album_2011_on = da_frame.loc[da_frame['Year'] >= 2011]
da_vals = download_album_2011_on['Value (Actual)'].reset_index(drop=True)
ods_vals = format_dfs[6]['Value (Actual)'].reset_index(drop=True)
print(da_vals.corr(ods_vals))
```
Since 2011, **Download Album sales and On-Demand Streaming (Ad-Supported) sales have been very negatively correlated**, with a correlation of **-0.97**
```
# Overlay Download Album and On-Demand Streaming revenue (2011+).
axes = download_album_2011_on.plot(kind='line', x='Year', y='Value (Actual)', figsize=(9,6))
format_dfs[6].plot(kind='line', x='Year', y='Value (Actual)', ax=axes)
axes.set_title('Download Album vs On-Demand Streaming Sales')
axes.legend(['Download Album Sales', 'On-Demand Streaming Sales'])
```
Since 2011, On-Demand Streaming sales have been increasing, while Download Album sales have been decreasing. This makes sense since the two formats are negatively correlated. Prior to 2017, Download Album sales were higher than On-Demand Streaming sales. However, after 2017 On-Demand Streaming sales surpassed Download Album sales.
```
#hide
# Correlation between Download Music Video and Download Album revenue.
dmv_vals = format_dfs[0]['Value (Actual)'].reset_index(drop=True)
da_vals = format_dfs[1]['Value (Actual)'].reset_index(drop=True)
print(dmv_vals.corr(da_vals))
#hide
# Overlay the two download formats on one line chart.
axes = format_dfs[0].plot(kind='line', x='Year', y='Value (Actual)', figsize=(9,6))
format_dfs[1].plot(kind='line', x='Year', y='Value (Actual)', ax=axes)
axes.set_title('Download Music Video vs Download Album Sales')
plt.legend(['Download Music Video Sales', 'Download Album Sales'], bbox_to_anchor=(0.38, 1.0))
```
Looking at the correlation between **CD Single sales and Download Single sales** after 2004:
```
# Restrict CD Single sales to 2004 onward, when Download Singles became
# available.
cds_frame = format_dfs[12]
cd_single_2004_on = cds_frame.loc[cds_frame['Year'] >= 2004]
cds_vals = cd_single_2004_on['Value (Actual)'].reset_index(drop=True)
ds_vals = format_dfs[20]['Value (Actual)'].reset_index(drop=True)
print(cds_vals.corr(ds_vals))
```
Since 2004, **CD Single sales and Download Single sales have been negatively correlated**, with a correlation of **-0.4**
```
# Overlay Download Single and CD Single revenue (2004+).
axes = format_dfs[20].plot(kind='line', x='Year', y='Value (Actual)', figsize=(9,6))
cd_single_2004_on.plot(kind='line', x='Year', y='Value (Actual)', ax=axes)
axes.set_title('Download Single vs CD Single Sales')
axes.legend(['Download Single Sales', 'CD Single Sales'])
```
Since 2004, Download Single sales have been higher than CD Single sales. Between 2004 and now, CD Single Sales slightly decreased, while Download Single sales increased until roughly 2012, and have been decreasing ever since.
Looking at the correlation between **Other Ad-Supported Streaming sales and Limited Tier Paid Subscription sales**:
```
#collapse-output
format_dfs[2]
# Both formats start in 2016, so the series line up with no extra filtering.
oas_vals = format_dfs[2]['Value (Actual)'].reset_index(drop=True)
ltps_vals = format_dfs[7]['Value (Actual)'].reset_index(drop=True)
print(oas_vals.corr(ltps_vals))
```
Since 2016, **Other Ad-Supported Streaming sales and Limited Tier Paid Subscription sales have been highly correlated**, with a correlation of **0.89**
```
# Overlay Other Ad-Supported Streaming and Limited Tier Paid Subscription revenue.
axes = format_dfs[2].plot(kind='line', x='Year', y='Value (Actual)', figsize=(9,6))
format_dfs[7].plot(kind='line', x='Year', y='Value (Actual)', ax=axes)
axes.set_title('Other Ad-Supported Streaming vs Limited Tier Paid Subscription Sales')
axes.legend(['Other Ad-Supported Streaming Sales', 'Limited Tier Paid Subscription Sales'])
```
Since both Other Ad-Supported Streaming and Limited Tier Paid Subscriptions became available in 2016, both of these sale formats have increased. However, Limited Tier Paid Subscriptions sales have been higher than Other Ad-Supported Streaming sales and also seem to be increasing at a higher rate.
Looking at the correlation between **Paid Subscription sales and SoundExchange Distributions sales**:
```
# Pearson correlation of Paid Subscription vs SoundExchange Distributions
# revenue, paired positionally after resetting both indexes.
ps_vals = format_dfs[3]['Value (Actual)'].reset_index(drop=True)
sx_vals = format_dfs[18]['Value (Actual)'].reset_index(drop=True)
print(ps_vals.corr(sx_vals))
```
Since 2005, **Paid Subscription sales and SoundExchange Distributions sales have been highly correlated**, with a correlation of **0.77**
```
# Overlay Paid Subscription and SoundExchange Distributions revenue.
axes = format_dfs[3].plot(kind='line', x='Year', y='Value (Actual)', figsize=(9,6))
format_dfs[18].plot(kind='line', x='Year', y='Value (Actual)', ax=axes)
axes.set_title('Paid Subscription vs SoundExchange Distributions Sales')
axes.legend(['Paid Subscription Sales', 'SoundExchange Distributions Sales'])
```
Since 2005, both Paid Subscription sales and SoundExchange Distribution sales have increased. However, Paid Subscription sales have increased at a higher rate.
Looking at the correlation between **Download Album sales and Download Single sales**:
```
# Pearson correlation of Download Album vs Download Single revenue,
# paired positionally after resetting both indexes.
da_vals = format_dfs[1]['Value (Actual)'].reset_index(drop=True)
ds_vals = format_dfs[20]['Value (Actual)'].reset_index(drop=True)
print(da_vals.corr(ds_vals))
```
Since 2004, **Download Album sales and Download Single sales have been highly correlated**, with a correlation of **0.93**
```
# Overlay Download Album and Download Single revenue.
axes = format_dfs[1].plot(kind='line', x='Year', y='Value (Actual)', figsize=(9,6))
format_dfs[20].plot(kind='line', x='Year', y='Value (Actual)', ax=axes)
axes.set_title('Download Album vs Download Single Sales')
axes.legend(['Download Album Sales', 'Download Single Sales'])
```
Since 2004, Download Album sales and Download Single sales have been very similar. Both sales formats increased until roughly 2012, when they both reached their peak sales and began decreasing. Prior to roughly 2016, Download Single sales were higher than Download Album sales, but since 2016 they have been almost the same.
Based on the above correlation analysis, sales formats that have been highly correlated in the 2000's include:
(1) Other Ad-Supported Streaming and Limited Tier Paid Subscription sales (r = 0.89)
(2) Paid Subscription and On-Demand Streaming (Ad-Supported) sales (r = 0.98)
(3) On-Demand Streaming (Ad-Supported) and Other Ad-Supported Streaming sales (r = 0.77)
(4) Paid Subscription and SoundExchange Distributions sales (r = 0.77)
(5) Download Album and Download Single sales (r = 0.93)
Since these formats have been highly correlated throughout the 2000's, when one of the sales formats has increased, so has the other correlated format. Similarly, if one of the sales formats decreased, the correlated sales format also decreased.
Sales formats that have been negatively correlated in the 2000's include:
(1) Paid Subscription and CD sales (r = -0.57)
(2) On-Demand Streaming (Ad-Supported) and CD sales (r = -0.93)
(3) Paid Subscription and Cassette sales (r = -0.91)
(4) On-Demand Streaming (Ad-Supported) and Download Album sales (r = -0.97)
(5) CD Single and Download Single sales (r = -0.4)
Since these formats have been negatively correlated or anti-correlated throughout the 2000's, when one of the sales formats increases, the other sales format decreases. For example, while Paid Subscription sales increased in the 2000's, CD sales decreased in the 2000's. Based on the above correlations, it appears that the newer streaming formats are negatively correlated with some of the older sales formats. As these streaming options increase in sales, their counterparts see decreased sales.
### Linear Regression Analysis
To see how well sales of one format can predict sales of another, correlated format, I am plotting the linear regression of **highly correlated** sales formats.
Since Other Ad-Supported Streaming and Limited Tier Paid Subscription sales are highly correlated (r=0.89), we might be able to use linear regression to predict Limited Tier Paid Subscription sales from Other Ad-Supported Streaming sales.
```
#collapse-output
# Regress Limited Tier Paid Subscription revenue on Other Ad-Supported
# Streaming revenue (single-feature OLS).
X = adjusted[adjusted["Format"] == 'Other Ad-Supported Streaming'][['Value (Actual)']]
Y = adjusted[adjusted["Format"] == 'Limited Tier Paid Subscription'][['Value (Actual)']]
print(X)
print(Y)
reg = LinearRegression().fit(X, Y)
# Use the model's own predict() rather than re-deriving
# intercept_ + coef_ * X by hand (equivalent for one feature); the original
# bare reg.coef_ / reg.intercept_ expressions mid-cell displayed nothing
# and were dead code.
ytrain = reg.predict(X)
figure(figsize=(9, 5))
plt.plot(X, Y, 'ro', X, ytrain, 'b-')
plt.title("Other Ad-Supported Streaming vs Limited Tier Paid Subscription")
plt.xlabel("Other Ad-Supported Streaming Sales")
plt.ylabel("Limited Tier Paid Subscription Sales")
```
To test how well the model fits the data, I am calculating the mean squared error and R-squared value.
```
# Fit diagnostics for the regression above. Both are bare expressions: in a
# notebook only the LAST one (R-squared) is displayed; the MSE result on the
# first line is computed but not shown.
mean_squared_error(Y, ytrain)
r2_score(Y, ytrain)
```
Based on the R-squared value of 0.79, the model fits the data decently well. As a result, it seems **Other Ad-Supported Streaming sales can be used to predict Limited Tier Paid Subscription sales**. However, since data is only available for 2016-2019 for these sales formats, there might not be enough data to make this assumption.
Since Paid Subscription and On-Demand Streaming (Ad-Supported) sales have been highly correlated (r=0.985) since 2011, we may be able to use linear regression to predict On-Demand Streaming sales using Paid Subscription sales.
```
# Adjusted data restricted to 2011 onward (On-Demand Streaming starts in 2011).
adjusted_2011_on = adjusted[adjusted['Year']>=2011]
#collapse-output
# Regress On-Demand Streaming (Ad-Supported) revenue on Paid Subscription revenue.
PS = adjusted_2011_on[adjusted_2011_on["Format"] == 'Paid Subscription'][['Value (Actual)']]
OD = adjusted_2011_on[adjusted_2011_on["Format"] == 'On-Demand Streaming (Ad-Supported)'][['Value (Actual)']]
print(PS)
print(OD)
reg = LinearRegression().fit(PS, OD)
# predict() replaces the hand-rolled intercept_ + coef_ * X formula
# (equivalent for one feature); dead bare reg.coef_ / reg.intercept_ dropped.
ytrain = reg.predict(PS)
figure(figsize=(9, 5))
plt.plot(PS, OD, 'ro', PS, ytrain, 'b-')
plt.title("Paid Subscription vs On-Demand Streaming")
plt.xlabel("Paid Subscription Sales")
plt.ylabel("On-Demand Streaming (Ad-Supported) Sales")
mean_squared_error(OD, ytrain)
# Last expression in the cell — the notebook displays this R-squared value.
r2_score(OD, ytrain)
```
The high R-squared value of 0.97 indicates that the regression line is a good fit for the data. As a result, **Paid Subscription sales can be used to predict On-Demand Streaming sales**.
On-Demand Streaming (Ad-Supported) and Other-Ad Supported Streaming sales also have a high correlation (r=0.77). Thus, we might be able to predict Other Ad-Supported Streaming sales using On-Demand Streaming sales by finding the linear regression.
```
# Adjusted data restricted to 2016-2019 (Other Ad-Supported Streaming starts
# in 2016).
adjusted_2016_on = adjusted[adjusted['Year']>=2016]
#collapse-output
# Regress Other Ad-Supported Streaming revenue on On-Demand Streaming revenue.
DS = adjusted_2016_on[adjusted_2016_on["Format"] == 'On-Demand Streaming (Ad-Supported)'][['Value (Actual)']]
AS = adjusted_2016_on[adjusted_2016_on["Format"] == 'Other Ad-Supported Streaming'][['Value (Actual)']]
print(DS)
print(AS)
reg = LinearRegression().fit(DS, AS)
# predict() replaces the hand-rolled intercept_ + coef_ * X formula
# (equivalent for one feature); dead bare reg.coef_ / reg.intercept_ dropped.
ytrain = reg.predict(DS)
figure(figsize=(9, 5))
plt.plot(DS, AS, 'ro', DS, ytrain, 'b-')
plt.title("On-Demand Streaming vs Other Ad-Supported Streaming")
plt.xlabel("On-Demand Streaming (Ad-Supported) Sales")
plt.ylabel("Other Ad-Supported Streaming Sales")
mean_squared_error(AS, ytrain)
# Last expression in the cell — the notebook displays this R-squared value.
r2_score(AS, ytrain)
```
The R-squared value of 0.59 indicates that the regression line is not a good fit for the data. As a result, **we cannot use On-Demand Streaming sales to predict Other Ad-Supported Streaming sales**.
Paid Subscription and SoundExchange Distributions sales have a strong correlation (r=0.77). By finding the linear regression, we can predict future SoundExchange Distributions sales using Paid Subscription sales.
```
# Adjusted data restricted to 2005 onward (Paid Subscriptions start in 2005).
adjusted_2005_on = adjusted[adjusted['Year']>=2005]
#collapse-output
# Regress SoundExchange Distributions revenue on Paid Subscription revenue.
PS2 = adjusted_2005_on[adjusted_2005_on["Format"] == 'Paid Subscription'][['Value (Actual)']]
SD = adjusted_2005_on[adjusted_2005_on["Format"] == 'SoundExchange Distributions'][['Value (Actual)']]
print(PS2)
print(SD)
reg = LinearRegression().fit(PS2, SD)
# predict() replaces the hand-rolled intercept_ + coef_ * X formula
# (equivalent for one feature); dead bare reg.coef_ / reg.intercept_ dropped.
ytrain = reg.predict(PS2)
figure(figsize=(9, 5))
plt.plot(PS2, SD, 'ro', PS2, ytrain, 'b-')
plt.title("Paid Subscription vs SoundExchange Distributions")
plt.xlabel("Paid Subscription Sales")
plt.ylabel("SoundExchange Distributions Sales")
mean_squared_error(SD, ytrain)
# Last expression in the cell — the notebook displays this R-squared value.
r2_score(SD, ytrain)
```
As with the previous regression, the regression line does not fit the data, as indicated by the R-squared value of 0.5. Consequently, **Paid Subscription sales cannot be used to predict SoundExchange Distribution sales**.
The last formats I found to be highly correlated are Download Album and Download Single sales (r=0.93). We can predict future Download Album sales using Download Single sales by finding the linear regression.
```
# Adjusted data restricted to 2004 onward (download formats start in 2004).
adjusted_2004_on = adjusted[adjusted['Year']>=2004]
#collapse-output
# Regress Download Album revenue on Download Single revenue.
DS = adjusted_2004_on[adjusted_2004_on["Format"] == 'Download Single'][['Value (Actual)']]
DA = adjusted_2004_on[adjusted_2004_on["Format"] == 'Download Album'][['Value (Actual)']]
print(DS)
print(DA)
reg = LinearRegression().fit(DS, DA)
# predict() replaces the hand-rolled intercept_ + coef_ * X formula
# (equivalent for one feature); dead bare reg.coef_ / reg.intercept_ dropped.
ytrain = reg.predict(DS)
figure(figsize=(9, 5))
plt.plot(DS, DA, 'ro', DS, ytrain, 'b-')
plt.title("Download Single vs Download Album")
plt.xlabel("Download Single")
plt.ylabel("Download Album")
mean_squared_error(DA, ytrain)
# Last expression in the cell — the notebook displays this R-squared value.
r2_score(DA, ytrain)
```
An R-squared of 0.87 indicates that the regression line sufficiently fits the data. As a result, **Download Single sales can be used to predict Download Album sales**.
Next, I am going to perform linear regression on the sales formats that are **negatively correlated** in order to see which sales formats can be used to accurately predict other sales formats.
Since Paid Subscription sales and CD sales are negatively correlated, (r=-0.57), we might be able to predict CD sales based on Paid Subscription sales using linear regression.
```
#collapse-output
# Regress CD revenue on Paid Subscription revenue (negatively correlated pair).
PS2 = adjusted_2005_on[adjusted_2005_on["Format"] == 'Paid Subscription'][['Value (Actual)']]
CD = adjusted_2005_on[adjusted_2005_on["Format"] == 'CD'][['Value (Actual)']]
print(PS2)
print(CD)
reg = LinearRegression().fit(PS2, CD)
# predict() replaces the hand-rolled intercept_ + coef_ * X formula
# (equivalent for one feature); dead bare reg.coef_ / reg.intercept_ dropped.
ytrain = reg.predict(PS2)
figure(figsize=(9, 5))
plt.plot(PS2, CD, 'ro', PS2, ytrain, 'b-')
plt.title("Paid Subscription vs CD")
plt.xlabel("Paid Subscription Sales")
plt.ylabel("CD Sales")
mean_squared_error(CD, ytrain)
# Last expression in the cell — the notebook displays this R-squared value.
r2_score(CD, ytrain)
```
Based on the extremely high mean squared error and low R-squared value, this regression line does not fit the data well. As a result, **CD sales cannot be predicted based on Paid Subscription sales**, as the correlation isn't high enough.
Since On-Demand Streaming (Ad-Supported) sales and CD sales are negatively correlated, (r=-0.93), we might be able to predict CD sales based on On-Demand Streaming sales using linear regression.
```
#collapse-output
# Regress CD revenue on On-Demand Streaming revenue (negatively correlated pair).
OD = adjusted_2011_on[adjusted_2011_on["Format"] == 'On-Demand Streaming (Ad-Supported)'][['Value (Actual)']]
CD2 = adjusted_2011_on[adjusted_2011_on["Format"] == 'CD'][['Value (Actual)']]
print(OD)
print(CD2)
reg = LinearRegression().fit(OD, CD2)
# predict() replaces the hand-rolled intercept_ + coef_ * X formula
# (equivalent for one feature); dead bare reg.coef_ / reg.intercept_ dropped.
ytrain = reg.predict(OD)
figure(figsize=(9, 5))
plt.plot(OD, CD2, 'ro', OD, ytrain, 'b-')
plt.title("On-Demand Streaming vs CD")
plt.xlabel("On-Demand Streaming (Ad-Supported) Sales")
plt.ylabel("CD Sales")
mean_squared_error(CD2, ytrain)
# Last expression in the cell — the notebook displays this R-squared value.
r2_score(CD2, ytrain)
```
While the mean squared error value is quite high, the R-squared of 0.87 implies that the regression line sufficiently fits the data. Therefore, **we can use On-Demand Streaming Sales to predict CD Sales** using this regression line.
On-Demand Streaming (Ad-Supported) sales and Download Album sales are very negatively correlated, (r=-0.97). As a result, we might be able to use linear regression to predict On-Demand Streaming sales using Download Album sales.
```
#collapse-output
# Regress On-Demand Streaming revenue on Download Album revenue
# (negatively correlated pair).
DA2 = adjusted_2011_on[adjusted_2011_on["Format"] == 'Download Album'][['Value (Actual)']]
OD = adjusted_2011_on[adjusted_2011_on["Format"] == 'On-Demand Streaming (Ad-Supported)'][['Value (Actual)']]
print(DA2)
print(OD)
reg = LinearRegression().fit(DA2, OD)
# predict() replaces the hand-rolled intercept_ + coef_ * X formula
# (equivalent for one feature); dead bare reg.coef_ / reg.intercept_ dropped.
ytrain = reg.predict(DA2)
figure(figsize=(9, 5))
plt.plot(DA2, OD, 'ro', DA2, ytrain, 'b-')
plt.title("Download Album vs On-Demand Streaming")
plt.xlabel("Download Album Sales")
plt.ylabel("On-Demand Streaming (Ad-Supported) Sales")
mean_squared_error(OD, ytrain)
# Last expression in the cell — the notebook displays this R-squared value.
r2_score(OD, ytrain)
```
The high R-squared value of 0.94 demonstrates that the regression line fits the data very well. As a result, **we should be able to predict On-Demand Streaming sales by looking at Download Album sales**.
Lastly, CD Single sales and Download Single sales are also negatively correlated (r=-0.4). This correlation is on the lower end, but I am still going to perform linear regression to see if we can predict Download Single sales based on CD Single sales.
```
#collapse-output
# Regress Download Single revenue on CD Single revenue (weakly negatively
# correlated pair).
CDS = adjusted_2004_on[adjusted_2004_on["Format"] == 'CD Single'][['Value (Actual)']]
DS = adjusted_2004_on[adjusted_2004_on["Format"] == 'Download Single'][['Value (Actual)']]
print(CDS)
print(DS)
reg = LinearRegression().fit(CDS, DS)
# predict() replaces the hand-rolled intercept_ + coef_ * X formula
# (equivalent for one feature); dead bare reg.coef_ / reg.intercept_ dropped.
ytrain = reg.predict(CDS)
figure(figsize=(9, 5))
plt.plot(CDS, DS, 'ro', CDS, ytrain, 'b-')
plt.title("CD Single vs Download Single")
plt.xlabel("CD Single Sales")
plt.ylabel("Download Single Sales")
mean_squared_error(DS, ytrain)
# Last expression in the cell — the notebook displays this R-squared value.
r2_score(DS, ytrain)
```
As I predicted, the correlation between CD Single sales and Download Single sales isn't strong enough to create an accurate regression line. As indicated by the high mean squared error and low R-squared value, **Download Single sales cannot be predicted based on CD Single sales**.
In summary, the linear regression analysis revealed that a few of the sales formats have a strong enough correlation that sales of one can predict sales of the other. The results revealed that: Limited Tier Paid Subscription sales can be predicted based on Other Ad-Supported Streaming sales, On-Demand Streaming sales can be predicted based on Paid Subscription sales, and Download Album sales can be predicted by looking at Download Single sales. As a result, we can expect that if one of the sales formats sees an increase in sales and popularity, the correlated sales format will also see an increase in sales and popularity. Similarly, if one sales format sees a decrease in sales, then the correlated sales format will most likely also see a decrease in sales.
For the negatively correlated formats, the linear regression analysis revealed that: CD sales can be predicted using On-Demand Streaming sales, and On-Demand Streaming sales can be predicted by looking at Download Album sales. More specifically, a decrease in CD sales can be predicted by an increase in On-Demand Streaming sales and an increase in On-Demand Streaming sales can be predicted by a decrease in Download Album sales.
### Growth Rate Analysis
As a last step in my analysis, I am going to look at the growth rates of popular sales formats in 2019. This information will help to predict which formats might have high growth in the near future.
Popular sales formats in recent years include: Other Ad-Supported Streaming, Paid Subscription, Synchronization, On-Demand Streaming (Ad-Supported), Limited Tier Paid Subscription, Other Digital Sales, SoundExchange Distributions, CDs, LP/EP, Download Single, and Download Album.
Other Ad-Supported Streaming:
```
#collapse-output
format_dfs[2]['Value (Actual)'].pct_change(periods=1)
```
In 2019, **Other Ad-Supported Streaming sales decreased by 1.9%.**
Paid Subscription:
```
#collapse-output
format_dfs[3]['Value (Actual)'].pct_change(periods=1)
```
In 2019, **Paid Subscription sales increased by 25%.**
Synchronization:
```
#collapse-output
format_dfs[4]['Value (Actual)'].pct_change(periods=1)
```
In 2019, **Synchronization sales decreased by 4.96%.**
On-Demand Streaming (Ad-Supported):
```
#collapse-output
format_dfs[6]['Value (Actual)'].pct_change(periods=1)
```
In 2019, **On-Demand Streaming (Ad-Supported) sales increased by 17.4%.**
Limited Tier Paid Subscription:
```
#collapse-output
format_dfs[7]['Value (Actual)'].pct_change(periods=1)
```
In 2019, **Limited Tier Paid Subscription sales increased by 9.1%.**
Other Digital:
```
#collapse-output
format_dfs[16]['Value (Actual)'].pct_change(periods=1)
```
In 2019, **Other Digital sales increased by 6.5%.**
SoundExchange Distributions:
```
#collapse-output
format_dfs[18]['Value (Actual)'].pct_change(periods=1)
```
In 2019, **SoundExchange Distributions sales decreased by 6.4%.**
CDs:
```
#collapse-output
format_dfs[10]['Value (Actual)'].pct_change(periods=1)
```
In 2019, **CD sales decreased by 13.6%.**
LP/EP:
```
#collapse-output
format_dfs[19]['Value (Actual)'].pct_change(periods=1)
```
In 2019, **LP/EP sales increased by 16.6%.**
Download Single:
```
#collapse-output
format_dfs[20]['Value (Actual)'].pct_change(periods=1)
```
In 2019, **Download Single sales decreased by 16.9%.**
Download Album:
```
#collapse-output
format_dfs[1]['Value (Actual)'].pct_change(periods=1)
```
In 2019, **Download Album sales decreased by 22.4%.**
By looking at the sales growth rates for each sales format in 2019, we are able to see which formats have increasing sales and which have decreasing sales. If we assume these trends will continue into the near future, we can predict which sales formats will be most popular going forward as well as which formats are being used less.
Sales formats that saw positive growth in 2019 include Paid Subscription sales, On-Demand Streaming (Ad-Supported) sales, LP/EP sales, Limited Tier Paid Subscription sales, and Other Digital sales. As a result, I expect that these sales formats will continue to increase in the near future and they will eventually make up the majority of all music industry sales. Paid Subscription services not only had the highest sales of all the formats in 2018, but also saw the largest increase in sales in 2019, increasing 25%. Looking at the sales formats that are increasing in popularity, we can see that streaming services, both paid and free, are taking over the music industry as the most popular way to consume music. In addition to streaming services, it seems that LP albums and EP albums are also making a comeback in terms of popularity.
Sales formats that saw negative growth in 2019 include Download Album sales, Download Single sales, CD sales, SoundExchange Distributions sales, Synchronization sales, and Other Ad-Supported Streaming sales. It is reasonable to assume that the majority of these formats are decreasing in popularity and thus sales in these formats will continue to decrease into the future. Download Album, Download Single, and CD sales saw the largest decrease by far, demonstrating that consumers are steering away from downloading music or buying "hard copies". Instead, consumers are getting their music from streaming services like Spotify, Apple Music, and Pandora or opting to buy LP/EP albums instead.
## Discussion
Through my analysis, I revealed how music industry sales have changed since 1973 and especially throughout the 2000s. Music industry sales have been greatly affected by technology, as consumers now have access to a plethora of music formats that didn't even exist 15 years ago. My analysis revealed which music sales formats have been strongly correlated in the 2000s and which formats have been strongly anti-correlated. Since correlation doesn't imply causation, I cannot say that certain formats led to the demise of other formats. But, it is interesting to see how the rise of new formats is correlated to the decline of more traditional music formats. In my correlation analysis, I found that streaming formats were highly correlated with other streaming formats, meaning that sales of both increased similarly during the 2000s. More specifically, I found:
(1) Other Ad-Supported Streaming and Limited Tier Paid Subscription sales to be highly correlated,
(2) Paid Subscription and On-Demand Streaming (Ad-Supported) sales to be highly correlated,
(3) On-Demand Streaming (Ad-Supported) and Other Ad-Supported Streaming sales to be highly correlated,
(4) Paid Subscription and SoundExchange Distributions sales to be highly correlated, and
(5) Download Album and Download Single sales to be highly correlated.
In contrast, I found that streaming formats were very anti-correlated with older sales formats. While these streaming formats saw increased sales throughout the 2000's, the older sales formats had large decreases in sales. To be specific, I found:
(1) Paid Subscription and CD sales to be moderately anti-correlated,
(2) On-Demand Streaming (Ad-Supported) and CD sales to be very anti-correlated,
(3) Paid Subscription and Cassette sales to be very anti-correlated,
(4) On-Demand Streaming (Ad-Supported) and Download Album sales to be very anti-correlated, and
(5) CD Single and Download Single sales to be moderately anti-correlated.
My analysis also reveals where music industry sales may be focused in the future. Plotting the different sales formats revealed that a number of them have been steadily increasing in recent years. The formats that have seen increasing sales over the past few years are
(1) Synchronizations,
(2) Paid Subscriptions,
(3) On-Demand Streaming (Ad-Supported),
(4) Limited Tier Paid Subscriptions,
(5) Other Digital,
(6) SoundExchange Distributions, and
(7) LP/EP.
Formats that have seen decreasing sales over the past few years include:
(1) Download Music Video,
(2) Download Album,
(3) Other Ad-Supported Streaming,
(4) Ringtones and Ringbacks,
(5) CDs,
(6) Vinyl Singles,
(7) CD Singles,
(8) Kiosks,
(9) DVD Audios,
(10) SACDs,
(11) Download Single, and
(12) Music Video (Physical).
It is clear that sales are shifting away from traditional sales formats like owning an album to technology-based formats like streaming an album. Additionally, it appears that consumers like the flexibility streaming platforms provide to jump from one artist to another, without having to commit to a whole album. As a result, I would advise artists and managers to shift away from these formats. Instead of focusing on promoting an album in order to increase album sales, artists should focus their strategy and marketing efforts on streaming platforms. By putting their music on platforms like Spotify and Apple Music, artists will not only expose their music to a wider audience but also put themselves in a better place to make sales.
My analysis also demonstrated how quickly popularity of music sales formats shifts. These new sales formats that are seeing high growth now will probably continue to grow into the near future, but as even newer formats are made available, they too will begin to see decreased sales. Therefore, it is imperative that people in the music industry monitor new technologies and changes in the landscape so they can stay on top of new trends. My correlation and linear regression analysis revealed which formats' sales are closely correlated. In the future, if we begin to see a decrease in a particular sales format, we should be cautious of a decrease in the correlated sales format as well. In conclusion, my analysis provided insight into the past and future of music industry sales. This analysis is of interest to me as I feel I now have a much stronger understanding of the music industry sales landscape and where the industry is heading in the future. With this knowledge, artists, managers, and others working in the industry can hopefully better focus their marketing and predict potential sales avenues.
| github_jupyter |
# Developing an AI application
Going forward, AI algorithms will be incorporated into more and more everyday applications. For example, you might want to include an image classifier in a smart phone app. To do this, you'd use a deep learning model trained on hundreds of thousands of images as part of the overall application architecture. A large part of software development in the future will be using these types of models as common parts of applications.
In this project, you'll train an image classifier to recognize different species of flowers. You can imagine using something like this in a phone app that tells you the name of the flower your camera is looking at. In practice you'd train this classifier, then export it for use in your application. We'll be using [this dataset](http://www.robots.ox.ac.uk/~vgg/data/flowers/102/index.html) of 102 flower categories, you can see a few examples below.
<img src='assets/Flowers.png' width=500px>
The project is broken down into multiple steps:
* Load and preprocess the image dataset
* Train the image classifier on your dataset
* Use the trained classifier to predict image content
We'll lead you through each part which you'll implement in Python.
When you've completed this project, you'll have an application that can be trained on any set of labeled images. Here your network will be learning about flowers and end up as a command line application. But, what you do with your new skills depends on your imagination and effort in building a dataset. For example, imagine an app where you take a picture of a car, it tells you what the make and model is, then looks up information about it. Go build your own dataset and make something new.
First up is importing the packages you'll need. It's good practice to keep all the imports at the beginning of your code. As you work through this notebook and find you need to import a package, make sure to add the import up here.
```
# Imports here
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
import torch
import numpy as np
from torch import nn
from torch import optim
from torchvision import datasets, transforms, models
from collections import OrderedDict
from PIL import Image
from torch.autograd import Variable
```
## Load the data
Here you'll use `torchvision` to load the data ([documentation](http://pytorch.org/docs/0.3.0/torchvision/index.html)). The data should be included alongside this notebook, otherwise you can [download it here](https://s3.amazonaws.com/content.udacity-data.com/nd089/flower_data.tar.gz). The dataset is split into three parts, training, validation, and testing. For the training, you'll want to apply transformations such as random scaling, cropping, and flipping. This will help the network generalize leading to better performance. You'll also need to make sure the input data is resized to 224x224 pixels as required by the pre-trained networks.
The validation and testing sets are used to measure the model's performance on data it hasn't seen yet. For this you don't want any scaling or rotation transformations, but you'll need to resize then crop the images to the appropriate size.
The pre-trained networks you'll use were trained on the ImageNet dataset where each color channel was normalized separately. For all three sets you'll need to normalize the means and standard deviations of the images to what the network expects. For the means, it's `[0.485, 0.456, 0.406]` and for the standard deviations `[0.229, 0.224, 0.225]`, calculated from the ImageNet images. These values will shift each color channel to be centered at 0 and range from -1 to 1.
```
# Directory layout expected by torchvision.datasets.ImageFolder:
# one sub-folder per class under each of train/valid/test.
data_dir = 'flower_data'
train_dir = data_dir + '/train'
valid_dir = data_dir + '/valid'
test_dir = data_dir + '/test'
# TODO: Define your transforms for the training, validation, and testing sets
# Training gets random augmentation (rotation, resized crop, horizontal flip)
# to help generalization; validation and testing use only a deterministic
# resize + center crop. All three normalize with the ImageNet channel
# statistics expected by the pretrained backbone.
data_transforms = {
    'training': transforms.Compose([transforms.RandomRotation(30),
                                    transforms.RandomResizedCrop(224),
                                    transforms.RandomHorizontalFlip(),
                                    transforms.ToTensor(),
                                    transforms.Normalize([0.485, 0.456, 0.406],
                                                         [0.229, 0.224, 0.225])]),
    'validating': transforms.Compose([transforms.Resize(256),
                                      transforms.CenterCrop(224),
                                      transforms.ToTensor(),
                                      transforms.Normalize([0.485, 0.456, 0.406],
                                                           [0.229, 0.224, 0.225])]),
    'testing': transforms.Compose([transforms.Resize(256),
                                   transforms.CenterCrop(224),
                                   transforms.ToTensor(),
                                   transforms.Normalize([0.485, 0.456, 0.406],
                                                        [0.229, 0.224, 0.225])])
}
# TODO: Load the datasets with ImageFolder
image_datasets = {
    'training': datasets.ImageFolder(train_dir, transform=data_transforms['training']),
    'validating': datasets.ImageFolder(valid_dir, transform=data_transforms['validating']),
    'testing': datasets.ImageFolder(test_dir, transform=data_transforms['testing'])
}
# TODO: Using the image datasets and the trainforms, define the dataloaders
# Training/validation are shuffled; the test loader keeps a fixed order so
# predictions line up with the dataset for later inspection.
dataloaders = {
    'training': torch.utils.data.DataLoader(image_datasets['training'], batch_size=64, shuffle=True),
    'validating': torch.utils.data.DataLoader(image_datasets['validating'], batch_size=64, shuffle=True),
    'testing': torch.utils.data.DataLoader(image_datasets['testing'], batch_size=30, shuffle=False)
}
```
### Label mapping
You'll also need to load in a mapping from category label to category name. You can find this in the file `cat_to_name.json`. It's a JSON object which you can read in with the [`json` module](https://docs.python.org/2/library/json.html). This will give you a dictionary mapping the integer encoded categories to the actual names of the flowers.
```
import json
with open('cat_to_name.json', 'r') as f:
cat_to_name = json.load(f)
```
# Building and training the classifier
Now that the data is ready, it's time to build and train the classifier. As usual, you should use one of the pretrained models from `torchvision.models` to get the image features. Build and train a new feed-forward classifier using those features.
We're going to leave this part up to you. Refer to [the rubric](https://review.udacity.com/#!/rubrics/1663/view) for guidance on successfully completing this section. Things you'll need to do:
* Load a [pre-trained network](http://pytorch.org/docs/master/torchvision/models.html) (If you need a starting point, the VGG networks work great and are straightforward to use)
* Define a new, untrained feed-forward network as a classifier, using ReLU activations and dropout
* Train the classifier layers using backpropagation using the pre-trained network to get the features
* Track the loss and accuracy on the validation set to determine the best hyperparameters
We've left a cell open for you below, but use as many as you need. Our advice is to break the problem up into smaller parts you can run separately. Check that each part is doing what you expect, then move on to the next. You'll likely find that as you work through each part, you'll need to go back and modify your previous code. This is totally normal!
When training make sure you're updating only the weights of the feed-forward network. You should be able to get the validation accuracy above 70% if you build everything right. Make sure to try different hyperparameters (learning rate, units in the classifier, epochs, etc) to find the best model. Save those hyperparameters to use as default values in the next part of the project.
One last important tip if you're using the workspace to run your code: To avoid having your workspace disconnect during the long-running tasks in this notebook, please read in the earlier page in this lesson called Intro to
GPU Workspaces about Keeping Your Session Active. You'll want to include code from the workspace_utils.py module.
**Note for Workspace users:** If your network is over 1 GB when saved as a checkpoint, there might be issues with saving backups in your workspace. Typically this happens with wide dense layers after the convolutional layers. If your saved checkpoint is larger than 1 GB (you can open a terminal and check with `ls -lh`), you should reduce the size of your hidden layers and train again.
```
# TODO: Build and train your network
def model_config(hidden_units):
    """Return a pretrained DenseNet-121 with its classifier head replaced.

    The convolutional feature extractor is frozen; only the new two-layer
    head (hidden_units wide, one output per flower category) will train.
    """
    net = models.densenet121(pretrained=True)
    # Freeze the backbone so backprop only updates the new classifier.
    for p in net.parameters():
        p.requires_grad = False
    in_features = net.classifier.in_features
    n_classes = len(cat_to_name)
    head = nn.Sequential(OrderedDict([
        ('fc1', nn.Linear(in_features, hidden_units)),
        ('relu', nn.ReLU()),
        ('fc2', nn.Linear(hidden_units, n_classes)),
        ('output', nn.LogSoftmax(dim=1)),
    ]))
    net.classifier = head
    return net
def model_create(learning_rate, hidden_units, class_to_idx):
    """Build the model, optimizer and loss criterion for training.

    Args:
        learning_rate: Adam learning rate for the classifier head.
        hidden_units: width of the classifier's hidden layer.
        class_to_idx: mapping from class label to dataset index; attached
            to the model so it travels with the checkpoint.

    Returns:
        (model, optimizer, criterion) tuple.
    """
    # Load backbone + fresh classifier head
    model = model_config(hidden_units)
    # NLLLoss pairs with the LogSoftmax output of the classifier head.
    criterion = nn.NLLLoss()
    # Bug fix: the learning_rate argument was previously ignored (lr was
    # hard-coded to 0.001); honor the caller-supplied value.
    optimizer = optim.Adam(model.classifier.parameters(), lr=learning_rate)
    # Save class-to-index mapping for inference-time label lookup
    model.class_to_idx = class_to_idx
    return model, optimizer, criterion
def do_deep_learning(model, trainloader, epochs, print_every, criterion, optimizer, device='gpu'):
    """Train `model` on `trainloader` for `epochs` epochs.

    Args:
        model: network to train (its trainable parameters must be in `optimizer`).
        trainloader: iterable of (inputs, labels) batches.
        epochs: number of passes over the training data.
        print_every: log the running loss every N optimizer steps.
        criterion: loss function (e.g. nn.NLLLoss).
        optimizer: optimizer stepping the model's parameters.
        device: 'gpu' to train on CUDA when available; anything else trains on CPU.
    """
    # Bug fix: honor the `device` argument instead of unconditionally calling
    # .to('cuda'), which crashed on CPU-only hosts even when 'cpu' was requested.
    target_device = torch.device('cuda' if device == 'gpu' and torch.cuda.is_available() else 'cpu')
    model.to(target_device)
    steps = 0
    for e in range(epochs):
        running_loss = 0
        for inputs, labels in trainloader:
            steps += 1
            inputs, labels = inputs.to(target_device), labels.to(target_device)
            optimizer.zero_grad()
            # Forward and backward passes
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
            if steps % print_every == 0:
                print("Epoch: {}/{}... ".format(e+1, epochs),
                      "Loss: {:.4f}".format(running_loss/print_every))
                running_loss = 0
# Hyperparameters for the classifier head and the training loop.
class_to_idx = image_datasets['training'].class_to_idx
hidden_units = 500
learning_rate = 0.005
epochs = 10
print_every = 50
model, optimizer, criterion = model_create(learning_rate, hidden_units, class_to_idx)
# Train on the augmented training split, requesting the GPU.
do_deep_learning(model, dataloaders['training'], epochs, print_every, criterion, optimizer, 'gpu')
```
## Testing your network
It's good practice to test your trained network on test data, images the network has never seen either in training or validation. This will give you a good estimate for the model's performance on completely new images. Run the test images through the network and measure the accuracy, the same way you did validation. You should be able to reach around 70% accuracy on the test set if the model has been trained well.
```
# TODO: Do validation on the test set
def validation(model, dataloaders, criterion):
    """Measure classification accuracy over a dataloader.

    Args:
        model: trained network producing per-class scores.
        dataloaders: iterable of (images, labels) batches.
        criterion: unused; kept for backward-compatible signature.

    Returns:
        Accuracy as a percentage (also printed). Returning the value is a
        backward-compatible addition — the original returned None.
    """
    correct = 0
    total = 0
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model.to(device)
    model.eval()  # turn off dropout
    # Inference only: torch.no_grad replaces the deprecated
    # Variable(..., volatile=True) idiom used previously.
    with torch.no_grad():
        for images, labels in dataloaders:
            images, labels = images.to(device), labels.to(device)
            outputs = model(images)
            _, predicted = torch.max(outputs, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    accuracy = 100 * correct / total
    # Bug fix: report the real number of evaluated images (the old message
    # hard-coded "10000 test images" regardless of the dataset size).
    print('Accuracy of the network on the %d test images: %d %%' % (total, accuracy))
    return accuracy
validation(model, dataloaders['testing'], criterion)
```
## Save the checkpoint
Now that your network is trained, save the model so you can load it later for making predictions. You probably want to save other things such as the mapping of classes to indices which you get from one of the image datasets: `image_datasets['train'].class_to_idx`. You can attach this to the model as an attribute which makes inference easier later on.
```model.class_to_idx = image_datasets['train'].class_to_idx```
Remember that you'll want to completely rebuild the model later so you can use it for inference. Make sure to include any information you need in the checkpoint. If you want to load the model and keep training, you'll want to save the number of epochs as well as the optimizer state, `optimizer.state_dict`. You'll likely want to use this trained model in the next part of the project, so best to save it now.
```
# TODO: Save the checkpoint
# TODO: Save the checkpoint
# Persist the weights plus everything needed to rebuild the model and
# resume training (architecture name, sizes, class mapping, optimizer state).
# NOTE(review): 'learing_rate' is a misspelled key, but load_checkpoint
# below reads the same misspelled key, so renaming it only here would
# break checkpoint loading.
checkpoint = {'input_size': 1024,
              'struc': 'densenet121',
              'learing_rate': learning_rate,
              'optimizer' : optimizer.state_dict(),
              'class_to_idx' : model.class_to_idx,
              'output_size': 102,
              'epochs': epochs,
              'arch': 'densenet121',
              'state_dict': model.state_dict()
             }
torch.save(checkpoint, 'checkpoint.pth')
```
## Loading the checkpoint
At this point it's good to write a function that can load a checkpoint and rebuild the model. That way you can come back to this project and keep working on it without having to retrain the network.
```
# TODO: Write a function that loads a checkpoint and rebuilds the model
def load_checkpoint(filepath):
    """Rebuild the trained model from a checkpoint saved by the cell above.

    Args:
        filepath: path to a file written by torch.save(checkpoint, ...).

    Returns:
        The reconstructed model with trained weights loaded (moved to CUDA
        when available). Optimizer state and epoch count are stashed on the
        model for potential fine-tuning later.
    """
    checkpoint = torch.load(filepath)
    class_to_idx = checkpoint['class_to_idx']
    # NOTE: the save cell stores the rate under the misspelled key
    # 'learing_rate'; keep reading it that way for compatibility.
    learning_rate = checkpoint['learing_rate']
    # Bug fix: derive the hidden-layer width from the saved weights instead
    # of relying on a module-level `hidden_units` global, which does not
    # exist when the checkpoint is loaded in a fresh session.
    hidden_units = checkpoint['state_dict']['classifier.fc1.weight'].shape[0]
    model, optimizer, criterion = model_create(learning_rate, hidden_units, class_to_idx)
    model.load_state_dict(checkpoint['state_dict'])
    # Stash training metadata for resuming later.
    model.optimizer = checkpoint['optimizer']
    model.epochs = checkpoint['epochs']
    if torch.cuda.is_available():
        model.cuda()
        criterion.cuda()
    return model
model = load_checkpoint('checkpoint.pth')
print(model)
```
# Inference for classification
Now you'll write a function to use a trained network for inference. That is, you'll pass an image into the network and predict the class of the flower in the image. Write a function called `predict` that takes an image and a model, then returns the top $K$ most likely classes along with the probabilities. It should look like
```python
probs, classes = predict(image_path, model)
print(probs)
print(classes)
> [ 0.01558163 0.01541934 0.01452626 0.01443549 0.01407339]
> ['70', '3', '45', '62', '55']
```
First you'll need to handle processing the input image such that it can be used in your network.
## Image Preprocessing
You'll want to use `PIL` to load the image ([documentation](https://pillow.readthedocs.io/en/latest/reference/Image.html)). It's best to write a function that preprocesses the image so it can be used as input for the model. This function should process the images in the same manner used for training.
First, resize the images where the shortest side is 256 pixels, keeping the aspect ratio. This can be done with the [`thumbnail`](http://pillow.readthedocs.io/en/3.1.x/reference/Image.html#PIL.Image.Image.thumbnail) or [`resize`](http://pillow.readthedocs.io/en/3.1.x/reference/Image.html#PIL.Image.Image.thumbnail) methods. Then you'll need to crop out the center 224x224 portion of the image.
Color channels of images are typically encoded as integers 0-255, but the model expected floats 0-1. You'll need to convert the values. It's easiest with a Numpy array, which you can get from a PIL image like so `np_image = np.array(pil_image)`.
As before, the network expects the images to be normalized in a specific way. For the means, it's `[0.485, 0.456, 0.406]` and for the standard deviations `[0.229, 0.224, 0.225]`. You'll want to subtract the means from each color channel, then divide by the standard deviation.
And finally, PyTorch expects the color channel to be the first dimension but it's the third dimension in the PIL image and Numpy array. You can reorder dimensions using [`ndarray.transpose`](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.ndarray.transpose.html). The color channel needs to be first and retain the order of the other two dimensions.
```
def process_image(image):
    ''' Scales, crops, and normalizes a PIL image for a PyTorch model,
        returns an Numpy array of shape (3, 224, 224), dtype float32.
    '''
    # Resize so the SHORTEST side is 256 px while keeping the aspect ratio.
    # Bug fix: the old code always passed (256, long) to resize(), which is
    # (width, height) in PIL — correct for portrait images but distorting
    # landscape ones — and its center-crop box used the same swapped axes.
    width, height = image.size
    if width < height:
        new_w, new_h = 256, int(256 * height / width)
    else:
        new_w, new_h = int(256 * width / height), 256
    im = image.resize((new_w, new_h))
    # Center-crop to the 224x224 input the pretrained network expects.
    left = (new_w - 224) / 2
    top = (new_h - 224) / 2
    img_new = im.crop((left, top, left + 224, top + 224))
    np_img = np.array(img_new)
    # Scale 0-255 ints to 0-1 floats, then normalize with ImageNet statistics.
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    np_img = (np_img / 255 - mean) / std
    # PyTorch expects channels-first (C, H, W); PIL/numpy give (H, W, C).
    return np.transpose(np_img, (2, 0, 1)).astype(np.float32)
```
To check your work, the function below converts a PyTorch tensor and displays it in the notebook. If your `process_image` function works, running the output through this function should return the original image (except for the cropped out portions).
```
def imshow(image, ax=None, title=None):
    """Imshow for Tensor: undo normalization and display on an axis."""
    if ax is None:
        _, ax = plt.subplots()
    # Tensors are channels-first; matplotlib expects channels-last.
    arr = image.numpy().transpose((1, 2, 0))
    # Reverse the ImageNet normalization applied during preprocessing,
    # clipping to [0, 1] so the result doesn't render as noise.
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    arr = np.clip(arr * std + mean, 0, 1)
    ax.imshow(arr)
    return ax
```
## Class Prediction
Once you can get images in the correct format, it's time to write a function for making predictions with your model. A common practice is to predict the top 5 or so (usually called top-$K$) most probable classes. You'll want to calculate the class probabilities then find the $K$ largest values.
To get the top $K$ largest values in a tensor use [`x.topk(k)`](http://pytorch.org/docs/master/torch.html#torch.topk). This method returns both the highest `k` probabilities and the indices of those probabilities corresponding to the classes. You need to convert from these indices to the actual class labels using `class_to_idx` which hopefully you added to the model or from an `ImageFolder` you used to load the data ([see here](#Save-the-checkpoint)). Make sure to invert the dictionary so you get a mapping from index to class as well.
Again, this method should take a path to an image and a model checkpoint, then return the probabilities and classes.
```python
probs, classes = predict(image_path, model)
print(probs)
print(classes)
> [ 0.01558163 0.01541934 0.01452626 0.01443549 0.01407339]
> ['70', '3', '45', '62', '55']
```
```
def predict(image_path, model, topk=5):
    ''' Predict the class (or classes) of an image using a trained deep learning model.

    Args:
        image_path: path to an image file openable by PIL.
        model: trained network with a `class_to_idx` attribute.
        topk: number of top classes to return.

    Returns:
        (probs, classes): numpy array of the topk probabilities and the
        matching list of class labels.
    '''
    model.eval()
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model.to(device)
    image = Image.open(image_path)
    np_image = process_image(image)
    # Add a batch dimension: the network expects (N, C, H, W).
    tensor_image = torch.from_numpy(np_image).unsqueeze(0).to(device)
    # Inference only — torch.no_grad replaces the deprecated Variable wrapper
    # and avoids building an autograd graph.
    with torch.no_grad():
        output = model(tensor_image)
    # The head emits log-probabilities; exponentiate before ranking.
    probs, indices = torch.exp(output).topk(topk)
    probs = probs.cpu().numpy()[0]
    indices = indices.cpu().numpy()[0]
    # Map internal dataset indices back to the original class labels.
    idx_to_class = {idx: cls for cls, idx in model.class_to_idx.items()}
    mapped_classes = [idx_to_class[i] for i in indices]
    return probs, mapped_classes
## Sanity Checking
Now that you can use a trained model for predictions, check to make sure it makes sense. Even if the testing accuracy is high, it's always good to check that there aren't obvious bugs. Use `matplotlib` to plot the probabilities for the top 5 classes as a bar graph, along with the input image. It should look like this:
<img src='assets/inference_example.png' width=300px>
You can convert from the class integer encoding to actual flower names with the `cat_to_name.json` file (should have been loaded earlier in the notebook). To show a PyTorch tensor as an image, use the `imshow` function defined above.
```
# TODO: Display an image along with the top 5 classes
def sanity_check(image, model):
    """Display an image alongside a bar chart of its top-5 predicted classes.

    Args:
        image: path to the image file to classify and show.
        model: trained model accepted by `predict`.
    """
    fig = plt.figure(figsize=[15, 15])
    ax1 = plt.subplot(2, 1, 1)
    ax2 = plt.subplot(2, 1, 2)
    probs, classes = predict(image, model)
    max_index = np.argmax(probs)
    max_label = classes[max_index]
    image = Image.open(image)
    ax1.axis('off')
    ax1.set_title(cat_to_name[max_label])
    ax1.imshow(image)
    labels = [cat_to_name[c] for c in classes]
    tick_y = np.arange(5)
    # Bug fix: set tick positions BEFORE labels — matplotlib pairs labels
    # with the ticks in effect at call time, so the old order (labels first,
    # then ticks) could attach flower names to the wrong bars.
    ax2.set_yticks(tick_y)
    ax2.set_yticklabels(labels)
    # Highest-probability class at the top.
    ax2.invert_yaxis()
    ax2.set_xlabel('Probs')
    ax2.barh(tick_y, probs, color='b')
    plt.show()
sanity_check(test_dir + '/84/image_02563.jpg', model)
```
| github_jupyter |
```
import pandas as pd
import seaborn as sns
desc = pd.read_csv('data/VariableDefinitions.csv')
train = pd.read_csv('data/Train.csv')
test = pd.read_csv('data/Test.csv')
sub = pd.read_csv('data/Samplesubmission.csv')
# Inspect the variable-definitions frame.
# Bug fix: the original referenced an undefined `df` (NameError); `desc`
# is the frame being inspected in this cell.
desc.loc[1].values
# Drop the empty spill-over column from the CSV export.
desc.drop(['Unnamed: 1'], axis=1, inplace=True)
desc.iloc[2:, ].values
# Quick look at the training data: sample rows, dtypes, missing-value counts.
train.head(2)
train.dtypes
train.isna().sum()
test.isna().sum()
# Flag each row's origin so train/test can be split apart again after the
# shared preprocessing below.
train['is_train'] = True
test['is_train'] = False
full = pd.concat([train.drop('CHURN', axis=1), test], ignore_index=True)
# For every column with missing values: add a 0/1 missingness indicator,
# then impute with the TRAIN mode (categoricals) or TRAIN median (numerics),
# so no test-set statistics leak into the fill values.
for col in full.columns:
    if full[col].isna().sum() == 0:
        continue
    full[col + '_isna'] = full[col].isna().astype('int')
    filler = train[col].mode()[0]
    if train[col].dtype != 'object':
        filler = train[col].median()
    full[col].fillna(filler, inplace=True)
# MRG is dropped — presumably constant/uninformative; verify against the data.
full = full.drop('MRG', axis=1)
# Frequency-encode TOP_PACK with train-set counts; packs unseen in train map
# to NaN and are zero-filled just below.
freq_map = train['TOP_PACK'].value_counts().to_dict()
full['TOP_PACK'] = full['TOP_PACK'].map(freq_map)
full.fillna(0, inplace=True)
full.isna().sum()
# One-hot encode the remaining categoricals; user_id is an identifier, not a feature.
full = pd.get_dummies(full.drop('user_id', axis=1))
# NOTE: the right-hand side is evaluated before `train` is rebound, so
# train['CHURN'] still reads the original training frame here.
train, target = full[full['is_train']], train['CHURN']
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score
from xgboost import XGBClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from catboost import CatBoostClassifier
from lightgbm import LGBMClassifier
# Quick baselines: hold out a validation split and compare ROC-AUC across
# gradient-boosting implementations.
X_train, X_test, y_train, y_test = train_test_split(train, target)
lgbm = LGBMClassifier(learning_rate=0.075)
lgbm.fit(X_train, y_train)
roc_auc_score(y_test, lgbm.predict_proba(X_test)[:, 1])
cbc = CatBoostClassifier()
cbc.fit(X_train, y_train)
roc_auc_score(y_test, cbc.predict_proba(X_test)[:, 1])
# Counter the churn-class imbalance by up-weighting the positive class.
positive = target.sum()
negative = target.shape[0] - positive
# Bug fix: xgboost's documentation recommends
# scale_pos_weight = sum(negatives) / sum(positives); the original computed
# positive/negative, which DOWN-weighted the minority positive class.
weight = negative / positive
xgb = XGBClassifier(scale_pos_weight=weight)
xgb.fit(X_train, y_train)
roc_auc_score(y_test, xgb.predict_proba(X_test)[:, 1])
# roc_auc_score(y_test, xgb.predict_proba(X_test)[:, 1]) -- for unweighted
xgb.fit(train, target)
def make_submission(prediction, filename):
    """Write `prediction` into the sample-submission frame and save it as a CSV."""
    sub['CHURN'] = prediction
    out_path = 'data/submissions/{}.csv'.format(filename)
    sub.to_csv(out_path, index=False)
prediction = xgb.predict_proba(full[~full['is_train']])
make_submission(prediction[:, 1], 'xgboost_baseline')
lr = LogisticRegression()
lr.fit(X_train, y_train)
roc_auc_score(y_test, lr.predict_proba(X_test)[:, 1])
from hyperopt import hp, fmin, tpe, Trials, STATUS_OK
import numpy as np
# Outer holdout used to score every hyperopt trial.
X, X_test, y, y_test = train_test_split(train, target)
# LightGBM search space. NOTE: fmin returns INDICES (not values) for
# hp.choice entries, which matters when the `best` dict is reused later.
space = {
    'learning_rate': hp.uniform('learning_rate', 0.05, 0.3),
    'max_depth': hp.choice('max_depth', np.arange(5, 25, 1, dtype=int)),
    'min_child_weight': hp.choice('min_child_weight', np.arange(1, 10, 1, dtype=int)),
    'colsample_bytree': hp.uniform('colsample_bytree', 0.2, 0.9),
    'subsample': hp.uniform('subsample', 0.6, 1),
    'n_estimators': hp.choice('n_estimators', np.arange(20, 100, 2, dtype=int)),
}
# Fit-time settings shared by every trial: early stopping on validation AUC.
lgb_fit_params = {
    'eval_metric': 'auc',
    'early_stopping_rounds': 50,
    'verbose': False,
    # 'num_iterations': 1000
}
def objective(params):
    # Hyperopt objective: train an LGBM with `params` on a fresh train/val
    # split of the global X/y and return the negative ROC AUC measured on
    # the global hold-out X_test/y_test (hyperopt minimises).
    # NOTE(review): scoring every trial on the final X_test lets the search
    # overfit that split — consider scoring on (X_val, y_val) instead.
    model = LGBMClassifier(
        **params
    )
    X_train, X_val, y_train, y_val = train_test_split(X, y)
    model.fit(X_train, y_train,
              **lgb_fit_params,
              eval_set=[(X_val, y_val)])
    prediction = model.predict_proba(X_test)
    score = roc_auc_score(y_test, prediction[:, 1])
    return -score
# Run the hyper-parameter search defined above.
from hyperopt import space_eval

best = fmin(fn=objective,
            space=space,
            algo=tpe.suggest,
            max_evals=50)
best
# Fixed: fmin returns hp.choice dimensions as *indices* into the choice
# arrays; map them back to the actual parameter values before use.
best = space_eval(space, best)
lgb = LGBMClassifier(**best)
lgb.fit(train.values, target.values)
# Fixed: the 'lgbm_tuned' submission previously used the *XGBoost* model's
# predictions (xgb.predict_proba); use the tuned LightGBM it is named after.
prediction = lgb.predict_proba(full[~full['is_train']])
make_submission(prediction[:, 1], 'lgbm_tuned')
target[target==1].index
# Random oversampling: duplicate positive rows until the classes are 1:1.
positive = target.sum()
negative = target.shape[0] - positive
need = negative - positive
# All original positive indices plus `need` resampled (with replacement) ones.
new_idx = np.hstack([np.random.choice(target[target==1].index, need), target[target==1].index])
X_oversampled = pd.concat([train[target==0], train.loc[new_idx]])
y_oversampled = pd.concat([target[target==0], target.loc[new_idx]])
# NOTE(review): `idx` is computed but never used (test_idx1 repeats it).
idx = np.random.choice(target[target==1].index, int(positive * 0.3), replace=False)
# Stratified 30% hold-out drawn separately from each class.
test_idx1 = np.random.choice(target[target==1].index, int(positive * 0.3), replace=False)
test_idx0 = np.random.choice(target[target==0].index, int(negative * 0.3), replace=False)
full_test_idx = np.hstack([test_idx0, test_idx1])
full_train_idx = np.setdiff1d(target.index, full_test_idx)
# NOTE(review): splitting AFTER oversampling selects duplicated positive rows
# via .loc on repeated index labels, so copies of the same row can appear in
# both train and test (leakage) — confirm this is intended.
X_train, X_test, y_train, y_test = X_oversampled.loc[full_train_idx], X_oversampled.loc[full_test_idx],\
                                   y_oversampled.loc[full_train_idx], y_oversampled.loc[full_test_idx]
lr = LogisticRegression()
lr.fit(X_train, y_train)
roc_auc_score(y_test, lr.predict_proba(X_test)[:, 1])
# NOTE(review): raw fmin output encodes hp.choice parameters as indices —
# confirm `best` has been mapped back (hyperopt.space_eval) before this point.
lgbm = LGBMClassifier(**best)
lgbm.fit(X_train, y_train)
roc_auc_score(y_test, lgbm.predict_proba(X_test)[:, 1])
# Refit on the full oversampled set and submit.
lgbm = LGBMClassifier(**best)
lgbm.fit(X_oversampled, y_oversampled)
prediction = lgbm.predict_proba(full[~full['is_train']])
make_submission(prediction[:, 1], 'lgbm_tuned_oversample')
xgb = XGBClassifier()
xgb.fit(X_oversampled, y_oversampled)
prediction = xgb.predict_proba(full[~full['is_train']])
make_submission(prediction[:, 1], 'xgb_oversample')
```
| github_jupyter |
```
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import matplotlib
from matplotlib import rcParams
rcParams['font.family'] = 'monospace'
#rcParams['font.sans-serif'] = ['Tahoma']
import numpy as np
import math
import datetime
import networkx as nx
import os
def TodaysDate():
    """Return today's date formatted as e.g. '05Feb2026' (%d%b%Y)."""
    return datetime.date.today().strftime('%d%b%Y')
def DNAcoding_sequence(DNASequence, QualityScoreSequence, start_sequence, stop_sequence):
    """Extract the coding region between start_sequence and stop_sequence.

    Uses the FIRST occurrence of start_sequence and the LAST occurrence of
    stop_sequence.  Returns the coding DNA string only when it has a valid
    length (24..240 nt, a multiple of 3) and every base in the matching
    slice of the Phred quality string meets the threshold; otherwise
    returns None (the None path was previously implicit for bad lengths).
    """
    # Phred+33 encoding: accepted quality characters are chr(33+29)..chr(126),
    # i.e. scores 29..93.  ThresholdQualityScore must be between 0 and 93.
    ThresholdQualityScore = 29
    MinLength = 24
    MaxLength = 240
    StartIndex = DNASequence.find(start_sequence) + len(start_sequence)
    StopIndex = DNASequence.rfind(stop_sequence)
    coding_sequence = DNASequence[StartIndex:StopIndex]
    length = len(coding_sequence)
    if not (MinLength <= length <= MaxLength and length % 3 == 0):
        return None
    for Character in QualityScoreSequence[StartIndex:StopIndex]:
        # Equivalent to the former substring-membership test against the
        # quality alphabet, but O(1) per character instead of O(n).
        if not (33 + ThresholdQualityScore <= ord(Character) <= 126):
            return None
    return str(coding_sequence)
def Translation(coding_sequence):
    """Transcribe a DNA coding sequence to RNA and translate it to a peptide.

    Unknown nucleotides transcribe to 'X'.  Codons absent from the table —
    including any trailing partial codon — translate to 'X' as well (fixed:
    the old fallback appended a profane debug string into the peptide).
    Stop codons are encoded as '#' (UAA, ochre), '*' (UAG, amber) and
    '&' (UGA, opal).
    """
    TranslationCode = {
        'AAA':'K','AAC':'N','AAG':'K','AAU':'N',
        'ACA':'T','ACC':'T','ACG':'T','ACU':'T',
        'AGA':'R','AGC':'S','AGG':'R','AGU':'S',
        'AUA':'I','AUC':'I','AUG':'M','AUU':'I',
        'CAA':'Q','CAC':'H','CAG':'Q','CAU':'H',
        'CCA':'P','CCC':'P','CCG':'P','CCU':'P',
        'CGA':'R','CGC':'R','CGG':'R','CGU':'R',
        'CUA':'L','CUC':'L','CUG':'L','CUU':'L',
        'GAA':'E','GAC':'D','GAG':'E','GAU':'D',
        'GCA':'A','GCC':'A','GCG':'A','GCU':'A',
        'GGA':'G','GGC':'G','GGG':'G','GGU':'G',
        'GUA':'V','GUC':'V','GUG':'V','GUU':'V',
        'UAA':'#','UAC':'Y','UAG':'*','UAU':'Y',
        'UCA':'S','UCC':'S','UCG':'S','UCU':'S',
        'UGA':'&','UGC':'C','UGG':'W','UGU':'C',
        'UUA':'L','UUC':'F','UUG':'L','UUU':'F'
    }
    # UAA (ochre) — #
    # UAG (amber) — *
    # UGA (opal) — &
    TranscriptionCode = {'A':'A','C':'C','G':'G','T':'U','U':'T'}
    # DNA -> RNA (T <-> U swap; anything unexpected becomes 'X').
    RNASequence = ''.join(TranscriptionCode.get(Nucleotide, 'X')
                          for Nucleotide in coding_sequence)
    # RNA -> peptide, one codon (3 nt) at a time; avoids the old O(n^2)
    # repeated string slicing.
    peptide = ''.join(TranslationCode.get(RNASequence[i:i + 3], 'X')
                      for i in range(0, len(RNASequence), 3))
    return peptide
def SingleSelectionRoundSummary(fastq_file_path):
    """Parse one FASTQ file (one round of selection).

    Returns a nested dict {peptide: {coding_DNA: read_count}} covering every
    read that contains both adapter sequences and passes the quality/length
    checks in DNAcoding_sequence.
    """
    # Fixed: the file handle was never closed before ('RawDataFile.close'
    # without parentheses is a no-op); a context manager closes it reliably.
    with open(fastq_file_path, 'r') as RawDataFile:
        lines = RawDataFile.readlines()
    #start_sequence = 'ATG' # Met codon
    #stop_sequence = 'TGCGGCAGC' # sequences appear to have been pre-trimmed
    #stop_sequence = 'TAG' # amber stop codon
    start_sequence = 'TAATACGACTCACTATAGGGTTAACTTTAAGAAGGAGATATACATATG' # NNK - T7g10M.F48
    stop_sequence = 'TGCGGCAGCGGCAGCGGCAGCTAGGACGGGGGGCGGAAA' #NNK - CGS3an13.R39
    #start_sequence = 'TAATACGACTCACTATAGGGTTGAACTTTAAGTAGGAGATATATCCATG' #NNU - T7-CH-F49
    #stop_sequence = 'TGTGGGTCTGGGTCTGGGTCTTAGGACGGGGGGCGGAAA' #NNU - CGS3-CH-R39
    # summary = {peptide: {coding_sequence: occurrence_count}}
    summary = {}
    # FASTQ layout: sequence on line i, matching quality string on line i + 2.
    for i, line in enumerate(lines):
        if start_sequence in line and stop_sequence in line:
            coding_sequence = DNAcoding_sequence(line, lines[i + 2], start_sequence, stop_sequence)
            if coding_sequence is not None:
                peptide = str(Translation(coding_sequence))
                dna_counts = summary.setdefault(peptide, {})
                key = str(coding_sequence)
                dna_counts[key] = dna_counts.get(key, 0) + 1
    return summary
def HammingDistance(Sequence1, Sequence2):
    """Hamming distance between two strings; the shorter one is padded with
    '%' so that any length difference counts as mismatches."""
    if len(Sequence1) < len(Sequence2):
        Sequence1 += '%' * (len(Sequence2) - len(Sequence1))
    else:
        Sequence2 += '%' * (len(Sequence1) - len(Sequence2))
    return sum(a != b for a, b in zip(Sequence1, Sequence2))
def HammingDistanceBasedFormating(Sequence1, Sequence2):
    """Format Sequence2 against the reference Sequence1.

    Positions matching the reference are lower-cased; mismatches keep their
    original character.  The shorter sequence is padded with '.' so lengths
    agree (so when Sequence1 is longer, the result is the padded form of
    Sequence2).  The old version also tallied a Hamming distance here but
    never used it; that dead counter has been removed.
    """
    if len(Sequence1) < len(Sequence2):
        Sequence1 += '.' * (len(Sequence2) - len(Sequence1))
    elif len(Sequence1) > len(Sequence2):
        Sequence2 += '.' * (len(Sequence1) - len(Sequence2))
    return ''.join(c2.lower() if c1 == c2 else c2
                   for c1, c2 in zip(Sequence1, Sequence2))
def Completedisplay_summary(data_directory_path):
    """Parse every '*.fastq' file in the directory into one summary dict.

    Returns {cycle_number: {peptide: {coding_DNA: read_count}}}.  The cycle
    number is taken from the two characters immediately before the first '.'
    in the file name (e.g. 'sample06.fastq' -> 6).
    """
    summary = {}
    for file_name in os.listdir(data_directory_path):
        # Skip non-FASTQ directory entries (hidden files etc.).
        if not file_name.endswith('.fastq'):
            continue
        file_path = os.path.join(data_directory_path, file_name)
        dot = file_name.find('.')
        # int() already copes with a leading zero ('06' -> 6), so the old
        # first-digit / second-digit special-casing was unnecessary.
        cycle_number = int(file_name[dot - 2:dot])
        summary[cycle_number] = SingleSelectionRoundSummary(file_path)
    return summary
def peptidesOccurrences_BY_Round(data_directory_path):
    """Total read count per peptide, per selection round:
    {round: {peptide: count}}."""
    display_summary = Completedisplay_summary(data_directory_path)
    return {
        Round: {peptide: sum(dna_counts.values())
                for peptide, dna_counts in round_summary.items()}
        for Round, round_summary in display_summary.items()
    }
def DNAsOccurrences_BY_Round(data_directory_path):
    """Read count per coding-DNA sequence, per round: {round: {dna: count}}.

    Flattens the per-peptide nesting; each coding DNA maps to exactly one
    peptide, so no counts collide.
    """
    display_summary = Completedisplay_summary(data_directory_path)
    result = {}
    for Round, round_summary in display_summary.items():
        flattened = {}
        for dna_counts in round_summary.values():
            flattened.update(dna_counts)
        result[Round] = flattened
    return result
def TotalReads_BY_Round(data_directory_path):
    """Total number of accepted reads per selection round: {round: count}."""
    # Fixed: the previous version ALSO called Completedisplay_summary (a full
    # re-parse of every FASTQ file) just to iterate its round keys;
    # peptidesOccurrences_BY_Round covers the same rounds, so the redundant
    # expensive pass is dropped.
    peptides_BY_Round = peptidesOccurrences_BY_Round(data_directory_path)
    return {Round: sum(counts.values())
            for Round, counts in peptides_BY_Round.items()}
def BaseRoundSortedpeptidesList(data_directory_path, base_cycle):
    """Peptides observed in the base round, most frequent first."""
    occurrences = peptidesOccurrences_BY_Round(data_directory_path)[base_cycle]
    return sorted(occurrences, key=occurrences.get, reverse=True)
def peptidesRank_IN_BaseRound(data_directory_path, base_cycle):
    """Dense rank of each peptide in the base round: {peptide: rank}.

    Rank 1 is the most frequent peptide; peptides with equal counts share
    the same rank and the next distinct count increments the rank by one.
    """
    counts = peptidesOccurrences_BY_Round(data_directory_path)[base_cycle]
    ranks = {}
    rank = 1
    previous_count = 0
    for peptide in BaseRoundSortedpeptidesList(data_directory_path, base_cycle):
        count = counts[peptide]
        if count < previous_count:
            rank += 1
        ranks[peptide] = rank
        previous_count = count
    return ranks
def BaseRoundSortedDNAsList(data_directory_path, base_cycle):
    """Coding-DNA sequences of the base round, most frequent first."""
    occurrences = DNAsOccurrences_BY_Round(data_directory_path)[base_cycle]
    return sorted(occurrences, key=occurrences.get, reverse=True)
def DNAClonesOccurrences_BY_Round_BY_peptide(data_directory_path):
    """Number of distinct coding-DNA clones per peptide, per round:
    {round: {peptide: clone_count}}."""
    display_summary = Completedisplay_summary(data_directory_path)
    return {Round: {peptide: len(dna_counts)
                    for peptide, dna_counts in round_summary.items()}
            for Round, round_summary in display_summary.items()}
def peptidesAppearances_BY_Round(BaseRoundSortedpeptidesList, peptidesOccurrences_BY_Round):
    """For each peptide in the given list, the rounds in which it appears.

    Returns {peptide: [round, ...]}, rounds in the dict's iteration order.
    """
    return {peptide: [Round for Round in peptidesOccurrences_BY_Round
                      if peptide in peptidesOccurrences_BY_Round[Round]]
            for peptide in BaseRoundSortedpeptidesList}
def DNAsAppearances_BY_Round(BaseRoundSortedDNAsList, DNAsOccurrences_BY_Round):
    """For each coding DNA in the given list, the rounds in which it appears.

    Returns {dna: [round, ...]}, rounds in the dict's iteration order.
    """
    return {DNA: [Round for Round in DNAsOccurrences_BY_Round
                  if DNA in DNAsOccurrences_BY_Round[Round]]
            for DNA in BaseRoundSortedDNAsList}
def display_summaryReport(data_directory_path, base_cycle, n_top_peptides, file_name):
    """Write a CSV summary of the top peptides and plot their enrichment.

    Produces '<date>display_summary<file_name>.csv' and a matching .png
    plot of per-round ligand fractions.  NOTE(review): `n_top_peptides` is
    currently ignored — the peptide list is hard-coded below; confirm
    whether the commented-out slice should be restored.
    """
    today = TodaysDate()
    display_summaryFileNameCSV = str(today) + 'display_summary' + file_name + '.csv'
    display_summaryReportFile = open(display_summaryFileNameCSV, 'w')
    display_summary = Completedisplay_summary(data_directory_path)
    SortedRoundsList = sorted(display_summary.keys())
    peptides_BY_Round = peptidesOccurrences_BY_Round(data_directory_path)
    Totalpeptides_BY_Round = TotalReads_BY_Round(data_directory_path)
    BaseRoundSortedpeptides = BaseRoundSortedpeptidesList(data_directory_path, base_cycle)
    #for i in range(len(BaseRoundSortedpeptides)):
    #    print ('>seq' + str(i + 1) + '\n' + BaseRoundSortedpeptides[i])
    #BaseRoundTopSortedpeptides = BaseRoundSortedpeptides[0 : (n_top_peptides)]
    # Hard-coded list of the 24 peptides reported in the paper (this is what
    # makes the n_top_peptides argument a no-op).
    BaseRoundTopSortedpeptides = ['VWDPRTFYLSRI', 'WDANTIFIKRV', 'WNPRTIFIKRA', 'VWDPRTFYLSRT',
                                  'IWDTGTFYLSRT', 'WWNTRSFYLSRI', 'FWDPRTFYLSRI', 'VWDPSTFYLSRI',
                                  'KWDTRTFYLSRY', 'KWDTRTFYLSRI', 'IWDPRTFYLSRI', 'IWDTGTFYLSRI',
                                  'VWDPRTFYLSRM', 'AWDPRTFYLSRI', 'VWDSRTFYLSRI', 'VWDPGTFYLSRI',
                                  'VWDPRTFYMSRI', 'VWDPRTFYLSRS', 'VWDPRTFYLSRV', 'WNPRTIFIKRV',
                                  'VRDPRTFYLSRI', 'VWDPKTFYLSRI', 'VWDPRTFYLSRN', 'FRFPFYIQRR'
                                  ]
    BaseRoundpeptidesRank = peptidesRank_IN_BaseRound(data_directory_path, base_cycle)
    #print (BaseRoundSortedpeptides)
    # Measured dissociation constants (nM) for the peptides listed above.
    Top24peptidesKDs = {'VWDPRTFYLSRI' : '3', 'WDANTIFIKRV' : '4', 'WNPRTIFIKRA' : '>1000', 'VWDPRTFYLSRT' : '3',
                        'IWDTGTFYLSRT' : '7', 'WWNTRSFYLSRI' : '12', 'FWDPRTFYLSRI' : '4', 'VWDPSTFYLSRI' : '3',
                        'KWDTRTFYLSRY' : '5', 'KWDTRTFYLSRI' : '6', 'IWDPRTFYLSRI' : '1', 'VWDPRTFYLSRM' : '4',
                        'IWDTGTFYLSRI' : '>1000', 'VWDPGTFYLSRI' : '<1', 'VWDSRTFYLSRI' : '3', 'AWDPRTFYLSRI': '6',
                        'VWDPRTFYLSRS' : '6', 'VWDPRTFYMSRI' : '1', 'VWDPRTFYLSRV' : '3', 'WNPRTIFIKRV' : '>1000',
                        'VRDPRTFYLSRI' : '>1000', 'VWDPRTFYLSRN' : '>1000', 'VWDPKTFYLSRI' : '14', 'FRFPFYIQRR' : '>1000'
                        }
    # CSV header: peptide, rank, #cDNA mutants, then one column per round.
    display_summaryReportFile.write('peptide sequence' + ',' +
                                    'rank (#)' + ',' +
                                    'cDNA mutants' + ',')
    for Round in SortedRoundsList:
        display_summaryReportFile.write('C' +
                                        str(Round) +
                                        ' count (#) [frequency(%)]' + ',')
    display_summaryReportFile.write('\n')
    # One CSV row per top peptide.
    for peptide in BaseRoundTopSortedpeptides:
    #for peptide in Top24peptidesKDs:
        # NOTE(review): `Round` here is the stale loop variable left over
        # from the header loop above (i.e. the LAST round), not base_cycle,
        # and the resulting value is never used — dead and likely buggy.
        BaseRoundpeptideFraction = float((peptides_BY_Round[Round].get(peptide, 0)))/float(Totalpeptides_BY_Round[base_cycle])
        peptideRank = BaseRoundpeptidesRank[peptide]
        # Lower-case the residues that match the top-ranked peptide.
        Formatedpeptide = HammingDistanceBasedFormating(BaseRoundTopSortedpeptides[0], peptide)
        peptidecDNAMutants = len(display_summary[base_cycle][peptide])
        display_summaryReportFile.write(Formatedpeptide + ',' +
                                        str(peptideRank) + ',' +
                                        str(peptidecDNAMutants) + ',')
        for Round in SortedRoundsList:
            peptideFraction = float((peptides_BY_Round[Round].get(peptide, 0)))/float(Totalpeptides_BY_Round[Round])
            # NOTE(review): BaseFraction is assigned but never used.
            BaseFraction = peptideFraction
            display_summaryReportFile.write(str(peptides_BY_Round[Round].get(peptide, 0)) +
                                            ' [' + '{:.1%}'.format(peptideFraction) + ']' + ',')
        display_summaryReportFile.write('\n')
    # Footer row with per-round totals.
    display_summaryReportFile.write('total count (#)' + ',' + ',')
    for Round in SortedRoundsList:
        display_summaryReportFile.write(str(Totalpeptides_BY_Round[Round]) + ',')
    display_summaryReportFile.write('\n\n\n')
    display_summaryReportFile.close()
    #-------------------------------------------------------------------------------
    # Enrichment plot: ligand fraction vs. selection cycle, one curve per peptide.
    # Create a figure of size 8x6 inches, 500 dots per inch
    plt.figure(
        figsize = (8, 6),
        dpi = 250)
    # Create 'ggplot' style
    plt.style.use('fivethirtyeight')
    # Create a new subplot from a grid of 1x1
    Graph = plt.subplot(1, 1, 1)
    Xs = []
    Ys = []
    # NOTE(review): Rank and peptideFractionInFinalRound are never used.
    Rank = 1
    peptideFractionInFinalRound = 0
    # Map colours onto lines (coloured by base-round rank, 20-colour map).
    cNorm = matplotlib.colors.Normalize(vmin = 1,
                                        vmax = 20)
    scalarMap = matplotlib.cm.ScalarMappable(norm = cNorm,
                                             cmap = 'tab20')
    peptideLabels = []
    for peptide in BaseRoundTopSortedpeptides:
    #for peptide in Top24peptidesKDs:
        # Per-round ligand fraction for this peptide.
        peptidesFractions_BY_Round = []
        for Round in SortedRoundsList:
            peptidesFractions_BY_Round += [
                float((peptides_BY_Round[Round].get(peptide, 0)))/float(Totalpeptides_BY_Round[Round])]
        x = SortedRoundsList
        y = peptidesFractions_BY_Round
        Xs += x
        Ys += y
#        peptideColour = scalarMap.to_rgba(BaseRoundTopSortedpeptides.index(peptide))
        peptideRank = BaseRoundpeptidesRank[peptide]
#        print(peptideRank)
        peptideColour = scalarMap.to_rgba(peptideRank)
        peptideKD = Top24peptidesKDs[peptide]
        Formatedpeptide = HammingDistanceBasedFormating(BaseRoundTopSortedpeptides[0], peptide)
        # Legend entry: formatted sequence, final-round fraction, measured KD.
        peptideLabel = f"{Formatedpeptide} ({peptidesFractions_BY_Round[-1]:.2%}, {peptideKD} nM)"
        #Set peptideLabel
        peptideLabels += [peptideLabel]
        plt.plot(x, y,
                 'o-',
                 c = peptideColour,
                 lw = 2.0,
                 ms = 4.0,
                 mew = 0.1,
                 mec = '#191919')
    # Pad the axes by 5% of the data range on each side.
    XMin = min(Xs) - 0.05*(max(Xs) - min(Xs))
    XMax = max(Xs) + 0.05*(max(Xs) - min(Xs))
    YMin = min(Ys) - 0.05*(max(Ys) - min(Ys))
    YMax = max(Ys) + 0.05*(max(Ys) - min(Ys))
    plt.axis([XMin, XMax, YMin, YMax])
    plt.xticks(fontsize = 10)
    plt.yticks(fontsize = 10)
    plt.xlabel('mRNA Display Cycle (#)',
               fontsize = 10)
    plt.ylabel('Ligand Fraction (%)',
               fontsize = 10)
    legend = plt.legend(peptideLabels,
                        title = 'cyclic-peptide random region (ligand fraction after last cycle, k$_D$)',
                        loc = 'upper center',
                        bbox_to_anchor = (0.5, -0.10),
                        fancybox = True,
                        shadow = False,
                        fontsize = 10,
                        ncol = 3)
    Graph.get_legend().get_title().set_size('small')
    display_summaryFileNamePNG = str(today) + 'display_summary' + file_name + '.png'
    plt.savefig(display_summaryFileNamePNG,
                bbox_extra_artists = [legend],
                bbox_inches = 'tight',
                dpi = 180)
    plt.show()
    plt.close()
# Driver: summarise the PHD2 selection using round 6 as the base round.
data_directory_path = '../sample_input/'
base_cycle = 6
TopNpeptidesNumber = 24
SummaryFileName = 'Paper05_PHD2SelectionResults'
display_summaryReport(data_directory_path, base_cycle, TopNpeptidesNumber, SummaryFileName)
# NOTE(review): the same report is generated twice, differing only in the
# input path ('../sample_input/' vs 'sample_input/') — presumably only one
# path is valid per environment; confirm which is intended.
data_directory_path = 'sample_input/'
base_cycle = 6
TopNpeptidesNumber = 24
SummaryFileName = 'Paper05_PHD2SelectionResults'
display_summaryReport(data_directory_path, base_cycle, TopNpeptidesNumber, SummaryFileName)
```
| github_jupyter |
# Developing an AI application
Going forward, AI algorithms will be incorporated into more and more everyday applications. For example, you might want to include an image classifier in a smart phone app. To do this, you'd use a deep learning model trained on hundreds of thousands of images as part of the overall application architecture. A large part of software development in the future will be using these types of models as common parts of applications.
In this project, I'll train an image classifier to recognize different species of flowers. You can imagine using something like this in a phone app that tells you the name of the flower your camera is looking at. In practice you'd train this classifier, then export it for use in your application. We'll be using [this dataset](http://www.robots.ox.ac.uk/~vgg/data/flowers/102/index.html) of 102 flower categories, you can see a few examples below.
<img src='assets/Flowers.png' width=500px>
The project is broken down into multiple steps:
* Load and preprocess the image dataset
* Train the image classifier on our dataset
* Use the trained classifier to predict image content
I'll lead you through each part which we'll implement in Python.
When I've completed this project, you'll have an application that can be trained on any set of labeled images. Here our network will be learning about flowers and end up as a command line application. But, what you do with your new skills depends on your imagination and effort in building a dataset. For example, imagine an app where you take a picture of a car, it tells you what the make and model is, then looks up information about it. Go build your own dataset and make something new.
First up is importing the packages I'll need. It's good practice to keep all the imports at the beginning of your code.
```
import torch
from torch import nn, optim
from torchvision import datasets, transforms, models
import matplotlib.pyplot as plt
% matplotlib inline
from PIL import Image
import numpy as np
```
## Load the data
Here I'll use `torchvision` to load the data ([documentation](http://pytorch.org/docs/0.3.0/torchvision/index.html)). The data should be included alongside this notebook, otherwise you can [download it here](https://s3.amazonaws.com/content.udacity-data.com/nd089/flower_data.tar.gz). The dataset is split into three parts, training, validation, and testing. For the training, you'll want to apply transformations such as random scaling, cropping, and flipping. This will help the network generalize leading to better performance. I'll also need to make sure the input data is resized to 224x224 pixels as required by the pre-trained networks.
The validation and testing sets are used to measure the model's performance on data it hasn't seen yet. For this I don't want any scaling or rotation transformations, but I'll need to resize then crop the images to the appropriate size.
The pre-trained networks I'll use were trained on the ImageNet dataset where each color channel was normalized separately. For all three sets I'll need to normalize the means and standard deviations of the images to what the network expects. For the means, it's `[0.485, 0.456, 0.406]` and for the standard deviations `[0.229, 0.224, 0.225]`, calculated from the ImageNet images. These values will shift each color channel to be centered at 0 and range from -1 to 1.
```
# Dataset layout: flowers/{train,valid,test}/<class_id>/<image>.jpg
data_dir = 'flowers'
train_dir = data_dir + '/train'
valid_dir = data_dir + '/valid'
test_dir = data_dir + '/test'
# Define our transforms for the training, validation, and testing sets.
# Training: random crop + horizontal flip for augmentation, then the
# ImageNet per-channel normalisation the pre-trained network expects.
train_transforms = transforms.Compose([transforms.RandomResizedCrop(224),
                                       transforms.RandomHorizontalFlip(),
                                       transforms.ToTensor(),
                                       transforms.Normalize([0.485, 0.456, 0.406],
                                                            [0.229, 0.224, 0.225])])
# Validation/testing: deterministic resize + centre crop, no augmentation.
valid_transforms = transforms.Compose([transforms.Resize(255),
                                       transforms.CenterCrop(224),
                                       transforms.ToTensor(),
                                       transforms.Normalize([0.485, 0.456, 0.406],
                                                            [0.229, 0.224, 0.225])])
test_transforms = transforms.Compose([transforms.Resize(255),
                                      transforms.CenterCrop(224),
                                      transforms.ToTensor(),
                                      transforms.Normalize([0.485, 0.456, 0.406],
                                                           [0.229, 0.224, 0.225])])
# Load the datasets with ImageFolder (class labels come from folder names).
train_data = datasets.ImageFolder(train_dir, transform = train_transforms)
valid_data = datasets.ImageFolder(valid_dir, transform = valid_transforms)
test_data = datasets.ImageFolder(test_dir, transform = test_transforms)
# Using the image datasets and the trainforms, define the dataloaders;
# only the training loader shuffles.
train_loader = torch.utils.data.DataLoader(train_data, batch_size=17, shuffle=True)
valid_loader = torch.utils.data.DataLoader(valid_data, batch_size=17)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=17)
```
### Label mapping
I'll also need to load in a mapping from category label to category name. You can find this in the file `cat_to_name.json`. It's a JSON object which you can read in with the [`json` module](https://docs.python.org/3/library/json.html). This will give us a dictionary mapping the integer encoded categories to the actual names of the flowers.
```
# Mapping from integer category label to human-readable flower name.
import json
with open('cat_to_name.json', 'r') as f:
    cat_to_name = json.load(f)
```
# Building and training the classifier
Now that the data is ready, it's time to build and train the classifier. As usual, you should use one of the pretrained models from `torchvision.models` to get the image features. Build and train a new feed-forward classifier using those features.
Things I'll need to do:
* Load a [pre-trained network](http://pytorch.org/docs/master/torchvision/models.html)
* Define a new, untrained feed-forward network as a classifier, using ReLU activations and dropout
* Train the classifier layers using backpropagation using the pre-trained network to get the features
* Track the loss and accuracy on the validation set to determine the best hyperparameters
When training we need to make sure we're updating only the weights of the feed-forward network. We should be able to get the validation accuracy above 70% if we build everything right. We can try different hyperparameters (learning rate, units in the classifier, epochs, etc) to find the best model. Save those hyperparameters to use as default values in the next part of the project.
**Note for Workspace users:** If your network is over 1 GB when saved as a checkpoint, there might be issues with saving backups in your workspace. Typically this happens with wide dense layers after the convolutional layers. If your saved checkpoint is larger than 1 GB (you can open a terminal and check with `ls -lh`), you should reduce the size of your hidden layers and train again.
```
# Load a VGG16 pre-trained on ImageNet as the fixed feature extractor.
model = models.vgg16(pretrained = True)
model
# Freeze the pre-trained parameters; only the new classifier head is trained.
for param in model.parameters():
    param.requires_grad = False
# New feed-forward head: 25088 features -> 102 flower classes, emitting
# log-probabilities (pairs with NLLLoss below).
classifier = nn.Sequential(
    nn.Linear(25088, 4096),
    nn.ReLU(),
    nn.Dropout(0.2),
    nn.Linear(4096, 512),
    nn.ReLU(),
    nn.Dropout(0.2),
    nn.Linear(512, 102),
    nn.LogSoftmax(dim=1)
)
# Fixed: attach the new classifier BEFORE moving the model to the device.
# Previously model.to(device) ran first, so on a CUDA machine the freshly
# assigned classifier stayed on the CPU and the forward pass would fail.
model.classifier = classifier
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device);
criterion = nn.NLLLoss()
# Optimise only the (unfrozen) classifier parameters.
optimizer = optim.Adam(model.classifier.parameters(), lr=0.001)
epochs = 5
steps = 0
train_losses, valid_losses = [], []
for i in range(epochs):
    running_loss = 0
    for images,labels in train_loader:
        steps += 1
        images, labels = images.to(device), labels.to(device)
        optimizer.zero_grad()
        # (Removed a redundant .to(device) on the model output — it is
        # already produced on `device`.)
        log_ps = model(images)
        loss = criterion(log_ps, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    else:
        # for/else: runs once per epoch after the train loader is exhausted —
        # evaluate on the validation set with dropout disabled.
        valid_loss = 0
        accuracy = 0
        model.eval()
        with torch.no_grad():
            for images, labels in valid_loader:
                images, labels = images.to(device), labels.to(device)
                log_ps = model(images)
                valid_loss += criterion(log_ps, labels)
                ps = torch.exp(log_ps)
                top_p, top_class = ps.topk(1, dim=1)
                equals = top_class == labels.view(*top_class.shape)
                accuracy += torch.mean(equals.type(torch.FloatTensor))
        model.train()
        train_losses.append(running_loss/len(train_loader))
        valid_losses.append(valid_loss/len(valid_loader))
        print("Epoch: {}/{}.. ".format(i+1, epochs),
              "Training Loss: {:.3f}.. ".format(running_loss/len(train_loader)),
              "Validation Loss: {:.3f}.. ".format(valid_loss/len(valid_loader)),
              "Validation Accuracy: {:.3f}".format(accuracy/len(valid_loader)))
```
## Testing your network
It's good practice to test your trained network on test data, images the network has never seen either in training or validation. This will give us a good estimate for the model's performance on completely new images. I need to run the test images through the network and measure the accuracy, the same way I did validation. We should be able to reach around 70% accuracy on the test set if the model has been trained well.
```
# Validation on the test set
# Same procedure as the per-epoch validation pass, run once on held-out data.
# NOTE(review): test_loss is accumulated but never reported — confirm whether
# it should be printed alongside the accuracy.
test_loss = 0
accuracy = 0
model.eval()
with torch.no_grad():
    for images, labels in test_loader:
        images, labels = images.to(device), labels.to(device)
        log_ps = model(images)
        test_loss += criterion(log_ps, labels)
        ps = torch.exp(log_ps)
        # Top-1 prediction vs. ground truth -> per-batch accuracy.
        top_p, top_class = ps.topk(1, dim=1)
        equals = top_class == labels.view(*top_class.shape)
        accuracy += torch.mean(equals.type(torch.FloatTensor))
print("Testing Accuracy: {:.3f}".format(accuracy/len(test_loader)))
```
## Save the checkpoint
Now that our network is trained, I will save the model so we can load it later for making predictions. I probably want to save other things such as the mapping of classes to indices which you get from one of the image datasets: `image_datasets['train'].class_to_idx`. I can attach this to the model as an attribute which makes inference easier later on.
```model.class_to_idx = image_datasets['train'].class_to_idx```
Remember that we'll want to completely rebuild the model later so you can use it for inference. Make sure to include any information you need in the checkpoint. If we want to load the model and keep training, we'll want to save the number of epochs as well as the optimizer state, `optimizer.state_dict`. We'll likely want to use this trained model in the next part of the project, so best to save it now.
```
# Attach the class -> index mapping so inference can invert it later.
model.class_to_idx = train_data.class_to_idx
# Move to CPU before saving so the checkpoint loads on CPU-only machines.
model.cpu()
# NOTE(review): only weights and the class mapping are saved — epochs and
# optimizer.state_dict are not, so training cannot be resumed from this file.
checkpoint = {'class_to_idx': model.class_to_idx,
              'state_dict': model.state_dict()}
torch.save(checkpoint, 'checkpoint.pth')
```
## Loading the checkpoint
At this point it's good to write a function that can load a checkpoint and rebuild the model. That way I can come back to this project and keep working on it without having to retrain the network.
```
def load_checkpoint(filepath):
    """Rebuild the trained flower classifier from a checkpoint file.

    Recreates the VGG16 backbone with the custom classifier head, restores
    the trained weights and the class_to_idx mapping, and returns the model
    ready for inference.
    """
    checkpoint = torch.load(filepath)
    model = models.vgg16(pretrained = True)
    # Removed: a criterion and an optimizer were created here and immediately
    # discarded; worse, the optimizer was built over the ORIGINAL VGG
    # classifier's parameters (before the head below replaced it).
    for param in model.parameters():
        param.requires_grad = False
    model.class_to_idx = checkpoint['class_to_idx']
    # Must match the architecture used at training time exactly, otherwise
    # load_state_dict will fail.
    classifier = nn.Sequential(
        nn.Linear(25088, 4096),
        nn.ReLU(),
        nn.Dropout(0.2),
        nn.Linear(4096, 512),
        nn.ReLU(),
        nn.Dropout(0.2),
        nn.Linear(512, 102),
        nn.LogSoftmax(dim=1)
    )
    model.classifier = classifier
    model.load_state_dict(checkpoint['state_dict'])
    return model
# Rebuild the model from disk and display its architecture.
model = load_checkpoint('checkpoint.pth')
model
```
# Inference for classification
Now I'll write a function to use a trained network for inference. That is, I'll pass an image into the network and predict the class of the flower in the image. Writing a function called `predict` that takes an image and a model, then returns the top $K$ most likely classes along with the probabilities. It should look like
```python
probs, classes = predict(image_path, model)
print(probs)
print(classes)
> [ 0.01558163 0.01541934 0.01452626 0.01443549 0.01407339]
> ['70', '3', '45', '62', '55']
```
First I'll need to handle processing the input image such that it can be used in your network.
## Image Preprocessing
I'll want to use `PIL` to load the image ([documentation](https://pillow.readthedocs.io/en/latest/reference/Image.html)). It's best to write a function that preprocesses the image so it can be used as input for the model. This function should process the images in the same manner used for training.
First, resize the images where the shortest side is 256 pixels, keeping the aspect ratio. This can be done with the [`thumbnail`](http://pillow.readthedocs.io/en/3.1.x/reference/Image.html#PIL.Image.Image.thumbnail) or [`resize`](http://pillow.readthedocs.io/en/3.1.x/reference/Image.html#PIL.Image.Image.resize) methods (note that `thumbnail` bounds the *longest* side, so `resize` is the safer choice here). Then I'll need to crop out the center 224x224 portion of the image.
Color channels of images are typically encoded as integers 0-255, but the model expected floats 0-1. I'll need to convert the values. It's easiest with a Numpy array, which you can get from a PIL image like so `np_image = np.array(pil_image)`.
As before, the network expects the images to be normalized in a specific way. For the means, it's `[0.485, 0.456, 0.406]` and for the standard deviations `[0.229, 0.224, 0.225]`. I'll want to subtract the means from each color channel, then divide by the standard deviation.
And finally, PyTorch expects the color channel to be the first dimension but it's the third dimension in the PIL image and Numpy array. I can reorder dimensions using [`ndarray.transpose`](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.ndarray.transpose.html). The color channel needs to be first and retain the order of the other two dimensions.
```
def process_image(image):
    ''' Scales, crops, and normalizes a PIL image for a PyTorch model,
        returns a NumPy array in CHW layout.
    '''
    im = Image.open(image)
    # Fixed: resize so that the SHORTEST side is 256 px while keeping the
    # aspect ratio.  The old code used im.thumbnail((256, 256)), which bounds
    # the LONGEST side instead, and then cropped using the assumed (256, 256)
    # size rather than the real post-resize dimensions — producing an
    # off-centre (or out-of-bounds) crop for any non-square image.
    width, height = im.size
    scale = 256 / min(width, height)
    im = im.resize((round(width * scale), round(height * scale)))
    # Centre-crop 224x224, the input size the pre-trained network expects.
    crop_size = 224
    new_width, new_height = im.size
    left = (new_width - crop_size) // 2
    upper = (new_height - crop_size) // 2
    im_crop = im.crop(box = (left, upper, left + crop_size, upper + crop_size))
    # 0-255 ints -> 0-1 floats, then normalise with the ImageNet statistics.
    np_image = (np.array(im_crop))/255
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
    np_image = (np_image - mean) / std
    # HWC -> CHW: PyTorch expects the colour channel first.
    processed_image = np_image.transpose(2,0,1)
    return processed_image
```
To check our work, the function below converts a PyTorch tensor and displays it in the notebook. If your `process_image` function works, running the output through this function should return the original image (except for the cropped out portions).
```
def imshow(image, ax=None, title=None):
    """Display a (C, H, W) image array on a matplotlib axis, undoing the
    ImageNet normalization applied by ``process_image``.

    Returns the axis the image was drawn on.
    """
    if ax is None:
        _, ax = plt.subplots()
    # matplotlib wants channels last: (C, H, W) -> (H, W, C).
    img = image.transpose((1, 2, 0))
    # Reverse the preprocessing: x_norm * std + mean.
    channel_means = np.array([0.485, 0.456, 0.406])
    channel_stds = np.array([0.229, 0.224, 0.225])
    img = img * channel_stds + channel_means
    # Clip into the displayable [0, 1] range so it doesn't render as noise.
    ax.imshow(np.clip(img, 0.0, 1.0))
    return ax
# Round-trip check: preprocess an image and display it again; the result
# should look like the original photo minus the cropped-out borders.
image_path = 'flowers/train/1/image_06742.jpg'
imshow(process_image(image_path))
```
## Class Prediction
Once we can get images in the correct format, it's time to write a function for making predictions with our model. A common practice is to predict the top 5 or so (usually called top-$K$) most probable classes. I'll want to calculate the class probabilities then find the $K$ largest values.
To get the top $K$ largest values in a tensor use [`x.topk(k)`](http://pytorch.org/docs/master/torch.html#torch.topk). This method returns both the highest `k` probabilities and the indices of those probabilities corresponding to the classes. I need to convert from these indices to the actual class labels using `class_to_idx` which hopefully we added to the model or from an `ImageFolder` you used to load the data ([see here](#Save-the-checkpoint)). I need to make sure to invert the dictionary so you get a mapping from index to class as well.
Again, this method should take a path to an image and a model checkpoint, then return the probabilities and classes.
```python
probs, classes = predict(image_path, model)
print(probs)
print(classes)
> [ 0.01558163 0.01541934 0.01452626 0.01443549 0.01407339]
> ['70', '3', '45', '62', '55']
```
```
def predict(image_path, model, top_k=5):
    ''' Predict the class (or classes) of an image using a trained deep
    learning model.

    Parameters
    ----------
    image_path : str
        Path to the image file to classify.
    model : torch.nn.Module
        Trained classifier whose forward pass returns log-probabilities.
    top_k : int
        Number of most-probable classes to return.

    Returns
    -------
    (probs, classes) : tuple of torch.Tensor
        Top-k probabilities and their class *indices*, each of shape
        (1, top_k). Map indices to labels with ``model.class_to_idx``.
    '''
    model.to("cpu")
    model.eval()  # inference mode: disables dropout / batch-norm updates
    image = process_image(image_path)
    # Add a batch dimension: the network expects (N, C, H, W).
    tensor = torch.tensor(image).float().unsqueeze_(0)
    with torch.no_grad():
        # Call the module itself rather than model.forward() so that
        # registered hooks still run.
        log_ps = model(tensor)
    # The model outputs log-probabilities; exponentiate to get probabilities.
    ps = torch.exp(log_ps)
    probs, classes = ps.topk(top_k, dim=1)
    return probs, classes
predict('flowers/train/1/image_06734.jpg', model, top_k=5)
```
## Sanity Checking
Now that we can use a trained model for predictions, we can check to make sure it makes sense. Even if the testing accuracy is high, it's always good to check that there aren't obvious bugs. Using `matplotlib` to plot the probabilities for the top 5 classes as a bar graph, along with the input image.
You can convert from the class integer encoding to actual flower names with the `cat_to_name.json` file. To show a PyTorch tensor as an image, use the `imshow` function defined above.
```
def image_prediction(image_path):
    """Plot an image above a horizontal bar chart of its top-5 predicted
    flower classes (sanity check for the trained model)."""
    plt.figure(figsize=(5, 10))
    top_axis = plt.subplot(2, 1, 1)
    imshow(process_image(image_path), top_axis).axis('off')
    probs, classes = predict(image_path, model, top_k=5)
    prob_values = probs.data.numpy().squeeze()
    class_indices = classes.data.numpy().squeeze()
    # Invert class_to_idx so model output indices map back to class labels.
    idx_to_class = {index: label for label, index in model.class_to_idx.items()}
    class_labels = [idx_to_class[i] for i in class_indices]
    # Translate class labels to human-readable flower names.
    flower_names = [cat_to_name[label] for label in class_labels]
    plt.subplot(2, 1, 2)
    plt.barh(flower_names, prob_values)
image_path = 'flowers/train/1/image_06742.jpg'
image_prediction(image_path)
```
| github_jupyter |
```
%%html
<style>
.jp-Cell {max-width: 1024px !important; margin: auto}
.jp-Cell-inputWrapper {max-width: 1024px !important; margin: auto}
.jp-MarkdownOutput p {text-align: justify;}
.jupyter-matplotlib-figure {margin: auto;}
</style>
%matplotlib widget
from pyvisco import inter
GUI = inter.Control()
```
# PYVISCO - Prony series identification for linear viscoelastic material models
Author: Martin Springer | martin.springer@nrel.gov
***
## Overview
The mechanical response of linear viscoelastic materials is often described with Generalized Maxwell models. The necessary material model parameters are typically identified by fitting a Prony series to the experimental measurement data in either the frequency-domain (via Dynamic Mechanical Thermal Analysis) or time-domain (via relaxation measurements). Pyvisco performs the necessary data processing of the experimental measurements, mathematical operations, and curve-fitting routines to identify the Prony series parameters. The experimental data can be provided as raw measurement sets at different temperatures or as pre-processed master curves.
* If raw measurement data are provided, the time-temperature superposition principle is applied to create a master curve and obtain the shift functions prior to the Prony series parameters identification.
* If master curves are provided, the shift procedure can be skipped, and the Prony series parameters identified directly.
An optional minimization routine is provided to reduce the number of Prony elements. This routine is helpful for Finite Element simulations where reducing the computational complexity of the linear viscoelastic material models can shorten the simulation time.
```
display(GUI.b_theory)
display(GUI.out_theory)
```
***
## Parameter identification
### Specify measurement type and upload input data
In this section the measurement type of the input data is specified and the experimental data are uploaded. A set of example input files can be downloaded here: [Example input files](https://github.com/NREL/pyvisco/raw/main/examples/examples.zip)
#### Conventions
Each input file needs to consist of two header rows. The first row indicates the column variables and the second row the corresponding units. The conventions used in this notebook are summarized below. Tensile moduli are denoted as $E$ and shear moduli are denoted as $G$. Only the tensile moduli are summarized in the table below. For shear modulus data, replace `E` with `G`, e.g. `E_relax` -> `G_relax`.
| Physical quantity | Symbol | Variable | Unit |
| :--------------------- | :-------------: | :--------- | :----: |
| Relaxation modulus: | $E(t)$ | `E_relax` | `[Pa, kPa, MPa, GPa]` |
| Storage modulus: | $E'(\omega)$ | `E_stor` | `[Pa, kPa, MPa, GPa]` |
| Loss modulus: | $E''(\omega)$ | `E_loss` | `[Pa, kPa, MPa, GPa]` |
| Complex modulus: | $\lvert E^{*}\rvert$ | `E_comp` | `[Pa, kPa, MPa, GPa]` |
| Loss factor: | $\tan(\delta)$ | `tan_del` | `-` |
| Instantaneous modulus: | $E_0$ | `E_0` | `[Pa, kPa, MPa, GPa]` |
| Equilibrium modulus: | $E_{inf}$ | `E_inf` | `[Pa, kPa, MPa, GPa]` |
| Angular frequency: | $\omega$ | `omega` | `rad/s` |
| Frequency: | $f$ | `f` | `Hz` |
| Time: | $t$ | `t` | `s` |
| Temperature: | $\theta$ | `T` | `°C` |
| Relaxation times: | $\tau_i$ | `tau_i` | `s` |
| Relaxation moduli: | $E_i$ | `E_i` | `[Pa, kPa, MPa, GPa]` |
| Norm. relaxation moduli: | $\alpha_i$ | `alpha_i` | `-` |
| Shift factor: | $$\log (a_{T}) $$ | `log_aT` | `-` |
***
#### Domain
The Prony series parameters can be either fitted from measurement data of Dynamic Mechanical Thermal Analysis (DMTA) in the frequency domain (freq) or from relaxation experiments in the time domain (time).
#### Loading
Measurement data can be provided from either tensile or shear experiments. Tensile moduli are denoted as $E$ and shear moduli are denoted as $G$.
#### Instrument
* **Eplexor:** DMTA measurements conducted with a Netzsch Gabo DMA EPLEXOR can be directly uploaded as Excel files. Use the `Excel Export!` feature of the Eplexor software with the default template to create the input files.
* **user:** Comma-separated values (csv) files are used for measurements conducted with other instruments. Prepare the input files in accordance with the performed measurement. The table below shows example headers, where the first row indicates the column quantity and the second row the corresponding unit.
| Domain | Tensile | shear |
| :----------- | :------------- | :--------- |
|**Frequency domain:** | `f, E_stor, E_loss` | `f, G_stor, G_loss` |
| | `Hz, MPa, MPa` | `Hz, GPa, GPa` |
|**Time domain:** | `t, E_relax` | `t, G_relax` |
| | `s, MPa` | `s, GPa` |
#### Type
Either **raw** data containing measurements at different temperatures or ready to fit **master** curves can be uploaded.
Input files of **raw** measurement data need to specify the individual temperature sets of the performed experimental characterization:
* **Eplexor:** The notebook identifies the corresponding temperature sets automatically (only available in the frequency domain).
* **user:** Two additional columns need to be included in the input file. One column describing the temperature `T` of the measurement point and a second column `Set` to identify the corresponding measurement set of the data point (e.g. `f, E_stor, E_loss, T, Set`). All measurement points at the same temperature level are marked with the same number, e.g. 0 for the first measurement set. The first measurement set (0) represents the coldest temperature followed by the second set (1) at the next higher temperature level and so forth (see the provided [example input file](https://github.com/NREL/pyvisco/blob/main/examples/time_user_raw.csv) for further details).
```
display(GUI.w_inp_gen)
```
***
#### Optional shift factor upload
Already existing shift factors can be uploaded as a csv file with the header=`T, log_aT` and units=`C, -`, where `T` is the temperature level of the measurement set in Celsius and `log_aT` is the base 10 logarithm of the shift factor, $\log(a_T)$.
* **master:** Uploading the shift factors allows for the calculation of polynomial (D1 to D4) shift functions and the Williams–Landel–Ferry (WLF) shift function, but is not required for the Prony series estimation.
> _**Note**_: If a master curve from the Eplexor software is provided, the default behavior of the notebook is to use the WLF shift function from the Eplexor software. However, in the time-temperature superposition section, a checkbox is provided to overwrite the WLF fit of the Eplexor software and conduct another WLF fit with the algorithm in this notebook.
* **raw:** The shift factors can either be directly determined for the desired reference temperature in the time-temperature superposition section (no upload necessary) or user-specified shift factors can be uploaded to be used in the creation of the master curve.
```
display(GUI.w_inp_shift)
```
***
#### Reference temperature
Temperature chosen to construct the master curve through application of the time-temperature superposition principle $[\log(a_T) = 0]$.
```
display(GUI.w_RefT)
```
***
#### Check uploaded data
```
display(GUI.w_check_inp)
```
***
### Time-temperature superposition (shift functions)
This section allows the calculation of shift factors from raw input data to create a master curve. The shift factors are then used to fit polynomial and WLF shift functions.
#### Shift factors $\log(a_{T})$ - Create master curve from raw input
The time-temperature superposition principle is applied to create a master curve from the individual measurement sets at different temperature levels.
> _**Note**_: This subsection only applies to raw measurement data. If a master curve was uploaded, proceed to the next step.
* **user shift factors uploaded:** The provided shift factors will be used to create the master curve from the raw measurement sets (_**Note**_: the `fit and overwrite provided shift factors` checkbox allows you to overwrite the uploaded user shift factors and fit new ones).
* **No user shift factors uploaded:** The measurement sets from the raw input file are used to estimate the shift factors and create a master curve. Measurement sets below the desired reference temperatures are shifted to lower frequencies (longer time periods), whereas measurement sets at temperatures higher than the reference temperature are shifted to higher frequencies (shorter time periods). The `manually adjust shift factors` checkbox allows you to modify the obtained shift factors manually. (_**Note**_: In the frequency domain, only the storage modulus input data are considered to create the master curve from the raw input data. The shift factors obtained from the storage modulus master curve are then used to create the loss modulus master curve.)
```
display(GUI.w_aT)
```
***
#### Shift functions (WLF & Polynomial degree 1 to 4)
If shift factors are available, the WLF shift function and polynomial functions of degree 1 to 4 can be fitted and plotted below. (_**Note**_: If the WLF shift function was already provided by the Eplexor software, the checkbox below lets you overwrite the WLF fit of the Eplexor software with a WLF fit of this notebook.)
> _**Note**_: This subsection only provides shift functions and is not required to perform the parameter identification of the Prony series.
```
display(GUI.w_shift)
```
***
### Estimate Prony series parameters
#### Pre-process (smooth) master curve
A moving median filter to remove outliers in the measurement data can be applied before the Prony series parameters are identified. The window size can be adjusted through the slider above the figure. A window size of 1 means that no filtering procedure is performed and the input data are fitted directly.
```
display(GUI.w_smooth)
```
***
#### Define the number and discretization of the Prony series
The number of Prony terms, $N$, needs to be defined before the parameter $\tau_i$ and $\alpha_i$ can be identified. The `default` behavior is to equally space one Prony term per decade along the logarithmic time axis, e.g., $\tau_i$ = [1E-1, 1E0, 1E1,...] (s). This discretization typically delivers accurate results for engineering applications.
> _**Note:**_ The fine discretization can be computationally heavy for using the viscoelastic material models in Finite Element simulations. Hence, the default discretization can be modified by either using the optimization routine provided below or by manually defining the number of Prony terms (`manual`). Here, the user can decide whether to round the lowest and highest relaxation times, $\tau_i$, to the nearest base 10 number within the measurement window `round` or to use the exact minimum and maximum values of the experimental data for the relaxation times `exact`.
```
display(GUI.w_dis)
```
***
#### Curve fitting
Two different curve fitting routines for the Prony series parameters are employed and are dependent on the domain of the input data:
* **Frequency domain**: A generalized collocation method using stiffness matrices is used as described in [Kraus, M. A., and M. Niederwald. Eur J Eng Mech 37.1 (2017): 82-106](https://journals.ub.ovgu.de/index.php/techmech/article/view/600). This methods utilizes both the storage and loss modulus master curves to estimate the Prony series parameters.
* **Time domain**: A least-squares minimization is performed using the L-BFGS-B method from the scipy package. The implementation is similar to the optimization problem described by [Barrientos, E., Pelayo, F., Noriega, Á. et al. Mech Time-Depend Mater 23, 193–206 (2019)](https://doi.org/10.1007/s11043-018-9394-z) for a homogenous distribution of discrete times.
```
display(GUI.w_out_fit_prony)
```
***
#### Generalized Maxwell model
The fitted Prony series parameters in combination with the Generalized Maxwell model can be used to calculate the linear viscoelastic material parameters in both the time and frequency domain.
```
display(GUI.w_out_GMaxw)
```
***
### Optional: Minimize number of Prony elements (for Finite Element simulations)
The Generalized Maxwell model with a high number of Prony terms can be computationally expensive. Especially, when used in combination with numerical frameworks as the Finite Element Method. Reducing the number of Prony elements will decrease the accuracy of the linear viscoelastic material model, but can help to speed up subsequent numerical simulations.
We provide a simple routine to create an additional Generalized Maxwell model with a reduced number of Prony elements. The routine starts with the number of Prony terms specified above and subsequently reduces the number of terms. A least squares minimization is performed to fit the reduced term Prony parameters. The least squares residual is used to suggest an optimal number of Prony terms for subsequent FEM simulations ($R_{opt}^2 \approx$ $1.5 R_0^2$). However, the user can change this default setting by selecting a different number of Prony terms below.
> **_Note:_** This routine is computationally more demanding and can take a few minutes to complete. The runtime depends on the initial number of Prony elements and the number of data points in the measurement sets.
```
display(GUI.w_out_fit_min)
```
***
## Download results
A zip archive including the identified Prony series parameters, (shift factors and shift functions), results of the Generalized Maxwell model, and figures can be downloaded below.
```
#display(GUI.db_zip)
display(GUI.w_out_down)
```
***
## Start over!
Clear all input data and reload an empty notebook.
```
display(GUI.w_reload)
```
| github_jupyter |
# Numpy (✗)
> Makine Öğrenmesi ve Derin Öğrenme için gerekli Numpy konuları.
- toc: true
- badges: true
- comments: true
- categories: [jupyter]
- image: images/chart-preview.png
# Set up
```
import numpy as np
# Set seed for reproducibility
np.random.seed(seed=1234)
```
# Basics
Let's take a look at how to create tensors with NumPy.
* **Tensor**: collection of values
<div align="left">
<img src="https://raw.githubusercontent.com/madewithml/images/master/basics/03_NumPy/tensors.png" width="650">
</div>
```
# Scalar
x = np.array(6) # scalar
print ("x: ", x)
# Number of dimensions
print ("x ndim: ", x.ndim)
# Dimensions
print ("x shape:", x.shape)
# Size of elements
print ("x size: ", x.size)
# Data type
print ("x dtype: ", x.dtype)
# Vector
x = np.array([1.3 , 2.2 , 1.7])
print ("x: ", x)
print ("x ndim: ", x.ndim)
print ("x shape:", x.shape)
print ("x size: ", x.size)
print ("x dtype: ", x.dtype) # notice the float datatype
# Matrix
x = np.array([[1,2], [3,4]])
print ("x:\n", x)
print ("x ndim: ", x.ndim)
print ("x shape:", x.shape)
print ("x size: ", x.size)
print ("x dtype: ", x.dtype)
# 3-D Tensor
x = np.array([[[1,2],[3,4]],[[5,6],[7,8]]])
print ("x:\n", x)
print ("x ndim: ", x.ndim)
print ("x shape:", x.shape)
print ("x size: ", x.size)
print ("x dtype: ", x.dtype)
```
NumPy also comes with several functions that allow us to create tensors quickly.
```
# Functions
print ("np.zeros((2,2)):\n", np.zeros((2,2)))
print ("np.ones((2,2)):\n", np.ones((2,2)))
print ("np.eye((2)):\n", np.eye((2))) # identity matrix
print ("np.random.random((2,2)):\n", np.random.random((2,2)))
```
# Indexing
Keep in mind that when indexing the row and column, indices start at 0. And like indexing with lists, we can use negative indices as well (where -1 is the last item).
<div align="left">
<img src="https://raw.githubusercontent.com/madewithml/images/master/basics/03_NumPy/indexing.png" width="300">
</div>
```
# Indexing
x = np.array([1, 2, 3])
print ("x: ", x)
print ("x[0]: ", x[0])
x[0] = 0
print ("x: ", x)
# Slicing
x = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
print (x)
print ("x column 1: ", x[:, 1])
print ("x row 0: ", x[0, :])
print ("x rows 0,1 & cols 1,2: \n", x[0:2, 1:3])
# Integer array indexing
print (x)
rows_to_get = np.array([0, 1, 2])
print ("rows_to_get: ", rows_to_get)
cols_to_get = np.array([0, 2, 1])
print ("cols_to_get: ", cols_to_get)
# Combine sequences above to get values to get
print ("indexed values: ", x[rows_to_get, cols_to_get]) # (0, 0), (1, 2), (2, 1)
# Boolean array indexing
x = np.array([[1, 2], [3, 4], [5, 6]])
print ("x:\n", x)
print ("x > 2:\n", x > 2)
print ("x[x > 2]:\n", x[x > 2])
```
# Arithmetic
```
# Basic math
x = np.array([[1,2], [3,4]], dtype=np.float64)
y = np.array([[1,2], [3,4]], dtype=np.float64)
print ("x + y:\n", np.add(x, y)) # or x + y
print ("x - y:\n", np.subtract(x, y)) # or x - y
print ("x * y:\n", np.multiply(x, y)) # or x * y
```
### Dot product
One of the most common NumPy operations we’ll use in machine learning is matrix multiplication using the dot product. We take the rows of our first matrix (2) and the columns of our second matrix (2) to determine the dot product, giving us an output of `[2 X 2]`. The only requirement is that the inside dimensions match, in this case the first matrix has 3 columns and the second matrix has 3 rows.
<div align="left">
<img src="https://raw.githubusercontent.com/madewithml/images/master/basics/03_NumPy/dot.gif" width="450">
</div>
```
# Dot product
a = np.array([[1,2,3], [4,5,6]], dtype=np.float64) # we can specify dtype
b = np.array([[7,8], [9,10], [11, 12]], dtype=np.float64)
c = a.dot(b)
print (f"{a.shape} · {b.shape} = {c.shape}")
print (c)
```
### Axis operations
We can also do operations across a specific axis.
<div align="left">
<img src="https://raw.githubusercontent.com/madewithml/images/master/basics/03_NumPy/axis.gif" width="450">
</div>
```
# Sum across a dimension
x = np.array([[1,2],[3,4]])
print (x)
print ("sum all: ", np.sum(x)) # adds all elements
print ("sum axis=0: ", np.sum(x, axis=0)) # sum across rows
print ("sum axis=1: ", np.sum(x, axis=1)) # sum across columns
# Min/max
x = np.array([[1,2,3], [4,5,6]])
print ("min: ", x.min())
print ("max: ", x.max())
print ("min axis=0: ", x.min(axis=0))
print ("min axis=1: ", x.min(axis=1))
```
### Broadcasting
Here, we’re adding a vector with a scalar. Their dimensions aren’t compatible as is, but how does NumPy still give us the right result? This is where broadcasting comes in. The scalar is *broadcast* across the vector so that they have compatible shapes.
<div align="left">
<img src="https://raw.githubusercontent.com/madewithml/images/master/basics/03_NumPy/broadcasting.png" width="300">
</div>
```
# Broadcasting
x = np.array([1,2]) # vector
y = np.array(3) # scalar
z = x + y
print ("z:\n", z)
```
# Advanced
### Transposing
We often need to change the dimensions of our tensors for operations like the dot product. If we need to switch two dimensions, we can transpose
the tensor.
<div align="left">
<img src="https://raw.githubusercontent.com/madewithml/images/master/basics/03_NumPy/transpose.png" width="400">
</div>
```
# Transposing
x = np.array([[1,2,3], [4,5,6]])
print ("x:\n", x)
print ("x.shape: ", x.shape)
y = np.transpose(x, (1,0)) # flip dimensions at index 0 and 1
print ("y:\n", y)
print ("y.shape: ", y.shape)
```
### Reshaping
Sometimes, we'll need to alter the dimensions of the matrix. Reshaping allows us to transform a tensor into different permissible shapes -- our reshaped tensor has the same amount of values in the tensor. (1X6 = 2X3). We can also use `-1` on a dimension and NumPy will infer the dimension based on our input tensor.
The way reshape works is by looking at each dimension of the new tensor and separating our original tensor into that many units. So here the dimension at index 0 of the new tensor is 2 so we divide our original tensor into 2 units, and each of those has 3 values.
<div align="left">
<img src="https://raw.githubusercontent.com/madewithml/images/master/basics/03_NumPy/reshape.png" width="450">
</div>
```
# Reshaping
x = np.array([[1,2,3,4,5,6]])
print (x)
print ("x.shape: ", x.shape)
y = np.reshape(x, (2, 3))
print ("y: \n", y)
print ("y.shape: ", y.shape)
z = np.reshape(x, (2, -1))
print ("z: \n", z)
print ("z.shape: ", z.shape)
```
### Unintended reshaping
Though reshaping is very convenient to manipulate tensors, we must be careful of their pitfalls as well. Let's look at the example below. Suppose we have `x`, which has the shape `[2 X 3 X 4]`.
```
[[[ 1 1 1 1]
[ 2 2 2 2]
[ 3 3 3 3]]
[[10 10 10 10]
[20 20 20 20]
[30 30 30 30]]]
```
We want to reshape x so that it has shape `[3 X 8]` which we'll get by moving the dimension at index 0 to become the dimension at index 1 and then combining the last two dimensions. But when we do this, we want our output
to look like:
✅
```
[[ 1 1 1 1 10 10 10 10]
[ 2 2 2 2 20 20 20 20]
[ 3 3 3 3 30 30 30 30]]
```
and not like:
❌
```
[[ 1 1 1 1 2 2 2 2]
[ 3 3 3 3 10 10 10 10]
[20 20 20 20 30 30 30 30]]
```
even though they both have the same shape `[3X8]`.
```
x = np.array([[[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3]],
[[10, 10, 10, 10], [20, 20, 20, 20], [30, 30, 30, 30]]])
print ("x:\n", x)
print ("x.shape: ", x.shape)
```
When we naively do a reshape, we get the right shape but the values are not what we're looking for.
<div align="left">
<img src="https://raw.githubusercontent.com/madewithml/images/master/basics/03_NumPy/reshape_wrong.png" width="600">
</div>
```
# Unintended reshaping
z_incorrect = np.reshape(x, (x.shape[1], -1))
print ("z_incorrect:\n", z_incorrect)
print ("z_incorrect.shape: ", z_incorrect.shape)
```
Instead, if we transpose the tensor and then do a reshape, we get our desired tensor. Transpose allows us to put our two vectors that we want to combine together and then we use reshape to join them together.
Always create a dummy example like this when you’re unsure about reshaping. Blindly going by the tensor shape can lead to lots of issues downstream.
<div align="left">
<img src="https://raw.githubusercontent.com/madewithml/images/master/basics/03_NumPy/reshape_right.png" width="600">
</div>
```
# Intended reshaping
y = np.transpose(x, (1,0,2))
print ("y:\n", y)
print ("y.shape: ", y.shape)
z_correct = np.reshape(y, (y.shape[0], -1))
print ("z_correct:\n", z_correct)
print ("z_correct.shape: ", z_correct.shape)
```
### Adding/removing dimensions
We can also easily add and remove dimensions to our tensors and we'll want to do this to make tensors compatible for certain operations.
```
# Adding dimensions
x = np.array([[1,2,3],[4,5,6]])
print ("x:\n", x)
print ("x.shape: ", x.shape)
y = np.expand_dims(x, 1) # expand dim 1
print ("y: \n", y)
print ("y.shape: ", y.shape) # notice extra set of brackets are added
# Removing dimensions
x = np.array([[[1,2,3]],[[4,5,6]]])
print ("x:\n", x)
print ("x.shape: ", x.shape)
y = np.squeeze(x, 1) # squeeze dim 1
print ("y: \n", y)
print ("y.shape: ", y.shape) # notice extra set of brackets are gone
```
# Additional resources
* **NumPy reference manual**: We don't have to memorize anything here and we will be taking a closer look at NumPy in the later lessons. If you want to learn more checkout the [NumPy reference manual](https://docs.scipy.org/doc/numpy-1.15.1/reference/).
---
Share and discover ML projects at <a href="https://madewithml.com/">Made With ML</a>.
<div align="left">
<a class="ai-header-badge" target="_blank" href="https://github.com/madewithml/basics"><img src="https://img.shields.io/github/stars/madewithml/basics.svg?style=social&label=Star"></a>
<a class="ai-header-badge" target="_blank" href="https://www.linkedin.com/company/madewithml"><img src="https://img.shields.io/badge/style--5eba00.svg?label=LinkedIn&logo=linkedin&style=social"></a>
<a class="ai-header-badge" target="_blank" href="https://twitter.com/madewithml"><img src="https://img.shields.io/twitter/follow/madewithml.svg?label=Follow&style=social"></a>
</div>
| github_jupyter |
<img src="images/usm.jpg" width="480" height="240" align="left"/>
# MAT281 - Introducción a Pandas
## Objetivos de la clase
* Aprender conceptos básicos de la librería pandas.
## Contenidos
* [Pandas](#c1)
<a id='c1'></a>
## Pandas
<img src="images/pandas.jpeg" width="360" height="240" align="center"/>
[Pandas](https://pandas.pydata.org/) es un paquete de Python que proporciona estructuras de datos rápidas, flexibles y expresivas diseñadas para que trabajar con datos "relacionales" o "etiquetados" sea fácil e intuitivo.
Su objetivo es ser el bloque de construcción fundamental de alto nivel para hacer análisis de datos prácticos del mundo real en Python. Además, tiene el objetivo más amplio de convertirse en la herramienta de análisis/manipulación de datos de código abierto más potente y flexible disponible en cualquier idioma. Ya está en camino hacia este objetivo.
### Series y DataFrames
* Las **series** son arreglos unidimensionales con etiquetas. Se puede pensar como una generalización de los diccionarios de Python.
* Los **dataframe** son arreglos bidimensionales y una extensión natural de las series. Se puede pensar como la generalización de un numpy.array.
## 1.- Pandas Series
### Operaciones Básicas con series
```
# importar libreria: pandas, os
import pandas as pd
import numpy as np
import os
# crear serie
my_serie = pd.Series(range(3, 33, 3), index=list('abcdefghij'))
# imprimir serie
print("serie:")
print( my_serie )
# tipo
print("type:")
print( type(my_serie) )
# valores
print("values:")
print(my_serie.values)
# indice
print("index:")
print(my_serie.index)
# acceder al valor de la serie: directo
print("direct:")
print(my_serie['b'])
# acceder al valor de la serie: loc
print("loc:")
print(my_serie.loc['b'])
# acceder al valor de la serie: iloc con indice
print("iloc:")
print(my_serie.iloc[1])
# editar valores
print("edit:")
print("\nold 'd':",my_serie.loc['d'] )
my_serie.loc['d'] = 1000
print("new 'd':",my_serie.loc['d'] )
```
### Manejo de Fechas
Pandas también trae módulos para trabajar el formato de fechas.
```
# crear serie de fechas
date_rng = pd.date_range(start='1/1/2019', end='1/03/2019', freq='4H')
# imprimir serie
print("serie:")
print( date_rng )
# tipo
print("type:\n")
print( type(date_rng) )
# elementos de datetime a string
string_date_rng = [str(x) for x in date_rng]
print("datetime to string: \n")
print( np.array(string_date_rng) )
# elementos de string a datetime
timestamp_date_rng = pd.to_datetime(string_date_rng, infer_datetime_format=True)
print("string to datetime:\n")
print( timestamp_date_rng )
# obtener fechas
print("date:\n")
print(timestamp_date_rng.date)
# obtener horas
print("hour:\n")
print(timestamp_date_rng.hour)
```
### Operaciones matemáticas
Al igual que numpy, las series de pandas pueden realizar operaciones matemáticas similares (mientrás los arreglos a operar sean del tipo numérico). Por otro lado existen otras funciones de utilidad.
```
# crear serie
s1 = pd.Series([1,1,1,2,2,2,3,3,3,4,5,5,5,5])
print(f"max: {s1.max()}") # maximo
print(f"min: {s1.min()}") # minimo
print(f"mean: {s1.mean()}") # promedio
print(f"median: {s1.median()}") # mediana
```
### Masking
Existen módulos para acceder a valores que queremos que cumplan una determinada regla. Por ejemplo, acceder al valor máximo de una serie. En este caso a esta regla la denominaremos *mask*.
```
# 1. definir valor maximo
n_max = s1.max()
# 2.- definir "mask" que busca el valor
mask = (s1 == n_max)
# 3.- aplicar mask sobre la serie
s1[mask]
```
### Valores Nulos o datos perdidos
En algunas ocaciones, los arreglos no tienen información en una determinada posición, lo cual puede ser perjudicial si no se tiene control sobre estos valores.
### a) Encontrar valores nulos
```
# crear serie
s_null = pd.Series([1,2,np.nan,4,5,6,7,np.nan,9])
s_null
# mask valores nulos
print("is null?:\n")
print(s_null.isnull() )
# filtrar valores nulos
print("null serie: \n")
print(s_null[s_null.isnull()] )
```
### b) Encontrar valores no nulos
```
# imprimir serie
print("serie:")
print( s_null )
# mask valores no nulos
print("\nis not null?:")
print(s_null.notnull() )
# filtrar valores no nulos
print("\nserie with not null values")
print(s_null[s_null.notnull()] )
```
La pregunta que nos queda hacer es: ¿ Qué se debe hacer con los valores nulos ?, la respuesta es **depende**.
* Si tenemos muchos datos, lo más probable es que se puedan eliminar estos datos sin culpa.
* Si se tienen poco datos, lo más probable es que se necesite inputar un valor por defecto a los valores nulos (**ejemplo**: el promedio).
## 2.- Pandas Dataframes
### Trabajando con DataFrames
<img src="images/dataframe.png" width="360" height="240" align="center"/>
Como se menciona anteriormente, los dataframes son arreglos de series, los cuales pueden ser de distintos tipos (numéricos, string, etc.). En esta parte mostraremos un ejemplo aplicado de las distintas funcionalidades de los dataframes.
### Creación de dataframes
La creación se puede hacer de variadas formas con listas, dictionarios , numpy array , entre otros.
```
# empty dataframe
df_empty = pd.DataFrame()
df_empty
# dataframe with list
df_list = pd.DataFrame(
[
["nombre_01", "apellido_01", 60],
["nombre_02", "apellido_02", 14]
], columns = ["nombre", "apellido", "edad"]
)
df_list
# dataframe with dct
df_dct = pd.DataFrame(
{
"nombre": ["nombre_01", "nombre_02"],
"apellido": ["apellido_01", "apellido_02"],
"edad": np.array([60,14]),
}
)
df_dct
```
### Lectura de datos con dataframes
En general, cuando se trabajan con datos, estos se almacenan en algún lugar y en algún tipo de formato, por ejemplo:
* .txt
* .csv
* .xlsx
* .db
* etc.
Para cada formato, existe un módulo para realizar la lectura de datos. En este caso, se analiza el conjunto de datos 'player_data.csv', el cual muestra informacion básica de algunos jugadores de la NBA.
<img src="images/nba_logo.jpg" width="360" height="240" align="center"/>
```
# load data ## crea la ruta
player_data = pd.read_csv(os.path.join('data', 'player_data.csv'), index_col='name')
```
### Módulos básicos
Existen módulos para comprender rápidamente la naturaleza del dataframe.
```
# first 5 rows
print("first 5 rows:")
player_data.head(5)
# last 5 rows
print("\nlast 5 rows:")
player_data.tail(5)
# tipo
print("\ntype of dataframe:")
type(player_data)
# tipo por columns
print("\ntype of columns:")
player_data.dtypes
# dimension
print("\nshape:")
player_data.shape
# columna posicion
print("\ncolumn 'position': ")
player_data['position'].head()
player_data.columns
```
### Exploración de datos
Existen módulos de pandas que realizan resumen de la información que dispone el dataframe.
```
# descripcion
player_data.describe(include='all')
```
### Operando sobre Dataframes
Cuando se trabaja con un conjunto de datos, se crea una dinámica de preguntas y respuestas, en donde a medida que necesito información, se va accediendo al dataframe. En algunas ocaciones es directo, basta un simple módulo, aunque en otras será necesaria realizar operaciones un poco más complejas.
Por ejemplo, del conjunto de datos en estudio, se esta interesado en responder las siguientes preguntas:
### a) Determine si el dataframe tiene valores nulos
```
player_data.notnull().all(axis=1).head(10)
```
### b) Elimine los valores nulos del dataframe
```
player_data = player_data[lambda df: df.notnull().all(axis=1)]
player_data.head()
```
### c) Determinar el tiempo de cada jugador en su posición.
```
# Determinar el tiempo de cada jugador en su posición.
player_data['duration'] = player_data['year_end'] - player_data['year_start']
player_data.head()
```
### d) Castear la fecha de str a objeto datetime
```
# Castear la fecha de str a objeto datetime
player_data['birth_date_dt'] = pd.to_datetime(player_data['birth_date'], format="%B %d, %Y")
player_data.head()
```
### e) Determinar todas las posiciones.
```
# Determinar todas las posiciones.
positions = player_data['position'].unique()
positions
```
### f) Iterar sobre cada posición y encontrar el mayor valor.
```
# Iterate over every position and find the longest career duration for it.
# An explicit dtype avoids the pandas DeprecationWarning raised when an
# empty Series is built with no data and no dtype.
nba_position_duration = pd.Series(dtype='float64')

# iterate over each position
for position in positions:
    # keep only the players at this position
    df_aux = player_data.loc[lambda x: x['position'] == position]
    # longest duration among them
    max_duration = df_aux['duration'].max()
    nba_position_duration.loc[position] = max_duration

nba_position_duration
```
### g) Determine los jugadores más altos de la NBA
```
# Tallest NBA players: convert the 'height' strings ("feet-inches") to cm.
# Vectorized with the .str accessor instead of a row-by-row loop; casting to
# float keeps missing heights as NaN automatically, which replaces the
# original `height_list == height_list` NaN trick (1 ft = 30.48 cm, 1 in = 2.54 cm).
height_split = player_data['height'].str.split('-')
feet = height_split.str[0].astype(float)
inches = height_split.str[1].astype(float)
player_data['height_cm'] = feet * 30.48 + inches * 2.54
max_height = player_data['height_cm'].max()
# Index labels of every player sharing the maximum height.
tallest_player = player_data.loc[lambda x: x['height_cm'] == max_height].index.tolist()
print(tallest_player)
```
## Referencia
1. [Python Pandas Tutorial: A Complete Introduction for Beginners](https://www.learndatasci.com/tutorials/python-pandas-tutorial-complete-introduction-for-beginners/)
2. [General functions](https://pandas.pydata.org/pandas-docs/stable/reference/general_functions.html)
| github_jupyter |
# CODE TO PERFORM SIMPLE LINEAR REGRESSION ON FUEL CONSUMPTION DATASET
# Dr. Ryan @STEMplicity

# PROBLEM STATEMENT
- You have been hired as a consultant to a major Automotive Manufacturer and you have been tasked to develop a model to predict the impact of increasing the vehicle horsepower (HP) on fuel economy (Mileage Per Gallon (MPG)). You gathered the data:
- Data set:
- Independent variable X: Vehicle Horse Power
- Dependent variable Y: Mileage Per Gallon (MPG)
# STEP #1: LIBRARIES IMPORT
```
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
```
# STEP #2: IMPORT DATASET
```
# Import data
fueleconomy_df = pd.read_csv('FuelEconomy.csv')
# Preview data
fueleconomy_df.head(100)
# Preview Data
fueleconomy_df.head(5)
# Preview Data
fueleconomy_df.tail(5)
# Get statistical sumaries
fueleconomy_df.describe()
# summarise data
fueleconomy_df.info()
```
# STEP#3: VISUALIZE DATASET
```
fueleconomy_df.head(4)
# Visualise the data
# Observe the negative relationship
sns.jointplot(x = 'Horse Power', y = 'Fuel Economy (MPG)', data = fueleconomy_df)
sns.jointplot(x = 'Fuel Economy (MPG)', y = 'Horse Power', data = fueleconomy_df)
sns.pairplot(fueleconomy_df)
# Visualise with lmplot x = 'Horse Power', vs y = 'Fuel Economy (MPG)'
sns.lmplot(x = 'Horse Power', y = 'Fuel Economy (MPG)', data = fueleconomy_df)
# Visualise with lmplot x = 'Fuel Economy (MPG)', vs y = 'Horse Power'
sns.lmplot(x = 'Fuel Economy (MPG)', y = 'Horse Power', data = fueleconomy_df)
```
# STEP#4: CREATE TESTING AND TRAINING DATASET
```
X = fueleconomy_df[['Horse Power']]
y = fueleconomy_df['Fuel Economy (MPG)']
X
y
# Shape of X
X.shape
# Implement train, test, split
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3)
X_train.shape
X_test.shape
```
# STEP#5: TRAIN THE MODEL
```
X_train.shape
X_test.shape
# Import relevant modules
from sklearn.linear_model import LinearRegression
# Instantiate a regressor
regressor = LinearRegression(fit_intercept = True)
# fit the model
regressor.fit(X_train, y_train)
# obtain the parameters
print('Linear Model Coeff (m):', regressor.coef_)
print('Linear Model Coeff (b):', regressor.intercept_)
```
# STEP#6: TEST THE MODEL
```
# Passing along our testing data to the regressor
y_predict = regressor.predict(X_test)
y_predict
# Comparing to y-test, which is the true value
y_test
# Visualise the data
plt.scatter(X_train, y_train, color = 'gray')
plt.plot(X_train, regressor.predict(X_train), color = 'blue')
plt.xlabel('Horse Power (HP)')
plt.ylabel('MPG')
plt.title('HP vs. MPG (Training Set)')
plt.scatter(X_test, y_test, color = 'gray')
plt.plot(X_test, regressor.predict(X_test), color = 'blue')
plt.xlabel('Horse Power (HP)')
plt.ylabel('MPG')
plt.title('HP vs. MPG (Testing Set)')
# Predicting MPG based on a given Horse Power.
# sklearn's predict() expects a 2D array of shape (n_samples, n_features),
# so a single value must be wrapped as [[HP]].
HP = 500
MPG = regressor.predict([[HP]])
MPG
HP = int(input("What Horse Power Do you want to Predict?" ))
MPG = regressor.predict([[HP]])
# Round the *prediction* (not the input HP) to two decimals for display;
# predict() returns an array, so take the first element.
twoSigFigs = round(float(MPG[0]), 2)
numToString = str(twoSigFigs)
print ("The Fuel Consumption (MPG) " + numToString + " units.")
```
# EXCELLENT JOB! NOW YOU BECAME EXPERT IN SIMPLE LINEAR REGRESSION
| github_jupyter |
```
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
sns.set_style("whitegrid", {'axes.grid' : False})
import joblib
import catboost
import xgboost as xgb
import lightgbm as lgb
from category_encoders import BinaryEncoder
from sklearn.metrics import mean_squared_error
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import FunctionTransformer
def run_lgbm(X_train, X_test, y_train, y_test, feature_names, categorical_features='auto', model_params=None, fit_params=None, seed=21):
    """Train a LightGBM regressor with early stopping and return (model, y_pred, test MSE).

    model_params / fit_params override the RMSE-objective defaults when given;
    seed is only used when model_params is None.
    """
    # free_raw_data=False keeps the underlying arrays alive so the Dataset
    # objects can be reused after construction.
    X_train_GBM = lgb.Dataset(X_train, label=y_train, feature_name=feature_names, categorical_feature=categorical_features, free_raw_data=False)
    X_test_GBM = lgb.Dataset(X_test, label=y_test, reference=X_train_GBM, feature_name=feature_names, free_raw_data=False)
    if model_params is None:
        # RMSE objective and metric; fixed seed for reproducibility.
        model_params = {'seed': seed, 'num_threads': 16, 'objective':'root_mean_squared_error',
                        'metric': ['root_mean_squared_error'] }
    if fit_params is None:
        # Early stopping on the held-out set: stop after 30 rounds without improvement.
        fit_params = {'verbose_eval': True, 'num_boost_round': 300, 'valid_sets': [X_test_GBM],
                      'early_stopping_rounds': 30,'categorical_feature': categorical_features, 'feature_name': feature_names}
    model = lgb.train(model_params, X_train_GBM, **fit_params)
    # Predict with the best early-stopped iteration, not the final one.
    y_pred = model.predict(X_test, model.best_iteration)
    return model, y_pred, mean_squared_error(y_test, y_pred)
def run_lr(X_train, X_test, y_train, y_test, model_params=None):
    """Fit an ordinary least-squares LinearRegression and return (model, y_pred, test MSE)."""
    if model_params is None:
        model_params = {'n_jobs': 16}
    regressor = LinearRegression(**model_params)
    regressor.fit(X_train, y_train)
    predictions = regressor.predict(X_test)
    mse = mean_squared_error(y_test, predictions)
    return regressor, predictions, mse
def run_etr(X_train, X_test, y_train, y_test, model_params=None, seed=21):
    """Fit an ExtraTreesRegressor (300 trees by default) and return (model, y_pred, test MSE)."""
    if model_params is None:
        # NOTE(review): 'mse' was renamed 'squared_error' in scikit-learn >= 1.0;
        # kept as-is for the pinned environment — confirm before upgrading.
        model_params = {'verbose': 1, 'n_estimators': 300, 'criterion': 'mse',
                        'n_jobs': 16, 'random_state': seed}
    forest = ExtraTreesRegressor(**model_params)
    forest.fit(X_train, y_train)
    predictions = forest.predict(X_test)
    return forest, predictions, mean_squared_error(y_test, predictions)
def run_xgb(X_train, X_test, y_train, y_test, feature_names, model_params=None, fit_params=None, seed=21):
    """Train an XGBoost regressor with early stopping and return (model, y_pred, test MSE).

    model_params / fit_params override the defaults when given; seed is only
    used when model_params is None.
    """
    dtrain = xgb.DMatrix(X_train, y_train, feature_names=feature_names)
    dtest = xgb.DMatrix(X_test, y_test, feature_names=feature_names)
    if model_params is None:
        # 'reg:squarederror' replaces the deprecated alias 'reg:linear'
        # (same squared-loss objective; the old name triggers a warning).
        model_params = {'booster': 'gbtree', 'nthread': 16, 'objective': 'reg:squarederror', 'eval_metric': 'rmse', 'seed': seed,
                        'verbosity': 1}
    if fit_params is None:
        # Stop after 30 rounds without improvement on the eval set.
        fit_params = {'num_boost_round': 300, 'evals': [(dtest, 'eval')], 'early_stopping_rounds': 30}
    model = xgb.train(model_params, dtrain, **fit_params)
    y_pred = model.predict(dtest)
    return model, y_pred, mean_squared_error(y_test, y_pred)
def run_catb(X_train, X_test, y_train, y_test, feature_names, cat_features=None, model_params=None, fit_params=None, predict_params=None, seed=21):
    """Train a CatBoost regressor and return (model, y_pred, test MSE).

    `feature_names` is accepted for signature parity with the other runners;
    CatBoost takes its inputs from the Pool objects, so it is not used here.
    """
    pool_train = catboost.Pool(X_train, y_train, cat_features=cat_features)
    pool_eval = catboost.Pool(X_test, y_test, cat_features=cat_features)
    if model_params is None:
        # RMSE loss/metric, fixed seed for reproducibility.
        model_params = {'n_estimators': 300, 'thread_count': 16, 'loss_function': 'RMSE',
                        'eval_metric': 'RMSE', 'random_state': seed, 'verbose': True}
    if fit_params is None:
        # use_best_model keeps the iteration that scored best on the eval set.
        fit_params = {'use_best_model': True, 'eval_set': pool_eval}
    if predict_params is None:
        predict_params = {'thread_count': 16}
    regressor = catboost.CatBoostRegressor(**model_params)
    regressor.fit(pool_train, **fit_params)
    predictions = regressor.predict(pool_eval, **predict_params)
    return regressor, predictions, mean_squared_error(y_test, predictions)
df_train_dataset = pd.read_pickle('data/df/df_train_dataset.pkl')
df_validation_dataset = pd.read_pickle('data/df/df_validation_dataset.pkl')
continuous_features = joblib.load('data/iterables/continuous_features.joblib')
categorical_features = joblib.load('data/iterables/categorical_features.joblib')
categorical_features_encoded = joblib.load('data/iterables/categorical_features_encoded.joblib')
target_features = joblib.load('data/iterables/target_features.joblib')
target_transformer = joblib.load('models/preprocessing/target_transformer.joblib')
df_train_dataset.shape, df_validation_dataset.shape
X = df_train_dataset[categorical_features_encoded + continuous_features]
y = df_train_dataset[target_features].values.flatten()
print(X.shape, y.shape)
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8, test_size=0.2, shuffle=True, random_state=10)
# https://github.com/scikit-learn/scikit-learn/issues/8723
X_train = X_train.copy()
X_test = X_test.copy()
print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
X_train.reset_index(inplace=True, drop=True)
X_test.reset_index(inplace=True, drop=True)
```
## Linear reg
```
reg_linear, y_pred, score = run_lr(X_train, X_test, y_train, y_test)
print('mse', score, 'rmse', score ** .5)
y_pred_val = reg_linear.predict(df_validation_dataset[categorical_features_encoded + continuous_features].values)
y_pred_val = target_transformer.inverse_transform(np.expand_dims(y_pred_val, axis=1))
```
## Xgb
```
reg_xgb, y_pred, score = run_xgb(X_train, X_test, y_train, y_test, feature_names=categorical_features_encoded + continuous_features)
print('mse', score, 'rmse', score ** .5)
d_val = xgb.DMatrix(df_validation_dataset[categorical_features_encoded + continuous_features].values, feature_names=categorical_features_encoded + continuous_features)
y_pred_val = reg_xgb.predict(d_val)
y_pred_val = target_transformer.inverse_transform(np.expand_dims(y_pred_val, axis=1))
df_validation_dataset[target_features] = y_pred_val
df_validation_dataset[['reservation_id', 'amount_spent_per_room_night_scaled']].to_csv('submission.csv', index=False)
fig, ax = plt.subplots(nrows=1, ncols=1)
fig.set_size_inches(24, 24)
xgb.plot_importance(reg_xgb, ax=ax, max_num_features=100, height=0.5);
```
## Lgbm
```
df_train_dataset = pd.read_pickle('data/df/df_train_dataset.pkl')
df_validation_dataset = pd.read_pickle('data/df/df_validation_dataset.pkl')
continuous_features = joblib.load('data/iterables/continuous_features.joblib')
categorical_features = joblib.load('data/iterables/categorical_features.joblib')
target_features = joblib.load('data/iterables/target_features.joblib')
target_transformer = joblib.load('models/preprocessing/target_transformer.joblib')
df_train_dataset.shape, df_validation_dataset.shape
X = df_train_dataset[categorical_features + continuous_features]
y = df_train_dataset[target_features].values.flatten()
print(X.shape, y.shape)
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8, test_size=0.2, shuffle=True, random_state=10)
# https://github.com/scikit-learn/scikit-learn/issues/8723
X_train = X_train.copy()
X_test = X_test.copy()
print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
X_train.reset_index(inplace=True, drop=True)
X_test.reset_index(inplace=True, drop=True)
feature_names = categorical_features + continuous_features
reg_lgbm, y_pred, score = run_lgbm(X_train, X_test, y_train, y_test, feature_names, categorical_features)
print('mse', score, 'rmse', score ** .5)
y_pred_val = reg_lgbm.predict(df_validation_dataset[categorical_features + continuous_features].values, reg_lgbm.best_iteration)
y_pred_val = target_transformer.inverse_transform(np.expand_dims(y_pred_val, axis=1))
df_validation_dataset[target_features] = y_pred_val
df_validation_dataset[['reservation_id', 'amount_spent_per_room_night_scaled']].to_csv('submission.csv', index=False)
fig, ax = plt.subplots(nrows=1, ncols=1)
fig.set_size_inches(24, 24)
lgb.plot_importance(reg_lgbm, ax=ax, height=0.5, max_num_features=100);
```
## Catboost
```
feature_names = categorical_features + continuous_features
cat_features = [i for i, c in enumerate(feature_names) if c in categorical_features]
reg_catb, y_pred, score = run_catb(X_train, X_test, y_train, y_test, feature_names, cat_features)
print('mse', score, 'rmse', score ** .5)
feature_names = categorical_features + continuous_features
cat_features = [i for i, c in enumerate(feature_names) if c in categorical_features]
val_pool = catboost.Pool(df_validation_dataset[categorical_features + continuous_features].values, feature_names=feature_names, cat_features=cat_features)
y_pred_val = reg_catb.predict(val_pool)
y_pred_val = target_transformer.inverse_transform(np.expand_dims(y_pred_val, axis=1))
df_validation_dataset[target_features] = y_pred_val
df_validation_dataset[['reservation_id', 'amount_spent_per_room_night_scaled']].to_csv('submission.csv', index=False)
```
| github_jupyter |
## Tutorial on QAOA Compiler
This tutorial shows how to use the QAOA compiler for QAOA circuit compilation and optimization. (https://github.com/mahabubul-alam/QAOA-Compiler).
### Inputs to the QAOA Compiler
The compiler takes three json files as the inputs. The files hold the following information:
* ZZ-interactions and their corresponding coefficients in the problem Hamiltonian (https://github.com/mahabubul-alam/QAOA-Compiler/blob/main/examples/QAOA_circ.json)
* Target hardware supported gates and corresponding reliabilities (https://github.com/mahabubul-alam/QAOA-Compiler/blob/main/examples/QC.json)
* Configurations for compilation (e.g., target p-value, routing method, random seed, etc.) (https://github.com/mahabubul-alam/QAOA-Compiler/blob/main/examples/Config.json)

The input ZZ-interactions json file content for the above graph MaxCut problem is shown below:
```
{
"(0, 2)": "-0.5", // ZZ-interaction between the qubit pair (0, 2) with a coefficient of -0.5
"(1, 2)": "-0.5"
}
```
A script is provided under utils (https://github.com/mahabubul-alam/QAOA-Compiler/blob/main/utils/construct_qaoa_circ_json.py) to generate this input json file for arbitrary unweighted graph.
The hardware json file must have the following information:
* Supported single-qubit gates
* Supported two-qubit gates
* Reliabilities of the supported single-qubit operations
* Reliabilities of the supported two-qubit operations <br>
A json file's content for a hypothetical 3-qubit hardware is shown below:
```
{
"1Q": [ //native single-qubit gates of the target hardware
"u3"
],
"2Q": [ //native two-qubit gate of the target hardware
"cx"
],
"u3": {
"0": 0.991, //"qubit" : success probability of u3
"1": 0.995,
"2": 0.998
},
"cx": {
"(0,1)": 0.96, //"(qubit1,qubit2)" : success probability of cx between qubit1, qubit2 (both directions)
"(1,2)": 0.97,
"(2,0)": 0.98
}
}
```
A script is provided under utils (https://github.com/mahabubul-alam/QAOA-Compiler/blob/main/utils/construct_qc.py) to generate json files for the quantum processors from IBM.
The configuration json file should hold the following information:
* Target backend compiler (currently supports qiskit)
* Target p-value
* Packing Limit (see https://www.microarch.org/micro53/papers/738300a215.pdf)
* Target routing method (any routing method that are supported by the qiskit compiler, e.g., sabre, stochastic_swap, basic_swap, etc.)
* Random seed for the qiskit transpiler
* Chosen optimization level for the qiskit compiler (0~3)
The content of a sample configuration json file is shown below:
```
{
"Backend" : "qiskit",
"Target_p" : "1",
"Packing_Limit" : "10e10",
"Route_Method" : "sabre",
"Trans_Seed" : "0",
"Opt_Level" : "3"
}
```
### How to Run
```
python run.py -arg arg_val
```
* -device_json string (mandatory): Target device configuration file location. This file holds the information on basis gates, reliability, and allowed two-qubit operations. It has to be written in json format. An example can be found [here](https://github.com/mahabubul-alam/QAOA_Compiler/blob/main/examples/QC.json).
* -circuit_json string (mandatory): Problem QAOA-circuit file location. This file holds the required ZZ interactions between various qubit-pairs to encode the cost hamiltonian. It has to be written in json format. An example can be found [here](https://github.com/mahabubul-alam/QAOA_Compiler/blob/main/examples/QAOA_circ.json).
* -config_json string (mandatory): Compiler configuration file location. This file holds target p-level, and chosen packing limit, qiskit transpiler seed, optimization level, and routing method. It has to be written in json format. An example can be found [here](https://github.com/mahabubul-alam/QAOA_Compiler/blob/main/examples/Config.json).
* -policy_compilation string: Chosen compilation policy. The current version supports the following policies: Instruction Parallelization-only ('IP'), Iterative Compilation ('IterC'), Incremental Compilation ('IC'), Variation-aware Incremental Compilation ('VIC'). The default value is 'IC'.
* -target_IterC string: Minimization objective of Iterative Compilation. The current version supports the following minimization objectives: Circuit Depth ('D'), Native two-qubit gate-count ('GC_2Q'), Estimated Success Probability ('ESP'). The default value is 'GC_2Q'.
* -initial_layout_method string: The chosen initial layout method. Currently supported methods: 'vqp', 'qaim', 'random'. The default method is 'qaim'.
* -output_qasm_file_name string: File name to write the compiled parametric QAOA circuit. The output is written in qasm format. The default value is 'QAOA.qasm'. The output qasm files are written following this naming style: {Method(IP/IC/VIC/IterC)}_{output_qasm_file_name}.
```
!python run.py -device_json examples/QC.json -circuit_json examples/QAOA_circ.json -config_json examples/Config.json -policy_compilation VIC -initial_layout_method vqp
```
### Output QAOA Circuits
The tool generates 3 QASM files:
* The uncompiled circuit (https://github.com/mahabubul-alam/QAOA-Compiler/blob/main/uncompiled_QAOA.qasm)
* Compiled QAOA circuit with conventional approach (https://github.com/mahabubul-alam/QAOA-Compiler/blob/main/naive_compiled_QAOA.qasm)
* Optimized QAOA circuit with chosen set of optimization policies (https://github.com/mahabubul-alam/QAOA-Compiler/blob/main/VIC_QAOA.qasm)
A sample QASM file is shown below:
```
!cat VIC_QAOA.qasm
```
| github_jupyter |
# Session 7: The Errata Review No. 1
This session is a review of the prior six sessions and covering those pieces that were left off. Not necessarily errors, but missing pieces to complete the picture from the series. These topics answer some questions and will help complete the picture of the C# language features discussed to this point.
## Increment and Assignment operators
In session 1, we reviewed operators and interacting with numbers. We skipped the [increment](https://docs.microsoft.com/en-us/dotnet/csharp/language-reference/operators/arithmetic-operators?WT.mc_id=visualstudio-twitch-jefritz#increment-operator-) `++` and [decrement](https://docs.microsoft.com/en-us/dotnet/csharp/language-reference/operators/arithmetic-operators?WT.mc_id=visualstudio-twitch-jefritz#decrement-operator---) `--` operators. These operators allow you to increment and decrement values quickly. You can place these operators before and after the variable you would like to act on, and they will be incremented or decremented before or after being returned.
Let's take a look:
```
var counter = 1;
display(counter--); // -- AFTER the variable returns the current value first, so this displays 1
display(counter); // the decrement has taken effect by now, so this displays 0
var counter = 1;
display(--counter); // -- BEFORE the variable decrements first, so this displays 0
// (the variable is updated before its value is returned)
```
## Logical negation operator
Sometimes you want to invert the value of a boolean, converting from `true` to `false` and from `false` to `true`. Quite simply, just prefix your test or boolean value with the [negation operator](https://docs.microsoft.com/en-us/dotnet/csharp/language-reference/operators/boolean-logical-operators?WT.mc_id=visualstudio-twitch-jefritz#logical-negation-operator-) `!` to invert values
```
var isTrue = true;
display(!isTrue);
display(!(1 > 2))
```
## TypeOf, GetType and NameOf methods
Sometimes you need to work with the type of a variable or the name of a value. The methods `typeof`, `GetType()` and `nameof` allow you to interact with the types and pass them along for further interaction.
[typeof](https://docs.microsoft.com/en-us/dotnet/csharp/language-reference/operators/type-testing-and-cast?WT.mc_id=visualstudio-twitch-jefritz#typeof-operator) allows you to get a reference to a type for use in methods where you need to inspect the underlying type system
```
display(typeof(int));
```
Conversely, the `GetType()` method allows you to get the type information for a variable already in use. Every object in C# has the `GetType()` method available.
```
var myInt = 5;
display(myInt.GetType());
```
The [`nameof` expression](https://docs.microsoft.com/en-us/dotnet/csharp/language-reference/operators/nameof?WT.mc_id=visualstudio-twitch-jefritz) gives the name of a type or member as a string. This is particularly useful when you are generating error messages.
```
class People {
public string Name { get; set; }
public TimeSpan CalculateAge() => DateTime.Now.Subtract(new DateTime(2000,1,1));
}
var fritz = new People { Name="Fritz" };
display(nameof(People));
display(typeof(People));
display(nameof(fritz.Name));
```
## String Formatting
Formatting and working with strings or text is a fundamental building block of working with user-input. We failed to cover the various ways to interact with those strings. Let's take a look at a handful of the ways to work with text data.
## Concatenation
You may have seen notes and output that concatenates strings by using the [`+` operator](https://docs.microsoft.com/en-us/dotnet/csharp/language-reference/operators/addition-operator?WT.mc_id=visualstudio-twitch-jefritz#string-concatenation). This is the simplest form of concatenation and only works when both sides of the `+` operator are strings.
```
var greeting = "Hello";
display(greeting + " World!");
// += also works
greeting += " C# developers";
display(greeting);
```
If you have multiple strings to combine, the `+` operator gets a little unwieldy and is not as performance aware as several other techniques. We can [combine multiple strings](https://docs.microsoft.com/en-us/dotnet/csharp/how-to/concatenate-multiple-strings?WT.mc_id=visualstudio-twitch-jefritz) using the `Concat`, `Join`, `Format` and interpolation features of C#.
```
var greeting = "Good";
var time = DateTime.Now.Hour < 12 && DateTime.Now.Hour > 3 ? "Morning" : DateTime.Now.Hour < 17 ? "Afternoon" : "Evening";
var name = "Visual Studio Channel";
// Use string.concat with a comma separated list of arguments
display(string.Concat(greeting, " ", time, " ", name + "!"));
var terms = new [] {greeting, time, name};
// Use string.Join to assembly values in an array with a separator
display(string.Join(" ", terms));
// Use string.Format to configure a template string and load values into it based on position
var format = "Good {1} {0}";
display(string.Format(format, time, name));
// With C# 7 and later you can now use string interpolation to format a string.
// Simply prefix a string with a $ to allow you to insert C# expressions in { } inside
// a string
var names = new string[] {"Fritz", "Scott", "Maria", "Jayme"};
display($"Good {time} {name} {string.Join(",",names)}");
// Another technique that can be used when you don't know the exact number of strings
// to concatenate is to use the StringBuilder class.
var sb = new StringBuilder();
sb.AppendFormat("Good {0}", time);
sb.Append(" ");
sb.Append(name);
display(sb.ToString());
```
### Parsing strings with Split
You can turn a string into an array of strings using the `Split` method on a string variable. Pass the character that identifies the boundary between elements of your array to turn it into an array:
```
var phrase = "Good Morning Cleveland";
display(phrase.Split(' '));
display(phrase.Split(' ')[2]);
var fibonacci = "1,1,2,3,5,8,13,21";
display(fibonacci.Split(','));
```
## A Deeper Dive on Enums
We briefly discussed enumeration types in session 3 and touched on using the `enum` keyword to represent related values. Let's go a little further into conversions and working with the enum types.
### Conversions
[Enum types](https://docs.microsoft.com/en-us/dotnet/csharp/language-reference/builtin-types/enum?WT.mc_id=visualstudio-twitch-jefritz) are extensions on top of numeric types. By default, they wrap the `int` integer data type. While this base numeric type can be overridden, we can also convert data into and out of the enum using standard explicit conversion operators
```
enum DotNetLanguages : byte {
csharp = 100,
visual_basic = 2,
fsharp = 3
}
var myLanguage = DotNetLanguages.csharp;
display(myLanguage);
display((byte)myLanguage);
display((int)myLanguage);
// Push a numeric type INTO DotNetLanguages
myLanguage = (DotNetLanguages)2;
display(myLanguage);
```
### Working with strings using Parse and TryParse
What about the string value of the enumeration itself? We can work with that using the [`Parse`](https://docs.microsoft.com/en-us/dotnet/api/system.enum.parse?view=netcore-3.1&WT.mc_id=visualstudio-twitch-jefritz) and [`TryParse`](https://docs.microsoft.com/en-us/dotnet/api/system.enum.tryparse?view=netcore-3.1&WT.mc_id=visualstudio-twitch-jefritz) methods of the Enum object to convert a string into the Enum type
```
var thisLanguage = "csharp";
myLanguage = Enum.Parse<DotNetLanguages>(thisLanguage);
display(myLanguage);
// Use the optional boolean flag parameter to indicate if the Parse operation is case-insensitive
thisLanguage = "CSharp";
myLanguage = Enum.Parse<DotNetLanguages>(thisLanguage, true);
display(myLanguage);
// TryParse has a similar signature, but returns a boolean to indicate success
var success = Enum.TryParse<DotNetLanguages>("Visual_Basic", true, out var foo);
display(success);
display(foo);
```
### GetValues and the Enumeration's available values
The constant values of the enum type can be exposed using the [Enum.GetValues](https://docs.microsoft.com/en-us/dotnet/api/system.enum.getvalues?view=netcore-3.1&WT.mc_id=visualstudio-twitch-jefritz) method. This returns an array of the numeric values of the enum. Let's inspect our `DotNetLanguages` type:
```
var languages = Enum.GetValues(typeof(DotNetLanguages));
display(languages);
// We can convert back to the named values of the enum with a little conversion
foreach (var l in languages) {
display((DotNetLanguages)l);
}
```
## Classes vs. Structs
In the second session we introduced the `class` keyword to create reference types. There is another keyword, `struct`, that allows you to create [Structure](https://docs.microsoft.com/en-us/dotnet/csharp/language-reference/builtin-types/struct?WT.mc_id=visualstudio-twitch-jefritz) **value types** which will be allocated in memory and reclaimed more quickly than a class. While a `struct` looks like a class in syntax, there are some constraints:
- A constructor must be defined that configures all properties / fields
- The parameterless constructor is not allowed
- Instance Fields / Properties cannot be assigned in their declaration
- Finalizers are not allowed
- A struct cannot inherit from another type, but can implement interfaces
Structs are typically used to store related numeric types. Let's tinker with an example:
```
struct Rectangle {
public Rectangle(int length, int width) {
this.Length = length;
this.Width = width;
}
public static readonly int Depth = DateTime.Now.Minute;
public int Length {get;set;}
public int Width {get;set;}
public int Area { get { return Length * Width;}}
public int Perimeter { get { return Length*2 + Width*2;}}
}
var myRectangle = new Rectangle(2, 5);
display(myRectangle);
display(Rectangle.Depth);
enum CountryCode {
USA = 1
}
struct PhoneNumber {
public PhoneNumber(CountryCode countryCode, string exchange, string number) {
this.CountryCode = countryCode;
this.Exchange = exchange;
this.Number = number;
}
public CountryCode CountryCode { get; set;}
public string Exchange { get; set;}
public string Number {get; set;}
}
var jennysNumber = new PhoneNumber(CountryCode.USA, "867", "5309");
display(jennysNumber);
```
### When should I use a struct instead of a class?
This is a common question among C# developers. How do you decide? Since a `struct` is a simple value type, there are [several guidelines to help you decide](https://docs.microsoft.com/en-us/dotnet/standard/design-guidelines/choosing-between-class-and-struct?WT.mc_id=visualstudio-twitch-jefritz):
**Choose a struct INSTEAD of a class if all of these are true about the type:**
- It will be small and short-lived in memory
- It represents a single value
- It can be represented in 16 bytes or less
- It will not be changed, and is immutable
- You will not be converting it to a class (called `boxing` and `unboxing`)
## Stopping and Skipping Loops
In session four we learned about loops using `for`, `while`, and `do`. We can speed up our loop by moving to the next iteration in the loop and we can stop a loop process completely using the `continue` and `break` keywords. Let's take a look at some examples:
```
for (var i=1; i<10_000_000; i++) {
display(i);
if (i%10 == 0) break; // Stop if the value is a multiple of 10
}
// We can skip an iteration in the loop using the continue keyword
for (var i = 1; i<10_000_000; i++) {
if (i%3 == 0) continue; // Skip this iteration
display(i);
if (i%10 == 0) break;
}
```
## Initializing Collections
In the fifth session we explored Arrays, Lists, and Dictionary types. We saw that you could initialize an array with syntax like the following:
```
var fibonacci = new int[] {1,1,2,3,5,8,13};
display(fibonacci);
//var coordinates = new int[,] {{1,2}, {2,3}};
//display(coordinates);
```
We can also initialize List and Dictionary types using the curly braces notation:
```
var myList = new List<string> {
"C#",
"Visual Basic",
"F#"
};
display(myList);
var myShapes = new List<Rectangle> {
new Rectangle(2, 5),
new Rectangle(3, 4),
new Rectangle(4, 3)
};
display(myShapes);
var myDictionary = new Dictionary<int, string> {
{100, "C#"},
{200, "Visual Basic"},
{300, "F#"}
};
display(myDictionary);
```
## Dictionary Types
This question was raised on an earlier stream, and [David Fowler](https://twitter.com/davidfowl) (.NET Engineering Team Architect) wrote a series of [Tweets](https://twitter.com/davidfowl/status/1444467842418548737) about it based on discussions with [Stephen Toub](https://twitter.com/stephentoub/) (Architect for the .NET Libraries)
There are 4 built-in Dictionary types with .NET:
- [Hashtable](https://docs.microsoft.com/dotnet/api/system.collections.hashtable)
- This is the most efficient dictionary type with keys organized by the hash of their values.
- David says: "Good read speed (no lock required), sameish weight as dictionary but more expensive to mutate and no generics!"
```
var hashTbl = new Hashtable();
hashTbl.Add("key1", "value1");
hashTbl.Add("key2", "value2");
display(hashTbl);
```
- [Dictionary](https://docs.microsoft.com/dotnet/api/system.collections.generic.dictionary-2)
- A generic dictionary object that you can search easily by keys.
- David says: "Lightweight to create and 'medium' update speed. Poor read speed when used with a lock. As an immutable object it has the best read speed and heavy to update."
```
// Field-backed Dictionary<string,string> built with a collection initializer.
private readonly Dictionary<string,string> readonlyDictionary = new() {
    {"txt", "Text Files"},
    {"wav", "Sound Files"},
    {"mp3", "Compressed Music Files"},
};
display(readonlyDictionary);
// 'readonly' protects the field reference, not the contents, so entries can
// still be added to the dictionary instance itself.
readonlyDictionary.Add("mp4", "Video Files");
display(readonlyDictionary); // fix: original referenced undefined 'newDictionary'
```
- [ConcurrentDictionary](https://docs.microsoft.com/dotnet/api/system.collections.concurrent.concurrentdictionary-2)
- A thread-safe version of Dictionary that is optimized for use by multiple threads. It is not recommended for use by a single thread due to the extra overhead allocate for multi-threaded support.
- David says: "Poorish read speed, no locking required but more allocations require to update than a dictionary."
Instead of Adding, Updating and Getting values from the ConcurrentDictionary, we TryAdd, TryUpdate, and TryGetValue. TryAdd will return false if the key already exists, and TryUpdate will return false if the key does not exist. TryGetValue will return false if the key does not exist. We can also AddOrUpdate to add a value if the key does not exist and GetOrAdd to add a value if the key does not exist.
```
using System.Collections.Concurrent;
// AddOrUpdate: the second argument is the value to add when the key is absent;
// the third is an update callback invoked with the existing value otherwise.
var cd = new ConcurrentDictionary<string, string>();
cd.AddOrUpdate("key1", "value1", (key, oldValue) => "value2"); // "key1" absent: adds "value1"
cd.AddOrUpdate("key1", "value1", (key, oldValue) => "value2"); // "key1" present: updated to "value2"
display(cd.TryAdd("key2", "value1")); // true -- "key2" was not present yet
display(cd);
```
- [ImmutableDictionary](https://docs.microsoft.com/dotnet/api/system.collections.immutable.immutabledictionary-2)
- A new type in .NET Core and .NET 5/6 that is a read-only version of Dictionary. Changes to its contents involve creation of a new Dictionary object and copying of the contents.
- David says: "Poorish read speed, no locking required but more allocations required to update than a dictionary."
```
using System.Collections.Immutable;
// Collect the entries with a mutable builder first, then freeze them.
var d = ImmutableDictionary.CreateBuilder<string,string>();
d.Add("key1", "value1");
d.Add("key2", "value2");
var theDict = d.ToImmutable();
// Add on an ImmutableDictionary returns a NEW dictionary; uncommenting the
// next line would rebind theDict rather than mutate the frozen contents.
//theDict = theDict.Add("key3", "value3");
display(theDict);
```
## Const and Static keywords
```
// const values are baked in at compile time; uncommenting the reassignment
// below is a compile error.
const int Five = 5;
// Five = 6;
display(Five);
class Student {
// consts are implicitly static: accessed through the type, not an instance.
public const decimal MaxGPA = 5.0m;
}
display(Student.MaxGPA);
// NOTE(review): this re-declares Student; in the original notebook each
// declaration lived in its own cell, where the later one replaces the earlier.
// Compiling both in one cell would be a duplicate-type error -- confirm split.
class Student {
// static field: one value shared by ALL Student instances.
public static bool InClass = false;
public string Name { get; set; }
public override string ToString() {
return Name + ": " + Student.InClass;
}
// Toggling the shared flag changes what every instance's ToString reports.
public static void GoToClass() {
Student.InClass = true;
}
public static void DitchClass() {
Student.InClass = false;
}
}
var students = new Student[] { new Student { Name="Hugo" }, new Student {Name="Fritz"}, new Student {Name="Lily"}};
foreach (var s in students) {
display(s.ToString());
}
// Flip the shared static flag once; every student now reports "True".
Student.GoToClass();
foreach (var s in students) {
display(s.ToString());
}
// A static class can hold only static members and cannot be instantiated.
static class DateMethods {
public static int CalculateAge(DateTime date1, DateTime date2) {
return 10; // placeholder implementation
}
}
// NOTE(review): missing trailing ';' -- valid only as the final expression of
// an interactive script cell, where its value is echoed.
display(DateMethods.CalculateAge(DateTime.Now, DateTime.Now))
```
| github_jupyter |
```
"""
Made on July 10th, 2019
@author: Theodore Pena
@contact: theodore.pena@tufts.edu
"""
line_color = 'purple' # Color for the 10-panel plots
x_Delta = np.log10(54) # In our time units, the time between SDSS and HSC
default_Delta_value = -0.0843431604042636
data_path = '/home/tpena01/AGN_variability_project/Simulations/light_curves/v_bend_in_times_10/results_v_bend_in_times_10_{}.bin'
```
# Setup and imports
```
import sys
print("sys version: {}".format(sys.version))
# This project is entirely in python 3.7
import matplotlib
import matplotlib.pyplot as plt
%matplotlib qt
# If you don't have an X server, line 7 might crash your kernel. Try '%matplotlib inline' instead.
import numpy as np
print("numpy version: {}".format(np.__version__))
from tqdm import tqdm
# This gives for loops progress bars.
import random
# This helps choosing random numbers from our arrays
random.seed() # Randomize seed
from IPython.core.display import display, HTML
# An alternate, cleaner take on the jupyter workspace
display(HTML("<style>.container { width:100% !important; }</style>"))
```
# Data extraction function
```
def extract_data(brightest_percent_lower=1, brightest_percent_upper=0, num_random_points=1000, t_max=10000, length_curve=2**24, num_curves=20):
    """
    Measure how the log Eddington ratio drifts away from bright starting points.

    Starting points are drawn from the brightest `brightest_percent_lower`% of
    each simulated light curve, excluding the brightest
    `brightest_percent_upper`%.  Percentages are "reversed": the lower bound is
    the LARGER percentage (100% is a reasonable lower bound; the upper bound
    defaults to 0, i.e. exclude nothing).

    Parameters
    ----------
    brightest_percent_lower : float
        Percentile cut selecting the faint edge of the brightness band.
    brightest_percent_upper : float
        Percentile cut excluding the very brightest points.
    num_random_points : int
        Number of starting points sampled per curve.
    t_max : int
        Largest time lag (in simulation time units) to follow each point.
    length_curve : int
        Nominal curve length; retained for interface compatibility (the actual
        length now comes from the file itself).
    num_curves : int
        Number of results_*.bin files to load (files are numbered from 1).

    Returns
    -------
    tuple
        (t_log, means, stands, best_fit, smallest_point): log10 lag grid, mean
        and standard deviation of Delta log(Edd) at each lag, np.poly1d linear
        fit to the means, and a label string with the smallest selected value.
    """
    if brightest_percent_lower <= brightest_percent_upper:
        # Fixed message: the guard fires when lower <= upper, not the reverse.
        sys.exit('Can\'t have an interval where the lower bound is less than or equal to the upper bound. Remember, things are reversed. 100% is a reasonable lower bound, and brightest_percent_upper defaults to zero.')
    # Load the data (the results files are numbered starting at 1, not 0)
    default_curves = []
    for i in tqdm(range(1, num_curves + 1)):
        default_curves.append(np.fromfile(data_path.format(str(i))))
    default_curves = np.array(default_curves)
    default_curves = np.log10(default_curves)  # move everything into the log domain
    # Drop the last t_max samples so every chosen index can be advanced by up to t_max
    cut_curves = default_curves[:, :-t_max]
    ##
    # Select all points brighter than brightest_percent_lower%
    num_brightest_lower = int(np.floor(cut_curves[0].shape[0] * (brightest_percent_lower / 100)))
    if brightest_percent_lower == 100:
        num_brightest_lower = cut_curves[0].shape[0]
    if brightest_percent_lower == 0:
        sys.exit('Cannot use 0 as a lower bound.')
    indices_lower = []
    for i in tqdm(range(num_curves)):
        indices_lower.append(np.argpartition(cut_curves[i, :], -num_brightest_lower)[-num_brightest_lower:])
    indices_lower = np.array(indices_lower)
    # Select all points brighter than brightest_percent_upper% (to be excluded)
    num_brightest_upper = int(np.floor(cut_curves[0].shape[0] * (brightest_percent_upper / 100)))
    if brightest_percent_upper == 100:
        num_brightest_upper = cut_curves[0].shape[0]
    if brightest_percent_upper == 0:
        # Nothing to exclude: one empty exclusion set per curve
        indices_upper = [np.array([]) for _ in range(num_curves)]
    else:
        indices_upper = []
        for i in tqdm(range(num_curves)):
            indices_upper.append(np.argpartition(cut_curves[i, :], -num_brightest_upper)[-num_brightest_upper:])
        indices_upper = np.array(indices_upper)
    # Keep only lower-cut indices that are not also in the upper cut
    indices = [np.setdiff1d(indices_lower[i], indices_upper[i], assume_unique=True)
               for i in range(num_curves)]
    ##
    # Randomly sample num_random_points starting points per curve
    chosen_indices = []
    for brightest_points_in_curve in tqdm(indices):
        chosen_indices.append(random.sample(list(brightest_points_in_curve), num_random_points))
    chosen_indices = np.array(chosen_indices, dtype=int)
    # Smallest selected value (printed on the plots later)
    small_points = [np.min(cut_curves[i][chosen_indices[i]]) for i in range(num_curves)]
    smallest_point = "Min log(Edd): " + str(np.min(small_points))[:6]
    # Log-spaced lags from 1 to t_max.  The sample count must be an int --
    # modern numpy rejects a float `num` in np.logspace.
    # NOTE(review): np.log here is the NATURAL log (~93 samples for t_max=1e4);
    # np.log10 may have been intended -- kept as-is to preserve behaviour.
    t_examine = np.logspace(0, np.log10(t_max), int(np.log(t_max) * 10 + 1)).astype(int)
    t_log = np.log10(t_examine)  # used later
    t_array = np.tile(t_examine, (num_random_points, 1))
    master_array = np.zeros(t_examine.shape, dtype=int)  # seed row, removed below
    for i in tqdm(range(num_curves)):
        # Each row: one starting index advanced by every lag in t_examine
        indices_array = np.tile(chosen_indices[i, :], (t_array.shape[1], 1)).T
        indices_array = indices_array + t_array
        master_array = np.vstack((default_curves[i][indices_array], master_array))
    master_array = np.delete(master_array, -1, 0)  # drop the all-zero seed row
    # Re-reference every track to its value at lag zero
    master_array = master_array - master_array[:, [0]]
    # Mean and standard deviation of Delta log(Edd) at each lag
    means = np.mean(master_array, axis=0)
    stands = np.std(master_array, axis=0)
    # Line of best fit through the means (skipping the zero-lag point)
    best_fit = np.poly1d(np.polyfit(t_log.astype(float)[1:], means.astype(float)[1:], 1))
    return (t_log, means, stands, best_fit, smallest_point)
```
# Main
```
# Sweep ten nested brightness cuts, from all points (100%) down to the very
# brightest 0.006% of samples.  Each call re-loads the 20 simulated curves and
# returns (log-lag grid, mean Delta log(Edd), std dev, linear fit, min label).
t_log100, means100, stands100, best_fit100, smallest_point100 = extract_data(brightest_percent_lower=100, brightest_percent_upper=50)
t_log50, means50, stands50, best_fit50, smallest_point50 = extract_data(brightest_percent_lower=50, brightest_percent_upper=10)
t_log10, means10, stands10, best_fit10, smallest_point10 = extract_data(brightest_percent_lower=10, brightest_percent_upper=5)
t_log5, means5, stands5, best_fit5, smallest_point5 = extract_data(brightest_percent_lower=5, brightest_percent_upper=1)
t_log1, means1, stands1, best_fit1, smallest_point1 = extract_data(brightest_percent_lower=1, brightest_percent_upper=0.5)
t_log05, means05, stands05, best_fit05, smallest_point05 = extract_data(brightest_percent_lower=0.5, brightest_percent_upper=0.1)
t_log01, means01, stands01, best_fit01, smallest_point01 = extract_data(brightest_percent_lower=0.1, brightest_percent_upper=0.05)
t_log005, means005, stands005, best_fit005, smallest_point005 = extract_data(brightest_percent_lower=0.05, brightest_percent_upper=0.02)
t_log002, means002, stands002, best_fit002, smallest_point002 = extract_data(brightest_percent_lower=0.02, brightest_percent_upper=0.006)
t_loginf, meansinf, standsinf, best_fitinf, smallest_pointinf = extract_data(brightest_percent_lower=0.006)
# One (t_log, means, stands) triple per percentile cut, ordered from the 100%
# cut down to the smallest possible (0.006%) cut.
_cut_results = [
    (t_log100, means100, stands100),
    (t_log50, means50, stands50),
    (t_log10, means10, stands10),
    (t_log5, means5, stands5),
    (t_log1, means1, stands1),
    (t_log05, means05, stands05),
    (t_log01, means01, stands01),
    (t_log005, means005, stands005),
    (t_log002, means002, stands002),
    (t_loginf, meansinf, standsinf),
]

def _at_lag(values, t_log, lag):
    """Value of `values` at the first sample where t_log equals `lag` exactly."""
    return values[np.where(t_log == lag)[0][0]]

# Mean Delta log(Edd) at log-lags 1..4, one entry per percentile cut.
log_t_1 = [_at_lag(m, t, 1) for t, m, s in _cut_results]
log_t_2 = [_at_lag(m, t, 2) for t, m, s in _cut_results]
log_t_3 = [_at_lag(m, t, 3) for t, m, s in _cut_results]
log_t_4 = [_at_lag(m, t, 4) for t, m, s in _cut_results]
# log10 of the structure function (std dev) at the same lags.
log_t_1_stands = np.log10(np.array([_at_lag(s, t, 1) for t, m, s in _cut_results]))
log_t_2_stands = np.log10(np.array([_at_lag(s, t, 2) for t, m, s in _cut_results]))
log_t_3_stands = np.log10(np.array([_at_lag(s, t, 3) for t, m, s in _cut_results]))
log_t_4_stands = np.log10(np.array([_at_lag(s, t, 4) for t, m, s in _cut_results]))
# x axis for the 4-line plots: log10 of each percentile cut.
x = np.log10(np.array([100, 50, 10, 5, 1, 0.5, 0.1, 0.05, 0.02, 0.006]))
```
# Delta value
```
# x_Delta = log10(54) generally falls BETWEEN the sampled log-lags, so the
# original exact-equality lookup (t_log05 == x_Delta) could match nothing and
# crash with an IndexError.  Use the closest sampled lag instead; when an
# exact match exists this picks the very same element.
closest_lag = int(np.argmin(np.abs(t_log05 - x_Delta)))
Delta_value = means05[closest_lag]
print('For this set of parameters, Delta is ' + str(Delta_value - default_Delta_value) + '.')
print('Remember, a negative delta (approximately) means that the curve was steeper than the default plot of log Edd. Ratio as a function of time.')
```
# Graphs
## 10-panel plots
```
# Delta Eddington ratio plots: ten panels, one per percentile cut.
with plt.style.context('seaborn-paper'):
    fig, ax = plt.subplots(5, 2, figsize=(20, 10), sharex=True, sharey=True, gridspec_kw={'width_ratios': [1, 1], 'wspace':0, 'left':0.04, 'right':0.96, 'bottom':0.05, 'top':0.92})
    fig.suptitle('Data from: ' + data_path)
    # (row, col, title, t_log, means, stands, best_fit, min-value label)
    panels = [
        (0, 0, '100%', t_log100, means100, stands100, best_fit100, smallest_point100),
        (0, 1, '50%', t_log50, means50, stands50, best_fit50, smallest_point50),
        (1, 0, '10%', t_log10, means10, stands10, best_fit10, smallest_point10),
        (1, 1, '5%', t_log5, means5, stands5, best_fit5, smallest_point5),
        (2, 0, '1%', t_log1, means1, stands1, best_fit1, smallest_point1),
        (2, 1, '0.5%', t_log05, means05, stands05, best_fit05, smallest_point05),
        (3, 0, '0.1%', t_log01, means01, stands01, best_fit01, smallest_point01),
        (3, 1, '0.05%', t_log005, means005, stands005, best_fit005, smallest_point005),
        (4, 0, '0.02%', t_log002, means002, stands002, best_fit002, smallest_point002),
        (4, 1, 'As small as possible (0.006%)', t_loginf, meansinf, standsinf, best_fitinf, smallest_pointinf),
    ]
    for row, col, title, t_lag, mean_vals, std_vals, fit, label in panels:
        panel = ax[row, col]
        panel.set_title(title, fontsize=13)
        panel.tick_params(direction='in', length=6, width=1.5)
        for side in ('top', 'right', 'bottom', 'left'):
            panel.spines[side].set_linewidth(1.5)
        # Axis labels only appear on the bottom row / bottom-left panel.
        if row == 4:
            panel.set_xlabel('log(t/time units)', fontsize=13)
        if (row, col) == (4, 0):
            panel.set_ylabel(r'Mean $\Delta$log(Edd. Ratio)', fontsize=13)
        panel.hlines(0, 0, t_lag[-1] + 0.2, linewidth=1, linestyle='--')
        panel.errorbar(t_lag, mean_vals, yerr=std_vals, fmt='s', alpha=0.5, color=line_color)
        # Linear fit, skipping the zero-lag point.
        panel.plot(t_lag[1:], fit(t_lag[1:].astype(float)), ls='--', color='orange')
        panel.text(0, -1, label)
    plt.savefig('10-panel_eddington_plot.pdf', bbox_inches='tight')
    plt.show()
# Structure function plots: log(SF) = log10(std dev) vs log lag, per cut.
with plt.style.context('seaborn-paper'):
    fig, ax = plt.subplots(5, 2, figsize=(20, 10), sharex=True, sharey=True, gridspec_kw={'width_ratios': [1, 1], 'wspace':0, 'left':0.04, 'right':0.96, 'bottom':0.05, 'top':0.92})
    fig.suptitle('Data from: ' + data_path)
    # (row, col, title, t_log, stands, min-value label)
    panels = [
        (0, 0, '100%', t_log100, stands100, smallest_point100),
        (0, 1, '50%', t_log50, stands50, smallest_point50),
        (1, 0, '10%', t_log10, stands10, smallest_point10),
        (1, 1, '5%', t_log5, stands5, smallest_point5),
        (2, 0, '1%', t_log1, stands1, smallest_point1),
        (2, 1, '0.5%', t_log05, stands05, smallest_point05),
        (3, 0, '0.1%', t_log01, stands01, smallest_point01),
        (3, 1, '0.05%', t_log005, stands005, smallest_point005),
        (4, 0, '0.02%', t_log002, stands002, smallest_point002),
        (4, 1, 'As small as possible (0.006%)', t_loginf, standsinf, smallest_pointinf),
    ]
    for row, col, title, t_lag, std_vals, label in panels:
        panel = ax[row, col]
        panel.set_title(title, fontsize=13)
        panel.tick_params(direction='in', length=6, width=1.5)
        for side in ('top', 'right', 'bottom', 'left'):
            panel.spines[side].set_linewidth(1.5)
        # Axis labels only appear on the bottom row / bottom-left panel.
        if row == 4:
            panel.set_xlabel('log(t/time units)', fontsize=13)
        if (row, col) == (4, 0):
            panel.set_ylabel('log(SF)', fontsize=13)
        # Zero-lag point skipped (its std dev is exactly 0).
        panel.plot(t_lag[1:], np.log10(std_vals[1:]), color=line_color)
        panel.text(0.5, -1, label)
    plt.savefig('10-panel_SF_plot.pdf', bbox_inches='tight')
    plt.show()
```
## 4-Line plots
```
# PSD regime determination plot: mean drift vs percentile cut, at fixed lags.
with plt.style.context('seaborn-paper'):
    fig, ax = plt.subplots(1, figsize=(20, 10))
    ax.set_title('Mean change in log(Edd. Ratio) as a function of chosen percent. Data from: '+ data_path, fontsize=13)
    ax.tick_params(direction='in', length=6, width=1.5)
    for side in ('top', 'right', 'bottom', 'left'):
        ax.spines[side].set_linewidth(1.5)
    ax.set_xlabel('log(% total data)', fontsize=18)
    ax.set_xlim(x[0], x[-1])  # x is descending, so this reverses the axis
    ax.set_ylabel(r'Mean $\Delta$log(Edd. Ratio)', fontsize=18)
    for lag_exponent, series in enumerate([log_t_1, log_t_2, log_t_3, log_t_4], start=1):
        ax.plot(x, series, label=f'log(t/time units) = {lag_exponent}', marker='o', markersize=10)
    ax.legend(prop={'size':18})
    plt.savefig('4-line_eddington_plot.pdf', bbox_inches='tight')
    plt.show()
# Structure functions in a strange space
with plt.style.context('seaborn-paper'):
    fig, ax = plt.subplots(1, figsize=(20, 10))
    ax.set_title('log(SF) as a function of log(chosen percent). Data from: ' + data_path, fontsize=13)
    ax.tick_params(direction='in', length=6, width=1.5)
    for side in ('top', 'right', 'bottom', 'left'):
        ax.spines[side].set_linewidth(1.5)
    ax.set_xlabel('log(% total data)', fontsize=18)
    ax.set_xlim(x[0], x[-1])  # x is descending, so this reverses the axis
    ax.set_ylabel('log(SF)', fontsize=18)
    for lag_exponent, series in enumerate([log_t_1_stands, log_t_2_stands, log_t_3_stands, log_t_4_stands], start=1):
        ax.plot(x, series, label=f'log(t/time units) = {lag_exponent}', marker='o', markersize=10)
    ax.legend(prop={'size':18})
    plt.savefig('4-line_SF_plot.pdf', bbox_inches='tight')
    plt.show()
```
| github_jupyter |
<a href="https://colab.research.google.com/github/jonkrohn/ML-foundations/blob/master/notebooks/2-linear-algebra-ii.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Linear Algebra II: Matrix Operations
This topic, *Linear Algebra II: Matrix Operations*, builds on the basics of linear algebra. It is essential because these intermediate-level manipulations of tensors lie at the heart of most machine learning approaches and are especially predominant in deep learning.
Through the measured exposition of theory paired with interactive examples, you’ll develop an understanding of how linear algebra is used to solve for unknown values in high-dimensional spaces as well as to reduce the dimensionality of complex spaces. The content covered in this topic is itself foundational for several other topics in the *Machine Learning Foundations* series, especially *Probability & Information Theory* and *Optimization*.
Over the course of studying this topic, you'll:
* Develop a geometric intuition of what’s going on beneath the hood of machine learning algorithms, including those used for deep learning.
* Be able to more intimately grasp the details of machine learning papers as well as all of the other subjects that underlie ML, including calculus, statistics, and optimization algorithms.
* Reduce the dimensionality of complex spaces down to their most informative elements with techniques such as eigendecomposition, singular value decomposition, and principal components analysis.
**Note that this Jupyter notebook is not intended to stand alone. It is the companion code to a lecture or to videos from Jon Krohn's [Machine Learning Foundations](https://github.com/jonkrohn/ML-foundations) series, which offer detail on the following:**
*Review of Matrix Properties*
* Modern Linear Algebra Applications
* Tensors, Vectors, and Norms
* Matrix Multiplication
* Matrix Inversion
* Identity, Diagonal and Orthogonal Matrices
*Segment 2: Eigendecomposition*
* Eigenvectors
* Eigenvalues
* Matrix Determinants
* Matrix Decomposition
* Applications of Eigendecomposition
*Segment 3: Matrix Operations for Machine Learning*
* Singular Value Decomposition (SVD)
* The Moore-Penrose Pseudoinverse
* The Trace Operator
* Principal Component Analysis (PCA): A Simple Machine Learning Algorithm
* Resources for Further Study of Linear Algebra
## Segment 1: Review of Tensor Properties
```
import numpy as np
import torch
```
### Vector Transposition
```
x = np.array([25, 2, 5])
x
x.shape
x = np.array([[25, 2, 5]])
x
x.shape
x.T
x.T.shape
x_p = torch.tensor([25, 2, 5])
x_p
x_p.T
x_p.view(3, 1) # "view" because we're changing output but not the way x is stored in memory
```
**Return to slides here.**
## $L^2$ Norm
```
x
(25**2 + 2**2 + 5**2)**(1/2)
np.linalg.norm(x)
```
So, if units in this 3-dimensional vector space are meters, then the vector $x$ has a length of 25.6m
```
# the following line of code will fail because torch.norm() requires input to be float not integer
# torch.norm(p)
torch.norm(torch.tensor([25, 2, 5.]))
```
**Return to slides here.**
### Matrices
```
X = np.array([[25, 2], [5, 26], [3, 7]])
X
X.shape
X_p = torch.tensor([[25, 2], [5, 26], [3, 7]])
X_p
X_p.shape
```
**Return to slides here.**
### Matrix Transposition
```
X
X.T
X_p.T
```
**Return to slides here.**
### Matrix Multiplication
Scalars are applied to each element of matrix:
```
X*3
X*3+3
X_p*3
X_p*3+3
```
Using the multiplication operator on two tensors of the same size in PyTorch (or Numpy or TensorFlow) applies element-wise operations. This is the **Hadamard product** (denoted by the $\odot$ operator, e.g., $A \odot B$) *not* **matrix multiplication**:
```
A = np.array([[3, 4], [5, 6], [7, 8]])
A
X
X * A
A_p = torch.tensor([[3, 4], [5, 6], [7, 8]])
A_p
X_p * A_p
```
Matrix multiplication with a vector:
```
b = np.array([1, 2])
b
np.dot(A, b) # even though technically dot products is between 2 vectors
b_p = torch.tensor([1, 2])
b_p
torch.matmul(A_p, b_p)
```
Matrix multiplication with two matrices:
```
B = np.array([[1, 9], [2, 0]])
B
np.dot(A, B) # note first column is same as Xb
B_p = torch.tensor([[1, 9], [2, 0]])
B_p
torch.matmul(A_p, B_p)
```
### Matrix Inversion
```
X = np.array([[4, 2], [-5, -3]])
X
Xinv = np.linalg.inv(X)
Xinv
y = np.array([4, -7])
y
w = np.dot(Xinv, y)
w
```
Show that $y = Xw$:
```
np.dot(X, w)
X_p = torch.tensor([[4, 2], [-5, -3.]]) # note that torch.inverse() requires floats
X_p
Xinv_p = torch.inverse(X_p)
Xinv_p
y_p = torch.tensor([4, -7.])
y_p
w_p = torch.matmul(Xinv_p, y_p)
w_p
torch.matmul(X_p, w_p)
```
**Return to slides here.**
## Segment 2: Eigendecomposition
### Eigenvectors and Eigenvalues
Let's say we have a vector $v$:
```
v = np.array([3, 1])
v
```
Let's plot $v$ using Hadrien Jean's handy `plotVectors` function (from [this notebook](https://github.com/hadrienj/deepLearningBook-Notes/blob/master/2.7%20Eigendecomposition/2.7%20Eigendecomposition.ipynb) under [MIT license](https://github.com/hadrienj/deepLearningBook-Notes/blob/master/LICENSE)).
```
import matplotlib.pyplot as plt
def plotVectors(vecs, cols, alpha=1):
    """
    Draw a set of 2-D vectors as arrows anchored at the origin.

    Parameters
    ----------
    vecs : array-like
        Coordinates of the vectors to plot, one vector per entry. For
        instance: [[1, 3], [2, 2]] can be used to plot 2 vectors.
    cols : array-like
        One colour per vector. For instance: ['red', 'blue'] displays the
        first vector in red and the second in blue.
    alpha : float
        Opacity applied to every arrow.
    """
    plt.figure()
    # Light grey axes through the origin for reference.
    plt.axvline(x=0, color='#A9A9A9', zorder=0)
    plt.axhline(y=0, color='#A9A9A9', zorder=0)
    for i, vec in enumerate(vecs):
        # Pack [tail_x, tail_y, tip_x, tip_y] with the tail at the origin.
        arrow = np.concatenate([[0, 0], vec])
        plt.quiver([arrow[0]],
                   [arrow[1]],
                   [arrow[2]],
                   [arrow[3]],
                   angles='xy', scale_units='xy', scale=1,
                   color=cols[i], alpha=alpha)
plotVectors([v], cols=['lightblue'])
_ = plt.xlim(-1, 5)
_ = plt.ylim(-1, 5)
```
"Applying" a matrix to a vector (i.e., performing matrix-vector multiplication) can linearly transform the vector, e.g, rotate it or rescale it.
The identity matrix, introduced earlier, is the exception that proves the rule: Applying an identity matrix does not transform the vector:
```
I = np.array([[1, 0], [0, 1]])
I
Iv = np.dot(I, v)
Iv
v == Iv
plotVectors([Iv], cols=['blue'])
_ = plt.xlim(-1, 5)
_ = plt.ylim(-1, 5)
```
In contrast, let's see what happens when we apply (some non-identity matrix) $A$ to the vector $v$:
```
A = np.array([[-1, 4], [2, -2]])
A
Av = np.dot(A, v)
Av
plotVectors([v, Av], ['lightblue', 'blue'])
_ = plt.xlim(-1, 5)
_ = plt.ylim(-1, 5)
# a second example:
v2 = np.array([2, 1])
plotVectors([v2, np.dot(A, v2)], ['lightgreen', 'green'])
_ = plt.xlim(-1, 5)
_ = plt.ylim(-1, 5)
```
We can concatenate several vectors together into a matrix (say, $V$), where each column is a separate vector. Then, whatever linear transformations we apply to $V$ will be independently applied to each column (vector):
```
v
# recall that we need to convert array to 2D to transpose into column, e.g.:
np.matrix(v).T
v3 = np.array([-3, -1]) # mirror image of x over both axes
v4 = np.array([-1, 1])
V = np.concatenate((np.matrix(v).T,
np.matrix(v2).T,
np.matrix(v3).T,
np.matrix(v4).T),
axis=1)
V
IV = np.dot(I, V)
IV
AV = np.dot(A, V)
AV
# function to convert column of matrix to 1D vector:
def vectorfy(mtrx, clmn):
    """Return column `clmn` of matrix `mtrx` as a flat 1-D NumPy array."""
    # np.array copies the (2-D) column slice; reshape(-1) flattens it.
    column = np.array(mtrx[:, clmn])
    return column.reshape(-1)
vectorfy(V, 0)
vectorfy(V, 0) == v
plotVectors([vectorfy(V, 0), vectorfy(V, 1), vectorfy(V, 2), vectorfy(V, 3),
vectorfy(AV, 0), vectorfy(AV, 1), vectorfy(AV, 2), vectorfy(AV, 3)],
['lightblue', 'lightgreen', 'lightgray', 'orange',
'blue', 'green', 'gray', 'red'])
_ = plt.xlim(-4, 6)
_ = plt.ylim(-5, 5)
```
Now that we can appreciate linear transformation of vectors by matrices, let's move on to working with eigenvectors and eigenvalues.
An **eigenvector** (*eigen* is German for "own" or "characteristic"; we could translate *eigenvector* to "characteristic vector") is a special vector $v$ such that when it is transformed by some matrix (let's say $A$), the product $Av$ has the exact same direction as $v$.
An **eigenvalue** is a scalar (traditionally represented as $\lambda$) that simply scales the eigenvector $v$ such that the following equation is satisfied:
$Av = \lambda v$
Easiest way to understand this is to work through an example:
```
A
```
Eigenvectors and eigenvalues can be derived algebraically (e.g., with the [QR algorithm](https://en.wikipedia.org/wiki/QR_algorithm), which was independently developed in the 1950s by both [Vera Kublanovskaya](https://en.wikipedia.org/wiki/Vera_Kublanovskaya) and John Francis), however this is outside the scope of today's class. We'll cheat with NumPy's `eig()` method, which returns a tuple of:
* a vector of eigenvalues
* a matrix of eigenvectors
```
lambdas, V = np.linalg.eig(A)
```
The matrix contains as many eigenvectors as there are columns of A:
```
V # each column is a separate eigenvector v
```
With a corresponding eigenvalue for each eigenvector:
```
lambdas
```
Let's confirm that $Av = \lambda v$ for the first eigenvector:
```
v = V[:,0]
v
lambduh = lambdas[0] # note that "lambda" is reserved term in Python
lambduh
Av = np.dot(A, v)
Av
lambduh * v
plotVectors([Av, v], ['blue', 'lightblue'])
_ = plt.xlim(-1, 2)
_ = plt.ylim(-1, 2)
```
And again for the second eigenvector of A:
```
v2 = V[:,1]
v2
lambda2 = lambdas[1]
lambda2
Av2 = np.dot(A, v2)
Av2
lambda2 * v2
plotVectors([Av, v, Av2, v2],
['blue', 'lightblue', 'green', 'lightgreen'])
_ = plt.xlim(-1, 4)
_ = plt.ylim(-3, 2)
```
Using the PyTorch `eig()` method, we can do exactly the same:
```
A
A_p = torch.tensor([[-1, 4], [2, -2.]])  # must be float for eigendecomposition
A_p
# torch.eig() was deprecated and removed in recent PyTorch releases;
# torch.linalg.eig() is the replacement. It returns complex tensors even
# for real inputs, so we take the real part (this A_p has real
# eigenvalues and eigenvectors).
lambdas_p, V_p = torch.linalg.eig(A_p)
lambdas_p = lambdas_p.real
V_p = V_p.real
v_p = V_p[:, 0]  # first eigenvector
v_p
lambda_p = lambdas_p[0]  # corresponding eigenvalue
lambda_p
Av_p = torch.matmul(A_p, v_p)
Av_p
lambda_p * v_p  # should match Av_p, confirming Av = lambda*v
v2_p = V_p[:, 1]  # second eigenvector
v2_p
lambda2_p = lambdas_p[1]
lambda2_p
Av2_p = torch.matmul(A_p, v2_p)
Av2_p
lambda2_p * v2_p
plotVectors([Av_p.numpy(), v_p.numpy(), Av2_p.numpy(), v2_p.numpy()],
            ['blue', 'lightblue', 'green', 'lightgreen'])
_ = plt.xlim(-1, 4)
_ = plt.ylim(-3, 2)
```
### Eigenvectors in >2 Dimensions
While plotting gets trickier in higher-dimensional spaces, we can nevertheless find and use eigenvectors with more than two dimensions. Here's a 3D example (there are three dimensions handled over three rows):
```
X
lambdas_X, V_X = np.linalg.eig(X)
V_X # one eigenvector per column of X
lambdas_X # a corresponding eigenvalue for each eigenvector
```
Confirm $Xv = \lambda v$ for an example vector:
```
v_X = V_X[:,0]
v_X
lambda_X = lambdas_X[0]
lambda_X
np.dot(X, v_X) # matrix multiplication
lambda_X * v_X
```
**Exercises**:
1. Use PyTorch to confirm $Xv = \lambda v$ for the first eigenvector of $X$.
2. Confirm $Xv = \lambda v$ for the remaining eigenvectors of $X$ (you can use NumPy or PyTorch, whichever you prefer).
**Return to slides here.**
### 2x2 Matrix Determinants
```
X
np.linalg.det(X)
```
**Return to slides here.**
```
N = np.array([[-4, 1], [-8, 2]])
N
np.linalg.det(N)
# Uncommenting the following line results in a "singular matrix" error
# Ninv = np.linalg.inv(N)
N = torch.tensor([[-4, 1], [-8, 2.]]) # must use float not int
torch.det(N)
```
**Return to slides here.**
### Generalizing Determinants
```
X = np.array([[1, 2, 4], [2, -1, 3], [0, 5, 1]])
X
np.linalg.det(X)
```
### Determinants & Eigenvalues
```
lambdas, V = np.linalg.eig(X)
lambdas
# np.product() was deprecated and removed in NumPy 2.0; np.prod() is the
# canonical name for the same operation (product of all eigenvalues
# equals the determinant).
np.prod(lambdas)
```
**Return to slides here.**
```
# np.product() was removed in NumPy 2.0; use np.prod() instead.
np.abs(np.prod(lambdas))
B = np.array([[1, 0], [0, 1]])
B
plotVectors([vectorfy(B, 0), vectorfy(B, 1)],
['lightblue', 'lightgreen'])
_ = plt.xlim(-1, 3)
_ = plt.ylim(-1, 3)
N
np.linalg.det(N)
NB = np.dot(N, B)
NB
plotVectors([vectorfy(B, 0), vectorfy(B, 1), vectorfy(NB, 0), vectorfy(NB, 1)],
['lightblue', 'lightgreen', 'blue', 'green'])
_ = plt.xlim(-6, 6)
_ = plt.ylim(-9, 3)
I
np.linalg.det(I)
IB = np.dot(I, B)
IB
plotVectors([vectorfy(B, 0), vectorfy(B, 1), vectorfy(IB, 0), vectorfy(IB, 1)],
['lightblue', 'lightgreen', 'blue', 'green'])
_ = plt.xlim(-1, 3)
_ = plt.ylim(-1, 3)
J = np.array([[-0.5, 0], [0, 2]])
J
np.linalg.det(J)
np.abs(np.linalg.det(J))
JB = np.dot(J, B)
JB
plotVectors([vectorfy(B, 0), vectorfy(B, 1), vectorfy(JB, 0), vectorfy(JB, 1)],
['lightblue', 'lightgreen', 'blue', 'green'])
_ = plt.xlim(-1, 3)
_ = plt.ylim(-1, 3)
doubleI = I*2
np.linalg.det(doubleI)
doubleIB = np.dot(doubleI, B)
doubleIB
plotVectors([vectorfy(B, 0), vectorfy(B, 1), vectorfy(doubleIB, 0), vectorfy(doubleIB, 1)],
['lightblue', 'lightgreen', 'blue', 'green'])
_ = plt.xlim(-1, 3)
_ = plt.ylim(-1, 3)
```
**Return to slides here.**
### Eigendecomposition
The **eigendecomposition** of some matrix $A$ is
$A = V \Lambda V^{-1}$
Where:
* As in examples above, $V$ is the concatenation of all the eigenvectors of $A$
* $\Lambda$ (upper-case $\lambda$) is the diagonal matrix diag($\lambda$). Note that the convention is to arrange the lambda values in descending order; as a result, the first eigenvector (and its associated eigenvalue) may be a primary characteristic of the matrix $A$.
```
# This was used earlier as a matrix X; it has nice clean integer eigenvalues...
A = np.array([[4, 2], [-5, -3]])
A
lambdas, V = np.linalg.eig(A)
V
Vinv = np.linalg.inv(V)
Vinv
Lambda = np.diag(lambdas)
Lambda
```
Confirm that $A = V \Lambda V^{-1}$:
```
np.dot(V, np.dot(Lambda, Vinv))
```
Eigendecomposition is not possible with all matrices. And in some cases where it is possible, the eigendecomposition involves complex numbers instead of straightforward real numbers.
In machine learning, however, we are typically working with real symmetric matrices, which can be conveniently and efficiently decomposed into real-only eigenvectors and real-only eigenvalues. If $A$ is a real symmetric matrix then...
$A = Q \Lambda Q^T$
...where $Q$ is analogous to $V$ from the previous equation except that it's special because it's an orthogonal matrix.
```
A = np.array([[2, 1], [1, 2]])
A
lambdas, Q = np.linalg.eig(A)
lambdas
Lambda = np.diag(lambdas)
Lambda
Q
```
Recalling that $Q^TQ = QQ^T = I$, we can demonstrate that $Q$ is an orthogonal matrix:
```
np.dot(Q.T, Q)
np.dot(Q, Q.T)
```
Let's confirm $A = Q \Lambda Q^T$:
```
np.dot(Q, np.dot(Lambda, Q.T))
```
**Exercises**:
1. Use PyTorch to decompose the matrix $P$ (below) into its components $V$, $\Lambda$, and $V^{-1}$. Confirm that $P = V \Lambda V^{-1}$.
2. Use PyTorch to decompose the symmetric matrix $S$ (below) into its components $Q$, $\Lambda$, and $Q^T$. Confirm that $S = Q \Lambda Q^T$.
```
P = torch.tensor([[25, 2, -5], [3, -2, 1], [5, 7, 4.]])
P
S = torch.tensor([[25, 2, -5], [2, -2, 1], [-5, 1, 4.]])
S
```
**Return to slides here.**
## Segment 3: Matrix Operations for ML
### Singular Value Decomposition (SVD)
As on slides, SVD of matrix $A$ is:
$A = UDV^T$
Where:
* $U$ is an orthogonal $m \times m$ matrix; its columns are the **left-singular vectors** of $A$.
* $V$ is an orthogonal $n \times n$ matrix; its columns are the **right-singular vectors** of $A$.
* $D$ is a diagonal $m \times n$ matrix; elements along its diagonal are the **singular values** of $A$.
```
A = np.array([[-1, 2], [3, -2], [5, 7]])
A
U, d, VT = np.linalg.svd(A) # V is already transposed
U
VT
d
np.diag(d)
D = np.concatenate((np.diag(d), [[0, 0]]), axis=0)
D
np.dot(U, np.dot(D, VT))
```
SVD and eigendecomposition are closely related to each other:
* Left-singular vectors of $A$ = eigenvectors of $AA^T$.
* Right-singular vectors of $A$ = eigenvectors of $A^TA$.
* Non-zero singular values of $A$ = square roots of eigenvalues of $AA^T$ = square roots of eigenvalues of $A^TA$
**Exercise**: Using the matrix `P` from the preceding PyTorch exercises, demonstrate that these three SVD-eigendecomposition equations are true.
### Image Compression via SVD
The section features code adapted from [Frank Cleary's](https://gist.github.com/frankcleary/4d2bd178708503b556b0).
```
import time
from PIL import Image
```
Fetch photo of Oboe, a terrier, with the book *Deep Learning Illustrated*:
```
! wget https://raw.githubusercontent.com/jonkrohn/DLTFpT/master/notebooks/oboe-with-book.jpg
img = Image.open('oboe-with-book.jpg')
plt.imshow(img)
```
Convert image to grayscale so that we don't have to deal with the complexity of multiple color channels:
```
imggray = img.convert('LA')
plt.imshow(imggray)
```
Convert data into numpy matrix, which doesn't impact image data:
```
imgmat = np.array(list(imggray.getdata(band=0)), float)
imgmat.shape = (imggray.size[1], imggray.size[0])
imgmat = np.matrix(imgmat)
plt.imshow(imgmat, cmap='gray')
```
Calculate SVD of the image:
```
U, sigma, V = np.linalg.svd(imgmat)
```
As eigenvalues are arranged in descending order in diag($\lambda$), so too are singular values, by convention, arranged in descending order in $D$ (or, in this code, diag($\sigma$)). Thus, the first left-singular vector of $U$ and first right-singular vector of $V$ may represent the most prominent feature of the image:
```
reconstimg = np.matrix(U[:, :1]) * np.diag(sigma[:1]) * np.matrix(V[:1, :])
plt.imshow(reconstimg, cmap='gray')
```
Additional singular vectors improve the image quality:
```
for i in [2, 4, 8, 16, 32, 64]:
reconstimg = np.matrix(U[:, :i]) * np.diag(sigma[:i]) * np.matrix(V[:i, :])
plt.imshow(reconstimg, cmap='gray')
title = "n = %s" % i
plt.title(title)
plt.show()
```
With 64 singular vectors, the image is reconstructed quite well, however the data footprint is much smaller than the original image:
```
imgmat.shape
full_representation = 4032*3024
full_representation
svd64_rep = 64*4032 + 64 + 64*3024
svd64_rep
svd64_rep/full_representation
```
Specifically, the image represented as 64 singular vectors is 3.7% of the size of the original!
**Return to slides here.**
### The Moore-Penrose Pseudoinverse
Let's calculate the pseudoinverse $A^+$ of some matrix $A$ using the formula from the slides:
$A^+ = VD^+U^T$
```
A
```
As shown earlier, the NumPy SVD method returns $U$, $d$, and $V^T$:
```
U, d, VT = np.linalg.svd(A)
U
VT
d
```
To create $D^+$, we first invert the non-zero values of $d$:
```
D = np.diag(d)
D
1/8.669
1/4.104
```
...and then we would take the transpose of the resulting matrix.
Because $D$ is a diagonal matrix, this can, however, be done in a single step by inverting $D$:
```
Dinv = np.linalg.inv(D)
Dinv
```
The final $D^+$ matrix needs to have a shape that can undergo matrix multiplication in the $A^+ = VD^+U^T$ equation. These dimensions can be obtained from $A$:
```
A.shape[0]
A.shape[1]
Dplus = np.zeros((3, 2)).T
Dplus
Dplus[:2, :2] = Dinv
Dplus
```
Now we have everything we need to calculate $A^+$ with $VD^+U^T$:
```
np.dot(VT.T, np.dot(Dplus, U.T))
```
Working out this derivation is helpful for understanding how Moore-Penrose pseudoinverses work, but unsurprisingly NumPy is loaded with an existing method `pinv()`:
```
np.linalg.pinv(A)
```
**Exercise**
Use the `torch.svd()` method to calculate the pseudoinverse of `A_p`, confirming that your result matches the output of `torch.pinverse(A_p)`:
```
A_p = torch.tensor([[-1, 2], [3, -2], [5, 7.]])
A_p
torch.pinverse(A_p)
```
**Return to slides here.**
For regression problems, we typically have many more cases ($n$, or rows of $X$) than features to predict ($m$, or columns of $X$). Let's solve a miniature example of such an overdetermined situation.
We have eight data points ($n$ = 8):
```
x1 = [0, 1, 2, 3, 4, 5, 6, 7.]
y = [1.86, 1.31, .62, .33, .09, -.67, -1.23, -1.37]
fig, ax = plt.subplots()
_ = ax.scatter(x1, y)
```
Although it appears there is only one predictor ($x_1$), we need a second one (let's call it $x_0$) in order to allow for a $y$-intercept (therefore, $m$ = 2). Without this second variable, the line we fit to the plot would need to pass through the origin (0, 0). The $y$-intercept is constant across all the points so we can set it equal to `1` across the board:
```
x0 = np.ones(8)
x0
```
Concatenate $x_0$ and $x_1$ into a matrix $X$:
```
X = np.concatenate((np.matrix(x0).T, np.matrix(x1).T), axis=1)
X
```
From the slides, we know that we can compute the weights $w$ using the pseudoinverse of $w = X^+y$:
```
w = np.dot(np.linalg.pinv(X), y)
w
```
The first weight corresponds to the $y$-intercept of the line, which is typically denoted as $b$:
```
b = np.asarray(w).reshape(-1)[0]
b
```
While the second weight corresponds to the slope of the line, which is typically denoted as $m$:
```
m = np.asarray(w).reshape(-1)[1]
m
```
With the weights we can plot the line to confirm it fits the points:
```
fig, ax = plt.subplots()
ax.scatter(x1, y)
x_min, x_max = ax.get_xlim()
y_min, y_max = b, b + m*(x_max-x_min)
ax.plot([x_min, x_max], [y_min, y_max])
_ = ax.set_xlim([x_min, x_max])
```
### The Trace Operator
Denoted as Tr($A$). Simply the sum of the diagonal elements of a matrix: $$\sum_i A_{i,i}$$
```
A = np.array([[25, 2], [5, 4]])
A
25 + 4
np.trace(A)
```
The trace operator has a number of useful properties that come in handy while rearranging linear algebra equations, e.g.:
* Tr($A$) = Tr($A^T$)
* Assuming the matrix shapes line up: Tr(ABC) = Tr(CAB) = Tr(BCA)
In particular, the trace operator can provide a convenient way to calculate a matrix's Frobenius norm: $$||A||_F = \sqrt{\mathrm{Tr}(AA^\mathrm{T})}$$
**Exercise**
Using the matrix `A_p`:
1. Identify the PyTorch trace method and the trace of the matrix.
2. Further, use the PyTorch Frobenius norm method (for the left-hand side of the equation) and the trace method (for the right-hand side of the equation) to demonstrate that $||A||_F = \sqrt{\mathrm{Tr}(AA^\mathrm{T})}$
```
A_p
```
**Return to slides here.**
### Principal Component Analysis
This PCA example code is adapted from [here](https://jupyter.brynmawr.edu/services/public/dblank/CS371%20Cognitive%20Science/2016-Fall/PCA.ipynb).
```
from sklearn import datasets
iris = datasets.load_iris()
iris.data.shape
iris.get("feature_names")
iris.data[0:6,:]
from sklearn.decomposition import PCA
pca = PCA(n_components=2)
X = pca.fit_transform(iris.data)
X.shape
X[0:6,:]
plt.scatter(X[:, 0], X[:, 1])
iris.target.shape
iris.target[0:6]
unique_elements, counts_elements = np.unique(iris.target, return_counts=True)
np.asarray((unique_elements, counts_elements))
list(iris.target_names)
plt.scatter(X[:, 0], X[:, 1], c=iris.target)
```
**Return to slides here.**
| github_jupyter |

## Classification
Classification - predicting the discrete class ($y$) of an object from a vector of input features ($\vec x$).
Models used in this notebook include: Logistic Regression, Support Vector Machines, KNN
**Author List**: Kevin Li
**Original Sources**: http://scikit-learn.org, http://archive.ics.uci.edu/ml/datasets/Iris
**License**: Feel free to do whatever you want to with this code
## Iris Dataset
```
from sklearn import datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data
Y = iris.target
# type(iris)
print("feature vector shape=", X.shape)
print("class shape=", Y.shape)
print(iris.target_names, type(iris.target_names))
print(iris.feature_names, type(iris.feature_names))
# Python 2-style print statements are a SyntaxError under Python 3
# (which the surrounding cells use); rewritten as print() calls.
print(type(X))
print(X[0:5])
print(type(Y))
print(Y[0:5])
print("---")
print(iris.DESCR)
# specifies that figures should be shown inline, directly in the notebook.
%pylab inline
# Learn more about thhis visualization package at http://seaborn.pydata.org/
# http://seaborn.pydata.org/tutorial/axis_grids.html
# http://seaborn.pydata.org/tutorial/aesthetics.html#aesthetics-tutorial
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style="white")
df = sns.load_dataset("iris")
print "df is a ", type(df)
g = sns.PairGrid(df, diag_sharey=False,hue="species")
g.map_lower(sns.kdeplot, cmap="Blues_d")
g.map_upper(plt.scatter)
g.map_diag(sns.kdeplot, lw=3)
# sns.load_dataset?
sns.load_dataset
```
- Logistic Regression: `linear_model.LogisticRegression`
- KNN Classification: `neighbors.KNeighborsClassifier`
- LDA / QDA: `lda.LDA` / `lda.QDA`
- Naive Bayes: `naive_bayes.GaussianNB`
- Support Vector Machines: `svm.SVC`
- Classification Trees: `tree.DecisionTreeClassifier`
- Random Forest: `ensemble.RandomForestClassifier`
- Multi-class & multi-label Classification is supported: `multiclass.OneVsRest` `multiclass.OneVsOne`
- Boosting & Ensemble Learning: xgboost, cart
## Logistic Regression
A standard logistic sigmoid function
<img src="https://upload.wikimedia.org/wikipedia/commons/thumb/8/88/Logistic-curve.svg/320px-Logistic-curve.svg.png" width="50%">
```
%matplotlib inline
import numpy as np
from sklearn import linear_model, datasets
# set_context
sns.set_context("talk")
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, 1:3] # we only take the first two features.
Y = iris.target
h = .02 # step size in the mesh
# https://en.wikipedia.org/wiki/Logistic_regression
logreg = linear_model.LogisticRegression(C=1e5)
# we create an instance of Neighbours Classifier and fit the data.
logreg.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
# numpy.ravel: Return a contiguous flattened array.
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=get_cmap("Spectral"))
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
#plt.xlim(xx.min(), xx.max())
#plt.ylim(yy.min(), yy.max())
#plt.xticks(())
#plt.yticks(())
plt.show()
```
## Support Vector Machines (Bell Labs, 1992)
<img src="http://docs.opencv.org/2.4/_images/optimal-hyperplane.png" width="50%">
```
# adapted from http://scikit-learn.org/0.13/auto_examples/svm/plot_iris.html#example-svm-plot-iris-py
%matplotlib inline
import numpy as np
from sklearn import svm, datasets
sns.set_context("talk")
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, 1:3] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
Y = iris.target
h = 0.02 # step size in the mesh
# we create an instance of SVM and fit out data. We do not scale our
# data since we want to plot the support vectors
C = 1.0 # SVM regularization parameter
svc = svm.SVC(kernel='linear', C=C).fit(X, Y)
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, Y)
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, Y)
lin_svc = svm.LinearSVC(C=C).fit(X, Y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['SVC with linear kernel',
'SVC with RBF kernel',
'SVC with polynomial (degree 3) kernel',
'LinearSVC (linear kernel)']
clfs = [svc, rbf_svc, poly_svc, lin_svc]
f,axs = plt.subplots(2,2)
for i, clf in enumerate(clfs):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
ax = axs[i//2][i % 2]
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z,cmap=get_cmap("Spectral"))
ax.axis('off')
# Plot also the training points
ax.scatter(X[:, 0], X[:, 1], c=Y,cmap=get_cmap("Spectral"))
ax.set_title(titles[i])
```
## Beyond Linear SVM
```
# SVM with polynomial kernel visualization
from IPython.display import YouTubeVideo
YouTubeVideo("3liCbRZPrZA")
```
## kNearestNeighbors (kNN)
```
# %load http://scikit-learn.org/stable/_downloads/plot_classification.py
"""
================================
Nearest Neighbors Classification
================================
Sample usage of Nearest Neighbors classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import neighbors, datasets
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for weights in ['uniform', 'distance']:
# we create an instance of Neighbours Classifier and fit the data.
clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
clf.fit(X, y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.title("3-Class classification (k = %i, weights = '%s')"
% (n_neighbors, weights))
plt.show()
```
##### Back to the Iris Data Set
```
iris = datasets.load_iris()
iris_X = iris.data
iris_y = iris.target
indices = np.random.permutation(len(iris_X))
iris_X_train = iris_X[indices[:-10]]
iris_y_train = iris_y[indices[:-10]]
iris_X_test = iris_X[indices[-10:]]
iris_y_test = iris_y[indices[-10:]]
# Create and fit a nearest-neighbor classifier
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
knn.fit(iris_X_train, iris_y_train)
KNeighborsClassifier(algorithm='auto', leaf_size=30, metric='minkowski',
metric_params=None, n_jobs=1, n_neighbors=15, p=2,
weights='uniform')
print("predicted:", knn.predict(iris_X_test))
print("actual :", iris_y_test)
```
| github_jupyter |
<a href="https://colab.research.google.com/github/davidedomini/stroke_predictions/blob/main/StrokePrediction.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Stroke Prediction
**Davide Domini** <br>
davide.domini@studio.unibo.it<br> <br>
Programmazione di applicazioni data intensive <br>
Laurea in Ingegneria e Scienze Informatiche
DISI - Università di Bologna, Cesena
Citazioni:
* Stroke Preditction Dataset https://www.kaggle.com/fedesoriano/stroke-prediction-dataset
##Descrizione del problema e comprensione dei dati
*In questo progetto si vuole realizzare un modello in grado di predire la presenza o meno di un ictus in base ad alcune caratteristiche fisiche e di stile di vita di alcuni pazienti*
Vengono importate le librerie necessarie
- **NumPy** per lavorare agilmente con l'algebra lineare
- **Pandas** per gestire meglio i dati in formato tabellare
- **Seaborn** per disegnare i grafici (basata su **matplotlib**)
- **Urllib** per recuperare il dataset dalla repo github
- **Sklearn** per avere i modelli di classificazione
- **Imblearn** per applicare l'oversampling alla classe meno numerosa
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb
import os.path
import math
from urllib.request import urlretrieve
from imblearn.over_sampling import SMOTE
from sklearn.linear_model import Perceptron
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_score, recall_score, f1_score
from sklearn.model_selection import KFold, StratifiedKFold
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
import graphviz
from sklearn import tree
from sklearn.dummy import DummyClassifier
from xgboost import XGBClassifier
%matplotlib inline
```
###Caricamento dei dati e preprocessing
Viene recuperato il file del dataset dalla repo GitHub
```
dataset_url = "https://raw.githubusercontent.com/davidedomini/stroke_predictions/main/healthcare-dataset-stroke-data.csv"
if not os.path.exists("healthcare-dataset-stroke-data.csv"):
urlretrieve(dataset_url, "healthcare-dataset-stroke-data.csv")
stroke_dataset = pd.read_csv("healthcare-dataset-stroke-data.csv", sep=",")
stroke_dataset.head(10)
```
Osservando il dataframe notiamo che il campo `id` è solo un identificatore univoco del paziente, non avendo nessuna importanza ai fini del modello lo impostiamo come index
```
stroke_dataset.set_index("id", inplace=True)
stroke_dataset.head(10)
```
###Comprensione delle feature
Descrizione:
1. **id**: identificatore univoco del paziente
2. **gender**: [Nominale] genere del paziente, può essere "Male", "Female" o "Other"
3. **age**: [Intervallo] età del paziente
4. **hypertension**: [Nominale] rappresenta la presenza di ipertensione nel paziente, assume valore 0 se non presente e valore 1 se presente
5. **heart_disease**: [Nominale] rappresenta la presenza di cardiopatia nel paziente, assume valore 0 se non presente e valore 1 se presente
6. **ever_married**: [Nominale] indica se il paziente è mai stato sposato, assume valore "Yes" o "No"
7. **work_type**: [Nominale] indica il tipo di lavoro del paziente, assume valori "children", "Govt_jov", "Never_Worked", "Private" o "Self-employed"
8. **Residence_type**: [Nominale] indica la zona di residenza del paziente, assume valori "Rural" o "Urban"
9. **avg_glucose_level**: [Intervallo] indica il livello di glucosio medio nel sangue
10. **bmi**: [Intervallo] indica l'indice di massa corporea, si calcola come: $$ \frac{massa}{altezza^2} $$
11. **smoking_status**: [Nominale] indica le abitudini del paziente con il fumo, assume valori "formerly smoked", "never smoked", "smokes" o "Unknown"
12. **stroke**: [Nominale] indica se il paziente ha avuto un ictus, assume valore 0 oppure 1
$\Rightarrow$ Siccome la variabile `stroke` da predire è discreta si tratta di un problema di classificazione (con due classi)
###Esplorazione delle singole feature
```
stroke_dataset.describe()
```
Con il metodo describe possiamo ottenere varie informazioni sulle feature presenti nel dataset:
* L'età media dei pazienti è circa 43 anni, il più giovane ha meno di un anno mentre il più anziano ne ha 82, inoltre possiamo notare che comunque il 50% ha più di 45 anni
* Il livello di glucosio medio nel sangue è circa 106, di solito questo valore dovrebbe stare nell'intervallo [70;100], dal 75-esimo percentile possiamo notare che circa il 25% dei pazienti ha livelli preoccupanti che denotano una probabile presenza di diabete
* La media dei valori del BMI assume valore 28, leggermente troppo alto in quanto una persona con peso regolare dovrebbe stare nell'intervallo [18;25], inoltre abbiamo 10 come valore minimo e 97 come valore massimo il che indica che abbiamo alcuni casi di grave magrezza e grave obesità
Inoltre dal riassunto precedente del dataframe possiamo notare che ci sono alcuni valori NaN quindi controlliamo meglio
```
stroke_dataset.isna().sum()
```
Vista la presenza di 201 valori mancanti nella colonna bmi procediamo alla rimozione
```
stroke_dataset.dropna(inplace=True)
stroke_dataset.isna().sum()
stroke_dataset["stroke"].value_counts().plot.pie(autopct="%.1f%%");
```
Dal grafico a torta della feature `stroke` notiamo che le due classi sono estremamente sbilanciate, questo protrebbe creare problemi in seguito quindi nelle prossime sezioni verranno applicate tecniche come dare un peso diverso alle due classi oppure under/over -sampling di una delle due classi
```
sb.displot(stroke_dataset["avg_glucose_level"]);
sb.displot(stroke_dataset["bmi"]);
```
Dai precedenti grafici per le distribuzioni delle feature `avg_glucose_level` e `bmi` osserviamo che:
* Il livello medio di glucosio è molto concentrato nell'intervallo che va circa da 60 a 100
* Il bmi è molto concentrato nell'intevallo [20;40]
```
plt.figure(figsize=(15,10))
plt.subplot(1,2,1)
sb.histplot(x="gender", data=stroke_dataset);
plt.subplot(1,2,2)
sb.histplot(x="age", data=stroke_dataset);
```
Da questi istogrammi possiamo notare che:
* I pazienti sono più donne che uomini
* Il numero di pazienti ha due picchi intorno ai 40 anni e agli 80 anni
```
plt.figure(figsize=(20,10))
plt.subplot(3,2,1)
sb.countplot(x="hypertension", data=stroke_dataset);
plt.subplot(3,2,2)
sb.countplot(x="ever_married", data=stroke_dataset);
plt.subplot(3,2,3)
sb.countplot(x="smoking_status", data=stroke_dataset);
plt.subplot(3,2,4)
sb.countplot(x="heart_disease", data=stroke_dataset);
plt.subplot(3,2,5)
sb.countplot(x="Residence_type", data=stroke_dataset);
plt.subplot(3,2,6)
sb.countplot(x="work_type", data=stroke_dataset);
```
Da questi altri grafici invece notiamo che:
* Sono molti di più i pazienti senza ipertensione che quelli che ne soffrono
* Abbiamo meno pazienti che non sono mai stati sposati
* Molti dei pazienti non hanno mai fumato, però ce ne sono anche molti in cui abbiamo stato sconosciuto, quindi questo potrebbe andare a pareggiare i conti nella realtà
* Pochi pazienti sono cardiopatici
* La differenza fra chi abita in zone urbane e chi in zone rurali è pressochè nulla
```
plt.figure(figsize=(20,10))
plt.subplot(2,2,1)
sb.boxplot(x="age", data=stroke_dataset);
plt.subplot(2,2,2)
sb.boxplot(x="bmi", data=stroke_dataset);
plt.subplot(2,2,3)
sb.boxplot(x="avg_glucose_level", data=stroke_dataset);
```
Da questi ultimi grafici possiamo notare che la feature `avg_glucose_level` sembra avere molti valori outlier, a primo impatto potremmo pensare di rimuoverne almeno una parte per non avere valori anomali quindi facciamo un'analisi più approfondita per decidere se è il caso di farlo o no
```
sb.displot(x='avg_glucose_level', hue='stroke', data = stroke_dataset, palette="pastel", multiple="stack");
stroke_avg165 = stroke_dataset[stroke_dataset.avg_glucose_level >= 160].loc[:,"stroke"].sum()
stroke_tot = stroke_dataset.loc[:,"stroke"].sum()
stroke_perc = stroke_avg165 / stroke_tot
print(f"Abbiamo {stroke_tot} ictus totali, di cui {stroke_avg165} sono avvenuti in pazienti con un valore di glucosio medio nel sangue non nella norma, in percentuale sono quindi il {stroke_perc:.1%}")
```
Da questa ulteriore analisi possiamo quindi osservare due cose:
1. Una buona fetta degli ictus totali è presente in persone con il livello medio di glucosio nel sangue non nella norma
2. Se consideriamo due frazioni distinte del dataset, nella prima sezione contenente i pazienti con un livello di glucosio medio inferiore a 160 abbiamo che i casi di ictus sono una piccola frazione dei casi totali mentre nella seconda sezione i casi con ictus sono una frazione molto più grande dei casi totali
In virtù di questi risultati decidiamo di tenere anche i dati outlier per la feature `avg_glucose_level`
###Esplorazione dei legami fra le feature
```
sb.displot(x='age', hue='stroke', data = stroke_dataset, palette="pastel", multiple="stack");
```
Un primo legame interessante è quello fra età e presenza di ictus, possiamo notare come praticamente la totalità degli ictus si sia verificata in pazienti con più di 35-40 anni.
In particolare abbiamo un picco nei pazienti più anziani intorno agli 80 anni
```
plt.figure(figsize=(25,20))
plt.subplot(4,2,1)
sb.countplot(x='hypertension', hue='stroke', data = stroke_dataset, palette="pastel");
plt.subplot(4,2,2)
sb.countplot(x='heart_disease', hue='stroke', data = stroke_dataset, palette="pastel");
plt.subplot(4,2,3)
sb.countplot(x='ever_married', hue='stroke', data = stroke_dataset, palette="pastel");
plt.subplot(4,2,4)
sb.countplot(x='work_type', hue='stroke', data = stroke_dataset, palette="pastel");
plt.subplot(4,2,5)
sb.countplot(x='Residence_type', hue='stroke', data = stroke_dataset, palette="pastel");
plt.subplot(4,2,6)
sb.countplot(x='smoking_status', hue='stroke', data = stroke_dataset, palette="pastel");
```
Notiamo quindi che:
* Ipertensione e cardiopatia sembrano influire abbastanza sugli ictus
* Molti dei pazienti che hanno presentato ictus erano sposati
* La distribuzione rispetto alla zona di residenza è uniforme, circa metà e metà
* Abitudini scorrette con il fumo possono incidere sulla presenza di ictus
* La maggior parte dei pazienti con ictus lavorava nel settore privato o era un lavoratore autonomo
```
corr_matrix = stroke_dataset.corr()
mask = np.triu(np.ones_like(corr_matrix, dtype=bool))
f, ax = plt.subplots(figsize=(11,9))
cmap = sb.diverging_palette(230,20,as_cmap=True)
sb.heatmap(corr_matrix, mask=mask, cmap=cmap, vmax=.3, center=0, square=True, linewidths=.5, cbar_kws={"shrink": .5});
#credits: https://seaborn.pydata.org/examples/many_pairwise_correlations.html?highlight=correlation%20matrix
```
Da questa matrice di correlazione possiamo osservare che:
* L'età sembra influire in modo abbastanza uguale su tutte le altre feature presenti
* Gli ictus sono correlati in modo più forte con l'età, in minor modo con ipertensione, cardiopatia e glucosio mentre molto meno con il bmi
* È probabile che chi ha un livello medio di glucosio nel sangue più alto abbia anche problemi di ipertensione e/o cardiopatia
*Comunque i valori di correlazione non sono molto alti quindi probabilmente sarà necessario usare modelli con feature non lineari*
##Feature Engineering
Andiamo ora a trasformare le varie feature categoriche, queste verranno splittate creando tante feature quanti sono i possibili valori che potevano assumere
* Ad esempio la feature `Residence_type` poteva assumere valori *Urban* o *Rural* quindi verranno create due nuove feature `Residence_type_Urban` e `Residence_type_Rural`, se una determinata istanza aveva valore Urban ora avrà valore 1 nella rispettiva feature e valore 0 nell'altra
In egual modo vengono trasformate tutte le altre feature categoriche
```
categorical_features = ["gender", "work_type", "Residence_type", "smoking_status"]
stroke_dataset = pd.get_dummies(stroke_dataset, columns=categorical_features, prefix=categorical_features)
stroke_dataset.head(10)
```
Un altro aspetto da considerare è che la feature `ever_married` assume valori *Yes* e *No*, quindi andremo a modificarla applicando la seguente trasformazione:
* Yes = 1
* No = 0
```
stroke_dataset["ever_married"] = np.where(stroke_dataset["ever_married"] == "Yes", 1, 0)
stroke_dataset.head(10)
```
In questo modo abbiamo ottenuto tutte feature numeriche su cui possiamo lavorare agilmente
Applichiamo una tecnica di oversampling al dataset per ottenere un bilanciamento delle classi
```
sm = SMOTE(random_state=42)
y = stroke_dataset["stroke"]
X = stroke_dataset.drop("stroke", axis=1)
X_res, y_res = sm.fit_resample(X, y)
```
Dal seguente grafico notiamo che ora le classi sono bilanciate
```
pd.value_counts(y_res).plot.pie(autopct="%.1f%%", title="Stroke");
```
Andiamo ora a suddividere i dati in training set e validations set
```
X_train, X_val, y_train, y_val = train_test_split(
X_res,y_res,
test_size = 1/3,
random_state = 42
)
```
Applichiamo una standardizzazione dei dati
```
scaler = StandardScaler()
X_train_S = scaler.fit_transform(X_train)
X_val_S = scaler.transform(X_val)
```
In tutti i modelli in cui verrà applicata la `Grid Search` useremo una divisione con la classe `StratifiedKFold` in modo da avere in ogni sub-fold la stessa distribuzione per i dati
La seguente funzione calcola l'intervallo di accuratezza per un modello di classificazione con confidenza al 95% dato l'f-1 score secondo la seguente formula:
$$ p = \frac{f + \frac{z^2}{2N} \pm z \sqrt{\frac{f}{N} - \frac{f^2}{N} + \frac{z^2}{4N^2} } }{1+\frac{z^2}{N}} $$
```
def accuracy_interval(f, N=None, z=1.96):
    """Confidence interval for a classifier's accuracy given its f-score.

    Uses the normal-approximation formula

        p = (f + z^2/2N  +/-  z*sqrt(f/N - f^2/N + z^2/4N^2)) / (1 + z^2/N)

    Parameters
    ----------
    f : float
        Observed f1-score (or accuracy) of the model.
    N : int, optional
        Size of the validation set. Defaults to ``len(y_val)`` (notebook
        global), preserving the original one-argument behaviour.
    z : float, optional
        z-value for the desired confidence level (1.96 -> 95%).

    Returns
    -------
    tuple of (float, float)
        Lower and upper bound, each rounded to 4 decimal places.
    """
    if N is None:
        N = len(y_val)  # notebook-global validation set (original behaviour)
    # half-width of the interval before normalisation
    half = z * np.sqrt((f / N) - (f ** 2 / N) + (z ** 2 / (4 * N ** 2)))
    center = f + z ** 2 / (2 * N)
    d = 1 + (z ** 2 / N)
    e_min = (center - half) / d
    e_max = (center + half) / d
    return np.round(e_min, 4), np.round(e_max, 4)
```
Creo un dizionario vuoto in cui man mano inserirò i valori f1-score di ogni modello in modo da poter fare un confronto finale
```
accuracy = {}
```
##Perceptron
```
model = Perceptron(random_state=42)
model.fit(X_train_S, y_train)
y_pred = model.predict(X_val_S)
cm = confusion_matrix(y_val, y_pred)
pd.DataFrame(cm, index=model.classes_, columns=model.classes_)
precision = precision_score(y_val, y_pred, pos_label=0)
recall = recall_score(y_val, y_pred, pos_label=0)
f1 = f1_score(y_val, y_pred, average="macro")
print(f"precision: {precision}, recall: {recall}, f1-score: {f1}")
i = accuracy_interval(f1)
accuracy["Perceptron"] = i
print(i)
```
Aggiungiamo GridSearch e CrossValidation
```
model = Perceptron(random_state=42)
parameters = {
"penalty": [None, "l1", "l2", "elasticnet"],
"alpha": np.logspace(-4, 0, 5),
"tol": np.logspace(-9, 6, 6)
}
skf = StratifiedKFold(3, shuffle=True, random_state=42)
gs = GridSearchCV(model, parameters, cv=skf)
gs.fit(X_train_S, y_train)
pd.DataFrame(gs.cv_results_).sort_values("rank_test_score").head(5)
gs.best_params_
print(f'Best score: {round(gs.best_score_ * 100, 4):.4f}%')
pc_imp = pd.Series(gs.best_estimator_.coef_[0], index=X.columns)
pc_imp.nlargest(4).plot(kind='barh');
```
A seguito della penalizzazione l1, che la GridSearch identifica come parametro migliore, notiamo che le feature più rilevanti sono:
* Età
* Tipo di lavoro: privato
* Livello medio di glucosio nel sangue
```
y_pred = gs.predict(X_val_S)
cm = confusion_matrix(y_val, y_pred)
pd.DataFrame(cm, index=gs.best_estimator_.classes_, columns=gs.best_estimator_.classes_)
precision = precision_score(y_val, y_pred, pos_label=0)
recall = recall_score(y_val, y_pred, pos_label=0)
f1 = f1_score(y_val, y_pred, average="macro")
print(f"precision: {precision}, recall: {recall}, f1-score: {f1}")
i = accuracy_interval(f1)
accuracy["Perceptron with gs"] = i
print(i)
perceptron_mse = mean_squared_error(y_val, y_pred)
print('MSE: {}'.format(perceptron_mse))
```
##Logistic regression
```
model = LogisticRegression(random_state=42, solver="saga")
model.fit(X_train_S, y_train)
y_pred = model.predict(X_val_S)
cm = confusion_matrix(y_val, y_pred)
pd.DataFrame(cm, index=model.classes_, columns=model.classes_)
precision = precision_score(y_val, y_pred, pos_label=0)
recall = recall_score(y_val, y_pred, pos_label=0)
f1 = f1_score(y_val, y_pred, average="macro")
print(f"precision: {precision}, recall: {recall}, f1-score: {f1}")
i = accuracy_interval(f1)
accuracy["Logistic Regression"] = i
print(i)
model = LogisticRegression(random_state=42, solver="saga")
parameters = {
"penalty": ["l1"],
"C": [0.3, 0.8, 1],#np.logspace(-4, 0, 5),
"tol": np.logspace(-9, 6, 6)
}
skf = StratifiedKFold(3, shuffle=True, random_state=42)
gs = GridSearchCV(model, parameters, cv=skf)
gs.fit(X_train_S, y_train)
pd.DataFrame(gs.cv_results_).sort_values("rank_test_score").head(5)
gs.best_params_
print(f'Best score: {round(gs.best_score_ * 100, 4):.4f}%')
pc_imp = pd.Series(gs.best_estimator_.coef_[0], index=X.columns)
pc_imp.nlargest(4).plot(kind='barh');
y_pred = gs.predict(X_val_S)
cm = confusion_matrix(y_val, y_pred)
pd.DataFrame(cm, index=gs.best_estimator_.classes_, columns=gs.best_estimator_.classes_)
precision = precision_score(y_val, y_pred, pos_label=0)
recall = recall_score(y_val, y_pred, pos_label=0)
f1 = f1_score(y_val, y_pred, average="macro")
print(f"precision: {precision}, recall: {recall}, f1-score: {f1}")
i = accuracy_interval(f1)
accuracy["Logistic regression with gs"] = i
print(i)
logisticregression_mse = mean_squared_error(y_val, y_pred)
print('MSE: {}'.format(logisticregression_mse))
```
##SVM
```
model = SVC()
parameters = {
"kernel": ["rbf"],
"C": np.logspace(-2, 0, 3)
}
skf = StratifiedKFold(3, shuffle=True, random_state=42)
gs = GridSearchCV(model, parameters, cv=skf)
gs.fit(X_train_S, y_train)
pd.DataFrame(gs.cv_results_).sort_values("rank_test_score").head(5)
gs.best_params_
print(f'Best score: {round(gs.best_score_ * 100, 4):.4f}%')
svm_imp = pd.Series(gs.best_estimator_.support_vectors_[0], index=X.columns)
svm_imp.nlargest(4).plot(kind='barh');
y_pred = gs.predict(X_val_S)
cm = confusion_matrix(y_val, y_pred)
pd.DataFrame(cm, index=gs.best_estimator_.classes_, columns=gs.best_estimator_.classes_)
precision = precision_score(y_val, y_pred, pos_label=0)
recall = recall_score(y_val, y_pred, pos_label=0)
f1 = f1_score(y_val, y_pred, average="macro")
print(f"precision: {precision}, recall: {recall}, f1-score: {f1}")
i = accuracy_interval(f1)
accuracy["Support Vector Machines"] = i
print(i)
svm_mse = mean_squared_error(y_val, y_pred)
print('MSE: {}'.format(svm_mse))
```
##Decision tree
I parametri che andiamo a testare nella grid search sono:
- `min_samples_split` che è il numero minimo di campioni che deve avere una foglia per poter essere splittata
- `min_samples_leaf` numero minimo di campioni per ogni foglia (i.e. se splittiamo un nodo questo split è valido solo se lascia in ogni foglia che crea almeno min_samples_leaf campioni)
- `max_depth` profondità massima che può raggiungere l'albero (con `None` cresce senza limiti su questo parametro)
- `max_features` il numero massimo di feature da considerare per ogni split
```
model = DecisionTreeClassifier(random_state=42)
num_features = X_train_S.shape[1]
parameters = {
'min_samples_split': range(2, 4, 1),
'min_samples_leaf': range(1, 4, 1),
'max_depth': [None] + [i for i in range(2, 7)],
'max_features': range(2, num_features, 1)}
skf = StratifiedKFold(3, shuffle=True, random_state=42)
gs = GridSearchCV(model, parameters, cv=skf)
gs.fit(X_train_S, y_train)
gs.best_score_
y_pred = gs.predict(X_val_S)
cm = confusion_matrix(y_val, y_pred)
pd.DataFrame(cm, index=gs.best_estimator_.classes_, columns=gs.best_estimator_.classes_)
precision = precision_score(y_val, y_pred, pos_label=0)
recall = recall_score(y_val, y_pred, pos_label=0)
f1 = f1_score(y_val, y_pred, average="macro")
print(f"precision: {precision}, recall: {recall}, f1-score: {f1}")
i = accuracy_interval(f1)
accuracy["Decision tree"] = i
print(i)
decisiontree_mse = mean_squared_error(y_val, y_pred)
print('MSE: {}'.format(decisiontree_mse))
```
###Visualizzazione dell'albero decisionale
Nei nodi troviamo:
- Il criterio con cui viene effettuato il taglio
- Il parametro `gini` che indica la qualità della suddivisione, rappresenta la frequenza con cui un elemento scelto casualmente dall'insieme verrebbe etichettato in modo errato se fosse etichettato casualmente in base alla distribuzione delle etichette nel sottoinsieme, è calcolato come:
$$Gini = 1 - \sum_{i=1}^C p_i^2 $$
$\Rightarrow$ Assume valore 0 quando tutte le istanze nel nodo hanno una stessa label
- Il parametro `samples` che indica la percentuale di campioni presenti in quel nodo
- Il parametro `value` che indica la percentuale di istanze per ogni classe
```
dot_data = tree.export_graphviz(gs.best_estimator_, out_file=None,
feature_names=X.columns,
filled=True,
proportion=True,
max_depth=3)
# Draw graph
graph = graphviz.Source(dot_data, format="png")
graph
```
##XGBoost
```
# Gradient-boosted trees (XGBoost) with a small hyper-parameter grid.
model = XGBClassifier(nthread=8, objective='binary:logistic')
parameters = {
    'eta': [0.002, 0.1, 0.5],   # learning rate
    'max_depth': [6],
    'n_estimators': [150, 300],
    'alpha': [0.0001, 0.001]    # L1 regularisation term
}
# Stratified folds keep the class balance identical in every sub-fold.
skf = StratifiedKFold(3, shuffle=True, random_state=42)
gs = GridSearchCV(model, parameters, cv=skf)
gs.fit(X_train_S, y_train)
gs.best_score_
y_pred = gs.predict(X_val_S)
cm = confusion_matrix(y_val, y_pred)
pd.DataFrame(cm, index=gs.best_estimator_.classes_, columns=gs.best_estimator_.classes_)
precision = precision_score(y_val, y_pred, pos_label=0)
recall = recall_score(y_val, y_pred, pos_label=0)
f1 = f1_score(y_val, y_pred, average="macro")
print(f"precision: {precision}, recall: {recall}, f1-score: {f1}")
i = accuracy_interval(f1)
accuracy["XGBoost"] = i
print(i)
xgboost_mse = mean_squared_error(y_val, y_pred)
# BUG FIX: the original printed decisiontree_mse here (copy-paste error).
print('MSE: {}'.format(xgboost_mse))
```
##Model comparison
Prendiamo una confidenza del 95%, di conseguenza dalla tabella della distribuzione z otteniamo 1.96 come valore per `z`
La seguente funzione implementa il confronto fra due modelli dati i rispettivi errori `e1` ed `e2` e la cardinalità del test set `n` secondo le formule :
$$ d = |e_1 - e_2| $$
$$ \sigma_t^2 = \sigma_1^2 + \sigma_2^2 = \frac{e_1(1-e_1)}{n} + \frac{e_2(1-e_2)}{n}$$
$$d_t = d \pm z_{1-\alpha} \cdot \sigma_t$$
```
def intervallo(mse1, mse2, n=None, z=1.96):
    """Confidence interval (95% by default) for the difference between the
    errors of two models evaluated on the same test set:

        d   = |e1 - e2|
        s^2 = e1(1-e1)/n + e2(1-e2)/n
        dt  = d +/- z * s

    Parameters
    ----------
    mse1, mse2 : float
        Error (MSE / misclassification rate) of the two models.
    n : int, optional
        Test-set size. Defaults to ``len(X_val)`` (notebook global),
        preserving the original two-argument behaviour.
    z : float, optional
        z-value of the confidence level (1.96 -> 95%).

    Returns
    -------
    tuple of (float, float)
        Interval bounds; if the interval contains 0 the difference is not
        statistically significant.
    """
    if n is None:
        n = len(X_val)  # notebook-global validation set (original behaviour)
    d = np.abs(mse1 - mse2)
    variance = (mse1 * (1 - mse1)) / n + (mse2 * (1 - mse2)) / n
    d_min = d - z * np.sqrt(variance)
    d_max = d + z * np.sqrt(variance)
    return d_min, d_max
```
Andiamo a calcolare l'intervallo fra tutte le coppie di modelli
```
from itertools import combinations
mse = [("perceptron_mse", "Perceptron"), ("logisticregression_mse","Logistic Regression"),
("svm_mse", "SVM"), ("decisiontree_mse", "Decision Tree"), ("xgboost_mse", "XGBoost")]
print (f"{'Models':<40} {'Interval':<15}")
for m1, m2 in list(combinations(mse, 2)):
mse1, mse2 = eval(m1[0]), eval(m2[0])
name1, name2 = m1[1], m2[1]
comparison = name1 + " vs " + name2
print (f"{comparison:<40} {np.round(intervallo(mse1 , mse2), 4)} ")
```
###Confronto con un modello casuale
La seguente funzione calcola, come la precedente, l'intervallo per il confronto fra due modelli ma utilizza una confidenza al 99% invece del 95%
```
def intervallo99(mse1, mse2, n=None, z=2.58):
    """Same model-comparison interval as ``intervallo`` but at 99%
    confidence (z = 2.58) by default.

    Parameters
    ----------
    mse1, mse2 : float
        Error of the two models on the same test set.
    n : int, optional
        Test-set size. Defaults to ``len(X_val)`` (notebook global),
        preserving the original two-argument behaviour.
    z : float, optional
        z-value of the confidence level (2.58 -> 99%).

    Returns
    -------
    tuple of (float, float)
        Lower and upper interval bounds.
    """
    if n is None:
        n = len(X_val)  # notebook-global validation set (original behaviour)
    d = np.abs(mse1 - mse2)
    variance = (mse1 * (1 - mse1)) / n + (mse2 * (1 - mse2)) / n
    d_min = d - z * np.sqrt(variance)
    d_max = d + z * np.sqrt(variance)
    return d_min, d_max
# Baseline: a classifier that predicts uniformly at random, used to show
# that every trained model is significantly better than chance.
# NOTE(review): the name `random` shadows the stdlib `random` module if it
# is imported later in the notebook.
random = DummyClassifier(strategy="uniform", random_state=42)
random.fit(X_train_S, y_train)
y_pred = random.predict(X_val_S)
print(random.score(X_val_S, y_val))
mse_random = mean_squared_error(y_val, y_pred)
print(mse_random)
print (f"{'Models':<40} Interval")
for m in mse:
    # m is a (variable-name, display-name) pair; eval() resolves the
    # notebook-global MSE variable by its name (safe here: names are
    # hardcoded above, never user input).
    mse_i = eval(m[0])
    name_i = m[1]
    comparison = name_i + " vs Random"
    print (f"{comparison:<40} {np.round(intervallo99(mse_i , mse_random), 4)} ")
```
Possiamo vedere come la differenza fra tutti i modelli e uno random sia sempre statisticamente significativa quindi i modelli implementati sono tutti accettabili
##Conclusioni
Dalla precedente sezione *Model Comparison* possiamo vedere che fra tutti i modelli la differenza è statisticamente significativa.
Riassunto dei vari f1-score:
```
print (f"{'Model':<40} f-1 score")
for k in accuracy.keys():
print(f"{k:<40} {accuracy[k]}")
```
Visti i risultati ottenuti per l'indice *f1-score* per i vari modelli si può dire che i migliori sono:
- SVM con kernel RBF non lineare
- Decision tree
- XGBoost
Questo probabilmente è dovuto al fatto che inizialmente la correlazione fra le 11 feature di input è abbastanza bassa quindi usando modelli non lineari andiamo a separare meglio i dati.
Questo lo si può notare anche dal fatto che se implementiamo un modello SVM con kernel lineare otteniamo un f-1 score molto più basso (come si vede nell'esempio di seguito)
```
model = SVC()
parameters = {
"kernel": ["linear"],
"C": np.logspace(-2, 0, 3)
}
skf = StratifiedKFold(3, shuffle=True, random_state=42)
gs = GridSearchCV(model, parameters, cv=skf)
gs.fit(X_train_S, y_train)
y_pred = gs.predict(X_val_S)
cm = confusion_matrix(y_val, y_pred)
pd.DataFrame(cm, index=gs.best_estimator_.classes_, columns=gs.best_estimator_.classes_)
X_val.shape
precision = precision_score(y_val, y_pred, pos_label=0)
recall = recall_score(y_val, y_pred, pos_label=0)
f1 = f1_score(y_val, y_pred, average="macro")
print(f"precision: {precision}, recall: {recall}, f1-score: {f1}")
```
| github_jupyter |
# Inference
## Imports & Args
```
import argparse
import json
import logging
import os
import random
from io import open
import numpy as np
import math
import _pickle as cPickle
from scipy.stats import spearmanr
from tensorboardX import SummaryWriter
from tqdm import tqdm
from bisect import bisect
import yaml
from easydict import EasyDict as edict
import sys
import pdb
import torch
import torch.nn.functional as F
import torch.nn as nn
from vilbert.task_utils import (
LoadDatasetEval,
LoadLosses,
ForwardModelsTrain,
ForwardModelsVal,
EvaluatingModel,
)
import vilbert.utils as utils
import torch.distributed as dist
def evaluate(
    args,
    task_dataloader_val,
    task_stop_controller,
    task_cfg,
    device,
    task_id,
    model,
    task_losses,
    epochId,
    default_gpu,
    tbLogger,
):
    """Run one validation pass for a single task and log the results.

    Iterates over the task's validation dataloader, feeds loss/score into
    the tensorboard logger, steps the task's early-stop controller with the
    new validation score, and prints a progress counter on the main GPU.

    NOTE(review): assumes ForwardModelsVal returns (loss, score, batch_size)
    -- it is defined in vilbert.task_utils, not visible here.
    """
    model.eval()
    # Gradients are never used during validation; no_grad() avoids building
    # the autograd graph (saves memory/time without changing any metric).
    with torch.no_grad():
        for i, batch in enumerate(task_dataloader_val[task_id]):
            loss, score, batch_size = ForwardModelsVal(
                args, task_cfg, device, task_id, batch, model, task_losses
            )
            tbLogger.step_val(
                epochId, float(loss), float(score), task_id, batch_size, "val"
            )
            if default_gpu:
                sys.stdout.write("%d/%d\r" % (i, len(task_dataloader_val[task_id])))
                sys.stdout.flush()
    # update the multi-task scheduler.
    task_stop_controller[task_id].step(tbLogger.getValScore(task_id))
    score = tbLogger.showLossVal(task_id, task_stop_controller)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
parser.add_argument(
"--bert_model",
default="bert-base-uncased",
type=str,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.",
)
parser.add_argument(
"--from_pretrained",
default="bert-base-uncased",
type=str,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.",
)
parser.add_argument(
"--output_dir",
default="results",
type=str,
help="The output directory where the model checkpoints will be written.",
)
parser.add_argument(
"--config_file",
default="config/bert_config.json",
type=str,
help="The config file which specified the model details.",
)
parser.add_argument(
"--no_cuda", action="store_true", help="Whether not to use CUDA when available"
)
parser.add_argument(
"--do_lower_case",
default=True,
type=bool,
help="Whether to lower case the input text. True for uncased models, False for cased models.",
)
parser.add_argument(
"--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus",
)
parser.add_argument(
"--seed", type=int, default=42, help="random seed for initialization"
)
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit float precision instead of 32-bit",
)
parser.add_argument(
"--loss_scale",
type=float,
default=0,
help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
"0 (default value): dynamic loss scaling.\n"
"Positive power of 2: static loss scaling value.\n",
)
parser.add_argument(
"--num_workers",
type=int,
default=16,
help="Number of workers in the dataloader.",
)
parser.add_argument(
"--save_name", default="", type=str, help="save name for training."
)
parser.add_argument(
"--use_chunk",
default=0,
type=float,
help="whether use chunck for parallel training.",
)
parser.add_argument(
"--batch_size", default=30, type=int, help="what is the batch size?"
)
parser.add_argument(
"--tasks", default="", type=str, help="1-2-3... training task separate by -"
)
parser.add_argument(
"--in_memory",
default=False,
type=bool,
help="whether use chunck for parallel training.",
)
parser.add_argument(
"--baseline", action="store_true", help="whether use single stream baseline."
)
parser.add_argument("--split", default="", type=str, help="which split to use.")
parser.add_argument(
"--dynamic_attention",
action="store_true",
help="whether use dynamic attention.",
)
parser.add_argument(
"--clean_train_sets",
default=True,
type=bool,
help="whether clean train sets for multitask data.",
)
parser.add_argument(
"--visual_target",
default=0,
type=int,
help="which target to use for visual branch. \
0: soft label, \
1: regress the feature, \
2: NCE loss.",
)
parser.add_argument(
"--task_specific_tokens",
action="store_true",
help="whether to use task specific tokens for the multi-task learning.",
)
```
## load the textual input
```
args = parser.parse_args(['--bert_model', 'bert-base-uncased',
'--from_pretrained', 'save/NLVR2_bert_base_6layer_6conect-finetune_from_multi_task_model-task_12/pytorch_model_19.bin',
'--config_file', 'config/bert_base_6layer_6conect.json',
'--tasks', '19',
'--split', 'trainval_dc', # this is the deep captions training split
'--save_name', 'task-19',
'--task_specific_tokens',
'--batch_size', '128'])
with open("vilbert_tasks.yml", "r") as f:
task_cfg = edict(yaml.safe_load(f))
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.baseline:
from pytorch_transformers.modeling_bert import BertConfig
from vilbert.basebert import BaseBertForVLTasks
else:
from vilbert.vilbert import BertConfig
from vilbert.vilbert import VILBertForVLTasks
task_names = []
for i, task_id in enumerate(args.tasks.split("-")):
task = "TASK" + task_id
name = task_cfg[task]["name"]
task_names.append(name)
# timeStamp = '-'.join(task_names) + '_' + args.config_file.split('/')[1].split('.')[0]
timeStamp = args.from_pretrained.split("/")[-1] + "-" + args.save_name
savePath = os.path.join(args.output_dir, timeStamp)
config = BertConfig.from_json_file(args.config_file)
if args.task_specific_tokens:
config.task_specific_tokens = True
if args.local_rank == -1 or args.no_cuda:
device = torch.device(
"cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu"
)
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
n_gpu = 1
# Initializes the distributed backend which will take care of sychronizing nodes/GPUs
torch.distributed.init_process_group(backend="nccl")
logger.info(
"device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
device, n_gpu, bool(args.local_rank != -1), args.fp16
)
)
default_gpu = False
if dist.is_available() and args.local_rank != -1:
rank = dist.get_rank()
if rank == 0:
default_gpu = True
else:
default_gpu = True
if default_gpu and not os.path.exists(savePath):
os.makedirs(savePath)
task_batch_size, task_num_iters, task_ids, task_datasets_val, task_dataloader_val = LoadDatasetEval(
args, task_cfg, args.tasks.split("-")
)
tbLogger = utils.tbLogger(
timeStamp,
savePath,
task_names,
task_ids,
task_num_iters,
1,
save_logger=False,
txt_name="eval.txt",
)
# num_labels = max([dataset.num_labels for dataset in task_datasets_val.values()])
if args.dynamic_attention:
config.dynamic_attention = True
if "roberta" in args.bert_model:
config.model = "roberta"
if args.visual_target == 0:
config.v_target_size = 1601
config.visual_target = args.visual_target
else:
config.v_target_size = 2048
config.visual_target = args.visual_target
if args.task_specific_tokens:
config.task_specific_tokens = True
task_batch_size, task_num_iters, task_ids, task_datasets_val, task_dataloader_val
len(task_datasets_val['TASK19']), len(task_dataloader_val['TASK19'])
```
## load the pretrained model
```
num_labels = 0
if args.baseline:
model = BaseBertForVLTasks.from_pretrained(
args.from_pretrained,
config=config,
num_labels=num_labels,
default_gpu=default_gpu,
)
else:
model = VILBertForVLTasks.from_pretrained(
args.from_pretrained,
config=config,
num_labels=num_labels,
default_gpu=default_gpu,
)
task_losses = LoadLosses(args, task_cfg, args.tasks.split("-"))
model.to(device)
if args.local_rank != -1:
try:
from apex.parallel import DistributedDataParallel as DDP
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training."
)
model = DDP(model, delay_allreduce=True)
elif n_gpu > 1:
model = nn.DataParallel(model)
```
## Propagate Training Split
```
print("***** Running evaluation *****")
print(" Num Iters: ", task_num_iters)
print(" Batch size: ", task_batch_size)
pooled_output_mul_list, pooled_output_sum_list, pooled_output_t_list, pooled_output_v_list = list(), list(), list(), list()
targets_list = list()
model.eval()
# when run evaluate, we run each task sequentially.
for task_id in task_ids:
results = []
others = []
for i, batch in enumerate(task_dataloader_val[task_id]):
loss, score, batch_size, results, others, target = EvaluatingModel(
args,
task_cfg,
device,
task_id,
batch,
model,
task_dataloader_val,
task_losses,
results,
others,
)
pooled_output_mul_list.append(model.pooled_output_mul)
pooled_output_sum_list.append(model.pooled_output_sum)
pooled_output_t_list.append(model.pooled_output_t)
pooled_output_v_list.append(model.pooled_output_v)
targets_list.append(target)
tbLogger.step_val(0, float(loss), float(score), task_id, batch_size, "val")
sys.stdout.write("%d/%d\r" % (i, len(task_dataloader_val[task_id])))
sys.stdout.flush()
# save the result or evaluate the result.
ave_score = tbLogger.showLossVal(task_id)
if args.split:
json_path = os.path.join(savePath, args.split)
else:
json_path = os.path.join(savePath, task_cfg[task_id]["val_split"])
json.dump(results, open(json_path + "_result.json", "w"))
json.dump(others, open(json_path + "_others.json", "w"))
```
## save ViLBERT output
```
pooled_output_mul = torch.cat(pooled_output_mul_list, 0)
pooled_output_sum = torch.cat(pooled_output_sum_list, 0)
pooled_output_t = torch.cat(pooled_output_t_list, 0)
pooled_output_v = torch.cat(pooled_output_v_list, 0)
concat_pooled_output = torch.cat([pooled_output_t, pooled_output_v], 1)
targets = torch.cat(targets_list, 0)
targets
train_save_path = "datasets/ME/out_features/train_dc_features_nlvr2.pkl"
pooled_dict = {
"pooled_output_mul": pooled_output_mul,
"pooled_output_sum": pooled_output_sum,
"pooled_output_t": pooled_output_t,
"pooled_output_v": pooled_output_v,
"concat_pooled_output": concat_pooled_output,
"targets": targets,
}
pooled_dict.keys()
cPickle.dump(pooled_dict, open(train_save_path, 'wb'))
#cPickle.dump(val_pooled_dict, open(val_save_path, 'wb'))
```
# Training a Regressor
```
import torch
import torch.nn as nn
import torch.utils.data as Data
from torch.autograd import Variable
from statistics import mean
import matplotlib.pyplot as plt
import _pickle as cPickle
from tqdm import tqdm
from scipy.stats import spearmanr
train_save_path = "datasets/ME/out_features/train_dc_features_nlvr2.pkl"
# val_save_path = "datasets/ME/out_features/val_features.pkl"
pooled_dict = cPickle.load(open(train_save_path, 'rb'))
#val_pooled_dict = cPickle.load(open(val_save_path, 'rb'))
pooled_output_mul = pooled_dict["pooled_output_mul"]
pooled_output_sum = pooled_dict["pooled_output_sum"]
pooled_output_t = pooled_dict["pooled_output_t"]
pooled_output_v = pooled_dict["pooled_output_v"]
concat_pooled_output = pooled_dict["concat_pooled_output"]
targets = pooled_dict["targets"]
indices = {
"0": {},
"1": {},
"2": {},
"3": {},
}
import numpy as np
from sklearn.model_selection import KFold
kf = KFold(n_splits=4)
for i, (train_index, test_index) in enumerate(kf.split(pooled_output_mul)):
indices[str(i)]["train"] = train_index
indices[str(i)]["test"] = test_index
class Net(nn.Module):
    """Two-hidden-layer MLP regressor with GELU activations.

    Maps a feature vector of ``input_size`` to ``num_scores`` outputs
    through hidden layers of ``hidden_size_1`` and ``hidden_size_2`` units.
    """

    def __init__(self, input_size, hidden_size_1, hidden_size_2, num_scores):
        super(Net, self).__init__()
        # FIX: the original used `GeLU()`, which is not defined or imported
        # anywhere in the visible notebook code; nn.GELU is the built-in
        # torch equivalent.
        self.out = nn.Sequential(
            nn.Linear(input_size, hidden_size_1),
            nn.GELU(),
            nn.Linear(hidden_size_1, hidden_size_2),
            nn.GELU(),
            nn.Linear(hidden_size_2, num_scores)
        )

    def forward(self, x):
        return self.out(x)
class LinNet(nn.Module):
    """Purely linear regressor: two stacked Linear layers with no
    activation in between, i.e. a rank-constrained linear map from
    ``input_size`` to ``num_scores`` through a ``hidden_size_1``
    bottleneck."""

    def __init__(self, input_size, hidden_size_1, num_scores):
        super(LinNet, self).__init__()
        stages = [
            nn.Linear(input_size, hidden_size_1),
            nn.Linear(hidden_size_1, num_scores),
        ]
        # Sequential keeps state_dict keys ("out.0.*", "out.1.*") identical
        # to the sibling network classes.
        self.out = nn.Sequential(*stages)

    def forward(self, x):
        return self.out(x)
class SimpleLinNet(nn.Module):
    """Single-layer linear regressor (plain linear regression)."""

    def __init__(self, input_size, num_scores):
        super(SimpleLinNet, self).__init__()
        # Wrapped in a Sequential so the state_dict key ("out.0.*")
        # matches the other network classes in this notebook.
        self.out = nn.Sequential(nn.Linear(input_size, num_scores))

    def forward(self, x):
        return self.out(x)
class SigLinNet(nn.Module):
    """Three-hidden-layer MLP with a sigmoid after every hidden Linear
    layer and a linear output head."""

    def __init__(self, input_size,
                 hidden_size_1,
                 hidden_size_2,
                 hidden_size_3,
                 num_scores):
        super(SigLinNet, self).__init__()
        widths = [input_size, hidden_size_1, hidden_size_2, hidden_size_3]
        stages = []
        # Linear + Sigmoid for each hidden layer; indices inside the
        # Sequential (out.0 .. out.6) match the original hand-written list.
        for n_in, n_out in zip(widths, widths[1:]):
            stages.append(nn.Linear(n_in, n_out))
            stages.append(nn.Sigmoid())
        stages.append(nn.Linear(widths[-1], num_scores))
        self.out = nn.Sequential(*stages)

    def forward(self, x):
        return self.out(x)
class ReLuLinNet(nn.Module):
    """Two-hidden-layer MLP: (Linear -> ReLU -> Dropout(0.1)) twice,
    followed by a linear output head."""

    def __init__(self, input_size, hidden_size_1, hidden_size_2, num_scores):
        super(ReLuLinNet, self).__init__()
        widths = [input_size, hidden_size_1, hidden_size_2]
        stages = []
        # Same module order/indices (out.0 .. out.6) as the original
        # hand-written Sequential, so checkpoints stay compatible.
        for n_in, n_out in zip(widths, widths[1:]):
            stages.extend([nn.Linear(n_in, n_out), nn.ReLU(), nn.Dropout(0.1)])
        stages.append(nn.Linear(widths[-1], num_scores))
        self.out = nn.Sequential(*stages)

    def forward(self, x):
        return self.out(x)
def train_reg(inputs, targets, input_size, output_size, split, model, batch_size, epoch, lr, score, *argv):
    """Train one regressor per cross-validation split and return them all.

    Parameters:
        inputs: tensor of pooled ViLBERT features (rows = samples).
        targets: tensor of scores; column 0 is used for "stm" and column 1
            for "ltm" -- presumably short/long-term memorability, TODO
            confirm against the dataset.
        input_size / output_size: sizes forwarded to the model constructor.
        split: dict {"0".."k-1": {"train": idx, "test": idx}} from KFold.
        model: network class to instantiate (e.g. SigLinNet).
        batch_size, epoch, lr: usual training hyper-parameters.
        score: "both", "stm" or "ltm" -- which target column(s) to fit.
        *argv: hidden-layer sizes forwarded to the model constructor.

    Returns:
        (nets, los): the trained network for each split and its per-epoch
        mean training loss.

    NOTE(review): requires a CUDA device (net.cuda()); seeding and shuffle
    order are behaviour-relevant, so statement order must not change.
    """
    torch.manual_seed(42)  # reproducible init and shuffling
    nets = []
    los = []
    for i in range(len(split)):
        ind = list(split[str(i)]["train"])
        # Select both target columns or a single one, reshaped to (N, 1).
        if score == "both":
            torch_dataset = Data.TensorDataset(inputs[ind], targets[ind])
        elif score == "stm":
            torch_dataset = Data.TensorDataset(inputs[ind], targets[ind,0].reshape(-1,1))
        elif score == "ltm":
            torch_dataset = Data.TensorDataset(inputs[ind], targets[ind,1].reshape(-1,1))
        loader = Data.DataLoader(
            dataset=torch_dataset,
            batch_size=batch_size,
            shuffle=True
        )
        net = model(input_size, *argv, output_size)
        net.cuda()  # training assumes a GPU is available
        optimizer = torch.optim.Adam(net.parameters(), lr=lr, weight_decay=1e-4)
        loss_func = torch.nn.MSELoss()
        losses = []
        net.train()
        for _ in tqdm(range(epoch), desc="Split %d" % i):
            errors = []
            for step, (batch_in, batch_out) in enumerate(loader):
                optimizer.zero_grad()
                # Variable() is a no-op wrapper on modern torch (>=0.4)
                b_in = Variable(batch_in)
                b_out = Variable(batch_out)
                prediction = net(b_in)
                loss = loss_func(prediction, b_out)
                errors.append(loss.item())
                loss.backward()
                optimizer.step()
            losses.append(mean(errors))  # mean training loss for this epoch
            #if not (epoch+1) % 10:
            #    print('Epoch {}: train loss: {}'.format(epoch+1, mean(errors))
        nets.append(net)
        los.append(losses)
    return nets, los
def test_reg(nets, inputs, targets, split, score):
    """Evaluate each trained net on its held-out split.

    For every net, the split's test rows are loaded in one batch
    (VAL_BATCH_SIZE, a notebook global, must be >= the split size) and the
    MSE plus the Spearman rank correlation between predicted and true
    scores are computed.

    Parameters mirror ``train_reg``; ``score`` selects which target
    column(s) were trained ("both", "stm" or "ltm").

    Returns:
        (rhos, losses): dict of "stm"/"ltm" correlation lists and the
        per-split MSE losses.
    """
    losses = list()
    rhos = {"stm": [], "ltm": []}
    loss_func = torch.nn.MSELoss()
    for i, net in enumerate(nets):
        ind = list(split[str(i)]["test"])
        if score == "both":
            torch_dataset_val = Data.TensorDataset(inputs[ind], targets[ind])
        elif score == "stm":
            torch_dataset_val = Data.TensorDataset(inputs[ind], targets[ind,0].reshape(-1,1))
        elif score == "ltm":
            torch_dataset_val = Data.TensorDataset(inputs[ind], targets[ind,1].reshape(-1,1))
        loader_val = Data.DataLoader(
            dataset=torch_dataset_val,
            batch_size=VAL_BATCH_SIZE,
            shuffle=False
        )
        # FIX: next(it) instead of it.next() -- the .next() method is
        # Python 2 / old-torch only and raises AttributeError on current
        # DataLoader iterators.
        in_, out_ = next(iter(loader_val))
        curr_net = net
        curr_net.eval()
        with torch.no_grad():  # inference only; no autograd graph needed
            pred_scores = curr_net(in_)
            loss = loss_func(pred_scores, out_)
        losses.append(loss.item())
        pred_np = pred_scores.cpu().detach().numpy()
        true_np = out_.cpu().detach().numpy()
        if score == "both":
            r, _ = spearmanr(pred_np[:, 0], true_np[:, 0], axis=0)
            rhos["stm"].append(r)
            r, _ = spearmanr(pred_np[:, 1], true_np[:, 1], axis=0)
            rhos["ltm"].append(r)
        else:
            # FIX: with a single target column, indexing [:, 1] raised
            # IndexError in the original; file the correlation under the
            # score that was actually evaluated.
            r, _ = spearmanr(pred_np[:, 0], true_np[:, 0], axis=0)
            rhos[score].append(r)
    return rhos, losses
BATCH_SIZE = 128
VAL_BATCH_SIZE = 2000
EPOCH = 200
lr = 4e-4
```
## 1024-input train
```
nets, los = train_reg(
pooled_output_v,
targets,
1024, # input size
2, # output size
indices, # train and validation indices for each split
SigLinNet, # model class to be used
BATCH_SIZE,
EPOCH,
lr,
"both", # predict both scores
512, 64, 32 # sizes of hidden network layers
)
for l in los:
plt.plot(l[3:])
plt.yscale('log')
```
## 1024-input test
```
rhos, losses = test_reg(nets, pooled_output_v, targets, indices, "both")
rhos
mean(rhos["stm"]), mean(rhos["ltm"])
```
## 2048-input train
```
nets_2, los_2 = train_reg(
concat_pooled_output,
targets,
2048,
2,
indices,
SigLinNet,
BATCH_SIZE,
EPOCH,
lr,
"both",
512, 64, 32
)
for l in los_2:
plt.plot(l[3:])
plt.yscale('log')
```
## 2048-input test
```
rhos_2, losses_2 = test_reg(nets_2, concat_pooled_output, targets, indices, "both")
rhos_2
mean(rhos_2["stm"]), mean(rhos_2["ltm"])
```
| github_jupyter |
```
import datetime
import time
import json
import os
import string
import requests
import sys
import traceback
import azure.cosmos.cosmos_client as cosmos_client
from helpers import keys
from helpers import nlp_helper
from gremlin_python.driver import client, serializer
# Connection settings for the Cosmos DB SQL database and its three containers.
config = {
    'ENDPOINT': keys.cosmos_uri,
    'PRIMARYKEY': keys.cosmos_primary_key,
    'DATABASE': 'NetOwl',
    'CONTAINER': 'Entities',
    'LINK-CONTAINER': 'Links',
    'EVENT-CONTAINER': 'Events'
}
docs_path = r'C:\Users\jame9353\Box Sync\Data\Early Bird'  # folder watched for new .htm documents
json_out_dir = r'C:\Data\json'  # where NetOwl JSON responses are written
geoevent_url = r'https://ge-1.eastus.cloudapp.azure.com:6143/geoevent/rest/receiver/netowl-geoentities-in'
out_ext = ".json"
print("Connecting to Cosmos DB Graph Client...")
# SECURITY: the Gremlin password is hard-coded here while the SQL-API keys
# come from helpers.keys — this credential should be moved there too.
graph_client = client.Client('wss://pilot-graph.gremlin.cosmosdb.azure.com:443/','g',
                             username="/dbs/NetOwl/colls/Links",
                             password="whE1lJjFxzVSCQ7ppNDc5hMCwNl7x8C0BeMTF6dGq4pTN3c8qDVyUBLutYwQZJW1haxJP6W8wckzqBepDcGlAQ==",
                             message_serializer=serializer.GraphSONMessageSerializer()
                             )
print("Successfully connected to Cosmos DB Graph Client")
# Initialize the Cosmos client
print("Connecting to Cosmos DB SQL API...")
# NOTE: this rebinds the name `client`, shadowing the gremlin_python
# `client` module imported above. graph_client was already constructed,
# so it still works, but the shadowing is fragile.
client = cosmos_client.CosmosClient(url_connection=config['ENDPOINT'], auth={
    'masterKey': config['PRIMARYKEY']})
print("Creating Database...")
# Create a database
# NOTE(review): presumably fails if the database/containers already exist
# from a previous run — confirm against the SDK version in use.
db = client.CreateDatabase({'id': config['DATABASE']})
# Create container options
options = {
    'offerThroughput': 400
}
container_definition = {
    'id': config['CONTAINER']
}
link_container_definition = {
    'id': config['LINK-CONTAINER']
}
event_container_definition = {
    'id': config['EVENT-CONTAINER']
}
# Create a container for Entities
print("Creating " + str(config['CONTAINER']) + " container...")
container = client.CreateContainer(db['_self'], container_definition, options)
# Create a container for Links
print("Creating " + str(config['LINK-CONTAINER']) + " container...")
link_container = client.CreateContainer(db['_self'], link_container_definition, options)
# Create a container for Events
print("Creating " + str(config['EVENT-CONTAINER']) + " container...")
event_container = client.CreateContainer(db['_self'], event_container_definition, options)
numchars = 100 # number of characters to retrieve for head/tail
# Function to watch a folder and detect new images on a 1 second refresh interval
#before = dict ([(f, None) for f in os.listdir (docs_path)])
before = {}   # snapshot of the watched folder from the previous poll
count = 0     # number of documents processed so far
errors = 0    # NOTE(review): never incremented anywhere below — dead counter?
print("Beginning monitor of " + str(docs_path) + " at " + str(datetime.datetime.now()))
while True:
    # FIX: poll once per second as documented above; without this sleep the
    # loop busy-waits, re-listing the directory as fast as a core allows.
    time.sleep(1)
    # Compares the folder contents after the sleep to what existed beforehand, and makes a list of adds and removes
    after = dict ([(f, None) for f in os.listdir (docs_path)])
    added = [f for f in after if not f in before]
    removed = [f for f in before if not f in after]
    if added: print("Added: ", ", ".join (added))
    if removed: print("Removed: ", ", ".join (removed))
    before = after
    for filename in added:
        if filename.endswith(".htm"):
            print("Processing " + filename + " at " + str(datetime.datetime.now()))
            start = time.time()
            # create empty lists for objects
            rdfobjs = []
            rdfobjsGeo = []
            linkobjs = []
            eventobjs = []
            orgdocs = []
            haslinks = False
            bigstring = "" # keeps track of what was sent
            newhead = "" # empty string to catch empty head/tail
            newtail = ""
            filepath = os.path.join(docs_path, filename)
            # Send the document to NetOwl for extraction; the helper writes
            # the JSON response into json_out_dir.
            nlp_helper.netowl_curl(filepath, json_out_dir, out_ext, keys.netowl_key)
            outfile = os.path.join(json_out_dir, filename + out_ext)
            with open(outfile, 'r', encoding="utf-8") as file:
                rdfstring = json.load(file)
            uniquets = str(time.time()) # unique time stamp for each doc
            doc = rdfstring['document'][0] # gets main part
            if 'text' in doc:
                v = doc['text'][0]
                if 'content' in v:
                    bigstring = v['content']
            if 'entity' not in doc:
                print("ERROR: Nothing returned from NetOwl, or other unspecified error.") # NOQA E501
                # NOTE(review): this abandons the remaining newly-added files
                # for this poll cycle as well — confirm that is intended.
                break
            ents = (doc['entity']) # gets all entities in doc
            for e in ents:
                # gather data from each entity
                # rdfvalue = nof.cleanup_text(e['value']) # value (ie name)
                rdfvalue = nlp_helper.cleanup_text(e['value']) # value (ie name)
                rdfid = e['id']
                rdfid = filename.split(".")[0] + "-" + rdfid # unique to each entity
                # test for geo (decide which type of obj to make - geo or non-geo)
                if 'geodetic' in e:
                    if 'link-ref' in e:
                        refrels = []
                        linkdescs = []
                        haslinks = True
                        for k in e['link-ref']: # every link-ref per entity
                            refrels.append(k['idref']) # keep these - all references # noqa: E501
                            if 'role-type' in k: # test the role type is source # noqa: E501
                                if k['role-type'] == "source":
                                    linkdesc = rdfvalue + " is a " + k['role'] + " in " + k['entity-arg'][0]['value'] # noqa: E501
                                    linkdescs.append(linkdesc)
                                else:
                                    linkdescs.append("This item has parent links but no children") # noqa: E501
                    else:
                        haslinks = False
                    if 'entity-ref' in e:
                        isGeo = False # already plotted, relegate to rdfobj list # noqa: E501
                    else:
                        lat = float(e['geodetic']['latitude'])
                        longg = float(e['geodetic']['longitude'])
                        isGeo = True
                else:
                    isGeo = False
                # check for addresses
                if e['ontology'] == "entity:address:mail":
                    address = e['value']
                    # location = nof.geocode_address(address) # returns x,y
                    # FIX: the bare name `geocode_address` was a NameError;
                    # the helper lives in nlp_helper like the other former
                    # nof.* calls (cleanup_text, netowl_curl, get_head, ...).
                    location = nlp_helper.geocode_address(address) # returns x,y
                    isGeo = True
                    # set lat long
                    lat = location['y']
                    longg = location['x']
                # check for links
                if 'link-ref' in e:
                    refrels = []
                    linkdescs = []
                    haslinks = True
                    for k in e['link-ref']: # every link-ref per entity
                        refrels.append(k['idref']) # keep these - all references # noqa: E501
                        if 'role-type' in k: # test the role type is source # noqa: E501
                            if k['role-type'] == "source":
                                linkdesc = rdfvalue + " is a " + k['role'] + " in " + k['entity-arg'][0]['value'] # noqa: E501
                                linkdescs.append(linkdesc)
                            else:
                                linkdescs.append("This item has parent links but no children") # noqa: E501
                else:
                    haslinks = False
                # set up head and tail (surrounding context snippets)
                if 'entity-mention' in e:
                    em = e['entity-mention'][0]
                    if 'head' in em:
                        newhead = nlp_helper.get_head(bigstring, int(em['head']), numchars)
                    if 'tail' in em:
                        newtail = nlp_helper.get_tail(bigstring, int(em['tail']), numchars)
                else:
                    em = None
                if isGeo:
                    if haslinks:
                        # add refrels to new obj
                        rdfobj = nlp_helper.RDFitemGeo(rdfid, rdfvalue, longg, lat, uniquets, filename, # noqa: E501
                                                       refrels)
                        ld = str(linkdescs)
                        if len(ld) > 255:
                            ld = ld[:254] # shorten long ones
                        rdfobj.set_link_details(ld)
                    else:
                        rdfobj = nlp_helper.RDFitemGeo(rdfid, rdfvalue, longg, lat, uniquets, filename) # noqa: E501
                        rdfobj.set_link_details("No links for this point")
                    # set type for symbology
                    rdfobj.set_type("placename") # default
                    rdfobj.set_subtype("unknown") # default
                    if e['ontology'] == "entity:place:city":
                        rdfobj.set_type("placename")
                        rdfobj.set_subtype("city")
                    if e['ontology'] == "entity:place:country":
                        rdfobj.set_type("placename")
                        rdfobj.set_subtype("country")
                    if e['ontology'] == "entity:place:province":
                        rdfobj.set_type("placename")
                        rdfobj.set_subtype("province")
                    if e['ontology'] == "entity:place:continent":
                        rdfobj.set_type("placename")
                        rdfobj.set_subtype("continent")
                    if e['ontology'] == "entity:numeric:coordinate:mgrs":
                        rdfobj.set_type("coordinate")
                        rdfobj.set_subtype("MGRS")
                    if e['ontology'] == "entity:numeric:coordinate:latlong": # noqa: E501
                        rdfobj.set_type("coordinate")
                        rdfobj.set_subtype("latlong")
                    if e['ontology'] == "entity:address:mail":
                        rdfobj.set_type("address")
                        rdfobj.set_subtype("mail")
                    if e['ontology'] == "entity:place:other":
                        rdfobj.set_type("placename")
                        rdfobj.set_subtype("descriptor")
                    if e['ontology'] == "entity:place:landform":
                        rdfobj.set_type("placename")
                        rdfobj.set_subtype("landform")
                    if e['ontology'] == "entity:organization:facility":
                        rdfobj.set_type("placename")
                        rdfobj.set_subtype("facility")
                    if e['ontology'] == "entity:place:water":
                        rdfobj.set_type("placename")
                        rdfobj.set_subtype("water")
                    if e['ontology'] == "entity:place:county":
                        rdfobj.set_type("placename")
                        rdfobj.set_subtype("county")
                    rdfobj.set_head(newhead)
                    rdfobj.set_tail(newtail)
                    item = rdfobj.toJSON()
                    # Persist the geo entity, then forward it to GeoEvent.
                    cosmos_item = client.CreateItem(container['_self'],{
                        "head": rdfobj.head,
                        "id": rdfobj.id,
                        "lat": rdfobj.lat,
                        "linkdetails": rdfobj.linkdetails,
                        "links": rdfobj.links,
                        "long": rdfobj.long,
                        "orgdoc": rdfobj.orgdoc,
                        "subtype": rdfobj.subtype,
                        "tail": rdfobj.tail,
                        "timest": rdfobj.timest,
                        "type": rdfobj.type,
                        "value": rdfobj.value
                    })
                    rdfobjsGeo.append(rdfobj)
                    nlp_helper.post_to_geoevent(item, geoevent_url)
                else: # not geo
                    ontology = e['ontology']
                    if haslinks:
                        rdfobj = nlp_helper.RDFitem(rdfid, rdfvalue, uniquets, filename, ontology, refrels) # noqa: E501
                    else: # has neither links nor address
                        rdfobj = nlp_helper.RDFitem(rdfid, rdfvalue, uniquets, filename, ontology)
                    rdfobj.set_head(newhead)
                    rdfobj.set_tail(newtail)
                    cosmos_item = client.CreateItem(container['_self'],{
                        "value": rdfobj.value,
                        "links": rdfobj.links,
                        "orgdoc": rdfobj.orgdoc,
                        "id": rdfobj.id,
                        "type": rdfobj.type,
                        "head": rdfobj.head,
                        "tail": rdfobj.tail
                    })
                    rdfobjs.append(rdfobj)
            if 'link' in doc:
                linksys = (doc['link'])
                for l in linksys:
                    linkid = filename.split(".")[0] + "-" + l['id']
                    if 'entity-arg' in l:
                        fromid = filename.split(".")[0] + "-" + l['entity-arg'][0]['idref']
                        toid = filename.split(".")[0] + "-" + l['entity-arg'][1]['idref']
                        fromvalue = l['entity-arg'][0]['value']
                        tovalue = l['entity-arg'][1]['value']
                        fromrole = l['entity-arg'][0]['role']
                        torole = l['entity-arg'][1]['role']
                        fromroletype = l['entity-arg'][0]['role-type']
                        toroletype = l['entity-arg'][1]['role-type']
                        # build link objects
                        linkobj = nlp_helper.RDFlinkItem(linkid, fromid, toid, fromvalue, tovalue,
                                                         fromrole, torole, fromroletype,
                                                         toroletype, uniquets)
                        cosmos_link_item = client.CreateItem(link_container['_self'],{
                            "linkid": linkobj.linkid,
                            "fromid": linkobj.fromid,
                            "toid": linkobj.toid,
                            "fromvalue": linkobj.fromvalue,
                            "tovalue": linkobj.tovalue,
                            "fromrole": linkobj.fromrole,
                            "torole": linkobj.torole,
                            "fromroletype": linkobj.fromroletype,
                            "toroletype": linkobj.toroletype
                        })
                        linkobjs.append(linkobj)
                        _gremlin_insert_vertices = ["g.addV('{0}').property('type', '{1}').property('id', '{0}')".format(fromvalue, fromroletype),
                                                    "g.addV('{0}').property('type', '{1}').property('id', '{0}')".format(tovalue, toroletype)]
                        _gremlin_insert_edges = ["g.V('{0}').addE('linked').to(g.V('{1}'))".format(fromvalue, tovalue)]
                        #try:
                        #nlp_helper.insert_vertices(_gremlin_insert_vertices, graph_client)
                        #except:
                        #print('Error on node insertion')
                        #nlp_helper.insert_edges(_gremlin_insert_edges, graph_client)
            if 'event' in doc:
                events = doc['event']
                for e in events:
                    evid = e['id']
                    evvalue = e['value']
                    if 'entity-arg' in e:
                        fromid = filename.split(".")[0] + "-" + e['entity-arg'][0]['idref']
                        fromvalue = e['entity-arg'][0]['value']
                        fromrole = e['entity-arg'][0]['role']
                        if len(e['entity-arg']) > 1:
                            toid = filename.split(".")[0] + "-" + e['entity-arg'][1]['idref']
                            tovalue = e['entity-arg'][1]['value']
                            torole = e['entity-arg'][1]['role']
                        else:
                            toid = None
                            tovalue = None
                            torole = None
                        eventobj = nlp_helper.RDFeventItem(evvalue, evid, fromid, toid,
                                                           fromvalue, tovalue, fromrole,
                                                           torole, filename, uniquets)
                        cosmos_event_item = client.CreateItem(event_container['_self'],{
                            "eventvalue": eventobj.eventvalue,
                            "eventid": eventobj.eventid,
                            "fromid": eventobj.fromid,
                            "toid": eventobj.toid,
                            "fromvalue": eventobj.fromvalue,
                            "tovalue": eventobj.tovalue,
                            "fromrole": eventobj.fromrole,
                            "torole": eventobj.torole,
                            "orgdoc": eventobj.orgdoc
                        })
                        eventobjs.append(eventobj)
                        # NOTE(review): fromroletype/toroletype below are only
                        # assigned in the link loop above, so these vertex
                        # strings use stale (or undefined) values here — looks
                        # like a copy/paste slip; confirm intended properties.
                        _gremlin_insert_vertices = ["g.addV('{0}').property('type', '{1}').property('id', '{0}')".format(fromvalue, fromroletype),
                                                    "g.addV('{0}').property('type', '{1}').property('id', '{0}')".format(tovalue, toroletype)]
                        _gremlin_insert_edges = ["g.V('{0}').addE('{1}').to(g.V('{2}'))".format(fromvalue, fromrole, tovalue)]
                        #try:
                        #nlp_helper.insert_vertices(_gremlin_insert_vertices, graph_client)
                        #except:
                        #print('Error on node insertion')
                        #nlp_helper.insert_edges(_gremlin_insert_edges, graph_client)
            end = time.time()
            process_time = end - start
            print("Non-Geospatial Entities Found: " + str(len(rdfobjs)))
            print("Geospatial Entities Found: " + str(len(rdfobjsGeo)))
            print("Links Found: " + str(len(linkobjs)))
            print("Events Found: " + str(len(eventobjs)))
            print("Process took {0} seconds".format(str(process_time)))
            count += 1
            if count > 4:
                print("Exiting")
                break
```
| github_jupyter |
# Day 9 - Finding the sum, again, with a running series
* https://adventofcode.com/2020/day/9
This looks to be a variant of the [day 1, part 1 puzzle](./Day%2001.ipynb); finding the sum of two numbers in a set. Only now, we have to make sure we know what number to remove as we progress! This calls for a _sliding window_ iterator really, where we view the whole series through a slit X entries wide as it moves along the inputs.
As this puzzle is easier with a set of numbers, I create a sliding window of size `preamble + 2`, so we have access to the value to be removed and the value to be checked, at the same time; to achieve this, I created a window function that takes an *offset*, where you can take `offset` fewer items at the start, then have the window grow until it reaches the desired size:
```
from collections import deque
from itertools import islice
from typing import Iterable, Iterator, TypeVar
T = TypeVar("T")
def window(iterable: Iterable[T], n: int = 2, offset: int = 0) -> Iterator[T]:
    """Slide a bounded view across *iterable*.

    The first yield holds the initial ``n - offset`` elements; every later
    yield re-uses the *same* deque after appending one more element, so the
    view grows until it holds ``n`` items and then slides.  Because one deque
    object is aliased by all yields, callers must consume each view before
    advancing the iterator.
    """
    source = iter(iterable)
    view: deque[T] = deque(islice(source, n - offset), maxlen=n)
    yield view
    for item in source:
        view.append(item)
        yield view
def next_invalid(numbers: Iterable[int], preamble: int = 25) -> int:
    """Return the first value that is NOT a sum of two distinct numbers
    among the `preamble` values immediately preceding it.

    The sliding window is implemented inline: `win` holds up to
    ``preamble + 2`` recent values, and `pool` is a set containing exactly
    the `preamble` candidates preceding the value under test.
    Returns None if every value validates.
    """
    stream = iter(numbers)
    win: deque[int] = deque(islice(stream, preamble), maxlen=preamble + 2)
    pool = set(win)
    for value in stream:
        win.append(value)
        if len(win) == preamble + 2:
            # drop the candidate that just slid out of range
            pool.remove(win[0])
        # valid iff some pair of *distinct* pool members sums to `value`
        if not any(value - a in pool and value - a != a for a in pool):
            return value
        pool.add(value)
# Worked example from the puzzle statement: with a preamble of 5, the first
# number that is not a sum of two of the previous five values is 127.
test = [int(v) for v in """\
35
20
15
25
47
40
62
55
65
95
102
117
150
182
127
219
299
277
309
576
""".split()]
assert next_invalid(test, 5) == 127
# Fetch the personal puzzle input via the advent-of-code-data helper.
import aocd
number_stream = [int(v) for v in aocd.get_data(day=9, year=2020).split()]
print("Part 1:", next_invalid(number_stream))
```
## Part 2
To solve the second part, you need a _dynamic_ window size over the input stream, and a running total. When the running total equals the value from part 1, we can then take the min and max values from the window.
- While the running total is too low, grow the window one step and add the extra value to the total
- If the running total is too high, remove a value at the back of the window from the running total, and shrink that side of the window by one step.
With the Python `deque` (double-ended queue) already used in part one, this is a trivial task to achieve:
```
def find_weakness(numbers: Iterable[int], preamble: int = 25) -> int:
    """Find the encryption weakness for part 2.

    Locates a contiguous run of input values summing to the part-1 invalid
    number by growing/shrinking a double-ended window, then returns the sum
    of the smallest and largest values in that run.
    """
    target = next_invalid(numbers, preamble)
    stream = iter(numbers)
    running = next(stream)
    span = deque([running])
    while running != target and span:
        if running < target:
            # too small: widen to the right
            nxt = next(stream)
            span.append(nxt)
            running += nxt
        else:
            # too large: shrink from the left
            running -= span.popleft()
    if not span:
        raise ValueError("Could not find a weakness")
    return min(span) + max(span)
assert find_weakness(test, 5) == 62
print("Part 2:", find_weakness(number_stream))
```
| github_jupyter |
# Intro to matplotlib - plotting in Python
Most of this material is reproduced or adapted from
https://github.com/jakevdp/PythonDataScienceHandbook
The text is released under the [CC-BY-NC-ND license](https://creativecommons.org/licenses/by-nc-nd/3.0/us/legalcode), and code is released under the [MIT license](https://opensource.org/licenses/MIT).
For this lecture, we also grab references from
[http://jrjohansson.github.io](http://jrjohansson.github.io)
This work is licensed under a [CC-BY license](https://creativecommons.org/licenses/by/3.0/).
## Introduction
Matplotlib is a 2D and 3D graphics library for generating scientific figures.
More information at the Matplotlib web page: http://matplotlib.org/
### Importing Matplotlib
Just as we use the ``np`` shorthand for NumPy, we will use some standard shorthands for Matplotlib imports:
```
#Do imports
import numpy as np
import matplotlib.pyplot as plt
```
The ``plt`` interface is what we will use most often.
#### Plotting from a Jupyter notebook
Plotting interactively within notebook can be done with the ``%matplotlib`` command.
- ``%matplotlib notebook`` will lead to *interactive* plots embedded within the notebook
- ``%matplotlib inline`` will lead to *static* images of your plot embedded in the notebook
We will generally opt for ``%matplotlib inline``:
```
%matplotlib inline
```
After running this command (it needs to be done only once per kernel/session), any cell within the notebook that creates a plot will embed a PNG image of the resulting graphic:
```
# 100 evenly spaced sample points on [0, 10]
x = np.linspace(0, 10, 100)
fig = plt.figure()
plt.plot(x, np.sin(x), '-')   # solid line
plt.plot(x, np.cos(x), '--');  # dashed line; the ';' suppresses the text repr
```
### Saving Figures to File
One nice feature of Matplotlib is the ability to save figures in a wide variety of formats.
Saving a figure can be done using the ``savefig()`` command.
For example, to save the previous figure as a PNG file, you can run this:
```
fig.savefig('my_figure.png')
```
We now have a file called ``my_figure.png`` in the current working directory:
```
!ls -lh my_figure.png #This won't work in Windows (you could use !dir my_figure.png )
```
In ``savefig()``, the file format is inferred from the extension of the given filename.
Depending on what backends you have installed, many different file formats are available.
The list of supported file types can be found for your system by using the following method of the figure canvas object:
```
fig.canvas.get_supported_filetypes()
```
# Simple Line Plots
Perhaps the simplest of all plots is the visualization of a single function $y = f(x)$.
For all Matplotlib plots, we start by creating a figure and an axes.
In their simplest form, a figure and axes can be created as follows:
```
fig = plt.figure()
ax = plt.axes()
#This is called the object-oriented interface because we define two objects, one of the
#class figure and one of the class axes.
```
In Matplotlib, the *figure* (an instance of the class ``plt.Figure``) can be thought of as a single container that contains all the objects representing axes, graphics, text, and labels.
The *axes* (an instance of the class ``plt.Axes``) is what we see above: a bounding box with ticks and labels, which will eventually contain the plot elements that make up our visualization.
Once we have created an axes, we can use the ``ax.plot`` function to plot some data. Let's start with a simple sinusoid:
```
fig = plt.figure()
ax = plt.axes()
x = np.linspace(0, 10, 1000) # 1000 evenly spaced sample points on [0, 10]
ax.plot(x, np.sin(x));
```
Alternatively, we can use the pylab interface and let the figure and axes be created for us in the background:
```
plt.plot(x, np.sin(x));
# This is simpler but less flexible.
```
If we want to create a single figure with multiple lines, we can simply call the ``plot`` function multiple times:
```
# Repeated plot calls draw onto the same axes; colors cycle automatically.
plt.plot(x, np.sin(x))
plt.plot(x, np.cos(x))
plt.plot(x, np.cos(x)+1);
```
That's all there is to plotting simple functions in Matplotlib!
We'll now dive into some more details about how to control the appearance of the axes and lines.
## Adjusting the Plot: Line Colors and Styles
The first adjustment you might wish to make to a plot is to control the line colors and styles.
The ``plt.plot()`` function takes additional arguments that can be used to specify these.
To adjust the color, you can use the ``color`` keyword, which accepts a string argument representing virtually any imaginable color.
The color can be specified in a variety of ways:
```
plt.plot(x, np.sin(x - 0), color='blue') # specify color by name
plt.plot(x, np.sin(x - 1), color='g') # short color code (rgbcmyk)
plt.plot(x, np.sin(x - 2), color='0.75') # Grayscale between 0 and 1
plt.plot(x, np.sin(x - 3), color='#FFDD44') # Hex code (RRGGBB from 00 to FF)
plt.plot(x, np.sin(x - 4), color=(1.0,0.2,0.3)) # RGB tuple, values 0 to 1
plt.plot(x, np.sin(x - 5), color='chartreuse'); # all HTML color names supported
```
If no color is specified, Matplotlib will automatically cycle through a set of default colors for multiple lines.
Similarly, the line style can be adjusted using the ``linestyle`` keyword:
```
plt.plot(x, x + 0, linestyle='solid')
plt.plot(x, x + 1, linestyle='dashed')
plt.plot(x, x + 2, linestyle='dashdot')
plt.plot(x, x + 3, linestyle='dotted');
# For short, you can use the following codes:
plt.plot(x, x + 4, linestyle='-') # solid
plt.plot(x, x + 5, linestyle='--') # dashed
plt.plot(x, x + 6, linestyle='-.') # dashdot
plt.plot(x, x + 7, linestyle=':'); # dotted
```
If you would like to be extremely terse, these ``linestyle`` and ``color`` codes can be combined into a single non-keyword argument to the ``plt.plot()`` function:
```
plt.plot(x, x + 0, '-g') # solid green
plt.plot(x, x + 1, '--c') # dashed cyan
plt.plot(x, x + 2, '-.k') # dashdot black
plt.plot(x, x + 3, ':r'); # dotted red
```
These single-character color codes reflect the standard abbreviations in the RGB (Red/Green/Blue) and CMYK (Cyan/Magenta/Yellow/blacK) color systems, commonly used for digital color graphics.
There are many other keyword arguments that can be used to fine-tune the appearance of the plot; for more details, I'd suggest viewing the docstring of the ``plt.plot()`` function.
## Adjusting the Plot: Axes Limits
Matplotlib does a decent job of choosing default axes limits for your plot, but sometimes it's nice to have finer control.
The most basic way to adjust axis limits is to use the ``plt.xlim()`` and ``plt.ylim()`` methods:
```
plt.plot(x, np.sin(x))
# Explicit axis limits override matplotlib's automatic choices.
plt.xlim(-1, 11)
plt.ylim(-5, 5);
```
If for some reason you'd like either axis to be displayed in reverse, you can simply reverse the order of the arguments:
```
plt.plot(x, np.sin(x))
plt.xlim(10, 0)
plt.ylim(1.2, -1.2);
```
A useful related method is ``plt.axis()`` (note here the potential confusion between *axes* with an *e*, and *axis* with an *i*).
The ``plt.axis()`` method allows you to set the ``x`` and ``y`` limits with a single call, by passing a list which specifies ``[xmin, xmax, ymin, ymax]``:
```
plt.plot(x, np.sin(x))
plt.axis([-1, 11, -1.5, 1.5]);
```
It allows even higher-level specifications, such as ensuring an equal aspect ratio so that on your screen, one unit in ``x`` is equal to one unit in ``y``:
```
plt.plot(x, np.sin(x))
plt.axis('equal');
```
For more information on axis limits and the other capabilities of the ``plt.axis`` method, refer to the ``plt.axis`` docstring.
## Labeling Plots
Every plot should include a title, axis labels, and a simple legend.
```
# Let's do this one together
x = np.linspace(0,10,100)
# The `label` strings are what plt.legend() displays below.
plt.plot(x, np.tan(x), label = 'Tan(x)');
plt.plot(x, np.arctan(x), label = 'Arctan(x)');
plt.title('Tangent of x');
plt.xlabel('x');
plt.ylabel('Tan(x)');
plt.legend(loc = 'upper right');
```
The position, size, and style of these labels can be adjusted using optional arguments to the function.
For more information, see the Matplotlib documentation and the docstrings of each of these functions.
When multiple lines are being shown within a single axes, it can be useful to create a plot legend that labels each line type.
Again, Matplotlib has a built-in way of quickly creating such a legend.
It is done via the (you guessed it) ``plt.legend()`` method.
You can specify the label of each line using the ``label`` keyword of the plot function:
```
#plt.legend(loc=0) # let matplotlib decide the optimal location; default choice
#plt.legend(loc=1) # upper right corner
#plt.legend(loc=2) # upper left corner
#plt.legend(loc=3) # lower left corner
#plt.legend(loc=4) # lower right corner
```
As you can see, the ``plt.legend()`` function keeps track of the line style and color, and matches these with the correct label.
More information on specifying and formatting plot legends can be found in the ``plt.legend`` docstring.
## Aside: Matplotlib Gotchas
While most ``plt`` functions translate directly to ``ax`` methods (such as ``plt.plot()`` → ``ax.plot()``, ``plt.legend()`` → ``ax.legend()``, etc.), this is not the case for all commands.
In particular, functions to set limits, labels, and titles are slightly modified.
For transitioning between MATLAB-style functions and object-oriented methods, make the following changes:
- ``plt.xlabel()`` → ``ax.set_xlabel()``
- ``plt.ylabel()`` → ``ax.set_ylabel()``
- ``plt.xlim()`` → ``ax.set_xlim()``
- ``plt.ylim()`` → ``ax.set_ylim()``
- ``plt.title()`` → ``ax.set_title()``
In the object-oriented interface to plotting, rather than calling these functions individually, it is often more convenient to use the ``ax.set()`` method to set all these properties at once:
```
ax = plt.axes()
ax.plot(x, np.sin(x))
# ax.set() bundles set_xlim/set_ylim/set_xlabel/set_ylabel/set_title in one call.
ax.set(xlim=(0, 10), ylim=(-2, 2),
       xlabel='x', ylabel='sin(x)',
       title='Your old friend, the sine curve');
```
For simple plots, the choice of which style to use is largely a matter of preference, but the object-oriented approach can become a necessity as plots become more complicated.
## Scatter Plots with ``plt.plot``
In the previous section we looked at ``plt.plot``/``ax.plot`` to produce line plots.
It turns out that this same function can produce scatter plots as well:
```
x = np.linspace(0, 10, 30)
y = np.sin(x)
#plt.plot(x, y, '*', color='black');
plt.plot(x, y, '*k'); #same thing
```
As you saw above, you can give "plot" a marker (without a line), and it will produce a scatter plot.
The third argument in the function call is a character that represents the type of symbol used for the plotting. Just as you can specify options such as ``'-'``, ``'--'`` to control the line style, the marker style has its own set of short string codes. The full list of available symbols can be seen in the documentation of ``plt.plot``, or in Matplotlib's online documentation. Most of the possibilities are fairly intuitive, and we'll show a number of the more common ones here:
```
# For each marker style, plot 5 uniformly random (x, y) points and label the
# series with its marker code so the legend documents every symbol.
for marker in ['o', '.', ',', 'x', '+', 'v', '^', '<', '>', 's', 'd']:
    plt.plot(np.random.uniform(size=5), np.random.uniform(size=5), marker,
             label="marker='{0}'".format(marker))
plt.legend(numpoints=1)
plt.xlim(0, 1.8);
```
For even more possibilities, these character codes can be used together with line and color codes to plot points along with a line connecting them:
```
plt.plot(x, y, '-ok');
```
Additional keyword arguments to ``plt.plot`` specify a wide range of properties of the lines and markers:
```
# Pentagon markers with customized size, edge, and face properties.
plt.plot(x, y, '-p', color='gray',
         markersize=15, linewidth=4,
         markerfacecolor='white',
         markeredgecolor='gray',
         markeredgewidth=2)
plt.ylim(-1.2, 1.2);
#Of course, most scatter plots are not meant to represent a sine curve but some relationship between x and y, for example
x = np.linspace(-3,3,100)
y = x**2 + np.random.rand(100) # quadratic trend plus uniform noise drawn from [0, 1)
plt.plot(x,y,'o'); #(note: the semicolon here suppresses text output)
```
This type of flexibility in the ``plt.plot`` function allows for a wide variety of possible visualization options.
For a full description of the options available, refer to the ``plt.plot`` documentation.
## Scatter Plots with ``plt.scatter``
A second, more powerful method of creating scatter plots is the ``plt.scatter`` function, which can be used very similarly to the ``plt.plot`` function:
```
plt.scatter(x, y, marker='o');
```
The primary difference of ``plt.scatter`` from ``plt.plot`` is that it can be used to create scatter plots where the properties of each individual point (size, face color, edge color, etc.) can be individually controlled or mapped to data.
Let's show this by creating a random scatter plot with points of many colors and sizes.
In order to better see the overlapping results, we'll also use the ``alpha`` keyword to adjust the transparency level:
```
# Scatter 100 random points where each point's color and size carry extra
# (random) data dimensions; alpha makes overlaps visible.
x = np.random.uniform(size=100) #create a new array with 100 random values
y = np.random.uniform(size=100) #same
colors = np.random.uniform(size=100) #same
sizes = 1000 * np.random.uniform(size=100) # marker areas, in points^2
plt.scatter(x, y, c=colors, s=sizes, alpha=0.3,
            cmap='viridis')
plt.colorbar();  # show color scale
```
Notice that the color argument is automatically mapped to a color scale (shown here by the ``colorbar()`` command), and that the size argument is given in pixels.
#### In this way, the color and size of points can be used to convey information in the visualization, in order to visualize multidimensional data.
## ``plot`` Versus ``scatter``: A Note on Efficiency
Aside from the different features available in ``plt.plot`` and ``plt.scatter``, why might you choose to use one over the other? While it doesn't matter as much for small amounts of data, as datasets get larger than a few thousand points, ``plt.plot`` can be noticeably more efficient than ``plt.scatter``.
The reason is that ``plt.scatter`` has the capability to render a different size and/or color for each point, so the renderer must do the extra work of constructing each point individually.
In ``plt.plot``, on the other hand, the points are always essentially clones of each other, so the work of determining the appearance of the points is done only once for the entire set of data.
For large datasets, the difference between these two can lead to vastly different performance, and for this reason, ``plt.plot`` should be preferred over ``plt.scatter`` for large datasets.
# Visualizing Errors
For any scientific measurement, accurate accounting for errors is nearly as important, if not more important, than accurate reporting of the number itself.
In visualization of data and results, showing these errors effectively can make a plot convey much more complete information.
## Basic Errorbars
A basic errorbar can be created with a single Matplotlib function call:
```
x = np.linspace(0, 10, 50)
dy = 0.8  # y error magnitude (also the noise scale below)
dx = 0.3  # x error magnitude
y = np.sin(x) + dy * np.random.randn(50)  # sine plus Gaussian noise
plt.errorbar(x, y, xerr = dx, yerr=dy, fmt='*k');
```
Here the ``fmt`` is a format code controlling the appearance of lines and points, and has the same syntax as the shorthand used in ``plt.plot`` that we saw above.
In addition to these basic options, the ``errorbar`` function has many options to fine-tune the outputs.
In crowded plots it might be useful to make the errorbars lighter than the points themselves:
```
# Same data, but with light-gray error bars so the points stand out.
plt.errorbar(x, y, xerr=dx, yerr=dy, fmt='o', color='black',
             ecolor='lightgray', elinewidth=3, capsize=0);
```
# Histograms, Binnings, and Density
A simple histogram can be a great first step in understanding a dataset.
In class 2 we saw a preview of Matplotlib's histogram function, which creates a basic histogram in one line:
```
data = np.random.randn(1000)  # 1000 samples from a standard normal
plt.hist(data); # raw counts, default 10 bins
plt.hist(data,density=True,bins=20); # 20 bins, normalized so the bar areas sum to 1
```
The ``hist()`` function has many options to tune both the calculation and the display;
here's an example of a more customized histogram:
```
# bins: number of bins; density: normalize to unit area; alpha: transparency;
# histtype='stepfilled': filled outline without per-bar edges.
data = np.random.randn(1000)
plt.hist(data, bins=30, density=True, alpha=0.7,
         histtype='stepfilled', color='steelblue',
         edgecolor='none');
```
The ``plt.hist`` docstring has more information on other customization options available.
Plotting histograms along with some transparency ``alpha`` can be very useful when comparing histograms of several distributions:
```
x1 = np.random.normal(0, 0.8, 1000) # 1000 samples from N(mean=0, std=0.8)
x2 = np.random.normal(-2, 1, 1000)
x3 = np.random.normal(3, 2, 1000)
# Shared styling; alpha=0.5 lets the overlapping histograms show through.
kwargs = dict(histtype='stepfilled', alpha=0.5, density=True, bins=40)
plt.hist(x1, **kwargs)
plt.hist(x2, **kwargs)
plt.hist(x3, **kwargs);
```
If you would like to simply compute the histogram (that is, count the number of points in a given bin) and not display it, the ``np.histogram()`` function is available:
```
# np.histogram computes without plotting; bin_edges has len(counts) + 1 entries.
counts, bin_edges = np.histogram(data, bins=5)
print(counts)
print(bin_edges)
```
## Further reading
* http://www.matplotlib.org - The project web page for matplotlib, see in particular https://matplotlib.org/tutorials/index.html
* https://github.com/matplotlib/matplotlib - The source code for matplotlib.
* http://matplotlib.org/gallery.html - A large gallery showcasing various types of plots matplotlib can create. Highly recommended!
* http://www.loria.fr/~rougier/teaching/matplotlib - A good matplotlib tutorial.
* http://scipy-lectures.github.io/matplotlib/matplotlib.html - Another good matplotlib reference.
| github_jupyter |
# Campus SEIR Modeling
## Campus infection data
The following data consists of new infections reported since August 3, 2020, from diagnostic testing administered by the Wellness Center and University Health Services at the University of Notre Dame. The data is publicly available on the [Notre Dame Covid-19 Dashboard](https://here.nd.edu/our-approach/dashboard/).
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from scipy.integrate import solve_ivp
from scipy.optimize import minimize
from datetime import timedelta
# Daily counts of newly confirmed infections (Notre Dame dashboard),
# starting Aug 3, 2020.
data = [
    ["2020-08-03", 0],
    ["2020-08-04", 0],
    ["2020-08-05", 0],
    ["2020-08-06", 1],
    ["2020-08-07", 0],
    ["2020-08-08", 1],
    ["2020-08-09", 2],
    ["2020-08-10", 4],
    ["2020-08-11", 4],
    ["2020-08-12", 7],
    ["2020-08-13", 10],
    ["2020-08-14", 14],
    ["2020-08-15", 3],
    ["2020-08-16", 15],
    ["2020-08-17", 80],
]
df = pd.DataFrame(data, columns=["date", "new cases"])
df["date"] = pd.to_datetime(df["date"])  # parse ISO date strings to Timestamps
# Bar chart of new cases with Monday-anchored date ticks.
fig, ax = plt.subplots(figsize=(8,4))
ax.bar(df["date"], df["new cases"], width=0.6)
ax.xaxis.set_major_locator(mdates.WeekdayLocator(byweekday=mdates.MO))
ax.xaxis.set_major_formatter(mdates.DateFormatter("%b %d"))
plt.title("Reported New Infections")
plt.grid()
```
## Fitting an SEIR model to campus data
Because of the limited amount of data available at the time this notebook was prepared, the model fitting has been limited to an SEIR model for infectious disease in a homogeneous population. In an SEIR model, the progression of an epidemic can be modeled by the rate processes shown in the following diagram.
$$\text{Susceptible}
\xrightarrow {\frac{\beta S I}{N}}
\text{Exposed}
\xrightarrow{\alpha E}
\text{Infectious}
\xrightarrow{\gamma I}
\text{Recovered} $$
which yield the following model for the population of the four compartments
$$\begin{align*}
\frac{dS}{dt} &= -\beta S \frac{I}{N} \\
\frac{dE}{dt} &= \beta S \frac{I}{N} - \alpha E \\
\frac{dI}{dt} &= \alpha E - \gamma I \\
\frac{dR}{dt} &= \gamma I \\
\end{align*}$$
The recovery rate is given by $\gamma = 1/t_{recovery}$ where the average recovery time $t_{recovery}$ is estimated as 8 days.
| Parameter | Description | Estimated Value | Source |
| :-- | :-- | :-- | :-- |
| $N$ | campus population | 15,000 | estimate |
| $\alpha$ | 1/average latency period | 1/(3.0 d) |
| $\gamma$ | 1/average recovery period | 1/(8.0 d) | literature |
| $\beta$ | infection rate constant | tbd | fitted to data |
| $I_0$ | initial infectives on Aug 3, 2020 | tbd | fitted to data |
| $R_0$ | reproduction number | ${\beta}/{\gamma}$ |
```
N = 15000    # estimated campus population
gamma = 1/8.0  # recovery rate = 1 / average recovery time in days
alpha = 1/3.0  # latency rate = 1 / average latency period in days
def model(t, y, beta):
    """SEIR right-hand side: d[S, E, I, R]/dt at state y.

    Reads the module-level constants N (population), alpha (1/latency)
    and gamma (1/recovery); beta is the fitted infection rate constant.
    """
    S, E, I, R = y
    new_exposures = beta * S * I / N
    return np.array([
        -new_exposures,              # dS/dt
        new_exposures - alpha * E,   # dE/dt
        alpha * E - gamma * I,       # dI/dt
        gamma * I,                   # dR/dt
    ])
def solve_model(t, params):
    """Integrate the SEIR model over the time grid t.

    params is (beta, I_initial); the initial condition places I_initial
    people in the Exposed compartment. Returns S, E, I, R arrays plus U,
    the rate of new exposures (the quantity compared against new-case data).
    """
    beta, I_initial = params
    y0 = [N - I_initial, I_initial, 0.0, 0.0]
    span = np.array([t[0], t[-1]])
    sol = solve_ivp(lambda time, state: model(time, state, beta), span,
                    y0, t_eval=t, atol=1e-6, rtol=1e-9)
    S, E, I, R = sol.y
    new_case_rate = beta * S * I / N
    return S, E, I, R, new_case_rate
def residuals(df, params):
    """L2 norm between observed new cases and the model-predicted rate U."""
    *_, predicted = solve_model(df.index, params)
    return np.linalg.norm(df["new cases"] - predicted)
def fit_model(df, params_est=(0.5, 0.5)):
    """Fit (beta, I_initial) to the observed new-case counts.

    Minimizes the least-squares residual with Nelder-Mead and returns the
    optimized parameter array [beta, I_initial].

    Fix: the default initial guess is now an immutable tuple rather than a
    list — a mutable default argument is shared across calls and is a
    classic Python pitfall (behavior here is otherwise unchanged).
    """
    return minimize(lambda params: residuals(df, params), params_est,
                    method="Nelder-Mead").x
def plot_data(df):
    """Scatter-plot the observed new-case counts against day number."""
    observed = np.array(df["new cases"])
    plt.plot(df.index, observed, "r.", ms=20, label="data")
    plt.xlabel("days")
    plt.title("new cases")
    plt.legend()
def plot_model(t, params):
    """Plot the model-predicted new-case rate U(t) for the given parameters.

    params is (beta, I_initial), as returned by fit_model.

    Bug fix: R0 was previously computed from the *global* variable ``beta``
    rather than from ``params`` — it only printed the right value because
    the caller happened to assign the same global first. The function now
    unpacks beta from its own argument, so the printed R0 always matches
    the curve being plotted.
    """
    beta, I_initial = params
    print("R0 =", round(beta/gamma, 1))
    S, E, I, R, U = solve_model(t, params)
    plt.plot(t, U, lw=3, label="model")
    plt.xlabel("days")
    plt.title("new cases")
    plt.legend()
# Plot the observations, fit (beta, I0) by least squares, and overlay
# the fitted model curve on the same axes.
plot_data(df)
beta, I_initial = fit_model(df)
plot_model(df.index, [beta, I_initial])
```
## Fitted parameter values
```
from tabulate import tabulate
# Fixed and fitted parameter values, rendered as a plain-text table.
parameter_table = [
    ["N", 15000],
    ["I0", I_initial],
    ["beta", beta],
    ["gamma", gamma],
    ["R0", beta/gamma]
]
print(tabulate(parameter_table, headers=["Parameter", "Value"]))
```
## Short term predictions of newly confirmed cases
Using the fitted parameters, the following code presents a short term projection of newly diagnosed infections. Roughly speaking, the model projects a 50% increase per day in newly diagnosed cases as a result of testing symptomatic individuals.
The number of infected but asymptomatic individuals is unknown at this time, but can be expected to be a 2x multiple of this projection.
```
# prediction horizon (days ahead)
H = 1
# retrospective lag: also refit on data truncated by 1..K days to show how
# the forecast would have looked on each of the previous K days
K = 6
fig, ax = plt.subplots(1, 1, figsize=(12, 4))
for k in range(0, K+1):
    # use data up to k days ago
    if k > 0:
        beta, I_initial = fit_model(df[:-k])
        P = max(df[:-k].index) + H
        c = 'b'
        a = 0.25
    else:
        # k == 0: fit on the full data set; drawn opaque red
        beta, I_initial = fit_model(df)
        P = max(df.index) + H
        c = 'r'
        a = 1.0
    # simulation
    t = np.linspace(0, P, P+1)
    S, E, I, R, U = solve_model(t, [beta, I_initial])
    # plotting
    # NOTE(review): the comprehension's loop variable shadows the array `t`;
    # it works, but after this line `t` is the last scalar day offset.
    dates = [df["date"][0] + timedelta(days=t) for t in t]
    ax.plot(dates, U, c, lw=3, alpha=a)
ax.plot(df["date"], df["new cases"], "r.", ms=25, label="new infections (data)")
ax.xaxis.set_major_locator(mdates.WeekdayLocator(byweekday=mdates.MO))
ax.xaxis.set_major_formatter(mdates.DateFormatter("%b %d"))
ax.grid(True)
ax.set_title(f"{H} day-ahead predictions of confirmed new cases");
```
| github_jupyter |
```
# dependencies
import pandas as pd
from sqlalchemy import create_engine, inspect
# read raw data csv
csv_file = "NYC_Dog_Licensing_Dataset.csv"
all_dog_data = pd.read_csv(csv_file)
all_dog_data.head(10)
# trim data frame to necessary columns.
# .copy() makes an independent frame: assigning into a column of a plain
# slice of all_dog_data would be chained assignment and raise pandas'
# SettingWithCopyWarning (and may silently fail to stick) at the astype
# step below.
dog_data_df = all_dog_data[['AnimalName','AnimalGender','BreedName','Borough','ZipCode']].copy()
dog_data_df.head(10)
# remove incomplete rows
dog_data_df.count()
cleaned_dog_data_df = dog_data_df.dropna(how='any')
cleaned_dog_data_df.count()
# reformat zip code as integer (safe only after dropna removed the NaNs)
cleaned_dog_data_df['ZipCode'] = cleaned_dog_data_df['ZipCode'].astype(int)
cleaned_dog_data_df.head(10)
# connect to postgres to create dog database.
# Fix: use the "postgresql://" scheme — the legacy "postgres://" alias was
# removed in SQLAlchemy 1.4, while "postgresql://" works on all versions.
engine = create_engine('postgresql://postgres:postgres@localhost:5432')
conn = engine.connect()
conn.execute("commit")  # leave the implicit transaction; CREATE/DROP DATABASE cannot run inside one
conn.execute("drop database if exists dog_db")
conn.execute("commit")
conn.execute("create database dog_db")
# import dataframe into database table
engine = create_engine('postgresql://postgres:postgres@localhost:5432/dog_db')
conn = engine.connect()
cleaned_dog_data_df.to_sql('dog_names', con=conn, if_exists='replace', index=False)
# check for data
# NOTE(review): Engine.execute and string statements were removed in
# SQLAlchemy 2.0; if upgrading, switch to conn.execute(text(...)).
engine.execute('SELECT * FROM dog_names').fetchall()
# inspect table names and column names
inspector = inspect(engine)
inspector.get_table_names()
inspector = inspect(engine)
columns = inspector.get_columns('dog_names')
print(columns)
# query the table and save as dataframe for analysis
dog_table = pd.read_sql_query('select * from dog_names', con=engine)
dog_table.head(20)
# drop placeholder names before counting
dog_data = dog_table.loc[(dog_table["AnimalName"] != "UNKNOWN") & (dog_table["AnimalName"] != "NAME NOT PROVIDED"), :]
dog_data.head(20)
# most common names overall: count per name, rename the count column, sort descending
name_counts = pd.DataFrame(dog_data.groupby("AnimalName")["AnimalName"].count())
name_counts_df = name_counts.rename(columns={"AnimalName":"Count"})
name_counts_df.head()
top_names = name_counts_df.sort_values(["Count"], ascending=False)
top_names.head(12)
# most common female names (same pattern, restricted to gender F)
f_dog_data = dog_data.loc[dog_data["AnimalGender"] == "F", :]
f_dog_data.head()
f_name_counts = pd.DataFrame(f_dog_data.groupby("AnimalName")["AnimalName"].count())
f_name_counts_df = f_name_counts.rename(columns={"AnimalName":"Count"})
f_top_names = f_name_counts_df.sort_values(["Count"], ascending=False)
f_top_names.head(12)
# most common male names
m_dog_data = dog_data.loc[dog_data["AnimalGender"] == "M", :]
m_name_counts = pd.DataFrame(m_dog_data.groupby("AnimalName")["AnimalName"].count())
m_name_counts_df = m_name_counts.rename(columns={"AnimalName":"Count"})
m_top_names = m_name_counts_df.sort_values(["Count"], ascending=False)
m_top_names.head(12)
# registration counts per borough
borough_counts = pd.DataFrame(dog_data.groupby("Borough")["AnimalName"].count())
borough_counts_df = borough_counts.rename(columns={"AnimalName":"Count"})
borough_dogs = borough_counts_df.sort_values(["Count"], ascending=False)
borough_dogs.head()
# counts by borough and gender, restricted to the five boroughs and F/M rows
top_boroughs = dog_data.loc[(dog_data["Borough"] == "Manhattan") | (dog_data["Borough"] == "Brooklyn") | (dog_data["Borough"] == "Queens") | (dog_data["Borough"] == "Bronx") | (dog_data["Borough"] == "Staten Island"), :]
f_m_top_boroughs = top_boroughs.loc[(top_boroughs["AnimalGender"] == "F") | (top_boroughs["AnimalGender"] == "M"), :]
f_m_top_boroughs.head()
borough_data = f_m_top_boroughs.groupby(['Borough','AnimalGender'])["AnimalName"].count()
borough_dogs = pd.DataFrame(borough_data)
borough_dogs
# Top names per borough, matched with str.contains so variant spellings of
# the borough are included.
# NOTE(review): the five blocks below repeat one count/rename/sort pattern
# and are a good candidate for a single helper function.
# Manhattan
mt_dog_data = dog_data.loc[dog_data["Borough"].str.contains("Manhattan", case=False), :]
mt_name_counts = pd.DataFrame(mt_dog_data.groupby("AnimalName")["AnimalName"].count())
mt_name_counts_df = mt_name_counts.rename(columns={"AnimalName":"Count"})
mt_top_names = mt_name_counts_df.sort_values(["Count"], ascending=False)
mt_top_names.head(12)
# Brooklyn
bk_dog_data = dog_data.loc[dog_data["Borough"].str.contains("Brooklyn", case=False), :]
bk_name_counts = pd.DataFrame(bk_dog_data.groupby("AnimalName")["AnimalName"].count())
bk_name_counts_df = bk_name_counts.rename(columns={"AnimalName":"Count"})
bk_top_names = bk_name_counts_df.sort_values(["Count"], ascending=False)
bk_top_names.head(12)
# Queens
qn_dog_data = dog_data.loc[dog_data["Borough"].str.contains("Queens", case=False), :]
qn_name_counts = pd.DataFrame(qn_dog_data.groupby("AnimalName")["AnimalName"].count())
qn_name_counts_df = qn_name_counts.rename(columns={"AnimalName":"Count"})
qn_top_names = qn_name_counts_df.sort_values(["Count"], ascending=False)
qn_top_names.head(12)
# Bronx
bx_dog_data = dog_data.loc[dog_data["Borough"].str.contains("Bronx", case=False), :]
bx_name_counts = pd.DataFrame(bx_dog_data.groupby("AnimalName")["AnimalName"].count())
bx_name_counts_df = bx_name_counts.rename(columns={"AnimalName":"Count"})
bx_top_names = bx_name_counts_df.sort_values(["Count"], ascending=False)
bx_top_names.head(12)
# Staten Island
si_dog_data = dog_data.loc[dog_data["Borough"].str.contains("Staten Island", case=False), :]
si_name_counts = pd.DataFrame(si_dog_data.groupby("AnimalName")["AnimalName"].count())
si_name_counts_df = si_name_counts.rename(columns={"AnimalName":"Count"})
si_top_names = si_name_counts_df.sort_values(["Count"], ascending=False)
si_top_names.head(12)
```
| github_jupyter |
# What's this PyTorch business?
You've written a lot of code in this assignment to provide a whole host of neural network functionality. Dropout, Batch Norm, and 2D convolutions are some of the workhorses of deep learning in computer vision. You've also worked hard to make your code efficient and vectorized.
For the last part of this assignment, though, we're going to leave behind your beautiful codebase and instead migrate to one of two popular deep learning frameworks: in this instance, PyTorch (or TensorFlow, if you choose to use that notebook).
### What is PyTorch?
PyTorch is a system for executing dynamic computational graphs over Tensor objects that behave similarly as numpy ndarray. It comes with a powerful automatic differentiation engine that removes the need for manual back-propagation.
### Why?
* Our code will now run on GPUs! Much faster training. When using a framework like PyTorch or TensorFlow you can harness the power of the GPU for your own custom neural network architectures without having to write CUDA code directly (which is beyond the scope of this class).
* We want you to be ready to use one of these frameworks for your project so you can experiment more efficiently than if you were writing every feature you want to use by hand.
* We want you to stand on the shoulders of giants! TensorFlow and PyTorch are both excellent frameworks that will make your lives a lot easier, and now that you understand their guts, you are free to use them :)
* We want you to be exposed to the sort of deep learning code you might run into in academia or industry.
### PyTorch versions
This notebook assumes that you are using **PyTorch version 1.4**. In some of the previous versions (e.g. before 0.4), Tensors had to be wrapped in Variable objects to be used in autograd; however Variables have now been deprecated. In addition 1.0+ versions separate a Tensor's datatype from its device, and use numpy-style factories for constructing Tensors rather than directly invoking Tensor constructors.
## How will I learn PyTorch?
Justin Johnson has made an excellent [tutorial](https://github.com/jcjohnson/pytorch-examples) for PyTorch.
You can also find the detailed [API doc](http://pytorch.org/docs/stable/index.html) here. If you have other questions that are not addressed by the API docs, the [PyTorch forum](https://discuss.pytorch.org/) is a much better place to ask than StackOverflow.
## Install PyTorch 1.4 (ONLY IF YOU ARE WORKING LOCALLY)
1. Have the latest version of Anaconda installed on your machine.
2. Create a new conda environment starting from Python 3.7. In this setup example, we'll call it `torch_env`.
3. Run the command: `conda activate torch_env`
4. Run the command: `pip install torch==1.4 torchvision==0.5.0`
# Table of Contents
This assignment has 5 parts. You will learn PyTorch on **three different levels of abstraction**, which will help you understand it better and prepare you for the final project.
1. Part I, Preparation: we will use CIFAR-10 dataset.
2. Part II, Barebones PyTorch: **Abstraction level 1**, we will work directly with the lowest-level PyTorch Tensors.
3. Part III, PyTorch Module API: **Abstraction level 2**, we will use `nn.Module` to define arbitrary neural network architecture.
4. Part IV, PyTorch Sequential API: **Abstraction level 3**, we will use `nn.Sequential` to define a linear feed-forward network very conveniently.
5. Part V, CIFAR-10 open-ended challenge: please implement your own network to get as high accuracy as possible on CIFAR-10. You can experiment with any layer, optimizer, hyperparameters or other advanced features.
Here is a table of comparison:
| API | Flexibility | Convenience |
|---------------|-------------|-------------|
| Barebone | High | Low |
| `nn.Module` | High | Medium |
| `nn.Sequential` | Low | High |
# Part I. Preparation
First, we load the CIFAR-10 dataset. This might take a couple minutes the first time you do it, but the files should stay cached after that.
In previous parts of the assignment we had to write our own code to download the CIFAR-10 dataset, preprocess it, and iterate through it in minibatches; PyTorch provides convenient tools to automate this process for us.
```
import torch
# This notebook is written against PyTorch 1.4; fail fast on other versions.
assert '.'.join(torch.__version__.split('.')[:2]) == '1.4'
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.utils.data import sampler
import torchvision.datasets as dset
import torchvision.transforms as T
import numpy as np
# First NUM_TRAIN examples form the training split; the remaining
# 50000 - NUM_TRAIN are held out for validation.
NUM_TRAIN = 49000
# The torchvision.transforms package provides tools for preprocessing data
# and for performing data augmentation; here we set up a transform to
# preprocess the data by subtracting the mean RGB value and dividing by the
# standard deviation of each RGB value; we've hardcoded the mean and std.
transform = T.Compose([
                T.ToTensor(),
                T.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
            ])
# We set up a Dataset object for each split (train / val / test); Datasets load
# training examples one at a time, so we wrap each Dataset in a DataLoader which
# iterates through the Dataset and forms minibatches. We divide the CIFAR-10
# training set into train and val sets by passing a Sampler object to the
# DataLoader telling how it should sample from the underlying Dataset.
cifar10_train = dset.CIFAR10('./cs231n/datasets', train=True, download=True,
                             transform=transform)
loader_train = DataLoader(cifar10_train, batch_size=64,
                          sampler=sampler.SubsetRandomSampler(range(NUM_TRAIN)))
cifar10_val = dset.CIFAR10('./cs231n/datasets', train=True, download=True,
                           transform=transform)
loader_val = DataLoader(cifar10_val, batch_size=64,
                        sampler=sampler.SubsetRandomSampler(range(NUM_TRAIN, 50000)))
cifar10_test = dset.CIFAR10('./cs231n/datasets', train=False, download=True,
                            transform=transform)
loader_test = DataLoader(cifar10_test, batch_size=64)
```
You have an option to **use GPU by setting the flag to True below**. It is not necessary to use GPU for this assignment. Note that if your computer does not have CUDA enabled, `torch.cuda.is_available()` will return False and this notebook will fallback to CPU mode.
The global variables `dtype` and `device` will control the data types throughout this assignment.
## Colab Users
If you are using Colab, you need to manually switch to a GPU device. You can do this by clicking `Runtime -> Change runtime type` and selecting `GPU` under `Hardware Accelerator`. Note that you have to rerun the cells from the top since the kernel gets restarted upon switching runtimes.
```
USE_GPU = True

# Single dtype shared by all tensors in this tutorial.
dtype = torch.float32

# Use CUDA when requested and available; otherwise fall back to the CPU.
device = torch.device('cuda' if USE_GPU and torch.cuda.is_available() else 'cpu')

# Constant to control how frequently we print train loss
print_every = 100

print('using device:', device)
```
# Part II. Barebones PyTorch
PyTorch ships with high-level APIs to help us define model architectures conveniently, which we will cover in Part III of this tutorial. In this section, we will start with the barebone PyTorch elements to understand the autograd engine better. After this exercise, you will come to appreciate the high-level model API more.
We will start with a simple fully-connected ReLU network with two hidden layers and no biases for CIFAR classification.
This implementation computes the forward pass using operations on PyTorch Tensors, and uses PyTorch autograd to compute gradients. It is important that you understand every line, because you will write a harder version after the example.
When we create a PyTorch Tensor with `requires_grad=True`, then operations involving that Tensor will not just compute values; they will also build up a computational graph in the background, allowing us to easily backpropagate through the graph to compute gradients of some Tensors with respect to a downstream loss. Concretely if x is a Tensor with `x.requires_grad == True` then after backpropagation `x.grad` will be another Tensor holding the gradient of x with respect to the scalar loss at the end.
### PyTorch Tensors: Flatten Function
A PyTorch Tensor is conceptionally similar to a numpy array: it is an n-dimensional grid of numbers, and like numpy PyTorch provides many functions to efficiently operate on Tensors. As a simple example, we provide a `flatten` function below which reshapes image data for use in a fully-connected neural network.
Recall that image data is typically stored in a Tensor of shape N x C x H x W, where:
* N is the number of datapoints
* C is the number of channels
* H is the height of the intermediate feature map in pixels
* W is the width of the intermediate feature map in pixels
This is the right way to represent the data when we are doing something like a 2D convolution, that needs spatial understanding of where the intermediate features are relative to each other. When we use fully connected affine layers to process the image, however, we want each datapoint to be represented by a single vector -- it's no longer useful to segregate the different channels, rows, and columns of the data. So, we use a "flatten" operation to collapse the `C x H x W` values per representation into a single long vector. The flatten function below first reads in the N, C, H, and W values from a given batch of data, and then returns a "view" of that data. "View" is analogous to numpy's "reshape" method: it reshapes x's dimensions to be N x ??, where ?? is allowed to be anything (in this case, it will be C x H x W, but we don't need to specify that explicitly).
```
def flatten(x):
    """Collapse all but the batch dimension: (N, C, H, W) -> (N, C*H*W).

    Returns a view (no copy), analogous to numpy's reshape.
    """
    batch_size = x.shape[0]
    return x.view(batch_size, -1)
def test_flatten():
    """Sanity-check flatten() on a small 2x1x3x2 tensor."""
    sample = torch.arange(12).view(2, 1, 3, 2)
    print('Before flattening: ', sample)
    print('After flattening: ', flatten(sample))

test_flatten()
```
### Barebones PyTorch: Two-Layer Network
Here we define a function `two_layer_fc` which performs the forward pass of a two-layer fully-connected ReLU network on a batch of image data. After defining the forward pass we check that it doesn't crash and that it produces outputs of the right shape by running zeros through the network.
You don't have to write any code here, but it's important that you read and understand the implementation.
```
import torch.nn.functional as F # useful stateless functions
def two_layer_fc(x, params):
    """
    Forward pass of a fully-connected network with one hidden ReLU layer
    and no biases:  flatten -> affine -> ReLU -> affine.

    Only the forward pass is defined here; PyTorch autograd handles the
    backward pass because w1 and w2 carry requires_grad=True.

    Inputs:
    - x: Tensor of shape (N, d1, ..., dM) with d1 * ... * dM = D, a
      minibatch of input data.
    - params: list [w1, w2] of weight Tensors; w1 has shape (D, H) and
      w2 has shape (H, C).

    Returns:
    - scores: Tensor of shape (N, C) of classification scores for x.
    """
    w1, w2 = params
    # Reshape each example to a single feature vector of length D.
    features = flatten(x)
    # Hidden layer: affine transform then ReLU. clamp(min=0) is the same
    # operation as F.relu(); autograd records both ops on the graph, so no
    # intermediate references need to be kept for the backward pass.
    hidden = features.mm(w1).clamp(min=0)
    # Output scores: plain affine transform (no softmax here; cross_entropy
    # applies it during training).
    return hidden.mm(w2)
def two_layer_fc_test():
    """Shape-check two_layer_fc on a zero minibatch (expects [64, 10])."""
    hidden_dim = 42
    zeros_in = torch.zeros((64, 50), dtype=dtype)  # minibatch 64, feature dim 50
    weights = [torch.zeros((50, hidden_dim), dtype=dtype),
               torch.zeros((hidden_dim, 10), dtype=dtype)]
    scores = two_layer_fc(zeros_in, weights)
    print(scores.size())  # you should see [64, 10]

two_layer_fc_test()
```
### Barebones PyTorch: Three-Layer ConvNet
Here you will complete the implementation of the function `three_layer_convnet`, which will perform the forward pass of a three-layer convolutional network. Like above, we can immediately test our implementation by passing zeros through the network. The network should have the following architecture:
1. A convolutional layer (with bias) with `channel_1` filters, each with shape `KW1 x KH1`, and zero-padding of two
2. ReLU nonlinearity
3. A convolutional layer (with bias) with `channel_2` filters, each with shape `KW2 x KH2`, and zero-padding of one
4. ReLU nonlinearity
5. Fully-connected layer with bias, producing scores for C classes.
Note that we have **no softmax activation** here after our fully-connected layer: this is because PyTorch's cross entropy loss performs a softmax activation for you, and by bundling that step in makes computation more efficient.
**HINT**: For convolutions: http://pytorch.org/docs/stable/nn.html#torch.nn.functional.conv2d; pay attention to the shapes of convolutional filters!
```
def three_layer_convnet(x, params):
    """
    Performs the forward pass of a three-layer convolutional network with the
    architecture defined above.
    Inputs:
    - x: A PyTorch Tensor of shape (N, 3, H, W) giving a minibatch of images
    - params: A list of PyTorch Tensors giving the weights and biases for the
      network; should contain the following:
      - conv_w1: PyTorch Tensor of shape (channel_1, 3, KH1, KW1) giving weights
        for the first convolutional layer
      - conv_b1: PyTorch Tensor of shape (channel_1,) giving biases for the first
        convolutional layer
      - conv_w2: PyTorch Tensor of shape (channel_2, channel_1, KH2, KW2) giving
        weights for the second convolutional layer
      - conv_b2: PyTorch Tensor of shape (channel_2,) giving biases for the second
        convolutional layer
      - fc_w: PyTorch Tensor giving weights for the fully-connected layer. Can you
        figure out what the shape should be?
      - fc_b: PyTorch Tensor giving biases for the fully-connected layer. Can you
        figure out what the shape should be?
    Returns:
    - scores: PyTorch Tensor of shape (N, C) giving classification scores for x
    """
    conv_w1, conv_b1, conv_w2, conv_b2, fc_w, fc_b = params
    scores = None
    ################################################################################
    # TODO: Implement the forward pass for the three-layer ConvNet.                #
    ################################################################################
    # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
    # "Same" padding: kernel_size // 2 preserves spatial dims for odd kernels
    # (2 for 5x5 filters, 1 for 3x3 filters, matching the spec above).
    x = F.conv2d(x, conv_w1, bias=conv_b1, padding=conv_w1.size()[-1] // 2)
    x = F.relu(x)
    x = F.conv2d(x, conv_w2, bias=conv_b2, padding=conv_w2.size()[-1] // 2)
    x = F.relu(x)
    # Flatten each example to a feature vector before the fully-connected layer.
    x = x.view(x.size()[0], -1)
    # fc_w is stored as (in_features, num_classes) but F.linear expects its
    # weight as (out_features, in_features), hence the transpose.
    scores = F.linear(x, fc_w.transpose(0, 1), bias=fc_b)
    # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
    ################################################################################
    #                                 END OF YOUR CODE                             #
    ################################################################################
    return scores
```
After defining the forward pass of the ConvNet above, run the following cell to test your implementation.
When you run this function, scores should have shape (64, 10).
```
def three_layer_convnet_test():
    """Run zeros through the ConvNet and check the output shape."""
    images = torch.zeros((64, 3, 32, 32), dtype=dtype)  # minibatch of 64 CIFAR-sized images

    # First conv layer: 6 filters over 3 input channels, 5x5 kernels.
    conv_w1 = torch.zeros((6, 3, 5, 5), dtype=dtype)
    conv_b1 = torch.zeros((6,))
    # Second conv layer: 9 filters over 6 channels, 3x3 kernels.
    conv_w2 = torch.zeros((9, 6, 3, 3), dtype=dtype)
    conv_b2 = torch.zeros((9,))
    # The padding preserves the 32x32 spatial size, so the FC layer
    # receives 9 * 32 * 32 features per example.
    fc_w = torch.zeros((9 * 32 * 32, 10))
    fc_b = torch.zeros(10)

    scores = three_layer_convnet(images, [conv_w1, conv_b1, conv_w2, conv_b2, fc_w, fc_b])
    print(scores.size())  # you should see [64, 10]

three_layer_convnet_test()
```
### Barebones PyTorch: Initialization
Let's write a couple utility methods to initialize the weight matrices for our models.
- `random_weight(shape)` initializes a weight tensor with the Kaiming normalization method.
- `zero_weight(shape)` initializes a weight tensor with all zeros. Useful for instantiating bias parameters.
The `random_weight` function uses the Kaiming normal initialization method, described in:
He et al, *Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification*, ICCV 2015, https://arxiv.org/abs/1502.01852
```
def random_weight(shape):
    """
    Create a learnable weight Tensor with Kaiming-normal initialization:
    entries drawn from N(0, 2 / fan_in). requires_grad=True so operations
    on the tensor are tracked by autograd during the backward pass.
    """
    # fan_in: input dimension for FC weights; in_channel * kH * kW for
    # conv weights of shape [out_channel, in_channel, kH, kW].
    fan_in = shape[0] if len(shape) == 2 else np.prod(shape[1:])
    scale = np.sqrt(2. / fan_in)
    # torch.randn samples from the standard normal distribution.
    weight = torch.randn(shape, device=device, dtype=dtype) * scale
    weight.requires_grad = True
    return weight
def zero_weight(shape):
    """Create an all-zeros learnable Tensor; used to initialize biases."""
    return torch.zeros(shape, device=device, dtype=dtype, requires_grad=True)
# create a weight of shape [3 x 5]
# you should see the type `torch.cuda.FloatTensor` if you use GPU.
# Otherwise it should be `torch.FloatTensor`
random_weight((3, 5))
```
### Barebones PyTorch: Check Accuracy
When training the model we will use the following function to check the accuracy of our model on the training or validation sets.
When checking accuracy we don't need to compute any gradients; as a result we don't need PyTorch to build a computational graph for us when we compute scores. To prevent a graph from being built we scope our computation under a `torch.no_grad()` context manager.
```
def check_accuracy_part2(loader, model_fn, params):
    """
    Check the accuracy of a classification model.
    Inputs:
    - loader: A DataLoader for the data split we want to check
    - model_fn: A function that performs the forward pass of the model,
      with the signature scores = model_fn(x, params)
    - params: List of PyTorch Tensors giving parameters of the model
    Returns: Nothing, but prints the accuracy of the model
    """
    # loader.dataset.train distinguishes the val split (carved out of the
    # training set) from the test split.
    split = 'val' if loader.dataset.train else 'test'
    print('Checking accuracy on the %s set' % split)
    correct = 0
    total = 0
    # Evaluation needs no gradients; no_grad() skips building the graph.
    with torch.no_grad():
        for images, labels in loader:
            images = images.to(device=device, dtype=dtype)  # move to device, e.g. GPU
            labels = labels.to(device=device, dtype=torch.int64)
            # Predicted class = argmax over the score dimension.
            _, predictions = model_fn(images, params).max(1)
            correct += (predictions == labels).sum()
            total += predictions.size(0)
    acc = float(correct) / total
    print('Got %d / %d correct (%.2f%%)' % (correct, total, 100 * acc))
```
### BareBones PyTorch: Training Loop
We can now set up a basic training loop to train our network. We will train the model using stochastic gradient descent without momentum. We will use `torch.functional.cross_entropy` to compute the loss; you can [read about it here](http://pytorch.org/docs/stable/nn.html#cross-entropy).
The training loop takes as input the neural network function, a list of initialized parameters (`[w1, w2]` in our example), and learning rate.
```
def train_part2(model_fn, params, learning_rate):
    """
    Train a model on CIFAR-10 with plain SGD (no momentum).
    Inputs:
    - model_fn: A Python function that performs the forward pass of the model.
      It should have the signature scores = model_fn(x, params) where x is a
      PyTorch Tensor of image data, params is a list of PyTorch Tensors giving
      model weights, and scores is a PyTorch Tensor of shape (N, C) giving
      scores for the elements in x.
    - params: List of PyTorch Tensors giving weights for the model
    - learning_rate: Python scalar giving the learning rate to use for SGD
    Returns: Nothing
    """
    # One pass over the training set; t counts minibatches.
    for t, (x, y) in enumerate(loader_train):
        # Move the data to the proper device (GPU or CPU)
        x = x.to(device=device, dtype=dtype)
        y = y.to(device=device, dtype=torch.long)
        # Forward pass: compute scores and loss
        scores = model_fn(x, params)
        loss = F.cross_entropy(scores, y)
        # Backward pass: PyTorch figures out which Tensors in the computational
        # graph has requires_grad=True and uses backpropagation to compute the
        # gradient of the loss with respect to these Tensors, and stores the
        # gradients in the .grad attribute of each Tensor.
        loss.backward()
        # Update parameters. We don't want to backpropagate through the
        # parameter updates, so we scope the updates under a torch.no_grad()
        # context manager to prevent a computational graph from being built.
        with torch.no_grad():
            for w in params:
                w -= learning_rate * w.grad
                # Manually zero the gradients after running the backward pass
                # (otherwise .backward() accumulates into .grad).
                w.grad.zero_()
        if t % print_every == 0:
            print('Iteration %d, loss = %.4f' % (t, loss.item()))
            check_accuracy_part2(loader_val, model_fn, params)
            print()
```
### BareBones PyTorch: Train a Two-Layer Network
Now we are ready to run the training loop. We need to explicitly allocate tensors for the fully connected weights, `w1` and `w2`.
Each minibatch of CIFAR has 64 examples, so the tensor shape is `[64, 3, 32, 32]`.
After flattening, `x` shape should be `[64, 3 * 32 * 32]`. This will be the size of the first dimension of `w1`.
The second dimension of `w1` is the hidden layer size, which will also be the first dimension of `w2`.
Finally, the output of the network is a 10-dimensional vector that represents the probability distribution over 10 classes.
You don't need to tune any hyperparameters but you should see accuracies above 40% after training for one epoch.
```
# Hyperparameters for the two-layer network; no tuning needed here.
hidden_layer_size = 4000
learning_rate = 1e-2
# w1: (input_dim, hidden); w2: (hidden, num_classes). Kaiming-normal init.
w1 = random_weight((3 * 32 * 32, hidden_layer_size))
w2 = random_weight((hidden_layer_size, 10))
train_part2(two_layer_fc, [w1, w2], learning_rate)
```
### BareBones PyTorch: Training a ConvNet
In the below you should use the functions defined above to train a three-layer convolutional network on CIFAR. The network should have the following architecture:
1. Convolutional layer (with bias) with 32 5x5 filters, with zero-padding of 2
2. ReLU
3. Convolutional layer (with bias) with 16 3x3 filters, with zero-padding of 1
4. ReLU
5. Fully-connected layer (with bias) to compute scores for 10 classes
You should initialize your weight matrices using the `random_weight` function defined above, and you should initialize your bias vectors using the `zero_weight` function above.
You don't need to tune any hyperparameters, but if everything works correctly you should achieve an accuracy above 42% after one epoch.
```
learning_rate = 3e-3
channel_1 = 32
channel_2 = 16
conv_w1 = None
conv_b1 = None
conv_w2 = None
conv_b2 = None
fc_w = None
fc_b = None
################################################################################
# TODO: Initialize the parameters of a three-layer ConvNet.                    #
################################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
# conv1: 32 filters of shape 3x5x5 (Kaiming-normal weights, zero biases).
conv_w1 = random_weight((32, 3, 5, 5))
conv_b1 = zero_weight(32)
# conv2: 16 filters of shape 32x3x3.
conv_w2 = random_weight((16, 32, 3, 3))
conv_b2 = zero_weight(16)
# The padding preserves the 32x32 spatial size, so the fully-connected
# layer sees 16 * 32 * 32 features per example.
fc_w = random_weight((16 * 32 * 32, 10))
fc_b = zero_weight(10)
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
################################################################################
#                                 END OF YOUR CODE                             #
################################################################################
params = [conv_w1, conv_b1, conv_w2, conv_b2, fc_w, fc_b]
train_part2(three_layer_convnet, params, learning_rate)
```
# Part III. PyTorch Module API
Barebone PyTorch requires that we track all the parameter tensors by hand. This is fine for small networks with a few tensors, but it would be extremely inconvenient and error-prone to track tens or hundreds of tensors in larger networks.
PyTorch provides the `nn.Module` API for you to define arbitrary network architectures, while tracking every learnable parameters for you. In Part II, we implemented SGD ourselves. PyTorch also provides the `torch.optim` package that implements all the common optimizers, such as RMSProp, Adagrad, and Adam. It even supports approximate second-order methods like L-BFGS! You can refer to the [doc](http://pytorch.org/docs/master/optim.html) for the exact specifications of each optimizer.
To use the Module API, follow the steps below:
1. Subclass `nn.Module`. Give your network class an intuitive name like `TwoLayerFC`.
2. In the constructor `__init__()`, define all the layers you need as class attributes. Layer objects like `nn.Linear` and `nn.Conv2d` are themselves `nn.Module` subclasses and contain learnable parameters, so that you don't have to instantiate the raw tensors yourself. `nn.Module` will track these internal parameters for you. Refer to the [doc](http://pytorch.org/docs/master/nn.html) to learn more about the dozens of builtin layers. **Warning**: don't forget to call the `super().__init__()` first!
3. In the `forward()` method, define the *connectivity* of your network. You should use the attributes defined in `__init__` as function calls that take tensor as input and output the "transformed" tensor. Do *not* create any new layers with learnable parameters in `forward()`! All of them must be declared upfront in `__init__`.
After you define your Module subclass, you can instantiate it as an object and call it just like the NN forward function in part II.
### Module API: Two-Layer Network
Here is a concrete example of a 2-layer fully connected network:
```
class TwoLayerFC(nn.Module):
    """Fully connected net: affine -> ReLU -> affine, with Kaiming-normal init."""

    def __init__(self, input_size, hidden_size, num_classes):
        super().__init__()
        # Layers stored as attributes so nn.Module registers their parameters.
        self.fc1 = nn.Linear(input_size, hidden_size)
        # nn.init provides convenient initializers:
        # http://pytorch.org/docs/master/nn.html#torch-nn-init
        nn.init.kaiming_normal_(self.fc1.weight)
        self.fc2 = nn.Linear(hidden_size, num_classes)
        nn.init.kaiming_normal_(self.fc2.weight)

    def forward(self, x):
        # Connectivity only: flatten the input, then fc1 -> ReLU -> fc2.
        hidden = F.relu(self.fc1(flatten(x)))
        return self.fc2(hidden)
def test_TwoLayerFC():
    """Shape smoke test: a [64, 50] batch should map to [64, 10] scores."""
    feature_dim = 50
    batch = torch.zeros((64, feature_dim), dtype=dtype)  # minibatch of 64
    net = TwoLayerFC(feature_dim, 42, 10)
    print(net(batch).size())  # you should see [64, 10]
test_TwoLayerFC()
```
### Module API: Three-Layer ConvNet
It's your turn to implement a 3-layer ConvNet followed by a fully connected layer. The network architecture should be the same as in Part II:
1. Convolutional layer with `channel_1` 5x5 filters with zero-padding of 2
2. ReLU
3. Convolutional layer with `channel_2` 3x3 filters with zero-padding of 1
4. ReLU
5. Fully-connected layer to `num_classes` classes
You should initialize the weight matrices of the model using the Kaiming normal initialization method.
**HINT**: http://pytorch.org/docs/stable/nn.html#conv2d
After you implement the three-layer ConvNet, the `test_ThreeLayerConvNet` function will run your implementation; it should print `(64, 10)` for the shape of the output scores.
```
class ThreeLayerConvNet(nn.Module):
    """conv(5x5, pad 2) -> ReLU -> conv(3x3, pad 1) -> ReLU -> affine scores."""

    def __init__(self, in_channel, channel_1, channel_2, num_classes):
        super().__init__()
        ########################################################################
        # TODO: Set up the layers you need for a three-layer ConvNet with the  #
        # architecture defined above.                                          #
        ########################################################################
        # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
        # Both convolutions preserve the 32x32 spatial size (padding matches
        # half the kernel), so the classifier sees channel_2 * 32 * 32 features.
        self.conv1 = nn.Conv2d(in_channel, channel_1, 5, padding=2)
        self.conv2 = nn.Conv2d(channel_1, channel_2, 3, padding=1)
        self.fc = nn.Linear(channel_2 * 32 * 32, num_classes)
        # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
        ########################################################################
        #                               END OF YOUR CODE                       #
        ########################################################################

    def forward(self, x):
        scores = None
        ########################################################################
        # TODO: Implement the forward function for a 3-layer ConvNet. you      #
        # should use the layers you defined in __init__ and specify the        #
        # connectivity of those layers in forward()                            #
        ########################################################################
        # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
        out = F.relu(self.conv1(x))
        out = F.relu(self.conv2(out))
        scores = self.fc(out.reshape(-1, self.fc.in_features))
        # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
        ########################################################################
        #                               END OF YOUR CODE                       #
        ########################################################################
        return scores
def test_ThreeLayerConvNet():
    """Shape smoke test: a CIFAR-sized batch should yield [64, 10] scores."""
    batch = torch.zeros((64, 3, 32, 32), dtype=dtype)  # 64 images of [3, 32, 32]
    net = ThreeLayerConvNet(in_channel=3, channel_1=12, channel_2=8, num_classes=10)
    print(net(batch).size())  # you should see [64, 10]
test_ThreeLayerConvNet()
```
### Module API: Check Accuracy
Given the validation or test set, we can check the classification accuracy of a neural network.
This version is slightly different from the one in part II. You don't manually pass in the parameters anymore.
```
def check_accuracy_part34(loader, model):
    """Print `model`'s classification accuracy over the samples in `loader`."""
    # NOTE(review): assumes the validation loader wraps the CIFAR training
    # split (dataset.train is True) while the test loader does not — confirm.
    if loader.dataset.train:
        print('Checking accuracy on validation set')
    else:
        print('Checking accuracy on test set')
    num_correct, num_samples = 0, 0
    model.eval()  # evaluation mode (affects dropout / batchnorm)
    with torch.no_grad():
        for x, y in loader:
            x = x.to(device=device, dtype=dtype)  # move to device, e.g. GPU
            y = y.to(device=device, dtype=torch.long)
            preds = model(x).max(1)[1]  # index of the highest score per row
            num_correct += (preds == y).sum()
            num_samples += preds.size(0)
        acc = float(num_correct) / num_samples
        print('Got %d / %d correct (%.2f)' % (num_correct, num_samples, 100 * acc))
```
### Module API: Training Loop
We also use a slightly different training loop. Rather than updating the values of the weights ourselves, we use an Optimizer object from the `torch.optim` package, which abstracts the notion of an optimization algorithm and provides implementations of most of the algorithms commonly used to optimize neural networks.
```
def train_part34(model, optimizer, epochs=1):
    """
    Train a model on CIFAR-10 using the PyTorch Module API.
    Inputs:
    - model: A PyTorch Module giving the model to train.
    - optimizer: An Optimizer object we will use to train the model
    - epochs: (Optional) A Python integer giving the number of epochs to train for
    Returns: Nothing, but prints model accuracies during training.
    """
    model = model.to(device=device)  # move the model parameters to CPU/GPU
    for _epoch in range(epochs):
        for t, (x, y) in enumerate(loader_train):
            model.train()  # put model to training mode
            x = x.to(device=device, dtype=dtype)  # move to device, e.g. GPU
            y = y.to(device=device, dtype=torch.long)
            loss = F.cross_entropy(model(x), y)
            # Clear stale gradients, backprop the loss through every model
            # parameter, then let the optimizer apply one update step.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if t % print_every == 0:
                print('Iteration %d, loss = %.4f' % (t, loss.item()))
                check_accuracy_part34(loader_val, model)
                print()
```
### Module API: Train a Two-Layer Network
Now we are ready to run the training loop. In contrast to part II, we don't explicitly allocate parameter tensors anymore.
Simply pass the input size, hidden layer size, and number of classes (i.e. output size) to the constructor of `TwoLayerFC`.
You also need to define an optimizer that tracks all the learnable parameters inside `TwoLayerFC`.
You don't need to tune any hyperparameters, but you should see model accuracies above 40% after training for one epoch.
```
# Train the two-layer fully connected net with plain SGD for one epoch.
hidden_layer_size = 4000
learning_rate = 1e-2
# CIFAR images are [3, 32, 32], flattened to 3 * 32 * 32 input features.
model = TwoLayerFC(3 * 32 * 32, hidden_layer_size, 10)
optimizer = optim.SGD(model.parameters(), lr=learning_rate)
train_part34(model, optimizer)
```
### Module API: Train a Three-Layer ConvNet
You should now use the Module API to train a three-layer ConvNet on CIFAR. This should look very similar to training the two-layer network! You don't need to tune any hyperparameters, but you should achieve accuracy above 45% after training for one epoch.
You should train the model using stochastic gradient descent without momentum.
```
learning_rate = 3e-3
channel_1 = 32
channel_2 = 16

model = None
optimizer = None
################################################################################
# TODO: Instantiate your ThreeLayerConvNet model and a corresponding optimizer #
################################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
model = ThreeLayerConvNet(in_channel=3, channel_1=channel_1,
                          channel_2=channel_2, num_classes=10)
# Plain SGD without momentum, as the instructions require.
optimizer = optim.SGD(model.parameters(), lr=learning_rate)
# NOTE: the scaffold calls train_part34 below; a duplicate call that used to
# live here made the model train for two epochs instead of the intended one.
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
################################################################################
#                                 END OF YOUR CODE
################################################################################
train_part34(model, optimizer)
```
# Part IV. PyTorch Sequential API
Part III introduced the PyTorch Module API, which allows you to define arbitrary learnable layers and their connectivity.
For simple models like a stack of feed forward layers, you still need to go through 3 steps: subclass `nn.Module`, assign layers to class attributes in `__init__`, and call each layer one by one in `forward()`. Is there a more convenient way?
Fortunately, PyTorch provides a container Module called `nn.Sequential`, which merges the above steps into one. It is not as flexible as `nn.Module`, because you cannot specify more complex topology than a feed-forward stack, but it's good enough for many use cases.
### Sequential API: Two-Layer Network
Let's see how to rewrite our two-layer fully connected network example with `nn.Sequential`, and train it using the training loop defined above.
Again, you don't need to tune any hyperparameters here, but you should achieve above 40% accuracy after one epoch of training.
```
# We need to wrap `flatten` function in a module in order to stack it
# in nn.Sequential
class Flatten(nn.Module):
    """nn.Module wrapper around `flatten`, so it can sit inside nn.Sequential."""
    def forward(self, x):
        # Collapse each sample to a 1-D feature vector.
        return flatten(x)
hidden_layer_size = 4000
learning_rate = 1e-2
# Same two-layer architecture as Part III, expressed with nn.Sequential.
model = nn.Sequential(
    Flatten(),
    nn.Linear(3 * 32 * 32, hidden_layer_size),
    nn.ReLU(),
    nn.Linear(hidden_layer_size, 10),
)
# you can use Nesterov momentum in optim.SGD
optimizer = optim.SGD(model.parameters(), lr=learning_rate,
                      momentum=0.9, nesterov=True)
train_part34(model, optimizer)
```
### Sequential API: Three-Layer ConvNet
Here you should use `nn.Sequential` to define and train a three-layer ConvNet with the same architecture we used in Part III:
1. Convolutional layer (with bias) with 32 5x5 filters, with zero-padding of 2
2. ReLU
3. Convolutional layer (with bias) with 16 3x3 filters, with zero-padding of 1
4. ReLU
5. Fully-connected layer (with bias) to compute scores for 10 classes
You should initialize your weight matrices using the `random_weight` function defined above, and you should initialize your bias vectors using the `zero_weight` function above.
You should optimize your model using stochastic gradient descent with Nesterov momentum 0.9.
Again, you don't need to tune any hyperparameters but you should see accuracy above 55% after one epoch of training.
```
channel_1 = 32
channel_2 = 16
learning_rate = 1e-2
model = None
optimizer = None
################################################################################
# TODO: Rewrite the three-layer ConvNet with bias from Part III with the       #
# Sequential API.                                                              #
################################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
# Same architecture as Part III; both convs keep the 32x32 spatial size,
# so the classifier input is channel_2 * 32 * 32 features.
model = nn.Sequential(
    nn.Conv2d(3, channel_1, 5, padding=2),
    nn.ReLU(),
    nn.Conv2d(channel_1, channel_2, 3, padding=1),
    nn.ReLU(),
    Flatten(),
    nn.Linear(channel_2 * 32 * 32, 10),
)
# SGD with Nesterov momentum 0.9, as the instructions require.
optimizer = optim.SGD(model.parameters(), lr=learning_rate,
                      momentum=0.9, nesterov=True)
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
################################################################################
#                                 END OF YOUR CODE
################################################################################
train_part34(model, optimizer)
```
# Part V. CIFAR-10 open-ended challenge
In this section, you can experiment with whatever ConvNet architecture you'd like on CIFAR-10.
Now it's your job to experiment with architectures, hyperparameters, loss functions, and optimizers to train a model that achieves **at least 70%** accuracy on the CIFAR-10 **validation** set within 10 epochs. You can use the check_accuracy and train functions from above. You can use either `nn.Module` or `nn.Sequential` API.
Describe what you did at the end of this notebook.
Here are the official API documentation for each component. One note: what we call in the class "spatial batch norm" is called "BatchNorm2D" in PyTorch.
* Layers in torch.nn package: http://pytorch.org/docs/stable/nn.html
* Activations: http://pytorch.org/docs/stable/nn.html#non-linear-activations
* Loss functions: http://pytorch.org/docs/stable/nn.html#loss-functions
* Optimizers: http://pytorch.org/docs/stable/optim.html
### Things you might try:
- **Filter size**: Above we used 5x5; would smaller filters be more efficient?
- **Number of filters**: Above we used 32 filters. Do more or fewer do better?
- **Pooling vs Strided Convolution**: Do you use max pooling or just stride convolutions?
- **Batch normalization**: Try adding spatial batch normalization after convolution layers and vanilla batch normalization after affine layers. Do your networks train faster?
- **Network architecture**: The network above has two layers of trainable parameters. Can you do better with a deep network? Good architectures to try include:
- [conv-relu-pool]xN -> [affine]xM -> [softmax or SVM]
- [conv-relu-conv-relu-pool]xN -> [affine]xM -> [softmax or SVM]
- [batchnorm-relu-conv]xN -> [affine]xM -> [softmax or SVM]
- **Global Average Pooling**: Instead of flattening and then having multiple affine layers, perform convolutions until your image gets small (7x7 or so) and then perform an average pooling operation to get to a 1x1 image picture (1, 1 , Filter#), which is then reshaped into a (Filter#) vector. This is used in [Google's Inception Network](https://arxiv.org/abs/1512.00567) (See Table 1 for their architecture).
- **Regularization**: Add l2 weight regularization, or perhaps use Dropout.
### Tips for training
For each network architecture that you try, you should tune the learning rate and other hyperparameters. When doing this there are a couple important things to keep in mind:
- If the parameters are working well, you should see improvement within a few hundred iterations
- Remember the coarse-to-fine approach for hyperparameter tuning: start by testing a large range of hyperparameters for just a few training iterations to find the combinations of parameters that are working at all.
- Once you have found some sets of parameters that seem to work, search more finely around these parameters. You may need to train for more epochs.
- You should use the validation set for hyperparameter search, and save your test set for evaluating your architecture on the best parameters as selected by the validation set.
### Going above and beyond
If you are feeling adventurous there are many other features you can implement to try and improve your performance. You are **not required** to implement any of these, but don't miss the fun if you have time!
- Alternative optimizers: you can try Adam, Adagrad, RMSprop, etc.
- Alternative activation functions such as leaky ReLU, parametric ReLU, ELU, or MaxOut.
- Model ensembles
- Data augmentation
- New Architectures
- [ResNets](https://arxiv.org/abs/1512.03385) where the input from the previous layer is added to the output.
- [DenseNets](https://arxiv.org/abs/1608.06993) where inputs into previous layers are concatenated together.
- [This blog has an in-depth overview](https://chatbotslife.com/resnets-highwaynets-and-densenets-oh-my-9bb15918ee32)
### Have fun and happy training!
```
################################################################################
# TODO:                                                                        #
# Experiment with any architectures, optimizers, and hyperparameters.          #
# Achieve AT LEAST 70% accuracy on the *validation set* within 10 epochs.      #
#                                                                              #
# Note that you can use the check_accuracy function to evaluate on either      #
# the test set or the validation set, by passing either loader_test or         #
# loader_val as the second argument to check_accuracy. You should not touch    #
# the test set until you have finished your architecture and hyperparameter    #
# tuning, and only run the test set once at the end to report a final value.   #
################################################################################
model = None
optimizer = None
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
# LeNet-style stack: three [conv -> batchnorm -> relu -> maxpool] stages.
# Each MaxPool2d(2) halves the spatial size: 32 -> 16 -> 8 -> 4, so the
# flattened feature vector is 120 * 4 * 4.
model = nn.Sequential(
    nn.Conv2d(3, 6, 3, padding=1),
    nn.BatchNorm2d(6),
    nn.ReLU(),
    nn.MaxPool2d(2),
    nn.Conv2d(6, 16, 3, padding=1),
    nn.BatchNorm2d(16),
    nn.ReLU(),
    nn.MaxPool2d(2),
    nn.Conv2d(16, 120, 3, padding=1),
    nn.BatchNorm2d(120),
    nn.ReLU(),
    nn.MaxPool2d(2),
    Flatten(),
    # NOTE(review): no nonlinearity between these two Linear layers, so they
    # compose to a single affine map — confirm this is intentional.
    nn.Linear(120 * 4 * 4, 84 * 4),
    nn.Linear(84 * 4, 10)
)
optimizer = optim.Adam(model.parameters(), lr=1e-3)
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
################################################################################
#                                 END OF YOUR CODE
################################################################################
# You should get at least 70% accuracy
train_part34(model, optimizer, epochs=10)
```
## Describe what you did
In the cell below you should write an explanation of what you did, any additional features that you implemented, and/or any graphs that you made in the process of training and evaluating your network.
TODO: Describe what you did
## Test set -- run this only once
Now that we've gotten a result we're happy with, we test our final model on the test set (which you should store in best_model). Think about how this compares to your validation set accuracy.
```
# One-time test-set evaluation of the final model from the experiment above.
best_model = model
check_accuracy_part34(loader_test, best_model)
```
| github_jupyter |
<a href="https://colab.research.google.com/github/tmbern/DS-Unit-2-Kaggle-Challenge/blob/master/module4-classification-metrics/Unit_2_Sprint_2_Module_4_CLASS-lecture-notebook.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
Lambda School Data Science
*Unit 2, Sprint 2, Module 4*
---
# Classification Metrics
- get and interpret the **confusion matrix** for classification models
- use classification metrics: **precision, recall**
- understand the relationships between precision, recall, **thresholds, and predicted probabilities**, to help **make decisions and allocate budgets**
- Get **ROC AUC** (Receiver Operating Characteristic, Area Under the Curve)
### Setup
Run the code cell below. You can work locally (follow the [local setup instructions](https://lambdaschool.github.io/ds/unit2/local/)) or on Colab.
Libraries
- category_encoders
- ipywidgets
- matplotlib
- numpy
- pandas
- scikit-learn
- seaborn
```
%%capture
import sys
# %%capture hides the noisy pip output from this setup cell.
# If you're on Colab:
if 'google.colab' in sys.modules:
    DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge/master/data/'
    !pip install category_encoders==2.*
# If you're working locally:
else:
    DATA_PATH = '../data/'
# Get and interpret the confusion matrix for classification models
## Overview
First, load the Tanzania Waterpumps data and fit a model. (This code isn't new, we've seen it all before.)
```
%matplotlib inline
import category_encoders as ce
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.impute import SimpleImputer
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.ensemble import RandomForestClassifier
def wrangle(X):
    """Wrangles train, validate, and test sets in the same way.

    Returns a cleaned copy of ``X``:
    - parses ``date_recorded`` and expands it into year/month/day features,
    - engineers ``years`` (years from construction to inspection),
    - drops unusable (``recorded_by``, ``id``) and duplicate
      (``quantity_group``) columns,
    - converts sentinel zeros and near-zero latitudes to NaN.
    """
    X = X.copy()  # never mutate the caller's DataFrame

    # Convert date_recorded to datetime.
    # (`infer_datetime_format=True` was removed: it is deprecated in
    # pandas >= 2.0 and the default parser handles these dates identically.)
    X['date_recorded'] = pd.to_datetime(X['date_recorded'])

    # Extract components from date_recorded, then drop the original column
    X['year_recorded'] = X['date_recorded'].dt.year
    X['month_recorded'] = X['date_recorded'].dt.month
    X['day_recorded'] = X['date_recorded'].dt.day
    X = X.drop(columns='date_recorded')

    # Engineer feature: how many years from construction_year to date_recorded
    X['years'] = X['year_recorded'] - X['construction_year']

    # Drop recorded_by (never varies) and id (always varies, random)
    unusable_variance = ['recorded_by', 'id']
    X = X.drop(columns=unusable_variance)

    # Drop duplicate columns
    duplicate_columns = ['quantity_group']
    X = X.drop(columns=duplicate_columns)

    # About 3% of the time, latitude has small values near zero,
    # outside Tanzania, so we'll treat these like null values
    X['latitude'] = X['latitude'].replace(-2e-08, np.nan)

    # When columns have zeros and shouldn't, they are like null values
    cols_with_zeros = ['construction_year', 'longitude', 'latitude',
                       'gps_height', 'population']
    for col in cols_with_zeros:
        X[col] = X[col].replace(0, np.nan)
    return X
# Merge train_features.csv & train_labels.csv
train = pd.merge(pd.read_csv(DATA_PATH+'waterpumps/train_features.csv'),
                 pd.read_csv(DATA_PATH+'waterpumps/train_labels.csv'))
# Read test_features.csv & sample_submission.csv
test = pd.read_csv(DATA_PATH+'waterpumps/test_features.csv')
sample_submission = pd.read_csv(DATA_PATH+'waterpumps/sample_submission.csv')
# Split train into train & val. Make val the same size as test.
# Stratify keeps the class proportions equal in both splits.
target = 'status_group'
train, val = train_test_split(train, test_size=len(test),
                              stratify=train[target], random_state=42)
# Wrangle train, validate, and test sets in the same way
train = wrangle(train)
val = wrangle(val)
test = wrangle(test)
# Arrange data into X features matrix and y target vector
X_train = train.drop(columns=target)
y_train = train[target]
X_val = val.drop(columns=target)
y_val = val[target]
X_test = test
# Make pipeline!
# Ordinal-encode categoricals, mean-impute the NaNs introduced by wrangle,
# then fit a random forest (n_jobs=-1 uses every available core).
pipeline = make_pipeline(
    ce.OrdinalEncoder(),
    SimpleImputer(strategy='mean'),
    RandomForestClassifier(n_estimators=100, random_state=42, n_jobs=-1)
)
# Fit on train, score on val
pipeline.fit(X_train, y_train)
y_pred = pipeline.predict(X_val)
print('Validation Accuracy', accuracy_score(y_val, y_pred))
```
## Follow Along
Scikit-learn added a [**`plot_confusion_matrix`**](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.plot_confusion_matrix.html) function in version 0.22!
```
import sklearn
sklearn.__version__
from sklearn.metrics import plot_confusion_matrix
plot_confusion_matrix(pipeline, X_val, y_val, values_format='.0f', xticks_rotation='vertical')
```
#### How many correct predictions were made?
```
# Correct predictions: counts read off the confusion matrix plotted above.
correct_predictions = 7005 + 332 + 4351
correct_predictions
```
#### How many total predictions were made?
```
total_predictions = y_val.shape[0]
total_predictions
```
#### What was the classification accuracy?
```
correct_predictions / total_predictions
accuracy_score(y_val, y_pred)
sum(y_pred == y_val) / len(y_pred)
```
# Use classification metrics: precision, recall
## Overview
[Scikit-Learn User Guide — Classification Report](https://scikit-learn.org/stable/modules/model_evaluation.html#classification-report)
```
from sklearn.metrics import classification_report
print(classification_report(y_val, y_pred))
```
#### Wikipedia, [Precision and recall](https://en.wikipedia.org/wiki/Precision_and_recall)
> Both precision and recall are based on an understanding and measure of relevance.
> Suppose a computer program for recognizing dogs in photographs identifies 8 dogs in a picture containing 12 dogs and some cats. Of the 8 identified as dogs, 5 actually are dogs (true positives), while the rest are cats (false positives). The program's precision is 5/8 while its recall is 5/12.
> High precision means that an algorithm returned substantially more relevant results than irrelevant ones, while high recall means that an algorithm returned most of the relevant results.
<img src="https://upload.wikimedia.org/wikipedia/commons/thumb/2/26/Precisionrecall.svg/700px-Precisionrecall.svg.png" width="400">
## Follow Along
#### [We can get precision & recall from the confusion matrix](https://en.wikipedia.org/wiki/Precision_and_recall#Definition_(classification_context))
```
cm = plot_confusion_matrix(pipeline, X_val, y_val, values_format='.0f', xticks_rotation='vertical')
cm
# precision = true_positives / (true_positives + false_positives)
# recall = true_positives / (true_positives + false_negatives)
```
#### How many correct predictions of "non functional"?
```
correct_predictions_nonfunctional = 4351
```
#### How many total predictions of "non functional"?
```
# Counts read off the confusion matrix above.
total_predictions_nonfunctional = 622 + 156 + 4351
```
#### What's the precision for "non functional"?
```
correct_predictions_nonfunctional / total_predictions_nonfunctional
print(classification_report(y_val, y_pred))
```
#### How many actual "non functional" waterpumps?
```
actual_non_functional = 1098 + 68 + 4351
```
#### What's the recall for "non functional"?
```
correct_predictions_nonfunctional / actual_non_functional
```
# Understand the relationships between precision, recall, thresholds, and predicted probabilities, to help make decisions and allocate budgets
## Overview
### Imagine this scenario...
Suppose there are over 14,000 waterpumps that you _do_ have some information about, but you _don't_ know whether they are currently functional, or functional but need repair, or non-functional.
```
len(test)
```
**You have the time and resources to go to just 2,000 waterpumps for proactive maintenance.** You want to predict, which 2,000 are most likely non-functional or in need of repair, to help you triage and prioritize your waterpump inspections.
You have historical inspection data for over 59,000 other waterpumps, which you'll use to fit your predictive model.
```
len(train) + len(val)
```
You have historical inspection data for over 59,000 other waterpumps, which you'll use to fit your predictive model.
Based on this historical data, if you randomly chose waterpumps to inspect, then about 46% of the waterpumps would need repairs, and 54% would not need repairs.
```
y_train.value_counts(normalize=True)
2000 * 0.46
```
**Can you do better than random at prioritizing inspections?**
In this scenario, we should define our target differently. We want to identify which waterpumps are non-functional _or_ are functional but needs repair:
```
y_train = y_train != 'functional'
y_val = y_val != 'functional'
y_train.value_counts(normalize=True)
```
We already made our validation set the same size as our test set.
```
len(val) == len(test)
```
We can refit our model, using the redefined target.
Then make predictions for the validation set.
```
pipeline.fit(X_train, y_train)
y_pred = pipeline.predict(X_val)
```
## Follow Along
#### Look at the confusion matrix:
```
plot_confusion_matrix(pipeline, X_val, y_val, values_format='.0f', xticks_rotation='vertical')
```
#### How many total predictions of "True" ("non functional" or "functional needs repair") ?
```
5032 + 977
print(classification_report(y_val, y_pred))
```
### We don't have "budget" to take action on all these predictions
- But we can get predicted probabilities, to rank the predictions.
- Then change the threshold, to change the number of positive predictions, based on our budget.
### Get predicted probabilities and plot the distribution
```
pipeline.predict_proba(X_val)
pipeline.predict(X_val)
#Predicted probabilites for the positive class
pipeline.predict_proba(X_val)[:, 1]
threshold = 0.92
sum(pipeline.predict_proba(X_val)[:, 1] > threshold)
```
### Change the threshold
```
import seaborn as sns
y_pred_proba = pipeline.predict_proba(X_val)[:, 1]
ax = sns.distplot(y_pred_proba)
threshold = 0.9
ax.axvline(threshold, color='red' )
```
### Or, get exactly 2,000 positive predictions
Identify the 2,000 waterpumps in the validation set with highest predicted probabilities.
```
from ipywidgets import interact, fixed
import seaborn as sns
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
def my_confusion_matrix(y_true, y_pred):
    """Plot a labeled confusion matrix for y_true vs. y_pred as a heatmap."""
    # Collect labels from BOTH arrays: confusion_matrix sizes its output from
    # the union of observed labels, so deriving row/column names from y_true
    # alone breaks whenever y_pred contains a class absent from y_true.
    labels = unique_labels(y_true, y_pred)
    columns = [f'Predicted {label}' for label in labels]
    index = [f'Actual {label}' for label in labels]
    # Passing labels= pins the matrix ordering to our row/column names.
    table = pd.DataFrame(confusion_matrix(y_true, y_pred, labels=labels),
                         columns=columns, index=index)
    return sns.heatmap(table, annot=True, fmt='d', cmap='viridis')
def set_threshold(y_true, y_pred_proba, threshold=0.5):
    """Binarize probabilities at `threshold`, then show the probability
    distribution, a classification report, and a confusion matrix."""
    y_pred = y_pred_proba > threshold
    dist_ax = sns.distplot(y_pred_proba)
    dist_ax.axvline(threshold, color='red')  # mark the cut-off on the plot
    plt.show()
    print(classification_report(y_true, y_pred))
    my_confusion_matrix(y_true, y_pred)
# Interactive widget: slide the threshold from 0 to 1 in steps of 0.02;
# fixed() pins the non-slider arguments.
interact(set_threshold,
         y_true=fixed(y_val),
         y_pred_proba=fixed(y_pred_proba),
         threshold=(0, 1, 0.02));
```
Most of these top 2,000 waterpumps will be relevant recommendations, meaning `y_val==True`, meaning the waterpump is non-functional or needs repairs.
Some of these top 2,000 waterpumps will be irrelevant recommendations, meaning `y_val==False`, meaning the waterpump is functional and does not need repairs.
Let's look at a random sample of 50 out of these top 2,000:
```
```
So how many of our recommendations were relevant? ...
```
```
What's the precision for this subset of 2,000 predictions?
```
```
### In this scenario ...
Accuracy _isn't_ the best metric!
Instead, change the threshold, to change the number of positive predictions, based on the budget. (You have the time and resources to go to just 2,000 waterpumps for proactive maintenance.)
Then, evaluate with the precision for "non functional"/"functional needs repair".
This is conceptually like **Precision@K**, where k=2,000.
Read more here: [Recall and Precision at k for Recommender Systems: Detailed Explanation with examples](https://medium.com/@m_n_malaeb/recall-and-precision-at-k-for-recommender-systems-618483226c54)
> Precision at k is the proportion of recommended items in the top-k set that are relevant
> Mathematically precision@k is defined as: `Precision@k = (# of recommended items @k that are relevant) / (# of recommended items @k)`
> In the context of recommendation systems we are most likely interested in recommending top-N items to the user. So it makes more sense to compute precision and recall metrics in the first N items instead of all the items. Thus the notion of precision and recall at k where k is a user definable integer that is set by the user to match the top-N recommendations objective.
We asked, can you do better than random at prioritizing inspections?
If we had randomly chosen waterpumps to inspect, we estimate that only 920 waterpumps would be repaired after 2,000 maintenance visits. (46%)
But using our predictive model, in the validation set, we successfully identified over 1,900 waterpumps in need of repair!
So we will use this predictive model with the dataset of over 14,000 waterpumps that we _do_ have some information about, but we _don't_ know whether they are currently functional, or functional but need repair, or non-functional.
We will predict which 2,000 are most likely non-functional or in need of repair.
We estimate that approximately 1,900 waterpumps will be repaired after these 2,000 maintenance visits.
So we're confident that our predictive model will help triage and prioritize waterpump inspections.
### But ...
This metric (~1,900 waterpumps repaired after 2,000 maintenance visits) is specific for _one_ classification problem and _one_ possible trade-off.
Can we get an evaluation metric that is generic for _all_ classification problems and _all_ possible trade-offs?
Yes — the most common such metric is **ROC AUC.**
## Get ROC AUC (Receiver Operating Characteristic, Area Under the Curve)
[Wikipedia explains,](https://en.wikipedia.org/wiki/Receiver_operating_characteristic) "A receiver operating characteristic curve, or ROC curve, is a graphical plot that illustrates the diagnostic ability of a binary classifier system as its discrimination threshold is varied. **The ROC curve is created by plotting the true positive rate (TPR) against the false positive rate (FPR) at various threshold settings.**"
ROC AUC is the area under the ROC curve. [It can be interpreted](https://stats.stackexchange.com/questions/132777/what-does-auc-stand-for-and-what-is-it) as "the expectation that a uniformly drawn random positive is ranked before a uniformly drawn random negative."
ROC AUC measures **how well a classifier ranks predicted probabilities.** So, when you get your classifier’s ROC AUC score, you need to **use predicted probabilities, not discrete predictions.**
ROC AUC ranges **from 0 to 1.** Higher is better. A naive majority class **baseline** will have an ROC AUC score of **0.5.**
#### Scikit-Learn docs
- [User Guide: Receiver operating characteristic (ROC)](https://scikit-learn.org/stable/modules/model_evaluation.html#receiver-operating-characteristic-roc)
- [sklearn.metrics.roc_curve](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.roc_curve.html)
- [sklearn.metrics.roc_auc_score](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.roc_auc_score.html)
#### More links
- [ROC curves and Area Under the Curve explained](https://www.dataschool.io/roc-curves-and-auc-explained/)
- [The philosophical argument for using ROC curves](https://lukeoakdenrayner.wordpress.com/2018/01/07/the-philosophical-argument-for-using-roc-curves/)
```
# "The ROC curve is created by plotting the true positive rate (TPR)
# against the false positive rate (FPR)
# at various threshold settings."

# Use scikit-learn to calculate TPR & FPR at various thresholds
from sklearn.metrics import roc_curve
fpr, tpr, thresholds = roc_curve(y_val, y_pred_proba)

# See the results in a table (one row per candidate threshold)
pd.DataFrame({
    'False Positive Rate': fpr,
    'True Positive Rate': tpr,
    'Threshold': thresholds
})

# See the results on a plot.
# This is the "Receiver Operating Characteristic" curve
plt.scatter(fpr, tpr)
plt.title('ROC curve')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate');

# Use scikit-learn to calculate the area under the curve.
# Note: roc_auc_score needs predicted probabilities, not hard predictions.
from sklearn.metrics import roc_auc_score
roc_auc_score(y_val, y_pred_proba)
```
**Recap:** ROC AUC measures how well a classifier ranks predicted probabilities. So, when you get your classifier’s ROC AUC score, you need to use predicted probabilities, not discrete predictions.
Your code may look something like this:
```python
from sklearn.metrics import roc_auc_score
y_pred_proba = model.predict_proba(X_test_transformed)[:, -1] # Probability for last class
print('Test ROC AUC:', roc_auc_score(y_test, y_pred_proba))
```
ROC AUC ranges from 0 to 1. Higher is better. A naive majority class baseline will have an ROC AUC score of 0.5.
| github_jupyter |
# test note
* jupyterはコンテナ起動すること
* テストベッド一式起動済みであること
```
!pip install --upgrade pip
!pip install --force-reinstall ../lib/ait_sdk-0.1.7-py3-none-any.whl
from pathlib import Path
import pprint
from ait_sdk.test.hepler import Helper  # NOTE: "hepler" is the package's actual (misspelled) module name
import json
# settings cell
# mounted dir
root_dir = Path('/workdir/root/ait')
ait_name='eval_mnist_data_coverage'
ait_version='0.1'
ait_full_name=f'{ait_name}_{ait_version}'
ait_dir = root_dir / ait_full_name
td_name=f'{ait_name}_test'
# Root folder (on the Docker host side) holding the assets to register as inventories
current_dir = %pwd
with open(f'{current_dir}/config.json', encoding='utf-8') as f:
    json_ = json.load(f)
    root_dir = json_['host_ait_root_dir']
    is_container = json_['is_container']
# NOTE(review): variable name keeps the original "invenotory" misspelling on purpose —
# later cells reference it; renaming would break them.
invenotory_root_dir = f'{root_dir}\\ait\\{ait_full_name}\\local_qai\\inventory'
# entry point address
# The port number differs depending on whether Jupyter itself runs in a container, so switch here
if is_container:
    backend_entry_point = 'http://host.docker.internal:8888/qai-testbed/api/0.0.1'
    ip_entry_point = 'http://host.docker.internal:8888/qai-ip/api/0.0.1'
else:
    backend_entry_point = 'http://host.docker.internal:5000/qai-testbed/api/0.0.1'
    ip_entry_point = 'http://host.docker.internal:6000/qai-ip/api/0.0.1'
# AIT deployment flag
# Only needs to run once; later runs can leave it off
is_init_ait = True
#is_init_ait = False
# Inventory registration flag
# Only needs to run once; later runs can leave it off
is_init_inventory = True
helper = Helper(backend_entry_point=backend_entry_point,
                ip_entry_point=ip_entry_point,
                ait_dir=ait_dir,
                ait_full_name=ait_full_name)
# health check
helper.get_bk('/health-check')
helper.get_ip('/health-check')
# create ml-component
res = helper.post_ml_component(name=f'MLComponent_{ait_full_name}', description=f'Description of {ait_full_name}', problem_domain=f'ProbremDomain of {ait_full_name}')
helper.set_ml_component_id(res['MLComponentId'])
# deploy AIT
if is_init_ait:
    helper.deploy_ait_non_build()
else:
    print('skip deploy AIT')
# Look up the type/filesystem ids needed to register inventories
res = helper.get_data_types()
model_data_type_id = [d for d in res['DataTypes'] if d['Name'] == 'model'][0]['Id']
dataset_data_type_id = [d for d in res['DataTypes'] if d['Name'] == 'dataset'][0]['Id']
res = helper.get_file_systems()
unix_file_system_id = [f for f in res['FileSystems'] if f['Name'] == 'UNIX_FILE_SYSTEM'][0]['Id']
windows_file_system_id = [f for f in res['FileSystems'] if f['Name'] == 'WINDOWS_FILE'][0]['Id']
# add inventories
if is_init_inventory:
    inv1_name = helper.post_inventory('images', dataset_data_type_id, windows_file_system_id,
                                      f'{invenotory_root_dir}\\train_images\\train-images-idx3-ubyte.gz',
                                      'MNIST images', ['gz'])
    inv2_name = helper.post_inventory('labels', dataset_data_type_id, windows_file_system_id,
                                      f'{invenotory_root_dir}\\train_labels\\train-labels-idx1-ubyte.gz',
                                      'MNIST labels', ['gz'])
else:
    print('skip add inventories')
# get ait_json and inventory_jsons
res_json = helper.get_bk('/QualityMeasurements/RelationalOperators', is_print_json=False).json()
eq_id = int([r['Id'] for r in res_json['RelationalOperator'] if r['Expression'] == '=='][0])
nq_id = int([r['Id'] for r in res_json['RelationalOperator'] if r['Expression'] == '!='][0])
gt_id = int([r['Id'] for r in res_json['RelationalOperator'] if r['Expression'] == '>'][0])
ge_id = int([r['Id'] for r in res_json['RelationalOperator'] if r['Expression'] == '>='][0])
lt_id = int([r['Id'] for r in res_json['RelationalOperator'] if r['Expression'] == '<'][0])
le_id = int([r['Id'] for r in res_json['RelationalOperator'] if r['Expression'] == '<='][0])
res_json = helper.get_bk('/testRunners', is_print_json=False).json()
ait_json = [j for j in res_json['TestRunners'] if j['Name'] == ait_name][-1]
inv_1_json = helper.get_inventory(inv1_name)
inv_2_json = helper.get_inventory(inv2_name)
# add test_descriptions
helper.post_td(td_name, 3,
               quality_measurements=[
                   {"Id":ait_json['Report']['Measures'][0]['Id'], "Value":"0.75", "RelationalOperatorId":gt_id, "Enable":True},
                   {"Id":ait_json['Report']['Measures'][1]['Id'], "Value":"0.75", "RelationalOperatorId":gt_id, "Enable":True}
               ],
               target_inventories=[
                   {"Id":1, "InventoryId": inv_1_json['Id'], "TemplateInventoryId": ait_json['TargetInventories'][0]['Id']},
                   {"Id":2, "InventoryId": inv_2_json['Id'], "TemplateInventoryId": ait_json['TargetInventories'][1]['Id']}
               ],
               test_runner={
                   "Id":ait_json['Id'],
                   "Params":[
                       {"TestRunnerParamTemplateId":ait_json['ParamTemplates'][0]['Id'], "Value":"Area"},
                       {"TestRunnerParamTemplateId":ait_json['ParamTemplates'][1]['Id'], "Value":"100"},
                       {"TestRunnerParamTemplateId":ait_json['ParamTemplates'][2]['Id'], "Value":"800"}
                   ]
               })
# get test_description_jsons
td_1_json = helper.get_td(td_name)
# run test_descriptions
helper.post_run_and_wait(td_1_json['Id'])
res_json = helper.get_td_detail(td_1_json['Id'])
pprint.pprint(res_json)
# generate report
res = helper.post_report(td_1_json['Id'])
pprint.pprint(res)
```
| github_jupyter |
# HM2: Numerical Optimization for Logistic Regression.
### Name: [Your-Name?]
## 0. You will do the following:
1. Read the lecture note: [click here](https://github.com/wangshusen/DeepLearning/blob/master/LectureNotes/Logistic/paper/logistic.pdf)
2. Read, complete, and run my code.
3. **Implement mini-batch SGD** and evaluate the performance.
4. Convert the .IPYNB file to .HTML file.
* The HTML file must contain **the code** and **the output after execution**.
* Missing **the output after execution** will not be graded.
5. Upload this .HTML file to your Google Drive, Dropbox, or your Github repo. (If you submit the file to Google Drive or Dropbox, you must make the file "open-access". The delay caused by "deny of access" may result in late penalty.)
6. Submit the link to this .HTML file to Canvas.
* Example: https://github.com/wangshusen/CS583-2020S/blob/master/homework/HM2/HM2.html
## Grading criteria:
1. When computing the ```gradient``` and ```objective function value``` using a batch of samples, use **matrix-vector multiplication** rather than a FOR LOOP of **vector-vector multiplications**.
2. Plot ```objective function value``` against ```epochs```. In the plot, compare GD, SGD, and MB-SGD (with $b=8$ and $b=64$). The plot must look reasonable.
# 1. Data processing
- Download the Diabete dataset from https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/diabetes
- Load the data using sklearn.
- Preprocess the data.
## 1.1. Load the data
```
# Load the Diabetes dataset (LIBSVM sparse format) from the local file 'diabetes'.
from sklearn import datasets
import numpy
# x_sparse: n-by-d sparse feature matrix; y: length-n label vector
# (assumed +/-1 labels, as the logistic-regression code below requires — TODO confirm)
x_sparse, y = datasets.load_svmlight_file('diabetes')
x = x_sparse.todense()  # NOTE: todense() returns a numpy.matrix, not an ndarray
print('Shape of x: ' + str(x.shape))
print('Shape of y: ' + str(y.shape))
```
## 1.2. Partition to training and test sets
```
# partition the data to training and test sets
n = x.shape[0]
n_train = 640
n_test = n - n_train
# random (unseeded) shuffle, so the split differs between runs
rand_indices = numpy.random.permutation(n)
train_indices = rand_indices[0:n_train]
test_indices = rand_indices[n_train:n]
x_train = x[train_indices, :]
x_test = x[test_indices, :]
# reshape labels to n-by-1 column matrices to match the solver code below
y_train = y[train_indices].reshape(n_train, 1)
y_test = y[test_indices].reshape(n_test, 1)
print('Shape of x_train: ' + str(x_train.shape))
print('Shape of x_test: ' + str(x_test.shape))
print('Shape of y_train: ' + str(y_train.shape))
print('Shape of y_test: ' + str(y_test.shape))
```
## 1.3. Feature scaling
Use standardization to transform both the training and test features
```
# Standardization: zero-mean, unit-variance features
import numpy
# calculate mu and sig using the training set only (test statistics must not leak in)
d = x_train.shape[1]
mu = numpy.mean(x_train, axis=0).reshape(1, d)
sig = numpy.std(x_train, axis=0).reshape(1, d)
# transform the training features; +1E-6 guards against division by zero
# for a constant feature
x_train = (x_train - mu) / (sig + 1E-6)
# transform the test features with the SAME training-set statistics
x_test = (x_test - mu) / (sig + 1E-6)
# sanity check: test moments should be close to (0, 1) but not exact
print('test mean = ')
print(numpy.mean(x_test, axis=0))
print('test std = ')
print(numpy.std(x_test, axis=0))
```
## 1.4. Add a dimension of all ones
```
# Append a constant-1 column so the model's last weight acts as the bias term.
n_train, d = x_train.shape
x_train = numpy.concatenate((x_train, numpy.ones((n_train, 1))), axis=1)
n_test, d = x_test.shape
x_test = numpy.concatenate((x_test, numpy.ones((n_test, 1))), axis=1)
print('Shape of x_train: ' + str(x_train.shape))
print('Shape of x_test: ' + str(x_test.shape))
```
# 2. Logistic regression model
The objective function is $Q (w; X, y) = \frac{1}{n} \sum_{i=1}^n \log \Big( 1 + \exp \big( - y_i x_i^T w \big) \Big) + \frac{\lambda}{2} \| w \|_2^2 $.
```
# Calculate the objective function value
# Inputs:
#     w: d-by-1 matrix
#     x: n-by-d matrix
#     y: n-by-1 matrix
#     lam: scalar, the regularization parameter
# Return:
#     objective function value (scalar)
def objective(w, x, y, lam):
    """Regularized logistic loss: mean_i log(1 + exp(-y_i x_i^T w)) + (lam/2)||w||^2."""
    margins = numpy.dot(numpy.multiply(y, x), w)        # n-by-1: y_i * (x_i^T w)
    sample_losses = numpy.log(1 + numpy.exp(-margins))  # n-by-1 per-sample logistic losses
    penalty = lam / 2 * numpy.sum(w * w)                # L2 regularization term
    return numpy.mean(sample_losses) + penalty
# initialize w at the origin (standard starting point for convex problems)
d = x_train.shape[1]
w = numpy.zeros((d, 1))
# evaluate the objective function value at w
lam = 1E-6  # small L2 regularization; makes the problem strongly convex
objval0 = objective(w, x_train, y_train, lam)
print('Initial objective function value = ' + str(objval0))
```
# 3. Numerical optimization
## 3.1. Gradient descent
The gradient at $w$ is $g = - \frac{1}{n} \sum_{i=1}^n \frac{y_i x_i }{1 + \exp ( y_i x_i^T w)} + \lambda w$
```
# Calculate the gradient
# Inputs:
#     w: d-by-1 matrix
#     x: n-by-d matrix
#     y: n-by-1 matrix
#     lam: scalar, the regularization parameter
# Return:
#     g: d-by-1 matrix, full gradient
def gradient(w, x, y, lam):
    """Full gradient of the regularized logistic loss at w."""
    n, d = x.shape
    signed = numpy.multiply(y, x)            # n-by-d: y_i * x_i
    margins = numpy.dot(signed, w)           # n-by-1: y_i x_i^T w
    scaled = numpy.divide(signed, 1 + numpy.exp(margins))  # n-by-d logistic-weighted rows
    data_grad = -numpy.mean(scaled, axis=0).reshape(d, 1)  # d-by-1 data term
    return data_grad + lam * w               # add the L2 penalty gradient
# Gradient descent for solving logistic regression
# Inputs:
#     x: n-by-d matrix
#     y: n-by-1 matrix
#     lam: scalar, the regularization parameter
#     stepsize: scalar
#     max_iter: integer, the maximal iterations
#     w: d-by-1 matrix, initialization of w
# Return:
#     w: d-by-1 matrix, the solution
#     objvals: a record of each iteration's objective value
def grad_descent(x, y, lam, stepsize, max_iter=100, w=None):
    """Full-batch gradient descent for L2-regularized logistic regression."""
    n, d = x.shape
    objvals = numpy.zeros(max_iter)  # store the objective values
    if w is None:
        w = numpy.zeros((d, 1))  # zero initialization
    else:
        # fix: copy so the in-place `w -= ...` below does not mutate the
        # caller's initialization array
        w = numpy.copy(w)
    for t in range(max_iter):
        objval = objective(w, x, y, lam)
        objvals[t] = objval
        print('Objective value at t=' + str(t) + ' is ' + str(objval))
        g = gradient(w, x, y, lam)
        w -= stepsize * g
    return w, objvals
```
Run gradient descent.
```
# Run full-batch gradient descent (100 iterations by default).
lam = 1E-6
stepsize = 1.0
w, objvals_gd = grad_descent(x_train, y_train, lam, stepsize)
```
## 3.2. Stochastic gradient descent (SGD)
Define $Q_i (w) = \log \Big( 1 + \exp \big( - y_i x_i^T w \big) \Big) + \frac{\lambda}{2} \| w \|_2^2 $.
The stochastic gradient at $w$ is $g_i = \frac{\partial Q_i }{ \partial w} = -\frac{y_i x_i }{1 + \exp ( y_i x_i^T w)} + \lambda w$.
```
# Calculate the objective Q_i and the gradient of Q_i
# Inputs:
#     w: d-by-1 matrix
#     xi: 1-by-d matrix
#     yi: scalar
#     lam: scalar, the regularization parameter
# Return:
#     obj: scalar, the objective Q_i
#     g: d-by-1 matrix, gradient of Q_i
def stochastic_objective_gradient(w, xi, yi, lam):
    """Per-sample objective Q_i(w) and its gradient for one sample (xi, yi)."""
    yx = yi * xi  # 1-by-d matrix
    # fix: .item() extracts the scalar; float() on a size-1 array is
    # deprecated in NumPy >= 1.25
    yxw = numpy.dot(yx, w).item()  # scalar: y_i x_i^T w
    # calculate objective function Q_i = log(1 + exp(-y_i x_i^T w)) + lam/2 ||w||^2
    loss = numpy.log(1 + numpy.exp(-yxw))  # scalar
    reg = lam / 2 * numpy.sum(w * w)  # scalar
    obj = loss + reg
    # calculate stochastic gradient: -y_i x_i / (1 + exp(y_i x_i^T w)) + lam w
    g_loss = -yx.T / (1 + numpy.exp(yxw))  # d-by-1 matrix
    g = g_loss + lam * w  # d-by-1 matrix
    return obj, g
# SGD for solving logistic regression
# Inputs:
#     x: n-by-d matrix
#     y: n-by-1 matrix
#     lam: scalar, the regularization parameter
#     stepsize: scalar
#     max_epoch: integer, the maximal epochs
#     w: d-by-1 matrix, initialization of w
# Return:
#     w: the solution
#     objvals: record of each epoch's average objective value
def sgd(x, y, lam, stepsize, max_epoch=100, w=None):
    """Stochastic gradient descent: one gradient step per sample, one fresh
    random permutation per epoch; step size decays by 0.9 each epoch."""
    n, d = x.shape
    objvals = numpy.zeros(max_epoch)  # store the objective values
    if w is None:
        w = numpy.zeros((d, 1))  # zero initialization
    else:
        # fix: copy so `w -= ...` does not mutate the caller's array
        w = numpy.copy(w)
    for t in range(max_epoch):
        # randomly shuffle the samples
        rand_indices = numpy.random.permutation(n)
        x_rand = x[rand_indices, :]
        y_rand = y[rand_indices, :]
        objval = 0  # accumulate the objective values
        for i in range(n):
            xi = x_rand[i, :]  # 1-by-d matrix
            # fix: index the scalar element directly; float() on a size-1
            # array slice is deprecated in NumPy >= 1.25
            yi = float(y_rand[i, 0])  # scalar
            obj, g = stochastic_objective_gradient(w, xi, yi, lam)
            objval += obj
            w -= stepsize * g
        stepsize *= 0.9  # decrease step size
        objval /= n
        objvals[t] = objval
        print('Objective value at epoch t=' + str(t) + ' is ' + str(objval))
    return w, objvals
```
Run SGD.
```
# Run SGD with a smaller step size than GD (per-sample gradients are noisier).
lam = 1E-6
stepsize = 0.1
w, objvals_sgd = sgd(x_train, y_train, lam, stepsize)
```
# 4. Compare GD with SGD
Plot objective function values against epochs.
```
import matplotlib.pyplot as plt
%matplotlib inline
fig = plt.figure(figsize=(6, 4))
epochs_gd = range(len(objvals_gd))
epochs_sgd = range(len(objvals_sgd))
line0, = plt.plot(epochs_gd, objvals_gd, '--b', LineWidth=4)
line1, = plt.plot(epochs_sgd, objvals_sgd, '-r', LineWidth=2)
plt.xlabel('Epochs', FontSize=20)
plt.ylabel('Objective Value', FontSize=20)
plt.xticks(FontSize=16)
plt.yticks(FontSize=16)
plt.legend([line0, line1], ['GD', 'SGD'], fontsize=20)
plt.tight_layout()
plt.show()
fig.savefig('compare_gd_sgd.pdf', format='pdf', dpi=1200)
```
# 5. Prediction
```
# Predict class label
# Inputs:
#     w: d-by-1 matrix
#     X: m-by-d matrix
# Return:
#     f: m-by-1 matrix, the predictions
def predict(w, X):
    """Return sign(X w): +1/-1 labels (0 for points exactly on the boundary)."""
    return numpy.sign(numpy.dot(X, w))
# evaluate training error
f_train = predict(w, x_train)
# labels are +/-1, so |f - y| is 0 (correct) or 2 (wrong); /2 maps to 0/1
diff = numpy.abs(f_train - y_train) / 2
error_train = numpy.mean(diff)
print('Training classification error is ' + str(error_train))
# evaluate test error (same 0/1-loss computation on held-out data)
f_test = predict(w, x_test)
diff = numpy.abs(f_test - y_test) / 2
error_test = numpy.mean(diff)
print('Test classification error is ' + str(error_test))
```
# 6. Mini-batch SGD (fill the code)
## 6.1. Compute the objective $Q_I$ and its gradient using a batch of samples
Define $Q_I (w) = \frac{1}{b} \sum_{i \in I} \log \Big( 1 + \exp \big( - y_i x_i^T w \big) \Big) + \frac{\lambda}{2} \| w \|_2^2 $, where $I$ is a set containing $b$ indices randomly drawn from $\{ 1, \cdots , n \}$ without replacement.
The stochastic gradient at $w$ is $g_I = \frac{\partial Q_I }{ \partial w} = \frac{1}{b} \sum_{i \in I} \frac{- y_i x_i }{1 + \exp ( y_i x_i^T w)} + \lambda w$.
```
# Calculate the objective Q_I and the gradient of Q_I
# Inputs:
#     w: d-by-1 matrix
#     xi: b-by-d matrix
#     yi: b-by-1 matrix
#     lam: scalar, the regularization parameter
#     b: integer, the batch size
# Return:
#     obj: scalar, the objective Q_I
#     g: d-by-1 matrix, gradient of Q_I
def mb_stochastic_objective_gradient(w, xi, yi, lam, b):
    """Mini-batch objective Q_I(w) and gradient, fully vectorized
    (matrix-vector products only; no per-sample loop)."""
    d = xi.shape[1]
    yx = numpy.multiply(yi, xi)  # b-by-d matrix: y_i * x_i
    yxw = numpy.dot(yx, w)       # b-by-1 matrix: y_i x_i^T w
    # objective: batch-mean logistic loss plus the L2 penalty
    loss = numpy.mean(numpy.log(1 + numpy.exp(-yxw)))  # scalar
    reg = lam / 2 * numpy.sum(w * w)                   # scalar
    obj = loss + reg
    # gradient: -(1/b) sum_i y_i x_i / (1 + exp(y_i x_i^T w)) + lam w
    vec = numpy.divide(yx, 1 + numpy.exp(yxw))          # b-by-d matrix
    g = -numpy.mean(vec, axis=0).reshape(d, 1) + lam * w
    return obj, g
```
## 6.2. Implement mini-batch SGD
Hints:
1. In every epoch, randomly permute the $n$ samples (just like SGD).
2. Each epoch has $\frac{n}{b}$ iterations. In every iteration, use $b$ samples, and compute the gradient and objective using the ``mb_stochastic_objective_gradient`` function. In the next iteration, use the next $b$ samples, and so on.
```
# Mini-Batch SGD for solving logistic regression
# Inputs:
#     x: n-by-d matrix
#     y: n-by-1 matrix
#     lam: scalar, the regularization parameter
#     b: integer, the batch size
#     stepsize: scalar
#     max_epoch: integer, the maximal epochs
#     w: d-by-1 matrix, initialization of w
# Return:
#     w: the solution
#     objvals: record of each epoch's average objective value
def mb_sgd(x, y, lam, b, stepsize, max_epoch=100, w=None):
    """Mini-batch SGD: each epoch permutes the samples, then takes one
    gradient step per batch of b consecutive samples (last batch may be
    smaller when b does not divide n).  Records ONE objective value per
    epoch; step size decays by 0.9 per epoch."""
    n, d = x.shape
    objvals = numpy.zeros(max_epoch)  # one averaged objective value per epoch
    if w is None:
        w = numpy.zeros((d, 1))  # zero initialization
    else:
        w = numpy.copy(w)  # do not mutate the caller's array
    num_batches = int(numpy.ceil(n / b))
    for t in range(max_epoch):
        # randomly shuffle the samples (same scheme as SGD)
        rand_indices = numpy.random.permutation(n)
        x_rand = x[rand_indices, :]
        y_rand = y[rand_indices, :]
        objval = 0  # sample-weighted accumulation of the batch objectives
        for k in range(num_batches):
            xi = x_rand[k * b:(k + 1) * b, :]  # b-by-d (possibly smaller at the end)
            yi = y_rand[k * b:(k + 1) * b, :]  # b-by-1
            batch_size = xi.shape[0]
            obj, g = mb_stochastic_objective_gradient(w, xi, yi, lam, batch_size)
            # weight by batch size so the epoch value is a per-sample average
            objval += obj * batch_size
            w -= stepsize * g
        stepsize *= 0.9  # decrease step size
        objvals[t] = objval / n
        print('Objective value at epoch t=' + str(t) + ' is ' + str(objvals[t]))
    return w, objvals
```
## 6.3. Run MB-SGD
```
# MB-SGD with batch size b=8
lam = 1E-6  # do not change
b = 8  # do not change
stepsize = 0.1  # you must tune this parameter
w, objvals_mbsgd8 = mb_sgd(x_train, y_train, lam, b, stepsize)
# MB-SGD with batch size b=64 (larger batches -> less noisy gradients)
lam = 1E-6  # do not change
b = 64  # do not change
stepsize = 0.1  # you must tune this parameter
w, objvals_mbsgd64 = mb_sgd(x_train, y_train, lam, b, stepsize)
```
# 7. Plot and compare GD, SGD, and MB-SGD
You are required to compare the following algorithms:
- Gradient descent (GD)
- SGD
- MB-SGD with b=8
- MB-SGD with b=64
Follow the code in Section 4 to plot ```objective function value``` against ```epochs```. There should be four curves in the plot; each curve corresponds to one algorithm.
Hint: Logistic regression with $\ell_2$-norm regularization is a strongly convex optimization problem. All the algorithms will converge to the same solution. **In the end, the ``objective function value`` of the 4 algorithms will be the same. If not the same, your implementation must be wrong. Do NOT submit wrong code and wrong result!**
```
# plot the 4 curves:
```
| github_jupyter |
```
%matplotlib inline
import math
import numpy
import pandas
import seaborn
import matplotlib.pyplot as plt
import plot
def fmt_money(number):
    """Format a number as a whole-dollar string with thousands separators."""
    return f"${number:,.0f}"
def run_pmt(market, pmt_rate):
    """Simulate PMT-style retirement withdrawals from age 65 to 99.

    Each year the withdrawal is the annuity payment that would exhaust the
    current portfolio by age 100 at `pmt_rate`, then the remainder grows at
    `market`.  Returns a DataFrame indexed by age with integer 'withdrawal'
    and 'portfolio' columns.
    """
    def _pmt(rate, nper, pv):
        # fix: numpy.pmt was removed in NumPy 1.20 (moved to numpy_financial).
        # This reproduces numpy.pmt(rate, nper, pv, fv=0, when=1): the
        # (negative) payment at the BEGINNING of each of nper periods.
        factor = (1 + rate) ** nper
        return -pv * factor * rate / ((1 + rate) * (factor - 1))

    portfolio = 1_000_000
    age = 65
    max_age = 100
    df = pandas.DataFrame(index=range(age, max_age), columns=['withdrawal', 'portfolio'])
    for i in range(age, max_age):
        withdraw = -_pmt(pmt_rate, max_age - i, portfolio)
        portfolio -= withdraw
        portfolio *= (1 + market)  # remaining balance earns the market return
        df.loc[i] = [int(withdraw), int(portfolio)]
    return df
# Simulate withdrawals with 3% market returns and a 4% PMT payout rate.
pmt_df = run_pmt(0.03, 0.04)
pmt_df.head()
def run_smile(target):
    """Build the 'retirement spending smile' series for ages 66-99.

    Starting from `target` annual spending, each year's change rate d is the
    quadratic-in-age formula below (Blanchett-style smile: spending declines
    in mid-retirement and rises again late).  Returns an int Series indexed
    by age.
    """
    spend = target
    ages = range(66, 100)
    values = []
    for age in ages:
        # note: log(target) uses the ORIGINAL target each year, not current spend
        d = (0.00008 * age * age) - (0.0125 * age) - (0.0066 * math.log(target)) + 0.546
        spend *= (1 + d)
        values.append(int(spend))
    # fix: build values first — constructing an empty integer-dtype Series
    # with only an index is rejected by modern pandas (int cannot hold the
    # NaN fill values)
    return pandas.Series(values, index=ages, dtype=int)
# Seed the spending smile with the first PMT withdrawal (age 65).
smile_s = run_smile(pmt_df.iloc[0]['withdrawal'])
smile_s.head()
def rmse(s1, s2):
    """Root-mean-square error between two aligned series/arrays."""
    squared_errors = (s1 - s2) ** 2
    return numpy.sqrt(numpy.mean(squared_errors))
rmse(pmt_df['withdrawal'][1:26], smile_s[:26])
def harness():
    """Grid-search market return x PMT payout rate (1%-10% in 0.1% steps)
    and record the RMSE between each PMT withdrawal path and the spending
    smile seeded with its initial withdrawal."""
    rows = []
    for returns in numpy.arange(0.01, 0.10 + 0.001, 0.001):
        for pmt_rate in numpy.arange(0.01, 0.10 + 0.001, 0.001):
            pmt_df = run_pmt(returns, pmt_rate)
            iwd = pmt_df.iloc[0]['withdrawal']
            smile_s = run_smile(iwd)
            # index alignment: age 65 exists only in pmt_df, so it becomes
            # NaN and is skipped by the pandas mean inside rmse
            errors = rmse(pmt_df['withdrawal'], smile_s)
            rows.append({'market': returns, 'pmtrate': pmt_rate, 'rmse': errors})
    # fix: DataFrame.append was removed in pandas 2.0 — build from a row list
    return pandas.DataFrame(rows, columns=['market', 'pmtrate', 'rmse'])
# Run the full grid search (~8,200 run_pmt/run_smile simulations; slow).
error_df = harness()
error_df.head()
#seaborn.scatterplot(data=error_df, x='market', y='pmtrate', size='rmse')
#seaborn.scatterplot(data=error_df[0:19], x='pmtrate', y='rmse')
# First slice: all 91 payout rates for the lowest market-return value.
error_df[0:91]
# Each market-return value contributes 91 consecutive rows (one per payout rate).
slice_size = 91
n_slices = int(len(error_df) / slice_size)
print(len(error_df), n_slices, slice_size)
# Overlay one scatter per market-return slice: RMSE vs (payout rate - market return).
for i in range(n_slices):
    start = i * slice_size
    end = i * slice_size + slice_size
    slice_df = error_df[start:end]
    delta = slice_df['pmtrate'] - slice_df['market']
    plot_df = pandas.DataFrame({'delta': delta, 'rmse': slice_df['rmse']})
    sp = seaborn.scatterplot(data=plot_df, x='delta', y='rmse')
    mkt_rate = slice_df.iloc[0]['market']
    plt.xticks(numpy.arange(-0.100, +0.100, 0.005), rotation='vertical')
# plt.title(f'Market returns: {mkt_rate*100}%')
# Contingency fund: cumulative amount by which flat target spending exceeds
# the smile over the first 20 years (through age 85), per target level.
series = pandas.Series(index=range(40_000, 101_000, 5_000))
for t in range(40_000, 101_000, 5_000):
    s = run_smile(t)
    contingency = (t - s[0:20]).sum()
    series.loc[t] = contingency
series.plot()
plt.xlabel('Targeted annual withdrawal at retirement')
# NOTE(review): axis/title strings misspell "Contingency" — runtime text, left as-is here
plt.ylabel('Contigency fund')
xticks = plt.xticks()
plt.xticks(xticks[0], [fmt_money(x) for x in xticks[0]])
yticks = plt.yticks()
plt.yticks(yticks[0], [fmt_money(y) for y in yticks[0]])
plt.title('Contigency at age 85')
series
# Same data normalized by the spending target.
(series / series.index).plot()
plt.title('Ratio of contingency to expected spending')
xticks = plt.xticks()
plt.xticks(xticks[0], [fmt_money(x) for x in xticks[0]])
len(error_df)
```
| github_jupyter |
```
import os
import json
import boto3
import sagemaker
import numpy as np
from source.config import Config
# Experiment configuration: S3 bucket, dataset file names, column names, ...
config = Config(filename="config/config.yaml")
sage_session = sagemaker.session.Session()
s3_bucket = config.S3_BUCKET
s3_output_path = 's3://{}/'.format(s3_bucket)
print("S3 bucket path: {}".format(s3_output_path))
# run in local_mode on this machine, or as a SageMaker TrainingJob
local_mode = False
if local_mode:
    instance_type = 'local'
else:
    instance_type = "ml.c5.xlarge"
# NOTE(review): get_execution_role() only works when running inside SageMaker
# (notebook instance / Studio); it raises elsewhere.
role = sagemaker.get_execution_role()
print("Using IAM role arn: {}".format(role))
# only run from SageMaker notebook instance
if local_mode:
    !/bin/bash ./setup.sh
# GPU only applies to the ml.p* instance families
cpu_or_gpu = 'gpu' if instance_type.startswith('ml.p') else 'cpu'
# create a descriptive job name
job_name_prefix = 'HPO-pdm'
# Regexes that extract training/evaluation metrics from the job's log stream;
# 'test_auc' is the objective the tuner maximizes below.
metric_definitions = [
    {'Name': 'Epoch', 'Regex': 'Epoch: ([-+]?[0-9]*[.]?[0-9]+([eE][-+]?[0-9]+)?)'},
    {'Name': 'train_loss', 'Regex': 'Train loss: ([-+]?[0-9]*[.]?[0-9]+([eE][-+]?[0-9]+)?)'},
    {'Name': 'train_acc', 'Regex': 'Train acc: ([-+]?[0-9]*[.]?[0-9]+([eE][-+]?[0-9]+)?)'},
    {'Name': 'train_auc', 'Regex': 'Train auc: ([-+]?[0-9]*[.]?[0-9]+([eE][-+]?[0-9]+)?)'},
    {'Name': 'test_loss', 'Regex': 'Test loss: ([-+]?[0-9]*[.]?[0-9]+([eE][-+]?[0-9]+)?)'},
    {'Name': 'test_acc', 'Regex': 'Test acc: ([-+]?[0-9]*[.]?[0-9]+([eE][-+]?[0-9]+)?)'},
    {'Name': 'test_auc', 'Regex': 'Test auc: ([-+]?[0-9]*[.]?[0-9]+([eE][-+]?[0-9]+)?)'},
]
from sagemaker.pytorch import PyTorch
```
# Define your data
```
print("Using dataset {}".format(config.train_dataset_fn))
from sagemaker.s3 import S3Uploader
key_prefix='fpm-data'
training_data = S3Uploader.upload(config.train_dataset_fn, 's3://{}/{}'.format(s3_bucket, key_prefix))
testing_data = S3Uploader.upload(config.test_dataset_fn, 's3://{}/{}'.format(s3_bucket, key_prefix))
print("Training data: {}".format(training_data))
print("Testing data: {}".format(testing_data))
```
# HPO
```
from sagemaker.tuner import IntegerParameter, CategoricalParameter, ContinuousParameter, HyperparameterTuner
# Tuning budget: 20 training jobs total, up to 5 in flight at a time.
max_jobs = 20
max_parallel_jobs = 5
# Search space over optimizer and architecture hyperparameters.
hyperparameter_ranges = {
    'lr': ContinuousParameter(1e-5, 1e-2),
    'batch_size': IntegerParameter(16, 256),
    'dropout': ContinuousParameter(0.0, 0.8),
    'fc_hidden_units': CategoricalParameter(["[256, 128]", "[256, 128, 128]", "[256, 256, 128]", "[256, 128, 64]"]),
    'conv_channels': CategoricalParameter(["[2, 8, 2]", "[2, 16, 2]", "[2, 16, 16, 2]"]),
}
# NOTE(review): train_instance_type / train_instance_count are SageMaker SDK v1
# parameter names; SDK v2 renamed them to instance_type / instance_count — confirm
# the pinned SDK version before upgrading.
estimator = PyTorch(entry_point="train.py",
                    source_dir='source',
                    role=role,
                    dependencies=["source/dl_utils"],
                    train_instance_type=instance_type,
                    train_instance_count=1,
                    output_path=s3_output_path,
                    framework_version="1.5.0",
                    py_version='py3',
                    base_job_name=job_name_prefix,
                    metric_definitions=metric_definitions,
                    hyperparameters= {
                        'epoch': 5000,
                        'target_column': config.target_column,
                        'sensor_headers': json.dumps(config.sensor_headers),
                        'train_input_filename': os.path.basename(config.train_dataset_fn),
                        'test_input_filename': os.path.basename(config.test_dataset_fn),
                        }
                    )
# In local mode, run a single smoke-test training job before tuning.
if local_mode:
    estimator.fit({'train': training_data, 'test': testing_data})
# Maximize held-out AUC (parsed from logs via metric_definitions).
tuner = HyperparameterTuner(estimator,
                            objective_metric_name='test_auc',
                            objective_type='Maximize',
                            hyperparameter_ranges=hyperparameter_ranges,
                            metric_definitions=metric_definitions,
                            max_jobs=max_jobs,
                            max_parallel_jobs=max_parallel_jobs,
                            base_tuning_job_name=job_name_prefix)
tuner.fit({'train': training_data, 'test': testing_data})
# Save the HPO job name back into config.yaml (update in place if the key exists)
hpo_job_name = tuner.describe()['HyperParameterTuningJobName']
if "hpo_job_name" in config.__dict__:
    !sed -i 's/hpo_job_name: .*/hpo_job_name: \"{hpo_job_name}\"/' config/config.yaml
else:
    !echo -e "\n" >> config/config.yaml
    !echo "hpo_job_name: \"$hpo_job_name\"" >> config/config.yaml
```
| github_jupyter |
<!--BOOK_INFORMATION-->
<img align="left" style="padding-right:10px;" src="figures/PDSH-cover-small.png">
*This notebook contains an excerpt from the [Python Data Science Handbook](http://shop.oreilly.com/product/0636920034919.do) by Jake VanderPlas; the content is available [on GitHub](https://github.com/jakevdp/PythonDataScienceHandbook).*
*The text is released under the [CC-BY-NC-ND license](https://creativecommons.org/licenses/by-nc-nd/3.0/us/legalcode), and code is released under the [MIT license](https://opensource.org/licenses/MIT). If you find this content useful, please consider supporting the work by [buying the book](http://shop.oreilly.com/product/0636920034919.do)!*
<!--NAVIGATION-->
< [Keyboard Shortcuts in the IPython Shell](01.02-Shell-Keyboard-Shortcuts.ipynb) | [Contents](Index.ipynb) | [Input and Output History](01.04-Input-Output-History.ipynb) >
# IPython Magic Commands
The previous two sections showed how IPython lets you use and explore Python efficiently and interactively.
Here we'll begin discussing some of the enhancements that IPython adds on top of the normal Python syntax.
These are known in IPython as *magic commands*, and are prefixed by the ``%`` character.
These magic commands are designed to succinctly solve various common problems in standard data analysis.
Magic commands come in two flavors: *line magics*, which are denoted by a single ``%`` prefix and operate on a single line of input, and *cell magics*, which are denoted by a double ``%%`` prefix and operate on multiple lines of input.
We'll demonstrate and discuss a few brief examples here, and come back to more focused discussion of several useful magic commands later in the chapter.
## Pasting Code Blocks: ``%paste`` and ``%cpaste``
When working in the IPython interpreter, one common gotcha is that pasting multi-line code blocks can lead to unexpected errors, especially when indentation and interpreter markers are involved.
A common case is that you find some example code on a website and want to paste it into your interpreter.
Consider the following simple function:
``` python
>>> def donothing(x):
... return x
```
The code is formatted as it would appear in the Python interpreter, and if you copy and paste this directly into IPython you get an error:
```ipython
In [2]: >>> def donothing(x):
...: ... return x
...:
File "<ipython-input-20-5a66c8964687>", line 2
... return x
^
SyntaxError: invalid syntax
```
In the direct paste, the interpreter is confused by the additional prompt characters.
But never fear–IPython's ``%paste`` magic function is designed to handle this exact type of multi-line, marked-up input:
```ipython
In [3]: %paste
>>> def donothing(x):
... return x
## -- End pasted text --
```
The ``%paste`` command both enters and executes the code, so now the function is ready to be used:
```ipython
In [4]: donothing(10)
Out[4]: 10
```
A command with a similar intent is ``%cpaste``, which opens up an interactive multiline prompt in which you can paste one or more chunks of code to be executed in a batch:
```ipython
In [5]: %cpaste
Pasting code; enter '--' alone on the line to stop or use Ctrl-D.
:>>> def donothing(x):
:... return x
:--
```
These magic commands, like others we'll see, make available functionality that would be difficult or impossible in a standard Python interpreter.
## Running External Code: ``%run``
As you begin developing more extensive code, you will likely find yourself working in both IPython for interactive exploration, as well as a text editor to store code that you want to reuse.
Rather than running this code in a new window, it can be convenient to run it within your IPython session.
This can be done with the ``%run`` magic.
For example, imagine you've created a ``myscript.py`` file with the following contents:
```python
#-------------------------------------
# file: myscript.py
def square(x):
"""square a number"""
return x ** 2
for N in range(1, 4):
print(N, "squared is", square(N))
```
You can execute this from your IPython session as follows:
```ipython
In [6]: %run myscript.py
1 squared is 1
2 squared is 4
3 squared is 9
```
Note also that after you've run this script, any functions defined within it are available for use in your IPython session:
```ipython
In [7]: square(5)
Out[7]: 25
```
There are several options to fine-tune how your code is run; you can see the documentation in the normal way, by typing **``%run?``** in the IPython interpreter.
## Timing Code Execution: ``%timeit``
Another example of a useful magic function is ``%timeit``, which will automatically determine the execution time of the single-line Python statement that follows it.
For example, we may want to check the performance of a list comprehension:
```ipython
In [8]: %timeit L = [n ** 2 for n in range(1000)]
1000 loops, best of 3: 325 µs per loop
```
The benefit of ``%timeit`` is that for short commands it will automatically perform multiple runs in order to attain more robust results.
For multi-line statements, adding a second ``%`` sign will turn this into a cell magic that can handle multiple lines of input.
For example, here's the equivalent construction with a ``for``-loop:
```ipython
In [9]: %%timeit
...: L = []
...: for n in range(1000):
...: L.append(n ** 2)
...:
1000 loops, best of 3: 373 µs per loop
```
We can immediately see that list comprehensions are about 10% faster than the equivalent ``for``-loop construction in this case.
We'll explore ``%timeit`` and other approaches to timing and profiling code in [Profiling and Timing Code](01.07-Timing-and-Profiling.ipynb).
## Help on Magic Functions: ``?``, ``%magic``, and ``%lsmagic``
Like normal Python functions, IPython magic functions have docstrings, and this useful
documentation can be accessed in the standard manner.
So, for example, to read the documentation of the ``%timeit`` magic simply type this:
```ipython
In [10]: %timeit?
```
Documentation for other functions can be accessed similarly.
To access a general description of available magic functions, including some examples, you can type this:
```ipython
In [11]: %magic
```
For a quick and simple list of all available magic functions, type this:
```ipython
In [12]: %lsmagic
```
Finally, I'll mention that it is quite straightforward to define your own magic functions if you wish.
We won't discuss it here, but if you are interested, see the references listed in [More IPython Resources](01.08-More-IPython-Resources.ipynb).
<!--NAVIGATION-->
< [Keyboard Shortcuts in the IPython Shell](01.02-Shell-Keyboard-Shortcuts.ipynb) | [Contents](Index.ipynb) | [Input and Output History](01.04-Input-Output-History.ipynb) >
| github_jupyter |
### Note
* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
```
# Dependencies and Setup
import pandas as pd

# File to Load
school_data_to_load = "Resources/schools_complete.csv"
student_data_to_load = "Resources/students_complete.csv"

# Read School and Student Data File and store into Pandas DataFrames
school_data = pd.read_csv(school_data_to_load)
student_data = pd.read_csv(student_data_to_load)

# Combine the data into a single dataset: left-join school info onto each
# student row on the shared "school_name" column.
# fix: the original passed the same key twice (on=["school_name", "school_name"]);
# a single key is the intended, unambiguous form.
school_data_complete = pd.merge(student_data, school_data, how="left", on="school_name")
```
## District Summary
* Calculate the total number of schools
* Calculate the total number of students
* Calculate the total budget
* Calculate the average math score
* Calculate the average reading score
* Calculate the percentage of students with a passing math score (70 or greater)
* Calculate the percentage of students with a passing reading score (70 or greater)
* Calculate the percentage of students who passed math **and** reading (% Overall Passing)
* Create a dataframe to hold the above results
* Optional: give the displayed data cleaner formatting
```
# Perform all variable calculations needed to fill out the data frame
# Calculate the total number of distinct schools
school_total = school_data_complete["school_name"].nunique()
# Calculate the total number of students (Student ID is unique per student)
student_total = school_data_complete["Student ID"].nunique()
# Calculate the total budget: take each school's budget once, then sum.
# NOTE: summing .unique() values (as before) silently under-counts whenever
# two schools happen to share the same budget figure.
budget_total = school_data_complete.groupby("school_name")["budget"].first().sum()
# Calculate the average math score
average_math = school_data_complete["math_score"].mean()
# Calculate the average reading score
average_reading = school_data_complete["reading_score"].mean()
# Percentage of students with a passing math score (70 or greater);
# summing the boolean mask counts the passing rows directly.
pass_math = 100 * (school_data_complete["math_score"] >= 70).sum() / student_total
# Percentage of students with a passing reading score (70 or greater)
pass_reading = 100 * (school_data_complete["reading_score"] >= 70).sum() / student_total
# Percentage of students who passed BOTH math and reading (% Overall Passing)
pass_total = 100 * ((school_data_complete["math_score"] >= 70)
                    & (school_data_complete["reading_score"] >= 70)).sum() / student_total
# Create a dataframe to hold the above results.
District_df = pd.DataFrame({
    "Total Schools": school_total,
    "Total Students": student_total,
    "Total Budget": budget_total,
    "Average Math Score": average_math,
    "Average Reading Score": average_reading,
    "% Passing Math": [pass_math],
    "% Passing Reading": [pass_reading],
    "% Overall Passing": [pass_total]
})
# Use Map to format the Total Students and Total Budget columns, as shown in example
District_df["Total Students"] = District_df["Total Students"].map("{:,d}".format)
District_df["Total Budget"] = District_df["Total Budget"].map("${:,.2f}".format)
# Show our final district summary
District_df
```
## School Summary
* Create an overview table that summarizes key metrics about each school, including:
* School Name
* School Type
* Total Students
* Total School Budget
* Per Student Budget
* Average Math Score
* Average Reading Score
* % Passing Math
* % Passing Reading
* % Overall Passing (The percentage of students that passed math **and** reading.)
* Create a dataframe to hold the above results
```
# Perform all variable calculations needed to fill out the data frame
# Create an overview table that summarizes key metrics about each school
school_groupby = school_data_complete.groupby("school_name")
# School types of each school
school_type = school_groupby["type"].unique()
school_type = school_type.str[0] # Extracting from brackets
# Pull the total students by school
school_students = school_groupby["size"].unique()
school_students = school_students.str[0].astype('int32') # Extracting from brackets and choosing data type
# Calculate the total budget by school
school_budget = school_groupby["budget"].unique()
school_budget = school_budget.str[0].astype('float') # Extracting from brackets and choosing data type
# Per student budget by school
per_student_budget = school_budget/school_students
# Calculate the average math score by school
school_avg_math = school_groupby["math_score"].mean()
# Calculate the average reading score by school
school_avg_reading = school_groupby["reading_score"].mean()
# Calculate the percentage of students with a passing math score (70 or greater) by school
# Resources: http://bit.ly/3bKcSXs and http://bit.ly/2KmubCR
school_math_temp = school_data_complete.loc[school_data_complete["math_score"] >= 70, "school_name"]
school_math_pass = 100 * (school_math_temp.groupby(school_math_temp).size() / school_students)
# Calculate the percentage of students with a passing reading score (70 or greater) by school
school_reading_temp = school_data_complete.loc[school_data_complete["reading_score"] >= 70, "school_name"]
school_reading_pass = 100 * (school_reading_temp.groupby(school_reading_temp).size() / school_students)
# Calculate the percentage of students who passed math and reading (% Overall Passing) by school.
#Resource: http://bit.ly/3imB4Ri
school_overall_temp = school_data_complete.loc[((school_data_complete.math_score >= 70) & (school_data_complete.reading_score >= 70)), "school_name"]
school_overall_pass = 100 * (school_overall_temp.groupby(school_overall_temp).size() / school_students)
# Create a dataframe to hold the above results
School_df = pd.DataFrame({
"School Type": school_type,
"Total Students": school_students,
"Total School Budget": school_budget,
"Per Student Budget": per_student_budget,
"Average Math Score": school_avg_math,
"Average Reading Score": school_avg_reading,
"% Passing Math": school_math_pass,
"% Passing Reading": school_reading_pass,
"% Overall Passing": school_overall_pass
})
# Rename axis
School_df = School_df.rename_axis("School Name")
# Use Map to format the Total Students, Total Budget columns, as shown in example
School_df["Total School Budget"] = School_df["Total School Budget"].map("${:,.2f}".format)
School_df["Per Student Budget"] = School_df["Per Student Budget"].map("${:,.2f}".format)
# Show our summary by school
School_df
```
## Top Performing Schools (By % Overall Passing)
* Sort and display the top five performing schools by % overall passing.
```
# Sorting the DataFrame based on "% Overall Passing" column
School_df = School_df.sort_values("% Overall Passing", ascending=False)
School_df.head(5)
```
## Bottom Performing Schools (By % Overall Passing)
* Sort and display the five worst-performing schools by % overall passing.
```
# Re-rank ascending so the weakest "% Overall Passing" values come first,
# then show the five worst-performing schools.
School_df = School_df.sort_values(by="% Overall Passing", ascending=True)
School_df.iloc[:5]
```
## Math Scores by Grade
* Create a table that lists the average Math Score for students of each grade level (9th, 10th, 11th, 12th) at each school.
* Create a pandas series for each grade. Hint: use a conditional statement.
* Group each series by school
* Combine the series into a dataframe
* Optional: give the displayed data cleaner formatting
```
# Create a pandas series for each grade. We will use these for this problem and the next.
grade_9 = school_data_complete.loc[school_data_complete["grade"] == "9th"]
grade_10 = school_data_complete.loc[school_data_complete["grade"] == "10th"]
grade_11 = school_data_complete.loc[school_data_complete["grade"] == "11th"]
grade_12 = school_data_complete.loc[school_data_complete["grade"] == "12th"]
# Group each series by school and calculate avg for all columns.
grade_9 = grade_9.groupby("school_name").mean()
grade_10 = grade_10.groupby("school_name").mean()
grade_11 = grade_11.groupby("school_name").mean()
grade_12 = grade_12.groupby("school_name").mean()
# Combine the series into a dataframe, indexing specifically for the math scores
Grades_Math_df = pd.DataFrame({
"9th": grade_9["math_score"],
"10th": grade_10["math_score"],
"11th": grade_11["math_score"],
"12th": grade_12["math_score"]
})
# Rename axis
Grades_Math_df = Grades_Math_df.rename_axis("School Name")
# Show table that lists the average Math Score for students of each grade level by school.
Grades_Math_df
```
## Reading Score by Grade
* Perform the same operations as above for reading scores
```
# All of the work in the prior table does not need to be repeated. Please refer to the math score setup above.
# Combine the series into a dataframe, indexing specifically for the reading scores
Grades_Reading_df = pd.DataFrame({
"9th": grade_9["reading_score"],
"10th": grade_10["reading_score"],
"11th": grade_11["reading_score"],
"12th": grade_12["reading_score"]
})
# Rename axis
Grades_Reading_df = Grades_Reading_df.rename_axis("School Name")
# Show table that lists the average Reading Score for students of each grade level by school.
Grades_Reading_df
```
## Scores by School Spending
* Create a table that breaks down school performances based on average Spending Ranges (Per Student). Use 4 reasonable bins to group school spending. Include in the table each of the following:
* Average Math Score
* Average Reading Score
* % Passing Math
* % Passing Reading
* Overall Passing Rate (Average of the above two)
```
# Create the bins for the spending ranges
spending_bins = [0, 584.99, 629.99, 644.99, 680]
# Create the labels for the bins
spending_labels = ["<$585", "$585-630", "$630-645", "$645-680"]
# Work on a COPY of the school summary: plain assignment (Spending_df =
# School_df) aliases the frame, so the bin column added below would leak
# into School_df and pollute the later sections.
Spending_df = School_df.copy()
# Bin each school by per-student spending (per_student_budget was computed
# in the school-summary section).
Spending_df["Spending Ranges (Per Student)"] = pd.cut(per_student_budget, spending_bins, labels=spending_labels, right=False)
# Group once, then average only the metric columns we report; selecting the
# columns avoids averaging the string-formatted budget columns.
spending_group = Spending_df.groupby("Spending Ranges (Per Student)")
spending_mathscore = spending_group["Average Math Score"].mean()
spending_readscore = spending_group["Average Reading Score"].mean()
spending_passmath = spending_group["% Passing Math"].mean()
spending_passread = spending_group["% Passing Reading"].mean()
spending_passoverall = spending_group["% Overall Passing"].mean()
# Overwrite data frame with this information
Spending_df = pd.DataFrame({
    "Average Math Score": spending_mathscore,
    "Average Reading Score": spending_readscore,
    "% Passing Math": spending_passmath,
    "% Passing Reading": spending_passread,
    "% Overall Passing": spending_passoverall
})
Spending_df
```
## Scores by School Size
* Perform the same operations as above, based on school size.
```
# Create the bins for the size ranges
size_bins = [0, 999, 1999, 5000]
# Create the labels for the bins
size_labels = ["Small (<1000)", "Medium (1000-2000)", "Large (2000-5000)"]
# Work on a COPY so the "School Size" column does not leak into School_df
# (plain assignment would alias the frame, not copy it).
Size_df = School_df.copy()
# Bin each school by enrollment (school_students from the summary section).
Size_df["School Size"] = pd.cut(school_students, size_bins, labels=size_labels, right=False)
# Group once, then average only the metric columns we report.
size_group = Size_df.groupby("School Size")
size_mathscore = size_group["Average Math Score"].mean()
size_readscore = size_group["Average Reading Score"].mean()
size_passmath = size_group["% Passing Math"].mean()
size_passread = size_group["% Passing Reading"].mean()
size_passoverall = size_group["% Overall Passing"].mean()
# Overwrite data frame with this information
Size_df = pd.DataFrame({
    "Average Math Score": size_mathscore,
    "Average Reading Score": size_readscore,
    "% Passing Math": size_passmath,
    "% Passing Reading": size_passread,
    "% Overall Passing": size_passoverall,
})
Size_df
```
## Scores by School Type
* Perform the same operations as above, based on school type
```
# We don't need to bin because of the school_type variable and column from School_df
# Copy over my School_df from the previous section.
Type_df = School_df
# Use groupby on the data frame to calculate the mean values needed
type_mathscore = Type_df.groupby("School Type").mean()["Average Math Score"]
type_readscore = Type_df.groupby("School Type").mean()["Average Reading Score"]
type_passmath = Type_df.groupby("School Type").mean()["% Passing Math"]
type_passread = Type_df.groupby("School Type").mean()["% Passing Reading"]
type_passoverall = Type_df.groupby("School Type").mean()["% Overall Passing"]
# Overwrite data frame with this information
Type_df = pd.DataFrame({
"Average Math Score": type_mathscore,
"Average Reading Score": type_readscore,
"% Passing Math": type_passmath,
"% Passing Reading": type_passread,
"% Overall Passing": type_passoverall,
})
Type_df
```
| github_jupyter |
# TV Script Generation
In this project, you'll generate your own [Seinfeld](https://en.wikipedia.org/wiki/Seinfeld) TV scripts using RNNs. You'll be using part of the [Seinfeld dataset](https://www.kaggle.com/thec03u5/seinfeld-chronicles#scripts.csv) of scripts from 9 seasons. The Neural Network you'll build will generate a new, "fake" TV script, based on patterns it recognizes in this training data.
## Get the Data
The data is already provided for you in `./data/Seinfeld_Scripts.txt` and you're encouraged to open that file and look at the text.
>* As a first step, we'll load in this data and look at some samples.
* Then, you'll be tasked with defining and training an RNN to generate a new script!
```
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# load in data
import helper
data_dir = './data/Seinfeld_Scripts.txt'
text = helper.load_data(data_dir)
```
## Explore the Data
Play around with `view_line_range` to view different parts of the data. This will give you a sense of the data you'll be working with. You can see, for example, that it is all lowercase text, and each new line of dialogue is separated by a newline character `\n`.
```
# (start, end) line indices to preview — this is the one value the notebook
# invites you to tweak.
view_line_range = (0, 10)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
import numpy as np
print('Dataset Stats')
# The dict comprehension de-duplicates words, so its length is the
# (rough, punctuation-attached) unique-word count.
print('Roughly the number of unique words: {}'.format(len({word: None for word in text.split()})))
lines = text.split('\n')
print('Number of lines: {}'.format(len(lines)))
word_count_line = [len(line.split()) for line in lines]
print('Average number of words in each line: {}'.format(np.average(word_count_line)))
print()
print('The lines {} to {}:'.format(*view_line_range))
print('\n'.join(text.split('\n')[view_line_range[0]:view_line_range[1]]))
```
---
## Implement Pre-processing Functions
The first thing to do to any dataset is pre-processing. Implement the following pre-processing functions below:
- Lookup Table
- Tokenize Punctuation
### Lookup Table
To create a word embedding, you first need to transform the words to ids. In this function, create two dictionaries:
- Dictionary to go from the words to an id, we'll call `vocab_to_int`
- Dictionary to go from the id to word, we'll call `int_to_vocab`
Return these dictionaries in the following **tuple** `(vocab_to_int, int_to_vocab)`
```
import problem_unittests as tests
def create_lookup_tables(text):
    """
    Create lookup tables for vocabulary.

    :param text: The text of tv scripts split into words (an iterable of
                 whitespace-separated strings)
    :return: A tuple of dicts (vocab_to_int, int_to_vocab)
    """
    vocab_to_int = {}
    for sent in text:
        for word in sent.split():
            # First occurrence order determines the id.
            if word not in vocab_to_int:
                vocab_to_int[word] = len(vocab_to_int)
    # Build the reverse mapping ONCE, after the vocabulary is complete.
    # (The original rebuilt the whole dict inside the loop on every
    # sentence — O(sentences * vocab) wasted work for the same result.)
    int_to_vocab = {ii: word for word, ii in vocab_to_int.items()}
    # return tuple
    return (vocab_to_int, int_to_vocab)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_create_lookup_tables(create_lookup_tables)
create_lookup_tables("Hi")
```
### Tokenize Punctuation
We'll be splitting the script into a word array using spaces as delimiters. However, punctuations like periods and exclamation marks can create multiple ids for the same word. For example, "bye" and "bye!" would generate two different word ids.
Implement the function `token_lookup` to return a dict that will be used to tokenize symbols like "!" into "||Exclamation_Mark||". Create a dictionary for the following symbols where the symbol is the key and value is the token:
- Period ( **.** )
- Comma ( **,** )
- Quotation Mark ( **"** )
- Semicolon ( **;** )
- Exclamation mark ( **!** )
- Question mark ( **?** )
- Left Parentheses ( **(** )
- Right Parentheses ( **)** )
- Dash ( **-** )
- Return ( **\n** )
This dictionary will be used to tokenize the symbols and add the delimiter (space) around it. This separates each symbol as its own word, making it easier for the neural network to predict the next word. Make sure you don't use a value that could be confused as a word; for example, instead of using the value "dash", try using something like "||dash||".
```
def token_lookup():
    """
    Generate a dict to turn punctuation into a token.
    :return: Tokenized dictionary where the key is the punctuation and the value is the token
    """
    # Punctuation symbols and their placeholder tokens, kept in matching
    # order so zip() pairs them up correctly.
    symbols = ['.', ',', '"', ';', '!', '?', '(', ')', '-', '\n']
    tokens = ['||dot||', '||comma||', '||invcoma||', '||semicolon||',
              '||exclamation_mark||', '||question_mark||',
              '||openparanthesys||', '||closeparanthesys||',
              '||hyphen||', '||line_feed||']
    return dict(zip(symbols, tokens))
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_tokenize(token_lookup)
```
## Pre-process all the data and save it
Running the code cell below will pre-process all the data and save it to file. You're encouraged to look at the code for `preprocess_and_save_data` in the `helper.py` file to see what it's doing in detail, but you do not need to change this code.
```
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# pre-process training data
helper.preprocess_and_save_data(data_dir, token_lookup, create_lookup_tables)
```
# Check Point
This is your first checkpoint. If you ever decide to come back to this notebook or have to restart the notebook, you can start from here. The preprocessed data has been saved to disk.
```
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import helper
import problem_unittests as tests
int_text, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
token_dict
```
## Build the Neural Network
In this section, you'll build the components necessary to build an RNN by implementing the RNN Module and forward and backpropagation functions.
### Check Access to GPU
```
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import torch
# Check for a GPU
train_on_gpu = torch.cuda.is_available()
if not train_on_gpu:
print('No GPU found. Please use a GPU to train your neural network.')
from torch.utils.data import TensorDataset, DataLoader
def batch_data(words, sequence_length, batch_size):
    """
    Batch the neural network data using DataLoader
    :param words: The word ids of the TV scripts
    :param sequence_length: The sequence length of each batch
    :param batch_size: The size of each batch; the number of sequences in a batch
    :return: DataLoader with batched data
    """
    # Drop the trailing partial batch so every batch is full.
    n_batches = len(words)//batch_size
    words = words[:batch_size*n_batches]
    # Slide a window of `sequence_length` words over the corpus; the word
    # immediately after each window is that window's prediction target.
    x , y = [] , []
    for idx in range(0 , len(words) - sequence_length):
        x.append(words[idx:idx+sequence_length])
        y.append(words[idx+sequence_length])
    x , y = np.array(x) , np.array(y)
    # (Debug prints of the first 20 feature/target rows removed — they
    # spammed the log with large arrays on every call with the real corpus.)
    dataset = TensorDataset(torch.from_numpy(x) , torch.from_numpy(y))
    # Shuffle so batches are not consecutive runs of the script.
    return DataLoader(dataset , shuffle = True , batch_size = batch_size)
# there is no test for this function, but you are encouraged to create
# print statements and tests of your own
```
### Test your dataloader
You'll have to modify this code to test a batching function, but it should look fairly similar.
Below, we're generating some test text data and defining a dataloader using the function you defined, above. Then, we are getting some sample batch of inputs `sample_x` and targets `sample_y` from our dataloader.
Your code should return something like the following (likely in a different order, if you shuffled your data):
```
torch.Size([10, 5])
tensor([[ 28, 29, 30, 31, 32],
[ 21, 22, 23, 24, 25],
[ 17, 18, 19, 20, 21],
[ 34, 35, 36, 37, 38],
[ 11, 12, 13, 14, 15],
[ 23, 24, 25, 26, 27],
[ 6, 7, 8, 9, 10],
[ 38, 39, 40, 41, 42],
[ 25, 26, 27, 28, 29],
[ 7, 8, 9, 10, 11]])
torch.Size([10])
tensor([ 33, 26, 22, 39, 16, 28, 11, 43, 30, 12])
```
### Sizes
Your sample_x should be of size `(batch_size, sequence_length)` or (10, 5) in this case and sample_y should just have one dimension: batch_size (10).
### Values
You should also notice that the targets, sample_y, are the *next* value in the ordered test_text data. So, for an input sequence `[ 28, 29, 30, 31, 32]` that ends with the value `32`, the corresponding output should be `33`.
```
# test dataloader
test_text = list(range(50))
t_loader = batch_data(test_text, sequence_length=5, batch_size=10)
data_iter = iter(t_loader)
# Use the builtin next(): the `.next()` method is Python-2 style and has
# been removed from the DataLoader iterator in current PyTorch releases.
sample_x, sample_y = next(data_iter)
print(sample_x.shape)
print(sample_x)
print()
print(sample_y.shape)
print(sample_y)
```
---
## Build the Neural Network
Implement an RNN using PyTorch's [Module class](http://pytorch.org/docs/master/nn.html#torch.nn.Module). You may choose to use a GRU or an LSTM. To complete the RNN, you'll have to implement the following functions for the class:
- `__init__` - The initialize function.
- `init_hidden` - The initialization function for an LSTM/GRU hidden state
- `forward` - Forward propagation function.
The initialize function should create the layers of the neural network and save them to the class. The forward propagation function will use these layers to run forward propagation and generate an output and a hidden state.
**The output of this model should be the *last* batch of word scores** after a complete sequence has been processed. That is, for each input sequence of words, we only want to output the word scores for a single, most likely, next word.
### Hints
1. Make sure to stack the outputs of the lstm to pass to your fully-connected layer, you can do this with `lstm_output = lstm_output.contiguous().view(-1, self.hidden_dim)`
2. You can get the last batch of word scores by shaping the output of the final, fully-connected layer like so:
```
# reshape into (batch_size, seq_length, output_size)
output = output.view(batch_size, -1, self.output_size)
# get last batch
out = output[:, -1]
```
```
import torch.nn as nn
class RNN(nn.Module):
    def __init__(self, vocab_size, output_size, embedding_dim, hidden_dim, n_layers, dropout=0.5):
        """
        Initialize the PyTorch RNN Module
        :param vocab_size: The number of input dimensions of the neural network (the size of the vocabulary)
        :param output_size: The number of output dimensions of the neural network
        :param embedding_dim: The size of embeddings, should you choose to use them
        :param hidden_dim: The size of the hidden layer outputs
        :param dropout: dropout to add in between LSTM/GRU layers
        """
        super().__init__()
        # Remember the dimensions; forward() and init_hidden() read them.
        self.vocab_size = vocab_size
        self.output_size = output_size
        self.embedding_dim = embedding_dim
        self.hidden_dim = hidden_dim
        self.n_layers = n_layers
        self.dropout_prob = dropout
        # token id -> dense vector
        self.embed = nn.Embedding(vocab_size, embedding_dim)
        # stacked LSTM over the embedded sequence (batch dimension first)
        self.lstm = nn.LSTM(embedding_dim, hidden_dim, n_layers,
                            batch_first=True, dropout=dropout)
        # hidden state -> per-word scores
        self.linear = nn.Linear(hidden_dim, output_size)
    def forward(self, nn_input, hidden):
        """
        Forward propagation of the neural network
        :param nn_input: The input to the neural network
        :param hidden: The hidden state
        :return: Two Tensors, the output of the neural network and the latest hidden state
        """
        batch_size = nn_input.size(0)
        lstm_out, hidden_state = self.lstm(self.embed(nn_input), hidden)
        # Flatten the time dimension so the linear layer scores every step.
        flat = lstm_out.contiguous().view(-1, self.hidden_dim)
        scores = self.linear(flat)
        # Restore (batch, seq, vocab) and keep only the final time step:
        # we predict just the single next word after the whole sequence.
        scores = scores.view(batch_size, -1, self.output_size)
        return scores[:, -1], hidden_state
    def init_hidden(self, batch_size):
        '''
        Initialize the hidden state of an LSTM/GRU
        :param batch_size: The batch_size of the hidden state
        :return: hidden state of dims (n_layers, batch_size, hidden_dim)
        '''
        # Create zero tensors with the same dtype as the model weights.
        weight = next(self.parameters()).data
        shape = (self.n_layers, batch_size, self.hidden_dim)
        h0 = weight.new(*shape).zero_()
        c0 = weight.new(*shape).zero_()
        # train_on_gpu is the module-level flag set in the GPU-check cell.
        if train_on_gpu:
            h0, c0 = h0.cuda(), c0.cuda()
        return (h0, c0)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_rnn(RNN, train_on_gpu)
```
### Define forward and backpropagation
Use the RNN class you implemented to apply forward and back propagation. This function will be called, iteratively, in the training loop as follows:
```
loss = forward_back_prop(decoder, decoder_optimizer, criterion, inp, target)
```
And it should return the average loss over a batch and the hidden state returned by a call to `RNN(inp, hidden)`. Recall that you can get this loss by computing it, as usual, and calling `loss.item()`.
**If a GPU is available, you should move your data to that GPU device, here.**
```
def forward_back_prop(rnn, optimizer, criterion, inp, target, hidden):
    """
    Forward and backward propagation on the neural network
    :param decoder: The PyTorch Module that holds the neural network
    :param decoder_optimizer: The PyTorch optimizer for the neural network
    :param criterion: The PyTorch loss function
    :param inp: A batch of input to the neural network
    :param target: The target output for the batch of input
    :return: The loss and the latest hidden state Tensor
    """
    # Ship model and batch to the GPU when one is available.
    if train_on_gpu:
        rnn.cuda()
        inp = inp.cuda()
        target = target.cuda()
    # Detach the hidden state from the previous batch's graph so backprop
    # does not run through the entire training history.
    detached_hidden = tuple(state.data for state in hidden)
    optimizer.zero_grad()
    output, new_hidden = rnn(inp, detached_hidden)
    batch_loss = criterion(output, target)
    batch_loss.backward()
    # Clip gradients to guard against the exploding-gradient problem in RNNs.
    nn.utils.clip_grad_norm_(rnn.parameters(), 5)
    optimizer.step()
    # Average loss over the batch plus the hidden state for the next step.
    return batch_loss.item(), new_hidden
# Note that these tests aren't completely extensive.
# they are here to act as general checks on the expected outputs of your functions
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
tests.test_forward_back_prop(RNN, forward_back_prop, train_on_gpu)
```
## Neural Network Training
With the structure of the network complete and data ready to be fed in the neural network, it's time to train it.
### Train Loop
The training loop is implemented for you in the `train_decoder` function. This function will train the network over all the batches for the number of epochs given. The model progress will be shown every number of batches. This number is set with the `show_every_n_batches` parameter. You'll set this parameter along with other parameters in the next section.
```
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
def train_rnn(rnn, batch_size, optimizer, criterion, n_epochs, show_every_n_batches=100):
batch_losses = []
rnn.train()
print("Training for %d epoch(s)..." % n_epochs)
for epoch_i in range(1, n_epochs + 1):
# initialize hidden state
hidden = rnn.init_hidden(batch_size)
for batch_i, (inputs, labels) in enumerate(train_loader, 1):
# make sure you iterate over completely full batches, only
n_batches = len(train_loader.dataset)//batch_size
if(batch_i > n_batches):
break
# forward, back prop
loss, hidden = forward_back_prop(rnn, optimizer, criterion, inputs, labels, hidden)
# record loss
batch_losses.append(loss)
# printing loss stats
if batch_i % show_every_n_batches == 0:
print('Epoch: {:>4}/{:<4} Loss: {}\n'.format(
epoch_i, n_epochs, np.average(batch_losses)))
batch_losses = []
# returns a trained rnn
return rnn
```
### Hyperparameters
Set and train the neural network with the following parameters:
- Set `sequence_length` to the length of a sequence.
- Set `batch_size` to the batch size.
- Set `num_epochs` to the number of epochs to train for.
- Set `learning_rate` to the learning rate for an Adam optimizer.
- Set `vocab_size` to the number of unique tokens in our vocabulary.
- Set `output_size` to the desired size of the output.
- Set `embedding_dim` to the embedding dimension; smaller than the vocab_size.
- Set `hidden_dim` to the hidden dimension of your RNN.
- Set `n_layers` to the number of layers/cells in your RNN.
- Set `show_every_n_batches` to the number of batches at which the neural network should print progress.
If the network isn't getting the desired results, tweak these parameters and/or the layers in the `RNN` class.
```
# Data params
# Sequence Length
sequence_length = 10 # of words in a sequence
# Batch Size
batch_size = 256
# data loader - do not change
train_loader = batch_data(int_text, sequence_length, batch_size)
# Training parameters
# Number of Epochs
num_epochs = 15
# Learning Rate
# NOTE(review): 3e-3 is on the high side for Adam; consider dropping toward
# 1e-3 if the loss plateaus — confirm empirically.
learning_rate = 3e-3
# Model parameters
# Vocab size
vocab_size = len(vocab_to_int)
# Output size: one score per vocabulary word (next-word prediction)
output_size = vocab_size
# Embedding Dimension
embedding_dim = 512
# Hidden Dimension
hidden_dim = 256
# Number of RNN Layers
n_layers = 3
# Show stats for every n number of batches
show_every_n_batches = 2000
```
### Train
In the next cell, you'll train the neural network on the pre-processed data. If you have a hard time getting a good loss, you may consider changing your hyperparameters. In general, you may get better results with larger hidden and n_layer dimensions, but larger models take a longer time to train.
> **You should aim for a loss less than 3.5.**
You should also experiment with different sequence lengths, which determine the size of the long range dependencies that a model can learn.
```
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
# create model and move to gpu if available
rnn = RNN(vocab_size, output_size, embedding_dim, hidden_dim, n_layers, dropout=0.5)
if train_on_gpu:
rnn.cuda()
# defining loss and optimization functions for training
optimizer = torch.optim.Adam(rnn.parameters(), lr=learning_rate)
criterion = nn.CrossEntropyLoss()
# training the model
trained_rnn = train_rnn(rnn, batch_size, optimizer, criterion, num_epochs, show_every_n_batches)
# saving the trained model
helper.save_model('./save/trained_rnn', trained_rnn)
print('Model Trained and Saved')
```
### Question: How did you decide on your model hyperparameters?
For example, did you try different sequence_lengths and find that one size made the model converge faster? What about your hidden_dim and n_layers; how did you decide on those?
**Answer:** After training I observed that when the sequence_length is larger the model converges faster. Also, taking bigger batches improves the model. For the hidden_dim I chose 256, deciding the value in relation to the embedding dim of 512. Finally, for n_layers I chose 3, as 2 or 3 is the usual setting.
---
# Checkpoint
After running the above training cell, your model will be saved by name, `trained_rnn`, and if you save your notebook progress, **you can pause here and come back to this code at another time**. You can resume your progress by running the next cell, which will load in our word:id dictionaries _and_ load in your saved model by name!
```
"""
DON'T MODIFY ANYTHING IN THIS CELL
"""
import torch
import helper
import problem_unittests as tests
_, vocab_to_int, int_to_vocab, token_dict = helper.load_preprocess()
trained_rnn = helper.load_model('./save/trained_rnn')
```
## Generate TV Script
With the network trained and saved, you'll use it to generate a new, "fake" Seinfeld TV script in this section.
### Generate Text
To generate the text, the network needs to start with a single word and repeat its predictions until it reaches a set length. You'll be using the `generate` function to do this. It takes a word id to start with, `prime_id`, and generates a set length of text, `predict_len`. Also note that it uses topk sampling to introduce some randomness in choosing the most likely next word, given an output set of word scores!
```
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
import torch.nn.functional as F
def generate(rnn, prime_id, int_to_vocab, token_dict, pad_value, predict_len=100):
"""
Generate text using the neural network
:param decoder: The PyTorch Module that holds the trained neural network
:param prime_id: The word id to start the first prediction
:param int_to_vocab: Dict of word id keys to word values
:param token_dict: Dict of puncuation tokens keys to puncuation values
:param pad_value: The value used to pad a sequence
:param predict_len: The length of text to generate
:return: The generated text
"""
rnn.eval()
# create a sequence (batch_size=1) with the prime_id
current_seq = np.full((1, sequence_length), pad_value)
current_seq[-1][-1] = prime_id
predicted = [int_to_vocab[prime_id]]
for _ in range(predict_len):
if train_on_gpu:
current_seq = torch.LongTensor(current_seq).cuda()
else:
current_seq = torch.LongTensor(current_seq)
# initialize the hidden state
hidden = rnn.init_hidden(current_seq.size(0))
# get the output of the rnn
output, _ = rnn(current_seq, hidden)
# get the next word probabilities
p = F.softmax(output, dim=1).data
if(train_on_gpu):
p = p.cpu() # move to cpu
# use top_k sampling to get the index of the next word
top_k = 5
p, top_i = p.topk(top_k)
top_i = top_i.numpy().squeeze()
# select the likely next word index with some element of randomness
p = p.numpy().squeeze()
word_i = np.random.choice(top_i, p=p/p.sum())
# retrieve that word from the dictionary
word = int_to_vocab[word_i]
predicted.append(word)
# the generated word becomes the next "current sequence" and the cycle can continue
current_seq = np.roll(current_seq, -1, 1)
current_seq[-1][-1] = word_i
gen_sentences = ' '.join(predicted)
# Replace punctuation tokens
for key, token in token_dict.items():
ending = ' ' if key in ['\n', '(', '"'] else ''
gen_sentences = gen_sentences.replace(' ' + token.lower(), key)
gen_sentences = gen_sentences.replace('\n ', '\n')
gen_sentences = gen_sentences.replace('( ', '(')
# return all the sentences
return gen_sentences
```
### Generate a New Script
It's time to generate the text. Set `gen_length` to the length of TV script you want to generate and set `prime_word` to one of the following to start the prediction:
- "jerry"
- "elaine"
- "george"
- "kramer"
You can set the prime word to _any word_ in our dictionary, but it's best to start with a name for generating a TV script. (You can also start with any other names you find in the original text file!)
```
# run the cell multiple times to get different results!
gen_length = 400  # modify the length to your preference
prime_word = 'jerry'  # name for starting the script

"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
# Prime the generator with "<name>:" (script lines start with "speaker:")
# and pass the special PADDING token id for sequence padding.
pad_word = helper.SPECIAL_WORDS['PADDING']
generated_script = generate(trained_rnn, vocab_to_int[prime_word + ':'], int_to_vocab, token_dict, vocab_to_int[pad_word], gen_length)
print(generated_script)
```
#### Save your favorite scripts
Once you have a script that you like (or find interesting), save it to a text file!
```
# save script to a text file; the context manager guarantees the file is
# closed (and its buffer flushed) even if the write raises
with open("generated_script_1.txt", "w") as f:
    f.write(generated_script)
```
# The TV Script is Not Perfect
It's ok if the TV script doesn't make perfect sense. It should look like alternating lines of dialogue, here is one such example of a few generated lines.
### Example generated script
>jerry: what about me?
>
>jerry: i don't have to wait.
>
>kramer:(to the sales table)
>
>elaine:(to jerry) hey, look at this, i'm a good doctor.
>
>newman:(to elaine) you think i have no idea of this...
>
>elaine: oh, you better take the phone, and he was a little nervous.
>
>kramer:(to the phone) hey, hey, jerry, i don't want to be a little bit.(to kramer and jerry) you can't.
>
>jerry: oh, yeah. i don't even know, i know.
>
>jerry:(to the phone) oh, i know.
>
>kramer:(laughing) you know...(to jerry) you don't know.
You can see that there are multiple characters that say (somewhat) complete sentences, but it doesn't have to be perfect! It takes quite a while to get good results, and often, you'll have to use a smaller vocabulary (and discard uncommon words), or get more data. The Seinfeld dataset is about 3.4 MB, which is big enough for our purposes; for script generation you'll want more than 1 MB of text, generally.
# Submitting This Project
When submitting this project, make sure to run all the cells before saving the notebook. Save the notebook file as "dlnd_tv_script_generation.ipynb" and save another copy as an HTML file by clicking "File" -> "Download as.."->"html". Include the "helper.py" and "problem_unittests.py" files in your submission. Once you download these files, compress them into one zip file for submission.
| github_jupyter |
```
import pandas as pd
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasRegressor
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_validate, cross_val_predict
from keras import models
from keras import layers

# Load the modeling table and keep pre-2020 seasons with a positive
# classification target as the training set.
model_data = pd.read_csv('write_data/stage_1/lr_modeling.csv')
model_data.head()
training = model_data[(model_data['Season'] < 2020) & (model_data['Target_clf'] >0)]
training.Target_clf.value_counts()
# Regression target and feature matrix (ids / season / targets dropped).
y = training['Target'].values
X = training.drop(columns=['WTeamID', 'LTeamID', 'Season', 'Target', 'Target_clf']).values
# baseline neural networks
def baseline_model():
# create model
model = Sequential()
model.add(Dense(15, input_dim=32, kernel_initializer='normal', activation='relu'))
model.add(Dense(1, kernel_initializer='normal'))
# Compile model
model.compile(loss='mean_squared_error', optimizer='adam')
return model
# 10-fold CV of the raw (unscaled) baseline network.
estimator = KerasRegressor(build_fn=baseline_model, epochs=100, batch_size=50, verbose=0)
kfold = KFold(n_splits=10)
results = cross_val_score(estimator, X, y, cv=kfold, scoring='neg_mean_squared_error')
print("Baseline: %.2f (%.2f) MSE" % (np.sqrt(-1 * results.mean()), results.std()))

# Same network inside a Pipeline that standardizes the features first.
estimators = []
estimators.append(('std', StandardScaler()))
estimators.append(('mlp', KerasRegressor(build_fn=baseline_model, epochs=100, batch_size=50, verbose=0)))
pipeline = Pipeline(estimators)
kfold = KFold(n_splits=10)
results = cross_val_score(pipeline, X, y, cv=kfold, scoring='neg_mean_squared_error')
# NOTE(review): the first print reports results.std() raw while this one
# reports np.sqrt(results.std()) -- the two spreads are not comparable.
print("Baseline: %.2f (%.2f) MSE" % (np.sqrt(-1 * results.mean()), np.sqrt(results.std())))
# deeper neural network than the baseline
def develop_model():
    """Build and compile a deeper MLP: 32 inputs -> 15 relu -> 5 relu -> 1 linear."""
    net = Sequential()
    for layer in (
        Dense(15, input_dim=32, kernel_initializer='normal', activation='relu'),
        Dense(5, kernel_initializer='normal', activation='relu'),
        Dense(1, kernel_initializer='normal'),
    ):
        net.add(layer)
    # MSE loss optimized with Adam
    net.compile(optimizer='adam', loss='mean_squared_error')
    return net
def eval_nn(model):
    """Fit a StandardScaler + KerasRegressor pipeline on the module-level
    X, y and return the fitted pipeline.

    :param model: zero-arg function that builds a compiled Keras model
    :return: fitted sklearn Pipeline
    """
    estimators = []
    estimators.append(('std', StandardScaler()))
    estimators.append(('mlp', KerasRegressor(build_fn=model, epochs=100, batch_size=50, verbose=0)))
    pipeline = Pipeline(estimators)
    kfold = KFold(n_splits=10)  # NOTE(review): unused -- the CV lines below are commented out
    pipeline.fit(X, y)
    # results = cross_val_score(pipeline, X, y, cv=kfold, scoring='neg_mean_squared_error')
    # mod = cross_val_predict(pipeline, X, y, cv=kfold)
    # print('RMSE: ', np.round(np.sqrt(-1 * results.mean())))
    return pipeline
    # return [np.round(np.sqrt(-1 * results.mean())), np.round(np.sqrt(results.std()))], pipeline
# data
# Manual k-fold setup for the hand-rolled cross-validation loop below.
# NOTE(review): X_train / y_train are not defined in the visible cells --
# presumably produced by an earlier train_test_split; verify before running.
k = 5
num_val_samples = len(X_train) // k
num_epochs = 100
mse = []
rmse = []
# standardize the training features once, up front
X_train_ = StandardScaler().fit_transform(X_train)
# from keras.metrics import
from keras.metrics import RootMeanSquaredError
def build_model():
    """Compile a 15-5-1 relu MLP regressor that reports RMSE as a metric."""
    rmse_metric = RootMeanSquaredError(name="root_mean_squared_error", dtype=None)
    net = models.Sequential([
        layers.Dense(15, activation='relu', input_shape=(X_train_.shape[1],)),
        layers.Dense(5, activation='relu'),
        layers.Dense(1),
    ])
    net.compile(optimizer='adam', loss="mean_squared_error", metrics=[rmse_metric])
    return net
# Hand-rolled k-fold cross-validation of build_model().
for i in range(k):
    print('processing fold #', i)
    # fold i is the validation slice; everything else is training data
    val_data = X_train_[i * num_val_samples: (i + 1) * num_val_samples]
    val_targets = y_train[i * num_val_samples: (i + 1) * num_val_samples]
    partial_train_data = np.concatenate([X_train_[:i * num_val_samples]
                                         , X_train_[(i + 1) * num_val_samples:]]
                                        ,axis=0)
    partial_train_targets = np.concatenate([y_train[:i * num_val_samples]
                                            ,y_train[(i + 1) * num_val_samples:]]
                                           , axis=0)
    model = build_model()
    model.fit(partial_train_data, partial_train_targets,
              epochs=num_epochs, batch_size=1, verbose=0)
    # evaluate returns (loss=MSE, RMSE metric) per the compile() call above
    val_mse, val_rmse = model.evaluate(val_data, val_targets, verbose=0)
    print('mse is: ', val_mse)
    print('rmse is: ', val_rmse)
    mse.append(val_mse)
    rmse.append(val_rmse)
# mean RMSE across the k folds
np.array(rmse).mean()
# Score the stage-1 submission games with the last trained fold model.
data_test = pd.read_csv('write_data/stage_1/submission_1.csv')
data_test = data_test.drop(columns=['WTeamID', 'LTeamID'])
data_test.head()
data_lr = data_test[['ID']].copy()
# NOTE(review): the test features are NOT standardized here, unlike training
# (X_train_) -- confirm whether scaling should be applied before predict.
data_lr['pred_nn'] = model.predict(data_test.drop(columns=['Season', 'ID']))
data_lr.head()
# Split the "Season_WTeamID_LTeamID" ID back into columns.
# NOTE(review): load_dataframe / split / withColumn are PySpark-style helpers
# not defined in the visible cells.
df_sub = load_dataframe('write_data/stage_1/01_spread_pred.csv')
data_linear_predict = df_sub\
    .withColumn('Season', split(df_sub.ID, '_').getItem(0)) \
    .withColumn('WTeamID', split(df_sub.ID, '_').getItem(1)) \
    .withColumn('LTeamID', split(df_sub.ID, '_').getItem(2)) \
    .toPandas()
compare =data_linear_predict.join(data_lr, on='ID', how='inner')
```
| github_jupyter |
**[CDS-01]** 必要なモジュールをインポートして、乱数のシードを設定します。
```
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

# Fix both NumPy and TensorFlow RNG seeds for reproducibility.
# NOTE(review): tf.set_random_seed is TF 1.x API (tf.random.set_seed in TF 2).
np.random.seed(20160704)
tf.set_random_seed(20160704)
```
**[CDS-02]** CIFAR-10 のデータセットをダウンロードします。ダウンロード完了まで少し時間がかかります。
```
%%bash
# Download and unpack the CIFAR-10 binary dataset into /tmp/cifar10_data.
mkdir -p /tmp/cifar10_data
cd /tmp/cifar10_data
curl -OL http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz
tar xzf cifar-10-binary.tar.gz
```
**[CDS-03]** ダウンロードしたデータを確認します。ここでは、テストセット用のデータ test_batch.bin を使用します。
```
# List the extracted CIFAR-10 files (IPython shell escape).
!ls -lR /tmp/cifar10_data
```
**[CDS-04]** データファイルから画像イメージとラベルデータを読み取る関数を用意します。
```
def read_cifar10(filename_queue):
    """Read one CIFAR-10 record (1 label byte + 32x32x3 image bytes) from a
    TF 1.x filename queue and return it as a CIFAR10Record.

    NOTE(review): FixedLengthRecordReader / decode_raw / queue-based input
    are TF 1.x APIs, removed in TF 2.
    """
    class CIFAR10Record(object):
        pass
    result = CIFAR10Record()
    label_bytes = 1
    result.height = 32
    result.width = 32
    result.depth = 3
    image_bytes = result.height * result.width * result.depth
    # each record is a fixed-length label+image byte string
    record_bytes = label_bytes + image_bytes
    reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)
    result.key, value = reader.read(filename_queue)
    record_bytes = tf.decode_raw(value, tf.uint8)
    # first byte is the class label
    result.label = tf.cast(
        tf.slice(record_bytes, [0], [label_bytes]), tf.int32)
    # remaining bytes are stored channel-major [depth, height, width]
    depth_major = tf.reshape(tf.slice(record_bytes, [label_bytes], [image_bytes]),
                             [result.depth, result.height, result.width])
    # Convert from [depth, height, width] to [height, width, depth].
    result.uint8image = tf.transpose(depth_major, [1, 2, 0])
    return result
```
**[CDS-04]** それぞれのラベルについて、8個ずつの画像イメージを表示します。
```
# Collect 8 sample images per label from the test batch and show them in
# a 10x8 grid (one row per class).
sess = tf.InteractiveSession()
filename = '/tmp/cifar10_data/cifar-10-batches-bin/test_batch.bin'
q = tf.FIFOQueue(99, [tf.string], shapes=())
q.enqueue([filename]).run(session=sess)
q.close().run(session=sess)
result = read_cifar10(q)

samples = [[] for l in range(10)]
# read records until every label has at least 8 samples
while(True):
    label, image = sess.run([result.label, result.uint8image])
    label = label[0]
    if len(samples[label]) < 8:
        samples[label].append(image)
    if all([len(samples[l]) >= 8 for l in range(10)]):
        break

fig = plt.figure(figsize=(8,10))
for l in range(10):
    for c in range(8):
        subplot = fig.add_subplot(10, 8, l*8+c+1)
        subplot.set_xticks([])
        subplot.set_yticks([])
        image = samples[l][c]
        subplot.imshow(image.astype(np.uint8))
sess.close()
```
**[CDS-05]** 前処理を施した画像イメージを生成する関数を用意します。
```
def distorted_samples(image):
    """Return one center-cropped plus six randomly distorted 24x24 variants
    of `image`, whitened and concatenated along axis 0.

    NOTE(review): per_image_whitening and tf.concat(axis, values) are
    pre-TF-1.0 signatures (later: per_image_standardization,
    tf.concat(values, axis)).
    """
    reshaped_image = tf.cast(image, tf.float32)
    width, height = 24, 24
    float_images = []
    # deterministic variant: center crop + whitening
    resized_image = tf.image.resize_image_with_crop_or_pad(reshaped_image,
                                                           width, height)
    float_image = tf.image.per_image_whitening(resized_image)
    float_images.append(float_image)
    # six random variants: crop, flip, brightness and contrast jitter
    for _ in range(6):
        distorted_image = tf.random_crop(reshaped_image, [height, width, 3])
        distorted_image = tf.image.random_flip_left_right(distorted_image)
        distorted_image = tf.image.random_brightness(distorted_image,
                                                     max_delta=63)
        distorted_image = tf.image.random_contrast(distorted_image,
                                                   lower=0.2, upper=1.8)
        float_image = tf.image.per_image_whitening(distorted_image)
        float_images.append(float_image)
    return tf.concat(0,float_images)
```
**[CDS-06]** それぞれのラベルについて、オリジナル、および、前処理を施した画像イメージを表示します。
```
# For each label, show the original image followed by its 7 preprocessed
# variants (rows = labels, columns = original + variants).
sess = tf.InteractiveSession()
filename = '/tmp/cifar10_data/cifar-10-batches-bin/test_batch.bin'
q = tf.FIFOQueue(99, [tf.string], shapes=())
q.enqueue([filename]).run(session=sess)
q.close().run(session=sess)
result = read_cifar10(q)

fig = plt.figure(figsize=(8,10))
c = 0
original = {}
modified = {}
# keep reading until one example of every label has been seen
while len(original.keys()) < 10:
    label, orig, dists = sess.run([result.label,
                                   result.uint8image,
                                   distorted_samples(result.uint8image)])
    label = label[0]
    if not label in original.keys():
        original[label] = orig
        modified[label] = dists
for l in range(10):
    orig, dists = original[l], modified[l]
    c += 1
    subplot = fig.add_subplot(10, 8, c)
    subplot.set_xticks([])
    subplot.set_yticks([])
    subplot.imshow(orig.astype(np.uint8))
    for i in range(7):
        c += 1
        subplot = fig.add_subplot(10, 8, c)
        subplot.set_xticks([])
        subplot.set_yticks([])
        # variants are stacked along axis 0 in 24-row slabs; roughly undo
        # the whitening for display (x*40+120)
        pos = i*24
        image = dists[pos:pos+24]*40+120
        subplot.imshow(image.astype(np.uint8))
sess.close()
```
| github_jupyter |
```
# - Decide which map to plot
# in main notebook code
#mapvarnow = 'skj' # choose: skj, bet

# - Define constant plot params (stipple marker + EEZ outline styling)
stipsizenow = 10; stipmarknow = 'o'
stipfacecolnow = 'none'
stipedgeltcolnow = 'whitesmoke'
stipewnow = 0.8 # marker edge width
eezfcnow = 'none'; eezlcnow = 'lightgray' #'silver'
eezlsnow = '-'; eezlwnow = 0.9

# - Define subplot-variable plot params
# Eight panels per figure (4 seasons x [seasonal value, anomaly vs mean]).
# NOTE(review): the skj_*/bet_* arrays and p-value fields are produced in
# the main notebook, not in this cell.
if mapvarnow=='skj':
    fignamenow = 'S6_fig'
    unitsnow = 8*['[metric tons/set]']
    mapsnow = [skj_cp_tot_seas.sel(season='DJF'),
               skj_cp_tot_seas.sel(season='DJF')-skj_cp_tot_mean,
               skj_cp_tot_seas.sel(season='MAM'),
               skj_cp_tot_seas.sel(season='MAM')-skj_cp_tot_mean,
               skj_cp_tot_seas.sel(season='JJA'),
               skj_cp_tot_seas.sel(season='JJA')-skj_cp_tot_mean,
               skj_cp_tot_seas.sel(season='SON'),
               skj_cp_tot_seas.sel(season='SON')-skj_cp_tot_mean]
    vmaxsnow = 4*[60, 12]
    vminsnow = 4*[0, -12]
    pvsnow = 8*[skj_cp_tot_seas_kw_pval]
    ptfsnow = 8*[skj_cp_tot_seas_kw_ptf]
    titlesnow = ['SKJ CPUE - Winter','SKJ CPUE - Winter minus mean',
                 'SKJ CPUE - Spring','SKJ CPUE - Spring minus mean',
                 'SKJ CPUE - Summer','SKJ CPUE - Summer minus mean',
                 'SKJ CPUE - Fall','SKJ CPUE - Fall minus mean']
elif mapvarnow=='bet':
    fignamenow = 'S7_fig'
    unitsnow = 8*['[metric tons/set]']
    mapsnow = [bet_cp_tot_seas.sel(season='DJF'),
               bet_cp_tot_seas.sel(season='DJF')-bet_cp_tot_mean,
               bet_cp_tot_seas.sel(season='MAM'),
               bet_cp_tot_seas.sel(season='MAM')-bet_cp_tot_mean,
               bet_cp_tot_seas.sel(season='JJA'),
               bet_cp_tot_seas.sel(season='JJA')-bet_cp_tot_mean,
               bet_cp_tot_seas.sel(season='SON'),
               bet_cp_tot_seas.sel(season='SON')-bet_cp_tot_mean]
    vmaxsnow = 4*[15, 4]
    vminsnow = 4*[0, -4]
    pvsnow = 8*[bet_cp_tot_seas_kw_pval]
    ptfsnow = 8*[bet_cp_tot_seas_kw_ptf]
    titlesnow = ['BET CPUE - Winter','BET CPUE - Winter minus mean',
                 'BET CPUE - Spring','BET CPUE - Spring minus mean',
                 'BET CPUE - Summer','BET CPUE - Summer minus mean',
                 'BET CPUE - Fall','BET CPUE - Fall minus mean']
# Per-panel stipple / EEZ / colormap settings (8 panels = 4 rows x 2 cols:
# left column seasonal value, right column seasonal anomaly).
# NOTE(review): the original cell defined stipltdkcosnow, stipedgedkcolsnow,
# signifstipsnow and ploteezsnow twice; the first definitions (with stray
# None entries) were immediately overwritten, so they are removed here.
stipltdkcosnow = 0.5*np.asarray(vmaxsnow) # light/dark stip cutoff value
stipedgedkcolsnow = 4*['lightgray', 'darkslategray']
signifstipsnow = 4*[0,1]   # stipple significance only on anomaly panels
ploteezsnow = 4*[1,0]      # draw EEZ outlines only on value panels
cmseqnow = plt.cm.get_cmap('viridis',11)  # sequential map for totals
cmdivnow = plt.cm.get_cmap('PuOr',11)     # diverging map for anomalies
cmsnow = 4*[cmseqnow, cmdivnow]
# - Set proj and define axes (PlateCarree centred on 200E, 4x2 panel grid)
fig,axes = plt.subplots(nrows=4, ncols=2, figsize=(12,10),
                        subplot_kw={'projection': ccrs.PlateCarree(central_longitude=200)})

# - Make maps pretty + plot
isp = 0  # panel counter indexing the 8-element per-panel parameter lists
for irow in range(4):
    for icol in range(2):
        ax = axes[irow][icol]
        # shared map background; runs an external helper script in this
        # namespace (see helper_scripts/create_map_bgs.py)
        exec(open('helper_scripts/create_map_bgs.py').read())
        # panel letter (A, B, C, ...)
        ax.text(-0.08, 1.08, string.ascii_uppercase[isp],
                transform=ax.transAxes, size=16, weight='bold')
        mapsnow[isp].plot(
            ax=ax, transform=ccrs.PlateCarree(), cmap=cmsnow[isp],
            vmin=vminsnow[isp], vmax=vmaxsnow[isp],
            cbar_kwargs={'pad': 0.02, 'label': unitsnow[isp]})
        if ploteezsnow[isp]==1:
            nueezs.plot(ax=ax, transform=ccrs.PlateCarree(),
                        color=eezfcnow, edgecolor=eezlcnow, linewidth=eezlwnow)
        if signifstipsnow[isp]==1:
            # light-edged stipples over high-magnitude cells, dark-edged
            # stipples over low-magnitude cells (cutoff stipltdkcosnow)
            [ltcol_signiflonnow,ltcol_signiflatnow]=find_where_pval_small(
                pvsnow[isp].where(abs(mapsnow[isp])>stipltdkcosnow[isp]),
                ptfsnow[isp])
            [dkcol_signiflonnow,dkcol_signiflatnow]=find_where_pval_small(
                pvsnow[isp].where(abs(mapsnow[isp])<=stipltdkcosnow[isp]),
                ptfsnow[isp])
            ax.scatter(ltcol_signiflonnow, ltcol_signiflatnow,
                       marker=stipmarknow, linewidths=stipewnow,
                       facecolors=stipfacecolnow, edgecolors=stipedgeltcolnow,
                       s=stipsizenow, transform=ccrs.PlateCarree())
            # BUGFIX: the original referenced the undefined name
            # `stipedgedkcolnow`; the defined variable is the per-panel list
            # `stipedgedkcolsnow`, indexed like the other panel parameters.
            ax.scatter(dkcol_signiflonnow, dkcol_signiflatnow,
                       marker=stipmarknow, linewidths=stipewnow,
                       facecolors=stipfacecolnow, edgecolors=stipedgedkcolsnow[isp],
                       s=stipsizenow, transform=ccrs.PlateCarree())
        ax.set_xlabel(''); ax.set_ylabel('')
        ax.set_title(titlesnow[isp])
        isp = isp + 1

# - Save fig (PDF + PNG)
fig.savefig(figpath + fignamenow + '.pdf',
            bbox_inches='tight', pad_inches = 0, dpi = 300)
fig.savefig(figpath + fignamenow + '.png',
            bbox_inches='tight', pad_inches = 0, dpi = 300)
| github_jupyter |

# Ejemplo de simulación numérica
```
import numpy as np
from scipy.integrate import odeint
from matplotlib import rc
import matplotlib.pyplot as plt
%matplotlib inline

# Global matplotlib styling: LaTeX text, larger font, gridded 6x4 figures.
rc("text", usetex=True)
rc("font", size=18)
rc("figure", figsize=(6,4))
rc("axes", grid=True)
```
## Problema físico

Definimos un SR con el origen en el orificio donde el hilo atraviesa el plano, la coordenada $\hat{z}$ apuntando hacia abajo. Con esto sacamos, de la segunda ley de Newton para las partículas:
$$
\begin{align}
\text{Masa 1)}\quad&\vec{F}_1 = m_1 \vec{a}_1 \\
&-T \hat{r} = m_1 \vec{a}_1 \\
&-T \hat{r} = m_1 \left\{ \left(\ddot{r} - r \dot{\theta}^2\right) \hat{r} + \left(r\ddot{\theta} + 2\dot{r}\dot{\theta}\right)\hat{\theta} \right\} \\
&\begin{cases}
\hat{r})\ - T = m_1\left( \ddot{r} - r\, \dot{\theta}^2\right)\\
\hat{\theta})\ 0 = m_1 \left(r \ddot{\theta} + 2 \dot{r}\dot{\theta}\right)\\
\end{cases}\\
\\
\text{Masa 2)}\quad&\vec{F}_2 = m_2 \vec{a}_2 \\
&-T \hat{z} + m_2 g \hat{z} = m_2 \ddot{z} \hat{z} \\
\implies & \boxed{T = m_2 \left( g - \ddot{z} \right)}\\
\end{align}
$$
Ahora reemplazando este resultado para la tension (que es igual en ambas expresiones) y entendiendo que $\ddot{z} = -\ddot{r}$ pues la soga es ideal y de largo constante, podemos rescribir las ecuaciones obtenidas para la masa 1 como:
$$
\begin{cases}
\hat{r})\quad - m_2 \left( g + \ddot{r} \right) = m_1\left( \ddot{r} - r\, \dot{\theta}^2\right)\\
\\
\hat{\theta})\quad 0 = m_1 \left(r \ddot{\theta} + 2 \dot{r}\dot{\theta}\right)
\end{cases}
\implies
\begin{cases}
\hat{r})\quad \ddot{r} = \dfrac{- m_2 g + m_1 r \dot{\theta}^2}{m_1 + m_2}\\
\\
\hat{\theta})\quad \ddot{\theta} = -2 \dfrac{\dot{r}\dot{\theta}}{r}\\
\end{cases}
$$
La gracia de estos métodos es lograr encontrar una expresión de la forma $y'(x) = f(x,t)$ donde x será la solución buscada, aca como estamos en un sistema de segundo orden en dos variables diferentes ($r$ y $\theta$) sabemos que nuestra solución va a tener que involucrar 4 componentes. Es como en el oscilador armónico, que uno tiene que definir posicion y velocidad inicial para poder conocer el sistema, solo que aca tenemos dos para $r$ y dos para $\theta$.
Se puede ver entonces que vamos a necesitar una solucion del tipo:
$$\mathbf{X} = \begin{pmatrix} r \\ \dot{r}\\ \theta \\ \dot{\theta} \end{pmatrix} $$
Y entonces
$$
\dot{\mathbf{X}} =
\begin{pmatrix} \dot{r} \\ \ddot{r}\\ \dot{\theta} \\ \ddot{\theta} \end{pmatrix} =
\begin{pmatrix} \dot{r} \\ \dfrac{-m_2 g + m_1 r \dot{\theta}^2}{m_1 + m_2} \\ \dot{\theta} \\ -2 \dfrac{\dot{r}\dot{\theta}}{r} \end{pmatrix} =
\mathbf{f}(\mathbf{X}, t)
$$
---
Si alguno quiere, tambien se puede escribir la evolucion del sistema de una forma piola, que no es otra cosa que una querida expansión de Taylor a orden lineal.
$$
\begin{align}
r(t+dt) &= r(t) + \dot{r}(t)\cdot dt \\
\dot{r}(t+dt) &= \dot{r}(t) + \ddot{r}(t)\cdot dt \\
\theta(t+dt) &= \theta(t) + \dot{\theta}(t)\cdot dt \\
\dot{\theta}(t+dt) &= \dot{\theta}(t) + \ddot{\theta}(t)\cdot dt
\end{align}
\implies
\begin{pmatrix}
r\\
\dot{r}\\
\theta\\
\ddot{\theta}
\end{pmatrix}(t + dt) =
\begin{pmatrix}
r\\
\dot{r}\\
\theta\\
\ddot{\theta}
\end{pmatrix}(t) +
\begin{pmatrix}
\dot{r}\\
\ddot{r}\\
\dot{\theta}\\
\ddot{\theta}
\end{pmatrix}(t) \cdot dt
$$
Aca tenemos que recordar que la compu no puede hacer cosas continuas, porque son infinitas cuentas, entones si o si hay que discretizar el tiempo y el paso temporal!
$$
\begin{pmatrix}
r\\
\dot{r}\\
\theta\\
\ddot{\theta}
\end{pmatrix}_{i+1} =
\begin{pmatrix}
r\\
\dot{r}\\
\theta\\
\ddot{\theta}
\end{pmatrix}_i +
\begin{pmatrix}
\dot{r}\\
\ddot{r}\\
\dot{\theta}\\
\ddot{\theta}
\end{pmatrix}_i \cdot dt
$$
Si entonces decido llamar a este vector columna $\mathbf{X}$, el sistema queda escrito como:
$$
\mathbf{X}_{i+1} = \mathbf{X}_i + \dot{\mathbf{X}}_i\ dt
$$
Donde sale denuevo que $\dot{\mathbf{X}}$ es lo que está escrito arriba.
Es decir que para encontrar cualquier valor, solo hace falta saber el vector anterior y la derivada, pero las derivadas ya las tenemos (es todo el trabajo que hicimos de fisica antes)!!
---
---
De cualquier forma que lo piensen, ojala hayan entendido que entonces con tener las condiciones iniciales y las ecuaciones diferenciales ya podemos resolver (tambien llamado *integrar*) el sistema.
```
# Problem constants:
M1 = 3      # mass of the particle on the plane
M2 = 3      # mass of the hanging particle
g = 9.81    # gravitational acceleration

# Initial conditions of the problem:
r0 = 2             # initial radius
r_punto0 = 0       # initial radial velocity (r-dot)
tita0 = 0          # initial angle (theta)
tita_punto0 = 1    # initial angular velocity (theta-dot)

C1 = (M2*g)/(M1+M2) # helper constants from the equations of motion
C2 = (M1)/(M1+M2)
cond_iniciales = [r0, r_punto0, tita0, tita_punto0]
def derivada(X, t, c1, c2):
    """Right-hand side f(X, t) of the first-order system X' = f(X, t).

    X = [r, r', theta, theta'] for the rotating-mass / hanging-mass problem;
    c1 = m2*g/(m1+m2) and c2 = m1/(m1+m2) are the reduced constants.
    """
    r, r_dot, theta, theta_dot = X
    # [r', r'', theta', theta''] straight from the equations of motion
    return [
        r_dot,
        -c1 + c2 * r * theta_dot**2,
        theta_dot,
        -2 * r_dot * theta_dot / r,
    ]
def resuelvo_sistema(m1, m2, tmax = 20):
    """Integrate the system with odeint from t=0 to tmax for masses m1, m2.

    Uses the module-level g and cond_iniciales. Returns [t, out.T], where
    the rows of out.T are (r, r', theta, theta').
    """
    t0 = 0
    c1 = (m2*g)/(m1+m2) # reduced constants
    c2 = (m1)/(m1+m2)
    t = np.arange(t0, tmax, 0.001)
    # Here we could define our own integration scheme, or use the one
    # that comes ready-made in scipy. Note it is not perfect: sometimes
    # it is better to write your own.
    out = odeint(derivada, cond_iniciales, t, args = (c1, c2,))
    return [t, out.T]
# Integrate and plot r(t), theta(t) and the trajectory in the plane.
t, (r, rp, tita, titap) = resuelvo_sistema(M1, M2, tmax=10)

plt.figure()
plt.plot(t, r/r0, 'r')
plt.ylabel(r"$r / r_0$")
plt.xlabel(r"tiempo")
# plt.savefig("directorio/r_vs_t.pdf", dpi=300)

plt.figure()
plt.plot(t, tita-tita0, 'b')
plt.ylabel(r"$\theta - \theta_0$")
plt.xlabel(r"tiempo")
# plt.savefig("directorio/tita_vs_t.pdf", dpi=300)

# trajectory in Cartesian coordinates, normalized by r0
plt.figure()
plt.plot(r*np.cos(tita-tita0)/r0, r*np.sin(tita-tita0)/r0, 'g')
plt.ylabel(r"$r/r_0\ \sin\left(\theta - \theta_0\right)$")
plt.xlabel(r"$r/r_0\ \cos\left(\theta - \theta_0\right)$")
# plt.savefig("directorio/trayectoria.pdf", dpi=300)
```
Todo muy lindo!!
Cómo podemos verificar si esto está andando ok igual? Porque hasta acá solo sabemos que dio razonable, pero el ojímetro no es una medida cuantitativa.
Una opción para ver que el algoritmo ande bien (y que no hay errores numéricos, y que elegimos un integrador apropiado **ojo con esto eh... te estoy mirando a vos, Runge-Kutta**), es ver si se conserva la energía.
Les recuerdo que la energía cinética del sistema es $K = \frac{1}{2} m_1 \left|\vec{v}_1 \right|^2 + \frac{1}{2} m_2 \left|\vec{v}_2 \right|^2$, cuidado con cómo se escribe cada velocidad, y que la energía potencial del sistema únicamente depende de la altura de la pelotita colgante.
Hace falta conocer la longitud $L$ de la cuerda para ver si se conserva la energía mecánica total? (Spoiler: No. Pero piensen por qué)
Les queda como ejercicio a ustedes verificar eso, y también pueden experimentar con distintos metodos de integración a ver qué pasa con cada uno, abajo les dejamos una ayudita para que prueben.
```
from scipy.integrate import solve_ivp

def resuelvo_sistema(m1, m2, tmax = 20, metodo='RK45'):
    """solve_ivp variant of the integrator; `metodo` selects the scheme
    ('RK45', 'RK23', 'Radau', 'BDF', 'LSODA'). Returns the solve_ivp result.

    NOTE: this re-binds the odeint-based resuelvo_sistema defined above.
    """
    t0 = 0
    c1 = (m2*g)/(m1+m2) # reduced constants
    c2 = (m1)/(m1+m2)
    t = np.arange(t0, tmax, 0.001)
    # Here I use a lambda function simply so we can reuse the same
    # `derivada` defined earlier: solve_ivp expects f(t, x) while odeint
    # expected f(x, t), so the lambda just swaps the arguments and binds
    # the constants -- nothing more.
    deriv_bis = lambda t, x: derivada(x, t, c1, c2)
    out = solve_ivp(fun=deriv_bis, t_span=(t0, tmax), y0=cond_iniciales,\
                    method=metodo, t_eval=t)
    return out
# Two parallel arrays: available integration methods and plot colors.
all_metodos = ['RK45', 'RK23', 'Radau', 'BDF', 'LSODA']
all_colores = ['r', 'b', 'm', 'g', 'c']
# Loop over both arrays in lockstep and overlay r(t) for each method.
for met, col in zip(all_metodos, all_colores):
    result = resuelvo_sistema(M1, M2, tmax=30, metodo=met)
    t = result.t
    r, rp, tita, titap = result.y
    plt.plot(t, r/r0, col, label=met)
plt.xlabel("tiempo")
plt.ylabel(r"$r / r_0$")
plt.legend(loc=3)
```
Ven cómo los distintos métodos van modificando más y más la curva de $r(t)$ a medida que van pasando los pasos de integración. Tarea para ustedes es correr el mismo código con la conservación de energía.
Cuál es mejor, por qué y cómo saberlo son preguntas que deberán hacerse e investigar si en algún momento trabajan con esto.
Por ejemplo, pueden buscar en Wikipedia "Symplectic Integrator" y ver qué onda.
### Les dejamos también abajo la simulación de la trayectoria de la pelotita
```
from matplotlib import animation
%matplotlib notebook

# Animate the ball along the integrated trajectory.
result = resuelvo_sistema(M1, M2, tmax=30, metodo='Radau')
t = result.t
r, rp, tita, titap = result.y

fig, ax = plt.subplots()
ax.set_xlim([-1, 1])
ax.set_ylim([-1, 1])
# faint full trajectory plus a marker that moves along it
ax.plot(r*np.cos(tita)/r0, r*np.sin(tita)/r0, 'm', lw=0.2)
line, = ax.plot([], [], 'ko', ms=5)

N_SKIP = 50  # show every 50th integration step per frame
N_FRAMES = int(len(r)/N_SKIP)

def animate(frame_no):
    # place the marker at integration step frame_no * N_SKIP
    i = frame_no*N_SKIP
    r_i = r[i]/r0
    tita_i = tita[i]
    line.set_data(r_i*np.cos(tita_i), r_i*np.sin(tita_i))
    return line,

anim = animation.FuncAnimation(fig, animate, frames=N_FRAMES,
                               interval=50, blit=False)
```
Recuerden que esta animación no va a parar eh, sabemos que verla te deja en una especie de trance místico, pero recuerden pararla cuando haya transcurrido suficiente tiempo
# Animación Interactiva
Usando `ipywidgets` podemos agregar sliders a la animación, para modificar el valor de las masitas
```
from ipywidgets import interactive, interact, FloatProgress
from IPython.display import clear_output, display
%matplotlib inline

# Interactive version: sliders for the masses and integration time.
# NOTE: this decorator re-binds resuelvo_sistema yet again; the function
# now plots the trajectory instead of returning the solution.
@interact(m1=(0,5,0.5), m2=(0,5,0.5), tmax=(0.01,20,0.5)) # lets the user vary the equation parameters
def resuelvo_sistema(m1, m2, tmax = 20):
    t0 = 0
    c1 = (m2*g)/(m1+m2) # reduced constants
    c2 = (m1)/(m1+m2)
    t = np.arange(t0, tmax, 0.05)
    # out = odeint(derivada, cond_iniciales, t, args = (c1, c2,))
    r, rp, tita, titap = odeint(derivada, cond_iniciales, t, args=(c1, c2,)).T
    plt.xlim((-1,1))
    plt.ylim((-1,1))
    plt.plot(r*np.cos(tita)/r0, r*np.sin(tita)/r0,'b-')
    # plt.xlabel("tiempo")
    # plt.ylabel(r"$r / r_0$")
    # plt.show()
```
| github_jupyter |
# 4 データ前処理
## 4.1 欠損データへの対処
```
from IPython.core.display import display
import pandas as pd
from io import StringIO

# Small CSV with missing cells (row 2 column C, row 3 column D) to
# demonstrate missing-value handling.
csv_data = '''A,B,C,D
1.0,2.0,3.0,4.0
5.0,6.0,,8.0
10.0,11.0,12.0,'''
df = pd.read_csv(StringIO(csv_data))
df
# count missing values per column
df.isnull().sum()
df.values
```
### 4.1.1 欠測値を持つサンプル/特徴量を取り除く
```
# drop rows containing missing values
df.dropna()
# drop columns containing missing values
df.dropna(axis=1)
# drop only rows where ALL columns are NaN
df.dropna(how='all')
# drop rows with fewer than 4 non-NaN values
df.dropna(thresh=4)
# drop only rows where column 'C' is NaN
df.dropna(subset=['C'])
```
### 4.1.2 欠測値を補完する
```
from sklearn.preprocessing import Imputer

# Mean imputation of missing values per column
# (strategy='median' / 'most_frequent' are the other options).
# NOTE(review): sklearn.preprocessing.Imputer was removed in scikit-learn
# 0.22; modern code uses sklearn.impute.SimpleImputer.
imr = Imputer(missing_values='NaN', strategy='mean', axis=0)
# fit to the data
imr = imr.fit(df)
# perform the imputation
imputed_data = imr.transform(df.values)
imputed_data
```
## 4.2 カテゴリデータの処理
```
import pandas as pd

# Sample data with one nominal (color), one ordinal (size) and one numeric
# (price) feature plus a class label.
df = pd.DataFrame([
    ['green', 'M', 10.1, 'class1'],
    ['red', 'L', 13.5, 'class2'],
    ['blue', 'XL', 15.3, 'class1'],
])
# set the column names
df.columns = ['color', 'size', 'price', 'classlabel']
df
```
### 4.2.1 順序特徴量のマッピング
```
# dictionary mapping T-shirt sizes to integers (ordinal encoding)
size_mapping = {'XL': 3, 'L': 2, 'M': 1}
# convert the T-shirt sizes to integers
df['size'] = df['size'].map(size_mapping)
df
# inverse dictionary to map the integers back to size strings
inv_size_mapping = {v: k for k, v in size_mapping.items()}
inv_size_mapping
```
### 4.2.2 クラスラベルのエンコーディング
```
import numpy as np

# dictionary mapping class labels to integers
class_mapping = {label: i for i, label in enumerate(np.unique(df['classlabel']))}
class_mapping
# convert the class labels to integers
df['classlabel'] = df['classlabel'].map(class_mapping)
df
inv_class_mapping = {v: k for k, v in class_mapping.items()}
# map the integers back to class labels
df['classlabel'] = df['classlabel'].map(inv_class_mapping)
df

# same round trip using scikit-learn's LabelEncoder
from sklearn.preprocessing import LabelEncoder
class_le = LabelEncoder()
y = class_le.fit_transform(df['classlabel'].values)
y
class_le.inverse_transform(y)
```
### 4.2.3 名義特徴量での one-hot エンコーディング
```
# extract T-shirt color, size and price
X = df[['color', 'size', 'price']].values
# label-encode the nominal color column first
color_le = LabelEncoder()
X[:, 0] = color_le.fit_transform(X[:, 0])
X

from sklearn.preprocessing import OneHotEncoder
# create a one-hot encoder for column 0 (color).
# NOTE(review): the categorical_features argument was removed in
# scikit-learn 0.22 (use ColumnTransformer instead).
ohe = OneHotEncoder(categorical_features=[0])
# run the one-hot encoding
ohe.fit_transform(X).toarray()
# pandas equivalent: get_dummies one-hot encodes the string columns
pd.get_dummies(df[['price', 'color', 'size']])
```
## 4.3 データセットをトレーニングデータセットとテストデータセットに分割する
```
# Wine dataset: http://archive.ics.uci.edu/ml/datasets/Wine
df_wine = pd.read_csv('http://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data', header=None)
display(df_wine.head())
# set the column names
df_wine.columns = ['Class label', 'Alcohol', 'Malic acid', 'Ash', 'Alcalinity of ash', 'Magnesium', 'Total phenols',
                   'Flavanoids', 'Nonflavanoid phenols', 'Proanthocyanins', 'Color intensity', 'Hue',
                   'OD280/OD315 of diluted wines', 'Proline']
display(df_wine.head())
print('Class labels', np.unique(df_wine['Class label']))

# NOTE(review): sklearn.cross_validation was removed in scikit-learn 0.20;
# modern code imports train_test_split from sklearn.model_selection.
from sklearn.cross_validation import train_test_split
# extract the features and the class labels separately
X, y = df_wine.iloc[:, 1:].values, df_wine.iloc[:, 0].values
# hold out 30% of the samples as test data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
```
## 4.4 特徴量の尺度を揃える
```
from sklearn.preprocessing import MinMaxScaler
# min-max scaling instance
mms = MinMaxScaler()
# fit on the training data and scale it
X_train_norm = mms.fit_transform(X_train)
# scale the test data with the training-set parameters
X_test_norm = mms.transform(X_test)
X_train, X_train_norm

from sklearn.preprocessing import StandardScaler
# standardization (zero mean, unit variance) instance
stdsc = StandardScaler()
X_train_std = stdsc.fit_transform(X_train)
X_test_std = stdsc.transform(X_test)
X_train_std
```
## 4.5 有益な特徴量の選択
### 4.5.1 L1 正則化による疎な解
```
from sklearn.linear_model import LogisticRegression
# L1-regularized logistic regression instance
LogisticRegression(penalty='l1')
# L1-regularized logistic regression (inverse regularization C=0.1)
lr = LogisticRegression(penalty='l1', C=0.1)
lr.fit(X_train_std, y_train)
print('Training accuracy:', lr.score(X_train_std, y_train))
print('Test accuracy:', lr.score(X_test_std, y_test))
# show the intercepts
lr.intercept_
# show the weight coefficients (sparse due to the L1 penalty)
lr.coef_

# Plot how each feature's weight evolves as C varies over 10^-4..10^5.
import matplotlib.pyplot as plt
fig = plt.figure()
ax = plt.subplot(111)
colors = ['blue', 'green', 'red', 'cyan', 'magenta', 'yellow', 'black',
          'pink', 'lightgreen', 'lightblue', 'gray', 'indigo', 'orange']
# empty lists for the weight coefficients and inverse regularization values
weights, params = [], []
# fit one model per inverse-regularization value
for c in np.arange(-4, 6):
    # print(c) # -4~5
    lr = LogisticRegression(penalty='l1', C=10 ** c, random_state=0)
    lr.fit(X_train_std, y_train)
    # keep the weight row of class index 1 only
    weights.append(lr.coef_[1])
    params.append(10 ** c)
# convert the collected weights to a NumPy array
weights = np.array(weights)
# plot one line per feature
# print(weights.shape[1]) # -> 13
for column, color in zip(range(weights.shape[1]), colors):
    plt.plot(params, weights[:, column], label=df_wine.columns[column + 1], color=color)
# horizontal dashed reference line at y=0
plt.axhline(0, color='black', linestyle='--', linewidth=3)
plt.xlim([10 ** (-5), 10 ** 5])
# axis labels
plt.ylabel('weight coefficient')
plt.xlabel('C')
# log scale on the x axis
plt.xscale('log')
plt.legend(loc='upper left')
ax.legend(loc='upper center', bbox_to_anchor=(1.38, 1.03), ncol=1, fancybox=True)
plt.show()
```
### 4.5.2 逐次特徴選択アルゴリズム
```
from sklearn.base import clone
from itertools import combinations
import numpy as np
# sklearn.cross_validation was deprecated in 0.18 and removed in 0.20;
# train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
class SBS():
    """Sequential backward selection (SBS).

    Greedily removes one feature at a time, keeping at each step the
    subset with the best held-out score, until only ``k_features`` remain.
    """
    def __init__(self, estimator, k_features, scoring=accuracy_score,
                 test_size=0.25, random_state=1):
        self.scoring = scoring              # metric used to evaluate a feature subset
        self.estimator = clone(estimator)   # fresh, unfitted copy of the estimator
        self.k_features = k_features        # number of features to keep
        self.test_size = test_size          # fraction of the data held out for scoring
        self.random_state = random_state    # seed so the train/test split is reproducible
    def fit(self, X, y):
        # Split into training and held-out evaluation data
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=self.test_size,
                                                            random_state=self.random_state)
        #print(len(X_train), len(X_test), len(y_train), len(y_test))
        # Start from the full feature set (all column indices)
        dim = X_train.shape[1]
        self.indices_ = tuple(range(dim))
        self.subsets_ = [self.indices_]
        #print(self.indices_)
        # Score the model using all features
        score = self._calc_score(X_train, y_train, X_test, y_test, self.indices_)
        # Store the score
        self.scores_ = [score]
        # Drop features until the requested number remains
        while dim > self.k_features:
            # Scores and column-index subsets for this round
            scores = []
            subsets = []
            # Evaluate every subset obtained by removing exactly one feature
            for p in combinations(self.indices_, r=dim - 1):
                # Compute and store the score for this subset
                score = self._calc_score(X_train, y_train, X_test, y_test, p)
                scores.append(score)
                # Store the column indices of this subset
                subsets.append(p)
            # Position of the best score this round
            best = np.argmax(scores)
            # Keep the best-scoring subset
            self.indices_ = subsets[best]
            self.subsets_.append(self.indices_)
            # One fewer feature for the next iteration
            dim -= 1
            # Store the best score of this round
            self.scores_.append(scores[best])
        # Score of the final (smallest) subset
        self.k_score_ = self.scores_[-1]
        return self
    def transform(self, X):
        # Return only the selected feature columns
        return X[:, self.indices_]
    def _calc_score(self, X_train, y_train, X_test, y_test, indices):
        # Fit the estimator on the selected columns only
        self.estimator.fit(X_train[:, indices], y_train)
        # Predict labels for the held-out data
        y_pred = self.estimator.predict(X_test[:, indices])
        # Score predictions against the true labels
        score = self.scoring(y_test, y_pred)
        return score
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
knn = KNeighborsClassifier(n_neighbors=2)
sbs = SBS(knn, k_features=1)
sbs.fit(X_train_std, y_train)
# Number of features in each evaluated subset (13 down to 1)
k_feat = [len(k) for k in sbs.subsets_]
display(k_feat)
# Line plot: number of features on x, accuracy on y
plt.plot(k_feat, sbs.scores_, marker='o')
plt.ylim([0.7, 1.1])
plt.ylabel('Accuracy')
plt.xlabel('Number of features')
plt.grid()
plt.show()
# subsets_[8] is the 5-feature subset (13 - 8 = 5)
k5 = list(sbs.subsets_[8])
print(k5)
print(df_wine.columns[1:][k5])
# Fit the model using all 13 features
knn.fit(X_train_std, y_train)
# Training accuracy
print('Training accuracy:', knn.score(X_train_std, y_train))
# Test accuracy
print('Test accuracy:', knn.score(X_test_std, y_test))
# Fit the model using only the 5 selected features
knn.fit(X_train_std[:, k5], y_train)
# Training accuracy
print('Training accuracy:', knn.score(X_train_std[:, k5], y_train))
# Test accuracy
print('Test accuracy:', knn.score(X_test_std[:, k5], y_test))
```
## 4.6 ランダムフォレストで特徴量の重要度にアクセスする
```
from sklearn.ensemble import RandomForestClassifier
# Feature names of the Wine dataset (column 0 is the class label)
feat_labels = df_wine.columns[1:]
# Create the random forest
# (10,000 trees, parallelized across all available cores)
forest = RandomForestClassifier(n_estimators=10000, random_state=0, n_jobs=-1)
# Fit the model
forest.fit(X_train, y_train)
# Extract the feature importances
importances = forest.feature_importances_
# Feature indices sorted by descending importance
indices = np.argsort(importances)[::-1]
# Print feature names and importances in descending order
for f in range(X_train.shape[1]):
    print("{:2d}) {:<30} {:f}".format(f + 1, feat_labels[indices[f]], importances[indices[f]]))
plt.title('Feature Importances')
plt.bar(range(X_train.shape[1]), importances[indices], color='lightblue', align='center')
plt.xticks(range(X_train.shape[1]), feat_labels[indices], rotation=90)
plt.xlim([-1, X_train.shape[1]])
plt.tight_layout()
plt.show()
from sklearn.feature_selection import SelectFromModel
# Feature selector (importance threshold set to 0.15)
sfm = SelectFromModel(forest, prefit=True, threshold=0.15)
# Select the features above the threshold
X_selected = sfm.transform(X_train)
X_selected.shape
for f in range(X_selected.shape[1]):
    print("{:2d}) {:<30} {:f}".format(f + 1, feat_labels[indices[f]], importances[indices[f]]))
```
| github_jupyter |
REMEMBER: FIRST CREATE A COPY OF THIS FILE WITH A UNIQUE NAME AND DO YOUR WORK THERE. AND MAKE SURE YOU COMMIT YOUR CHANGES TO THE `hw3_submissions` BRANCH.
# Assignment 3 | Cleaning and Exploring Data with Pandas
<img src="data/scoreCard.jpg" width=250>
In this assignment, you will investigate restaurant food safety scores for restaurants in San Francisco. Above is a sample score card for a restaurant. The scores and violation information have been made available by the San Francisco Department of Public Health.
## Loading Food Safety Data
There are 2 files in the data directory:
1. business.csv containing food establishments in San Francisco
1. inspections.csv containing restaurant inspection records
Let's start by loading them into Pandas dataframes. One of the files, business.csv, has encoding (ISO-8859-1), so you will need to account for that when reading it.
### Question 1
#### Question 1a
Read the two files noted above into two pandas dataframes named `bus` and `ins`, respectively. Print the first 5 rows of each to inspect them.
```
import pandas as pd
# businesses.csv is not UTF-8, so its ISO-8859-1 encoding must be specified
bus = pd.read_csv('data/businesses.csv', encoding='ISO-8859-1')
ins = pd.read_csv('data/inspections.csv')
bus.head()
ins.head()
```
## Examining the Business data
From its name alone, we expect the `businesses.csv` file to contain information about the restaurants. Let's investigate this dataset.
### Question 2
#### Question 2a: How many records are there?
<font color='Red'>There are 6315 Records</font>
```
# Total number of business records
len(bus)
```
#### Question 2b: How many unique business IDs are there?
<font color='Red'>There are 6315 Unique Business ID's</font>
```
# Number of distinct business IDs (equals the row count, so IDs are unique)
len(bus['business_id'].unique())
```
#### Question 2c: What are the 5 most common businesses by name, and how many are there in San Francisco?
<font color='Red'>The 5 most common business names are: Starbucks Coffee:72, Peet's Coffee & Tea:24, McDonalds:12, San Francisco Soup Company:11, Walgreens:11</font>
```
# Five most frequent business names and their counts
bus['name'].value_counts().to_frame().head(5)
```
## Zip code
Next, let's explore some of the variables in the business table. We begin by examining the postal code.
### Question 3
#### Question 3a
How are the zip code values stored in python (i.e. data type)?
To answer this you might want to examine a particular entry.
<font color='Red'>Zip codes are stored as strings. This makes sense to me, because they are an identifier and should be used for any mathematical operations. This is also good to know for string manipulation purposes and also merging with other dataframes</font>
```
# Python type of a single postal_code entry (str)
type(bus['postal_code'][0])
```
#### Question 3b
What are the unique values of postal_code?
<font color='Red'>There are 46 unique values. The Unique values are shown below.</font>
```
# Frequency of each distinct postal_code value
bus['postal_code'].value_counts().to_frame()
```
#### Question 3c
Let's say we decide to exclude the businesses that have no zipcode for our analysis (which might include food trucks for example). Use the list of valid 5-digit zip codes below to create a new dataframe called bus_valid, with only businesses whose postal_codes show up in this list of valid zipcodes. How many businesses are there in this new dataframe?
<font color='Red'>There are 5999 businesses in the new dataframe</font>
```
# Valid 5-digit San Francisco zip codes
validZip = ["94102", "94103", "94104", "94105", "94107", "94108",
            "94109", "94110", "94111", "94112", "94114", "94115",
            "94116", "94117", "94118", "94121", "94122", "94123",
            "94124", "94127", "94131", "94132", "94133", "94134"]
# Keep only businesses whose postal_code appears in the valid list
bus_valid = bus[bus['postal_code'].isin(validZip)]
len(bus_valid)
```
## Latitude and Longitude
Another aspect of the data we want to consider is the prevalence of missing values. If many records have missing values then we might be concerned about whether the nonmissing values are representative of the population.
### Question 4
Consider the longitude and latitude in the business DataFrame.
#### Question 4a
How many businesses are missing longitude values, working with only the businesses that are in the list of valid zipcodes?
<font color='Red'>There are 2483 records with missing longitude values</font>
```
# Rows with a missing longitude, then the count of such rows
bus_valid[pd.isnull(bus_valid['longitude'])]
sum(pd.isnull(bus_valid['longitude']))
```
#### Question 4b
Create a new dataframe with one row for each valid zipcode. The dataframe should include the following three columns:
1. `postal_code`: Contains the zip codes in the `validZip` variable above.
2. `null_lon`: The number of businesses in that zipcode with missing `longitude` values.
3. `not_null_lon`: The number of businesses without missing `longitude` values.
```
#There's gotta be an easier way - pls excuse my OVERCOMPLICATED CODE
#initialize dataframe with postal codes
postal_code = list(bus_valid['postal_code'].value_counts().to_frame().index)
postaldf = pd.DataFrame(postal_code)
postaldf = postaldf.rename(columns={0:'postal_code'})
#how many null/not null values in each postal code?
null_counts = []
not_null_counts = []
for code in postal_code:
zipdf = bus_valid[bus_valid['postal_code'] == code]
null_counts.append(sum(pd.isnull(zipdf['longitude'])))
not_null_counts.append(sum(pd.notnull(zipdf['longitude'])))
postaldf['null_lon'] = null_counts
postaldf['not_null_lon'] = not_null_counts
postaldf.head()
```
#### 4c. Do any zip codes appear to have more than their 'fair share' of missing longitude?
To answer this, you will want to compute the proportion of missing longitude values for each zip code, and print the proportion missing longitude, and print the top five zipcodes in descending order of proportion missing postal_code.
<font color='Red'>Zip code 94107 has the largest share of missing longitudes, with about 0.55 of its records missing</font>
```
# Proportion of businesses in each zip code with missing longitude
postaldf['missing_lon_frac'] = postaldf['null_lon'] / (postaldf['null_lon'] + postaldf['not_null_lon'])
# Top five zip codes by proportion of missing longitude values
postaldf_frac_sorted = postaldf.sort_values('missing_lon_frac', ascending=False)
postaldf_frac_sorted.head()
```
# Investigate the inspection data
Let's now turn to the inspection DataFrame. Earlier, we found that `ins` has 4 columns, these are named `business_id`, `score`, `date` and `type`. In this section, we determine the granularity of `ins` and investigate the kinds of information provided for the inspections.
### Question 5
#### Question 5a
As with the business data, assess whether there is one inspection record for each business, by counting how many rows are in the data and how many unique businesses there are in the data. If they are exactly the same number, it means there is only one inspection per business, clearly.
<font color='Red'>Since there are more inspections than businesses, there is more than one inspection record for each business</font>
```
# Compare record counts: there are more inspections than businesses
print(len(bus))
print(len(ins))
```
#### Question 5b
What values does `type` take on? How many occurrences of each value is in the DataFrame? Create a new dataframe named `ins2` by copying `ins` and keeping only records with values of `type` that occur more than 10 times in the original table. In other words, eliminate records that have values of `type` that occur rarely (< 10 times). Check the result to make sure rare types are eliminated.
<font color='Red'>type takes on "routine" and "complaint". "complaint" has only 1 record while "routine" has 15429 records. I have eliminated complaints by subsetting only routine inspections.</font>
```
ins['type'].value_counts().to_frame()
# Keep only the common 'routine' inspections ('complaint' occurs only once).
# .copy() makes ins2 an independent dataframe (as the question asks), so the
# later year-column assignment does not raise SettingWithCopyWarning on a
# view of `ins`.
ins2 = ins[ins['type'] == 'routine'].copy()
ins2
```
#### Question 5c
Since the data was stored in a .csv file, the dates are formatted as strings such as `20160503`. Once we read in the data, we would like to have dates in an appropriate format for analysis. Add a new column called `year` by capturing the first four characters of the date column.
Hint: we have seen multiple ways of doing this in class, includings `str` operations, `lambda` functions, `datetime` operations, and others. Choose the method that works best for you :)
```
# Extract the year as the first four characters of the date column.
# Vectorized .str slicing replaces the original redundant copy plus a
# row-wise `lambda x: str(x)[0:4]` apply — same result, one expression.
ins2['year'] = ins2['date'].astype(str).str[:4]
ins2
```
#### Question 5d
What range of years is covered in this data set? Are there roughly same number of inspections each year? Try dropping records for any years with less than 50 inspections and store the result in a new dataframe named `ins3`.
<font color='Red'>2013 only has 38 records whereas 2015-2016 has thousands of records. ins3 is created without records of 2013.</font>
```
# Inspections per year; 2013 has only 38 records
ins2['year'].value_counts().to_frame()
# Drop the under-represented year 2013
ins3 = ins2[ins2['year'] != '2013']
ins3
```
Let's examine only the inspections for one year: 2016. This puts businesses on a more equal footing because [inspection guidelines](https://www.sfdph.org/dph/eh/Food/Inspections.asp) generally refer to how many inspections should occur in a given year.
### Question 6
#### Question 6a
Merge the business and 2016 inspections data, keeping all businesses regardless of whether they show up in the inspections file. Show the first several rows of the resulting dataframe.
```
# Restrict to 2016 inspections
ins3_2016 = ins3[ins3['year']=='2016']
# The question asks to keep ALL businesses regardless of whether they have a
# 2016 inspection, so use a right join on the business table (the original
# inner join silently dropped uninspected businesses).
Ratings_2016 = pd.merge(ins3_2016, bus_valid, how="right", left_on ='business_id', right_on ='business_id')
Ratings_2016.head(7)
```
#### Question 6b
Print the 20 lowest rated businesses names, their addresses, and their ratings.
```
# Sort ascending by score and show the 20 lowest-rated businesses
Ratings_2016_sorted = Ratings_2016.sort_values('score')
Ratings_2016_sorted.head(20)[['name', 'address', 'score']]
```
## Done!
Now commit this notebook to your `hw3_submissions` branch, push it to your GitHub repo, and open a PR!
NICE!
| github_jupyter |
<a href="https://colab.research.google.com/github/Ipal23/LSTM-Neural-Network-Bitcoin-Stock-prediction/blob/main/Bitcoin_Prediction_with_the_use_of_an_LSTM.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
Project 1. The prediction of the Bitcoin Stock Price with the use of an Long-Short-Term-Memory Neural Network.
In case you track Artificial Intelligence, you have certainly read about Neural Networks. They have gained traction due to the fact that they perform extraordinarily well on a lot of different problems. They also have great ability to handle incomplete data. As such Deep learning is an important topic nowadays.
Individuals can be empowered to make decisions based on statistical analysis and may lean on the intuition the prediction provides.
Long Short Term Memory networks were introduced by Hochreiter & Schmidhuber(1997).
The stock market industry is producing huge amounts of data which need to be mined to discover hidden information for effective decision making in terms of shareholder purchases and sales.
LSTMs are designed to avoid issues such as the long-term dependency problem. Recalling information for long periods of time refer to their default behavior.
The preparation of independent variables was one of the major challenges faced.
Input Data
Choosing the proper set of independent variables is of utmost importance for accurate forecasting. The data used in this paper work were historical daily stock prices.
In this study the closing price is chosen to be modeled and predicted.
In terms of Time Series and econometric methods, MSE is considered an acceptable measure of performance.
```
# Author Iliana Paliari
# Build an LSTM neural network to predict Bitcoin stock prices.
import pandas as pd
from google.colab import files
import pandas as pd  # NOTE(review): duplicate import, harmless
from matplotlib import pyplot as plt
import numpy as np
import math
from sklearn.preprocessing import normalize, MinMaxScaler
from sklearn.metrics import mean_squared_error
from keras import backend as K
import keras
from keras.models import Sequential
from keras.layers import GRU, Dense
from keras.layers import LSTM
from keras import callbacks
from keras.utils import np_utils
from keras.layers.core import Dense, Dropout, Activation
from keras import optimizers
import tensorflow as tf
# Upload the CSV interactively (Colab) and load it into a pandas DataFrame
uploaded = files.upload()
import io
df = pd.read_csv(io.BytesIO(uploaded['BCHAIN-MKPRU.csv']))
# Display the first rows
df.head()
# Keep only the Date and Value columns
df = pd.DataFrame(df, columns=['Date', 'Value'])
print(df)
# Data preparation: model only the closing price ('Value')
columns_to_view = ['Value']
df = df[columns_to_view]
df.index.names = ['Date']
df.sort_index(inplace=True)
print('Total rows: {}'.format(len(df)))
df.head()
# Plot the price series
df.plot()
# Missing-value checks
df.isnull().sum()
null_columns=df.columns[df.isnull().any()]
df[null_columns].isnull().sum()
# Missing-value checks (repeated)
null_columns=df.columns[df.isnull().any()]
# Missing-value checks (repeated)
df.isnull().sum()
print(df[df.isnull().any(axis=1)][null_columns].head())
df.dropna(inplace=True)
# Print the minimum and maximum price
print('Min', np.min(df))
print('Max', np.max(df))
df.values.tolist()
df.dtypes
# Coerce Value to numeric (non-parsable entries become NaN)
df['Value'] = pd.to_numeric(df['Value'],errors='coerce')
dataset = df.astype('float64')
def create_dataset(dataset, look_back=1):
    """Build supervised-learning samples from a univariate time series.

    Each input row is a window of ``look_back`` consecutive values, and
    the target is the value immediately following that window.

    Parameters
    ----------
    dataset : 2-D array of shape (n_samples, 1)
        The time series, one value per row.
    look_back : int
        Number of past time steps used as features.

    Returns
    -------
    (X, y) : tuple of np.ndarray
        X has shape (n, look_back); y has shape (n,).

    Notes
    -----
    The original version contained leftover debug ``print`` calls and a
    no-op expression statement (``dataset[i + look_back, 0]``); both have
    been removed without changing the returned values.
    """
    dataX, dataY = [], []
    # Stop look_back+1 steps before the end so the target index stays valid
    for i in range(len(dataset) - look_back - 1):
        # Window of the previous `look_back` prices as features
        dataX.append(dataset[i:(i + look_back), 0])
        # The value right after the window is the prediction target
        dataY.append(dataset[i + look_back, 0])
    return np.array(dataX), np.array(dataY)
# Scale the dataset to [0, 1], as expected by the LSTM
scaler = MinMaxScaler(feature_range=(0, 1))
scaled = scaler.fit_transform(dataset)
print('Min', np.min(scaled))
print('Max', np.max(scaled))
df.dropna(inplace=True)
# Histogram of the raw values
df.hist(bins=10)
# Count of exactly-zero prices
len(df[df['Value'] == 0])
print(scaled[:10])
# Split chronologically into 70% training and 30% test data
train_size = int(len(scaled) * 0.70)
# BUGFIX: was `len(scaled - train_size)`, which subtracts the scalar from
# every array element and therefore always equals len(scaled).
test_size = len(scaled) - train_size
train, test = scaled[0:train_size, :], scaled[train_size: len(scaled), :]
print('train: {}\ntest: {}'.format(len(train), len(test)))
```
| github_jupyter |
# CLX Asset Classification (Supervised)
## Authors
- Eli Fajardo (NVIDIA)
- Görkem Batmaz (NVIDIA)
- Bhargav Suryadevara (NVIDIA)
## Table of Contents
* Introduction
* Dataset
* Reading in the datasets
* Training and inference
* References
# Introduction
In this notebook, we will show how to predict the function of a server with Windows Event Logs using cudf, cuml and pytorch. The machines are labeled as DC, SQL, WEB, DHCP, MAIL and SAP. The dependent variable will be the type of the machine. The features are selected from Windows Event Logs which is in a tabular format. This is a first step to learn the behaviours of certain types of machines in data-centres by classifying them probabilistically. It could help to detect unusual behaviour in a data-centre. For example, some compromised computers might be acting as web/database servers but with their original tag.
This work could be expanded by using different log types or different events from the machines as features to improve accuracy. Various labels can be selected to cover different types of machines or data-centres.
## Library imports
```
from clx.analytics.asset_classification import AssetClassification
import cudf
from cuml.preprocessing import train_test_split
from cuml.preprocessing import LabelEncoder
import torch
from sklearn.metrics import accuracy_score, f1_score, confusion_matrix
import pandas as pd
from os import path
import s3fs
```
## Initialize variables
10000 is chosen as the batch size to optimise the performance for this dataset. It can be changed depending on the data loading mechanism or the setup used.
EPOCH should also be adjusted depending on convergence for a specific dataset.
label_col indicates the total number of features used plus the dependent variable. Feature names are listed below.
```
batch_size = 10000   # chosen to optimize data-loading performance for this dataset
label_col = '19'     # column index of the dependent variable (machine type)
epochs = 15          # adjust depending on convergence for a specific dataset
ac = AssetClassification()
```
## Read the dataset into a GPU dataframe with `cudf.read_csv()`
The original data had many other fields. Many of them were either static or mostly blank. After filtering those, there were 18 meaningful columns left. In this notebook we use a fake continuous feature to show the inclusion of continuous features too. When you are using raw data, the cell below needs to be uncommented.
```
# win_events_gdf = cudf.read_csv("raw_features_and_labels.csv")
```
```
win_events_gdf.dtypes
eventcode int64
keywords object
privileges object
message object
sourcename object
taskcategory object
account_for_which_logon_failed_account_domain object
detailed_authentication_information_authentication_package object
detailed_authentication_information_key_length float64
detailed_authentication_information_logon_process object
detailed_authentication_information_package_name_ntlm_only object
logon_type float64
network_information_workstation_name object
new_logon_security_id object
impersonation_level object
network_information_protocol float64
network_information_direction object
filter_information_layer_name object
cont1 int64
label object
dtype: object
```
### Define categorical and continuous feature columns.
```
# Categorical feature columns from the Windows Event Logs (plus the label)
cat_cols = [
    "eventcode",
    "keywords",
    "privileges",
    "message",
    "sourcename",
    "taskcategory",
    "account_for_which_logon_failed_account_domain",
    "detailed_authentication_information_authentication_package",
    "detailed_authentication_information_key_length",
    "detailed_authentication_information_logon_process",
    "detailed_authentication_information_package_name_ntlm_only",
    "logon_type",
    "network_information_workstation_name",
    "new_logon_security_id",
    "impersonation_level",
    "network_information_protocol",
    "network_information_direction",
    "filter_information_layer_name",
    "label"
]
# Continuous feature columns ("cont1" is the fake continuous feature
# added to demonstrate handling of continuous inputs)
cont_cols = [
    "cont1"
]
```
The following are functions used to preprocess categorical and continuous feature columns. This can vary depending on what best fits your application and data.
```
def categorize_columns(cat_gdf):
    """Label-encode every column of a categorical (cudf) dataframe in place.

    Each column is cast to string, missing values are filled with "NA",
    values are mapped to integer codes with cuml's LabelEncoder, and the
    result is stored as int16.
    """
    for col in cat_gdf.columns:
        cat_gdf[col] = cat_gdf[col].astype('str')
        # NOTE(review): fillna after the str cast assumes nulls survive
        # astype('str') (cudf behavior) — with pandas they would already
        # be the literal string 'nan' here; confirm against cudf docs
        cat_gdf[col] = cat_gdf[col].fillna("NA")
        cat_gdf[col] = LabelEncoder().fit_transform(cat_gdf[col])
        cat_gdf[col] = cat_gdf[col].astype('int16')
    return cat_gdf
def normalize_conts(cont_gdf):
    """Return a z-score-standardized copy of the continuous columns.

    Each column is shifted to zero mean and scaled to unit variance
    (population standard deviation, ddof=0). The input is not mutated.
    """
    col_means = cont_gdf.mean(0)
    col_stds = cont_gdf.std(ddof=0)
    return (cont_gdf - col_means) / col_stds
```
Preprocessing steps below are not executed in this notebook, because we release already preprocessed data.
```
#win_events_gdf[cat_cols] = categorize_columns(win_events_gdf[cat_cols])
#win_events_gdf[cont_cols] = normalize_conts(win_events_gdf[cont_cols])
```
Read Windows Event data already preprocessed by above steps
```
S3_BASE_PATH = "rapidsai-data/cyber/clx"
WINEVT_PREPROC_CSV = "win_events_features_preproc.csv"
# Download the preprocessed Windows Event features from S3 if not already present
if not path.exists(WINEVT_PREPROC_CSV):
    fs = s3fs.S3FileSystem(anon=True)
    fs.get(S3_BASE_PATH + "/" + WINEVT_PREPROC_CSV, WINEVT_PREPROC_CSV)
win_events_gdf = cudf.read_csv("win_events_features_preproc.csv")
win_events_gdf.head()
```
### Split the dataset into training and test sets using cuML `train_test_split` function
Column 19 contains the ground truth about each machine's function that the logs come from. i.e. DC, SQL, WEB, DHCP, MAIL and SAP. Hence it will be used as a label.
```
# 90/10 train/test split on the "label" column (the machine's function)
X_train, X_test, Y_train, Y_test = train_test_split(win_events_gdf, "label", train_size=0.9)
X_train["label"] = Y_train
X_train.head()
Y_train.unique()
```
### Print Labels
Making sure the test set contains all labels
```
# Verify the test set contains all labels
Y_test.unique()
```
## Training
Asset Classification training uses the fastai tabular model. More details can be found at https://github.com/fastai/fastai/blob/master/fastai/tabular/models.py#L6
Feature columns will be embedded so that they can be used as categorical values. The limit can be changed depending on the accuracy of the dataset.
Adam is the optimizer used in the training process; it is popular because it produces good results in various tasks. In its paper, computing the first and the second moment estimates and updating the parameters are summarized as follows
$$\alpha_{t}=\alpha \cdot \sqrt{1-\beta_{2}^{t}} /\left(1-\beta_{1}^{t}\right)$$
More details on Adam can be found at https://arxiv.org/pdf/1412.6980.pdf
We have found that the way we partition the dataframes with a 10000 batch size gives us the optimum data loading capability. The **batch_size** argument can be adjusted for different sizes of datasets.
```
# "label" is the target, not a categorical feature
cat_cols.remove("label")
ac.train_model(X_train, cat_cols, cont_cols, "label", batch_size, epochs, lr=0.01, wd=0.0)
```
## Evaluation
```
# Predict on the held-out set and move the results to host arrays
pred_results = ac.predict(X_test, cat_cols, cont_cols).to_array()
true_results = Y_test.to_array()
f1_score_ = f1_score(pred_results, true_results, average='micro')
print('micro F1 score: %s'%(f1_score_))
# Free GPU memory used during inference
torch.cuda.empty_cache()
labels = ["DC","DHCP","MAIL","SAP","SQL","WEB"]
# Confusion matrix with human-readable machine-type labels
a = confusion_matrix(true_results, pred_results)
pd.DataFrame(a, index=labels, columns=labels)
```
The confusion matrix shows that some machines' function can be predicted really well, whereas some of them need more tuning or more features. This work can be improved and expanded to cover individual data-centres to create a realistic map of the network using ML by not just relying on the naming conventions. It could also help to detect more prominent scale anomalies like multiple machines, not acting per their tag.
## References:
* https://github.com/fastai/fastai/blob/master/fastai/tabular/models.py#L6
* https://jovian.ml/aakashns/04-feedforward-nn
* https://www.kaggle.com/dienhoa/reverse-tabular-module-of-fast-ai-v1
* https://github.com/fastai/fastai/blob/master/fastai/layers.py#L44
| github_jupyter |

# Chapter 8: Basic Data Wrangling With Pandas
<h2>Chapter Outline<span class="tocSkip"></span></h2>
<hr>
<div class="toc"><ul class="toc-item"><li><span><a href="#1.-DataFrame-Characteristics" data-toc-modified-id="1.-DataFrame-Characteristics-2">1. DataFrame Characteristics</a></span></li><li><span><a href="#2.-Basic-DataFrame-Manipulations" data-toc-modified-id="2.-Basic-DataFrame-Manipulations-3">2. Basic DataFrame Manipulations</a></span></li><li><span><a href="#3.-DataFrame-Reshaping" data-toc-modified-id="3.-DataFrame-Reshaping-4">3. DataFrame Reshaping</a></span></li><li><span><a href="#4.-Working-with-Multiple-DataFrames" data-toc-modified-id="4.-Working-with-Multiple-DataFrames-5">4. Working with Multiple DataFrames</a></span></li><li><span><a href="#5.-More-DataFrame-Operations" data-toc-modified-id="5.-More-DataFrame-Operations-6">5. More DataFrame Operations</a></span></li></ul></div>
## Chapter Learning Objectives
<hr>
- Inspect a dataframe with `df.head()`, `df.tail()`, `df.info()`, `df.describe()`.
- Obtain dataframe summaries with `df.info()` and `df.describe()`.
- Manipulate how a dataframe displays in Jupyter by modifying Pandas configuration options such as `pd.set_option("display.max_rows", n)`.
- Rename columns of a dataframe using the `df.rename()` function or by accessing the `df.columns` attribute.
- Modify the index name and index values of a dataframe using `.set_index()`, `.reset_index()` , `df.index.name`, `.index`.
- Use `df.melt()` and `df.pivot()` to reshape dataframes, specifically to make tidy dataframes.
- Combine dataframes using `df.merge()` and `pd.concat()` and know when to use these different methods.
- Apply functions to a dataframe `df.apply()` and `df.applymap()`
- Perform grouping and aggregating operations using `df.groupby()` and `df.agg()`.
- Perform aggregating methods on grouped or ungrouped objects such as finding the minimum, maximum and sum of values in a dataframe using `df.agg()`.
- Remove or fill missing values in a dataframe with `df.dropna()` and `df.fillna()`.
## 1. DataFrame Characteristics
<hr>
Last chapter we looked at how we can create dataframes. Let's now look at some helpful ways we can view our dataframe.
```
import numpy as np
import pandas as pd
```
### Head/Tail
The `.head()` and `.tail()` methods allow you to view the top/bottom *n* (default 5) rows of a dataframe. Let's load in the cycling data set from last chapter and try them out:
```
df = pd.read_csv('data/cycling_data.csv')
df.head()
```
The default return value is 5 rows, but we can pass in any number we like. For example, let's take a look at the top 10 rows:
```
df.head(10)
```
Or the bottom 5 rows:
```
df.tail()
```
### DataFrame Summaries
Three very helpful attributes/functions for getting high-level summaries of your dataframe are:
- `.shape`
- `.info()`
- `.describe()`
`.shape` is just like the ndarray attribute we've seen previously. It gives the shape (rows, cols) of your dataframe:
```
df.shape
```
`.info()` prints information about the dataframe itself, such as dtypes, memory usages, non-null values, etc:
```
df.info()
```
`.describe()` provides summary statistics of the values within a dataframe:
```
df.describe()
```
By default, `.describe()` only prints summaries of numeric features. We can force it to give summaries on all features using the argument `include='all'` (although they may not make sense!):
```
df.describe(include='all')
```
### Displaying DataFrames
Displaying your dataframes effectively can be an important part of your workflow. If a dataframe has more than 60 rows, Pandas will only display the first 5 and last 5 rows:
```
pd.DataFrame(np.random.rand(100))
```
For dataframes of less than 60 rows, Pandas will print the whole dataframe:
```
df
```
I find the 60 row threshold to be a little too much, I prefer something more like 20. You can change the setting using `pd.set_option("display.max_rows", 20)` so that anything with more than 20 rows will be summarised by the first and last 5 rows as before:
```
pd.set_option("display.max_rows", 20)
df
```
There are also other display options you can change, such as how many columns are shown, how numbers are formatted, etc. See the [official documentation](https://pandas.pydata.org/pandas-docs/stable/user_guide/options.html#options-and-settings) for more.
One display option I will point out is that Pandas allows you to style your tables, for example by highlighting negative values, or adding conditional colour maps to your dataframe. Below I'll style values based on their value ranging from negative (purple) to positive (yellow) but you can see the [styling documentation](https://pandas.pydata.org/pandas-docs/stable/user_guide/style.html#Styling) for more examples.
```
test = pd.DataFrame(np.random.randn(5, 5),
index = [f"row_{_}" for _ in range(5)],
columns = [f"feature_{_}" for _ in range(5)])
test.style.background_gradient(cmap='plasma')
```
### Views vs Copies
In previous chapters we've discussed views ("looking" at a part of an existing object) and copies (making a new copy of the object in memory). These things get a little abstract with Pandas and "...it’s very hard to predict whether it will return a view or a copy" (that's a quote straight [from a dedicated section in the Pandas documentation](https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy)).
Basically, it depends on the operation you are trying to perform, your dataframe's structure and the memory layout of the underlying array. But don't worry, let me tell you all you need to know. Firstly, the most common warning you'll encounter in Pandas is the `SettingWithCopy`, Pandas raises it as a warning that you might not be doing what you think you're doing. Let's see an example. You may recall there is one outlier `Time` in our dataframe:
```
df[df['Time'] > 4000]
```
Imagine we wanted to change this to `2000`. You'd probably do the following:
```
df[df['Time'] > 4000]['Time'] = 2000
```
Ah, there's that warning. Did our dataframe get changed?
```
df[df['Time'] > 4000]
```
No it didn't, even though you probably thought it did. What happened above is that `df[df['Time'] > 4000]` was executed first and returned a copy of the dataframe, we can confirm by using `id()`:
```
print(f"The id of the original dataframe is: {id(df)}")
print(f" The id of the indexed dataframe is: {id(df[df['Time'] > 4000])}")
```
We then tried to set a value on this new object by appending `['Time'] = 2000`. Pandas is warning us that we are doing that operation on a copy of the original dataframe, which is probably not what we want. To fix this, you need to index in a single go, using `.loc[]` for example:
```
df.loc[df['Time'] > 4000, 'Time'] = 2000
```
No error this time! And let's confirm the change:
```
df[df['Time'] > 4000]
```
The second thing you need to know is that if you're ever in doubt about whether something is a view or a copy, you can just use the `.copy()` method to force a copy of a dataframe. Just like this:
```
df2 = df[df['Time'] > 4000].copy()
```
That way, you're guaranteed a copy that you can modify as you wish.
## 2. Basic DataFrame Manipulations
<hr>
### Renaming Columns
We can rename columns two ways:
1. Using `.rename()` (to selectively change column names)
2. By setting the `.columns` attribute (to change all column names at once)
```
df
```
Let's give it a go:
```
df.rename(columns={"Date": "Datetime",
"Comments": "Notes"})
df
```
Wait? What happened? Nothing changed? In the code above we did actually rename columns of our dataframe but we didn't modify the dataframe inplace, we made a copy of it. There are generally two options for making permanent dataframe changes:
- 1. Use the argument `inplace=True`, e.g., `df.rename(..., inplace=True)`, available in most functions/methods
- 2. Re-assign, e.g., `df = df.rename(...)`
The Pandas team recommends **Method 2 (re-assign)**, for a [few reasons](https://www.youtube.com/watch?v=hK6o_TDXXN8&t=700) (mostly to do with how memory is allocated under the hood).
```
df = df.rename(columns={"Date": "Datetime",
"Comments": "Notes"})
df
```
If you wish to change all of the columns of a dataframe, you can do so by setting the `.columns` attribute:
```
df.columns = [f"Column {_}" for _ in range(1, 7)]
df
```
### Changing the Index
You can change the index labels of a dataframe in 4 main ways:
1. `.set_index()` to make one of the columns of the dataframe the index
2. Directly modify `df.index.name` to change the index name
3. `.reset_index()` to move the current index as a column and to reset the index with integer labels starting from 0
4. Directly modify the `.index` attribute
```
df
```
Below I will set the index as `Column 1` and rename the index to "New Index":
```
df = df.set_index("Column 1")
df.index.name = "New Index"
df
```
I can send the index back to a column and have a default integer index using `.reset_index()`:
```
df = df.reset_index()
df
```
Like with column names, we can also modify the index directly, but I can't remember ever doing this, usually I'll use `.set_index()`:
```
df.index
df.index = range(100, 133, 1)
df
```
### Adding/Removing Columns
There are two main ways to add/remove columns of a dataframe:
1. Use `[]` to add columns
2. Use `.drop()` to drop columns
Let's re-read in a fresh copy of the cycling dataset.
```
df = pd.read_csv('data/cycling_data.csv')
df
```
We can add a new column to a dataframe by simply using `[]` with a new column name and value(s):
```
df['Rider'] = 'Tom Beuzen'
df['Avg Speed'] = df['Distance'] * 1000 / df['Time'] # avg. speed in m/s
df
df = df.drop(columns=['Rider', 'Avg Speed'])
df
```
### Adding/Removing Rows
You won't often be adding rows to a dataframe manually (you'll usually add rows through concatenating/joining - that's coming up next). You can add/remove rows of a dataframe in two ways:
1. Use `.append()` to add rows
2. Use `.drop()` to drop rows
```
df
```
Let's add a new row to the bottom of this dataframe:
```
another_row = pd.DataFrame([["12 Oct 2019, 00:10:57", "Morning Ride", "Ride",
2331, 12.67, "Washed and oiled bike last night"]],
columns = df.columns,
index = [33])
df = df.append(another_row)
df
```
We can drop all rows above index 30 using `.drop()`:
```
df.drop(index=range(30, 34))
```
## 3. DataFrame Reshaping
<hr>
[Tidy data](https://vita.had.co.nz/papers/tidy-data.pdf) is about "linking the structure of a dataset with its semantics (its meaning)". It is defined by:
1. Each variable forms a column
2. Each observation forms a row
3. Each type of observational unit forms a table
Often you'll need to reshape a dataframe to make it tidy (or for some other purpose).

Source: [r4ds](https://r4ds.had.co.nz/tidy-data.html#fig:tidy-structure)
### Melt and Pivot
Pandas `.melt()`, `.pivot()` and `.pivot_table()` can help reshape dataframes
- `.melt()`: make wide data long.
- `.pivot()`: make long data wide.
- `.pivot_table()`: same as `.pivot()` but can handle multiple indexes.

Source: [Garrick Aden-Buie's GitHub](https://github.com/gadenbuie/tidyexplain#spread-and-gather)
The below data shows how many courses different instructors taught across different years. If the question you want to answer is something like: "Does the number of courses taught vary depending on year?" then the below would probably not be considered tidy because there are multiple observations of courses taught in a year per row (i.e., there is data for 2018, 2019 and 2020 in a single row):
```
df = pd.DataFrame({"Name": ["Tom", "Mike", "Tiffany", "Varada", "Joel"],
"2018": [1, 3, 4, 5, 3],
"2019": [2, 4, 3, 2, 1],
"2020": [5, 2, 4, 4, 3]})
df
```
Let's make it tidy with `.melt()`. `.melt()` takes a few arguments, most important is the `id_vars` which indicates which column should be the "identifier".
```
df_melt = df.melt(id_vars="Name",
var_name="Year",
value_name="Courses")
df_melt
```
The `value_vars` argument allows us to select which specific variables we want to "melt" (if you don't specify `value_vars`, all non-identifier columns will be used). For example, below I'm omitting the `2018` column:
```
df.melt(id_vars="Name",
value_vars=["2019", "2020"],
var_name="Year",
value_name="Courses")
```
Sometimes, you want to make long data wide, which we can do with `.pivot()`. When using `.pivot()` we need to specify the `index` to pivot on, and the `columns` that will be used to make the new columns of the wider dataframe:
```
df_pivot = df_melt.pivot(index="Name",
columns="Year",
values="Courses")
df_pivot
```
You'll notice that Pandas set our specified `index` as the index of the new dataframe and preserved the label of the columns. We can easily remove these names and reset the index to make our dataframe look like it originally did:
```
df_pivot = df_pivot.reset_index()
df_pivot.columns.name = None
df_pivot
```
`.pivot()` will often get you what you want, but it won't work if you want to:
- Use multiple indexes (next chapter), or
- Have duplicate index/column labels
In these cases you'll have to use `.pivot_table()`. I won't focus on it too much here because I'd rather you learn about `pivot()` first.
```
df = pd.DataFrame({"Name": ["Tom", "Tom", "Mike", "Mike"],
"Department": ["CS", "STATS", "CS", "STATS"],
"2018": [1, 2, 3, 1],
"2019": [2, 3, 4, 2],
"2020": [5, 1, 2, 2]}).melt(id_vars=["Name", "Department"], var_name="Year", value_name="Courses")
df
```
In the above case, we have duplicates in `Name`, so `pivot()` won't work. It will throw us a `ValueError: Index contains duplicate entries, cannot reshape`:
```
df.pivot(index="Name",
columns="Year",
values="Courses")
```
In such a case, we'd use `.pivot_table()`. It will apply an aggregation function to our duplicates, in this case, we'll `sum()` them up:
```
df.pivot_table(index="Name", columns='Year', values='Courses', aggfunc='sum')
```
If we wanted to keep the numbers per department, we could specify both `Name` and `Department` as multiple indexes:
```
df.pivot_table(index=["Name", "Department"], columns='Year', values='Courses')
```
The result above is a multi-index or "hierarchically indexed" dataframe (more on those next chapter). If you ever have a need to use it, you can read more about `pivot_table()` in the [documentation](https://pandas.pydata.org/pandas-docs/stable/user_guide/reshaping.html#pivot-tables).
## 4. Working with Multiple DataFrames
<hr>
Often you'll work with multiple dataframes that you want to stick together or merge. `df.merge()` and `pd.concat()` are all you need to know for combining dataframes. The Pandas [documentation](https://pandas.pydata.org/pandas-docs/stable/user_guide/merging.html) is very helpful for these functions, but they are pretty easy to grasp.
```{note}
The example joins shown in this section are inspired by [Chapter 15](https://stat545.com/join-cheatsheet.html) of Jenny Bryan's STAT 545 materials.
```
### Sticking DataFrames Together with `pd.concat()`
You can use `pd.concat()` to stick dataframes together:
- Vertically: if they have the same **columns**, OR
- Horizontally: if they have the same **rows**
```
df1 = pd.DataFrame({'A': [1, 3, 5],
'B': [2, 4, 6]})
df2 = pd.DataFrame({'A': [7, 9, 11],
'B': [8, 10, 12]})
df1
df2
pd.concat((df1, df2), axis=0) # axis=0 specifies a vertical stick, i.e., on the columns
```
Notice that the indexes were simply joined together? This may or may not be what you want. To reset the index, you can specify the argument `ignore_index=True`:
```
pd.concat((df1, df2), axis=0, ignore_index=True)
```
Use `axis=1` to stick together horizontally:
```
pd.concat((df1, df2), axis=1, ignore_index=True)
```
You are not limited to just two dataframes, you can concatenate as many as you want:
```
pd.concat((df1, df2, df1, df2), axis=0, ignore_index=True)
```
### Joining DataFrames with `pd.merge()`
`pd.merge()` gives you the ability to "join" dataframes using different rules (just like with SQL if you're familiar with it). You can use `df.merge()` to join dataframes based on shared `key` columns. Methods include:
- "inner join"
- "outer join"
- "left join"
- "right join"
See this great [cheat sheet](https://pandas.pydata.org/pandas-docs/stable/getting_started/comparison/comparison_with_sql.html#compare-with-sql-join) and [these great animations](https://github.com/gadenbuie/tidyexplain) for more insights.
```
df1 = pd.DataFrame({"name": ['Magneto', 'Storm', 'Mystique', 'Batman', 'Joker', 'Catwoman', 'Hellboy'],
'alignment': ['bad', 'good', 'bad', 'good', 'bad', 'bad', 'good'],
'gender': ['male', 'female', 'female', 'male', 'male', 'female', 'male'],
'publisher': ['Marvel', 'Marvel', 'Marvel', 'DC', 'DC', 'DC', 'Dark Horse Comics']})
df2 = pd.DataFrame({'publisher': ['DC', 'Marvel', 'Image'],
'year_founded': [1934, 1939, 1992]})
```

An "inner" join will return all rows of `df1` where matching values for "publisher" are found in `df2`:
```
pd.merge(df1, df2, how="inner", on="publisher")
```

An "outer" join will return all rows of `df1` and `df2`, placing NaNs where information is unavailable:
```
pd.merge(df1, df2, how="outer", on="publisher")
```

Return all rows from `df1` and all columns of `df1` and `df2`, populated where matches occur:
```
pd.merge(df1, df2, how="left", on="publisher")
```

```
pd.merge(df1, df2, how="right", on="publisher")
```
There are many ways to specify the `key` to join dataframes on, you can join on index values, differently named columns, etc. Another helpful argument is the `indicator` argument which will add a column to the result telling you where matches were found in the dataframes:
```
pd.merge(df1, df2, how="outer", on="publisher", indicator=True)
```
By the way, you can use `pd.concat()` to do a simple "inner" or "outer" join on multiple dataframes at once. It's less flexible than merge, but can be useful sometimes.
## 5. More DataFrame Operations
<hr>
### Applying Custom Functions
There will be times when you want to apply a function that is not built-in to Pandas. For this, we also have methods:
- `df.apply()`, applies a function column-wise or row-wise across a dataframe (the function must be able to accept/return an array)
- `df.applymap()`, applies a function element-wise (for functions that accept/return single values at a time)
- `series.apply()`/`series.map()`, same as above but for Pandas series
For example, say you want to use a numpy function on a column in your dataframe:
```
df = pd.read_csv('data/cycling_data.csv')
df[['Time', 'Distance']].apply(np.sin)
```
Or you may want to apply your own custom function:
```
def seconds_to_hours(x):
    """Convert a duration in seconds to hours."""
    return x / 3600
df[['Time']].apply(seconds_to_hours)
```
This may have been better as a lambda function...
```
df[['Time']].apply(lambda x: x / 3600)
```
You can even use functions that require additional arguments. Just specify the arguments in `.apply()`:
```
def convert_seconds(x, to="hours"):
    """Convert a duration in seconds to hours (default) or minutes.

    Returns None for an unrecognised `to` value — original tutorial
    behaviour kept deliberately.
    """
    if to == "hours":
        return x / 3600
    elif to == "minutes":
        return x / 60
df[['Time']].apply(convert_seconds, to="minutes")
```
Some functions only accept/return a scalar:
```
int(3.141)
float([3.141, 10.345])
```
For these, we need `.applymap()`:
```
df[['Time']].applymap(int)
```
However, there are often "vectorized" versions of common functions like this already available, which are much faster. In the case above, we can use `.astype()` to change the dtype of a whole column quickly:
```
time_applymap = %timeit -q -o -r 3 df[['Time']].applymap(float)
time_builtin = %timeit -q -o -r 3 df[['Time']].astype(float)
print(f"'astype' is {time_applymap.average / time_builtin.average:.2f} faster than 'applymap'!")
```
### Grouping
Often we are interested in examining specific groups in our data. `df.groupby()` allows us to group our data based on a variable(s).
```
df = pd.read_csv('data/cycling_data.csv')
df
```
Let's group this dataframe on the column `Name`:
```
dfg = df.groupby(by='Name')
dfg
```
What is a `DataFrameGroupBy` object? It contains information about the groups of the dataframe:

The groupby object is really just a dictionary of index-mappings, which we could look at if we wanted to:
```
dfg.groups
```
We can also access a group using the `.get_group()` method:
```
dfg.get_group('Afternoon Ride')
```
The usual thing to do however, is to apply aggregate functions to the groupby object:

```
dfg.mean()
```
We can apply multiple functions using `.aggregate()`:
```
dfg.aggregate(['mean', 'sum', 'count'])
```
And even apply different functions to different columns:
```
def num_range(x):
    """Return the range (max - min) of an array-like with .max()/.min() methods."""
    return x.max() - x.min()
dfg.aggregate({"Time": ['max', 'min', 'mean', num_range],
"Distance": ['sum']})
```
By the way, you can use aggregate for non-grouped dataframes too. This is pretty much what `df.describe` does under-the-hood:
```
df.agg(['mean', 'min', 'count', num_range])
```
### Dealing with Missing Values
Missing values are typically denoted with `NaN`. We can use `df.isnull()` to find missing values in a dataframe. It returns a boolean for each element in the dataframe:
```
df.isnull()
```
But it's usually more helpful to get this information by row or by column using the `.any()` or `.info()` method:
```
df.info()
df[df.isnull().any(axis=1)]
```
When you have missing values, we usually either drop them or impute them. You can drop missing values with `df.dropna()`:
```
df.dropna()
```
Or you can impute ("fill") them using `.fillna()`. This method has various options for filling, you can use a fixed value, the mean of the column, the previous non-nan value, etc:
```
df = pd.DataFrame([[np.nan, 2, np.nan, 0],
[3, 4, np.nan, 1],
[np.nan, np.nan, np.nan, 5],
[np.nan, 3, np.nan, 4]],
columns=list('ABCD'))
df
df.fillna(0) # fill with 0
df.fillna(df.mean()) # fill with the mean
df.fillna(method='bfill') # backward (upwards) fill from non-nan values
df.fillna(method='ffill') # forward (downward) fill from non-nan values
```
Finally, sometimes I use visualizations to help identify (patterns in) missing values. One thing I often do is print a heatmap of my dataframe to get a feel for where my missing values are. If you want to run this code, you may need to install `seaborn`:
```sh
conda install seaborn
```
```
import seaborn as sns
sns.set(rc={'figure.figsize':(7, 7)})
df
sns.heatmap(df.isnull(), cmap='viridis', cbar=False);
# Generate a larger synthetic dataset for demonstration
np.random.seed(2020)
npx = np.zeros((100,20))
mask = np.random.choice([True, False], npx.shape, p=[.1, .9])
npx[mask] = np.nan
sns.heatmap(pd.DataFrame(npx).isnull(), cmap='viridis', cbar=False);
```
| github_jupyter |
```
import sys,os
sys.path.append('../')
from deep_rl import *
import matplotlib.pyplot as plt
import torch
from tqdm.notebook import trange, tqdm
import random
import numpy as np
import time
%load_ext autoreload
%reload_ext autoreload
%autoreload 2
select_device(0)
def dqn_feature(hu=676, **kwargs):
    """Build and train a DQN agent, returning it once max_steps is reached.

    Parameters
    ----------
    hu : int
        Hidden units in the single fully-connected layer of the value net.
    **kwargs
        Forwarded into the agent `Config` (must include `game`).

    NOTE(review): indentation was reconstructed from a flattened notebook
    dump; the training loop mirrors deep_rl's run_steps(), modified to
    return the agent instead of closing it — confirm against the project.
    """
    generate_tag(kwargs)
    kwargs.setdefault('log_level', 0)
    config = Config()
    config.merge(kwargs)

    config.task_fn = lambda: Task(config.game)
    config.eval_env = config.task_fn()
    config.optimizer_fn = lambda params: torch.optim.RMSprop(params, 0.001)
    config.network_fn = lambda: VanillaNet(config.action_dim,
                                           FCBody(config.state_dim, hidden_units=(hu,)))
    config.replay_fn = lambda: Replay(memory_size=int(1e4), batch_size=10)
    config.random_action_prob = LinearSchedule(1.0, 0.1, 3e4)
    config.discount = 0.99
    config.target_network_update_freq = 200
    config.exploration_steps = 0
    config.double_q = False
    config.sgd_update_frequency = 4
    config.gradient_clip = 5
    config.eval_interval = int(5e3)
    config.max_steps = 5e4
    config.async_actor = False
    agent = DQNAgent(config)

    # Inlined run_steps() loop; exits by returning the trained agent.
    config = agent.config
    agent_name = agent.__class__.__name__
    t0 = time.time()
    while True:
        if config.save_interval and not agent.total_steps % config.save_interval:
            agent.save('data/%s-%s-%d' % (agent_name, config.tag, agent.total_steps))
        if config.log_interval and not agent.total_steps % config.log_interval:
            t0 = time.time()
        if config.eval_interval and not agent.total_steps % config.eval_interval:
            agent.eval_episodes()
        if config.max_steps and agent.total_steps >= config.max_steps:
            return agent
        agent.step()
        agent.switch_task()
# Train a baseline DQN on FourRoomsMatrix and time the run.
start = time.time()
game = 'FourRoomsMatrix'
agent = dqn_feature(game=game)
print(time.time()-start)
# Learning curve: returns vs. timesteps.
plt.figure(figsize=(18,6))
plt.plot(np.array(agent.returns)[:,0], np.array(agent.returns)[:,1], '.-')
plt.xlabel('timesteps'), plt.ylabel('returns')
plt.title('DQN performance on ' + game), plt.show()
print(agent.network)
# Extract the second layer's weights/biases (parameters()[2] and [3]).
# NOTE(review): assumes a fixed parameter ordering of the network — confirm.
weights = list(agent.network.parameters())[2]
biases = list(agent.network.parameters())[3]
weights = weights.detach().cpu().numpy().flatten()
biases = biases.detach().cpu().numpy()
# Histogram the learned weight/bias distributions.
plt.figure(figsize=(12,4))
plt.subplot(121), plt.hist(weights, bins=100)
plt.title('weights'), plt.subplot(122)
plt.hist(biases, bins=100)
plt.title('biases'), plt.show()
print(weights.shape, biases.shape)
# random shuffling
np.random.shuffle(biases)
np.random.shuffle(weights)
# Reshape back to the layer's (out, in) shape after shuffling flat.
# NOTE(review): hard-coded (676, 169) must match the trained network.
weights = np.reshape(weights, (676, 169))
print(weights.shape, biases.shape)
"""
1. Use these new weights to initialize a network.
2. Fix these weights and fine tune the following layer.
3. See learning performance and save plots.
"""
# Step 1: pack the (shuffled) tensors into a partial state_dict.
import collections
od_weights = collections.OrderedDict()
od_weights['layers.0.weight'] = torch.Tensor(weights)
od_weights['layers.0.bias'] = torch.Tensor(biases)
import pickle
# pickle.dump( od_weights, open( "storage/layer1_noshuffle.p", "wb" ) )
# od_weights = pickle.load( open( "save.p", "rb" ) )
# agent.network.load_state_dict(od_weights, strict=False)
# (duplicate import left as-is; harmless)
import pickle
# pickle.dump( od_weights, open( "tmp.p", "wb" ) )
# Overwrites the freshly built od_weights with a previously saved copy.
od_weights = pickle.load( open( "storage/layer1_noshuffle.p", "rb" ) )
# Step 2
def dsr_feature_init(ref, **kwargs):
    """Build and train a DSR agent, optionally pre-loading first-layer weights.

    Parameters
    ----------
    ref : None, -1, or a trained agent
        None  -> train from scratch;
        -1    -> load the module-global `od_weights` state dict (non-strict);
        agent -> copy that agent's network weights (non-strict).
    **kwargs
        Forwarded into the agent `Config` (must include `game`).

    NOTE(review): indentation reconstructed from a flattened notebook dump;
    the loop mirrors deep_rl's run_steps(), returning the agent at max_steps.
    """
    generate_tag(kwargs)
    kwargs.setdefault('log_level', 0)
    config = Config()
    config.merge(kwargs)

    config.task_fn = lambda: Task(config.game)
    config.eval_env = config.task_fn()
    config.c = 1
    config.optimizer_fn = lambda params: torch.optim.RMSprop(params, 0.001)
    config.network_fn = lambda: SRNet(config.action_dim, SRIdentityBody(config.state_dim), config=0)
    config.replay_fn = lambda: Replay(memory_size=int(1e5), batch_size=10)
    config.random_action_prob = LinearSchedule(1.0, 0.1, 3e4)
    config.discount = 0.99
    config.target_network_update_freq = 200
    config.exploration_steps = 0
    config.double_q = False
    config.sgd_update_frequency = 4
    config.gradient_clip = 5
    config.eval_interval = int(5e3)
    config.max_steps = 5e4
    config.async_actor = False
    agent = DSRAgent(config)

    config = agent.config
    agent_name = agent.__class__.__name__
    # Optional weight initialisation; printing shows missing/unexpected keys.
    if ref is not None:
        if ref == -1:
            print(agent.network.load_state_dict(od_weights, strict=False))
        else:
            print(agent.network.load_state_dict(ref.network.state_dict(), strict=False))
    t0 = time.time()
    while True:
        if config.save_interval and not agent.total_steps % config.save_interval:
            agent.save('data/%s-%s-%d' % (agent_name, config.tag, agent.total_steps))
        if config.log_interval and not agent.total_steps % config.log_interval:
            t0 = time.time()
        if config.eval_interval and not agent.total_steps % config.eval_interval:
            agent.eval_episodes()
        if config.max_steps and agent.total_steps >= config.max_steps:
            return agent
        agent.step()
        agent.switch_task()
def runNAgents(function, runs, store=False, freeze=0, ref=None, hu=676):
    """Run `function` `runs` times on FourRoomsMatrix and collect return curves.

    Parameters
    ----------
    function : callable
        Agent factory; called as function(game=..., freeze=..., ref=..., hu=...)
        and expected to return an object with a `.returns` list of
        (timestep, return) pairs.
    runs : int
        Number of independent runs.
    store : bool
        If True, also return the trained agents.

    Returns (t_curves, r_curves), or (agents, t_curves, r_curves) if `store`.
    """
    t_dqn = []
    r_dqn = []
    agents = [] if store else None
    for _ in range(runs):
        agent = function(game='FourRoomsMatrix', freeze=freeze, ref=ref, hu=hu)
        curve = np.array(agent.returns)
        t_dqn.append(curve[:, 0])
        r_dqn.append(curve[:, 1])
        if store:
            agents.append(agent)
    if store:
        return agents, t_dqn, r_dqn
    return t_dqn, r_dqn
# Baselines; first two runs are disabled in this session.
# r_shuffle = runNAgents(dsr_feature_init, runs=3, freeze=2, ref=-1)
# r_main_676 = runNAgents(dsr_feature_init, runs=3, freeze=0, ref=None)
# 3 DQN runs with a small (16-unit) hidden layer.
r_main_16 = runNAgents(dqn_feature, runs=3, freeze=0, ref=None, hu=16)
def plot_rewards(rewards, plot_seperate=True, clip=50000, title='unnamed'):
    """Plot return curves: raw per-run, or smoothed mean +/- std per label.

    Parameters
    ----------
    rewards : dict
        label -> (list of timestep arrays, list of return arrays), as
        produced by runNAgents.
    plot_seperate : bool
        If True, plot every run individually; otherwise plot the smoothed
        mean with a +/- 1 std band per label. (Parameter name spelling kept
        for caller compatibility.)
    clip : int
        Timestep horizon the curves are interpolated onto.

    NOTE(review): indentation reconstructed from a flattened notebook dump;
    `convolve` comes from the deep_rl star-import — confirm its semantics.
    """
    smooth = 5000
    colors = ['red', 'blue', 'green', 'm', 'k', 'y', '#999999']
    plt.figure(figsize=(18, 6), dpi=200)
    if plot_seperate:
        for k, v in rewards.items():
            for t, r in zip(v[0], v[1]):
                plt.plot(t, r, label=k)
        plt.legend(), plt.show()
        return
    for j, (k, v) in enumerate(rewards.items()):
        # Interpolate each run onto a common timestep grid, then smooth.
        r_vec = np.zeros((len(v[0]), clip - smooth + 1))
        for i, (t, r) in enumerate(zip(v[0], v[1])):
            r_vec[i, :] = convolve(np.interp(np.arange(clip), t, r), smooth)
        mean = np.mean(np.array(r_vec), axis=0)
        std = np.std(np.array(r_vec), axis=0)
        plt.plot(mean, label=k, color=colors[j])
        plt.fill_between(np.arange(0, len(mean)), mean + std, mean - std,
                         facecolor=colors[j], alpha=0.3)
    plt.xlabel('timesteps'), plt.ylabel('episodic returns')
    plt.title(title)
    plt.legend(loc='lower right'), plt.show()
# NOTE(review): r_shuffle and r_main_676 are only assigned in commented-out
# lines above — this raises NameError unless they survive from an earlier
# session. Confirm before running.
rewards_dict = {
'DQN h=(676,) - 2708 parameters': r_shuffle,
'DQN h=(676,) - 117628 parameters': r_main_676,
'DQN h=(16,) - 2708 parameters': r_main_16
}
# rewards_dict = {'avDSR, 1eps: 169 learnable params':r_dsr_rand,
# 'avDSR, 1eps: 2708 learnable params':r_dsr_abs_rand[1:],
# 'DQN, h=(676,): 117628 learnable params': r_dqn_base,
# 'DQN, h=(16,): 2788 learnable params': r_dqn_base2[1:]}
# plot_rewards(rewards_dict, plot_seperate=True)
# Aggregate (mean +/- std) comparison plot.
plot_rewards(rewards_dict, plot_seperate=False, title='3 runs on 3roomsh env')
%pwd
# Persist the collected curves for later comparison.
import pickle
with open("../storage/33-3rooms-baselines.p", 'wb') as f:
pickle.dump(rewards_dict, f, pickle.HIGHEST_PROTOCOL)
# rewards_dict = pickle.load( open( "storage/33-3rooms-baselines.p", "rb" ) )
# Sanity-check the environment renders after reset.
from deep_rl.component.fourrooms import * # CHECK
env = FourRoomsMatrix()
env.reset()
env.reset()
plt.imshow(env.render())
```
| github_jupyter |
```
import time
import pandas as pd
import numpy as np
# Lookup tables keyed by the integers the user types at the prompts;
# "NS" marks "not selected".

# city number -> [csv filename, display name]
city_con = {
1: ["chicago.csv","Chicago"],
2: ["new_york_city.csv","New York City"],
3: ["washington.csv","Washington"],
4:[0,"Exit"],
"NS":[0,"Not selected"]
}
# filter number -> description of the chosen filter
fltr_choice = {
1:"Month",
2:"Day",
3:"Show all data",
4:"Exit",
"NS":"Not selected"
}
# weekday number -> name; 0 = Saturday (consumed by day_to_letter's mod-7)
days_con = {
0: "Saturday",
1: "Sunday",
2: "Monday",
3: "Tuesday",
4: "Wednesday",
5: "Thursday",
6: "Friday",
"NS": "Not selected"
}
# month number -> [name, days in month]; February = 28 assumes a
# non-leap data year — TODO confirm against the dataset
months_con = {
1: ["January",31],
2: ["February",28],
3: ["March",31],
4: ["April",30],
5: ["May",31],
6: ["June",30],
"NS":["Not selected",0]
}
def get_filters():
    """Interactively ask the user for a city, a filter type, and a month/day.

    Returns (city, fltr, mon, dy); any value not chosen stays "NS".
    City/filter value 4 means "exit" and short-circuits later prompts.

    NOTE(review): indentation reconstructed from a flattened notebook dump.
    A non-numeric answer raises ValueError from int() — no guard existed in
    the original either.
    """
    city = dy = fltr = mon = "NS"
    print('Hello! Let\'s explore some US bikeshare data!')
    # City prompt: loop until a valid choice (1-3) or exit (4).
    while True:
        city = int(input("Would you like to see data for Chicago Enter 1, New York City Enter 2, Washington Enter 3 ,or 4 to exit:: "))
        if city == 1 or city == 2 or city == 3:
            print("You have entered ", city_con[city][1])
            break
        elif city == 4:
            print("You choosed to exit")
            break
        else:
            print("You have entered a wrong number, Kindly try again")
    # Filter-type prompt (skipped entirely when the user chose to exit).
    while True:
        if city == 4:
            break
        fltr = int(input("Would you like to filter the data by month Enter 1, day Enter 2, not at all Enter 3 or 4 to exit:: "))
        if fltr == 1 or fltr == 2 or fltr == 3:
            print("You have choosed:", fltr_choice[fltr])
            if fltr == 1:
                dy = "NS"
            elif fltr == 2:
                mon = "NS"
            else:
                dy = mon = "NS"
            break
        elif fltr == 4:
            print("You choosed to exit")
            break
        else:
            print("You have entered a wrong number, Kindly try again")
    # Month or day prompt, depending on the chosen filter.
    while True:
        if city == 4 or fltr == 3 or fltr == 4:
            break
        elif fltr == 1:
            mon = int(input("January Enter 1, February Enter 2, March Enter 3, April Enter 4, May Enter 5, June Enter 6 :: "))
            break
        elif fltr == 2:
            dy = int(input("Saturday Enter 0, Sunday Enter 1, Monday Enter 2, Tuesday Enter 3, Wednesday Enter 4, Thursday Enter 5, FridayEnter 6 :: "))
            break
        else:
            print("You have entered a wrong number, Kindly try again")
    print("\n")
    print('-'*40)
    return city, fltr, mon, dy
def day_to_letter(month, day):
    """Map a (month, day) date to its weekday name.

    Sums the days of the preceding months (module global `months_con`) and
    indexes `days_con` with day % 7, where 0 = Saturday — calibrated so that
    January 1st maps to Sunday. Assumes a non-leap year (Feb = 28).
    NOTE(review): confirm the calibration matches the dataset's year.
    """
    if month > 1:
        for m in range(1, month):
            day = day + months_con[m][1]
    return days_con[day % 7]
def load_data(city, fltr, mon, dy):
    """Load the chosen city's CSV, derive time columns, and apply the filter.

    Returns the filtered DataFrame, or 0 when the user chose to exit
    (city == 4 or fltr == 4).

    NOTE(review): indentation reconstructed from a flattened notebook dump.
    """
    if city == 4 or fltr == 4:
        return 0
    df = pd.read_csv(city_con[city][0])
    # Derive calendar columns from the parsed start timestamp.
    df['Start Time'] = pd.to_datetime(df['Start Time'])
    df['year'] = df['Start Time'].dt.year
    df['month'] = df['Start Time'].dt.month
    df['day'] = df['Start Time'].dt.day
    df['hour'] = df['Start Time'].dt.hour
    # Weekday name via the custom day_to_letter helper (pandas' own
    # dt.day_name() would be simpler, kept for identical output).
    df["dayletter"] = df[["month", "day"]].apply(lambda x: day_to_letter(*x), axis=1)
    df["trip"] = "From " + df["Start Station"] + " to " + df["End Station"]
    if fltr == 1:
        # keep only the requested month
        df = df[df['month'] == mon]
    if fltr == 2:
        # keep only the requested weekday
        df = df[df['dayletter'] == days_con[dy]]
    return df
def time_stats(df, fltr):
    """Print the most common month, weekday and start hour.

    NOTE(review): `city` is read from the notebook's global scope, not a
    parameter — confirm it is defined before calling.
    """
    if city == 4 or fltr == 4:
        return 0
    if fltr != 1:
        # month stat is meaningless when the data is already filtered to one month
        print("*) The most common month is", months_con[df["month"].value_counts().index[0]][0], ", with a total bike riders of", df["month"].value_counts().iloc[0], ".\n")
    if fltr != 2:
        # BUGFIX: was `elif`, so the weekday stat never printed when the
        # month stat did (e.g. unfiltered data, fltr == 3)
        print("*) The most common day of the week is", df["dayletter"].value_counts().index[0], ", with a total bike riders of", df["dayletter"].value_counts().iloc[0], ".\n")
    print("*) The most common start hour is", df["hour"].value_counts().index[0], ":00 , with a total bike riders of", df["hour"].value_counts().iloc[0], ".\n")
    print('-'*40)
def station_stats(df):
    """Print the most popular start/end stations and the most common route.

    NOTE(review): `city` and `fltr` are read from the notebook's global
    scope — confirm they are defined before calling.
    """
    if city == 4 or fltr == 4:
        return 0
    starts = df['Start Station'].value_counts()
    ends = df['End Station'].value_counts()
    trips = df["trip"].value_counts()
    # .iloc[0] instead of [0]: positional [0] on a string-labelled Series
    # is deprecated in modern pandas (same value here).
    print("*) The most common start station is ", starts.index[0], " and there are ", starts.iloc[0], " bike riders started out of there.\n")
    print("*) The first five start stations (sorted by the total number of users) are: ", starts.index[0:5], "\n")
    print("*) The most common end station is ", ends.index[0], " and there are ", ends.iloc[0], " bike riders ended there.\n")
    # BUGFIX: this line previously said "start stations" (copy-paste error).
    print("*) The first five end stations (sorted by the total number of users) are: ", ends.index[0:5], "\n")
    # BUGFIX: "rout" -> "route" in the output message.
    print("\n*) The most common route (same start and end stations) is ", trips.index[0], " and there are ", trips.iloc[0], " used it.\n")
    print('-'*40)
def trip_duration_stats(df):
    """Print the shortest/longest trip (with their routes) and the duration quartiles.

    Relies on the module-level ``city`` and ``fltr`` selections.
    """
    if city == 4 or fltr == 4:  # "exit" selections: nothing to report
        return 0
    durations = df["Trip Duration"]
    shortest = durations.min()
    longest = durations.max()
    # Route label of one trip achieving each extreme.
    shortest_trip = df.loc[durations == shortest, "trip"].iloc[0]
    longest_trip = df.loc[durations == longest, "trip"].iloc[0]
    print("*) The smallest trip duration is ", shortest, " seconds, for the trip ", shortest_trip, ".\n")
    print("*) The longest trip duration is ", longest, " seconds, for the trip ", longest_trip, ".\n")
    print("*) The trip duration first quartile = ", durations.quantile(.25), "\n The trip duration second quartile = ", durations.quantile(), "\n The trip duration third quartile = ", durations.quantile(.75), "\n")
    print('-'*40)
def user_stats(df):
    """Print user-type percentages and, when the columns exist, gender
    percentages and the youngest/oldest rider ages.

    Relies on the module-level ``city`` and ``fltr``. The gender/age block is
    skipped for city == 3 (presumably the dataset without Gender/Birth Year
    columns -- TODO confirm against get_filters).
    """
    if city == 4 or fltr == 4:  # "exit" selections: nothing to report
        return 0
    # FIX: `value_counts()[0]` positional fallback indexing was removed in
    # modern pandas; use .iloc throughout.
    type_counts = df["User Type"].value_counts()
    v0 = type_counts.iloc[0]
    v1 = type_counts.iloc[1]
    v00 = ((v0/(v0+v1))*100).round(2)
    v11 = ((v1/(v0+v1))*100).round(2)
    print("*) The ", type_counts.index[0], " bike riders are ", v0, ", and they are ", v00, " % of all population\n")
    print("*) The ", type_counts.index[1], " bike riders are ", v1, ", and they are ", v11, " % of all population\n")
    if city != 3:  # this city's data lacks Gender / Birth Year (see docstring)
        gender_counts = df["Gender"].value_counts()
        g0 = gender_counts.iloc[0]
        g1 = gender_counts.iloc[1]
        g00 = ((g0/(g0+g1))*100).round(2)
        g11 = ((g1/(g0+g1))*100).round(2)
        print("*) The ", gender_counts.index[0], " bike riders are ", g0, ", and they are ", g00, " % of all population\n")
        print("*) The ", gender_counts.index[1], " bike riders are ", g1, ", and they are ", g11, " % of all population\n")
        # FIX: the original hard-coded the year 2021, so the reported ages go
        # stale; derive the current year instead (pd is imported module-wide).
        current_year = pd.Timestamp.now().year
        print("*) The youngest bike rider is ", current_year-df["Birth Year"].max(), " years old.\n")
        print("*) The oldest bike rider is ", current_year-df["Birth Year"].min(), " years old.\n")
    print('-'*40)
def main():
    """Interactive loop: collect filters, load the data, print every report
    section, then offer to restart."""
    keep_going = True
    while keep_going:
        city, fltr, mon, dy = get_filters()
        df = load_data(city, fltr, mon, dy)
        # Run each report section on the filtered frame.
        time_stats(df, fltr)
        station_stats(df)
        trip_duration_stats(df)
        user_stats(df)
        answer = input('\nWould you like to restart? Enter yes or no.\n')
        if answer.lower() != 'yes':
            print("Thank you!")
            keep_going = False


if __name__ == "__main__":
    main()
# EDA cell: load the selected city's data and plot rides per weekday letter.
import pandas as pd
df = pd.read_csv(city_con[city][0])
print(df.head()) # start by viewing the first few rows of the dataset!
df['Start Time'] = pd.to_datetime(df['Start Time']) # convert the Start Time column to datetime
# One column per datetime component of the ride start.
df['year'] = df['Start Time'].dt.year
df['month'] = df['Start Time'].dt.month
df['day'] = df['Start Time'].dt.day
df['hour'] = df['Start Time'].dt.hour
df['min'] = df['Start Time'].dt.minute
df['sec'] = df['Start Time'].dt.second
# Weekday letter per ride; day_to_letter (defined in a later cell of this
# export) maps a (month, day) pair to a weekday code via days_con.
df["dayletter"] = df[["month","day"]].apply(lambda x: day_to_letter(*x),axis=1)
x=df["dayletter"].value_counts()
import matplotlib.pyplot as plt
%matplotlib inline
#only with jupyter to plot inline
x.plot(kind="bar",figsize=(14,8), color='red', edgecolor='black')
plt.xlabel(x.name)
plt.ylabel("No. of rider")
plt.title("%ss distribution" % x.name)
plt.show()
# Rebuild the datetime-derived columns, then prompt for a date to classify.
df['Start Time'] = pd.to_datetime(df['Start Time']) # convert the Start Time column to datetime
df['year'] = df['Start Time'].dt.year
df['month'] = df['Start Time'].dt.month
df['day'] = df['Start Time'].dt.day
df['hour'] = df['Start Time'].dt.hour
df['min'] = df['Start Time'].dt.minute
df['sec'] = df['Start Time'].dt.second
# Interactive probe of the day_to_letter helper below.
month = int(input("please enter a month: "))
day = int(input("please enter a day: "))
def day_to_letter(month, day):
    """Map a (month, day) date to its weekday letter.

    Converts the date to a day-of-year using the month lengths stored in
    ``months_con`` and looks the weekday up in ``days_con`` modulo 7.
    """
    day_of_year = day
    # Add the lengths of every month preceding the requested one.
    # (range(1, 1) is empty, so no special-casing of January is needed.)
    for m in range(1, month):
        day_of_year += months_con[m][1]
    return days_con[day_of_year % 7]


day_to_letter(month, day)
# EDA cell: histogram of rides across months.
df['Start Time'] = pd.to_datetime(df['Start Time']) # convert the Start Time column to datetime
df['year'] = df['Start Time'].dt.year
df['month'] = df['Start Time'].dt.month
df['day'] = df['Start Time'].dt.day
df['hour'] = df['Start Time'].dt.hour
df['min'] = df['Start Time'].dt.minute
df['sec'] = df['Start Time'].dt.second
x= df['month']
import matplotlib.pyplot as plt
%matplotlib inline
#only with jupyter to plot inline
x.hist(bins=20, figsize=(8,5), align='left', color='red', edgecolor='black')
plt.xlabel(x.name)
plt.ylabel("No. of rider")
plt.title("%ss distribution" % x.name)
plt.show()
print(x.describe())
# EDA cell: bar chart of the busiest start stations.
x= df['Start Station'].value_counts().iloc[0:20] # top 20 stations (the old comment said 10, but the slice is 0:20)
import matplotlib.pyplot as plt
%matplotlib inline
#only with jupyter to plot inline
x.plot(kind="bar",figsize=(14,8), color='red', edgecolor='black')
plt.xlabel(x.name)
plt.ylabel("No. of rider")
plt.title("%ss distribution" % x.name)
plt.show()
# Build a human-readable trip label, plot the most common trips, then print
# a few unique-station / per-month counts.
df["trip"]= "From " + df["Start Station"] + " to " + df["End Station"]
#print(df["trip"].mode())
x = df["trip"].value_counts().iloc[0:10] # the maximum 10 same trips
import matplotlib.pyplot as plt
%matplotlib inline
#only with jupyter to plot inline
x.plot(kind="bar",figsize=(14,8), color='red', edgecolor='black')
plt.xlabel(x.name)
plt.ylabel("No. of rider")
plt.title("%ss distribution" % x.name)
plt.show()
# Total rides vs number of distinct start/end stations.
print(df.shape[0])
print(df["Start Station"].unique().shape[0])
print(df["Start Station"].unique()[0])
print(df.shape[0])
print(df["End Station"].unique().shape[0])
print(df["End Station"].unique()[0])
y= df[df['month']==1] # for the month number 1
print(y.shape[0]) # number of rider at this month
import pandas as pd

# Demo cell: find the hour of day at which most Chicago rides begin.
filename = 'chicago.csv'
df = pd.read_csv(filename)
start_times = pd.to_datetime(df['Start Time'])  # parse once, reuse below
df['Start Time'] = start_times
df['hour'] = start_times.dt.hour  # hour-of-day component of each start
popular_hour = df['hour'].mode()[0]  # modal start hour
print('Most Popular Start Hour:', popular_hour)
import pandas as pd

# Demo cell: how many trips did each user type take?
filename = 'chicago.csv'
df = pd.read_csv(filename)  # load data file into a dataframe
user_types = df['User Type'].value_counts()  # trips per user type
print(user_types)
import pandas as pd

# Mapping from user-facing city name to its CSV data file.
CITY_DATA = { 'chicago': 'chicago.csv',
              'new york city': 'new_york_city.csv',
              'washington': 'washington.csv' }


def load_data(city, month, day):
    """
    Loads data for the specified city and filters by month and day if applicable.
    Args:
        (str) city - name of the city to analyze
        (str) month - name of the month to filter by, or "all" to apply no month filter
        (str) day - name of the day of week to filter by, or "all" to apply no day filter
    Returns:
        df - Pandas DataFrame containing city data filtered by month and day
    """
    df = pd.read_csv(CITY_DATA[city])  # load data file into a dataframe
    df['Start Time'] = pd.to_datetime(df['Start Time'])  # convert the Start Time column to datetime
    # extract month and day of week from Start Time to create new columns
    df['month'] = df['Start Time'].dt.month
    # BUG FIX: Series.dt.weekday_name was removed in pandas 1.0;
    # dt.day_name() is the supported replacement and returns the same
    # capitalized names ("Monday", ...).
    df['day_of_week'] = df['Start Time'].dt.day_name()
    if month != 'all':  # filter by month if applicable
        # use the index of the months list to get the corresponding int
        months = ['january', 'february', 'march', 'april', 'may', 'june']
        month = months.index(month) + 1
        df = df[df['month'] == month]  # filter by month to create the new dataframe
    if day != 'all':  # filter by day of week if applicable
        df = df[df['day_of_week'] == day.title()]  # filter by day of week to create the new dataframe
    return df
# NOTE(review): time_stats is defined earlier in this export as
# time_stats(df, fltr); this call is missing the second argument and will
# raise TypeError if executed.
time_stats(df)
```
| github_jupyter |
```
# installing required module
!pip install fuzzy-c-means
import cv2
import numpy as np
import math
import bisect
from google.colab.patches import cv2_imshow
from skimage import morphology
from sklearn.cluster import KMeans
from fcmeans import FCM
def imadjust(src, tol=1, vin=[0,255], vout=(0,255)):
    """Contrast-stretch a single-channel 8-bit image (port of MATLAB imadjust).

    Clips the `tol` percent darkest/brightest pixel mass, then linearly maps
    the remaining intensity range onto `vout`.

    # src : input one-layer image (numpy array, values assumed in 0..255)
    # tol : tolerance, from 0 to 100.
    # vin : src image bounds
    # vout : dst image bounds
    # return : output img
    """
    # BUG FIX: the original wrote the computed bounds back into the mutable
    # default list `vin`, mutating shared state across calls; read into locals.
    # BUG FIX: np.int (removed in NumPy 1.24) is no longer used.
    lo, hi = vin[0], vin[1]
    tol = max(0, min(100, tol))
    if tol > 0:
        # Histogram + cumulative histogram over the 256 intensity levels
        # (vectorized; the original filled the histogram pixel by pixel).
        hist, _ = np.histogram(src, bins=256, range=(0, 256))
        cum = np.cumsum(hist)
        # Bounds: intensities where tol% of the pixel mass is clipped off.
        total = src.shape[0] * src.shape[1]
        low_bound = total * tol / 100
        upp_bound = total * (100 - tol) / 100
        # searchsorted(..., side='left') is equivalent to bisect.bisect_left.
        lo = int(np.searchsorted(cum, low_bound, side='left'))
        hi = int(np.searchsorted(cum, upp_bound, side='left'))
    # Linear stretch with the original's rounding, int(x + 0.5); note that
    # hi == lo divides by zero, exactly as the original code did.
    scale = (vout[1] - vout[0]) / (hi - lo)
    vs = np.maximum(src.astype(np.int64) - lo, 0)
    vd = np.minimum((vs * scale + 0.5).astype(np.int64) + vout[0], vout[1])
    return vd.astype(src.dtype)
def matlab_fspecial(typex = "motion", len = 9, theta = 0):
    # h = fspecial('motion',len,theta)
    # NOTE(review): this port appears broken as written -- `kernel_size` and
    # `img` are not defined in this function (they would have to come from
    # globals), and np.zeros((len, theta)) with the default theta=0 creates an
    # empty (9, 0) array. The parameter `len` also shadows the builtin.
    # Verify against MATLAB's fspecial('motion', len, theta) before use.
    if typex == 'motion':
        # Create the vertical kernel.
        kernel_v = np.zeros((len, theta))
        # Fill the middle row with ones.
        kernel_v[:, int((kernel_size - 1)/2)] = np.ones(kernel_size)
        # Normalize.
        kernel_v /= kernel_size
        # Apply the vertical kernel.
        motion_blur = cv2.filter2D(img, -1, kernel_v)
        return motion_blur
# equal to mat2gray on matlab
# https://stackoverflow.com/questions/39808545/implement-mat2gray-in-opencv-with-python
def matlab_mat2gray(A, alpha = False, beta = False):
    """Intended equivalent of MATLAB's mat2gray (see link above); returns a
    uint8 array normalized with cv2.normalize(NORM_MINMAX)."""
    # NOTE(review): the branch logic looks inverted -- passing an explicit
    # alpha/beta replaces it with 0/255, while the default (False) computes it
    # from the data. Harmless for the calls in this file, which always use the
    # defaults, but verify before calling with explicit bounds.
    if not alpha:
        alpha = min(A.flatten())
    else:
        alpha = 0
    if not beta:
        beta = max(A.flatten())
    else:
        beta = 255
    I = A
    # cv2.normalize writes the min/max-stretched result into I (which aliases A).
    cv2.normalize(A, I, alpha , beta ,cv2.NORM_MINMAX)
    I = np.uint8(I)
    return I
def matlab_strel_disk(r1):
    """Equivalent of MATLAB strel('disk', r1): a flat disk-shaped footprint."""
    from skimage.morphology import disk
    return disk(r1)
def matlab_strel_ball(r1, r2):
    """Footprint used where the code wants a 'ball'/circle structuring element.

    NOTE: despite the name, this returns an octagon footprint, not a ball.
    """
    from skimage.morphology import octagon
    return octagon(r1, r2)
# function to resize image
def resize_img(file, size = 200):
    """Resize an image to width `size` pixels, preserving its aspect ratio."""
    height = np.shape(file)[0]   # image height
    width = np.shape(file)[1]    # image width
    target_width = size
    target_height = math.ceil(target_width * (height / width))
    return cv2.resize(file, (target_width, target_height))
# get filename from path
def getfilename(path, ext = False):
    """Return the file name from `path`; the extension is stripped unless
    ext=True. Uses ntpath so Windows-style paths are also handled."""
    import ntpath
    import os
    base = ntpath.basename(path)
    return base if ext else os.path.splitext(base)[0]
def scanFolder(path = './', max_file_each_folder = "all", verbose = False):
    """Recursively collect image files (.png / .jpg / .tif) under `path`.

    Args:
        path: root directory to walk.
        max_file_each_folder: "all", or a number limiting files per folder.
        verbose: print every collected path and its parent-folder name.
    Returns:
        (files_path, in_folder, total_files, file_scanned) where
        in_folder[i] is the parent-folder name of files_path[i], and the last
        two values are stringified counts (all files seen vs files collected).
    """
    import os.path  # untuk cek file
    files_path = []
    in_folder = []
    # BUG FIX: the original subtracted the per-folder directory count from the
    # file count; the total number of files is simply the sum of len(files).
    total_files = str(sum(len(f2) for _r2, _d2, f2 in os.walk(path)))
    # r=root, d=directories, f = files
    for r, d, f in os.walk(path):
        # optionally cap how many files are taken from each folder
        if max_file_each_folder != "all":
            f = f[:int(max_file_each_folder)]
        for file in f:
            # BUG FIX: substring matching via find() also matched names like
            # "x.png.bak"; endswith restricts the match to real extensions.
            if file.endswith(('.png', '.jpg', '.tif')):
                files_path.append(os.path.join(r, file))
                in_folder.append(os.path.basename(os.path.dirname(files_path[-1])))
    if verbose:
        for f, i in zip(files_path, in_folder):
            print(f)
            print(i)
    file_scanned = str(len(files_path))
    return files_path, in_folder, total_files, file_scanned
# // END OF scanFolder
def performance_evaluation(ground_truth, segmented_img):
    """Compare a binary segmentation against its ground truth.

    Builds the four confusion images (TP / FP / FN / TN) with bitwise ops,
    prints the confusion matrix and derived metrics as HTML tables, and
    returns the metrics DataFrame. Both inputs are expected to be binary
    uint8 masks with values 0 / 255 (hence the division by 255 below).
    """
    verbose_mode = False #debug
    # confusion images from bitwise combinations of truth and prediction
    iGT = ground_truth
    iPM = segmented_img
    iTP = cv2.bitwise_and(iGT, iPM)
    iFN = np.subtract(iGT,iTP)
    iFP = np.subtract(iPM,iTP)
    iTN = cv2.bitwise_not(cv2.bitwise_or(iGT, iPM))
    # pixel counts (255 per set pixel)
    FN = np.sum(iFN)/255
    FP = np.sum(iFP)/255
    TP = np.sum(iTP)/255
    TN = np.sum(iTN)/255
    # derived metrics
    acc = (TP+TN)/(TP+TN+FP+FN)
    spec = TN/(TN+FP)
    prec = TP/(TP+FP)
    recall = TP/(TP+FN)
    # chance-corrected agreement (kappa index)
    j = (((TP+TN)/(TP+TN+FP+FN))-((((TP+FN)*(TP+FP))+((TN+FN)*(TN+FP)))/(TP+TN+FP+FN)**2))/(1-((((TP+FN)*(TP+FP))+((TN+FN)*(TN+FP)))/(TP+TN+FP+FN)**2))
    # BUG FIX: Dice = 2TP / (2TP + FP + FN). The original denominator
    # 2*(TP+FP+FN) actually yields the Jaccard index TP/(TP+FP+FN).
    dc = (2*TP)/(2*TP+FP+FN)
    # render confusion matrix and metrics as HTML tables
    import pandas as pd
    from IPython.display import display, HTML
    df = pd.DataFrame(
        {
            "Pred. (1)": [str(TP) + " (TP)", str(FP) + " (FP)"],
            "Pred. (0)": [str(FN) + " (FN)", str(TN) + " (TN)"]
        },index=["Actu. (1)", "Actu. (0)"])
    display(HTML(df.to_html()))
    df = pd.DataFrame(
        {
            "Accuracy (A)": [acc],
            "Specificity (S)": [spec],
            "Precision (P)": [prec],
            "Recall (R)": [recall],
            "Kappa Index (J)": [j],
            "Dice coefficiet (DC),": [dc],
        })
    display(HTML(df.to_html()))
    # optional visual debugging of the four confusion images
    if verbose_mode:
        # show
        print("(a) FN = Ground Truth")
        cv2_imshow(resize_img(iFN,500))
        print("(b) FP = Segmented Image")
        cv2_imshow(resize_img(iFP,500))
        print("(c) TP = Correct Region")
        cv2_imshow(resize_img(iTP,500))
        print("(d) TN = True Negative")
        cv2_imshow(resize_img(iTN,500))
    return df
# original script of vogado's segmentation method
# clustering using k-means
def wbc_vogado(f, debug_mode = False):
    """Segment white-blood-cell nuclei in a BGR image `f` (Vogado's method).

    Pipeline: CIELAB / CMY colour conversions -> contrast adjust + blur /
    median filter -> subtract the M and b* channels -> k-means (k=3) on the
    result -> keep the highest-centre cluster -> morphological clean-up.
    Returns the binary nuclei mask. `debug_mode` shows every intermediate.
    """
    image_lab = int(0)
    image_rgb = f # send into figure (a)]
    # time measurement
    import time
    start_time = time.time()
    # pre-processing step, convert rgb into CIELAB (L*a*b)
    image_lab = cv2.cvtColor(image_rgb, cv2.COLOR_BGR2Lab);
    L = image_lab[:,:,0]
    A = image_lab[:,:,1]
    B = image_lab[:,:,2] # the bcomponent
    lab_y = B # send into figure (c)
    AD = cv2.add(L,B)
    # f (bgr)
    r = f[:,:,2] # red channel (rgb)
    r = imadjust(r)
    g = f[:,:,1]
    g = imadjust(g)
    b = f[:,:,0]
    b = imadjust(b)
    # CMY components are complements of the contrast-adjusted RGB channels.
    c = np.subtract(255,r)
    c = imadjust(c)
    m = np.subtract(255,g)
    cmyk_m = m # send into figure (b)
    m = cv2.blur(m,(10,10)) # updated in 13/04/2016 - 6:15
    cmyk_m_con_med = m # send into figure (d)
    m = imadjust(m)
    y = np.subtract(255,b)
    y = imadjust(y)
    AD = matlab_mat2gray(B)
    AD = cv2.medianBlur(AD,7)
    lab_y_con_med = AD # send into figure (e)
    # subtract the M and b
    sub = cv2.subtract(m,AD)
    img_subt = sub # send into figure (f)
    CMY = np.stack((c,m,y), axis=2)
    F = np.stack((r, g, b), axis=2)
    ab = CMY # generate CMY color model
    nrows = (np.shape(f)[0]) # image height
    ncols = (np.shape(f)[1])
    # reshape into one single array
    ab = ab.flatten()
    x = nrows
    y = ncols
    data = sub.flatten() # sub = result of subtraction M and b, put them into one long array
    ## step 2 - clustering
    nColors = 3 # Number of clusters (k)
    kmeans = KMeans(n_clusters=nColors, random_state=0)
    kmeans.fit_predict(data.reshape(-1, 1))
    # cluster_idx, cluster_center = kmeans.cluster_centers_
    cluster_idx = kmeans.labels_ # index result of kmeans
    cluster_center = kmeans.cluster_centers_ # position of cluster center
    pixel_labels = np.reshape(cluster_idx, (nrows, ncols));
    pixel_labels = np.uint8(pixel_labels)
    # the problem is here,
    # NOTE(review): cluster_center has shape (3, 1), so .tolist() yields a
    # list of one-element lists; .index(tmp[i]) then searches for a scalar in
    # that list, which is expected to raise ValueError. The likely intent is
    # cluster_center.flatten().tolist().index(tmp[i]) -- verify before use.
    tmp = np.sort(cluster_center.flatten())
    idx = np.zeros((len(tmp), 1))
    for i in range(len(tmp)):
        idx[i] = cluster_center.tolist().index(tmp[i])
    nuclei_cluster = idx[2] # sort asc, nuclei cluster is always who has higher value
    A = np.zeros((nrows, ncols), dtype=np.uint8)
    # print(np.shape(A))
    # Binarize: 255 where the pixel belongs to the nuclei cluster, else 0.
    for row in range(nrows):
        for col in range(ncols):
            # print(" pixel_labels[row,col] = ", row, col)
            if pixel_labels[row,col] == nuclei_cluster:
                A[row,col] = 255
            else:
                A[row,col] = 0
    ## step 3 - post-processing
    img_clustering = A # send into figure (x)
    img_clustering = imadjust(img_clustering)
    sed = matlab_strel_disk(7) # disk
    see = matlab_strel_ball(3,3) #circle
    A = cv2.dilate(A,sed)
    # erosion
    A = cv2.erode(A, see)
    # remove area < 800px
    A = morphology.area_opening(A, area_threshold=800*3, connectivity=1) # vogado mention he use 800px
    img_morpho = A # send into figure (g)
    # debug mode
    if(debug_mode):
        # resize image into width 200px
        ratio = ncols/nrows
        t_width = 200
        t_height = math.ceil(t_width * ratio)
        print("(a) Original")
        cv2_imshow(resize_img(image_rgb, t_width))
        print("(b) M from CMYK")
        cv2_imshow(resize_img(cmyk_m, t_width))
        print("(c) *b from CIELAB")
        cv2_imshow(resize_img(lab_y, t_width))
        print("(d) M con adj + med(7x7)")
        cv2_imshow(resize_img(cmyk_m_con_med, t_width))
        print("(e) *b con adj + med(7x7)")
        cv2_imshow(resize_img(lab_y_con_med, t_width))
        print("(f) *b - M")
        cv2_imshow(resize_img(img_subt, t_width))
        print("(x) clustering" )
        cv2_imshow(resize_img(img_clustering, t_width))
        print("(g) Morphological Ops.")
        cv2_imshow(resize_img(img_morpho, t_width))
    print("--- %s seconds ---" % (time.time() - start_time))
    return img_morpho
# modified vogado's segmentaion
# segmentation: fcm
def wbc_vogado_modified(f, debug_mode = False):
    """Variant of wbc_vogado that clusters with fuzzy c-means (FCM) instead
    of k-means; the rest of the pipeline is identical. Returns the binary
    nuclei mask. `debug_mode` shows every intermediate image."""
    image_lab = int(0)
    image_rgb = f # send into figure (a)
    # time measurement
    import time
    start_time = time.time()
    # pre-processing step, convert rgb into CIELAB (L*a*b)
    image_lab = cv2.cvtColor(image_rgb, cv2.COLOR_BGR2Lab);
    L = image_lab[:,:,0]
    A = image_lab[:,:,1]
    B = image_lab[:,:,2] # the bcomponent
    lab_y = B # send into figure (c)
    AD = cv2.add(L,B)
    # f (bgr)
    r = f[:,:,2] # red channel (rgb)
    r = imadjust(r)
    g = f[:,:,1]
    g = imadjust(g)
    b = f[:,:,0]
    b = imadjust(b)
    # CMY components are complements of the contrast-adjusted RGB channels.
    c = np.subtract(255,r)
    c = imadjust(c)
    m = np.subtract(255,g)
    cmyk_m = m # send into figure (b)
    # add median filter into M component of CMYK
    m = cv2.blur(m,(10,10)) # updated in 13/04/2016 - 6:15
    cmyk_m_con_med = m # send into figure (d)
    m = imadjust(m)
    y = np.subtract(255,b)
    y = imadjust(y)
    AD = matlab_mat2gray(B)
    AD = cv2.medianBlur(AD,7)
    lab_y_con_med = AD # send into figure (e)
    # subtract the M and b
    sub = cv2.subtract(m,AD)
    img_subt = sub # send into figure (f)
    CMY = np.stack((c,m,y), axis=2)
    F = np.stack((r, g, b), axis=2)
    ab = CMY # generate CMY color model
    nrows = (np.shape(f)[0]) # image height
    ncols = (np.shape(f)[1])
    # reshape into one single array
    ab = ab.flatten()
    x = nrows
    y = ncols
    data = sub.flatten() # sub = result of subtraction M and b, put them into one long array
    ## step 2 - clustering
    nColors = 3 # Number of clusters (k)
    # fit the fuzzy-c-means
    fcm = FCM(n_clusters=nColors)
    fcm.fit(data.reshape(-1, 1))
    # outputs: hard labels from the membership matrix, plus cluster centres
    cluster_idx = fcm.u.argmax(axis=1)
    cluster_center = fcm.centers
    kmeans = fcm
    pixel_labels = np.reshape(cluster_idx, (nrows, ncols));
    pixel_labels = np.uint8(pixel_labels)
    # the problem is here,
    # NOTE(review): same issue as in wbc_vogado -- cluster_center.tolist() is
    # a list of one-element lists, so .index(tmp[i]) with a scalar is expected
    # to raise ValueError; flatten() before tolist() is the likely intent.
    tmp = np.sort(cluster_center.flatten())
    idx = np.zeros((len(tmp), 1))
    for i in range(len(tmp)):
        idx[i] = cluster_center.tolist().index(tmp[i])
    nuclei_cluster = idx[2] # sort asc, nuclei cluster is always who has higher value
    A = np.zeros((nrows, ncols), dtype=np.uint8)
    # print(np.shape(A))
    # Binarize: 255 where the pixel belongs to the nuclei cluster, else 0.
    for row in range(nrows):
        for col in range(ncols):
            # print(" pixel_labels[row,col] = ", row, col)
            if pixel_labels[row,col] == nuclei_cluster:
                A[row,col] = 255
            else:
                A[row,col] = 0
    ## step 3 - post-processing
    img_clustering = A # send into figure (x)
    img_clustering = imadjust(img_clustering)
    # dilation (thing goes weird here)
    sed = matlab_strel_disk(7) # disk
    see = matlab_strel_ball(3,3) #circle
    A = cv2.dilate(A,sed)
    # erosion
    A = cv2.erode(A, see)
    # remove area < 800px
    A = morphology.area_opening(A, area_threshold=800*3, connectivity=1) # vogado mention he use 800px
    img_morpho = A # send into figure (g)
    # debug mode
    if(debug_mode):
        # resize image into width 200px
        ratio = ncols/nrows
        t_width = 200
        t_height = math.ceil(t_width * ratio)
        print("(a) Original")
        cv2_imshow(resize_img(image_rgb, t_width))
        print("(b) M from CMYK")
        cv2_imshow(resize_img(cmyk_m, t_width))
        print("(c) *b from CIELAB")
        cv2_imshow(resize_img(lab_y, t_width))
        print("(d) M con adj + med(7x7)")
        cv2_imshow(resize_img(cmyk_m_con_med, t_width))
        print("(e) *b con adj + med(7x7)")
        cv2_imshow(resize_img(lab_y_con_med, t_width))
        print("(f) *b - M")
        cv2_imshow(resize_img(img_subt, t_width))
        print("(x) clustering")
        cv2_imshow(resize_img(img_clustering, t_width))
        print("(g) Morphological Ops.")
        cv2_imshow(resize_img(img_morpho, t_width))
    print("--- %s seconds ---" % (time.time() - start_time))
    return img_morpho
# SET THIS
# path = folder contain image dataset and groundtruth
# gt_path = ground truth folder inside active dir
# data = data folder inside active dir
path = "drive/My Drive/ALL_IDB2/"
gt_path = path + "gt/"
target_path = path + "data/"
# here the loop
# scanFolder(target_path, <limit image tested ["all"/int]>, <verbose>)
# (here limited to 1 file per folder, verbose on)
files_path, in_folder, total_files, file_scanned = scanFolder(target_path, 1, 1)
# For each image: load it and its ground truth, binarize the ground truth,
# run both segmentation variants, and print their evaluation tables.
for f in files_path:
    original_image = cv2.imread(f)
    # load grount truth (same base name, .png, in the gt folder)
    gt_file_path = gt_path + getfilename(f) + ".png"
    gt_image = cv2.imread(gt_file_path)
    # convert gt into binary
    gray = cv2.cvtColor(gt_image, cv2.COLOR_BGR2GRAY)
    (thresh, gt_image) = cv2.threshold(gray, 1, 255, cv2.THRESH_BINARY)
    # calculte dc
    print("kmeans:")
    kmeans = wbc_vogado(original_image, False)
    performance_evaluation(gt_image, kmeans)
    print("fcm:")
    fcm = wbc_vogado_modified(original_image, False)
    performance_evaluation(gt_image, fcm)
    # show gt_image and both segmentation results side by side
    print("original image:")
    cv2_imshow(resize_img(original_image,250))
    print("ground truth image:")
    cv2_imshow(resize_img(gt_image,250))
    print("KM final result:")
    cv2_imshow(resize_img(kmeans,250))
    print("FCM final result:")
    cv2_imshow(resize_img(fcm,250))
```
| github_jupyter |
```
import pandas as pd
# prefix-less option keys rely on pandas' substring option matching
pd.set_option('max_colwidth', 400)
# Per-report test metrics for each embedding (GloVe / fastText), each with
# and without back-translation, at paragraph level ("parag") and sequence
# level ("seq").
df_glove_parag = pd.read_csv("../results/final_results_08_03/test_results_glove_preprocessed_reports_08_03.csv")
df_glove_no_back_parag = pd.read_csv("../results/final_results_08_03/test_results_glove_no_back_preprocessed_reports_08_03.csv")
df_ft_parag = pd.read_csv("../results/final_results_08_03/test_results_fastText_preprocessed_reports_08_03.csv")
df_ft_no_back_parag = pd.read_csv("../results/final_results_08_03/test_results_fastText_no_back_preprocessed_reports_08_03.csv")
df_glove_seq = pd.read_csv("../results/final_results_08_03/test_results_glove_seq_filltered_reports_08_03.csv")
df_glove_no_back_seq = pd.read_csv("../results/final_results_08_03/test_results_glove_no_back_seq_filltered_reports_08_03.csv")
df_ft_seq = pd.read_csv("../results/final_results_08_03/test_results_fastText_seq_filltered_reports_08_03.csv")
df_ft_no_back_seq = pd.read_csv("../results/final_results_08_03/test_results_fastText_no_back_seq_filltered_reports_08_03.csv")
test_files = ['9.txt', '21.txt', '25.txt', '58.txt', '63.txt', '76.txt', '104.txt', '105.txt', '127.txt', '152.txt', '156.txt', '160.txt', '165.txt', '188.txt', '199.txt', '201.txt', '210.txt', '251.txt', '273.txt', '295.txt', '322.txt', '355.txt', '380.txt', '404.txt', '417.txt', '422.txt', '426.txt', '463.txt', '493.txt', '498.txt', '504.txt', '511.txt', '516.txt', '580.txt', '598.txt', '616.txt', '621.txt', '653.txt', '656.txt', '659.txt', '675.txt', '680.txt', '689.txt', '703.txt', '709.txt', '777.txt', '782.txt', '813.txt', '817.txt', '821.txt', '837.txt', '843.txt', '860.txt', '880.txt', '906.txt', '915.txt', '922.txt', '927.txt', '929.txt', '938.txt', '949.txt', '984.txt', '1001.txt', '1002.txt', '1016.txt', '1035.txt', '1044.txt', '1076.txt', '1088.txt', '1094.txt', '1100.txt', '1121.txt', '1133.txt', '1171.txt', '1184.txt', '1247.txt', '1248.txt', '1261.txt', '1284.txt', '1288.txt', '1290.txt', '1291.txt', '1302.txt', '1324.txt', '1348.txt', '1353.txt', '1376.txt', '1383.txt', '1386.txt', '1405.txt', '1407.txt', '1411.txt', '1430.txt', '1437.txt', '1441.txt', '1449.txt', '1453.txt', '1459.txt', '1475.txt', '1478.txt', '1479.txt', '1496.txt', '1501.txt', '1513.txt', '1527.txt', '1543.txt', '1554.txt', '1570.txt', '1576.txt', '1577.txt', '1600.txt', '1648.txt', '1664.txt', '1676.txt', '1702.txt', '1709.txt', '1722.txt', '1725.txt', '1727.txt', '1743.txt', '1767.txt', '1808.txt', '1818.txt', '1822.txt', '1845.txt', '1849.txt', '1859.txt', '1880.txt', '1886.txt', '1888.txt', '1890.txt', '1898.txt', '1904.txt', '1936.txt', '1945.txt', '2034.txt', '2035.txt', '2037.txt', '2044.txt', '2073.txt', '2084.txt', '2105.txt', '2133.txt', '2138.txt', '2142.txt', '2180.txt', '2187.txt', '2197.txt', '2209.txt', '2213.txt', '2226.txt', '2235.txt', '2270.txt', '2296.txt', '2341.txt', '2350.txt', '2351.txt', '2352.txt', '2379.txt', '2400.txt', '2409.txt', '2411.txt', '2421.txt', '2439.txt', '2455.txt', '2479.txt', '2506.txt', '2521.txt', '2543.txt', '2565.txt', '2582.txt', 
'2588.txt', '2589.txt', '2602.txt', '2618.txt', '2647.txt', '2653.txt', '2695.txt', '2702.txt', '2705.txt', '2711.txt', '2737.txt', '2763.txt', '2793.txt', '2801.txt', '2802.txt', '2857.txt', '2860.txt', '2882.txt', '2883.txt', '2931.txt', '2934.txt', '2943.txt', '2944.txt', '2954.txt', '2957.txt', '2999.txt', '3000.txt', '3001.txt', '3017.txt', '3035.txt', '3062.txt', '3068.txt', '3072.txt', '3074.txt', '3096.txt', '3107.txt', '3110.txt', '3111.txt', '3116.txt', '3124.txt', '3137.txt', '3140.txt', '3172.txt', '3193.txt', '3213.txt', '3219.txt', '3242.txt', '3273.txt', '3331.txt', '3348.txt', '3349.txt', '3353.txt', '3372.txt', '3379.txt', '3423.txt', '3428.txt', '3451.txt', '3457.txt', '3482.txt', '3492.txt', '3493.txt', '3514.txt', '3516.txt', '3533.txt', '3550.txt', '3589.txt', '3616.txt', '3702.txt', '3714.txt', '3741.txt', '3748.txt', '3797.txt', '3798.txt', '3822.txt', '3949.txt', '3991.txt']
# Attach the source report file name to every results table (all tables are
# row-aligned with test_files).
for _frame in (df_glove_parag, df_glove_no_back_parag, df_ft_parag,
               df_ft_no_back_parag, df_glove_seq, df_glove_no_back_seq,
               df_ft_seq, df_ft_no_back_seq):
    _frame["file"] = test_files
# Eyeball a sample of each results table (notebook display expressions).
df_glove_parag.head(20)
df_glove_no_back_parag.head(20)
df_ft_parag.head(20)
df_ft_no_back_parag.head()
df_glove_seq.head(20)
df_glove_no_back_seq.head(20)
df_ft_seq.head(20)
df_ft_no_back_seq[-20:]
def _pct_mean(series, frame):
    """sum(series) / len(frame), expressed in percent (keeps the original
    arithmetic, including the shared denominator frame)."""
    return sum(series) / len(frame) * 100


# Mean 4-gram BLEU (%) -- sequence-level variants.
print(_pct_mean(df_ft_no_back_seq.cumulative_bleu_4, df_ft_no_back_seq))
print(_pct_mean(df_ft_seq.cumulative_bleu_4, df_ft_no_back_seq))
print(_pct_mean(df_glove_no_back_seq.cumulative_bleu_4, df_ft_no_back_seq))
print(_pct_mean(df_glove_seq.cumulative_bleu_4, df_ft_no_back_seq))
# Mean 4-gram BLEU (%) -- paragraph-level variants.
print(_pct_mean(df_ft_no_back_parag.cumulative_bleu_4, df_ft_no_back_parag))
print(_pct_mean(df_ft_parag.cumulative_bleu_4, df_ft_no_back_parag))
print(_pct_mean(df_glove_no_back_parag.cumulative_bleu_4, df_ft_no_back_parag))
print(_pct_mean(df_glove_parag.cumulative_bleu_4, df_ft_no_back_parag))
# Mean ROUGE-L (%) -- sequence-level variants.
print(_pct_mean(df_ft_no_back_seq.rougeL, df_ft_no_back_seq))
print(_pct_mean(df_ft_seq.rougeL, df_ft_no_back_seq))
print(_pct_mean(df_glove_no_back_seq.rougeL, df_ft_no_back_seq))
print(_pct_mean(df_glove_seq.rougeL, df_ft_no_back_seq))
# Mean ROUGE-L (%) -- paragraph-level variants.
print(_pct_mean(df_ft_no_back_parag.rougeL, df_ft_no_back_parag))
print(_pct_mean(df_ft_parag.rougeL, df_ft_no_back_parag))
print(_pct_mean(df_glove_no_back_parag.rougeL, df_ft_no_back_parag))
print(_pct_mean(df_glove_parag.rougeL, df_ft_no_back_parag))
# Side-by-side comparison of paragraph-level vs sequence-level predictions.
new_df = df_ft_no_back_parag[["ground truth", "prediction"]]
# BUG FIX: assigning a nested list ([[...]]) turns the columns into a
# MultiIndex; a flat list of names is what is wanted here.
new_df.columns = ["ground truth", "parag_prediction"]
# pd.concat([df_human, df_fastText_rule_based_v2], axis=1)
new_df_1 = df_ft_no_back_seq[["ground truth", "prediction"]]
new_df_1.columns = ["ground truth1", "sent_prediction"]
# Keep a single ground-truth column after concatenation.
df_result = pd.concat([new_df, new_df_1], axis=1).drop(["ground truth1"], axis=1)
df_result[20:30]
# Earlier ("old") runs of the same experiments, loaded for comparison.
df_glove_parag_old = pd.read_csv("../results/final_results/test_results_update_glove_paragraph.csv")
# NOTE(review): this reads the same file as df_glove_parag_old above -- the
# "no_back" variant presumably should point at a *_no_back_* file. Verify.
df_glove_no_back_parag_old = pd.read_csv("../results/final_results/test_results_update_glove_paragraph.csv")
df_ft_parag_old = pd.read_csv("../results/final_results/test_results_update_fastText_paragraph.csv")
df_ft_no_back_parag_old = pd.read_csv("../results/final_results/test_results_update_fastText_no_back_paragraph.csv")
df_glove_seq_old = pd.read_csv("../results/final_results/test_results_update_glove_sequence.csv")
df_glove_no_back_seq_old = pd.read_csv("../results/final_results/test_results_update_glove_no_back_sequence.csv")
df_ft_seq_old = pd.read_csv("../results/final_results/test_results_update_fastText_sequence.csv")
df_ft_no_back_seq_old = pd.read_csv("../results/final_results/test_results_update_fastText_no_back_sequence.csv")
# Mean 4-gram BLEU (%).
# NOTE(review): the next four lines repeat the NEW paragraph-level numbers,
# not the _old ones -- possibly intentional for side-by-side comparison.
print(sum(df_ft_no_back_parag.cumulative_bleu_4) / len(df_ft_no_back_parag) * 100)
print(sum(df_ft_parag.cumulative_bleu_4) / len(df_ft_no_back_parag) * 100)
print(sum(df_glove_no_back_parag.cumulative_bleu_4) / len(df_ft_no_back_parag) * 100)
print(sum(df_glove_parag.cumulative_bleu_4) / len(df_ft_no_back_parag) * 100)
print(sum(df_ft_no_back_seq_old.cumulative_bleu_4) / len(df_ft_no_back_seq_old) * 100)
print(sum(df_ft_seq_old.cumulative_bleu_4) / len(df_ft_no_back_seq_old) * 100)
print(sum(df_glove_no_back_seq_old.cumulative_bleu_4) / len(df_ft_no_back_seq_old) * 100)
print(sum(df_glove_seq_old.cumulative_bleu_4) / len(df_ft_no_back_seq_old) * 100)
# Mean ROUGE-L (%) for the old runs.
print(sum(df_ft_no_back_parag_old.rougeL) / len(df_ft_no_back_parag_old) * 100)
print(sum(df_ft_parag_old.rougeL) / len(df_ft_no_back_parag_old) * 100)
print(sum(df_glove_no_back_parag_old.rougeL) / len(df_ft_no_back_parag_old) * 100)
print(sum(df_glove_parag_old.rougeL) / len(df_ft_no_back_parag_old) * 100)
print(sum(df_ft_no_back_seq_old.rougeL) / len(df_ft_no_back_seq_old) * 100)
print(sum(df_ft_seq_old.rougeL) / len(df_ft_no_back_seq_old) * 100)
print(sum(df_glove_no_back_seq_old.rougeL) / len(df_ft_no_back_seq_old) * 100)
print(sum(df_glove_seq_old.rougeL) / len(df_ft_no_back_seq_old) * 100)
```
| github_jupyter |
# Modeling and fitting
## Prerequisites
- Knowledge of spectral analysis to produce 1D On-Off datasets, [see the following tutorial](spectrum_analysis.ipynb)
- Reading of pre-computed datasets [see the MWL tutorial](analysis_mwl.ipynb)
- General knowledge on statistics and optimization methods
## Proposed approach
This is a hands-on tutorial to `~gammapy.modeling`, showing how the model, dataset and fit classes work together. As an example we are going to work with HESS data of the Crab Nebula and show in particular how to :
- perform a spectral analysis
- use different fitting backends
- access covariance matrix information and parameter errors
- compute likelihood profile
- compute confidence contours
See also: [Models gallery tutorial](models.ipynb) and `docs/modeling/index.rst`.
## The setup
```
import numpy as np
from astropy import units as u
import matplotlib.pyplot as plt
import scipy.stats as st
from gammapy.modeling import Fit
from gammapy.datasets import Datasets, SpectrumDatasetOnOff
from gammapy.modeling.models import LogParabolaSpectralModel, SkyModel
from gammapy.visualization.utils import plot_contour_line
from itertools import combinations
```
## Model and dataset
First we define the source model, here we need only a spectral model for which we choose a log-parabola
```
# Log-parabola spectral model for the Crab Nebula with typical starting values
crab_spectrum = LogParabolaSpectralModel(
    amplitude=1e-11 / u.cm ** 2 / u.s / u.TeV,
    reference=1 * u.TeV,
    alpha=2.3,
    beta=0.2,
)
# Constrain alpha to a physically reasonable range for the fit
crab_spectrum.alpha.max = 3
crab_spectrum.alpha.min = 1
crab_model = SkyModel(spectral_model=crab_spectrum, name="crab")
```
The data and background are read from pre-computed ON/OFF datasets of HESS observations, for simplicity we stack them together.
Then we set the model and fit range to the resulting dataset.
```
# Read the two ON/OFF observations and stack them into one dataset
datasets = []
for obs_id in [23523, 23526]:
    dataset = SpectrumDatasetOnOff.read(
        f"$GAMMAPY_DATA/joint-crab/spectra/hess/pha_obs{obs_id}.fits"
    )
    datasets.append(dataset)
dataset_hess = Datasets(datasets).stack_reduce(name="HESS")
# Set model and fit range
dataset_hess.models = crab_model
e_min = 0.66 * u.TeV
e_max = 30 * u.TeV
# Restrict the fit to energy bins inside [e_min, e_max]
dataset_hess.mask_fit = dataset_hess.counts.geom.energy_mask(e_min, e_max)
```
## Fitting options
First let's create a `Fit` instance:
```
fit = Fit([dataset_hess], store_trace=True)
```
By default the fit is performed using MINUIT, you can select alternative optimizers and set their option using the `optimize_opts` argument of the `Fit.run()` method. In addition we have specified to store the trace of parameter values of the fit.
Note that, for now, the covariance matrix and errors are computed only when fitting with MINUIT. However, depending on the problem, other optimizers can perform better, so sometimes it can be useful to run a pre-fit with alternative optimization methods.
For the "scipy" backend the available options are described in detail here:
https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html
```
%%time
# Fit with the scipy backend (L-BFGS-B); tolerances are forwarded via optimize_opts
scipy_opts = {"method": "L-BFGS-B", "options": {"ftol": 1e-4, "gtol": 1e-05}}
result_scipy = fit.run(backend="scipy", optimize_opts=scipy_opts)
```
For the "sherpa" backend you can choose the optimization algorithm between method = {"simplex", "levmar", "moncar", "gridsearch"}.
Those methods are described and compared in detail on http://cxc.cfa.harvard.edu/sherpa/methods/index.html.
The available options of the optimization methods are described on the following page https://cxc.cfa.harvard.edu/sherpa/methods/opt_methods.html
```
%%time
# Fit with the sherpa backend using the Nelder-Mead simplex method
sherpa_opts = {"method": "simplex", "ftol": 1e-3, "maxfev": int(1e4)}
results_simplex = fit.run(backend="sherpa", optimize_opts=sherpa_opts)
```
For the "minuit" backend see https://iminuit.readthedocs.io/en/latest/reference.html for a detailed description of the available options. If there is an entry ‘migrad_opts’, those options will be passed to [iminuit.Minuit.migrad](https://iminuit.readthedocs.io/en/latest/reference.html#iminuit.Minuit.migrad). Additionnaly you can set the fit tolerance using the [tol](https://iminuit.readthedocs.io/en/latest/reference.html#iminuit.Minuit.tol
) option. The minimization will stop when the estimated distance to the minimum is less than 0.001*tol (by default tol=0.1). The [strategy](https://iminuit.readthedocs.io/en/latest/reference.html#iminuit.Minuit.strategy) option change the speed and accuracy of the optimizer: 0 fast, 1 default, 2 slow but accurate. If you want more reliable error estimates, you should run the final fit with strategy 2.
```
%%time
# Fit with MINUIT; strategy 1 is the default speed/accuracy trade-off
minuit_opts = {"tol": 0.001, "strategy": 1}
result_minuit = fit.run(backend="minuit", optimize_opts=minuit_opts)
```
## Fit quality assessment
There are various ways to check the convergence and quality of a fit. Among them:
- Refer to the automatically-generated results dictionary
```
# Summary of each fit result (convergence flag, number of evaluations, total stat)
print(result_scipy)
print(results_simplex)
print(result_minuit)
```
- Check the trace of the fit e.g. in case the fit did not converge properly
```
result_minuit.trace
```
- Check that the fitted values and errors for all parameters are reasonable, and no fitted parameter value is "too close" - or even outside - its allowed min-max range
```
result_minuit.parameters.to_table()
```
- Plot fit statistic profiles for all fitted parameters, using `~gammapy.modeling.Fit.stat_profile()`. For a good fit and error estimate each profile should be parabolic
```
# One fit-statistic profile per free parameter; each should look parabolic
total_stat = result_minuit.total_stat
for par in dataset_hess.models.parameters:
    if par.frozen is False:
        profile = fit.stat_profile(parameter=par)
        # Plot the stat difference relative to the best-fit minimum
        plt.plot(
            profile[f"{par.name}_scan"], profile["stat_scan"] - total_stat
        )
        plt.xlabel(f"{par.unit}")
        plt.ylabel("Delta TS")
        plt.title(f"{par.name}: {par.value} +- {par.error}")
        plt.show()
        plt.close()
```
- Inspect model residuals. Those can always be accessed using `~Dataset.residuals()`, that will return an array in case a the fitted `Dataset` is a `SpectrumDataset` and a full cube in case of a `MapDataset`. For more details, we refer here to the dedicated fitting tutorials: [analysis_3d.ipynb](analysis_3d.ipynb) (for `MapDataset` fitting) and [spectrum_analysis.ipynb](spectrum_analysis.ipynb) (for `SpectrumDataset` fitting).
## Covariance and parameters errors
After the fit the covariance matrix is attached to the model. You can get the error on a specific parameter by accessing the `.error` attribute:
```
crab_model.spectral_model.alpha.error
```
As an example, this step is needed to produce a butterfly plot showing the envelope of the model taking into account parameter uncertainties.
```
# Butterfly plot: best-fit spectrum with its error band, in E^2 dN/dE units
energy_range = [1, 10] * u.TeV
crab_spectrum.plot(energy_range=energy_range, energy_power=2)
ax = crab_spectrum.plot_error(energy_range=energy_range, energy_power=2)
```
## Confidence contours
In most studies, one wishes to estimate parameters distribution using observed sample data.
A 1-dimensional confidence interval gives an estimated range of values which is likely to include an unknown parameter.
A confidence contour is a 2-dimensional generalization of a confidence interval, often represented as an ellipsoid around the best-fit value.
Gammapy offers two ways of computing confidence contours, in the dedicated methods `Fit.minos_contour()` and `Fit.stat_profile()`. In the following sections we will describe them.
An important point to keep in mind is: *what does a $N\sigma$ confidence contour really mean?* The answer is it represents the points of the parameter space for which the model likelihood is $N\sigma$ above the minimum. But one always has to keep in mind that **1 standard deviation in two dimensions has a smaller coverage probability than 68%**, and similarly for all other levels. In particular, in 2-dimensions the probability enclosed by the $N\sigma$ confidence contour is $P(N)=1-e^{-N^2/2}$.
### Computing contours using `Fit.minos_contour()`
After the fit, MINUIT offers the possibility to compute the confidence contours.
Gammapy provides an interface to this functionality through the `Fit` object using the `minos_contour` method.
Here we define a function to automate the contour production for the different parameters and confidence levels (expressed in terms of sigma):
```
def make_contours(fit, result, npoints, sigmas):
    """Compute MINOS confidence contours for every pair of the
    alpha/beta/amplitude parameters, once per requested sigma level.

    Returns a list (one entry per sigma level) of dicts mapping
    "contour_<p1>_<p2>" to {p1: [...], p2: [...]} coordinate lists.
    """
    par_names = ["alpha", "beta", "amplitude"]
    contours_per_sigma = []
    for level in sigmas:
        level_contours = {}
        for name_a, name_b in combinations(par_names, r=2):
            pts = fit.minos_contour(
                result.parameters[name_a],
                result.parameters[name_b],
                numpoints=npoints,
                sigma=level,
            )
            level_contours[f"contour_{name_a}_{name_b}"] = {
                name_a: pts[name_a].tolist(),
                name_b: pts[name_b].tolist(),
            }
        contours_per_sigma.append(level_contours)
    return contours_per_sigma
```
Now we can compute few contours.
```
%%time
# Contours for all parameter pairs at the 1 and 2 sigma levels (10 points each)
sigma = [1, 2]
cts_sigma = make_contours(fit, result_minuit, 10, sigma)
```
Then we prepare some aliases and annotations in order to make the plotting nicer.
```
# Axis labels for each parameter (amplitude is shown rescaled as phi_0)
pars = {
    "phi": r"$\phi_0 \,/\,(10^{-11}\,{\rm TeV}^{-1} \, {\rm cm}^{-2} {\rm s}^{-1})$",
    "alpha": r"$\alpha$",
    "beta": r"$\beta$",
}
# One panel per parameter pair; cx/cy extract the contour coordinates,
# rescaling the amplitude by 1e11 to match the phi_0 label above
panels = [
    {
        "x": "alpha",
        "y": "phi",
        "cx": (lambda ct: ct["contour_alpha_amplitude"]["alpha"]),
        "cy": (
            lambda ct: np.array(1e11)
            * ct["contour_alpha_amplitude"]["amplitude"]
        ),
    },
    {
        "x": "beta",
        "y": "phi",
        "cx": (lambda ct: ct["contour_beta_amplitude"]["beta"]),
        "cy": (
            lambda ct: np.array(1e11)
            * ct["contour_beta_amplitude"]["amplitude"]
        ),
    },
    {
        "x": "alpha",
        "y": "beta",
        "cx": (lambda ct: ct["contour_alpha_beta"]["alpha"]),
        "cy": (lambda ct: ct["contour_alpha_beta"]["beta"]),
    },
]
```
Finally we produce the confidence contours figures.
```
# One subplot per parameter pair, one contour line per sigma level
fig, axes = plt.subplots(1, 3, figsize=(16, 5))
colors = ["m", "b", "c"]
for p, ax in zip(panels, axes):
    xlabel = pars[p["x"]]
    ylabel = pars[p["y"]]
    for ks in range(len(cts_sigma)):
        plot_contour_line(
            ax,
            p["cx"](cts_sigma[ks]),
            p["cy"](cts_sigma[ks]),
            lw=2.5,
            color=colors[ks],
            label=f"{sigma[ks]}" + r"$\sigma$",
        )
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
plt.legend()
plt.tight_layout()
```
### Computing contours using `Fit.stat_surface()`
This alternative method for the computation of confidence contours, although more time consuming than `Fit.minos_contour()`, is expected to be more stable. It consists of a generalization of `Fit.stat_profile()` to a 2-dimensional parameter space. The algorithm is very simple:
- First, passing two arrays of parameters values, a 2-dimensional discrete parameter space is defined;
- For each node of the parameter space, the two parameters of interest are frozen. This way, a likelihood value ($-2\mathrm{ln}\,\mathcal{L}$, actually) is computed, by either freezing (default) or fitting all nuisance parameters;
- Finally, a 2-dimensional surface of $-2\mathrm{ln}(\mathcal{L})$ values is returned.
Using that surface, one can easily compute a surface of $TS = -2\Delta\mathrm{ln}(\mathcal{L})$ and compute confidence contours.
Let's see it step by step.
First of all, we can notice that this method is "backend-agnostic", meaning that it can be run with MINUIT, sherpa or scipy as fitting tools. Here we will stick with MINUIT, which is the default choice:
```
optimize_opts = {"backend": "minuit", "print_level": 0}
```
As an example, we can compute the confidence contour for the `alpha` and `beta` parameters of the `dataset_hess`. Here we define the parameter space:
```
result = result_minuit
# Parameters of interest for the 2D stat surface
par_1 = result.parameters["alpha"]
par_2 = result.parameters["beta"]
x = par_1
y = par_2
# Grid of trial values covering the region around the best fit
x_values = np.linspace(1.55, 2.7, 20)
y_values = np.linspace(-0.05, 0.55, 20)
```
Then we run the algorithm, by choosing `reoptimize=False` for the sake of time saving. In real life applications, we strongly recommend to use `reoptimize=True`, so that all free nuisance parameters will be fit at each grid node. This is the correct way, statistically speaking, of computing confidence contours, but is expected to be time consuming.
```
# reoptimize=False freezes the nuisance parameters at each grid node (fast)
stat_surface = fit.stat_surface(
    x, y, x_values, y_values, reoptimize=False, **optimize_opts
)
```
In order to easily inspect the results, we can convert the $-2\mathrm{ln}(\mathcal{L})$ surface to a surface of statistical significance (in units of Gaussian standard deviations from the surface minimum):
```
# Compute TS = -2 Delta ln(L) relative to the best-fit minimum
TS = stat_surface["stat_scan"] - result.total_stat
# Compute the corresponding statistical significance surface
gaussian_sigmas = np.sqrt(TS.T)
```
Notice that, as explained before, $1\sigma$ contour obtained this way will not contain 68% of the probability, but rather
```
# Compute the corresponding statistical significance surface
# p_value = 1 - st.chi2(df=1).cdf(TS)
# gaussian_sigmas = st.norm.isf(p_value / 2).T
```
Finally, we can plot the surface values together with contours:
```
fig, ax = plt.subplots(figsize=(8, 6))
# We choose to plot 1 and 2 sigma confidence contours
levels = [1, 2]
contours = plt.contour(gaussian_sigmas, levels=levels, colors="white")
plt.clabel(contours, fmt="%.0f$\,\sigma$", inline=3, fontsize=15)
# Show the significance surface itself, indexed by grid node
im = plt.imshow(
    gaussian_sigmas,
    extent=[0, len(x_values) - 1, 0, len(y_values) - 1],
    origin="lower",
)
fig.colorbar(im)
# Relabel the ticks with the actual parameter values
plt.xticks(range(len(x_values)), np.around(x_values, decimals=2), rotation=45)
plt.yticks(range(len(y_values)), np.around(y_values, decimals=2));
```
Note that, if computed with `reoptimize=True`, this plot would be completely consistent with the third panel of the plot produced with `Fit.minos_contour` (try!).
Finally, always remember that confidence contours are approximations. In particular, when the parameter range boundaries are close to the contour lines, the statistical meaning of the contours is not well defined. That's why we advise always choosing a parameter space that can contain the contours you're interested in.
| github_jupyter |
```
#hide
from qbism import *
```
# Tutorial
> "Chauncey Wright, a nearly forgotten philosopher of real merit, taught me when young that I must not say necessary about the universe, that we don’t know whether anything is necessary or not. So I describe myself as a bettabilitarian. I believe that we can bet on the behavior of the universe in its contact with us." (Oliver Wendell Holmes, Jr.)
QBism, as I understand it, consists of two interlocking components, one part philosophical and one part mathematical. We'll deal with the mathematical part first.
## The Math
A Von Neumann measurement consists in a choice of observable represented by a Hermitian operator $H$. Such an operator will have real eigenvalues and orthogonal eigenvectors. For example, $H$ could be the energy operator. Then the eigenvectors would represent possible energy states, and the eigenvalues would represent possible values of the energy. According to textbook quantum mechanics, which state the system ends up in after a measurement will in general be random, and quantum mechanics allows you to calculate the probabilities.
A Hermitian observable provides what is known as a "projection valued measure." Suppose our system were represented by a density matrix $\rho$. We could form the projectors $P_{i} = \mid v_{i} \rangle \langle v_{i} \mid$, where $\mid v_{i} \rangle$ is the $i^{th}$ eigenvector. Then the probability for the $i^{th}$ outcome would be given by $Pr(i) = tr(P_{i}\rho)$, and the state after measurement would be given by $\frac{P_{i} \rho P_{i}}{tr(P_{i}\rho)}$. Moreover, the expectation value of the observable $\langle H \rangle$ would be given by $tr(H\rho)$, and it amounts to a sum over the eigenvalues weighted by the corresponding probabilities.
```
import numpy as np
import qutip as qt
# Random 2x2 density matrix and Hermitian observable
d = 2
rho = qt.rand_dm(d)
H = qt.rand_herm(d)
# Eigenvalues L and eigenvectors V define the projection-valued measure
L, V = H.eigenstates()
P = [v*v.dag() for v in V]
# Born rule: Pr(i) = tr(P_i rho)
p = [(proj*rho).tr() for proj in P]
print("probabilities: %s" % p)
# <H> = tr(H rho) = sum_i lambda_i Pr(i)
print("expectation value: %.3f" % (H*rho).tr())
print("expectation value again: %.3f" % (sum([L[i]*p[i] for i in range(d)])))
```
<hr>
But there is a more general notion of measurement: a POVM (a positive operator valued measure). A POVM consists in a set of positive semidefinite operators that sum to the identity, i.e., a set $\{E_{i}\}$ such that $\sum_{i} E_{i} = I$. Positive semidefinite just means that the eigenvalues must be non-negative, so that $\langle \psi \mid E \mid \psi \rangle$ is always positive or zero for any $\mid \psi \rangle$. Indeed, keep in mind that density matrices are defined by Hermitian, positive semi-definite operators with trace $1$.
For a POVM, each *operator* corresponds to a possible outcome of the experiment, and whereas for a Von Neumann measurement, assuming no degeneracies, there would be $d$ possible outcomes, corresponding to the dimension of the Hilbert space, there can be *any* number of outcomes to a POVM measurement, as long as all the associated operators sum to the identity. The probability of an outcome, however, is similarly given by $Pr(i) = tr(E_{i}\rho)$.
If we write each $E_{i}$ as a product of so-called Kraus operators $E_{i} = A_{i}^{\dagger}A_{i}$, then the state after measurement will be: $\frac{A_{i}\rho A_{i}^{\dagger}}{tr(E_{i}\rho)}$. The Kraus operators, however, aren't uniquely defined by the POVM, and so the state after measurement will depend on its implementation: to implement POVM's, you couple your system to an auxilliary system and make a standard measurement on the latter. We'll show how to do that in a little bit!
In the case we'll be considering, however, the $\{E_{i}\}$ will be rank-1, and so the state after measurement will be $\frac{\Pi_{i}\rho \Pi_{i}}{tr(\Pi_{i}\rho)}$ as before, where $\Pi_{i}$ are normalized projectors associated to each element of the POVM (details to follow).
(For a reference, recall that spin coherent states form an "overcomplete" basis, or frame, for spin states of a given $j$ value. This can be viewed as a POVM. In this case, the POVM would have an infinite number of elements, one for each point on the sphere: and the integral over the sphere gives $1$.)
<hr>
A very special kind of POVM is a so-called SIC-POVM: a symmetric informationally complete positive operator valued measure. They've been conjectured to exist in all dimensions, and numerical evidence suggests this is indeed the case. For a given Hilbert space of dimension $d$, a SIC is a set of $d^2$ rank-one projection operators $\Pi_{i} = \mid \psi_{i} \rangle \langle \psi_{i} \mid$ such that:
$$tr(\Pi_{k}\Pi_{l}) = \frac{d\delta_{k,l} + 1}{d+1} $$
Such a set of projectors will be linearly independent, and if you rescale them to $\frac{1}{d}\Pi_{i}$, they form a POVM: $\sum_{i} \frac{1}{d} \Pi_{i} = I$.
The key point is that for any quantum state $\rho$, a SIC specifies a measurement *for which the probabilities of outcomes $p(i)$ specify $\rho$ itself*. Normally, say, in the case of a qubit, we'd have to measure the separate expectation values $(\langle X \rangle, \langle Y \rangle, \langle Z \rangle)$ to nail down the state: in other words, we'd have to repeat many times three *different* measurements. But for a SIC-POVM, the probabilities on each of the elements of the POVM fully determine the state: we're talking here about a *single* type of measurement.
<hr>
Thanks to Chris Fuchs & Co., we have a repository of SIC-POVM's in a variety of dimensions. One can download them [here](http://www.physics.umb.edu/Research/QBism/solutions.html). You'll get a zip of text files, one for each dimension: and in each text file will be a single complex vector: the "fiducial" vector. From this vector, the SIC can be derived.
In order to do this, we first define (with Sylvester) the unitary clock and shift matrices for a given dimension $d$:
$$
X = \begin{pmatrix}
0 & 0 & 0 & \cdots & 0 & 1\\
1 & 0 & 0 & \cdots & 0 & 0\\
0 & 1 & 0 & \cdots & 0 & 0\\
0 & 0 & 1 & \cdots & 0 & 0\\
\vdots & \vdots & \vdots & \ddots &\vdots &\vdots\\
0 & 0 & 0 & \cdots & 1 & 0\\
\end{pmatrix}
$$
$$
Z = \begin{pmatrix}
1 & 0 & 0 & \cdots & 0\\
0 & \omega & 0 & \cdots & 0\\
0 & 0 & \omega^2 & \cdots & 0\\
\vdots & \vdots & \vdots & \ddots & \vdots\\
0 & 0 & 0 & \cdots & \omega^{d-1}
\end{pmatrix}
$$
Where $\omega = e^{\frac{2\pi i}{d}}$.
Note that when $d=2$, this amounts to Pauli $X$ and $Z$.
```
def shift(d):
    """Return the d-dimensional shift (generalized Pauli X) operator.

    Maps basis state |i> to |i+1 mod d>; for d=2 this reduces to Pauli X.

    The original version generated every term d times (a redundant inner
    `for j` loop) and then divided the sum by d; the result is identical,
    but this builds each term exactly once.
    """
    return sum(qt.basis(d, (i + 1) % d) * qt.basis(d, i).dag() for i in range(d))
def clock(d):
    """Return the d-dimensional clock (generalized Pauli Z) operator:
    diag(1, w, w^2, ..., w^(d-1)) with w = exp(2*pi*i/d)."""
    omega = np.exp(2j * np.pi / d)
    diagonal = [omega ** k for k in range(d)]
    return qt.Qobj(np.diag(diagonal))
```
We can then define displacement operators:
$$D_{a,b} = (-e^{\frac{i\pi}{d}})^{ab}X^{b}Z^{a} $$
For $a, b$ each from $0$ to $d$.
```
def displace(d, a, b):
    """Weyl-Heisenberg displacement operator D_{a,b} = (-e^{i*pi/d})^{ab} X^b Z^a."""
    Z = clock(d)
    X = shift(d)
    phase = (-np.exp(1j * np.pi / d)) ** (a * b)
    return phase * X ** b * Z ** a
def displacement_operators(d):
    """Return a dict mapping (a, b) -> D_{a,b} for all a, b in 0..d-1.

    Insertion order (b outer, a inner) matches the original so that code
    iterating over the dict sees the operators in the same sequence.
    """
    return {(a, b): displace(d, a, b) for b in range(d) for a in range(d)}
```
Finally, if we act on the fiducial vector with each of the displacement operators, we obtain the $d^2$ pure states, whose projectors, weighted by $\frac{1}{d}$, form the SIC-POVM.
```
def sic_states(d):
    """Generate the d^2 SIC states by displacing the fiducial vector
    with every Weyl-Heisenberg displacement operator."""
    fiducial = load_fiducial(d)
    return [D * fiducial for D in displacement_operators(d).values()]
```
Cf. `load_fiducial`.
By the way, this construction works because these SIC-POVM's are covariant under the Weyl-Heisenberg group. This means is that if you apply one of those displacement operators to all the SIC states, you get the same set of SIC states back! They just switch places among themselves. (It's also worth considering the action of elements of the "Clifford group", since these operators leave the Weyl-Heisenberg group invariant or, in other words, "normalize" it.)
```
# Covariance check: applying a displacement operator to every SIC state
# permutes the set of SIC states among themselves
sic = sic_states(2)
D = displacement_operators(2)
print(sic)
print()
print([D[(1,1)]*state for state in sic])
```
As far as anyone knows, the construction seems to work for SIC's in all dimensions. It's worth noting, however, the exceptional case of $d=8$, where there is *also another* SIC-POVM covariant under the tensor product of three copies of the Pauli group ($d=2$). Cf. `hoggar_fiducial`.
We can test that a given SIC has the property:
$$tr(\Pi_{k}\Pi_{l}) = \frac{d\delta_{k,l} + 1}{d+1} $$
```
def test_sic_states(states):
    """Print |<psi_i|psi_j>|^2 for every pair alongside the SIC target
    (d*delta_ij + 1)/(d+1): 1 on the diagonal, 1/(d+1) off it."""
    d = int(np.sqrt(len(states)))
    for i, s in enumerate(states):
        for j, t in enumerate(states):
            should_be = 1 if i == j else 1/(d+1)
            print("(%d, %d): %.4f | should be: %.4f" % (i, j, np.abs(s.overlap(t)**2), should_be))
states = sic_states(2)
test_sic_states(states)
```
In the case of a two dimensional Hilbert space, the SIC-POVM states will form a regular tetrahedron in the Bloch sphere:
```
# Bloch-sphere coordinates of each SIC state: for d=2 they form a regular tetrahedron
pts = np.array([[qt.expect(qt.sigmax(), state),\
                 qt.expect(qt.sigmay(), state),\
                 qt.expect(qt.sigmaz(), state)] for state in states])
sphere = qt.Bloch()
sphere.point_size = [300]
sphere.add_points(pts.T)
sphere.add_vectors(pts)
sphere.make_sphere()
```
In general, in higher dimensions, the study of SIC's is a very interesting geometry problem involving the study of "maximal sets of complex equiangular lines," which has implications in various domains of mathematics.
```
def sic_povm(d):
    """Rescale the d^2 SIC projectors by 1/d so the elements sum to the identity."""
    return [state * state.dag() / d for state in sic_states(d)]
d = 2
ref_povm = sic_povm(d)
# POVM completeness: sum_i E_i = I
print("elements sum to identity? %s" % np.allclose(sum(ref_povm), qt.identity(d)))
```
Given a density matrix $\rho$, we can expand it in terms of the SIC-POVM elements via $tr(E_{i}\rho)$:
```
def dm_probs(dm, ref_povm):
    """Return the outcome probabilities p(i) = tr(E_i rho) of the reference POVM."""
    probs = [(element * dm).tr() for element in ref_povm]
    return np.array(probs).real
# The SIC probabilities of a random state sum to 1 and fully determine it
rho = qt.rand_dm(d)
p = dm_probs(rho, ref_povm)
print("probabilities: %s" % p)
print("sum to 1? %s" % np.isclose(sum(p), 1))
```
From these probabilities, we can uniquely reconstruct the density matrix via:
$$ \rho = \sum_{i} ((d+1)p(i) - \frac{1}{d})\Pi_{i} $$
Where $\Pi_{i}$ are the projectors onto the SIC states: $E_{i} = \frac{1}{d}\Pi_{i}$.
Or given the fact that $\sum_{i} \frac{1}{d} \Pi_{i} = I$:
$$\rho = (d+1) \sum_{i} p(i)\Pi_{i} - I $$
```
def probs_dm_sic(p, ref_povm):
    """Reconstruct rho from SIC probabilities: rho = sum_i ((d+1)p(i) - 1/d) Pi_i."""
    d = int(np.sqrt(len(p)))
    projectors = (e / e.tr() for e in ref_povm)
    return sum(((d + 1) * p[i] - 1 / d) * proj for i, proj in enumerate(projectors))
def probs_dm_sic2(p, ref_povm):
    """Reconstruct rho via the equivalent form rho = (d+1) sum_i p(i) Pi_i - I."""
    d = int(np.sqrt(len(p)))
    weighted = sum(p[i] * e / e.tr() for i, e in enumerate(ref_povm))
    return (d + 1) * weighted - qt.identity(d)
# Both reconstruction formulas should reproduce rho from its SIC probabilities
rho2 = probs_dm_sic(p, ref_povm)
rho3 = probs_dm_sic2(p, ref_povm)
print("recovered? %s" % (np.allclose(rho, rho2, rtol=1e-02, atol=1e-04) and np.allclose(rho, rho3, rtol=1e-02, atol=1e-04)))
```
<hr>
Now suppose we have the following situation. We first make a SIC-POVM measurement, and then we make a standard Von Neumann (PVM) measurement on a given system. Following the vivid imagery of Fuchs, we'll refer to the SIC-POVM as being "up in the sky" and the Von Neumann measurement as being "down on the ground".
So given our state $\rho$, above we've calculated the probabilities $p(i)$ for each outcome of the POVM. Now we'd like to assign probabilities for the outcomes of the Von Neumann measurement. What we need are the conditional probabilities $r(j|i)$, the probability of Von Neumann outcome $j$ given that the SIC-POVM returned $i$. Then:
$s(j) = \sum_{i}^{d^2} p(i)r(j|i)$
This is just standard probability theory: the law of total probability. The probability for an outcome $j$ of the Von Neumann measurement is the sum over all the conditional probabilities for $j$, given some outcome $i$ of the SIC-POVM, multiplied by the probability that $i$ occured.
The standard way of thinking about this would be that after the SIC-POVM measurement:
$\rho^{\prime} = \sum_{i} p(i)\Pi_{i}$
In other words, after the first measurement, $\rho$ becomes a mixture of outcome states weighted by the probabilities of them occurring. In this simple case, where we aren't considering a subsystem of a larger system, and we're sticking with SIC-POVM's whose elements, we recall, are rank-1, we can just use the projectors $\Pi_{i}$ for the SIC-POVM outcome states. Then the probabilities for the Von Neumann measurement are:
$s(j) = tr(\tilde{\Pi}_{j}\rho^{\prime})$
Where $\tilde{\Pi}_{j}$ is the projector for the $j^{th}$ Von Neumann outcome.
```
# A random Von Neumann measurement "on the ground"
von_neumann = qt.rand_herm(d)
vn_projectors = [v*v.dag() for v in von_neumann.eigenstates()[1]]
# Post-SIC mixture rho' = sum_i p(i) Pi_i, then the Born rule on the ground
vn_rho = sum([prob*ref_povm[i]/ref_povm[i].tr() for i, prob in enumerate(p)])
vn_s = np.array([(proj*vn_rho).tr() for proj in vn_projectors]).real
print("vn probabilities after sic: %s" % vn_s)
```
Alternatively, however, we could form conditional probabilities directly:
$r(j|i) = tr(\tilde{\Pi}_{j}\Pi_{i})$
Where $\Pi_{i}$ is the projector for the $i^{th}$ POVM outcome (in the sky), and $\tilde{\Pi}_{j}$ is the projector for the $j^{th}$ Von Neumann outcome (on the ground).
Then we can use the formula:
$s(j) = \sum_{i}^{d^2} p(i)r(j|i)$
```
def vn_conditional_probs(von_neumann, ref_povm):
    """Conditional probabilities r(j|i) = tr(Pi~_j Pi_i) of Von Neumann outcome j
    given reference-POVM outcome i; each column sums to 1."""
    d = von_neumann.shape[0]
    projectors = [v * v.dag() for v in von_neumann.eigenstates()[1]]
    rows = [[(projectors[j] * (e / e.tr())).tr() for e in ref_povm] for j in range(d)]
    return np.array(rows).real
def vn_posterior(dm, von_neumann, ref_povm):
    """Von Neumann outcome probabilities s(j) = sum_i p(i) r(j|i) when the
    reference-POVM measurement is actually performed first (law of total
    probability).

    Bug fix: the reference probabilities are now computed from the `dm`
    argument; the original read the global `rho`, silently ignoring `dm`.
    """
    d = dm.shape[0]
    p = dm_probs(dm, ref_povm)
    r = vn_conditional_probs(von_neumann, ref_povm)
    return np.array([sum([p[i]*r[j][i] for i in range(d**2)]) for j in range(d)])
print("vn probabilities after sic: %s" % vn_posterior(rho, von_neumann, ref_povm))
```
Indeed, $r(j|i)$ is a valid conditional probability matrix: its columns all sum to 1.
```
np.sum(vn_conditional_probs(von_neumann, ref_povm), axis=0)
```
Incidentally, there's no need to confine ourselves to the case of Von Neumann measurements. Suppose the "measurement on the ground" is given by another POVM. In fact, we can get one by just rotating our SIC-POVM by some random unitary. We'll obtain another SIC-POVM $\{F_{j}\}$.
In this case, we'd form $\rho^{\prime} = \sum_{i} p(i)\Pi_{i}$ just as before, and then take $s(j) = tr(F_{j}\rho^{\prime})$.
```
# A second SIC "on the ground", obtained by rotating the reference SIC
U = qt.rand_unitary(d)
ground_povm = [U*e*U.dag() for e in ref_povm]
# rho' after the sky measurement, then probabilities for the ground POVM
povm_rho = sum([prob*ref_povm[i]/ref_povm[i].tr() for i, prob in enumerate(p)])
povm_s = np.array([(e*povm_rho).tr() for e in ground_povm]).real
print("povm probabilities after sic: %s" % povm_s)
```
And alternatively, we could work with the conditional probabilities:
$r(j|i) = tr(F_{j}\Pi_{i})$
And then apply:
$s(j) = \sum_{i}^{d^2} p(i)r(j|i)$
Where now $j$ will range from $0$ to $d^2$.
```
def povm_conditional_probs(povm, ref_povm):
    """Conditional probabilities r(j|i) = tr(F_j Pi_i) of ground-POVM outcome j
    given reference-POVM outcome i in the sky."""
    normalized = [b / b.tr() for b in ref_povm]
    return np.array([[(a * pi).tr() for pi in normalized] for a in povm]).real
def povm_posterior(dm, povm, ref_povm):
    """Ground-POVM probabilities s(j) = sum_i p(i) r(j|i) after an actual
    reference-POVM measurement in the sky (law of total probability)."""
    d = dm.shape[0]
    p = dm_probs(dm, ref_povm)
    r = povm_conditional_probs(povm, ref_povm)
    totals = [sum(p[i] * r[j][i] for i in range(d ** 2)) for j in range(d ** 2)]
    return np.array(totals)
print("povm probabilities after sic: %s" % povm_posterior(rho, ground_povm, ref_povm))
```
<hr>
Okay, now we get to the punch line. Let's consider the case of the Von Neumann measurement. Suppose we *didn't* make the SIC-POVM measurement first. What would the probabilities be? Well, we all know:
$q(j) = tr(\tilde{\Pi}_{i}\rho)$
```
# Direct Born-rule probabilities, with no sky measurement performed
vn_p = np.array([(proj*rho).tr() for proj in vn_projectors]).real
print("vn probabilities (no sic in the sky): %s" % vn_p)
```
Now it turns out that we can get these same probabilities in a different way:
$q(j) = (d+1)[\sum_{i}^{d^2} p(i)r(j|i)] - 1$
```
def vn_born(dm, von_neumann, ref_povm):
    """Born rule in SIC terms: q(j) = (d+1) sum_i p(i) r(j|i) - 1, the Von
    Neumann probabilities when NO sky measurement is actually performed."""
    d = dm.shape[0]
    p = dm_probs(dm, ref_povm)
    r = vn_conditional_probs(von_neumann, ref_povm)
    totals = [sum([p[i] * r[j][i] for i in range(d ** 2)]) for j in range(d)]
    return np.array([(d + 1) * t - 1 for t in totals]).real
print("vn probabilities (no sic in the sky): %s" % vn_born(rho, von_neumann, ref_povm))
```
In other words, we can express the usual quantum probabilities in the case that we go directly to the Von Neumann measurement in a way that looks *ridiculously* close to our formula from before, involving probabilities for the SIC-POVM outcomes and conditional probabilities for Von Neumann outcomes given SIC-POVM outcomes! We sum over *hypothetical* outcomes of the SIC-POVM, multiplying the probability of each outcome, given our state $\rho$, by the conditional probability for the Von Neumann measurement giving the $j^{th}$ outcome, given that the SIC-POVM outcome was $i$. Except the formula is somewhat deformed by the the $(d+1)$ and the $-1$.
Clearly, this is equivalent to the usual Born Rule: but it's expressed *entirely* in terms of probabilities and conditional probabilities. It makes sense, in the end, that you can do this, given that the probabilities for the SIC-POVM measurement completely nail down the state. The upshot is that we can just work with the probabilities instead! Indeed, we could just pick some SIC-POVM to be our "reference apparatus", and describe any quantum state we're ever interested in terms of probabilities with reference to it, and any measurement in terms of conditional probabilities.
Operationally, what *is* the difference between:
$s(j) = \sum_{i}^{d^2} p(i)r(j|i)$
and
$q(j) = (d+1)[\sum_{i}^{d^2} p(i)r(j|i)] - 1$
The difference is precisely *whether the SIC-POVM measurement has actually been performed*. If it has, then we lose quantum coherence. If it hasn't, we maintain it. In other words, the difference between classical and quantum is summed up in the minor difference between these two formulas.
In slogan form, due to Asher Peres, "unperformed measurements have no results." We'll get to the philosophy of this later, but the point is that classically speaking, we should be able to use the law of total probability *whether or not we actually do the measurement in the sky*: but quantum mechanically, if we don't actually do the measurement, we can't. But we have something just as good: the Born Rule.
<hr>
If we want to consider a more general measurement "on the ground," in particular, another SIC-POVM measurement, then our formula becomes:
$q(j) = (d+1)[\sum_{i}^{d^2} p(i)r(j|i)] - \frac{1}{d}[\sum_{i}^{d^2} r(j|i) ]$
Where now $i$ ranges to $d^2$.
```
print("povm probabilities (no sic in the sky): %s" % dm_probs(rho, ground_povm))
def povm_born(dm, povm, ref_povm):
    """Born rule for a general SIC on the ground:
    q(j) = (d+1) sum_i p(i) r(j|i) - (1/d) sum_i r(j|i)."""
    d = dm.shape[0]
    p = dm_probs(dm, ref_povm)
    r = povm_conditional_probs(povm, ref_povm)
    q = []
    for j in range(d ** 2):
        weighted = sum([p[i] * r[j][i] for i in range(d ** 2)])
        correction = (1 / d) * sum([r[j][i] for i in range(d ** 2)])
        q.append((d + 1) * weighted - correction)
    return np.array(q).real
print("povm probabilities (no sic in the sky): %s" % povm_born(rho, ground_povm, ref_povm))
```
We can write these rules in much more compact matrix form.
Define $\Phi = (d+1)I_{d^2} - \frac{1}{d}J_{d^2}$
Where $I_{d^2}$ is the $d^2 \times d^2$ identity, and $J_{d^2}$ is the $d^2 \times d^2$ matrix all full of $1$'s.
If $R$ is the matrix of conditional probabilities, and $p$ is the vector of probabilities for the reference POVM in the sky, then the vector of values for $q(i)$ is:
$\vec{q} = R \Phi p$
```
def vn_born_matrix(dm, von_neumann, ref_povm):
    """Vectorized Born rule: q = R Phi p with Phi = (d+1) I_{d^2} - (1/d) J_{d^2}.

    Bug fix: the dimension is now read from the `dm` argument; the original
    used the global `rho` (`d = rho.shape[0]`), which breaks for any caller
    passing a state of different dimension.
    """
    d = dm.shape[0]
    p = dm_probs(dm, ref_povm)
    r = vn_conditional_probs(von_neumann, ref_povm)
    phi = (d+1)*np.eye(d**2) - (1/d)*np.ones((d**2,d**2))
    return r @ phi @ p
print("vn probabilities (no sic in the sky): %s" % vn_born_matrix(rho, von_neumann, ref_povm))
def povm_born_matrix(dm, povm, ref_povm):
d = dm.shape[0]
p = dm_probs(dm, ref_povm)
r = povm_conditional_probs(povm, ref_povm)
phi = (d+1)*np.eye(d**2) - (1/d)*np.ones((d**2,d**2))
return r @ phi @ p
print("povm probabilities (no sic in the sky): %s" % povm_born_matrix(rho, ground_povm, ref_povm))
```
And for that matter, we can calculate the "classical" probabilities from before in the same vectorized way: we just leave out $\Phi$!
```
# "Classical" rule: when the sky measurement IS performed, the plain law of
# total probability applies -- the probability vector is just R @ p, no Phi.
print("vn probabilities after sic: %s" % (vn_conditional_probs(von_neumann, ref_povm) @ dm_probs(rho, ref_povm)))
print("povm probabilities after sic: %s" % (povm_conditional_probs(ground_povm, ref_povm) @ dm_probs(rho, ref_povm)))
```
In fact, this is how qbist operators are implemented in this library behind the scenes. It allows one to easily handle the general case of IC-POVM's (informationally complete POVM's) which aren't SIC's: in that case, the matrix $\Phi$ will be different. Cf. `povm_phi`.
<hr>
Let's consider time evolution in this picture. We evolve our $\rho$ by some unitary:
$\rho_{t} = U \rho U^{\dagger}$
Naturally, we can calculate the new probabilities with reference to our SIC-POVM:
```
# Unitarily evolve rho and re-express the new state as SIC outcome probabilities.
U = qt.rand_unitary(d)
rhot = U*rho*U.dag()  # rho_t = U rho U^dagger
pt = dm_probs(rhot, ref_povm)
print("time evolved probabilities: %s" % pt)
```
But we could also express this in terms of conditional probabilities:
$u(j|i) = \frac{1}{d}tr(\Pi_{j}U\Pi_{i}U^{\dagger})$
As:
$p_{t}(j) = \sum_{i}^{d^2} ((d+1)p(i) - \frac{1}{d})u(j|i)$
```
def temporal_conditional_probs(U, ref_povm):
    """Transition matrix u(j|i) = (1/d) tr(Pi_j U Pi_i U^dag) for unitary evolution U."""
    d = U.shape[0]
    matrix = []
    for a in ref_povm:  # outer index: later outcome j
        pi_j = a/a.tr()
        row = [(1/d)*(pi_j*U*(b/b.tr())*U.dag()).tr() for b in ref_povm]  # inner index: earlier outcome i
        matrix.append(row)
    return np.array(matrix).real
u = temporal_conditional_probs(U, ref_povm)
# Deformed stochastic evolution: p_t(j) = sum_i ((d+1)p(i) - 1/d) u(j|i).
pt2 = np.array([sum(((d+1)*p[i] - 1/d)*u[j][i] for i in range(d**2)) for j in range(d**2)]).real
print("time evolved probabilities: %s" % pt2)
```
We can compare this to the standard rule for stochastic evolution:
$p_{t}(j) = \sum_{i} p(i)u(j|i)$
We can see how the expression is deformed in exactly the same way. Indeed $u(j|i)$ is a doubly stochastic matrix: its rows and columns all sum to 1. And we can describe the time evolution of the quantum system in terms of it.
```
# u is doubly stochastic: both its row sums and its column sums are all 1.
print(np.sum(u, axis=0))
print(np.sum(u, axis=1))
```
For more on the subtleties of time evolution, consider the notes on `conditional_probs`.
<hr>
You can express the inner product between states in terms of SIC-POVM probability vectors via:
$tr(\rho \sigma) = d(d+1)[\vec{p} \cdot \vec{s}] - 1$
```
# Two random density matrices in d=3 and their SIC probability vectors,
# to test the inner-product formula tr(rho sigma) = d(d+1) p.s - 1.
d = 3
ref_povm = sic_povm(d)
rho = qt.rand_dm(d)
sigma = qt.rand_dm(d)
p = dm_probs(rho, ref_povm)
s = dm_probs(sigma, ref_povm)
def quantum_inner_product_sic(p, s):
    """Recover tr(rho*sigma) from the two states' SIC-POVM probability vectors p and s."""
    dim = int(np.sqrt(len(p)))  # the vectors have d^2 entries
    overlap = np.dot(p, s)
    return dim*(dim + 1)*overlap - 1
# The Hilbert-Schmidt inner product computed directly and from probabilities agree.
print("inner product of rho and sigma: %.3f" % (rho*sigma).tr().real)
print("inner product of rho and sigma: %.3f" % quantum_inner_product_sic(p, s))
```
This brings up an important point.
You might wonder: Suppose we have a SIC-POVM with $d^2$ elements which provides $d^2$ probabilities which completely nail down the quantum state, given as a $d \times d$ density matrix. But what if we just start off with any old random vector of $d^2$ probabilities? Will we always get a valid density matrix? In other words, we've seen how we can start with quantum states, and then proceed to do quantum mechanics entirely in terms of probabilities and conditional probabilities. But now we're considering going in reverse. Does *any* assignment of probabilities to SIC-POVM outcomes specify a valid quantum state?
Well: any probability assignment will give us a $\rho$ which is Hermitian and has trace 1, which is great--BUT: this $\rho$ may not be positive-semidefinite (which is a requirement for density matrices). Like: if you assigned any old probabilities to the SIC-POVM outcomes, and then constructed a corresponding $\rho$, it might end up having negative eigenvalues. Since the eigenvalues of $\rho$ are supposed to be probabilities (positive, summing to 1, etc), this is a problem.
In fact, you can't even have probability vectors that are too sharply peaked at any one value!
```
# A maximally sharp probability assignment (all weight on one outcome) does
# NOT correspond to a valid quantum state: the reconstructed matrix is
# Hermitian with trace 1, but has negative eigenvalues.
d = 3
povm = sic_povm(d)
vec = np.zeros(d**2)
vec[np.random.randint(d**2)] = 1  # delta distribution on a random outcome
print("probs: %s" % vec)
print(probs_dm(vec, povm))
```
Note the negative entries. Furthermore, even if we start off in a SIC-POVM state, that doesn't mean we'll get that state with certainty after the measurement--indeed, unlike with projective measurements, repeated measurements don't always give the same results.
```
# Outcome probabilities when the state IS the first SIC element itself:
# unlike projective measurements, repeating a SIC measurement is not certain
# to reproduce the same outcome.
d = 3
povm = sic_povm(d)
print(dm_probs(povm[0]/povm[0].tr(), povm))
```
Above we see the probabilities for SIC-POVM outcomes given that we start off in the first SIC-POVM state. We see that indeed, the first SIC-POVM state has the highest probability, but all the other elements have non-zero probability (and for SIC's this is the same probability: not true for general IC-POVM's).
Indeed, it's a theorem that no such probability vector can have an element which exceeds $\frac{1}{d}$, and that the number of $0$ entries is bounded above by $\frac{d(d-1)}{2}$.
So we need another constraint. In other words, the quantum state space is a *proper subset* of the probability simplex over $d^2$ outcomes. There's some very interesting work exploring the geometric aspects of this constraint.
For example, insofar as pure states are those Hermitian matrices satisfying $tr(\rho^2) = tr(\rho^3) = 1$, we can evidently finagle this into two conditions:
$\sum_{i}^{d^2} p(i)^2 = \frac{2}{d(d+1)}$
and
$\sum_{i,j,k} c_{i, j, k}p(i)p(j)p(k) = \frac{d+7}{(d+1)^3}$
Where $c_{i, j, k} = \Re{[tr(\Pi_{i}\Pi_{j}\Pi_{k})]}$, which is a real-valued, completely symmetric three index tensor. The quantum state space is the <a href="https://en.wikipedia.org/wiki/Convex_hull">convex hull</a> of probability distributions satisfying these two equations.
On this same note, considering our expression for the inner product, since we know that the inner product between two quantum states $\rho$ and $\sigma$ is bounded between $0$ and $1$, we must have:
$\frac{1}{d(d+1)} \leq \vec{p} \cdot \vec{s} \leq \frac{2}{d(d+1)}$
The upper bound corresponds to our first condition. Call two vectors $\vec{p}$ and $\vec{s}$ "consistent" if their inner product obeys both inequalities. If we have a subset of the probability simplex for which every pair of vectors satisfies the inequalities, call it a "germ." If adding one more vector to a germ makes the set inconsistent, call the germ "maximal." And finally, call a maximal germ a "qplex." The space of quantum states in the SIC representation form a qplex, but not all qplexes correspond to quantum state spaces. The geometry of the qplexes are explored in <a href="https://arxiv.org/abs/1612.03234">Introducing the Qplex: A Novel Arena for Quantum Theory</a>. The conclusion?
"\[Turning\] to the problem of identifying the “missing assumption” which will serve to pick out quantum state space uniquely from the set of all qplexes... Of course, as is usual in such cases, there is more than one possibility. We identify one such assumption: the requirement that the symmetry group contain a subgroup isomorphic to the projective unitary group. This is a useful result because it means that we have a complete characterization of quantum state space in probabilistic terms. It also has an important corollary: That SIC existence in dimension d is equivalent to the existence of a certain kind of subgroup of the real orthogonal group in dimension $d^2 − 1$."
<hr>
Here's one final thing, for flavor. Having specified a SIC-POVM with $n$ elements and then an additional measurement (Von Neumann or POVM), we can construct the matrix $r(j|i)$.
```
# A qubit SIC reference measurement plus a random Von Neumann observable;
# r is the matrix of conditional probabilities r(j|i).
d = 2
ref_povm = sic_povm(d)
von_neumann = qt.rand_herm(d)
n = len(ref_povm)
r = vn_conditional_probs(von_neumann, ref_povm)
r
```
We can then consider its rows, and extract a set of vectors $s_{j}$, each of which sums to 1:
$r(j|i) = n\gamma_{j} s_{j}(i)$
```
# Factor each row of r as r(j|i) = n * gamma_j * s_j(i): s_j is the row
# normalized to sum to 1, and gamma_j is the row sum divided by n.
s = np.array([row/sum(row) for row in r])
gammas = [sum(row)/n for row in r]
# Reassembling n * gamma_j * s_j should reproduce r exactly.
np.array([n*gammas[i]*row for i, row in enumerate(s)])
```
We'll call these vectors $s_{j}$ "measurement vectors."
Suppose we're completely indifferent to the outcomes of the POVM in the sky. We could represent this by: $p(i) = \frac{1}{n}$. In other words, equal probability for each outcome.
The probabilities for outcomes to the later Von Neumann measurement would be:
$q(j) = \frac{1}{n}\sum_{i}r(j|i)$
```
# Complete indifference to the sky outcomes: uniform p(i) = 1/n.
p = [1/n for i in range(n)]
# Ground probabilities by the classical law of total probability.
vn_probs = np.array([sum([p[i]*r[j][i] for i in range(n)]) for j in range(d)])
vn_probs
```
We could describe this by assigning to $\rho$ the maximally mixed state.
```
# The same prediction follows from assigning the maximally mixed state I/d.
max_mixed = qt.identity(d)/d
vn_born(max_mixed, von_neumann, ref_povm)
```
But we could also rewrite $q(j)$ as:
$q(j) = \frac{1}{n} \sum_{i} n\gamma_{j} s_{j}(i) = \gamma_{j} \sum_{i} s_{j}(i)$
And since the $s_{j}(i)$ sum to 1:
$q(j) = \gamma_{j}$
```
# Since each s_j sums to 1, q(j) collapses to gamma_j itself.
np.array([gammas[j]*sum([s[j][i] for i in range(n)]) for j in range(d)])
gammas
```
Thus you can interpret the $\gamma_{j}$'s as: the probabilities of obtaining the $j^{th}$ outcome on the ground when you're completely indifferent to the potential outcomes in the sky.
Now let's rewrite:
$r(j|i) = n\gamma_{j} s_{j}(i)$
as
$s_{j}(i) = \frac{\frac{1}{n}r(j|i)}{\gamma_{j}}$
We know that $\gamma_{j}$ is the probability of obtaining $j$ on the ground, given complete ignorance about the potential outcomes of the sky experiment. We also know that $\frac{1}{n}$ is the probability assigned to each outcome of the sky experiment from complete indifference.
So write $Pr_{CI}(i)= \frac{1}{n}$ and $Pr_{CI}(j) = \gamma_{i}$, where $CI$ stands for complete ignorance/indifference. And we could apply the same notation: $Pr_{CI}(j|i) = r(j|i)$:
$s_{j}(i) = \frac{Pr_{CI}(i)Pr_{CI}(j|i)}{Pr_{CI}(j)}$
But this is just the Baysian formula for inverting conditional probabilities:
$Pr_{CI}(i|j) = \frac{Pr_{CI}(i)Pr_{CI}(j|i)}{Pr_{CI}(j)}$
In a similar vein:
<img src="img/fuchs.png">
<hr>
## Interlude: Implementing POVM's
It's worth mentioning how POVM's are actually implemented in practice. Here's the simplest way of thinking about it. Suppose we have a system with Hilbert space dimension $d$, and we have a POVM with $n$ elements. (In the case of our SIC-POVM's, we'd have $d^2$ elements.) We then adjoin an auxilliary system with Hilbert space dimension $n$: as many dimensions as POVM elements. So now we're working with $\mathcal{H}_{d} \otimes \mathcal{H}_{n}$.
Let's define projectors onto the basis states of the auxilliary system: $\Xi_{i} = I_{d} \otimes \mid i \rangle \langle i \mid$. If we denote the elements of the POVM by $\{ E_{i} \}$, then we can construct an isometry:
$V = \sum_{i}^{n} \sqrt{E_{i}} \otimes \mid i \rangle$
Such that any element of the POVM can be written:
$E_{i} = V^{\dagger}\Xi_{i}V $
```
# Naimark-style dilation: realize an n-element POVM on H_d as a projective
# measurement on H_d (x) H_n via the isometry V = sum_i sqrt(E_i) (x) |i>.
d = 3
my_povm = sic_povm(d)
n = len(my_povm)
# Projectors Xi_i = I_d (x) |i><i| onto the auxiliary basis states.
aux_projectors = [qt.tensor(qt.identity(d), qt.basis(n, i)*qt.basis(n, i).dag()) for i in range(n)]
V = sum([qt.tensor(my_povm[i].sqrtm(), qt.basis(n, i)) for i in range(n)])
# Each POVM element should be recovered as E_i = V^dag Xi_i V.
povm_elements = [V.dag()*aux_projectors[i]*V for i in range(n)]
print("recovered povm elements? %s" % np.all([np.allclose(my_povm[i], povm_elements[i]) for i in range(n)]))
```
So this isometry $V$ takes us from $\mathcal{H}_{d}$ to $\mathcal{H}_{d} \otimes \mathcal{H}_{n}$.
We can extend this to a unitary $U$ (that takes $\mathcal{H}_{d} \otimes \mathcal{H}_{n}$ to $\mathcal{H}_{d} \otimes \mathcal{H}_{n}$) using the QR decomposition. In essence, we use the Gram-Schmidt procedure to fill out the rectangular matrix to a square matrix with extra orthogonal columns. (And then we have to rearrange the columns so that the columns of $V$ appear every $n^{th}$ column, in order to take into account the tensor product structure.)
```
# Extend the isometry V to a full unitary on H_d (x) H_n: QR fills out the
# remaining orthogonal columns, then we rearrange so V's columns sit at every
# n-th position, matching the tensor-product ordering.
Q, R = np.linalg.qr(V, mode="complete")
for i in range(d):
    # Swap column i with column n*i, then overwrite position n*i with V's
    # i-th column directly.
    # NOTE(review): this assumes QR altered V's columns at most by phases,
    # so overwriting preserves unitarity -- confirm against implement_povm.
    Q.T[[i,n*i]] = Q.T[[n*i,i]]
    Q[:,n*i] = V[:,i].T
U = qt.Qobj(Q)
U.dims = [[d, n],[d, n]]  # mark U as acting on H_d (x) H_n
```
We can check our work. It should be the case that:
$V = U(I_{d} \otimes \mid 0 \rangle)$
```
# Sanity check: V should equal U restricted to the |0> auxiliary sector.
print("recovered V?: %s" % np.allclose(V, U*qt.tensor(qt.identity(d), qt.basis(n, 0))))
```
Now for the finale. We know how to calculate the probabilities for each of the POVM outcomes. It's just:
$Pr(i) = tr(E_{i}\rho)$
To actually implement this, we start off with our auxilliary system in the $\mid 0 \rangle$ state, so that the overall density matrix is: $\rho \otimes \mid 0 \rangle \langle 0 \mid$. We then evolve the system and the auxilliary with our unitary $U$:
$$U [\rho \otimes \mid 0 \rangle \langle 0 \mid] U^{\dagger} $$
Finally, we perform a standard Von Neumann measurement on the auxilliary system (whose outcomes correspond to the basis states we've been using). Recalling that we defined the projectors onto the auxilliary basis states as $\Xi_{i} = I_{d} \otimes \mid i \rangle \langle i \mid$, we can then write probabilities for each outcome:
$Pr(i) = tr(\Xi_{i} U [\rho \otimes \mid 0 \rangle \langle 0 \mid] U^{\dagger} )$
These are the same probabilities as above.
```
# The POVM probabilities tr(E_i rho) agree with projectively measuring the
# auxiliary system after evolving rho (x) |0><0| with U.
rho = qt.rand_dm(d)
povm_probs = np.array([(my_povm[i]*rho).tr() for i in range(n)]).real
system_aux_probs = np.array([(aux_projectors[i]*\
U*qt.tensor(rho, qt.basis(n,0)*qt.basis(n,0).dag())*U.dag()).tr()\
for i in range(n)]).real
print("povm probs:\n%s" % povm_probs)
print("system and aux probs:\n%s" % system_aux_probs)
```
Moreover, we can see that the states after measurement correspond to the SIC-POVM projectors:
```
# Post-measurement system states (partial trace over the auxiliary) are
# proportional to the SIC projectors: states[0] normalized equals d * E_0.
states = [(aux_projectors[i]*(U*qt.tensor(rho, qt.basis(n,0)*qt.basis(n,0).dag())*U.dag())).ptrace(0) for i in range(n)]
print(states[0].unit())
print(d*my_povm[0])
```
Indeed, whether or not you buy the philosophy that we're about to go into, SIC-POVM's have deep practical value in terms of quantum tomography and quantum information theory generally.
Cf. `implement_povm`.
<hr>
## The Philosophy
So in some sense the difference between classical and quantum is summed up in the difference between these two formulas:
$s(j) = \sum_{i}^{d^2} p(i)r(j|i)$
and
$q(j) = (d+1)[\sum_{i}^{d^2} p(i)r(j|i)] - 1$
In the first case, I make a SIC-POVM measurement in the sky, and then make a Von Neumann measurement on the ground. I can calculate the probabilities for the outcomes of the latter measurement using the law of total probability. Given the probabilities for the sky outcomes, and the conditional probabilities that relate ground outcomes to sky outcomes, I can calculate the probabilities for ground outcomes. Classically speaking, and this is the crucial point, I could use the first formula *whether or not I actually did the sky measurement*.
In other words, insofar as classically we've identified the relevant "degrees of freedom," and the assignment of sky probabilities uniquely characterizes the state, then it's a matter of mathematical convenience if we express $s(j)$ as a sum over those degrees of freedom $\sum_{i}^{d^2} p(i)r(j|i)$: by the nature of the formula, by the law of total probability, all the $i$'s drop out, and we're left with the value for $j$. We could actually perform the sky measurement or not: either way, we'd use the same formula to calculate the ground probabilities.
This is precisely what changes with quantum mechanics: it makes a difference *whether you actually do the sky measurement or not*. If you do, then you use the classical formula. If you don't, then you use the quantum formula.
One way of interpreting the moral of this is that, to quote Asher Peres again, "Unperformed measurements have no results." In contrast, classically, you *can* always regard unperformed measurements as having results: indeed, classical objectivity consists in, as it were, everything wearing its outcomes on its sleeve. In other words, outcomes aren't a special category: one can just speak of the properties of things. And this is just another way of saying you can use the law of total probability whether or not you actually do an intermediate measurement. But this is exactly what you can't rely on in quantum mechanics.
But remarkably, all you need to do to update your probability calculus is to use the quantum formula, which is ultimately the Born Rule in disguise. In other words, in a world where unperformed measurements have no results, when we consider different kinds of sequences of measurements, we need a (minor) addition to probability theory so that our probability assignments are coherent/consistent/no one can make a buck off of us.
Moreover, Blake Stacey makes the nice point, considering the relationship between SIC-POVM's and Von Neumann measurements:
"Two orthogonal quantum states are perfectly distinguishable with respect to some experiment, yet in terms of the reference \[SIC-POVM\] measurement, they are inevitably overlapping probability distributions. The idea that any two valid probability distributions for the reference measurement must overlap, and that the minimal overlap in fact corresponds to distinguishability with respect to some other test, expresses the fact that quantum probability is not about hidden variables" (Stacey 2020).
<hr>
de Finetti famously advocated a subjectivist, personalist view of classical probability theory, and he and his theorems have proved to be an inspiration for QBists like Christopher Fuchs and others. In this view, probabilities don't "exist" out in the world: they are mathematical representations of personal beliefs which you are free to update in the face of new evidence. There isn't ever "one objective probability distribution" for things: rather, there's a constant personal process of convergence towards better beliefs. If you don't want to make bad bets, there are some basic consistency criteria that your probabilities have to satisfy. And that's what probability theory as such amounts to. The rest is just "priors."
"Statisticians for years had been speaking of how statistical sampling can reveal the 'unknown probability distribution'. But from de Finetti’s point of view, this makes as little sense as the unknown quantum state made for us. What de Finetti’s representation theorem established was that all this talk of an unknown probability was just that, talk. Instead, one could show that there was a way of thinking of the resultant of statistical sampling purely in terms of a transition from prior subjective probabilities (for the sampler himself) to posterior subjective probabilities (for the sampler himself). That is, every bit of statistical sampling from beginning to end wasn’t about revealing a true state of affairs (the “unknown probability”), but about the statistician’s own states of information about a set of “exchangeable” trials, full stop. The quantum de Finetti theorem does the same sort of thing, but for quantum states" (Fuchs 2018).
Indeed, QBists advocate a similar epistemic interpretation of the quantum state. The quantum state does not represent a quantum system. It represents *your beliefs about that quantum system*. In other words, interpretations that assign ontological roles to quantum states miss the mark. Quantum states are just packages of probabilities, indeed, probabilities personal to you. (In this sense, one can see a close relation to relational interpretations of quantum mechanics, where the quantum state is always defined not objectively, but to one system relative to another system.) Similarly, all the superstructure of quantum mechanics, operators, time evolution, etc-- are all just a matter of making subjective probabilities consistent with each other, given the *objective fact* that you should use the quantum formula when you haven't done an intermediate measurement, and the classical formula if you have. (And one should also mention that the formulas above imply that the *dimension* of the Hilbert space is, in fact, objective.)
On the other hand, QBists also hold that the very outcomes of measurements themselves are subjective--not in the sense of being vacuously open to intepretation, but in the sense that they are *experiences*; and it is precisely these subjective experiences that are being gambled upon. In other words, quantum mechanics is not a theory of the objective physical world as such, but is instead a first person theory by which one may predict the future consequences of one's own actions in experience.
This is how they deal with the dilemma of Wigner's friend. Fuchs: "...for the QBist, the real world, the one both agents are embedded in—with its objects and events—is taken for granted. What is not taken for granted is each agent's access to the parts of it he has not touched. Wigner holds two thoughts in his head: a) that his friend interacted with a quantum system, eliciting some consequences of the interaction for himself, and b) after the specified time, for any of Wigner's own future interactions with his friend or the system or both, he ought to gamble upon their consequences according to $U(\rho \otimes \mid \psi \rangle \langle \psi \mid) U^{\dagger}$. One statement refers to the friend's potential experiences, and one refers to Wigner's own. So long as it is explicit that $U(\rho \otimes \mid \psi \rangle \langle \psi \mid) U^{\dagger}$ refers to the latter--i.e., how Wigner should gamble upon the things that might happen to him--making no statement whatsoever about the former, there is no conflict. The world is filled with all the same things it was before quantum theory came along, like each of our experiences, that rock and that tree, and all the other things under the sun; it is just that quantum theory provides a calculus for gambling on each agent's experiences--it doesn't give anything other than that. It certainly doesn't give one agent the ability to conceptually pierce the other agent's personal experience. It is true that with enough effort Wigner \[could apply the reverse unitary, disentangling the friend and the spin\], causing him to predict that his friend will have amnesia to any future questions on his old measurement results. But we always knew Wigner could do that--a mallet to the head would have been good enough" (Fuchs, Stacey 2019).
Most assuredly, this is not a solipsistic theory: indeed, the actual results of measurement are precisely not within one's control. The way they imagine it is that whenever you set up an experiment, you divide the world into subject and object: the subject has the autonomy to set up the experiment, and the object has the autonomy to respond to the experiment. But the act of measurement itself is a kind of creation, a mutual experience which transcends the very distinction between subject and object itself, a linkage between oneself and the other. "QBism says that when an agent reaches out and touches a quantum system—when he performs a quantum measurement—this process gives rise to birth in a nearly literal sense" (Fuchs, Stacey 2019).
The only conflict here is with a notion that the only valid physical theories are those that attempt to directly represent the universe "in its totality as a pre-existing static system; an unchanging, monistic something that just *is*." Moreover, a theory like QBism clears a space for "real particularity and 'interiority' in the world." For Wigner, considering his friend and the system, with his back turned, "that phenomenon has an inside, a vitality that he takes no part in until he again interacts with one or both relevant pieces of it."
Often in the interpretation of quantum mechanics, one tries to achieve objectivity by focusing on the big bulky apparatuses we use and the "objective" record of outcomes left behind by these machines. The QBists take a different track: Bohr himself considers the analogy of a blind man seeing with a stick. He's not actively, rationally thinking about the stick and how it's skittering off this or that: rather, for him, it becomes an extension of his body: he *sees with the stick*. And thus one can understand Fuchs's three tenets of QBism:
1. Quantum Theory Is Normative, Not Descriptive
2. My Probabilities Cannot Tell Nature What To Do
3. A Measuring Device Is Literally an Extension of the Agent
<hr>
<img width=600 src="img/qbism_assumptions1.png">
<img width=600 src="img/qbism_assumptions2.png">
<hr>
Indeed, one might wonder about entanglement in this picture. In line with the discussion of Wigner's friend, we can interpret entanglement and the use of tensor product itself as relating to the objective fact that we require a way of representing correlations while being completely agnostic about what is correlated insofar as we haven't yet reached out and "touched" the thing.
Moreover, in this sense, one can look at QBism as a completely "local" theory. An experimenter has one half of an entangled pair of spins, and makes a measurement, and has an experience. In the textbook way of thinking of it, this causes the state of the other spin to immediately collapse. QBism takes a different approach. They say: quantum theory allows the experimenter to predict that if they go over and measure the other spin in the same direction, they will have another experience, of the answers of the two particles being correlated. But just because quantum theory licenses the experimenter to assign a probability 1 for the latter outcome after they do the first measurement doesn't mean that the latter particle *really is now $\uparrow$, say, as a property*. If the experimenter never actually goes to check out the other particle, it's yet another unperformed measurement: and it has no outcome yet. To paraphrase William James, if it isn't experienced, it isn't real. And in order to "cash out" on entanglement, one actually has to traverse the distance between the two particles and compare the results.
With regard to quantum teleportation, in this view, it's not about getting "things" from one place to another, but about making one's information cease referring to this part of the universe and start referring instead to another part of the universe, without referring to anything else in between. "The only nontrivial thing transferred in the process of teleportation is *reference*" (Fuchs, Stacey 2019).
<hr>
One of the things that makes QBism so interesting is its attempt to give nature as much latitude as possible. Usually in science, we're mentally trying to constrain nature, applying concepts, laws, systems, to it, etc. QBism instead proposes that we live in an unfinished world, whose creation is ongoing and ceaseless, and that this profound openendedness is the real meaning behind "quantum indeterminism." In itself, the universe is not governed by immutable laws and initial conditions fixed from the beginning: instead, new situations are coming into being all the time. Of course, regularities arise by evolution, the laws of large numbers, symmetries and so forth. But they take seriously John Wheeler's idea of the "participatory universe," that we and everything else are constantly engaged bringing the universe into being, together.
Wheeler writes:
"How did the universe come into being? Is that some strange, far-off process beyond hope of analysis? Or is the mechanism that comes into play one which all the time shows itself? Of all the signs that testify to 'quantum phenomenon' as being the elementary act and building block of existence, none is more striking than its utter absence of internal structure and its untouchability. For a process of creation that can and does operate anywhere, that is more basic than particles or fields or spacetime geometry themselves, a process that reveals and yet hides itself, what could one have dreamed up out of pure imagination more magic and more fitting than this?"
"'Law without law': It is difficult to see what else than that can be the “plan” for physics. It is preposterous to think of the laws of physics as installed by a Swiss watchmaker to endure from everlasting to everlasting when we know that the universe began with a big bang. The laws must have come into being. Therefore they could not have been always a hundred percent accurate. That means that they are derivative, not primary. Also derivative, also not primary is the statistical law of distribution of the molecules of a dilute gas between two intersecting portions of a total volume. This law is always violated and yet always upheld. The individual molecules laugh at it; yet as they laugh they find themselves obeying it. ... Are the laws of physics of a similar statistical character? And if so, statistics of what? Of billions and billions of acts of observer-participancy which individually defy all law? . . . \[Might\] the entirety of existence, rather than \[be\] built on particles or fields or multidimensional geometry, \[be\] built on billions upon billions of elementary quantum phenomena, those elementary acts of observer-participancy?"
<img src="img/wheeler.png">
<hr>
In such world, to quote William James, "Theories thus become instruments, not answers to enigmas, in which we can rest. We don’t lie back upon them, we move forward, and, on occasion, make nature over again by their aid." Moreover, in relegating quantum states to the observers who use them for predictions, one clears some ontological space for the quantum systems themselves to be "made of" who knows what qualitative, experiential stuff.
"\[QBism\] means that reality differs from one agent to another. This is not as strange as it may sound. What is real for an agent rests entirely on what that agent experiences, and different agents have different experiences. An agent-dependent reality is constrained by the fact that different agents can communicate their experience to each other, limited only by the extent that personal experience can be expressed in ordinary language. Bob’s verbal representation of his own experience can enter Alice’s, and vice-versa. In this way a common body of reality can be constructed, limited only by the inability of language to represent the full flavor — the “qualia” — of personal experience" (Fuchs, Mermin, Schack 2013).
Indeed, the QBists reach back in time and draw on the work of the old American pragmatists: James, John Dewey, Charles Sanders Peirce, and others. It's interesting to read their works particularly as many of them date from the pre-quantum era, so that even in the very face of classical physics, they were advocating a radically indeterministic, experience-first view of the world.
For example, James writes:
"\[Chance\] is a purely negative and relative term, giving us no information about that of which it is predicated, except that it happens to be disconnected with something else—not controlled, secured, or necessitated by other things in advance of its own actual presence... What I say is that it tells us nothing about what a thing may be in itself to call it “chance.” ... All you mean by calling it “chance” is that this is not guaranteed, that it may also fall out otherwise. For the system of other things has no positive hold on the chance-thing. Its origin is in a certain fashion negative: it escapes, and says, Hands off! coming, when it comes, as a free gift, or not at all."
"This negativeness, however, and this opacity of the chance-thing when thus considered ab extra, or from the point of view of previous things or distant things, do not preclude its having any amount of positiveness and luminosity from within, and at its own place and moment. All that its chance-character asserts about it is that there is something in it really of its own, something that is not the unconditional property of the whole. If the whole wants this property, the whole must wait till it can get it, if it be a matter of chance. That the universe may actually be a sort of joint-stock society of this sort, in which the sharers have both limited liabilities and limited powers, is of course a simple and conceivable notion."
<hr>
"Why may not the world be a sort of republican banquet of this sort, where all the qualities of being respect one another’s personal sacredness, yet sit at the common table of space and time?
To me this view seems deeply probable. Things cohere, but the act of cohesion itself implies but few conditions, and leaves the rest of their qualifications indeterminate. As the first three notes of a tune comport many endings, all melodious, but the tune is not named till a particular ending has actually come,—so the parts actually known of the universe may comport many ideally possible complements. But as the facts are not the complements, so the knowledge of the one is not the knowledge of the other in anything but the few necessary elements of which all must partake in order to be together at all. Why, if one act of knowledge could from one point take in the total perspective, with all mere possibilities abolished, should there ever have been anything more than that act? Why duplicate it by the tedious unrolling, inch by inch, of the foredone reality? No answer seems possible. On the other hand, if we stipulate only a partial community of partially independent powers, we see perfectly why no one part controls the whole view, but each detail must come and be actually given, before, in any special sense, it can be said to be determined at all. This is the moral view, the view that gives to other powers the same freedom it would have itself."
<hr>
"Does our act then create the world’s salvation so far as it makes room for itself, so far as it leaps into the gap? Does it create, not the whole world’s salvation of course, but just so much of this as itself covers of the world’s extent? Here I take the bull by the horns, and in spite of the whole crew of rationalists and monists, of whatever brand they be, I ask why not? Our acts, our turning-places, where we seem to ourselves to make ourselves and grow, are the parts of the world to which we are closest, the parts of which our knowledge is the most intimate and complete. Why should we not take them at their facevalue? Why may they not be the actual turning-places and growing-places which they seem to be, of the world—why not the workshop of being, where we catch fact in the making, so that nowhere may the world grow in any other kind of way than this?"
"Irrational! we are told. How can new being come in local spots and patches which add themselves or stay away at random, independently of the rest? There must be a reason for our acts, and where in the last resort can any reason be looked for save in the material pressure or the logical compulsion of the total nature of the world? There can be but one real agent of growth, or seeming growth, anywhere, and that agent is the integral world itself. It may grow all-over, if growth there be, but that single parts should grow per se is irrational."
"But if one talks of rationality—and of reasons for things, and insists that they can’t just come in spots, what kind of a reason can there ultimately be why anything should come at all?"
<hr>
"What does determinism profess? It professes that those parts of the universe already laid down absolutely appoint and decree what the other parts shall be. The future has no ambiguous possibilities hidden in its womb; the part we call the present is compatible with only one totality. Any other future complement than the one fixed from eternity is impossible. The whole is in each and every part, and welds it with the rest into an absolute unity, an iron block, in which there can be no equivocation or shadow of turning."
"Indeterminism, on the contrary, says that the parts have a certain amount of loose play on one another, so that the laying down of one of them does not necessarily determine what the others shall be. It admits that possibilities may be in excess of actualities, and that things not yet revealed to our knowledge may really in themselves be ambiguous. Of two alternative futures which we conceive, both may now be really possible; and the one become impossible only at the very moment when the other excludes it by becoming real itself. Indeterminism thus denies the world to be one unbending unit of fact. It says there is a certain ultimate pluralism in it."
<hr>
"The import of the difference between pragmatism and rationalism is now in sight throughout its whole extent. The essential contrast is that for rationalism reality is ready-made and complete from all eternity, while for pragmatism it is still in the making, and awaits part of its complexion from the future. On the one side the universe is absolutely secure, on the other it is still pursuing its adventures..."
"The humanist view of 'reality,' as something resisting, yet malleable, which controls our thinking as an energy that must be taken 'account' of incessantly is evidently a difficult one to introduce to novices...
The alternative between pragmatism and rationalism, in the shape in which we now have it before us, is no longer a question in the theory of knowledge, it concerns the structure of the universe itself."
"On the pragmatist side we have only one edition of the universe, unfinished, growing in all sorts of places, especially in the places where thinking beings are at work. On the rationalist side we have a universe in many editions, one real one, the infinite folio, or ́edition de luxe, eternally complete; and then the various finite editions, full of false readings, distorted and mutilated each in its own way."
<hr>
And yet, we know that quantum mechanics presents many faces, Bohmian deterministic faces, the many faces of Many Worlds, and so forth. It's beautiful, in a way: there's something for everybody. One is reminded of another passage from James:
"The history of philosophy is to a great extent that of a certain clash of human temperaments. Undignified as such a treatment may seem to some of my colleagues, I shall have to take account of this clash and explain a good many of the divergencies of philosophies by it. Of whatever temperament a professional philosopher is, he tries, when philosophizing, to sink the fact of his temperament. Temperament is no conventionally recognized reason, so he urges impersonal reasons only for his conclusions. Yet his temperament really gives him a stronger bias than any of his more strictly objective premises. It loads the evidence for him one way or the other ... just as this fact or that principle would. He trusts his temperament. Wanting a universe that suits it, he believes in any representation of the universe that does suit it."
"Why does Clifford fearlessly proclaim his belief in the conscious-automaton theory, although the ‘proofs’ before him are the same which make Mr. Lewes reject it? Why does he believe in primordial units of ‘mind-stuff’ on evidence which would seem quite worthless to Professor Bain? Simply because, like every human being of the slightest mental originality, he is peculiarly sensitive to evidence that bears in some one direction. It is utterly hopeless to try to exorcise such sensitiveness by calling it the disturbing subjective factor, and branding it as the root of all evil. ‘Subjective’ be it called! and ‘disturbing’ to those whom it foils! But if it helps those who, as Cicero says, “vim naturae magis sentiunt” \[feel the force of nature more\], it is good and not evil. Pretend what we may, the whole man within us is at work when we form our philosophical opinions. Intellect, will, taste, and passion co-operate just as they do in practical affairs...\[I\]n the forum \[one\] can make no claim, on the bare ground of his temperament, to superior discernment or authority. There arises thus a certain insincerity in our philosophic discussions: the potentest of all our premises is never mentioned. I am sure it would contribute to clearness if in these lectures we should break this rule and mention it, and I accordingly feel free to do so."
Indeed, for James, the value of a philosophy lies not so much in its proofs, but in the total vision that it expresses. As I say, perhaps the universe itself has something for everyone, whatever their temperament.
<hr>
As a final word, it seems to me that QBism has taught us something genuinely new about quantum theory and its relationship to probability theory. On the other hand, it also pretends to be a theory of "experience": and yet, I'm not sure that I've learned anything new about experience. If QBism is to really prove itself, it will have to make novel predictions not just on the quantum side, but also on the side of our everyday perceptions.
"The burning question for the QBist is how to model in Hilbert-space terms the common sorts of measurements we perform just by opening our eyes, cupping our ears, and extending our fingers" (Fuchs, Stacey 2019).
## Bibliography
<a href="https://arxiv.org/abs/1612.07308">QBism: Quantum Theory as a Hero’s Handbook</a>
<a href="https://arxiv.org/abs/1612.03234">Introducing the Qplex: A Novel Arena for Quantum Theory</a>
<a href="https://arxiv.org/abs/1311.5253">An Introduction to QBism with an Application to the Locality of Quantum Mechanics</a>
<a href="https://arxiv.org/abs/1003.5209">QBism, the Perimeter of Quantum Bayesianism</a>
<a href="https://arxiv.org/abs/1301.3274">Quantum-Bayesian Coherence: The No-Nonsense Version</a>
<a href="https://arxiv.org/abs/1401.7254">Some Negative Remarks on Operational Approaches to Quantum Theory</a>
<a href="https://arxiv.org/abs/1405.2390">My Struggles with the Block Universe</a>
<a href="https://arxiv.org/abs/1412.4209">Quantum Measurement and the Paulian Idea</a>
<a href="https://arxiv.org/abs/quant-ph/0105039">Notes on a Paulian Idea</a>
<a href="https://arxiv.org/abs/1601.04360">On Participatory Realism</a>
<a href="https://arxiv.org/abs/0906.1968">Delirium Quantum</a>
<a href="https://arxiv.org/abs/1703.07901">The SIC Question: History and State of Play</a>
<a href="https://arxiv.org/abs/1705.03483">Notwithstanding Bohr, the Reasons for QBism</a>
<a href="https://arxiv.org/abs/2012.14397">The Born Rule as Dutch-Book Coherence (and only a little more)</a>
<a href="https://arxiv.org/abs/quant-ph/0205039">Quantum Mechanics as Quantum Information (and only a little more)</a>
<a href="https://arxiv.org/abs/1907.02432">Quantum Theory as Symmetry Broken by Vitality</a>
https://en.wikipedia.org/wiki/POVM
https://en.wikipedia.org/wiki/SIC-POVM
<a href="refs/wheeler_law_without_law.pdf">Law without Law</a>
<a href="http://www.gutenberg.org/ebooks/11984">A Pluralistic Universe</a>
<a href="http://www.gutenberg.org/ebooks/32547">Essays in Radical Empiricism</a>
| github_jupyter |
```
# Load helper dictionaries and utility functions shared across the notebooks.
%run ../Python_files/load_dicts.py
%run ../Python_files/util.py
from util import *
import numpy as np
from numpy.linalg import inv, matrix_rank
import json
# # load logit_route_choice_probability_matrix
# P = zload('../temp_files/logit_route_choice_probability_matrix_Sioux.pkz')
# P = np.matrix(P)
# print('rank of P is: ')
# print(matrix_rank(P))
# print('shape of P is: ')
# print(np.shape(P))
# # load path-link incidence matrix
# A = zload('../temp_files/path-link_incidence_matrix_Sioux-Falls.pkz')
# print('rank of A is: ')
# print(matrix_rank(A))
# print('shape of A is: ')
# print(np.shape(A))
# load link counts data
flow_list = []
with open('SiouxFallsFlow.txt', 'r') as f:
    read_data = f.readlines()
# flag counts rows so the header line (flag == 1) is skipped; column 2 of
# each tab-separated row holds the observed flow on that link.
flag = 0
for row in read_data:
    flag += 1
    if flag > 1:
        flow_list.append(float(row.split('\t')[2]))
# x_0: observed link-flow vector, one entry per network link.
x_0 = np.array(flow_list)
x_0
```
### Assignment Equation
We have the following equation:
$$AP'\boldsymbol{\lambda} = \textbf{x},$$
whose least-squares solution can be written as
$$\boldsymbol{\lambda} = (AP')^+\textbf{x}, \quad (1)$$
where $(AP')^{+}$ is the pseudo-inverse of $AP'$.
However, the $\boldsymbol{\lambda}$ given by (1) might contain negative entries, which is not desired. Thus, instead, we solve a constrained least-squares problem:
$$\mathop {\min }\limits_{\boldsymbol{\lambda} \geq \textbf{0}} {\left\| {AP'\boldsymbol{\lambda} - \textbf{x}} \right\|_2}. \quad (2)$$
Note that (2) typically contains a non-PSD matrix Q, thus preventing the solver from calculating the correct $\boldsymbol{\lambda}$.
In the end, we return to the flow conservation expression in CDC16 paper; that is
$$\mathcal{F} = \left\{ {\textbf{x}:\exists {\textbf{x}^{\textbf{w}}} \in \mathbb{R}_ +
^{\left| \mathcal{A} \right|} ~\text{s.t.}~\textbf{x} =
\sum\limits_{\textbf{w} \in \mathcal{W}} {{\textbf{x}^{\textbf{w}}}}
,~\textbf{N}{\textbf{x}^{\textbf{w}}} = {\textbf{d}^{\textbf{w}}},~\forall
\textbf{w} \in \mathcal{W}} \right\}.$$
```
# load node-link incidence matrix (rows: nodes, columns: links)
N = zload('node_link_incidence_Sioux.pkz')
N
# load OD demand data (note: this holds the demand of each
# origin-destination pair, keyed as '(i,j)', not link counts)
with open('demands_Sioux.json', 'r') as json_file:
    demands_Sioux = json.load(json_file)
demands_Sioux['(1,2)']
# assert(1==2)
n = 24 # number of nodes
m = 76 # number of links
# Gurobi model for estimating the Sioux Falls OD matrix.
model = Model("OD_matrix_est_Sioux")
# lam = {}
# for i in range(n+1)[1:]:
#     for j in range(n+1)[1:]:
#         if i != j:
#             key = str(i) + '->' + str(j)
#             lam[key] = model.addVar(name='lam_' + key)
# x['k->i->j']: flow on link k that belongs to OD pair (i, j).
# Nodes are numbered 1..n, hence range(n+1)[1:].
x = {}
for k in range(m):
    for i in range(n+1)[1:]:
        for j in range(n+1)[1:]:
            if i != j:
                key = str(k) + '->' + str(i) + '->' + str(j)
                x[key] = model.addVar(name='x_' + key)
model.update()
# Set objective (constant 0, so this solves a pure feasibility problem)
obj = 0
# for i in range(n+1)[1:]:
#     for j in range(n+1)[1:]:
#         if i != j:
#             key = str(i) + '->' + str(j)
#             obj += lam[key] * lam[key]
model.setObjective(obj)
# # Add constraint: lam >= 0
# for i in range(n+1)[1:]:
#     for j in range(n+1)[1:]:
#         if i != j:
#             key = str(i) + '->' + str(j)
#             key_ = '(' + str(i) + ',' + str(j) + ')'
#             # model.addConstr(lam[key] >= 0)
#             model.addConstr(lam[key] == demands_Sioux[key_])
# Link-count constraints: for each link k, the flow summed over all OD
# pairs must match the observed count x_0[k] within a tolerance of 1e2.
for k in range(m):
    s = 0
    for i in range(n+1)[1:]:
        for j in range(n+1)[1:]:
            if i != j:
                key = str(k) + '->' + str(i) + '->' + str(j)
                s += x[key]
                # NOTE(review): Gurobi variables default to a lower bound
                # of 0, so this constraint is likely redundant — confirm.
                model.addConstr(x[key] >= 0)
    model.addConstr(s - x_0[k] <= 1e2)
    model.addConstr(x_0[k] - s <= 1e2)
# Flow-conservation constraints: for every node l and OD pair (i, j), the
# signed flow N @ x of that pair must equal -demand at the origin node,
# +demand at the destination node, and 0 at intermediate nodes.
for l in range(n):
    for i in range(n+1)[1:]:
        for j in range(n+1)[1:]:
            if i != j:
                key_ = str(i) + '->' + str(j)
                key__ = '(' + str(i) + ',' + str(j) + ')'
                s = 0
                for k in range(m):
                    key = str(k) + '->' + str(i) + '->' + str(j)
                    s += N[l, k] * x[key]
                # l is 0-based while node ids are 1-based, hence l+1.
                if (l+1 == i):
                    # origin node of the pair
                    model.addConstr(s + demands_Sioux[key__] == 0)
                elif (l+1 == j):
                    # destination node of the pair
                    model.addConstr(s - demands_Sioux[key__]== 0)
                else:
                    # intermediate node: flow passes through unchanged
                    model.addConstr(s == 0)
                # if (i == 1 and j == 2):
                #     print(s)
# model.setParam('OutputFlag', False)
model.optimize()
lam_list = []
for v in model.getVars():
print('%s %g' % (v.varName, v.x))
lam_list.append(v.x)
# print('Obj: %g' % obj.getValue())
sum(lam_list[0:551])
# write estimation result to file
n = 24 # number of nodes
with open('OD_demand_matrix_Sioux.txt', 'w') as the_file:
idx = 0
for i in range(n + 1)[1:]:
for j in range(n + 1)[1:]:
if i != j:
the_file.write("%d,%d,%f\n" %(i, j, lam_list[idx]))
idx += 1
```
| github_jupyter |
```
%matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt
import datetime
import pytz
import urllib as ur
import json
```
## Funções para auxiliar a manipulação do tempo
```
def convert_datetime_timezone(dt, tz1, tz2):
    """
    Convert a unix timestamp from time zone tz1 (e.g. UTC or Sao Paulo)
    to tz2 (expected to be Vancouver) and return it as a unix timestamp.

    Parameters
    ------------
    dt : unix timestamp
        Timestamp to be converted to the other time zone.
    tz1, tz2 : Timezone String
        The current time zone and the time zone to convert the value to.

    Returns
    ----------
    dt : unix timestamp
        Timestamp converted to the Vancouver time zone.
    """
    tz1 = pytz.timezone(tz1)
    tz2 = pytz.timezone(tz2)

    dt = datetime.datetime.fromtimestamp(dt)
    dt = datetime.datetime.strptime(str(dt), "%Y-%m-%d %H:%M:%S")
    dt = tz1.localize(dt)
    dt = dt.astimezone(tz2)
    try:
        # Regular Vancouver offset (PST, UTC-8).
        dt = datetime.datetime.strptime(str(dt), "%Y-%m-%d %H:%M:%S-08:00")
    except ValueError:
        # Daylight-saving offset in Vancouver (PDT, UTC-7). Narrowed from a
        # bare `except:` so unrelated errors are no longer swallowed.
        dt = datetime.datetime.strptime(str(dt), "%Y-%m-%d %H:%M:%S-07:00")
    dt = int(dt.timestamp())
    return dt
def Hour_Diff(h1, h2):
    """
    Return the absolute difference between two given times, in minutes.

    Parameters
    ----------
    h1, h2 : unix timestamp
        Start and end times whose difference is computed.

    Returns
    ---------
    diff : float
        Difference between the two given times, in minutes.
    """
    start = datetime.datetime.fromtimestamp(h1)
    end = datetime.datetime.fromtimestamp(h2)
    return abs((end - start).total_seconds()) / 60
```
## Requisitando Estacionamentos, carros e coordenadas
```
# `import urllib as ur` at the top of the notebook does not guarantee the
# `request` submodule is loaded; import it explicitly before using it.
import urllib.request

# Fetch the current car and parking-location listings from the Modo API.
response = ur.request.urlopen('https://bookit.modo.coop/api/v2/car_list').read().decode('UTF-8')
json_cars = json.loads(response)
response = ur.request.urlopen('https://bookit.modo.coop/api/v2/location_list').read().decode('UTF-8')
json_location = json.loads(response)

# One row per car; location/lat/lon are filled in below. lat/lon start as
# floats so the coordinates are not truncated when assigned into the column.
car_coord = pd.DataFrame(list(json_cars['Response']['Cars'].keys()), columns=['car_id'], dtype='int')
car_coord['location'] = [0] * len(car_coord)
car_coord['lat'] = [0.0] * len(car_coord)
car_coord['lon'] = [0.0] * len(car_coord)
for i in range(len(car_coord)):
    # Assign through DataFrame.loc instead of chained `df[col].iloc[i] = ...`,
    # which triggers SettingWithCopyWarning and stops propagating under
    # pandas copy-on-write.
    location = int(json_cars['Response']['Cars'][str(car_coord['car_id'].iloc[i])]['Location'][0]['LocationID'])
    car_coord.loc[car_coord.index[i], 'location'] = location
    car_coord.loc[car_coord.index[i], 'lat'] = float(json_location['Response']['Locations'][str(location)]['Latitude'])
    car_coord.loc[car_coord.index[i], 'lon'] = float(json_location['Response']['Locations'][str(location)]['Longitude'])
```
## Fazendo a média da quantidade de viagens para cada hora
```
# CSV created from the data collected by the ModoApi_Data_Filter notebook.
dfTravels = pd.read_csv('travels_v2.csv')
# cont_travels() below assumes the rows are grouped by car, so sort first.
dfTravels = dfTravels.sort_values(by='car_id')
def cont_travels(df):
    """
    Count, per car, how many travels start in each hour of the day.

    Parameters
    ----------
    df : pandas.DataFrame
        Registered travel records, sorted by 'car_id', with a unix
        timestamp column 'start' for each travel.

    Returns
    ---------
    df_cont : pandas.DataFrame
        One row per vehicle (column 'car_id') and one column per hour of
        the day (0-23); each value is the number of travels started in
        that hour by that vehicle.
    """
    df_cont = []
    id_prox = df['car_id'].iloc[1]
    id_atual = df['car_id'].iloc[0]
    tempo = []
    for i in range(len(df)):
        try:
            id_prox = df['car_id'].iloc[i+1]
            id_atual = df['car_id'].iloc[i]
        except IndexError:
            # Last row: i+1 is out of range, keep the ids from the
            # previous iteration. (Narrowed from a bare `except:`.)
            pass
        # When the car id is about to change, flush the accumulated hourly
        # counts; the `i == len(df)-1` test keeps the last row included.
        if (id_prox != id_atual or i == len(df)-1):
            hour = datetime.datetime.fromtimestamp(df['start'].iloc[i]).hour
            auxHour = [0] * 24
            auxHour[hour] += 1
            tempo.append(auxHour)
            # Sum the occurrences of every hour for this vehicle.
            tempo = pd.DataFrame(tempo)
            tempo = list(pd.Series.sum(tempo))
            tempo = [id_atual] + tempo
            df_cont.append(tempo)
            tempo = []
        else:
            # Record the travel's starting hour in a 24-slot one-hot list.
            # Consecutive travels within the same hour are counted once.
            hour = datetime.datetime.fromtimestamp(df['start'].iloc[i]).hour
            hour_anterior = datetime.datetime.fromtimestamp(df['start'].iloc[i-1]).hour
            if (hour == hour_anterior):
                continue
            auxHour = [0] * 24
            auxHour[hour] += 1
            tempo.append(auxHour)
    # Column labels: 'car_id' followed by the 24 hour slots. (A discarded
    # `[format(x,'02d') for x in labels]` no-op was removed here.)
    labels = list(range(-1,24))
    labels[0] = 'car_id'
    df_cont = pd.DataFrame(df_cont, columns=labels)
    df_cont = df_cont.sort_values(by=['car_id'])
    return df_cont
# Per-car table of travels per hour of the day.
hours = cont_travels(dfTravels)
# Prepare the dataframe to receive location, latitude, longitude and the
# number of cars of each station.
hours['lat'] = [0]*len(hours)
hours['lon'] = [0]*len(hours)
hours['location'] = [0]*len(hours)
hours['n_cars'] = [1]*len(hours)
# Attach each car's coordinates.
# NOTE(review): chained assignment (`hours[col].iloc[i] = ...`) triggers
# SettingWithCopyWarning and may stop propagating under newer pandas.
for i in range(len(hours)):
    try:
        coord = car_coord[car_coord['car_id'] == hours['car_id'].iloc[i]]
        hours['lat'].iloc[i] = coord['lat'].iloc[0]
        hours['lon'].iloc[i] = coord['lon'].iloc[0]
        hours['location'].iloc[i] = coord['location'].iloc[0]
    except Exception as e:
        # Cars that have left the fleet have no coordinates in car_coord.
        print(e)
        print('id:'+str(hours['car_id'].iloc[i]))
hours = hours.sort_values(by='location')
# Merge rows that share a station: sum the hourly counts of every car at
# the same location. Each pass of the outer loop re-checks whether any
# duplicated location ids remain.
while(True in list(hours.duplicated(subset=['location']))):
    i = 0
    while i < len(hours):
        try:
            if (hours['location'].iloc[i] == hours['location'].iloc[i+1]):
                print('Antes:')
                print(len(hours))
                # Walk all 24 hours, adding the next row's travel counts.
                for j in range(24):
                    hours[j].iloc[i] = hours[j].iloc[i] + hours[j].iloc[i+1]
                # One more car belongs to this station.
                hours['n_cars'].iloc[i] = hours['n_cars'].iloc[i] + 1
                # Drop the row that was just merged in and re-index.
                hours = hours.drop(hours.index[i+1])
                hours.index = range(len(hours))
                print('Depois:')
                print(len(hours))
        except Exception as e:
            # i+1 ran past the end of the frame: this pass is done.
            print(e)
            break
        i+=1
# Order by station id.
hours = hours.sort_values(by='location')
# Normalize by the number of days (31) and the station's car count.
for i in range(len(hours)):
    for j in range(24):
        hours[j].iloc[i] = hours[j].iloc[i] / (31*hours['n_cars'].iloc[i])
aux_csv = hours
aux_csv.dropna(how='any', axis=0, inplace=True)
# One CSV per hour of the day: station coordinates plus that hour's average.
for i in range(24):
    aux_csv[['lat', 'lon', i]].to_csv('CarrosPorHora/Hour'+str(i)+'_v2.csv')
```
## Plotagem em mapas de calor de cada hora
```
import geoplotlib as gpl
from geoplotlib.utils import read_csv, BoundingBox, DataAccessObject
# Render one heat map per hour of the day, for Vancouver and Victoria.
# NOTE(review): the inner loop below reuses the name `i`; the outer `for`
# still advances correctly (it pulls the next value from the range
# iterator), but `hora` must be captured first, as done here.
for i in range(0,24):
    hora = str(i)
    # Read the CSV with the average value, latitude and longitude of each
    # parking station for this hour.
    location = pd.read_csv('CarrosPorHora/Hour'+hora+'_v2.csv', usecols=[1,2,3])
    data = location
    # Scale the values by a constant so they become visible on the map.
    location[hora] = location[hora] * 100
    location_aux = []
    # Repeat each point proportionally to its value so the kernel-density
    # heat map reflects the magnitudes.
    for i in range(len(location)):
        for j in range(int(location[hora].iloc[i])):
            location_aux.append([location['lat'].iloc[i], location['lon'].iloc[i]])
    location_aux = pd.DataFrame(location_aux, columns=['lat', 'lon'])
    # Vancouver
    gpl.kde(location_aux, bw=3, cut_below=1e-4, cmap='jet', alpha=150 )
    # data = DataAccessObject(pd.DataFrame({'lat': [],'lon': []}))
    # gpl.hist(data, scalemin=0, scalemax=100, cmap='jet', colorscale='lin', alpha=190)
    # Coordinates that make the map focus on Vancouver.
    lat = pd.DataFrame([49.246292, 49.262428, 49.24966])
    lon = pd.DataFrame([-123.11554, -123.116226, -123.04464])
    gpl.set_bbox(BoundingBox.from_points(lon[0], lat[0]))
    gpl.request_zoom(12)
    gpl.set_window_size(1280,700)
    gpl.savefig('CarrosPorHora/CarrosPorHoraPNGs/vanc_'+hora+'_v2')
    # Victoria
    gpl.kde(location_aux, bw=3, cut_below=1e-4, cmap='jet', alpha=150 )
    # data = DataAccessObject(pd.DataFrame({'lat': [],'lon': []}))
    # gpl.hist(data, scalemin=0, scalemax=100, cmap='jet', colorscale='lin', alpha=190)
    # Coordinates that make the map focus on Victoria.
    lat = pd.DataFrame([48.42666, 48.44344, 48.44560])
    lon = pd.DataFrame([-123.36027,-123.35853,-123.33673])
    gpl.set_bbox(BoundingBox.from_points(lon[0], lat[0]))
    gpl.request_zoom(13)
    gpl.set_window_size(1280,700)
    gpl.savefig('CarrosPorHora/CarrosPorHoraPNGs/vic_'+hora+'_v2')
```
| github_jupyter |
# 5장
```
import matplotlib
# Register a Korean-capable font so the Hangul plot titles used below
# render instead of showing missing-glyph boxes.
matplotlib.rc('font', family="NanumBarunGothicOTF")
%matplotlib inline
```
# 5.2 아이리스 데이터셋
```
import pandas as pd
from matplotlib import pyplot as plt
import sklearn.datasets
def get_iris_df():
    """Load the iris dataset as a DataFrame with a text 'species' column."""
    bunch = sklearn.datasets.load_iris()
    frame = pd.DataFrame(bunch['data'], columns=bunch['feature_names'])
    # Map the integer class codes (0-2) to their species names.
    species_names = bunch['target_names']
    frame['species'] = [species_names[code] for code in bunch['target']]
    return frame
df = get_iris_df()
# Keep an alias: later sections (scatter matrix, hexbin, correlations) use
# df_iris even after df is rebound to another dataset.
df_iris = df
```
# 5.3 원형 차트
```
# Pie chart of one variable's per-species totals.
sums_by_species = df.groupby('species').sum()
var = 'sepal width (cm)'
sums_by_species[var].plot(kind='pie', fontsize=20)
plt.ylabel(var, horizontalalignment='left')
plt.title('꽃받침 너비로 분류한 붓꽃', fontsize=25)
# plt.savefig('iris_pie_for_one_variable.png')
# plt.close()
# One pie chart per measured variable, arranged on a 2x2 grid.
sums_by_species = df.groupby('species').sum()
sums_by_species.plot(kind='pie', subplots=True,
                     layout=(2,2), legend=False)
plt.title('종에 따른 전체 측정값Total Measurements, by Species')
# plt.savefig('iris_pie_for_each_variable.png')
# plt.close()
```
# 5.4 막대그래프
```
# Bar chart of one variable's per-species totals.
sums_by_species = df.groupby('species').sum()
var = 'sepal width (cm)'
sums_by_species[var].plot(kind='bar', fontsize=15, rot=30)
plt.title('꽃받침 너비(cm)로 분류한 붓꽃', fontsize=20)
# plt.savefig('iris_bar_for_one_variable.png')
# plt.close()
# One bar chart per measured variable.
sums_by_species = df.groupby('species').sum()
sums_by_species.plot(
    kind='bar', subplots=True, fontsize=12)
plt.suptitle('종에 따른 전체 측정값')
# plt.savefig('iris_bar_for_each_variable.png')
# plt.close()
```
# 5.5 히스토그램
```
# Histogram of each numeric column on a 2x2 grid.
df.plot(kind='hist', subplots=True, layout=(2,2))
plt.suptitle('붓꽃 히스토그램', fontsize=20)
# plt.show()
# Overlapping petal-length histograms, one per species.
for spec in df['species'].unique():
    forspec = df[df['species']==spec]
    forspec['petal length (cm)'].plot(kind='hist', alpha=0.4, label=spec)
plt.legend(loc='upper right')
plt.suptitle('종에 따른 꽃잎 길이')
# plt.savefig('iris_hist_by_spec.png')
```
# 5.6 평균, 표준편차, 중간값, 백분위
```
# Basic summary statistics of the petal-length column.
col = df['petal length (cm)']
average = col.mean()
std = col.std()
median = col.quantile(0.5)  # the median is the 50th percentile
percentile25 = col.quantile(0.25)
percentile75 = col.quantile(0.75)
print(average, std, median, percentile25, percentile75)
```
### 아웃라이어 걸러내기
```
# Mean of the interquartile interior only: values at or beyond the
# 25th/75th percentiles are excluded as outliers (strict inequalities).
col = df['petal length (cm)']
perc25 = col.quantile(0.25)
perc75 = col.quantile(0.75)
clean_avg = col[(col>perc25)&(col<perc75)].mean()
print(clean_avg)
```
# 5.7 상자그림
```
col = 'sepal length (cm)'
# One observation index (0-49) per species so rows can be aligned
# side by side in the pivot below.
df['ind'] = pd.Series(df.index).apply(lambda i: i % 50)
# Use keyword arguments: positional index/columns arguments to
# DataFrame.pivot were deprecated and removed in pandas 2.0.
df.pivot(index='ind', columns='species')[col].plot(kind='box')
# plt.show()
```
# 5.8 산포도
```
# Simple scatter plot of sepal length vs width.
df.plot(kind="scatter",
        x="sepal length (cm)", y="sepal width (cm)")
plt.title("Length vs Width")
# plt.show()
# Same scatter with one colour/marker/size per species on shared axes.
colors = ["r", "g", "b"]
markers= [".", "*", "^"]
fig, ax = plt.subplots(1, 1)
for i, spec in enumerate(df['species'].unique() ):
    ddf = df[df['species']==spec]
    ddf.plot(kind="scatter",
             x="sepal width (cm)", y="sepal length (cm)",
             alpha=0.5, s=10*(i+1), ax=ax,
             color=colors[i], marker=markers[i], label=spec)
plt.legend()
plt.show()
import pandas as pd
import sklearn.datasets as ds
import matplotlib.pyplot as plt
# Create the pandas dataframe from the Boston housing data.
# NOTE(review): load_boston was removed in scikit-learn 1.2; this cell
# needs an older scikit-learn version to run.
bs = ds.load_boston()
df = pd.DataFrame(bs.data, columns=bs.feature_names)
df['MEDV'] = bs.target
# Plain scatter plot on linear axes.
df.plot(x='CRIM',y='MEDV',kind='scatter')
plt.title('일반축에 나타낸 범죄 발생률')
# plt.show()
```
## 로그를 적용
```
# Same data with a logarithmic x-axis to spread out the skewed crime rates.
df.plot(x='CRIM',y='MEDV',kind='scatter',logx=True)
plt.title('Crime rate on logarithmic axis')
plt.show()
```
# 5.10 산포 행렬
```
# pandas.tools.plotting was removed in pandas 0.20; scatter_matrix now
# lives in pandas.plotting.
from pandas.plotting import scatter_matrix
scatter_matrix(df_iris)
plt.show()
```
# 5.11 히트맵
```
# Hexagonal-bin density plot ("heat map") of sepal width vs length.
df_iris.plot(kind="hexbin", x="sepal width (cm)", y="sepal length (cm)")
plt.show()
```
# 5.12 상관관계
```
df["sepal width (cm)"].corr(df["sepal length (cm)"]) # Pearson corr
df["sepal width (cm)"].corr(df["sepal length (cm)"], method="pearson")
df["sepal width (cm)"].corr(df["sepal length (cm)"], method="spearman")
# NOTE(review): this line duplicates the spearman call above; it was
# probably intended to demonstrate method="kendall" instead.
df["sepal width (cm)"].corr(df["sepal length (cm)"], method="spearman")
```
# 5.12 시계열 데이터
```
# $ pip install statsmodels
import statsmodels.api as sm
dta = sm.datasets.co2.load_pandas().data
dta.plot()
plt.title("이산화탄소 농도")
plt.ylabel("PPM")
plt.show()
```
## 구글 주가 불러오는 코드는 야후 API가 작동하지 않아서 생략합니다.
| github_jupyter |
In this notebook you can define your own configuration and run the model based on your custom configuration.
## Dataset
`dataset_name` is the name of the dataset which will be used in the model. When using KITTI, `dataset_path` is the path to the `data_paths` directory that contains every image and its pair path; for Cityscape it is the path to the directory that contains the `leftImg8bit` and `rightImg8bit` folders. The `resize` value selects the width and height dimensions that each image will be resized to.
```
# NOTE: `=` (assignment), not `:` (annotation). The original line
# `dataset_name: 'KITTI'` was a bare annotation statement, so dataset_name
# stayed undefined and building the config dict below raised a NameError.
dataset_name = 'KITTI'
dataset_path = '.'
resize = [128, 256]
```
## Model
`baseline_model` selects the compression model. The accepted models for this parameter are bmshj18 for [Variational image compression with a scale hyperprior](https://arxiv.org/abs/1802.01436) and bls17 for [End-to-end Optimized Image Compression](https://arxiv.org/abs/1611.01704). If `use_side_info` is set as `True`, then the baseline model is modified using our proposed method for using side information for compressing.
If `load_weight` is `True`, then in model initialization, the weight saved in `weight_path` is loaded to the model. You can also specify the experiment name in `experiment_name`.
```
baseline_model = 'bls17' # can be bmshj18 for Variational image compression with a scale hyperprior by Ballé, et al.
# or bls17 for End-to-end Optimized Image Compression by Ballé, et al.
use_side_info = True # if True then the modified version of baseline model for distributed compression is used.
num_filters = 192 # number of filters used in the baseline model network
cuda = True # whether to run the model on a CUDA GPU
load_weight = False # if True, the weight at weight_path is loaded when the model is initialized
weight_path = './pretrained_weights/ours+balle17_MS-SSIM_lambda3e-05.pt' # weight path for loading the weight
# note that we provide some pretrained weights, accessible from the anonymous link provided in README.md
```
## Training
For training set `train` to be `True`. `lambda` shows the lambda value in the rate-distortion equation and `alpha` and `beta` correspond to the handles on the reconstruction of the correlated image and amount of common information extracted from the decoder-only side information, respectively. `distortion_loss` selects the distortion evaluating method. Its accepted values are MS-SSIM for the ms-ssim method or MSE for mean squared error.
`verbose_period: 50` indicates that every 50 epochs print the results of the validation dataset.
```
train = True # set to False to skip training
epochs = 50000
train_batch_size = 1
lr = 0.0001 # learning rate
lmbda = 0.00003 # the lambda value in rate-distortion equation
alpha = 1 # handle on the reconstruction of the correlated image
beta = 1 # handle on the amount of common information extracted from the decoder-only side information
distortion_loss = 'MS-SSIM' # can be MS-SSIM or MSE. selects the method by which the distortion is calculated during training
verbose_period = 50 # non-positive value indicates no verbose
```
## Weights and Results parameters
If you wish to save the model weights after training set `save_weights` `True`. `save_output_path` shows the directory path where the model weights are saved.
For the weights, in `save_output_path` a `weight` folder will be created, and the weights will be saved there with the name according to `experiment_name`.
```
save_weights = True # save the model weights after training
save_output_path = './outputs' # path where results and weights will be saved
experiment_name = 'bls17_with_side_info_MS-SSIM_lambda:3e-05' # handle used to name saved weights and results
```
## Test
If you wish to test the model and save the results set `test` to `True`. If `save_image` is set to `True` then a `results` folder will be created, and the reconstructed images will be saved in `save_output_path/results` during testing, with the results named according to `experiment_name`.
```
test = True # test the model and save the results
save_image = True # save reconstructed images under save_output_path/results
```
## Inference
In order to (only) carry out inference, please open `configs/config.yaml` and change the relevant lines as follows:
```
resize: [128, 256] # we used this crop size for our inference
dataset_path: '.'
train: False
load_weight: True
test: True
save_output_path: './inference'
save_image: True
```
Download the desired weights, put them in the `pretrained_weights` folder, and put the dataset folder in the root directory.
Based on the weight you chose, specify the weight name, and the experiment name in `configs/config.yaml`:
```
weight_path: './pretrained_weights/...' # load a specified pre-trained weight
experiment_name: '...' # a handle for the saved results of the inference
```
Also, change `baseline_model` and `use_side_info` parameters in `configs/config.yaml` accordingly.
For example, for the `balle2017+ours` weights, these parameters should be:
```
baseline_model: 'bls17'
use_side_info: True
```
After running the code using the commands in below section, the results will be saved in `inference` folder.
## Saving Custom Configuration
By running this piece of code you can save your configuration as a yaml file in the configs folder. You can set your configuration file name by changing the `config_name` variable.
```
import yaml

# Collect every configuration value defined above into one dictionary so
# it can be dumped as a YAML config file.
config = {
    "dataset_name": dataset_name,
    "dataset_path": dataset_path,
    "resize": resize,
    "baseline_model": baseline_model,
    "use_side_info": use_side_info,
    "num_filters": num_filters,
    "cuda": cuda,
    "load_weight": load_weight,
    "weight_path": weight_path,
    "experiment_name": experiment_name,
    "train": train,
    "epochs": epochs,
    "train_batch_size": train_batch_size,
    "lr": lr,
    "lambda": lmbda,
    # alpha and beta are described in the Training section but were
    # previously missing from the saved configuration.
    "alpha": alpha,
    "beta": beta,
    "distortion_loss": distortion_loss,
    "verbose_period": verbose_period,
    "save_weights": save_weights,
    "save_output_path": save_output_path,
    "test": test,
    "save_image": save_image
}

config_name = "CUSTOM_CONFIG_FILE_NAME.yaml"
# Fixed: the original `with open('configs/' + config_name) + config_name, 'w')`
# had unbalanced parentheses and was a SyntaxError.
with open('configs/' + config_name, 'w') as outfile:
    yaml.dump(config, outfile, default_flow_style=None, sort_keys=False)
```
## Running the Model
```
!python main.py --config=configs/$config_name
```
| github_jupyter |
# Case 1. Heart Disease Classification
Joona Klemetti
4.2.2018
Cognitive Systems for Health Technology Applications
Helsinki Metropolia University of Applied Sciences
# 1. Objectives
The aim of this case is to learn to manipulate and read data from external sources using pandas functions, and to use Keras dense neural networks to build an expert system that supports diagnostic decision making.
<br>
After the neural network and the expert system are built, the intention is to examine how the number of nodes, layers and epochs affects the system's reliability. It is also tested how the batch size
and the train-test split affect the results.
# 2. Required libraries
At first it is necessary to import all libraries. In this assignment numpy is used for scientific computing and creating multidimensional arrays, matplotlib for plotting figures, pandas for data analysis and handling, scikit-learn for preprocessing the data and splitting it into train and test groups, and Keras to build the neural network.
```
# import libraries
import numpy as np                 # numerical computing and multidimensional arrays
import matplotlib.pyplot as plt    # plotting figures
%matplotlib inline
import pandas as pd                # data analysis and handling
import sklearn as sk
from sklearn import preprocessing                      # data preprocessing / scaling
from sklearn.model_selection import train_test_split   # train/test splitting
import keras                                           # dense neural networks
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from keras import models
from keras import layers
# Check the versions
print('numpy:', np.__version__)
print('pandas:', pd.__version__)
print('sklearn:', sk.__version__)
print('keras:', keras.__version__)
```
# 3. Data description and preprocessing
The data consists of four different datasets with numerical information on heart disease diagnoses. All datasets are provided in the same format, which made it easy to merge them into one data frame. For more information on the datasets, see the original information file by David Aha: https://archive.ics.uci.edu/ml/machine-learning-databases/heart-disease/heart-disease.names
<br>
First the datasets are imported into data frames and merged into one data frame. After that the data is described and analysed with figures and tables. Preprocessing continues by replacing missing values with the column mode values. Mode values are chosen because mean values could corrupt some attributes; e.g. the 'thal' column has a mean of 5.088 but a mode of 3. The mode is the most common value. Because a 'thal' value should be 3, 6 or 7, it is better to use mode values instead of mean values.
<br>
The next step is to define the labels. The data frame's 'num' value represents a person's health condition: if 'num' is 0 the person is healthy, otherwise the person has heart disease. The label is the output value, and in this case it should be 1 or 0, true or false. After defining the labels, the 'num' attribute is dropped from the training set. Next the data frame is converted to a numerical array and scaled between 0 and 1; this is important because otherwise small numerical values may remain insignificant. The last task before defining the neural network is to divide the data into training and testing sets. 35% of the data is used for the test set (matching test_size=0.35 in the code), using the train_test_split() function from the scikit-learn library.
```
# Locations of the four processed UCI heart-disease datasets
filename = 'http://archive.ics.uci.edu/ml/machine-learning-databases/heart-disease/processed.cleveland.data'
filename1 = 'https://archive.ics.uci.edu/ml/machine-learning-databases/heart-disease/processed.hungarian.data'
filename2 = 'https://archive.ics.uci.edu/ml/machine-learning-databases/heart-disease/processed.switzerland.data'
filename3 = 'https://archive.ics.uci.edu/ml/machine-learning-databases/heart-disease/processed.va.data'

# Column names for the data (see the dataset description file for meanings)
colnames = ['age', 'sex', 'cp', 'trestbps',
            'chol', 'fbs', 'restecg', 'thalach',
            'exang', 'oldpeak', 'slope', 'ca', 'thal', 'num']

# Read the four datasets into data frames; '?' marks a missing value
df1 = pd.read_csv(filename, names=colnames, na_values='?')
df2 = pd.read_csv(filename1, names=colnames, na_values='?')
df3 = pd.read_csv(filename2, names=colnames, na_values='?')
df4 = pd.read_csv(filename3, names=colnames, na_values='?')

# Merge all 4 data frames into one. ignore_index=True builds a contiguous
# 0..n-1 index, replacing the hard-coded df.index = range(920) so the code
# keeps working if the source files change size.
frames = [df1, df2, df3, df4]
df = pd.concat(frames, ignore_index=True)

# Calculate descriptive statistics (displayed in the notebook output)
df.describe()

# Create a histogram of the age distribution
df['age'].hist(bins=np.arange(20, 90, 5))
plt.xlabel('Age (years)')
plt.ylabel('Count')
plt.show()

# Replace missing values with each column's mode (most common value).
# Fix: the original df.where(~np.isnan(df), df.mode(), axis='columns')
# aligns df.mode() with df by row index, so NaNs outside the first mode
# row(s) were never filled; fillna with the first mode row fills every row.
df = df.fillna(df.mode().iloc[0])

# Calculate the labels:
# output value 0 = healthy, > 0 = heart disease
label = (df['num'] > 0).values

# Select the feature columns for training (everything except 'num')
columns = ['age', 'sex', 'cp', 'trestbps',
           'chol', 'fbs', 'restecg', 'thalach',
           'exang', 'oldpeak', 'slope', 'ca', 'thal']

# Convert the data into a numerical array
data = df[columns].values

# Scale every feature to [0, 1] so large-valued columns (e.g. 'chol') do
# not dominate the small-valued ones during training
min_max_scaler = preprocessing.MinMaxScaler()
data = min_max_scaler.fit_transform(data)

# Divide the data into training (65%) and testing (35%) sets
train_data, test_data, train_label, test_label = train_test_split(
    data, label, test_size=0.35)
```
# 4. Modeling and compilation
In this case the Keras Sequential model is used to build the dense neural network. First the architecture of the network is defined and layers are added via the .add() method. Compilation has to be done before training; it configures the learning process. According to the rule of thumb $N_h = N_s / (\alpha \cdot (N_i + N_o))$ the number of nodes should be between 6 and 32. 10 nodes are used for both hidden layers because that seems to work best.
```
# Define the architecture of the network: a Sequential model with two
# hidden layers of 10 ReLU units and a single sigmoid output unit that
# gives the probability of heart disease for a 13-feature input vector.
# (The original code first assigned network = [], a dead value that was
# immediately overwritten; the assignment is removed.)
network = models.Sequential()
network.add(layers.Dense(10, activation='relu', input_shape=(13,)))
network.add(layers.Dense(10, activation='relu'))
network.add(layers.Dense(1, activation='sigmoid'))

# Compile the network: binary cross-entropy matches the single sigmoid
# output of a two-class problem; accuracy is tracked as a readable metric.
network.compile(optimizer='rmsprop',
                loss='binary_crossentropy',
                metrics=['accuracy'])
```
# 5. Training and Validation
The next step is to train the network. Training is executed with the .fit() method.
```
# Train the network; the returned History object (h) records the per-epoch
# loss/accuracy curves that are plotted in the results section.
# N = number of training epochs
N = 120
# verbose=0 silences the per-epoch console output. The test set is passed
# as validation_data so validation metrics are recorded every epoch.
# NOTE(review): using the test set for validation lets it influence
# training decisions; a separate validation split would be cleaner.
h = network.fit(train_data, train_label,
verbose = 0,
epochs = N,
batch_size=128,
validation_data=(test_data, test_label)
)
```
# 6. Evaluation
Evaluation is done with the .evaluate() method, which computes the test set's loss and accuracy.
```
# Evaluate the trained network on the held-out test set; .evaluate()
# returns [loss, accuracy] computed in batches of 128.
score = network.evaluate(test_data, test_label, batch_size = 128)
# Bare expression: displays the scores in the notebook output
score
```
# 7. Results and Discussion
In testing the neural network it was noticed that, regardless of the chosen number of nodes and train-test split, the accuracy and loss stay about the same. Batch size doesn't seem to affect the results either. Accuracy generally stays in the range 0.77 to 0.85, and the loss between 0.40 and 0.50. If the number of nodes is increased too much, large variation in test accuracy and loss is observed. The best train-test split seems to be about 70% for training and 30% for testing; small variations in the split are not harmful. The optimal number of nodes seems to be between 8 and 15. Adding more layers doesn't seem to affect the scores either. Randomness affects the accuracy and loss a lot.
```
# Plot the training/validation curves recorded by network.fit().
# Older Keras versions store accuracy under 'acc'/'val_acc', newer ones
# under 'accuracy'/'val_accuracy'; fall back so the plot works with either.
hist = h.history
epochs = range(1, N + 1)
acc = hist.get('acc', hist.get('accuracy'))
val_acc = hist.get('val_acc', hist.get('val_accuracy'))
loss = hist['loss']
val_loss = hist['val_loss']

def _plot_curves(train_values, val_values, ylabel, ylim):
    """Plot one training (dots) vs. validation (line) curve pair."""
    plt.figure(figsize=(20, 5))
    plt.plot(epochs, train_values, 'bo', label='Training')
    plt.plot(epochs, val_values, 'b', label='Validation')
    plt.xlabel('Epochs')
    plt.ylabel(ylabel)
    plt.ylim(ylim)
    plt.grid()
    plt.legend()
    plt.show()

# Accuracy plot
_plot_curves(acc, val_acc, 'Accuracy', [0, 1])
# Loss plot
_plot_curves(loss, val_loss, 'Loss', [0.2, 0.8])
```
# 8. Conclusions
Case 1 was a very good introduction to neural networks. I think the difficulty level was just right at this point. The objectives were achieved and the neural network works.
<br>
There was a little variation in the results. Even though the same attributes were used, randomness seems to affect the results. That must be due to the train_test_split() function, which splits the data into random train and test sets; therefore there was variation in the results.
<br>
An accuracy of about 80% is fine, but for a diagnostic medical system it isn't enough. Even if the accuracy were always 85%, it would still be too poor. More patient data is needed and the system has to be developed further before use in real situations, even though the system is only intended to support diagnostic decision making.
| github_jupyter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.