text stringlengths 2.5k 6.39M | kind stringclasses 3
values |
|---|---|
```
from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_absolute_error, mean_squared_error
import random
import numpy as np
import pandas as pd
import os
os.chdir("..")
%load_ext autoreload
%autoreload 2
```
# Utility Functions
```
def make_mixed_regression(n_samples, n_features, n_categories):
    """Create a synthetic multi-target regression frame with mixed feature types.

    Args:
        n_samples (int): Number of rows to generate.
        n_features (int): Total number of feature columns.
        n_categories (int): Number of columns converted to 4-level categoricals.

    Returns:
        (pd.DataFrame, list, list): The data (features plus ``target_1`` and
        ``target_2`` columns), the categorical column names, and the numerical
        column names.
    """
    X, y = make_regression(n_samples=n_samples, n_features=n_features, random_state=42, n_informative=5, n_targets=2)
    # random.sample draws WITHOUT replacement; random.choices could pick the
    # same column twice, silently producing fewer than n_categories
    # categorical columns (and qcut-ing an already-discretized column).
    cat_cols = random.sample(list(range(X.shape[-1])), k=n_categories)
    num_cols = [i for i in range(X.shape[-1]) if i not in cat_cols]
    for col in cat_cols:
        # Discretize into quartile bins; .codes gives the integer bin labels.
        X[:, col] = pd.qcut(X[:, col], q=4).codes.astype(int)
    col_names = []
    num_col_names = []
    cat_col_names = []
    for i in range(X.shape[-1]):
        if i in cat_cols:
            col_names.append(f"cat_col_{i}")
            cat_col_names.append(f"cat_col_{i}")
        if i in num_cols:
            col_names.append(f"num_col_{i}")
            num_col_names.append(f"num_col_{i}")
    X = pd.DataFrame(X, columns=col_names)
    y = pd.DataFrame(y, columns=["target_1", "target_2"])
    data = X.join(y)
    return data, cat_col_names, num_col_names
def print_metrics(y_true, y_pred, tag):
    """Print the MSE and MAE of *y_pred* against *y_true*, prefixed with *tag*."""
    # Normalise both inputs to flat numpy arrays before scoring.
    if isinstance(y_true, (pd.DataFrame, pd.Series)):
        y_true = y_true.values
    if isinstance(y_pred, (pd.DataFrame, pd.Series)):
        y_pred = y_pred.values
    if y_true.ndim > 1:
        y_true = y_true.ravel()
    if y_pred.ndim > 1:
        y_pred = y_pred.ravel()
    mse = mean_squared_error(y_true, y_pred)
    mae = mean_absolute_error(y_true, y_pred)
    print(f"{tag} MSE: {mse} | {tag} MAE: {mae}")
```
# Generate Synthetic Data
First of all, let's create a synthetic dataset which is a mix of numerical and categorical features and has multiple targets for regression.
```
# Build a 10k-row synthetic dataset with 20 features (4 categorical) and two targets.
data, cat_col_names, num_col_names = make_mixed_regression(n_samples=10000, n_features=20, n_categories=4)
target_cols = ['target_1','target_2']
# Default 75/25 split into train/test; the train part is split again for validation.
train, test = train_test_split(data, random_state=42)
train, val = train_test_split(train, random_state=42)
```
# Importing the Library
```
from pytorch_tabular import TabularModel
from pytorch_tabular.models import CategoryEmbeddingModelConfig, NodeConfig
from pytorch_tabular.config import DataConfig, OptimizerConfig, TrainerConfig, ExperimentConfig
batch_size = 1024 #Will set the same in the Trainer YAML file
# Steps per epoch is needed up front by the OneCycleLR schedule below.
steps_per_epoch = int(train.shape[0]/1024)
epochs = 20
```
## Basic
**Define the Configs**
In the Basic tutorial, we saw how we declare these params programmatically. We can also use YAML files to manage the configuration. In that case, we just need to pass in the path to the file as the argument in `TabularModel`. Let's use a YAML file for TrainerConfig.
For the Learning Rate Scheduler, let's use a OneCycleLR popularized by fast.ai.
```
data_config = DataConfig(
    target=target_cols, #target should always be a list. Multi-targets are only supported for regression. Multi-Task Classification is not implemented
    continuous_cols=num_col_names,
    categorical_cols=cat_col_names,
)
# OneCycleLR needs the full schedule length (epochs x steps_per_epoch) up front.
optimizer_config = OptimizerConfig(lr_scheduler="OneCycleLR", lr_scheduler_params={"max_lr":0.00478, "epochs": epochs, "steps_per_epoch":steps_per_epoch})
model_config = CategoryEmbeddingModelConfig(
    task="regression",
    layers="1024-512-512", # Number of nodes in each layer
    activation="LeakyReLU", # Activation between each layers
    learning_rate = 1e-3
)
```
**Trainer Config YAML file**
```yaml
# TrainerConfig for pytorch_tabular, loaded via
# trainer_config="examples/yaml_config/trainer_config.yml" below.
batch_size: 1024
fast_dev_run: false
max_epochs: 20
min_epochs: 1
gpus: -1
accumulate_grad_batches: 1
auto_lr_find: false
check_val_every_n_epoch: 1
gradient_clip_val: 0.0
overfit_batches: 0.0
profiler: null
early_stopping: null #null because we want to turn off early stopping. With OneCycleLR, it doesn't always work great
early_stopping_min_delta: 0.001
early_stopping_mode: min
early_stopping_patience: 3
checkpoints: valid_loss
checkpoints_path: saved_models
checkpoints_mode: min
checkpoints_save_top_k: 1
load_best: true
track_grad_norm: -1
```
```
# Trainer settings come from the YAML file instead of a TrainerConfig object.
tabular_model = TabularModel(
    data_config=data_config,
    model_config=model_config,
    optimizer_config=optimizer_config,
    trainer_config="examples/yaml_config/trainer_config.yml",
)
tabular_model.fit(train=train, validation=val)
result = tabular_model.evaluate(test)
```
We can see the metrics and loss for each target and a total loss/metric. We can pin the EarlyStopping or the Checkpoint Saving on any one of these metrics
```
# Holdout predictions; one "<target>_prediction" column is added per target.
pred_df = tabular_model.predict(test)
pred_df.head()
print("Target 1")
print_metrics(test['target_1'], pred_df["target_1_prediction"], tag="Holdout")
print("Target 2")
print_metrics(test['target_2'], pred_df["target_2_prediction"], tag="Holdout")
```
## Advanced
Let's do the following:
1. A data transform for the continuous columns
2. Set Target Ranges for the multiple targets
3. Use NODE model
4. A Custom Optimizer
```
#Since we are using a lower learning rate, increasing the epochs
batch_size = 512
steps_per_epoch = int(train.shape[0]/batch_size)
epochs = 50
data_config = DataConfig(
    target=target_cols, #target should always be a list. Multi-targets are only supported for regression. Multi-Task Classification is not implemented
    continuous_cols=num_col_names,
    categorical_cols=cat_col_names,
    # Map continuous features through a quantile transform to a normal shape.
    continuous_feature_transform="quantile_normal"
)
trainer_config = TrainerConfig(
    auto_lr_find=False, # Runs the LRFinder to automatically derive a learning rate
    batch_size=batch_size,
    max_epochs=epochs,
    early_stopping=None,
    accumulate_grad_batches=2,
    gpus=-1, #index of the GPU to use. -1 means using all available GPUs. None, means CPU
)
optimizer_config = OptimizerConfig(lr_scheduler="OneCycleLR", lr_scheduler_params={"max_lr":2e-3, "epochs": epochs, "steps_per_epoch":steps_per_epoch})
model_config = NodeConfig(
    task="regression",
    num_layers=2, # Number of Dense Layers
    num_trees=1024, #Number of Trees in each layer
    depth=5, #Depth of each Tree
    embed_categorical=False, #If True, will use a learned embedding, else it will use LeaveOneOutEncoding for categorical columns
    learning_rate = 1e-3,
    # Clamp each prediction to the target's observed range in the training data.
    target_range=[(train[col].min(),train[col].max()) for col in target_cols]
)
tabular_model = TabularModel(
    data_config=data_config,
    model_config=model_config,
    optimizer_config=optimizer_config,
    trainer_config=trainer_config,
)
from torch_optimizer import QHAdam
from sklearn.preprocessing import PowerTransformer
# A custom optimizer (QHAdam) and its hyper-parameters are passed at fit time.
tabular_model.fit(train=train,
                  validation=val,
                  # target_transform=PowerTransformer(method="yeo-johnson"),
                  optimizer=QHAdam,
                  optimizer_params={"nus": (0.7, 1.0), "betas": (0.95, 0.998)})
result = tabular_model.evaluate(test)
pred_df = tabular_model.predict(test)
pred_df.head()
print("Target 1")
print_metrics(test['target_1'], pred_df["target_1_prediction"], tag="Holdout")
print("Target 2")
print_metrics(test['target_2'], pred_df["target_2_prediction"], tag="Holdout")
```
## Comparison
|Target|Basic|Advanced|
|--|--|--|
|Target 1 MSE|8054.42|**6342.21**|
|Target 1 MAE|69.09|**61.97**|
|Target 2 MSE|13669.88|**6692.86**|
|Target 2 MAE|90.77|**60.51**|
| github_jupyter |
```
import pandas as pd
import altair as alt
# Render inside the notebook and spill data to JSON files instead of embedding it.
alt.renderers.enable('notebook') # https://altair-viz.github.io/user_guide/renderers.html
alt.data_transformers.enable('json') # https://altair-viz.github.io/user_guide/faq.html
%load_ext watermark
%watermark -iv
```
# Stocks
```
# multi series line chart: https://altair-viz.github.io/gallery/multi_series_line.html
# multi-line tooltip: https://altair-viz.github.io/gallery/multiline_tooltip.html
import altair as alt
# from vega_datasets import data
# stocks = data.stocks()
# Create a selection that chooses the nearest point & selects based on x-value
nearest = alt.selection(type='single', nearest=True, on='mouseover',
fields=['date'], empty='none')
# The basic line
line = alt.Chart().mark_line(interpolate='basis').encode(
alt.X('date:T', axis=alt.Axis(title='')),
alt.Y('price:Q', axis=alt.Axis(title='',format='$f')),
color='symbol:N'
)
# Transparent selectors across the chart. This is what tells us
# the x-value of the cursor
selectors = alt.Chart().mark_point().encode(
x='date:T',
opacity=alt.value(0),
).add_selection(
nearest
)
# Draw points on the line, and highlight based on selection
points = line.mark_point().encode(
opacity=alt.condition(nearest, alt.value(1), alt.value(0))
)
# Draw text labels near the points, and highlight based on selection
text = line.mark_text(align='left', dx=5, dy=-5).encode(
text=alt.condition(nearest, 'price:Q', alt.value(' '))
)
# Draw a rule at the location of the selection
rules = alt.Chart().mark_rule(color='gray').encode(
x='date:T',
).transform_filter(
nearest
)
# Put the five layers into a chart and bind the data
stockChart = alt.layer(line, selectors, points, rules, text,
data='https://raw.githubusercontent.com/altair-viz/vega_datasets/master/vega_datasets/_data/stocks.csv',
width=600, height=300,title='Stock History')
stockChart.save('stocks.html')
```
# Airports
## Airports with vega data
```
import altair as alt
from vega_datasets import data
# US states topology and airport locations from the vega sample datasets.
states = alt.topo_feature(data.us_10m.url, feature='states')
airports = data.airports.url
# US states background
background = alt.Chart(states).mark_geoshape(
    fill='lightgray',
    stroke='white',
).properties(
    width=800,
    height=500
).project('albersUsa')
# airport positions on background
points = alt.Chart(airports).mark_circle().encode(
    longitude='longitude:Q',
    latitude='latitude:Q',
    size=alt.value(15),
    color=alt.value('#3377B3'),
    tooltip=['iata:N','name:N','city:N','state:N','latitude:Q','longitude:Q'],
)
chart = (background + points)
chart.save('airports.html')
chart
```
## Airports with OurAirports data
```
# # data from here: http://ourairports.com/data/
# # download their csv and then run this to filter it...
# airports = (pd.read_csv('airports-all.csv')
# # .dropna(how='any')
# )
# print(airports.shape)
# airports = airports[airports['Type'].isin(['medium_airport','large_airport'])]
# # airports = airports[airports['Type'].isin(['heliport'])]
# airports=airports[['ID','Name','Latitude','Longitude','Elevation','City']]
# # airports.to_csv('heliports.csv', index=False)
# airports.to_csv('airports-world.csv', index=False)
# airports.head()
import altair as alt
states = alt.topo_feature(data.us_10m.url, feature='states')
airports = pd.read_csv('airports.csv')
# US states background
background = alt.Chart(states).mark_geoshape(
fill='white',
stroke='gray',
).properties(
width=800,
height=500
).project('albersUsa')
# airport positions on background
points = alt.Chart(airports).mark_circle(stroke='gray',strokeWidth=.5).encode(
longitude='Longitude:Q',
latitude='Latitude:Q',
size=alt.value(10),
color=alt.Color('Elevation:O', bin=alt.Bin(maxbins=20), sort='ascending',
legend=alt.Legend(title="Airport Elevation (ft.)")),
tooltip=['ID:N','Name:N','City:N','Latitude:Q','Longitude:Q','Elevation:O'],
).properties(
title='US Airports'
)
chart = (background + points)
chart.save('airports.html')
chart
```
## Heliports with OurAirports data
```
import altair as alt
# NOTE(review): `data` and `pd` come from earlier cells; this cell is not standalone.
states = alt.topo_feature(data.us_10m.url, feature='states')
airports = pd.read_csv('heliports.csv')
# US states background
background = alt.Chart(states).mark_geoshape(
    fill='white',
    stroke='gray',
).properties(
    width=800,
    height=500
).project('albersUsa')
# heliport positions on background, coloured by binned elevation
points = alt.Chart(airports).mark_circle(stroke='gray',strokeWidth=.5).encode(
    longitude='Longitude:Q',
    latitude='Latitude:Q',
    size=alt.value(15),
    color=alt.Color('Elevation:O', bin=alt.Bin(maxbins=20), sort='ascending',
                    legend=alt.Legend(title="Heliport Elevation (ft.)")),
    tooltip=['ID:N','Name:N','City:N','Latitude:Q','Longitude:Q','Elevation:O'],
).properties(
    title='US Heliports'
)
chart = (background + points)
chart.save('heliports.html')
chart
```
# Birdstrikes
```
# This example borrows heavily from the Seattle Weather Interactive example:
# https://altair-viz.github.io/gallery/seattle_weather_interactive.html
import altair as alt
from vega_datasets import data
# scale = alt.Scale(domain=['European starling', 'Rock pigeon', 'Mourning dove'
# , 'Canada goose', 'Red-tailed hawk'],
# range=['#e7ba52', '#a7a7a7', '#aec7e8', '#1f77b4', '#9467bd'])
# color = alt.Color('Wildlife__Species:N', scale=scale)
color = alt.Color('Wildlife__Species:N')
# We create two selections:
# - a brush that is active on the top panel
# - a multi-click that is active on the bottom panel
brush = alt.selection_interval(encodings=['x'])
click = alt.selection_multi(encodings=['color'])
# Top panel is scatter plot of temperature vs time
points = alt.Chart().mark_circle().encode(
alt.X('yearmonthdate(Flight_Date):T', axis=alt.Axis(title='Date')),
alt.Y('Speed_IAS_in_knots:Q',
axis=alt.Axis(title='Indicated Airspeed (kts)'),
),
color=alt.condition(brush, color, alt.value('lightgray')),
tooltip=['Airport__Name:N','Aircraft__Make_Model:N','Flight_Date:T','When__Phase_of_flight:N','Wildlife__Species:N','Speed_IAS_in_knots:Q'],
).properties(
width=600,
height=300
).add_selection(
brush
).transform_filter(
click
)
# Bottom panel is a bar chart of species
bars = alt.Chart().mark_bar().encode(
alt.Y('count()', scale=alt.Scale(type='log')),
alt.X('Wildlife__Species:N', sort=alt.SortField(field='sort_order', op='count', order='descending')),
color=alt.condition(click, color, alt.value('lightgray')),
).transform_filter(
brush
).properties(
width=600,
).add_selection(
click
)
alt.vconcat(points, bars,
data=data.birdstrikes.url,
title="Aircraft Birdstrikes: 1990-2003"
).save('birdstrikes.html')
```
# Clickable legend
```
import altair as alt
from vega_datasets import data
cars = data.cars.url
# Plain scatter with a default (non-interactive) legend, for comparison.
# NOTE(review): this chart expression's value is discarded; in a notebook only
# the last expression of a cell is displayed.
alt.Chart(cars).mark_point().encode(
    x='Horsepower:Q',
    y='Miles_per_Gallon:Q',
    color='Origin:N'
)
import altair as alt
from vega_datasets import data
cars = data.cars.url
# define selection
click = alt.selection_multi(encodings=['color'])
# scatter plots of points
scatter = alt.Chart(cars).mark_circle().encode(
    x='Horsepower:Q',
    y='Miles_per_Gallon:Q',
    size=alt.Size('Cylinders:O',
                  scale=alt.Scale(range=(20,100))
                  ),
    color=alt.Color('Origin:N', legend=None),
    tooltip=['Name:N','Horsepower:Q','Miles_per_Gallon:Q',
             'Cylinders:O','Origin:N'],
).transform_filter(
    click
).properties(
    selection=click
).interactive()
# legend: one rect per origin that drives the click selection
legend = alt.Chart(cars).mark_rect().encode(
    y=alt.Y('Origin:N', axis=alt.Axis(title='Select Origin')),
    color=alt.condition(click, 'Origin:N',
                        alt.value('lightgray'), legend=None),
    size=alt.value(250)
).properties(
    selection=click
)
chart = (scatter | legend)
chart.save('cars-clickable-legend.html')
```
# Jobs
```
import altair as alt
from vega_datasets import data
jobs = data.jobs.url
# One circle per (year, job, sex); circle size encodes the count.
points = alt.Chart(jobs).mark_circle().encode(
    alt.X('year:T', axis=alt.Axis(title='Date')),
    # NOTE(review): y encodes the job category but the axis title says 'Sex' —
    # confirm the intended label.
    alt.Y('job:N',
          axis=alt.Axis(title='Sex'),
          ),
    color='sex:N',
    size='count:Q'
).properties(
    width=600,
    height=3100
)
points.save('jobs.html')
```
# Flights
```
# example: http://vega.github.io/vega-tutorials/airports/
import altair as alt
from vega_datasets import data
states = alt.topo_feature(data.us_10m.url, feature='states')
airports = pd.read_csv('https://raw.githubusercontent.com/vega/vega-datasets/gh-pages/data/airports.csv')
airports['origin'] = airports['iata']
flights = pd.read_csv('https://raw.githubusercontent.com/vega/vega-datasets/gh-pages/data/flights-airport.csv')
flights = pd.merge(flights,airports[['origin','latitude','longitude']],how='left',on='origin')
# flights = flights.groupby(['origin','latitude','longitude'])['count'].sum()
# US states background
background = alt.Chart(states).mark_geoshape(
fill='lightgray',
stroke='white',
).properties(
width=800,
height=500
).project('albersUsa')
# airport positions on background
points = alt.Chart(airports).mark_circle().encode(
longitude='longitude:Q',
latitude='latitude:Q',
# size='count:Q',
color=alt.value('#3377B3'),
# tooltip=['iata:N','origin:N','latitude:Q','longitude:Q'],
)
chart = (background + points)
chart.save('flights.html')
chart
```
| github_jupyter |
Fredholm integral equation of the second kind using mechanical quadrature method (Book 2, p. 38, ex. 4.2.2).
# Common parts
```
from numpy import array, sum, cosh, zeros
from numpy.linalg import solve
import pandas as pd
```
We are given the task below, but the solution is appropriate for all Fredholm equations of the second kind:
$u(x)-0.6 \int_{0}^{1} \operatorname{ch}(x y) u(y) d y=x-0.6$
```
def H(x, y):
    """Kernel of the integral equation: H(x, y) = cosh(x*y)."""
    return cosh(x * y)


def f(x):
    """Free term of the equation: f(x) = x - 0.6."""
    return x - 0.6


# Constants of the equation u(x) - c * int_a^b H(x, y) u(y) dy = f(x).
c = 0.6
a = 0
b = 1
```
Because we will use the mechanical quadrature method, we need a quadrature formula. In the current task we use the composite middle rectangles rule.
```
def middle_rectangles(f, N, a=0, b=1):
    """Composite midpoint (middle rectangles) quadrature rule.

    Args:
        f: Integrand; must accept a numpy array of points.
        N (int): Number of partitions.
        a, b (int, int): Integration bounds.

    Returns:
        result (float): Approximate value of the integral of f over [a, b].
    """
    # Width of each sub-interval.
    step = (b - a) / N
    # Midpoints of the N sub-intervals: a + h/2, a + 3h/2, ...
    midpoints = array([a + step * (k + 0.5) for k in range(N)])
    # Each rectangle contributes h * f(midpoint).
    return step * sum(f(midpoints))
```
But in the current task we only need the points and coefficients of the quadrature formula.
```
def middle_rectangles_points_coefficents(N, a=0, b=1):
    """Points and coefficients of the composite midpoint rule.

    Args:
        N (int): Number of partitions.
        a, b (int, int): Bounds.

    Returns:
        points (numpy.ndarray): Midpoints of the N sub-intervals.
        coefficents (list<float>): Quadrature weights, all equal to h.
    """
    step = (b - a) / N
    midpoints = array([a + step * (k + 0.5) for k in range(N)])
    weights = [step for _ in range(N)]
    return midpoints, weights
```
Also at some point we will need the Kronecker delta.
```
def kroneker_delta(x, y):
    """Kronecker delta: 1 when x equals y, 0 otherwise."""
    return int(x == y)
```
# Mechanical quadrature method
```
def mechanical_quadrature(H, f, N, find_points_coefficents, x, c=1, a=0, b=1):
    """Solve a Fredholm integral equation of the second kind,
    u(x) - c * int_a^b H(x, y) u(y) dy = f(x),
    by the mechanical quadrature method.

    Args:
        H (func(x, y)): Kernel under the integral.
        f (func(x)): Free value function (must accept arrays of points).
        N (int): Number of points in the quadrature method.
        find_points_coefficents (func(N, a, b)): Function that returns
            points and coefficients from the appropriate quadrature formula.
        x (float): Point to count the result function in.
        c (float): Constant before the integral.
        a, b (float): Integral bounds.

    Returns:
        result (float): Value of the desired function u at point x.
    """
    points, coefficents = find_points_coefficents(N, a, b)
    # Build and solve the collocation system D z = g with
    # D[i][j] = delta_ij - c * A_j * H(x_i, x_j) and g_i = f(x_i).
    # FIX: the previous version dropped the factor c entirely, implicitly
    # solving the equation with c = 1 regardless of the argument.
    D = zeros(shape=(N, N))
    for row in range(N):
        for col in range(N):
            delta = 1.0 if row == col else 0.0
            D[row][col] = delta - c * coefficents[col] * H(points[row], points[col])
    g = f(points)
    z = solve(D, g)
    # Recover u(x) = f(x) + c * sum_j A_j * H(x, x_j) * z_j.
    summ = 0
    for i in range(N):
        summ += coefficents[i] * H(x, points[i]) * z[i]
    result = c * summ + f(x)
    return result
```
# Testing
```
# Starting number of splits.
n = 5
# Number of calculations.
# Number of splits grows twice
# as much on every iteration.
calcs = 7
# Generating number of splits: n, 2n, 4n, ...
splits = [2**i*n for i in range(calcs)]
# Generating first column of table (row labels u^(N)(x)).
x_s = list(map(lambda x: 'u^({})(x)'.format(x), splits))
# Generating values in point a.
a_s = [mechanical_quadrature(H,
                             f,
                             N=n,
                             find_points_coefficents=middle_rectangles_points_coefficents,
                             x=a,
                             c=c,
                             a=a,
                             b=b) for n in splits]
# Generating values in point (a+b)/2.
a_b_s = [mechanical_quadrature(H,
                               f,
                               N=n,
                               find_points_coefficents=middle_rectangles_points_coefficents,
                               x=(a+b)/2,
                               c=c,
                               a=a,
                               b=b) for n in splits]
# Generating values in point b.
b_s = [mechanical_quadrature(H,
                             f,
                             N=n,
                             find_points_coefficents=middle_rectangles_points_coefficents,
                             x=b,
                             c=c,
                             a=a,
                             b=b) for n in splits]
# Creating table (one row per split count, one column per evaluation point).
data = pd.DataFrame(list(zip(x_s, a_s, a_b_s, b_s)),
                    columns =['$x$', '$a$', '$(a + b)/2$', '$b$'])
data.set_index('$x$', inplace=True)
# display() is the IPython rich-output helper (notebook only).
display(data)
```
| github_jupyter |
```
%matplotlib inline
```
# Weak lensing
This example computes weak lensing maps (convergence and shear) for a redshift
distribution of sources. The lensing is simulated by a line of sight
integration of the matter fields.
## Setup
To the simulate lensing fields in each shell of the light cone, it suffices to
generate random (e.g. lognormal) matter fields, and perform a line of sight
integration to obtain the convergence field. The shear field is readily
computed from there.
From there, it is possible to obtain the effective integrated lensing maps of
a distribution of sources. Given such a distribution, which is set up here at
the top, the :func:`glass.lensing.lensing_dist` generator will iteratively
collect and integrate the contributions to the lensing from each shell.
```
import numpy as np
import healpy as hp
import matplotlib.pyplot as plt
# these are the GLASS imports: cosmology, glass modules, and the CAMB module
from cosmology import LCDM
import glass.sim
import glass.camb
import glass.matter
import glass.lensing
# also needs camb itself to get the parameter object, and the expectation
import camb
# cosmology for the simulation
cosmo = LCDM(h=0.7, Om=0.3)
# basic parameters of the simulation (HEALPix resolution and band limit)
nside = 512
lmax = nside
# localised redshift distribution
# (Gaussian bump centred at z = 0.5 with width 0.1)
z = np.linspace(0, 1, 101)
nz = np.exp(-(z - 0.5)**2/(0.1)**2)
# set up CAMB parameters for matter angular power spectrum
pars = camb.set_params(H0=100*cosmo.h, omch2=cosmo.Om*cosmo.h**2,
                       NonLinear=camb.model.NonLinear_both)
# generators for a lensing-only simulation; each step consumes the previous
# step's fields per redshift shell
generators = [
    glass.sim.zspace(0, 1.01, dz=0.1),
    glass.camb.camb_matter_cl(pars, lmax),
    glass.matter.lognormal_matter(nside),
    glass.lensing.convergence(cosmo),
    glass.lensing.shear(),
    glass.lensing.lensing_dist(z, nz, cosmo),
]
```
## Simulation
The simulation is then straightforward: Only the integrated lensing maps are
stored here. While the simulation returns the result after every redshift
interval in the light cone, only the last result will be show below, so the
previous values are not kept.
```
# simulate and store the integrated lensing maps
# (only the final shell's maps survive the loop; earlier results are
# deliberately overwritten, as explained in the text above this cell)
for shell in glass.sim.generate(generators):
    kappa = shell['kappa_bar']
    gamma1 = shell['gamma1_bar']
    gamma2 = shell['gamma2_bar']
```
## Analysis
To make sure the simulation works, compute the angular power spectrum ``cls``
of the simulated convergence field, and compare with the expectation (from
CAMB) for the given redshift distribution of sources.
```
# get the angular power spectra of the lensing maps
cls = hp.anafast([kappa, gamma1, gamma2], pol=True, lmax=lmax)
# get the expected cls from CAMB
pars.Want_CMB = False
pars.min_l = 1
pars.SourceWindows = [camb.sources.SplinedSourceWindow(z=z, W=nz, source_type='lensing')]
theory_cls = camb.get_results(pars).get_source_cls_dict(lmax=lmax, raw_cl=True)
# plot the realised and expected cls
l = np.arange(lmax+1)
plt.plot(l, (2*l+1)*cls[0], '-k', lw=2, label='simulation')
plt.plot(l, (2*l+1)*theory_cls['W1xW1'], '-r', lw=2, label='expectation')
# symlog keeps the low-l modes visible alongside the large-l behaviour
plt.xscale('symlog', linthresh=10, linscale=0.5, subs=[2, 3, 4, 5, 6, 7, 8, 9])
plt.yscale('symlog', linthresh=1e-7, linscale=0.5, subs=[2, 3, 4, 5, 6, 7, 8, 9])
plt.xlabel(r'angular mode number $l$')
plt.ylabel(r'angular power spectrum $(2l+1) \, C_l^{\kappa\kappa}$')
plt.legend()
plt.show()
```
| github_jupyter |
```
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
fish = pd.read_csv("fish.csv")
landing = pd.read_csv("landing.csv")
fish.head()
landing.head()
landing.shape
# Blank-string cells become NaN so the rows can be dropped below.
landing['TotalPrice'].replace(' ', np.nan, inplace=True)
landing['CatchLbs'].replace(' ', np.nan, inplace=True)
landing
landing.dropna(subset=['TotalPrice'], inplace=True)
landing.dropna(subset=['CatchLbs'], inplace=True)
landing.head()
landing.shape
# The columns arrive as strings; cast to float for the arithmetic below.
landing['TotalPrice'] = landing['TotalPrice'].astype(float)
landing['CatchLbs'] = landing['CatchLbs'].astype(float)
landing.describe()
landing.head()
landing.SpeciesName.unique()
landing['SpeciesName'].value_counts()
# Aggregate to yearly totals per species; drop columns where a sum is meaningless.
landing_grouped = landing.groupby(['Year', 'SpeciesName']).sum()
landing_grouped = landing_grouped.reset_index()
landing_grouped = landing_grouped.drop(columns=['BlockCode', 'SpeciesCode', 'Month'])
landing_grouped.head()
landing_grouped.shape
# Normalise species names that carry trailing whitespace in the raw data.
landing_grouped['SpeciesName'].replace('Anchovy, northern ', 'Anchovy, northern', inplace=True)
landing_grouped['SpeciesName'].replace('Sardine, Pacific ', 'Sardine, Pacific', inplace=True)
landing_price = landing_grouped[['Year','SpeciesName','TotalPrice']]
landing_lbs = landing_grouped[['Year','SpeciesName','CatchLbs']]
landing_grouped.SpeciesName.unique()
# Inflation multipliers to adjust nominal dollars, oldest year first.
# NOTE(review): the per-species lists below appear hand-trimmed to each
# species' years on record — verify the alignment against the data.
inflation = [2.32, 2.23, 2.13, 2.02, 1.92, 1.87, 1.81, 1.76, 1.72, 1.67, 1.62, 1.6, 1.57, 1.53, 1.47, 1.46, 1.42, 1.39, 1.35, 1.3, 1.27, 1.22, 1.22, 1.19, 1.17, 1.14, 1.12, 1.1, 1.1, 1.09, 1.06, 1.04, 1.02, 1]
# One frame per species (`.values ==` matches any cell; SpeciesName here).
landing_anchovy = landing_grouped[landing_grouped.values == 'Anchovy, northern']
landing_mackerelp = landing_grouped[landing_grouped.values == 'Mackerel, Pacific']
landing_mackerelj = landing_grouped[landing_grouped.values == 'Mackerel, jack']
landing_opah = landing_grouped[landing_grouped.values == 'Opah']
landing_sardine = landing_grouped[landing_grouped.values == 'Sardine, Pacific']
landing_yellowtail = landing_grouped[landing_grouped.values == 'Yellowtail']
# --- Northern anchovy: nominal and inflation-adjusted price per pound ---
landing_anchovy['PricePound'] = landing_anchovy['TotalPrice']/landing_anchovy['CatchLbs']
inflation_anchovy = [2.32, 2.23, 2.13, 2.02, 1.92, 1.81, 1.76, 1.67, 1.62, 1.6, 1.57, 1.53, 1.47, 1.46, 1.42, 1.39, 1.35, 1.3, 1.27, 1.22, 1.22, 1.19, 1.17, 1.12, 1.1, 1.09, 1.04, 1.02, 1]
landing_anchovy['PricePoundAdj'] = landing_anchovy['TotalPrice']*inflation_anchovy/landing_anchovy['CatchLbs']
landing_anchovy.plot(x='Year', y = 'CatchLbs', title = 'Northern Anchovy Catch Lbs Per Year')
landing_anchovy.plot(x='Year', y = 'TotalPrice', title = 'Northern Anchovy Total Price Per Year')
landing_anchovy.plot(x='Year', y = 'PricePound', title = 'Northern Anchovy Price/Pound Per Year')
landing_anchovy.plot(x='Year', y = 'PricePoundAdj', title = 'Northern Anchovy Price/Pound Per Year (Adjusted for Inflation)')
# --- Pacific mackerel ---
landing_mackerelp['PricePound'] = landing_mackerelp['TotalPrice']/landing_mackerelp['CatchLbs']
landing_mackerelp['PricePoundAdj'] = landing_mackerelp['TotalPrice']*inflation/landing_mackerelp['CatchLbs']
landing_mackerelp.plot(x='Year', y = 'CatchLbs', title = 'Pacific Mackerel Catch Lbs Per Year')
landing_mackerelp.plot(x='Year', y = 'TotalPrice', title = 'Pacific Mackerel Total Price Per Year')
landing_mackerelp.plot(x='Year', y = 'PricePound', title = 'Pacific Mackerel Price/Pound Per Year')
landing_mackerelp.plot(x='Year', y = 'PricePoundAdj', title = 'Pacific Macerel Price/Pound Per Year (Adjusted for Inflation)')
# --- Jack mackerel ---
landing_mackerelj['PricePound'] = landing_mackerelj['TotalPrice']/landing_mackerelj['CatchLbs']
inflation_j = [2.32, 2.23, 2.13, 2.02, 1.92, 1.87, 1.81, 1.76, 1.72, 1.67, 1.62, 1.6, 1.57, 1.53, 1.47, 1.46, 1.39, 1.35, 1.3, 1.27, 1.22, 1.22, 1.19, 1.17, 1.14, 1.12, 1.1, 1.1, 1.09, 1.06, 1.04, 1]
landing_mackerelj['PricePoundAdj'] = landing_mackerelj['TotalPrice']*inflation_j/landing_mackerelj['CatchLbs']
landing_mackerelj.plot(x='Year', y = 'CatchLbs', title = 'Jack Mackerel Catch Lbs Per Year')
landing_mackerelj.plot(x='Year', y = 'TotalPrice', title = 'Jack Mackerel Total Price Per Year')
landing_mackerelj.plot(x='Year', y = 'PricePound', title = 'Jack Mackerel Price/Pound Per Year')
landing_mackerelj.plot(x='Year', y = 'PricePoundAdj', title = 'Jack Macerel Price/Pound Per Year (Adjusted for Inflation)')
# --- Opah ---
inflation_opah = [2.32, 2.23, 2.13, 2.02, 1.92, 1.87, 1.81, 1.76, 1.72, 1.67, 1.62, 1.6, 1.57, 1.53, 1.47, 1.46, 1.42, 1.39, 1.35, 1.3, 1.27, 1.22, 1.22, 1.19, 1.17, 1.14, 1.12, 1.1, 1.09, 1.06]
landing_opah['PricePound'] = landing_opah['TotalPrice']/landing_opah['CatchLbs']
landing_opah['PricePoundAdj'] = landing_opah['TotalPrice']*inflation_opah/landing_opah['CatchLbs']
landing_opah.plot(x='Year', y = 'CatchLbs', title = 'Opah Catch Lbs Per Year')
landing_opah.plot(x='Year', y = 'TotalPrice', title = 'Opah Total Price Per Year')
landing_opah.plot(x='Year', y = 'PricePound', title = 'Opah Price/Pound Per Year')
landing_opah.plot(x='Year', y = 'PricePoundAdj', title = 'Opah Price/Pound Per Year (Adjusted for Inflation)')
# --- Pacific sardine ---
landing_sardine['PricePound'] = landing_sardine['TotalPrice']/landing_sardine['CatchLbs']
landing_sardine['PricePoundAdj'] = landing_sardine['TotalPrice']*inflation/landing_sardine['CatchLbs']
landing_sardine.plot(x='Year', y = 'CatchLbs', title = 'Pacific Sardine Catch Lbs Per Year')
landing_sardine.plot(x='Year', y = 'TotalPrice', title = 'Pacific Sardine Total Price Per Year')
landing_sardine.plot(x='Year', y = 'PricePound', title = 'Sardine Price/Pound Per Year')
landing_sardine.plot(x='Year', y = 'PricePoundAdj', title = 'Sardine Price/Pound Per Year (Adjusted for Inflation)')
# --- Yellowtail ---
inflation_yellowtail = [2.32, 2.23, 2.13, 2.02, 1.92, 1.87, 1.81, 1.72, 1.67, 1.62, 1.6, 1.57, 1.53, 1.47, 1.46, 1.42, 1.39, 1.35, 1.3, 1.27, 1.22, 1.22, 1.19, 1.17, 1.14, 1.12, 1.1, 1.1, 1.09, 1.06, 1.04, 1.02, 1]
landing_yellowtail['PricePound'] = landing_yellowtail['TotalPrice']/landing_yellowtail['CatchLbs']
landing_yellowtail['PricePoundAdj'] = landing_yellowtail['TotalPrice']*inflation_yellowtail/landing_yellowtail['CatchLbs']
landing_yellowtail.plot(x='Year', y = 'CatchLbs', title = 'Yellowtail Catch Lbs Per Year')
landing_yellowtail.plot(x='Year', y = 'TotalPrice', title = 'Yellowtail Total Price Per Year')
landing_yellowtail.plot(x='Year', y = 'PricePound', title = 'Yellowtail Price/Pound Per Year')
landing_yellowtail.plot(x='Year', y = 'PricePoundAdj', title = 'Yellowtail Price/Pound Per Year (Adjusted for Inflation)')
```
| github_jupyter |
## Check ESG evaluation automatically
Automate checking the evaluation list against the disclosed document by natural language processing.
1. Upload: Uploading the annual report and read descriptions.
2. Preprocess: Preprocess the descriptions for natural language processing.
3. Retrieval: Retrieving the sections by keywords of theme.
4. Answering: Try to check automatically
* Using question answering method
* Using vector similarity method
Conclusion of trial
* Retrieving the theme related sections by keyword, and extracting sentences by vector similarity seems to work well.
* Checking automatically has issues to overcome.
* Question answering method: It will need training data (can't complete by only applying pre-trained model by Wikipedia based question answering model).
* Vector similarity method: We can extract related sentences but we need to set the threshold to check (if x number of sentences can be extracted, then check, etc.)
Next Step
1. At first use the retrieval method, after confirming its effectiveness, then try to improve the question answering method.
2. Implements the simple system to do 1.
### 0. Preparation
Prepare the libraries to execute programs.
```
import os
import sys
import numpy as np
import pandas as pd
def set_root():
    """Make the project root (the parent of the working directory) importable.

    Returns:
        str: The root path that was ensured to be on sys.path.
    """
    project_root = os.path.join(os.path.realpath("."), "../")
    if project_root not in sys.path:
        sys.path.append(project_root)
    return project_root


ROOT_DIR = set_root()
# All raw/processed files live under <root>/data.
DATA_DIR = os.path.join(ROOT_DIR, "data")
```
### 1. Upload
(※実際システムになる時はPDFのUploadだが、現在はファイルを取ってくるのでDownload)
ああ
```
# Toyota's integrated (annual) report
url = "https://global.toyota/pages/global_toyota/ir/library/annual/2019_001_annual_en.pdf"
from chariot.storage import Storage
from evaluator.data.pdf_reader import PDFReader
storage = Storage(DATA_DIR)
file_name = os.path.basename(url)
# Download under data/raw/ and parse the PDF into a DataFrame of sections.
file_path = storage.download(url, f"raw/{file_name}")
reader = PDFReader()
df = reader.read_to_frame(file_path)
```
PDF読み込み結果の表示
* page: ページ番号
* order: ページ内のセクション番号(登場順に上からカウント)
```
# Preview the parsed sections (page: page number, order: section order in page).
df.head(5)
```
### 2. Preprocess
PDF読み込み結果は様々なノイズを含んでいるので、処理しやすいよう前処理を行う。
```
# Clean up PDF-extraction noise so the text is usable for NLP downstream.
preprocessed = reader.preprocess_frame(df)
preprocessed.head(5)
```
文を含んでいないセクションを除外
```
import re
# A section "contains a sentence" if a letter is followed by '.' or ';'
# (optionally preceded by a bullet character).
has_sentence = re.compile("(•)?\s?[A-Za-z](\s)?(\.|;)")
preprocessed = preprocessed[preprocessed["content"].apply(lambda s: re.search(has_sentence, s) is not None)]
print(f"Rows are decreased from {len(df)} to {len(preprocessed)}")
# NOTE(review): assign() returns a NEW frame and the result is discarded here,
# so no "length" column is actually added — confirm whether this line is needed.
preprocessed.assign(length=preprocessed["content"].apply(lambda s: len(s)))
preprocessed.head(5)
```
### 3. Retrieval
評価項目に関係しているセクションを抽出する。
手法は様々あるが、単純に評価項目の質問に含まれているキーワードを含むセクションを抽出する。実際自分でやってみたところ、「CO2」などのキーワードでまず検索することが多かったので。
```
# Evaluation item to check; lower-cased to match lemmatized tokens later.
question = "Climate Change impact including CO2 / GHG emissions. Policy or commitment statement"
question = question.lower()
language = "en"
from spacy.util import get_lang_class
class Parser():
    """Lightweight wrapper that builds a blank spaCy pipeline for ``lang``
    and exposes a single ``parse`` method."""

    def __init__(self, lang):
        # Keep the language code for reference, then instantiate the
        # corresponding blank spaCy Language pipeline once up front.
        self.lang = lang
        language_cls = get_lang_class(lang)
        self.parser = language_cls()

    def parse(self, text):
        """Run the spaCy pipeline on ``text`` and return the resulting Doc."""
        return self.parser(text)
```
評価項目の質問から、キーワードを抽出
```
parser = Parser(language)
question_words = [t.lemma_ for t in parser.parse(question) if not t.is_stop and not re.match("\'|\.|\?|\/|\,", t.text)]
question_words
```
文書内の各セクションについて、キーワードが含まれる数を計算
```
def count_keyword_match(parser, keywords, text):
    """Count how many tokens in ``text`` have a lemma listed in ``keywords``.

    Parameters
    ----------
    parser : object exposing ``parse(text)`` that yields tokens with a
        ``lemma_`` attribute (e.g. the ``Parser`` wrapper above).
    keywords : iterable of str
        Lemmas to look for.
    text : str
        Section content to scan.

    Returns
    -------
    int
        Number of matching token occurrences (duplicates are counted).
    """
    # A set gives O(1) membership tests; the original copied the keywords
    # into a list, making every token lookup O(len(keywords)).
    keyword_set = set(keywords)
    return sum(1 for token in parser.parse(text) if token.lemma_ in keyword_set)
counted = preprocessed.assign(
keyword_match=preprocessed["content"].apply(
lambda s: count_keyword_match(parser, question_words, s)))
matched = counted[counted["keyword_match"] > 0]
matched.sort_values(by=["keyword_match"], ascending=False).head(5)
```
当然ながら、検索でかかるようなセクションは取れている。
### 4.Answering
#### 4.1 Use Question Answering Model
キーワード検索から絞り込んだ結果から、具体的な回答関連箇所を抽出する。抜粋に成功したらチェック?とできるか。
(=>実際のチェック結果が現在手に入らないので、結果の確認はできない)
回答箇所の抽出には、自然言語処理の質問回答の手法を使用。Wikipediaをベースにした質問回答のデータセット(SQuADと呼ばれる)を学習させれば、一応[人間より精度は高くなる](https://rajpurkar.github.io/SQuAD-explorer/)。ただESGの質問回答データセットはないので、SQuADで学習したモデルをESGにそのまま適用してみる。
実際人間がチェックすると以下の箇所になる

```
from evaluator.models.question_answer import answer
# Climate Change impact including CO2 / GHG emissions. Policy or commitment statement
asking = "What policy or commitment does company have for climate change impact including CO2 / GHG emissions ?"
```
回答箇所抜粋
```
question_context = matched["content"].apply(lambda s: (asking.lower(), s)).tolist()
answers = answer("distilbert-base-uncased-distilled-squad", question_context)
pd.DataFrame(answers).head(5)
```
answerは抽出できているが、意味が通らないものが多い。
手法のせいなのか、学習データがあればうまく動くのかは、現時点ではわからない(質問と回答のペアを作る必要がある)。
#### 4.2 Use Feature Representation
直接質問回答ではなく、評価の質問に近い文を抽出してみる(あればチェック、なければチェックしない)。
先ほどのキーワードでの抽出と変えて、もう少し文の意味を考慮できる手法を使用する。具体的には、Googleの検索で最近採用された手法を使用する。
* [BERT](https://www.blog.google/products/search/search-language-understanding-bert/)
まずは、セクションを文に分割する。
```
sentences = []
for i, row in matched.iterrows():
c = row["content"]
for j, s in enumerate(c.replace("•", ".").replace(";", ".").split(".")):
sentences.append({
"page": row["page"],
"section_order": row["order"],
"sentence_order": j,
"sentence": s,
"length": len(s)
})
sentences = pd.DataFrame(sentences)
sentences.head(5)
```
文をベクトル表現(BERT表現)に変換する。
```
from evaluator.features.encoder import encode
model_name = "bert-base-uncased"
embeddings = encode(model_name, sentences["sentence"].values.tolist())
embeddings.shape
```
評価項目の質問と、文書中の文とで、ベクトル表現が近いものを抽出する。
```
query = encode(model_name, "Climate Change impact including CO2 / GHG emissions. Policy or commitment statement".lower())
query = np.reshape(query, (1, -1))
from sklearn.metrics.pairwise import cosine_similarity
distance = cosine_similarity(query, embeddings)
np.sort(-distance).flatten()[:10]
```
質問に近い文トップ10を表示
```
# Show full cell contents when displaying; use None rather than the
# deprecated -1 sentinel (removed in pandas >= 1.0) to mean "no truncation".
pd.set_option("display.max_colwidth", None)
# Attach the similarity score to each sentence and list the 10 closest ones.
sentences.assign(distance=distance.flatten()).iloc[np.argsort(-distance).flatten()].head(10)
```
そこそこ関連ある文章は取れているように思える。"the agree-ment set the long-term"等を見ると、"Policy"がトヨタのものなのか世界的なものなのか判別つかない問題が考えられる。
| github_jupyter |
# Codealong 05
Categorical variable - values are disjoint but differences matter (e.g. blood type) <br>
Ordinal variable - order matters, but difference doesn't as much (e.g. movie ratings) <br>
Interval variable - ordinal variable but difference means something (e.g. temperature) <br>
Ratio variable - interval variable + '0' means non-existent (e.g. income) <br>
```
import os
import numpy as np
import pandas as pd
import csv
import matplotlib.pyplot as plt
import seaborn as sns
import statsmodels.api as sm
import statsmodels.formula.api as smf
from sklearn import feature_selection, linear_model
from mpl_toolkits.mplot3d import Axes3D
pd.set_option('display.max_rows', 10)
pd.set_option('display.notebook_repr_html', True)
pd.set_option('display.max_columns', 10)
%matplotlib inline
plt.style.use('ggplot')
```
## Activity: Model's F-statistic
```
df = pd.read_csv(os.path.join('zillow-05-starter.csv'), index_col = 'ID')
df
model = smf.ols(formula = 'SalePrice ~ IsAStudio', data = df).fit()
model.summary()
```
## Accessing the model's F-value and its p-value
### F-value (with significance level of `5%`)
```
model.fvalue
```
### Corresponding p-value
```
model.f_pvalue
```
## Part A - Linear Modeling with `scikit-learn`
```
subset_df = df.dropna(axis = 'index', subset = ['Size', 'LotSize', 'IsAStudio'])
def linear_modeling_with_sklearn(X, y):
    """Fit an OLS linear model with scikit-learn and print diagnostics.

    Prints the per-regressor F-statistic and p-value, the model R^2, and
    the fitted coefficients (intercept and slopes).

    Parameters
    ----------
    X : DataFrame-like of regressors (one column per feature).
    y : Series-like response variable.
    """
    model = linear_model.LinearRegression(fit_intercept=True)
    model.fit(X, y)
    # f_regression performs a univariate F-test for each regressor
    # independently (not the joint F-test reported by statsmodels).
    # Compute it once instead of twice as in the original.
    f_values, p_values = feature_selection.f_regression(X, y)
    # Python 3 print function (the original used Python 2 print statements,
    # which are a SyntaxError under Python 3).
    print('F-statistic (performed for each regressor independently)')
    print('- F-value', f_values)
    print('- p-value', p_values)
    print('R^2 =', model.score(X, y))
    print('Coefficients')
    print('- beta_0 (intercept) =', model.intercept_)
    print('- beta_n (n > 0) =', model.coef_)
```
### SalePrice ~ IsAStudio with `smf.ols`
```
smf.ols(formula = 'SalePrice ~ IsAStudio', data = subset_df).fit().summary()
```
### SalePrice ~ IsAStudio with `scikit-learn` (Simple Linear Modeling)
```
X = subset_df[ ['IsAStudio'] ]
y = subset_df['SalePrice']
linear_modeling_with_sklearn(X, y)
```
### SalePrice ~ Size + LotSize with `smf.ols`
```
smf.ols(formula = 'SalePrice ~ Size + LotSize', data = subset_df).fit().summary()
```
### SalePrice ~ IsAStudio with `scikit-learn` (Multiple Linear Modeling)
```
X = subset_df[ ['Size', 'LotSize'] ]
y = subset_df['SalePrice']
linear_modeling_with_sklearn(X, y)
```
# Advertising dataset
```
df = pd.read_csv(os.path.join('advertising.csv'))
df
```
## Plots
### Sales ~ TV
```
sns.lmplot('TV','Sales',df)
```
### Sales ~ Radio
```
sns.lmplot('Radio','Sales',df)
```
### Sales ~ Newspaper
```
sns.lmplot('Newspaper','Sales',df)
```
## Simple linear regressions
### Sales ~ TV
```
model_tv = smf.ols(formula = 'Sales ~ TV', data = df).fit()
model_tv.summary()
```
### Sales ~ Radio
```
model_radio = smf.ols(formula = 'Sales ~ Radio', data = df).fit()
model_radio.summary()
```
### Sales ~ Newspaper
```
model_newspaper = smf.ols(formula = 'Sales ~ Newspaper', data = df).fit()
model_newspaper.summary()
```
## Residuals
### Sales ~ TV
```
model_tv.resid.plot(kind = 'hist', bins = 10)
figure = sm.qqplot(model_tv.resid, line = 's')
figure = sm.graphics.plot_regress_exog(model_tv, 'TV')
```
### Sales ~ Radio
```
figure = sm.graphics.plot_regress_exog(model_radio, 'Radio')
figure = sm.qqplot(model_radio.resid, line = 's')
```
### Sales ~ Newspaper
```
figure = sm.qqplot(model_newspaper.resid, line = 's')
figure = sm.graphics.plot_regress_exog(model_newspaper, 'Newspaper')
```
### Sales ~ TV + Radio + Newspaper
```
model = smf.ols(formula = 'Sales ~ TV + Radio + Newspaper', data = df).fit()
model.summary()
```
### Sales ~ TV + Radio
```
model = smf.ols(formula = 'Sales ~ TV + Radio', data = df).fit()
model.summary()
figure = sm.qqplot(model.resid, line = 's')
figure = sm.graphics.plot_regress_exog(model, 'TV')
figure = sm.graphics.plot_regress_exog(model, 'Radio')
```
## Part B - Interaction Effects
### Sales ~ TV + Radio + TV * Radio
```
model = smf.ols(formula = 'Sales ~ TV + Radio + TV * Radio', data = df).fit()
model.summary()
figure = sm.qqplot(model.resid, line = 's')
figure = sm.graphics.plot_regress_exog(model, 'TV')
figure = sm.graphics.plot_regress_exog(model, 'Radio')
figure = sm.graphics.plot_regress_exog(model, 'TV:Radio')
threedee = plt.figure().gca(projection='3d')
threedee.scatter(df['Radio'],df['TV'],df['Sales'])
threedee.set_xlabel('Radio')
threedee.set_ylabel('TV')
threedee.set_zlabel('Sales')
plt.show()
model_test = smf.ols(formula = 'Sales ~ TV * Radio * Newspaper', data = df).fit()
model_test.summary()
```
## Part C - Dummy Variables
```
# Reload the Zillow dataset, indexed by property ID.
df = pd.read_csv(os.path.join('zillow-05-starter.csv'), index_col = 'ID')
df
# Boolean indicator columns, one per bathroom count (1-4).
df['Bath1'] = df['BathCount'] == 1
df['Bath2'] = df['BathCount'] == 2
df['Bath3'] = df['BathCount'] == 3
df['Bath4'] = df['BathCount'] == 4
df
# NOTE(review): drop() is not in-place and the result is never assigned,
# so rows with IsAStudio == 1 are NOT actually removed from df. If removal
# was intended, assign the result back (df = df.drop(...)).
df.drop(df[df['IsAStudio'] == 1].index)
df
# Regress SalePrice on the raw bathroom count (treats BathCount as interval).
model = smf.ols(formula = 'SalePrice ~ BathCount', data = df).fit()
model.summary()
# Regress on three dummies; properties not matching Bath1-3 (including
# Bath4) form the baseline — presumably Bath4 is the intended reference
# category; confirm once the data is filtered to 1-4 bathrooms below.
model = smf.ols(formula = 'SalePrice ~ Bath1 + Bath2 + Bath3', data = df).fit()
model.summary()
```
### What's the bathrooms' distribution in the dataset?
```
# Show how many BathCount values are missing, then the frequency of each
# distinct bathroom count, followed by a histogram.
# (Converted from Python 2 print statements to the Python 3 print function.)
print(np.nan, df.BathCount.isnull().sum())
for bath_count in np.sort(df.BathCount.dropna().unique()):
    print(bath_count, len(df[df.BathCount == bath_count]))
df.BathCount.plot(kind = 'hist', bins = 15)
```
### Let's keep properties with 1, 2, 3, or 4 bathrooms
```
# Keep only properties with 1-4 bathrooms, then re-check the distribution.
df = df[df.BathCount.isin([1,2,3,4])]
df
# (Converted from Python 2 print statements to the Python 3 print function.)
print(np.nan, df.BathCount.isnull().sum())
for bath_count in np.sort(df.BathCount.dropna().unique()):
    print(bath_count, len(df[df.BathCount == bath_count]))
```
### We can create the dummy variables manually
```
df['Bath1'] = 0
df['Bath2'] = 0
df['Bath3'] = 0
df['Bath4'] = 0
df.loc[df.BathCount == 1, 'Bath1'] = 1
df.loc[df.BathCount == 2, 'Bath2'] = 1
df.loc[df.BathCount == 3, 'Bath3'] = 1
df.loc[df.BathCount == 4, 'Bath4'] = 1
df.columns
df
```
### But we can also use `get_dummies` from `pandas` as well (on `BedCount` for the sake of variety)
```
beds_df = pd.get_dummies(df.BedCount, prefix = 'Bed')
beds_df
df = df.join([beds_df])
df.columns
```
### `SalesPrice` as a function of `Bath_2`, `Bath_3`, and `Bath_4`
```
smf.ols(formula = 'SalePrice ~ Bath4 + Bath2 + Bath3', data = df).fit().summary()
```
### `SalesPrice` as a function of `Bath_1`, `Bath_3`, and `Bath_4`
```
smf.ols(formula = 'SalePrice ~ Bath4 + Bath1 + Bath3', data = df).fit().summary()
```
### `SalesPrice` as a function of `Bath_1`, `Bath_2`, and `Bath_4`
```
smf.ols(formula = 'SalePrice ~ Bath4 + Bath2 + Bath1', data = df).fit().summary()
```
### `SalesPrice` as a function of `Bath_1`, `Bath_2`, and `Bath_3`
```
smf.ols(formula = 'SalePrice ~ Bath1 + Bath2 + Bath3', data = df).fit().summary()
```
| github_jupyter |
# Chapter 2 - Functions
Functions are the cornerstone of coding and are one of the best tools to **encapsulate** code. Functions in many programming languages are not like the ones found in math classes, where one set of inputs maps onto another set of outputs (see below)
<img src="images/maps.png" alt="map" style="zoom:30%;" />
However, there are languages where this is the case. These programming languages are called **functional programming** languages and I will go into more detail in later chapters.
Most programming languages like C++, Java, JavaScript, and Python **may** have functions that return nothing or require no input(s). Despite this freedom, it is often the case that a function takes some number of inputs and produces some number of outputs because it is easier for the coder to read. For example, the following returns double the inputted number
```
# function
def double(number):
    """Return twice the given value."""
    doubled = number + number
    return doubled
# end of function
# gets input
number = int(input())
# calls function
doubled_number = double(number)
# prints output
print(number, "* 2 =", doubled_number)
# the following also print also works, but requires f strings
# which was explained in the extra section in chapter 1
# print(f'{number} * 2 = {doubled_number}')
```
When running, input the number in the box and hit enter
We can see that the return value is indeed double the inputted value, unless you inputted a word or character, in which case an error occurred.
First we will go through the code to see how this is implemented line by line.
1. the function (wrapped in the function comments) starts off with the **def** keyword. This keyword identifies the following as a function.
2. the next part is the name of the function (in this case **double**) and the variables it is taking in (in this case **number**) enclosed in brackets. \* Note the variable taken in does not have to match the variable name when it is later called because the variable in the function is a copy of the variable in the function call, unless it is a list, set, dictionary, or class object.
3. after, we see " **:** " (colon) to indicate that the function has started
4. on to the next line, we see it is indented in 4 spaces. This is very important and Python will look for this indent because no indent indicates that the code block (in this case a function), has ended.
5. something that is optional is a return statement. Return is a keyword that specifies the following will be given to whatever called the function
6. the next line, number = int(input()) assigns the variable number to the inputted number. The int() function turns the input into an integer because the input() function returns a string.
7. It is important that the function call comes after the function definition because Python reads code from top to bottom. This line calls the double function with the number variable as its argument (\* note the argument being fed in does not need to be a variable and can be anything, i.e. a number, string, etc.)
8. the last line prints out the equation and the answer
Some other things of note include giving a parameter a default value, which is used if nothing is supplied as an argument
```
def circle_area(radius, pi = 3.14):
    """Compute a circle's area.

    Parameters:
        radius: circle radius.
        pi: value of pi to use; defaults to 3.14.

    Returns:
        Tuple of (area, pi_used).
    """
    return radius ** 2 * pi, pi
user_input = float(input('Radius: '))
area, pi = circle_area(user_input)
print(f'Calculated with a pi of {pi}, the area is {area}')
```
Now note that functions may return multiple things and you will need to **unpack** them by setting two or however many variables you return on the left hand side.
Another good habit is to write the description of the function underneath it with descriptions of the arguments being taking in as a comment. Sometimes this is written in a docs string, which is another form of a comment but can be multi line
```
'''
calculates the circle's area
given the radius and the value of pi; defaults to 3.14
'''
```
## Extra
In the circle area, you may encounter an error of something that is off by a very, very small decimal (like .00000000001) because of something called **floating point error**. This is something that you must be careful about because this is a fundamental issue with how computers calculate floats.
Learn more about it [here](https://en.wikipedia.org/wiki/Floating-point_error_mitigation)
# Exercise 2
Using what you have learned in this chapter, construct a function that calculates the volume of a sphere given an inputted value. The formula for a sphere is $\frac{4}{3}\pi r^3$.
Again, the answer will be in the end of the next chapter
```
# write your code below
```
## Chapter 1 Answer:
Chapter 1's exercise asks for you to print a float zero using what you learnt in chapter 1. One of the easiest (but certainly not the only way) way we can do this if we have two of the same floats and subtracting them
```
# number variable
number = 42.1 - 42.1
# prints number
print(number)
```
| github_jupyter |
**[SQL Home Page](https://www.kaggle.com/learn/intro-to-sql)**
---
# Introduction
[Stack Overflow](https://stackoverflow.com/) is a widely beloved question and answer site for technical questions. You'll probably use it yourself as you keep using SQL (or any programming language).
Their data is publicly available. What cool things do you think it would be useful for?
Here's one idea:
You could set up a service that identifies the Stack Overflow users who have demonstrated expertise with a specific technology by answering related questions about it, so someone could hire those experts for in-depth help.
In this exercise, you'll write the SQL queries that might serve as the foundation for this type of service.
As usual, run the following cell to set up our feedback system before moving on.
```
# Set up feedback system
from learntools.core import binder
binder.bind(globals())
from learntools.sql.ex6 import *
print("Setup Complete")
```
Run the next cell to fetch the `stackoverflow` dataset.
```
from google.cloud import bigquery
# Create a "Client" object
client = bigquery.Client()
# Construct a reference to the "stackoverflow" dataset
dataset_ref = client.dataset("stackoverflow", project="bigquery-public-data")
# API request - fetch the dataset
dataset = client.get_dataset(dataset_ref)
```
# Exercises
### 1) Explore the data
Before writing queries or **JOIN** clauses, you'll want to see what tables are available.
*Hint*: Tab completion is helpful whenever you can't remember a command. Type `client.` and then hit the tab key. Don't forget the period before hitting tab.
```
# Get a list of available tables
tables = client.list_tables(dataset)
list_of_tables = [table.table_id for table in tables]
# Print your answer
print(list_of_tables)
# Check your answer
q_1.check()
```
For the solution, uncomment the line below.
```
#q_1.solution()
```
### 2) Review relevant tables
If you are interested in people who answer questions on a given topic, the `posts_answers` table is a natural place to look. Run the following cell, and look at the output.
```
# Construct a reference to the "posts_answers" table
answers_table_ref = dataset_ref.table("posts_answers")
# API request - fetch the table
answers_table = client.get_table(answers_table_ref)
# Preview the first five lines of the "posts_answers" table
client.list_rows(answers_table, max_results=5).to_dataframe()
```
It isn't clear yet how to find users who answered questions on any given topic. But `posts_answers` has a `parent_id` column. If you are familiar with the Stack Overflow site, you might figure out that the `parent_id` is the question each post is answering.
Look at `posts_questions` using the cell below.
```
# Construct a reference to the "posts_questions" table
questions_table_ref = dataset_ref.table("posts_questions")
# API request - fetch the table
questions_table = client.get_table(questions_table_ref)
# Preview the first five lines of the "posts_questions" table
client.list_rows(questions_table, max_results=5).to_dataframe()
```
Are there any fields that identify what topic or technology each question is about? If so, how could you find the IDs of users who answered questions about a specific topic?
Think about it, and then check the solution by running the code in the next cell.
```
q_2.solution()
```
### 3) Selecting the right questions
A lot of this data is text.
We'll explore one last technique in this course which you can apply to this text.
A **WHERE** clause can limit your results to rows with certain text using the **LIKE** feature. For example, to select just the third row of the `pets` table from the tutorial, we could use the query in the picture below.

You can also use `%` as a "wildcard" for any number of characters. So you can also get the third row with:
```
query = """
SELECT *
FROM `bigquery-public-data.pet_records.pets`
WHERE Name LIKE '%ipl%'
"""
```
Try this yourself. Write a query that selects the `id`, `title` and `owner_user_id` columns from the `posts_questions` table.
- Restrict the results to rows that contain the word "bigquery" in the `tags` column.
- Include rows where there is other text in addition to the word "bigquery" (e.g., if a row has a tag "bigquery-sql", your results should include that too).
```
# Your code here
questions_query = """
SELECT id, title, owner_user_id
FROM `bigquery-public-data.stackoverflow.posts_questions`
WHERE tags Like '%bigquery%'
"""
# Set up the query (cancel the query if it would use too much of
# your quota, with the limit set to 1 GB)
safe_config = bigquery.QueryJobConfig(maximum_bytes_billed=10**10)
questions_query_job = client.query(questions_query, job_config=safe_config)# Your code goes here
# API request - run the query, and return a pandas DataFrame
questions_results = questions_query_job.to_dataframe() # Your code goes here
# Preview results
print(questions_results.head())
# Check your answer
q_3.check()
```
For a hint or the solution, uncomment the appropriate line below.
```
#q_3.hint()
#q_3.solution()
```
### 4) Your first join
Now that you have a query to select questions on any given topic (in this case, you chose "bigquery"), you can find the answers to those questions with a **JOIN**.
Write a query that returns the `id`, `body` and `owner_user_id` columns from the `posts_answers` table for answers to "bigquery"-related questions.
- You should have one row in your results for each answer to a question that has "bigquery" in the tags.
- Remember you can get the tags for a question from the `tags` column in the `posts_questions` table.
Here's a reminder of what a **JOIN** looked like in the tutorial:
```
query = """
SELECT p.Name AS Pet_Name, o.Name AS Owner_Name
FROM `bigquery-public-data.pet_records.pets` as p
INNER JOIN `bigquery-public-data.pet_records.owners` as o
ON p.ID = o.Pet_ID
"""
```
It may be useful to scroll up and review the first several rows of the `posts_answers` and `posts_questions` tables.
```
answers_query = """
SELECT a.id, a.body, a.owner_user_id
FROM `bigquery-public-data.stackoverflow.posts_questions` AS q
INNER JOIN `bigquery-public-data.stackoverflow.posts_answers` AS a
ON q.id = a.parent_id
WHERE q.tags LIKE '%bigquery%'
"""
# Set up the query (cancel the query if it would use too much of
# your quota, with the limit set to 1 GB)
safe_config = bigquery.QueryJobConfig(maximum_bytes_billed=10**10)
answers_query_job = client.query(answers_query, job_config=safe_config)
# API request - run the query, and return a pandas DataFrame
answers_results = answers_query_job.to_dataframe()
# Preview results
print(answers_results.head())
# Check your answer
q_4.check()
```
For a hint or the solution, uncomment the appropriate line below.
```
#q_4.hint()
#q_4.solution()
```
### 5) Answer the question
You have the merge you need. But you want a list of users who have answered many questions... which requires more work beyond your previous result.
Write a new query that has a single row for each user who answered at least one question with a tag that includes the string "bigquery". Your results should have two columns:
- `user_id` - contains the `owner_user_id` column from the `posts_answers` table
- `number_of_answers` - contains the number of answers the user has written to "bigquery"-related questions
```
# Your code here
bigquery_experts_query = """
SELECT a.owner_user_id AS user_id, COUNT(1) AS number_of_answers
FROM `bigquery-public-data.stackoverflow.posts_questions` AS q
INNER JOIN `bigquery-public-data.stackoverflow.posts_answers` AS a
ON q.id = a.parent_Id
WHERE q.tags LIKE '%bigquery%'
GROUP BY a.owner_user_id
"""
# Set up the query
safe_config = bigquery.QueryJobConfig(maximum_bytes_billed=10**10)
bigquery_experts_query_job = client.query(bigquery_experts_query, job_config=safe_config)
# API request - run the query, and return a pandas DataFrame
bigquery_experts_results = bigquery_experts_query_job.to_dataframe()
# Preview results
print(bigquery_experts_results.head())
# Check your answer
q_5.check()
```
For a hint or the solution, uncomment the appropriate line below.
```
#q_5.hint()
#q_5.solution()
```
### 6) Building a more generally useful service
How could you convert what you've done to a general function a website could call on the backend to get experts on any topic?
Think about it and then check the solution below.
```
q_6.solution()
```
# Congratulations!
You know all the key components to use BigQuery and SQL effectively. Your SQL skills are sufficient to unlock many of the world's largest datasets.
Want to go play with your new powers? Kaggle has BigQuery datasets available [here](https://www.kaggle.com/datasets?sortBy=hottest&group=public&page=1&pageSize=20&size=sizeAll&filetype=fileTypeBigQuery).
# Feedback
Bring any questions or feedback to the [Learn Discussion Forum](https://www.kaggle.com/learn-forum).
---
**[SQL Home Page](https://www.kaggle.com/learn/intro-to-sql)**
*Have questions or comments? Visit the [Learn Discussion forum](https://www.kaggle.com/learn-forum) to chat with other Learners.*
| github_jupyter |
# Multi-Task Example
In this notebook, we are going to fine-tune a multi-task model. Multi-task training is useful in many situations, and is a first-class feature in `jiant`.
---
In this notebook, we will:
* Train a RoBERTa base model on RTE, STS-B, and CommonsenseQA simultaneously
## Setup
#### Install dependencies
First, we will install libraries we need for this code.
```
%%capture
!git clone https://github.com/nyu-mll/jiant.git
%cd jiant
!pip install -r requirements-no-torch.txt
!pip install --no-deps -e ./
```
#### Download data
Next, we will download RTE, STS-B and CommonsenseQA data.
```
%%capture
%cd /content
# Download RTE, STS-B and CommonsenseQA data
!PYTHONPATH=/content/jiant python jiant/jiant/scripts/download_data/runscript.py \
download \
--tasks rte stsb commonsenseqa \
--output_path=/content/tasks/
```
## `jiant` Pipeline
```
import sys
sys.path.insert(0, "/content/jiant")
import jiant.proj.main.tokenize_and_cache as tokenize_and_cache
import jiant.proj.main.export_model as export_model
import jiant.proj.main.scripts.configurator as configurator
import jiant.proj.main.runscript as main_runscript
import jiant.shared.caching as caching
import jiant.utils.python.io as py_io
import jiant.utils.display as display
import os
```
#### Download model
Next, we will download a `roberta-base` model. This also includes the tokenizer.
```
export_model.export_model(
hf_pretrained_model_name_or_path="roberta-base",
output_base_path="./models/roberta-base",
)
```
#### Tokenize and cache
With the model and data ready, we can now tokenize and cache the inputs features for our tasks. This converts the input examples to tokenized features ready to be consumed by the model, and saved them to disk in chunks.
```
# Tokenize and cache each task
for task_name in ["rte", "stsb", "commonsenseqa"]:
tokenize_and_cache.main(tokenize_and_cache.RunConfiguration(
task_config_path=f"./tasks/configs/{task_name}_config.json",
hf_pretrained_model_name_or_path="roberta-base",
output_dir=f"./cache/{task_name}",
phases=["train", "val"],
))
```
We can inspect the first examples of the first chunk of each task.
```
row = caching.ChunkedFilesDataCache("./cache/rte/train").load_chunk(0)[0]["data_row"]
print(row.input_ids)
print(row.tokens)
row = caching.ChunkedFilesDataCache("./cache/stsb/val").load_chunk(0)[0]["data_row"]
print(row.input_ids)
print(row.tokens)
row = caching.ChunkedFilesDataCache("./cache/commonsenseqa/val").load_chunk(0)[0]["data_row"]
print(row.input_ids)
for context_and_choice in row.tokens_list:
print(context_and_choice)
```
#### Writing a run config
Here we are going to write what we call a `jiant_task_container_config`. This configuration file basically defines a lot of the subtleties of our training pipeline, such as what tasks we will train on and do evaluation on, and the batch size for each task. The new version of `jiant` leans heavily toward explicitly specifying everything, for the purpose of inspectability and leaving minimal surprises for the user, even at the cost of being more verbose.
We use a helper "Configurator" to write out a `jiant_task_container_config`, since most of our setup is pretty standard.
**Depending on what GPU your Colab session is assigned to, you may need to lower the train batch size.**
```
jiant_run_config = configurator.SimpleAPIMultiTaskConfigurator(
task_config_base_path="./tasks/configs",
task_cache_base_path="./cache",
train_task_name_list=["rte", "stsb", "commonsenseqa"],
val_task_name_list=["rte", "stsb", "commonsenseqa"],
train_batch_size=4,
eval_batch_size=8,
epochs=0.5,
num_gpus=1,
).create_config()
os.makedirs("./run_configs/", exist_ok=True)
py_io.write_json(jiant_run_config, "./run_configs/jiant_run_config.json")
display.show_json(jiant_run_config)
```
To briefly go over the major components of the `jiant_task_container_config`:
* `task_config_path_dict`: The paths to the task config files we wrote above.
* `task_cache_config_dict`: The paths to the task features caches we generated above.
* `sampler_config`: Determines how to sample from different tasks during training.
* `global_train_config`: The number of total steps and warmup steps during training.
* `task_specific_configs_dict`: Task-specific arguments for each task, such as training batch size and gradient accumulation steps.
* `taskmodels_config`: Task-model specific arguments for each task-model, including what tasks use which model.
* `metric_aggregator_config`: Determines how to weight/aggregate the metrics across multiple tasks.
#### Start training
Finally, we can start our training run.
Before starting training, the script also prints out the list of parameters in our model. You should notice that there is a unique task head for each task.
```
run_args = main_runscript.RunConfiguration(
jiant_task_container_config_path="./run_configs/jiant_run_config.json",
output_dir="./runs/run1",
hf_pretrained_model_name_or_path="roberta-base",
model_path="./models/roberta-base/model/model.p",
model_config_path="./models/roberta-base/model/config.json",
learning_rate=1e-5,
eval_every_steps=500,
do_train=True,
do_val=True,
force_overwrite=True,
)
main_runscript.run_loop(run_args)
```
Finally, we should see the validation scores for all three tasks. We are not winning any awards with these scores, but this example should show how easy it is to run multi-task training in `jiant`.
| github_jupyter |
# COVID-19: Healthcare Facility Capacity Optimization
## Objective and Prerequisites
This COVID-19 Healthcare Facility Capacity Optimization problem shows you how to determine the optimal location and capacity of healthcare facilities in order to:
* Satisfy demand from COVID-19 patients for treatment,
* Minimize the cost of opening temporary facilities for healthcare providers, and
* Predict the allocation of COVID-19 patients from a specific county to a specific healthcare facility.
This modeling example is at the beginner level, where we assume that you know Python and that you have some knowledge of how to build mathematical optimization models.
**Download the Repository** <br />
You can download the repository containing this and other examples by clicking [here](https://github.com/Gurobi/modeling-examples/archive/master.zip).
---
## Problem Description
Hospitals in various counties throughout the US are reaching full capacity due to a surge in COVID-19 patients. Many hospitals are considering creating temporary facilities to increase their capacity to handle COVID-19 patients.

In this example, we focus on nine counties in the US. Each county has existing facilities to treat COVID-19 patients, and also has the option of building temporary facilities to increase the overall capacity to handle COVID-19 patients.
The following table defines the coordinates of the centroid and the forecasted demand (i.e. the projected number of COVID-19 patients) of each county. To estimate this demand, we consider the population of nine fictional counties in California, the current number of COVID-19 cases per day in California, the average percentage of COVID-19 cases who require hospitalization, and the average number of days that a COVID-19 patient stays in the hospital.
| Centroid | Coordinates | Demand |
| --- | --- | --- |
| county 1 | (1, 1.5) | 351 |
| county 2 | (3, 1) | 230 |
| county 3 | (5.5, 1.5) | 529 |
| county 4 | (1, 4.5 ) | 339 |
| county 5 | (3, 3.5) | 360 |
| county 6 | (5.5, 4.5) | 527 |
| county 7 | (1, 8) | 469 |
| county 8 | (3, 6) | 234 |
| county 9 | (4.5, 8) | 500 |
The following table defines the coordinates and capacity of existing facilities. The capacity of existing facilities is calculated as 80% of the forecasted demand of the county in which the existing facilities are located. The exception to this is county 9, where we assume that we have an excess of existing capacity.
| Existing | Coordinates | Capacity |
| --- | --- | --- |
| facility 1 | (1, 2) | 281 |
| facility 2 | (2.5, 1) | 187 |
| facility 3 | (5, 1) | 200 |
| facility 4 | (6.5, 3.5) | 223 |
| facility 5 | (1, 5) | 281 |
| facility 6 | (3, 4) | 281 |
| facility 7 | (5, 4) | 222 |
| facility 8 | (6.5, 5.5) | 200 |
| facility 9 | (1, 8.5) | 250 |
| facility 10 | (1.5, 9.5) | 125 |
| facility 11 | (8.5, 6) | 187 |
| facility 12 | (5, 8) | 300 |
| facility 13 | (3, 9) | 300 |
| facility 14 | (6, 9) | 243 |
The following table defines the coordinates and capacity of new temporary facilities. The cost of building a temporary facility
with a capacity of treating one hundred COVID-19 patients is $\$500,000$.
| Temporary | Coordinates | Capacity |
| --- | --- | --- |
| facility 15 | (1.5, 1) | 100 |
| facility 16 | (3.5, 1.5) | 100 |
| facility 17 | (5.5, 2.5) | 100 |
| facility 18 | (1.5, 3.5) | 100 |
| facility 19 | (3.5, 2.5) | 100 |
| facility 20 | (4.5, 4.5) | 100 |
| facility 21 | (1.5, 6.5) | 100 |
| facility 22 | (3.5, 6.5) | 100 |
| facility 23 | (5.5, 6.5) | 100 |
The coordinates of the three tables are in tens of miles. We assume that each increase of 10 miles in the distance to a COVID-19 facility results in a $\$5$ increase in driving costs for each COVID-19 patient.
In this example, the goal is to identify which temporary facilities to build in order to be able to accommodate demand for treatment by COVID-19 patients while minimizing the total cost of COVID-19 patients driving to an existing or temporary COVID-19 facility and the total cost of building temporary facilities.
This example shows how a Facility Location mixed-integer programming (MIP) model can help healthcare providers make decisions about:
* How to best utilize their capacity,
* Whether to build temporary facilities for COVID-19 patients, and
* How COVID-19 patients from a county should be allocated to various healthcare facilities in order to ensure that the facilities have the capacity to provide treatment for the patients.
This Jupyter Notebook is based on the paper written by Katherine Klise and Michael Bynum [1].
## Model Formulation
### Sets and Indices
$e \in E$: Index and set of existing healthcare facility locations.
$t \in T$: Index and set of temporary healthcare facility locations.
$f \in F = E \cup T$: Index and set of all healthcare facility locations.
$c \in C$: Index and set of counties.
### Parameters
$Dist_{c,f} \in \mathbb{R}^+$: Distance between county $c$ and facility location $f$.
$Dem_{c} \in \mathbb{R}^+$: Expected number of people in county $c$ who will need a COVID-19 facility.
$Cap_{f} \in \mathbb{R}^+$: Number of people that can be served by a facility at location $f$.
$\text{dCost} = 5$: Cost of driving 10 miles.
$\text{tFCost} = 500,000$: Cost of building a temporary COVID-19 facility with a capacity of treating 100 COVID-19 patients.
$bigM$: Penalty of adding extra capacity at temporary facilities in order to satisfy treatment of COVID-19 patients demand.
### Decision Variables
$y_{t} \in \{0, 1 \}$: This variable is equal to 1 if we build a temporary facility at location $t$; and 0 otherwise.
$ x_{c,f} \in \mathbb{R}^+$: Number of people from county $c$ served by a facility at location $f$.
$z_{t} \in \mathbb{R}^+$: Extra capacity added at temporary facility location $t$.
### Objective Function
- **Cost**. We want to minimize the total cost of patients driving from a county to a healthcare facility and the total cost of building temporary COVID-19 treatment capacity. The last term with the big penalty coefficient ($bigM$), enables extra capacity to be added at a temporary facility to ensure that total demand is satisfied.
\begin{equation}
\text{Min} \quad Z = \sum_{c \in C} \sum_{f \in F} \text{dCost} *Dist_{c,f} * x_{c,f} +
\text{tFCost}*\sum_{t \in T} y_{t} + bigM*\sum_{t \in T} z_{t}
\tag{0}
\end{equation}
### Constraints
- **Demand**. Satisfy county demand of service from a COVID-19 facility.
\begin{equation}
\sum_{f \in F} x_{c,f} = Dem_{c} \quad \forall c \in C
\tag{1}
\end{equation}
- **Existing facilities**. Capacity of an existing location of a facility cannot be exceeded.
\begin{equation}
\sum_{c \in C} x_{c,e} \leq Cap_{e} \quad \forall e \in E
\tag{2}
\end{equation}
- **Temporary facilities**. Capacity of a temporary location of a facility cannot be exceeded. Please observe that extra capacity can be added.
\begin{equation}
\sum_{c \in C} x_{c,t} \leq Cap_{t}*y_{t} + z_{t} \quad \forall t \in T
\tag{3}
\end{equation}
---
## Python Implementation
We now import the Gurobi Python Module and other Python libraries.
```
%pip install gurobipy
from itertools import product
from math import sqrt
import gurobipy as gp
from gurobipy import GRB
# tested with Gurobi v9.1.0 and Python 3.7.0
```
---
### Helper Functions
* `compute_distance` computes distance between a county centroid and the location of a facility
* `solve_covid19_facility` builds, solves, and prints results of the COVID-19 healthcare facility capacity optimization model
```
def compute_distance(loc1, loc2):
    """Return the Euclidean distance between two (x, y) points.

    Used for the distance between a county centroid and a facility location.
    """
    delta_x = loc1[0] - loc2[0]
    delta_y = loc1[1] - loc2[1]
    return sqrt(delta_x ** 2 + delta_y ** 2)
def solve_covid19_facility(c_coordinates, demand):
    """Build, solve, and report the COVID-19 facility-location MIP.

    @param c_coordinates: dict mapping county index -> (x, y) centroid
           coordinates, in tens of miles.
    @param demand: dict mapping county index -> forecasted number of
           COVID-19 patients needing hospitalization.

    Side effects: prints the optimal cost breakdown, which temporary
    facilities to build, any extra capacity added, and the county-to-facility
    patient allocation.
    """
    #####################################################
    # Data
    #####################################################
    # Indices for the counties
    counties = [*range(1, 10)]
    # Indices for the facilities (1-14 existing, 15-23 temporary candidates)
    facilities = [*range(1, 24)]
    # Coordinates and COVID-19 patient capacity of each existing facility
    existing, e_coordinates, e_capacity = gp.multidict({
        1: [(1, 2), 281],
        2: [(2.5, 1), 187],
        3: [(5, 1), 200],
        4: [(6.5, 3.5), 223],
        5: [(1, 5), 281],
        6: [(3, 4), 281],
        7: [(5, 4), 222],
        8: [(6.5, 5.5), 200],
        9: [(1, 8.5), 250],
        10: [(1.5, 9.5), 125],
        11: [(8.5, 6), 187],
        12: [(5, 8), 300],
        13: [(3, 9), 300],
        14: [(6, 9), 243]
    })
    # Coordinates and COVID-19 patient capacity of each candidate temporary facility
    temporary, t_coordinates, t_capacity = gp.multidict({
        15: [(1.5, 1), 100],
        16: [(3.5, 1.5), 100],
        17: [(5.5, 2.5), 100],
        18: [(1.5, 3.5), 100],
        19: [(3.5, 2.5), 100],
        20: [(4.5, 4.5), 100],
        21: [(1.5, 6.5), 100],
        22: [(3.5, 6.5), 100],
        23: [(5.5, 6.5), 100]
    })
    # Cost of driving 10 miles
    dcost = 5
    # Cost of building a temporary facility with capacity of 100 COVID-19 patients
    tfcost = 500000
    # Merge existing and temporary locations into one facility -> coordinates map
    f_coordinates = {}
    for e in existing:
        f_coordinates[e] = e_coordinates[e]
    for t in temporary:
        f_coordinates[t] = t_coordinates[t]
    # Cartesian product of counties and facilities
    cf = []
    for c in counties:
        for f in facilities:
            tp = c, f
            cf.append(tp)
    # Euclidean distances between county centroids and facility locations
    distance = {(c, f): compute_distance(c_coordinates[c], f_coordinates[f]) for c, f in cf}
    #####################################################
    # MIP Model Formulation
    #####################################################
    m = gp.Model('covid19_temporary_facility_location')
    # y[t] = 1 if temporary facility t is built
    y = m.addVars(temporary, vtype=GRB.BINARY, name='temporary')
    # x[c, f] = number of patients from county c treated at facility f
    x = m.addVars(cf, vtype=GRB.CONTINUOUS, name='Assign')
    # z[t] = extra capacity added at temporary facility t
    z = m.addVars(temporary, vtype=GRB.CONTINUOUS, name='addCap')
    # Objective (0): driving cost + build cost; the bigM term heavily penalizes
    # extra capacity so z[t] is used only when demand cannot otherwise be met.
    bigM = 1e9
    m.setObjective(gp.quicksum(dcost*distance[c, f]*x[c, f] for c, f in cf)
                   + tfcost*y.sum()
                   + bigM*z.sum(), GRB.MINIMIZE)
    # Constraint (1): satisfy each county's demand exactly
    demandConstrs = m.addConstrs((gp.quicksum(x[c, f] for f in facilities) == demand[c] for c in counties),
                                 name='demandConstrs')
    # Constraint (2): existing facility capacity cannot be exceeded
    existingCapConstrs = m.addConstrs((gp.quicksum(x[c, e] for c in counties) <= e_capacity[e] for e in existing),
                                      name='existingCapConstrs')
    # Constraint (3): temporary capacity (plus extra z[t]) applies only if built
    temporaryCapConstrs = m.addConstrs((gp.quicksum(x[c, t] for c in counties) - z[t]
                                        <= t_capacity[t]*y[t] for t in temporary),
                                       name='temporaryCapConstrs')
    # Run optimization engine
    m.optimize()
    #####################################################
    # Output Reports
    #####################################################
    # Total cost of building temporary facility locations
    temporary_facility_cost = 0
    print(f"\n\n_____________Optimal costs______________________")
    for t in temporary:
        if (y[t].x > 0.5):
            temporary_facility_cost += tfcost*round(y[t].x)
    patient_allocation_cost = 0
    for c, f in cf:
        if x[c, f].x > 1e-6:
            patient_allocation_cost += dcost*round(distance[c, f]*x[c, f].x)
    # NOTE: "healthcare" spelling fixed in the two report strings below.
    print(f"The total cost of building COVID-19 temporary healthcare facilities is ${temporary_facility_cost:,}")
    print(f"The total cost of allocating COVID-19 patients to healthcare facilities is ${patient_allocation_cost:,}")
    # Which temporary facilities to build
    print(f"\n_____________Plan for temporary facilities______________________")
    for t in temporary:
        if (y[t].x > 0.5):
            print(f"Build a temporary facility at location {t}")
    # Extra capacity added at temporary facilities
    print(f"\n_____________Plan to increase Capacity at temporary Facilities______________________")
    for t in temporary:
        if (z[t].x > 1e-6):
            print(f"Increase temporary facility capacity at location {t} by {round(z[t].x)} beds")
    # Demand satisfied at each facility
    f_demand = {}
    print(f"\n_____________Allocation of county patients to COVID-19 healthcare facility______________________")
    for f in facilities:
        temp = 0
        for c in counties:
            allocation = round(x[c, f].x)
            if allocation > 0:
                print(f"{allocation} COVID-19 patients from county {c} are treated at facility {f} ")
            temp += allocation
        f_demand[f] = temp
        print(f"{temp} is the total number of COVID-19 patients that are treated at facility {f}. ")
        print(f"\n________________________________________________________________________________")
    # Sanity check: total demand equals total patients allocated
    total_demand = 0
    for c in counties:
        total_demand += demand[c]
    demand_satisfied = 0
    for f in facilities:
        demand_satisfied += f_demand[f]
    print(f"\n_____________Test demand = supply______________________")
    print(f"Total demand is: {total_demand:,} patients")
    print(f"Total demand satisfied is: {demand_satisfied:,} beds")
```
## Base Scenario
In this scenario, we consider the data described for the instance of the COVID-19 Healthcare Facility Capacity Optimization problem. The forecasted demand is as defined in the first table of the problem description.
```
# County index -> [(x, y) centroid in tens of miles, forecasted COVID-19 patient demand]
counties, coordinates, forecast = gp.multidict({
    1: [(1, 1.5), 351],
    2: [(3, 1), 230],
    3: [(5.5, 1.5), 529],
    4: [(1, 4.5 ), 339],
    5: [(3, 3.5), 360],
    6: [(5.5, 4.5), 527],
    7: [(1, 8), 469],
    8: [(3, 6), 234],
    9: [(4.5, 8), 500]
})
# Solve the base scenario with the forecasted demand as given.
solve_covid19_facility(coordinates, forecast)
```
### Analysis for Base Scenario
The optimal total cost of building COVID-19 temporary healthcare facilities is $\$1,500,000$, and three COVID-19 temporary healthcare facilities are built. The total cost of allocating COVID-19 patients to healthcare facilities is $\$21,645$, and no extra capacity needs to be added to accommodate the demand for treatment from COVID-19 patients.
The MIP model also determines the expected number of COVID-19 patients of a county allocated to a healthcare facility. For example, 6 COVID-19 patients from county 3, 50 COVID-19 patients from county 5, and 166 COVID-19 patients from county 6 are expected to be treated at facility 7. The total number of COVID-19 patients expected to be treated at facility 7 is 222.
---
## Scenario 1
Assume that the Centers for Disease Control and Prevention (CDC) announced that the number of hospitalizations will increase by 20%. This percentage includes 5% of buffer capacity to account for the variability of the expected demand.
```
# Scale every county's forecast up by 20%, rounded to whole patients.
for county in counties:
    forecast[county] = round(forecast[county] * 1.2)
# Re-solve the facility-location model under the increased demand.
solve_covid19_facility(coordinates, forecast)
```
### Analysis for Scenario 1
The optimal total cost of building temporary COVID-19 healthcare facilities is $\$4,500,000$, and nine temporary COVID-19 healthcare facilities are built. The total cost of allocating COVID-19 patients to healthcare facilities is $\$25,520$, and
40 and 27 beds need to be added at temporary healthcare facilities 15 and 17, respectively.
Please note that in this scenario, the system is overloaded and all COVID-19 healthcare facilities are operating at full capacity. In addition, extra capacity needs to be added at some temporary healthcare facilities.
---
## Conclusion
In this example, we addressed the COVID-19 Healthcare Facility Capacity Optimization problem. We determined the optimal location and capacity of healthcare facilities in order to:
* Satisfy demand from COVID-19 patients for treatment,
* Minimize the cost of opening temporary facilities for healthcare providers, and
* Predict the allocation of COVID-19 patients from a specific county to a specific healthcare facility.
We explored two scenarios. In the base scenario, we have enough capacity and need to build three temporary healthcare facilities. Whereas in the alternative scenario (1) with an increase of 20% in the number of COVID-19 patients requiring hospitalization, we need to build nine temporary healthcare facilities and add extra capacity at two of them.
Our COVID-19 Healthcare Facility Location Optimization model can be used by public health officials and healthcare providers to help make strategic decisions about when and where to increase healthcare facility capacity during the COVID-19 pandemic. Also, this strategic model can feed information to a COVID-19 load-balancing dispatching model that is capable of assigning (in real time) COVID-19 patients who require hospitalization to the "right" healthcare facilities.
In addition, our model can feed into a tactical model that determines how capacity should be increased to accommodate any increase in demand. For example, the number of medical personnel to be hired, trained, and re-skilled, the rotation of medical personnel, and the amount of equipment (e.g. ventilators, drugs, beds, etc.) needed.
## References
[1] Katherine Klise and Michael Bynum. *Facility Location Optimization Model for COVID-19 Resources*. April 2020. Joint DOE Laboratory Pandemic Modeling and Analysis Capability. SAND2020-4693R.
Copyright © 2020 Gurobi Optimization, LLC
| github_jupyter |
<img src='../images/logo.png'>
# Create an Interactive Map
Based on a javascript library, leaflet: [https://leafletjs.com/](https://leafletjs.com/), a python interface is available, called folium: [https://python-visualization.github.io/folium/](https://python-visualization.github.io/folium/).
We will use this library to setup an interactive map, where you can zoom in and out, add markers, customize the markers and download the maps as a static website (.html).
## Import libraries
```
import folium
```
## Create an empty map
For the remainder of this notebook, we assume that you always come back and display the map again. Of course, you can display the map anytime in another cell as well; just issue the command 'myMap'.<br>
The following single line will create a map with its center at latitude 0 and longitude 0.
```
# World map centered at latitude 0 / longitude 0, initial zoom level 3.
myMap = folium.Map(location=[0, 0], zoom_start=3)
```
## Show map
```
myMap
```
## Add a marker
We will add the Location for the Greenwich Observatory, UTC
```
# Place a marker at the Greenwich Observatory, the UTC reference point.
marker = folium.Marker(
    location=[51.4779, -0.00189],
    popup=folium.Popup('Greenwich Observatory, UTC'),
)
marker.add_to(myMap)
# myMap.add_child(marker) is the equivalent alternative call.
```
## Show the map again
Either go back to the cell with `myMap` and execute that cell again
OR
Insert a new cell below, write `myMap` and execute.
## Customize the marker
- add a popup message (display a message when you `click` on the marker)<br>
the message is "html", hence you can write any html tags as well
- change the standard icon, try: <b>leaf, glass, cloud, asterisk, user, film, star, heart</b>
<br>(and many more https://getbootstrap.com/docs/3.3/components/)<br>
- icon colors:<b>['red', 'blue', 'green', 'purple', 'orange', 'darkred',
'lightred', 'beige', 'darkblue', 'darkgreen', 'cadetblue', 'darkpurple', 'white', 'pink', 'lightblue', 'lightgreen', 'gray', 'black', 'lightgray']</b>
Add a second marker for the ICOS Carbon Portal HQ, choose an icon, and insert a URL link in the popup message
```
# ICOS Carbon Portal HQ marker: custom leaf icon plus an HTML popup with a link.
icos_popup = 'ICOS Carbon Portal HQ.<br><a href="https://www.icos-cp.eu" target=_blank>https://www.icos-cp.eu</a>'
marker = folium.Marker(location=[55.7094, 13.2011],
                       popup=icos_popup,
                       icon=folium.Icon(icon='leaf', color='green'))
marker.add_to(myMap)
```
## Save the map to a html file
```
import os
# Save the interactive map as a stand-alone HTML page under ~/output/envrifair/.
folder = os.path.join(os.path.expanduser('~'), 'output/envrifair/')
# exist_ok=True avoids the check-then-create race of exists() + makedirs().
os.makedirs(folder, exist_ok=True)
file = os.path.join(folder, 'myMap.html')
myMap.save(file)
```
### open the map
Go back to the file browser and the folder you have been working in. You can just `click` the `myMap.html` file and it will open in your browser.
### download the map
You can select the map file and download it. Now you should have a file on your computer called `myMap.html`, and you can open it in your internet browser. There is no need to be connected to the Jupyter hub or anything else; it is completely stand-alone.
| github_jupyter |
```
from nbdev import *
```
# Record Generator
```
# Creates TFRecords of data for TFX pipeline implementation. Not used.
#hide
import tensorflow as tf
import numpy as np
from securereqnet.preprocessing import vectorize_sentences
class Record_Generator:
    """Formats data for securereqnet models and writes it out as TFRecords.

    Construct with ``processed=True`` if the data is already vectorized in
    the shape [x, 618, 100, 1]; otherwise raw sentence strings are vectorized
    via ``vectorize_sentences`` before serialization.
    """

    def __init__(self, path=".", name="Record", processed=False):
        # Instance-level defaults used by generate_record when no override
        # is passed (previously stored but never consulted — now fixed).
        self.__processed = processed
        self.__path = path
        self.__name = name
        self.__count = 0  # running suffix for generated record files

    def __float_feature(self, value):
        # Wrap a list of floats in a tf.train.Feature.
        return tf.train.Feature(float_list=tf.train.FloatList(value=value))

    def __int64_feature(self, value):
        # Wrap a list of ints in a tf.train.Feature.
        return tf.train.Feature(int64_list=tf.train.Int64List(value=value))

    def generate_record(self, x, y=None, path="", name=None, processed=None):
        """
        Write a single TFRecord.

        @param x, by default a raw sentence string to be processed; pass
               vectorized data of shape [1, 618, 100, 1] with processed=True.
        @param y is used for generating training and evaluation data.
        @param path is the directory where the record will be written to
               (falls back to the constructor's path).
        @param name is the base name of the record to be generated
               (falls back to the constructor's name).
        @param processed should be set to True if the data is vectorized in
               the shape [1, 618, 100, 1] (falls back to the constructor's
               setting).
        """
        # Fall back to the defaults supplied at construction time.
        if path == "":
            path = self.__path
        if name is None:
            name = self.__name
        if processed is None:
            processed = self.__processed
        # Name the record Record_1, Record_2, etc.
        self.__count += 1
        output_filename = path + "/" + name + "_" + str(self.__count) + ".tfrecord"
        print("Generating record at: " + output_filename)
        if not processed:
            x = vectorize_sentences([x])
        # Flatten data into 1-D arrays for serialization.
        x = np.reshape(x, [1*618*100*1,])
        if(y is not None):
            y = np.reshape(y, [1*2,])
        # Feature dictionary for the record
        feature_dict = {
            'x': self.__float_feature(x),
            'numberOfSamples': self.__int64_feature([1])
        }
        # If it is used for training or testing include a y value in the dictionary
        if(y is not None):
            feature_dict["y"] = self.__int64_feature(y)
        # Context manager guarantees the writer is flushed and closed.
        with tf.io.TFRecordWriter(output_filename) as writer:
            example = tf.train.Example(features=tf.train.Features(feature=feature_dict))
            writer.write(example.SerializeToString())
#hide
# Smoke-test: write records from already-vectorized data.
r = Record_Generator()
#hide
# NOTE(review): corpora_test_x / target_test_y are not defined in this
# notebook — presumably produced in an earlier (removed) cell; confirm.
x = corpora_test_x[0]
r.generate_record(x,processed=True)
#hide
y = target_test_y[0]
r.generate_record(x,y,processed=True)
#hide
r = Record_Generator()
#hide
# Raw-string path: the sentence is vectorized before serialization.
r.generate_record("Security Record")
```
| github_jupyter |
# Skip-gram word2vec
In this notebook, I'll lead you through using TensorFlow to implement the word2vec algorithm using the skip-gram architecture. By implementing this, you'll learn about embedding words for use in natural language processing. This will come in handy when dealing with things like machine translation.
## Readings
Here are the resources I used to build this notebook. I suggest reading these either beforehand or while you're working on this material.
* A really good [conceptual overview](http://mccormickml.com/2016/04/19/word2vec-tutorial-the-skip-gram-model/) of word2vec from Chris McCormick
* [First word2vec paper](https://arxiv.org/pdf/1301.3781.pdf) from Mikolov et al.
* [NIPS paper](http://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf) with improvements for word2vec also from Mikolov et al.
* An [implementation of word2vec](http://www.thushv.com/natural_language_processing/word2vec-part-1-nlp-with-deep-learning-with-tensorflow-skip-gram/) from Thushan Ganegedara
* TensorFlow [word2vec tutorial](https://www.tensorflow.org/tutorials/word2vec)
## Word embeddings
When you're dealing with words in text, you end up with tens of thousands of classes to predict, one for each word. Trying to one-hot encode these words is massively inefficient, you'll have one element set to 1 and the other 50,000 set to 0. The matrix multiplication going into the first hidden layer will have almost all of the resulting values be zero. This a huge waste of computation.

To solve this problem and greatly increase the efficiency of our networks, we use what are called embeddings. Embeddings are just a fully connected layer like you've seen before. We call this layer the embedding layer and the weights are embedding weights. We skip the multiplication into the embedding layer by instead directly grabbing the hidden layer values from the weight matrix. We can do this because the multiplication of a one-hot encoded vector with a matrix returns the row of the matrix corresponding the index of the "on" input unit.

Instead of doing the matrix multiplication, we use the weight matrix as a lookup table. We encode the words as integers, for example "heart" is encoded as 958, "mind" as 18094. Then to get hidden layer values for "heart", you just take the 958th row of the embedding matrix. This process is called an **embedding lookup** and the number of hidden units is the **embedding dimension**.
<img src='assets/tokenize_lookup.png' width=500>
There is nothing magical going on here. The embedding lookup table is just a weight matrix. The embedding layer is just a hidden layer. The lookup is just a shortcut for the matrix multiplication. The lookup table is trained just like any weight matrix as well.
Embeddings aren't only used for words of course. You can use them for any model where you have a massive number of classes. A particular type of model called **Word2Vec** uses the embedding layer to find vector representations of words that contain semantic meaning.
## Word2Vec
The word2vec algorithm finds much more efficient representations by finding vectors that represent the words. These vectors also contain semantic information about the words. Words that show up in similar contexts, such as "black", "white", and "red" will have vectors near each other. There are two architectures for implementing word2vec, CBOW (Continuous Bag-Of-Words) and Skip-gram.
<img src="assets/word2vec_architectures.png" width="500">
In this implementation, we'll be using the skip-gram architecture because it performs better than CBOW. Here, we pass in a word and try to predict the words surrounding it in the text. In this way, we can train the network to learn representations for words that show up in similar contexts.
First up, importing packages.
```
import time
import numpy as np
import tensorflow as tf
import utils
```
Load the [text8 dataset](http://mattmahoney.net/dc/textdata.html), a file of cleaned up Wikipedia articles from Matt Mahoney. The next cell will download the data set to the `data` folder. Then you can extract it and delete the archive file to save storage space.
```
from urllib.request import urlretrieve
from os.path import isfile, isdir
from tqdm import tqdm
import zipfile
# Download/extraction settings for the text8 corpus.
dataset_folder_path = 'data'
dataset_filename = 'text8.zip'
dataset_name = 'Text8 Dataset'
class DLProgress(tqdm):
    """tqdm progress bar adapted to urlretrieve's reporthook protocol."""
    last_block = 0

    def hook(self, block_num=1, block_size=1, total_size=None):
        # urlretrieve reports a cumulative block count; feed tqdm the delta.
        self.total = total_size
        delta_blocks = block_num - self.last_block
        self.update(delta_blocks * block_size)
        self.last_block = block_num
# Download the corpus only if it is not already present, with a progress bar.
if not isfile(dataset_filename):
    with DLProgress(unit='B', unit_scale=True, miniters=1, desc=dataset_name) as pbar:
        urlretrieve(
            'http://mattmahoney.net/dc/text8.zip',
            dataset_filename,
            pbar.hook)
# Extract the archive once into the data folder.
if not isdir(dataset_folder_path):
    with zipfile.ZipFile(dataset_filename) as zip_ref:
        zip_ref.extractall(dataset_folder_path)
# Read the entire cleaned Wikipedia text into memory.
with open('data/text8') as f:
    text = f.read()
```
## Preprocessing
Here I'm fixing up the text to make training easier. This comes from the `utils` module I wrote. The `preprocess` function converts any punctuation into tokens, so a period is changed to ` <PERIOD> `. In this data set, there aren't any periods, but it will help in other NLP problems. I'm also removing all words that show up five or fewer times in the dataset. This will greatly reduce issues due to noise in the data and improve the quality of the vector representations. If you want to write your own functions for this stuff, go for it.
```
# Tokenize: punctuation becomes tokens and words occurring five or fewer
# times are dropped (implemented in the project-local utils module).
words = utils.preprocess(text)
print(words[:30])
print("Total words: {}".format(len(words)))
print("Unique words: {}".format(len(set(words))))
```
And here I'm creating dictionaries to convert words to integers and back again, integers to words. The integers are assigned in descending frequency order, so the most frequent word ("the") is given the integer 0 and the next most frequent is 1, and so on. The words are converted to integers and stored in the list `int_words`.
```
# Build word<->integer lookup tables (IDs assigned in descending frequency
# order) and encode the whole corpus as integers.
vocab_to_int, int_to_vocab = utils.create_lookup_tables(words)
int_words = [vocab_to_int[word] for word in words]
```
## Subsampling
Words that show up often such as "the", "of", and "for" don't provide much context to the nearby words. If we discard some of them, we can remove some of the noise from our data and in return get faster training and better representations. This process is called subsampling by Mikolov. For each word $w_i$ in the training set, we'll discard it with probability given by
$$ P(w_i) = 1 - \sqrt{\frac{t}{f(w_i)}} $$
where $t$ is a threshold parameter and $f(w_i)$ is the frequency of word $w_i$ in the total dataset.
I'm going to leave this up to you as an exercise. Check out my solution to see how I did it.
> **Exercise:** Implement subsampling for the words in `int_words`. That is, go through `int_words` and discard each word given the probablility $P(w_i)$ shown above. Note that $P(w_i)$ is that probability that a word is discarded. Assign the subsampled data to `train_words`.
```
from collections import Counter
import random
# Subsampling threshold t from Mikolov et al.
threshold = 1e-5
word_counts = Counter(int_words)
total_count = len(int_words)
# f(w): relative frequency of each word in the corpus.
freqs = {word: count/total_count for word, count in word_counts.items()}
# P(w) = 1 - sqrt(t / f(w)): probability of DISCARDING word w.
p_drop = {word: 1 - np.sqrt(threshold/freqs[word]) for word in word_counts}
# Keep each occurrence with probability 1 - P(w).
train_words = [word for word in int_words if random.random() < (1 - p_drop[word])]
```
## Making batches
Now that our data is in good shape, we need to get it into the proper form to pass it into our network. With the skip-gram architecture, for each word in the text, we want to grab all the words in a window around that word, with size $C$.
From [Mikolov et al.](https://arxiv.org/pdf/1301.3781.pdf):
"Since the more distant words are usually less related to the current word than those close to it, we give less weight to the distant words by sampling less from those words in our training examples... If we choose $C = 5$, for each training word we will select randomly a number $R$ in range $< 1; C >$, and then use $R$ words from history and $R$ words from the future of the current word as correct labels."
> **Exercise:** Implement a function `get_target` that receives a list of words, an index, and a window size, then returns a list of words in the window around the index. Make sure to use the algorithm described above, where you choose a random number of words from the window.
```
def get_target(words, idx, window_size=5):
    """Return the context words in a randomly sized window around ``idx``.

    The window radius R is drawn uniformly from [1, window_size], following
    the skip-gram sampling scheme; the center word itself is excluded and
    duplicates are removed.
    """
    radius = np.random.randint(1, window_size + 1)
    lo = max(0, idx - radius)
    hi = idx + radius
    context = set(words[lo:idx] + words[idx + 1:hi + 1])
    return list(context)
```
Here's a function that returns batches for our network. The idea is that it grabs `batch_size` words from a words list. Then for each of those words, it gets the target words in the window. I haven't found a way to pass in a random number of target words and get it to work with the architecture, so I make one row per input-target pair. This is a generator function by the way, helps save memory.
```
def get_batches(words, batch_size, window_size=5):
    ''' Create a generator of word batches as a tuple (inputs, targets) '''
    # Truncate so every yielded batch is exactly batch_size words long.
    usable = (len(words) // batch_size) * batch_size
    words = words[:usable]
    for start in range(0, usable, batch_size):
        batch = words[start:start + batch_size]
        inputs, targets = [], []
        for pos, center in enumerate(batch):
            # One (input, target) row per context word, since the graph
            # cannot take a variable number of targets per input.
            context = get_target(batch, pos, window_size)
            targets.extend(context)
            inputs.extend([center] * len(context))
        yield inputs, targets
```
## Building the graph
From [Chris McCormick's blog](http://mccormickml.com/2016/04/19/word2vec-tutorial-the-skip-gram-model/), we can see the general structure of our network.

The input words are passed in as one-hot encoded vectors. This will go into a hidden layer of linear units, then into a softmax layer. We'll use the softmax layer to make a prediction like normal.
The idea here is to train the hidden layer weight matrix to find efficient representations for our words. We can discard the softmax layer because we don't really care about making predictions with this network. We just want the embedding matrix so we can use it in other networks we build from the dataset.
I'm going to have you build the graph in stages now. First off, creating the `inputs` and `labels` placeholders like normal.
> **Exercise:** Assign `inputs` and `labels` using `tf.placeholder`. We're going to be passing in integers, so set the data types to `tf.int32`. The batches we're passing in will have varying sizes, so set the batch sizes to [`None`]. To make things work later, you'll need to set the second dimension of `labels` to `None` or `1`.
```
train_graph = tf.Graph()
with train_graph.as_default():
    # TF1-style feeds: integer word ids.  Batch dimension is None so batches
    # of any size can be fed; labels are 2-D because
    # tf.nn.sampled_softmax_loss expects a [batch, num_true] label tensor.
    inputs = tf.placeholder(tf.int32, [None], name='inputs')
    labels = tf.placeholder(tf.int32, [None, None], name='labels')
```
## Embedding
The embedding matrix has a size of the number of words by the number of units in the hidden layer. So, if you have 10,000 words and 300 hidden units, the matrix will have size $10,000 \times 300$. Remember that we're using tokenized data for our inputs, usually as integers, where the number of tokens is the number of words in our vocabulary.
> **Exercise:** Tensorflow provides a convenient function [`tf.nn.embedding_lookup`](https://www.tensorflow.org/api_docs/python/tf/nn/embedding_lookup) that does this lookup for us. You pass in the embedding matrix and a tensor of integers, then it returns rows in the matrix corresponding to those integers. Below, set the number of embedding features you'll use (200 is a good start), create the embedding matrix variable, and use `tf.nn.embedding_lookup` to get the embedding tensors. For the embedding matrix, I suggest you initialize it with a uniform random numbers between -1 and 1 using [tf.random_uniform](https://www.tensorflow.org/api_docs/python/tf/random_uniform).
```
n_vocab = len(int_to_vocab)
n_embedding = 200  # Number of embedding features
with train_graph.as_default():
    # Embedding table initialized uniformly in [-1, 1); embedding_lookup
    # selects the rows for the current batch of word ids.
    embedding = tf.Variable(tf.random_uniform((n_vocab, n_embedding), -1, 1))
    embed = tf.nn.embedding_lookup(embedding, inputs)
```
## Negative sampling
For every example we give the network, we train it using the output from the softmax layer. That means for each input, we're making very small changes to millions of weights even though we only have one true example. This makes training the network very inefficient. We can approximate the loss from the softmax layer by only updating a small subset of all the weights at once. We'll update the weights for the correct label, but only a small number of incorrect labels. This is called ["negative sampling"](http://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf). Tensorflow has a convenient function to do this, [`tf.nn.sampled_softmax_loss`](https://www.tensorflow.org/api_docs/python/tf/nn/sampled_softmax_loss).
> **Exercise:** Below, create weights and biases for the softmax layer. Then, use [`tf.nn.sampled_softmax_loss`](https://www.tensorflow.org/api_docs/python/tf/nn/sampled_softmax_loss) to calculate the loss. Be sure to read the documentation to figure out how it works.
```
# Number of negative labels to sample
n_sampled = 100
with train_graph.as_default():
    # Output layer parameters.  Note the shape (n_vocab, n_embedding):
    # sampled_softmax_loss expects the weight matrix transposed relative to
    # an ordinary dense layer.
    softmax_w = tf.Variable(tf.truncated_normal((n_vocab, n_embedding), stddev=0.1))
    softmax_b = tf.Variable(tf.zeros(n_vocab))

    # Calculate the loss using negative sampling: only n_sampled negative
    # classes (plus the true class) are updated per step.
    loss = tf.nn.sampled_softmax_loss(softmax_w, softmax_b,
                                      labels, embed,
                                      n_sampled, n_vocab)

    cost = tf.reduce_mean(loss)
    optimizer = tf.train.AdamOptimizer().minimize(cost)
```
## Validation
This code is from Thushan Ganegedara's implementation. Here we're going to choose a few common words and few uncommon words. Then, we'll print out the closest words to them. It's a nice way to check that our embedding table is grouping together words with similar semantic meanings.
```
with train_graph.as_default():
    ## From Thushan Ganegedara's implementation
    valid_size = 16  # Random set of words to evaluate similarity on.
    valid_window = 100
    # pick 8 samples from (0,100) and (1000,1100) each ranges. lower id implies more frequent
    valid_examples = np.array(random.sample(range(valid_window), valid_size // 2))
    valid_examples = np.append(valid_examples,
                               random.sample(range(1000, 1000 + valid_window), valid_size // 2))

    valid_dataset = tf.constant(valid_examples, dtype=tf.int32)

    # We use the cosine distance:
    # NOTE(review): `keep_dims` was renamed `keepdims` in newer TF 1.x — confirm TF version.
    norm = tf.sqrt(tf.reduce_sum(tf.square(embedding), 1, keep_dims=True))
    normalized_embedding = embedding / norm
    valid_embedding = tf.nn.embedding_lookup(normalized_embedding, valid_dataset)
    # Cosine similarity of each validation word against the whole vocabulary.
    similarity = tf.matmul(valid_embedding, tf.transpose(normalized_embedding))
# If the checkpoints directory doesn't exist:
!mkdir checkpoints

# Training hyperparameters.
epochs = 10
batch_size = 1000
window_size = 10

with train_graph.as_default():
    saver = tf.train.Saver()
with tf.Session(graph=train_graph) as sess:
    iteration = 1
    loss = 0  # running sum of batch losses, reset every 100 iterations
    sess.run(tf.global_variables_initializer())

    for e in range(1, epochs + 1):
        batches = get_batches(train_words, batch_size, window_size)
        start = time.time()
        for x, y in batches:
            # labels must be 2-D ([batch, 1]) for sampled_softmax_loss.
            feed = {inputs: x,
                    labels: np.array(y)[:, None]}
            train_loss, _ = sess.run([cost, optimizer], feed_dict=feed)
            loss += train_loss

            if iteration % 100 == 0:
                end = time.time()
                print("Epoch {}/{}".format(e, epochs),
                      "Iteration: {}".format(iteration),
                      "Avg. Training loss: {:.4f}".format(loss / 100),
                      "{:.4f} sec/batch".format((end - start) / 100))
                loss = 0
                start = time.time()

            if iteration % 1000 == 0:
                # note that this is expensive (~20% slowdown if computed every 500 steps)
                sim = similarity.eval()
                for i in range(valid_size):
                    valid_word = int_to_vocab[valid_examples[i]]
                    top_k = 8  # number of nearest neighbors
                    # index 0 is the word itself, so skip it
                    nearest = (-sim[i, :]).argsort()[1:top_k + 1]
                    log = 'Nearest to %s:' % valid_word
                    for k in range(top_k):
                        close_word = int_to_vocab[nearest[k]]
                        log = '%s %s,' % (log, close_word)
                    print(log)

            iteration += 1
    save_path = saver.save(sess, "checkpoints/text8.ckpt")
    # Keep the L2-normalized table: rows are unit vectors, so dot products
    # below are cosine similarities.
    embed_mat = sess.run(normalized_embedding)
```
Restore the trained network if you need to:
```
with train_graph.as_default():
    saver = tf.train.Saver()

with tf.Session(graph=train_graph) as sess:
    saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
    # NOTE(review): this pulls the *raw* embedding matrix, whereas the training
    # cell saved `normalized_embedding` into embed_mat — confirm which one the
    # downstream visualization expects.
    embed_mat = sess.run(embedding)
```
## Visualizing the word vectors
Below we'll use T-SNE to visualize how our high-dimensional word vectors cluster together. T-SNE is used to project these vectors into two dimensions while preserving local structure. Check out [this post from Christopher Olah](http://colah.github.io/posts/2014-10-Visualizing-MNIST/) to learn more about T-SNE and other ways to visualize high-dimensional data.
```
%matplotlib inline
%config InlineBackend.figure_format = 'retina'

import matplotlib.pyplot as plt
from sklearn.manifold import TSNE

# Project the first viz_words embeddings to 2-D and label each point with its
# word.  Lower integer ids correspond to more frequent words.
viz_words = 500
tsne = TSNE()
embed_tsne = tsne.fit_transform(embed_mat[:viz_words, :])

fig, ax = plt.subplots(figsize=(14, 14))
for idx in range(viz_words):
    plt.scatter(*embed_tsne[idx, :], color='steelblue')
    plt.annotate(int_to_vocab[idx], (embed_tsne[idx, 0], embed_tsne[idx, 1]), alpha=0.7)
```
| github_jupyter |
```
from tsfresh.feature_extraction.settings import from_columns
import unified_bib as ub
import pandas as pd
import numpy as np
import pickle
import importlib
importlib.reload(ub)
df = ub.tsfresh_chucksize_test('1')

# Mark features as invalid when they are fft-derived or contain any NaN.
# NOTE(review): `features` is not defined in this cell — presumably the column
# index of an earlier extraction run; confirm before re-running.
nan_columns = []
for col in features:
    data = df.loc[:, col].values
    nan_test = np.isnan(data)
    aux = col.split('__')[1].split('_')[0]  # tsfresh feature-family prefix, e.g. 'fft'
    if aux == 'fft':
        nan_columns.append(col)
    elif any(nan == True for nan in nan_test):
        nan_columns.append(col)
print('Percentage of invalid features: ', len(nan_columns) * 100 / len(features))

valid_features = []
for i in range(len(features)):
    if features[i] not in nan_columns:
        valid_features.append(features[i])
print('Percentage of valid features: ', len(valid_features) * 100 / len(features))

# NOTE(review): `scaler` is not defined in this cell — confirm it exists before
# persisting it.
with open('Kernel/scaler.pkl', 'wb') as f:
    pickle.dump(scaler, f)

# NOTE(review): duplicated block — recomputes `valid_features` identically to
# the loop above.
valid_features = []
for i in range(len(features)):
    if features[i] not in nan_columns:
        valid_features.append(features[i])
print('Percentage of valid features: ', len(valid_features) * 100 / len(features))

# Map the surviving feature names back to a tsfresh extraction-settings dict.
valid_features_dict = from_columns(valid_features)

features.shape
len(nan_columns)

print('====== Welcome to the Cross-Validation program ======')
output_id = input('which input you want to format?')
full_data = np.genfromtxt('Input/Output_' + output_id + '.csv',
                          delimiter=',')
L, W = full_data.shape
data = full_data[:, 2:-1]  # sensor measurement columns
info = full_data[:, 0:2]   # id and time columns
```
# Select Features
```
# Drop the invalid columns found above and keep only the cleaned feature set.
cleaned_features = features.drop(nan_columns)
cleaned_features.shape
cleaned_df = df[cleaned_features]
cleaned_df

import selection
importlib.reload(selection)

# One label per time series: the file has 149 rows per series, so take every
# 149th row of the last column.
label = np.genfromtxt('Input/output_13.csv', delimiter=',')[::149, -1]
label.shape

filtered_df, relevance_table = selection.select_features(cleaned_df, label, n_jobs=0)
relevance_table
```
# Modelo
```
#def tsfresh_ensemble(output_id):
output_id = 9
if True:
    # Loading the required input
    full_data = np.genfromtxt('Input/Output_{}.csv'.format(output_id),
                              delimiter=',')
    L, W = full_data.shape
    data = full_data[:, 2:-1]   # sensor measurement columns
    info = full_data[:, 0:2]    # id and time columns
    n_measures = int(max(info[:, 1]))
    n_timeseries = int(max(info[:, 0]))
    label = full_data[::n_measures, -1]  # one label per time series

    # Scale sensors to [-1, 1] and persist the scaler for the streaming stage.
    scaler = MinMaxScaler(feature_range=(-1, 1)).fit(data)
    data = scaler.transform(data)
    with open('Kernel/scaler.pkl', 'wb') as f:
        pickle.dump(scaler, f)
    full_data = np.concatenate((info, data), axis=1)

    # Split the series into `divisions` random folds (ensemble members).
    divisions = 5
    idx = np.random.choice(range(n_timeseries), n_timeseries, replace=False)
    idx_division = np.array_split(idx, divisions)
    for i, div in enumerate(idx_division):
        div.sort()
        # Expand series indices to the row ranges of their measurements.
        indices = [d2 for d1 in div for d2 in range(d1 * n_measures, (d1 + 1) * n_measures)]
        ensemble_data = full_data[indices, :]
        ensemble_label = label[div]
        df = pd.DataFrame(ensemble_data, columns=['id', 'time'] +
                          ['Sensor_' + str(x) for x in range(1, W - 2)])
        extracted_features = tsfresh.extract_features(df, column_id="id", column_sort="time", n_jobs=0)
        features = extracted_features.columns

        # Discard any feature column containing NaNs.
        nan_columns = []
        for col in features:
            nan_test = np.isnan(extracted_features.loc[:, col].values)
            if any(nan == True for nan in nan_test):
                nan_columns.append(col)
        print('Percentage of invalid features: ', len(nan_columns) * 100 / len(features))
        cleaned_features = features.drop(nan_columns)
        cleaned_df = extracted_features[cleaned_features]

        filtered_df, relevance_table = selection.select_features(cleaned_df, ensemble_label, n_jobs=0)
        # BUG FIX: DataFrame.fillna is not in-place by default — the original
        # call discarded its result, so NaN p-values survived and poisoned the
        # averaged p-values below.  Assign it back so a missing p-value counts
        # as "irrelevant" (10 >> any significance threshold).
        relevance_table = relevance_table.fillna(value=10)

        # Accumulate p-values across folds; averaged after the loop.
        if i == 0:
            relevance_table_final = relevance_table.copy()
            extracted_features_final = extracted_features.copy()
        else:
            relevance_table_final.p_value = relevance_table_final.p_value + relevance_table.p_value
            extracted_features_final = pd.concat([extracted_features_final, extracted_features], axis=0)
            extracted_features_final = extracted_features_final.sort_index()

    # Average fold p-values and keep only features below the 0.005 threshold.
    relevance_table_final.p_value = relevance_table_final.p_value / divisions
    relevance_table_final.relevant = relevance_table_final.p_value < 0.005
    relevant_features = relevance_table_final[relevance_table_final.relevant].feature
    extracted_features_final = extracted_features_final[relevant_features]

    # Persist artifacts needed to reproduce exactly this feature set at
    # inference time.
    kind_to_fc_parameters = from_columns(relevant_features)
    with open('Kernel/kind_to_fc_parameters.pkl', 'wb') as f:
        pickle.dump(kind_to_fc_parameters, f)
    with open('Kernel/columns.pkl', 'wb') as f:
        pickle.dump(relevant_features.keys().tolist(), f)
    with open('Kernel/final_target_{}.pkl'.format(output_id), 'wb') as f:
        pickle.dump(label, f)

    Output = {'FeaturesFiltered': extracted_features_final,
              'FinalTarget': label,
              'ID': int(output_id)}
#return Output
relevance_table_final
#def dynamic_tsfresh (output_id=0, mode='prototype'):
output_id = 9
if True:
    # Reuse the scaler fitted during the ensemble/training stage.
    with open('Kernel/scaler.pkl', 'rb') as f:
        scaler = pickle.load(f)

    # Loading streaming data
    total_data = np.genfromtxt('Input/Output_' + str(output_id) + '.csv', delimiter=',')
    data = total_data[:, 2:-1]   # sensor measurement columns
    info = total_data[:, 0:2]    # id and time columns
    data = scaler.transform(data)
    total_data = np.concatenate((info, data), axis=1)
    df = pd.DataFrame(total_data, columns=['id', 'time'] +
                      ['Sensor_' + str(x) for x in range(1, (total_data.shape[1] - 1))])

    # Loading feature dictionary
    with open('Kernel/kind_to_fc_parameters.pkl', 'rb') as f:
        kind_to_fc_parameters = pickle.load(f)
    # Loading column names
    with open('Kernel/columns.pkl', 'rb') as f:
        original_columns = pickle.load(f)

    # NOTE(review): extracts *all* features and then subsets to the saved
    # columns; passing kind_to_fc_parameters into extract_features would be
    # much cheaper — confirm the subset matches before switching.
    extracted_features = tsfresh.extract_features(df, column_id="id", column_sort="time", n_jobs=0)
    final_features = extracted_features[original_columns]
#return impute(final_features), extracted_features
final_features
# Count how many entries match the training-stage output (sanity check).
(final_features == Output['FeaturesFiltered']).sum().sum()
560*136
```
| github_jupyter |
# R squared and Mean squared error
## What is R squared?
R-squared evaluates the scatter of the data points around the fitted regression line.
It is also called the coefficient of determination, or the coefficient of multiple determination for multiple regression.
R-squared is the percentage of the dependent variable variation that a linear model explains.

## Perfomance of the R squared model
- 0% represents a model that does not explain any of the variations in the response variable around its mean. The mean of the dependent variable predicts the dependent variable as well as the regression model.
- 100% represents a model that explains all of the variations in the response variable around its mean.
- The higher the value of R-squared, the smaller the difference between the actual values and the predicted values, and therefore the better the model fits the data.
## Implementation of R squared
### Importing necessary libraries
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn.metrics import r2_score

# Taking data manually as given below
X = [57, 4, 2, 8, 9, 22, 33, 63, 56, 97, 77, 73, 83, 27, 87, 91]
Y = [56.5, 5.33, 4.21, 7.89, 9.11, 22.17, 34.67, 63.66, 57.57, 97.27, 72.21, 75.77, 83.99, 25.77, 90.67, 94.53]

# Convert X and Y to arrays and reshape to the (n_samples, 1) column shape
# scikit-learn estimators expect.
X_new = np.array(X)
Y_new = np.array(Y)
X_new = X_new.reshape(-1, 1)
Y_new = Y_new.reshape(-1, 1)

# Fit a through-the-origin line (fit_intercept=False) to obtain fitted values.
model = linear_model.LinearRegression(fit_intercept=False)
score = model.fit(X_new, Y_new)  # fit() returns the fitted estimator itself
```
#### Applying R_square model to the data i.e. X_new and Y_new
```
# BUG FIX: R-squared must compare the observed targets with the model's
# *predictions*; the original scored the raw X values against Y, which does
# not measure the fit of the regression at all.
Y_fitted = model.predict(X_new)
r2 = r2_score(Y_new, Y_fitted)
print("R_square for the model is: ", r2)
```
### Plotting the actual and predicted values
```
# Plot observed points (red) against the fitted regression line (blue).
line = model.predict(X_new)
plt.scatter(X_new, Y_new, color='red')
plt.plot(X_new, line, color='blue')
plt.show()
```
The figure above shows how R-squared reflects the scatter of the data points around the regression line.
We can plot fitted values by observed values.
In the figure above, the red dots represent the observed (actual) values and the blue line represents the fitted (predicted) values.
### Problems with R-squared
- We cannot use R-squared to conclude whether your model is biased. To check for this bias, we need to check our residual plots.
- R-squared increases every time you add an independent variable to the model. The R-squared never decreases, not even when it’s just a chance correlation between variables.
- A regression model that contains more independent variables than another model can look like it provides a better fit merely because it contains more variables.
- When a model contains an excessive number of independent variables and polynomial terms, it becomes overly customized to fit the peculiarities and random noise in our sample rather than reflecting the entire population.
## What is Mean Squared Error?
The Mean Squared Error (MSE) or Mean Squared Deviation (MSD) of an estimator measures the average of error squares i.e. the average squared difference between the estimated values and true value.
### Implementation of Mean Squared Error
#### Import library for Mean Squared Error
```
# Scikit-learn implementation of mean squared error.
from sklearn.metrics import mean_squared_error

# The same hand-entered observations (y) and predictions (y1) as above.
y = [57, 4, 2, 8, 9, 22, 33, 63, 56, 97, 77, 73, 83, 27, 87, 91]
y1 = [56.5, 5.33, 4.21, 7.89, 9.11, 22.17, 34.67, 63.66, 57.57, 97.27, 72.21, 75.77, 83.99, 25.77, 90.67, 94.53]

# Convert both lists to column vectors of shape (n_samples, 1).
Y_true = np.array(y).reshape(-1, 1)
Y_pred = np.array(y1).reshape(-1, 1)
```
### Method 1: Mean Squared Error using Scikit-learn Library
```
# Calculation of Mean_Squared_Error and storing it into variable i.e. mse_value.
# mean_squared_error(y_true, y_pred) averages the squared residuals.
mse_value = mean_squared_error(Y_true, Y_pred)
print("MSE value using 'sklearn.metrics': ", mse_value)
```
### Method 2: Mean Squared Error using Numpy module
```
# The same computation by hand: mean of the squared residuals.
mse_np = ((Y_true - Y_pred) ** 2).mean()
print("MSE value using 'numpy': ", mse_np)
```
As shown above, the mean squared error can be computed with either method on any data.
### Combining implementation of R_squared and Mean_squared_error on Boston dataset
### Importing libraries
```
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
from sklearn.metrics import r2_score, mean_squared_error
from sklearn import datasets

# Load data from sklearn boston dataset
# NOTE(review): load_boston was deprecated in scikit-learn 1.0 and removed in
# 1.2 — this cell requires an older scikit-learn release.
boston_df = datasets.load_boston()
X = boston_df.data
y = boston_df.target

# Spliting the data into training data and testing data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

# Creating pipeline (standardize, then fit OLS) and fitting the boston data
pipeline_df = make_pipeline(StandardScaler(), LinearRegression())
pipeline_df.fit(X_train, y_train)

# Calculating the predicted value of training and testing dataset
y_train_pred = pipeline_df.predict(X_train)
y_test_pred = pipeline_df.predict(X_test)

# Mean Squared Error
print('MSE train: %.3f, test: %.3f' % (mean_squared_error(y_train, y_train_pred), mean_squared_error(y_test, y_test_pred)))
# R-Squared
print('R_squared train: %.3f, test: %.3f' % (r2_score(y_train, y_train_pred), r2_score(y_test, y_test_pred)))
```
# Conclusion
So, there are two ways to measure the error — R-squared and mean squared error — and which one is better to use depends on the situation:
- The similarity between mean-squared error and R-Squared is that they both are a type of metrics which are used for evaluating the performance of the regression models, especially statistical model such as linear regression model.
- It is recommended to use R-Squared or rather adjusted R-Squared for evaluating the model performance of the regression models. This is primarily because R-Squared captures the fraction of response variance captured by the regression and tend to give better picture of quality of regression model.
- MSE values differ based on whether the values of the response variable is scaled or not.
| github_jupyter |
<table class="ee-notebook-buttons" align="left">
<td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/FeatureCollection/from_polygons.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
<td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/FeatureCollection/from_polygons.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
<td><a target="_blank" href="https://mybinder.org/v2/gh/giswqs/earthengine-py-notebooks/master?filepath=FeatureCollection/from_polygons.ipynb"><img width=58px src="https://mybinder.org/static/images/logo_social.png" />Run in binder</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/FeatureCollection/from_polygons.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
</table>
## Install Earth Engine API
Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geehydro](https://github.com/giswqs/geehydro). The **geehydro** Python package builds on the [folium](https://github.com/python-visualization/folium) package and implements several methods for displaying Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, `Map.centerObject()`, and `Map.setOptions()`.
The following script checks if the geehydro package has been installed. If not, it will install geehydro, which automatically install its dependencies, including earthengine-api and folium.
```
import subprocess

# Install geehydro on first run; it pulls in earthengine-api and folium.
try:
    import geehydro
except ImportError:
    print('geehydro package not installed. Installing ...')
    subprocess.check_call(["python", '-m', 'pip', 'install', 'geehydro'])
```
Import libraries
```
import ee
import folium
import geehydro
```
Authenticate and initialize Earth Engine API. You only need to authenticate the Earth Engine API once.
```
# Initialize the Earth Engine API; run the one-time OAuth flow first if the
# stored credentials are missing or invalid.
try:
    ee.Initialize()
except Exception as e:
    ee.Authenticate()
    ee.Initialize()
```
## Create an interactive map
This step creates an interactive map using [folium](https://github.com/python-visualization/folium). The default basemap is the OpenStreetMap. Additional basemaps can be added using the `Map.setOptions()` function.
The optional basemaps can be `ROADMAP`, `SATELLITE`, `HYBRID`, `TERRAIN`, or `ESRI`.
```
# Folium map centered on the continental US.  `setOptions` is one of the
# methods the geehydro import adds onto folium.Map.
Map = folium.Map(location=[40, -100], zoom_start=4)
Map.setOptions('HYBRID')
```
## Add Earth Engine Python script
```
Map.setCenter(-107, 41, 6)

# Two state polygons; the 'fill' property is used below as a palette index.
fc = ee.FeatureCollection([
    ee.Feature(
        ee.Geometry.Polygon(
            [[-109.05, 41], [-109.05, 37], [-102.05, 37], [-102.05, 41]]),
        {'name': 'Colorado', 'fill': 1}),
    ee.Feature(
        ee.Geometry.Polygon(
            [[-114.05, 37.0], [-109.05, 37.0], [-109.05, 41.0],
             [-111.05, 41.0], [-111.05, 42.0], [-114.05, 42.0]]),
        {'name': 'Utah', 'fill': 2})
])

# Fill, then outline the polygons into a blank image.
image1 = ee.Image(0).mask(0).toByte()  # fully-masked byte image as a blank canvas
image2 = image1.paint(fc, 'fill')  # Get color from property named 'fill'
image3 = image2.paint(fc, 3, 5)  # Outline using color 3, width 5.
Map.addLayer(image3, {
    'palette': ['000000', 'FF0000', '00FF00', '0000FF'],
    'max': 3,
    'opacity': 0.5
}, "Colorado & Utah")
```
## Display Earth Engine data layers
```
# Enable the layer selector, fullscreen button, and click-for-coordinates popup.
Map.setControlVisibility(layerControl=True, fullscreenControl=True, latLngPopup=True)
Map
```
| github_jupyter |
##### Copyright 2020 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Quantization aware training comprehensive guide
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/model_optimization/guide/quantization/training_comprehensive_guide"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/model-optimization/blob/master/tensorflow_model_optimization/g3doc/guide/quantization/training_comprehensive_guide.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/model-optimization/blob/master/tensorflow_model_optimization/g3doc/guide/quantization/training_comprehensive_guide.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/model-optimization/tensorflow_model_optimization/g3doc/guide/quantization/training_comprehensive_guide.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
Welcome to the comprehensive guide for Keras quantization aware training.
This page documents various use cases and shows how to use the API for each one. Once you know which APIs you need, find the parameters and the low-level details in the
[API docs](https://www.tensorflow.org/model_optimization/api_docs/python/tfmot/quantization).
* If you want to see the benefits of quantization aware training and what's supported, see the [overview](https://www.tensorflow.org/model_optimization/guide/quantization/training.md).
* For a single end-to-end example, see the [quantization aware training example](https://www.tensorflow.org/model_optimization/guide/quantization/training_example.md).
The following use cases are covered:
* Deploy a model with 8-bit quantization with these steps.
* Define a quantization aware model.
* For Keras HDF5 models only, use special checkpointing and
deserialization logic. Training is otherwise standard.
* Create a quantized model from the quantization aware one.
* Experiment with quantization.
* Anything for experimentation has no supported path to deployment.
* Custom Keras layers fall under experimentation.
## Setup
For finding the APIs you need and understanding purposes, you can run but skip reading this section.
```
# Replace stable TF with tf-nightly plus the model-optimization toolkit.
! pip uninstall -y tensorflow
! pip install -q tf-nightly
! pip install -q tensorflow-model-optimization

import tensorflow as tf
import numpy as np
import tensorflow_model_optimization as tfmot

import tempfile

# Tiny one-sample dataset: enough to exercise the APIs, not to train anything.
input_shape = [20]
x_train = np.random.randn(1, 20).astype(np.float32)
y_train = tf.keras.utils.to_categorical(np.random.randn(1), num_classes=20)
def setup_model():
    """Return a fresh, uncompiled Dense(20) -> Flatten Sequential model."""
    layers = [
        tf.keras.layers.Dense(20, input_shape=input_shape),
        tf.keras.layers.Flatten(),
    ]
    return tf.keras.Sequential(layers)
def setup_pretrained_weights():
    """Compile and fit a fresh model once, save its weights, return the path."""
    model = setup_model()
    compile_args = dict(
        loss=tf.keras.losses.categorical_crossentropy,
        optimizer='adam',
        metrics=['accuracy'],
    )
    model.compile(**compile_args)
    model.fit(x_train, y_train)

    # Persist the weights to a temp file in TF checkpoint format.
    _, pretrained_weights = tempfile.mkstemp('.tf')
    model.save_weights(pretrained_weights)
    return pretrained_weights
def setup_pretrained_model():
    """Return a fresh model loaded with newly generated pretrained weights."""
    weights_path = setup_pretrained_weights()
    model = setup_model()
    model.load_weights(weights_path)
    return model
# Sanity-check the helper and cache one set of pretrained weights for reuse
# throughout the guide.
setup_model()
pretrained_weights = setup_pretrained_weights()
```
##Define quantization aware model
By defining models in the following ways, there are available paths to deployment to backends listed in the [overview page](https://www.tensorflow.org/model_optimization/guide/quantization/training.md). By default, 8-bit quantization is used.
Note: a quantization aware model is not actually quantized. Creating a quantized model is a separate step.
### Quantize whole model
**Your use case:**
* Subclassed models are not supported.
**Tips for better model accuracy:**
* Try "Quantize some layers" to skip quantizing the layers that reduce accuracy the most.
* It's generally better to finetune with quantization aware training as opposed to training from scratch.
To make the whole model aware of quantization, apply `tfmot.quantization.keras.quantize_model` to the model.
```
base_model = setup_model()
base_model.load_weights(pretrained_weights)  # optional but recommended for model accuracy

# Wrap every supported layer so the whole model trains with fake quantization.
quant_aware_model = tfmot.quantization.keras.quantize_model(base_model)
quant_aware_model.summary()
```
### Quantize some layers
Quantizing a model can have a negative effect on accuracy. You can selectively quantize layers of a model to explore the trade-off between accuracy, speed, and model size.
**Your use case:**
* To deploy to a backend that only works well with fully quantized models (e.g. EdgeTPU v1, most DSPs), try "Quantize whole model".
**Tips for better model accuracy:**
* It's generally better to finetune with quantization aware training as opposed to training from scratch.
* Try quantizing the later layers instead of the first layers.
* Avoid quantizing critical layers (e.g. attention mechanism).
In the example below, quantize only the `Dense` layers.
```
# Create a base model
base_model = setup_model()
base_model.load_weights(pretrained_weights)  # optional but recommended for model accuracy

# Helper function uses `quantize_annotate_layer` to annotate that only the
# Dense layers should be quantized.
def apply_quantization_to_dense(layer):
    """Annotate Dense layers for quantization; leave all other layers untouched."""
    if not isinstance(layer, tf.keras.layers.Dense):
        return layer
    return tfmot.quantization.keras.quantize_annotate_layer(layer)
# Use `tf.keras.models.clone_model` to apply `apply_quantization_to_dense`
# to the layers of the model.
annotated_model = tf.keras.models.clone_model(
    base_model,
    clone_function=apply_quantization_to_dense,
)

# Now that the Dense layers are annotated,
# `quantize_apply` actually makes the model quantization aware.
quant_aware_model = tfmot.quantization.keras.quantize_apply(annotated_model)
quant_aware_model.summary()
```
While this example used the type of the layer to decide what to quantize, the easiest way to quantize a particular layer is to set its `name` property, and look for that name in the `clone_function`.
```
print(base_model.layers[0].name)
```
#### More readable but potentially lower model accuracy
This is not compatible with finetuning with quantization aware training, which is why it may be less accurate than the above examples.
**Functional example**
```
# Use `quantize_annotate_layer` to annotate that the `Dense` layer
# should be quantized.
i = tf.keras.Input(shape=(20,))
x = tfmot.quantization.keras.quantize_annotate_layer(tf.keras.layers.Dense(10))(i)
o = tf.keras.layers.Flatten()(x)
annotated_model = tf.keras.Model(inputs=i, outputs=o)

# Use `quantize_apply` to actually make the model quantization aware.
quant_aware_model = tfmot.quantization.keras.quantize_apply(annotated_model)

# For deployment purposes, the tool adds `QuantizeLayer` after `InputLayer` so that the
# quantized model can take in float inputs instead of only uint8.
quant_aware_model.summary()
```
**Sequential example**
```
# Use `quantize_annotate_layer` to annotate that the `Dense` layer
# should be quantized.
annotated_model = tf.keras.Sequential([
    tfmot.quantization.keras.quantize_annotate_layer(tf.keras.layers.Dense(20, input_shape=input_shape)),
    tf.keras.layers.Flatten()
])

# Use `quantize_apply` to actually make the model quantization aware.
quant_aware_model = tfmot.quantization.keras.quantize_apply(annotated_model)
quant_aware_model.summary()
```
## Checkpoint and deserialize
**Your use case:** this code is only needed for the HDF5 model format (not HDF5 weights or other formats).
```
# Define the model.
base_model = setup_model()
base_model.load_weights(pretrained_weights)  # optional but recommended for model accuracy
quant_aware_model = tfmot.quantization.keras.quantize_model(base_model)

# Save or checkpoint the model.
_, keras_model_file = tempfile.mkstemp('.h5')
quant_aware_model.save(keras_model_file)

# `quantize_scope` is needed for deserializing HDF5 models: it makes the
# quantization wrapper classes resolvable during load_model.
with tfmot.quantization.keras.quantize_scope():
    loaded_model = tf.keras.models.load_model(keras_model_file)

loaded_model.summary()
```
## Create and deploy quantized model
In general, reference the documentation for the deployment backend that you
will use.
This is an example for the TFLite backend.
```
base_model = setup_pretrained_model()
quant_aware_model = tfmot.quantization.keras.quantize_model(base_model)

# Typically you train the model here.

# Convert the quantization-aware model to a quantized TFLite flatbuffer.
converter = tf.lite.TFLiteConverter.from_keras_model(quant_aware_model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]

quantized_tflite_model = converter.convert()
```
## Experiment with quantization
**Your use case**: using the following APIs means that there is no
supported path to deployment. The features are also experimental and not
subject to backward compatibility.
* `tfmot.quantization.keras.QuantizeConfig`
* `tfmot.quantization.keras.quantizers.Quantizer`
* `tfmot.quantization.keras.quantizers.LastValueQuantizer`
* `tfmot.quantization.keras.quantizers.MovingAverageQuantizer`
### Setup: DefaultDenseQuantizeConfig
Experimenting requires using `tfmot.quantization.keras.QuantizeConfig`, which describes how to quantize the weights, activations, and outputs of a layer.
Below is an example that defines the same `QuantizeConfig` used for the `Dense` layer in the API defaults.
During the forward propagation in this example, the `LastValueQuantizer` returned in `get_weights_and_quantizers` is called with `layer.kernel` as the input, producing an output. The output replaces `layer.kernel`
in the original forward propagation of the `Dense` layer, via the logic defined in `set_quantize_weights`. The same idea applies to the activations and outputs.
```
LastValueQuantizer = tfmot.quantization.keras.quantizers.LastValueQuantizer
MovingAverageQuantizer = tfmot.quantization.keras.quantizers.MovingAverageQuantizer
class DefaultDenseQuantizeConfig(tfmot.quantization.keras.QuantizeConfig):
    """Replicates the API-default quantization scheme for `Dense` layers.

    Weights are quantized with an 8-bit last-value quantizer, activations
    with an 8-bit moving-average quantizer; outputs get no extra quantizer.
    """

    def get_weights_and_quantizers(self, layer):
        # Kernel uses the same symmetric 8-bit scheme as the API default.
        weight_quantizer = LastValueQuantizer(
            num_bits=8, symmetric=True, narrow_range=False, per_axis=False)
        return [(layer.kernel, weight_quantizer)]

    def get_activations_and_quantizers(self, layer):
        # Activations track a moving average of their observed range.
        activation_quantizer = MovingAverageQuantizer(
            num_bits=8, symmetric=False, narrow_range=False, per_axis=False)
        return [(layer.activation, activation_quantizer)]

    def set_quantize_weights(self, layer, quantize_weights):
        # One assignment per entry returned by `get_weights_and_quantizers`,
        # in the same order.
        layer.kernel = quantize_weights[0]

    def set_quantize_activations(self, layer, quantize_activations):
        # One assignment per entry returned by `get_activations_and_quantizers`,
        # in the same order.
        layer.activation = quantize_activations[0]

    def get_output_quantizers(self, layer):
        # No output quantization beyond the activation quantizer.
        return []

    def get_config(self):
        # No constructor arguments to serialize.
        return {}
```
### Quantize custom Keras layer
This example uses the `DefaultDenseQuantizeConfig` to quantize the `CustomLayer`.
Applying the configuration is the same across
the "Experiment with quantization" use cases.
* Apply `tfmot.quantization.keras.quantize_annotate_layer` to the `CustomLayer` and pass in the `QuantizeConfig`.
* Use
`tfmot.quantization.keras.quantize_annotate_model` to continue to quantize the rest of the model with the API defaults.
```
quantize_annotate_layer = tfmot.quantization.keras.quantize_annotate_layer
quantize_annotate_model = tfmot.quantization.keras.quantize_annotate_model
quantize_scope = tfmot.quantization.keras.quantize_scope
class CustomLayer(tf.keras.layers.Dense):
pass
model = quantize_annotate_model(tf.keras.Sequential([
quantize_annotate_layer(CustomLayer(20, input_shape=(20,)), DefaultDenseQuantizeConfig()),
tf.keras.layers.Flatten()
]))
# `quantize_apply` requires mentioning `DefaultDenseQuantizeConfig` with `quantize_scope`
# as well as the custom Keras layer.
with quantize_scope(
{'DefaultDenseQuantizeConfig': DefaultDenseQuantizeConfig,
'CustomLayer': CustomLayer}):
# Use `quantize_apply` to actually make the model quantization aware.
quant_aware_model = tfmot.quantization.keras.quantize_apply(model)
quant_aware_model.summary()
```
### Modify quantization parameters
**Common mistake:** quantizing the bias to fewer than 32-bits usually harms model accuracy too much.
This example modifies the `Dense` layer to use 4-bits for its weights instead
of the default 8-bits. The rest of the model continues to use API defaults.
```
quantize_annotate_layer = tfmot.quantization.keras.quantize_annotate_layer
quantize_annotate_model = tfmot.quantization.keras.quantize_annotate_model
quantize_scope = tfmot.quantization.keras.quantize_scope
class ModifiedDenseQuantizeConfig(DefaultDenseQuantizeConfig):
    # Configure weights to quantize with 4-bit instead of 8-bits.
    # Everything else (activations, outputs, serialization) is inherited
    # from `DefaultDenseQuantizeConfig`.
    def get_weights_and_quantizers(self, layer):
        return [(layer.kernel, LastValueQuantizer(num_bits=4, symmetric=True, narrow_range=False, per_axis=False))]
```
Applying the configuration is the same across
the "Experiment with quantization" use cases.
* Apply `tfmot.quantization.keras.quantize_annotate_layer` to the `Dense` layer and pass in the `QuantizeConfig`.
* Use
`tfmot.quantization.keras.quantize_annotate_model` to continue to quantize the rest of the model with the API defaults.
```
model = quantize_annotate_model(tf.keras.Sequential([
# Pass in modified `QuantizeConfig` to modify this Dense layer.
quantize_annotate_layer(tf.keras.layers.Dense(20, input_shape=(20,)), ModifiedDenseQuantizeConfig()),
tf.keras.layers.Flatten()
]))
# `quantize_apply` requires mentioning `ModifiedDenseQuantizeConfig` with `quantize_scope`:
with quantize_scope(
{'ModifiedDenseQuantizeConfig': ModifiedDenseQuantizeConfig}):
# Use `quantize_apply` to actually make the model quantization aware.
quant_aware_model = tfmot.quantization.keras.quantize_apply(model)
quant_aware_model.summary()
```
### Modify parts of layer to quantize
This example modifies the `Dense` layer to skip quantizing the activation. The rest of the model continues to use API defaults.
```
quantize_annotate_layer = tfmot.quantization.keras.quantize_annotate_layer
quantize_annotate_model = tfmot.quantization.keras.quantize_annotate_model
quantize_scope = tfmot.quantization.keras.quantize_scope
class ModifiedDenseQuantizeConfig(DefaultDenseQuantizeConfig):
    """QuantizeConfig that keeps the default weight quantization but skips
    quantizing activations entirely."""

    def get_activations_and_quantizers(self, layer):
        # Skip quantizing activations.
        return []

    def set_quantize_activations(self, layer, quantize_activations):
        # Empty since `get_activations_and_quantizers` returns
        # an empty list.
        return
```
Applying the configuration is the same across
the "Experiment with quantization" use cases.
* Apply `tfmot.quantization.keras.quantize_annotate_layer` to the `Dense` layer and pass in the `QuantizeConfig`.
* Use
`tfmot.quantization.keras.quantize_annotate_model` to continue to quantize the rest of the model with the API defaults.
```
model = quantize_annotate_model(tf.keras.Sequential([
# Pass in modified `QuantizeConfig` to modify this Dense layer.
quantize_annotate_layer(tf.keras.layers.Dense(20, input_shape=(20,)), ModifiedDenseQuantizeConfig()),
tf.keras.layers.Flatten()
]))
# `quantize_apply` requires mentioning `ModifiedDenseQuantizeConfig` with `quantize_scope`:
with quantize_scope(
{'ModifiedDenseQuantizeConfig': ModifiedDenseQuantizeConfig}):
# Use `quantize_apply` to actually make the model quantization aware.
quant_aware_model = tfmot.quantization.keras.quantize_apply(model)
quant_aware_model.summary()
```
### Use custom quantization algorithm
The `tfmot.quantization.keras.quantizers.Quantizer` class is a callable that
can apply any algorithm to its inputs.
In this example, the inputs are the weights, and we apply the math in the
`FixedRangeQuantizer` \_\_call\_\_ function to the weights. Instead of the original
weights values, the output of the
`FixedRangeQuantizer` is now passed to whatever would have used the weights.
```
quantize_annotate_layer = tfmot.quantization.keras.quantize_annotate_layer
quantize_annotate_model = tfmot.quantization.keras.quantize_annotate_model
quantize_scope = tfmot.quantization.keras.quantize_scope
class FixedRangeQuantizer(tfmot.quantization.keras.quantizers.Quantizer):
    """Quantizer which forces outputs to be between -1 and 1."""

    def build(self, tensor_shape, name, layer):
        # Not needed. No new TensorFlow variables needed.
        return {}

    def __call__(self, inputs, training, weights, **kwargs):
        # Clamp values into [-1, 1]; no discretization is performed here.
        return tf.keras.backend.clip(inputs, -1.0, 1.0)

    def get_config(self):
        # Not needed. No __init__ parameters to serialize.
        return {}
class ModifiedDenseQuantizeConfig(DefaultDenseQuantizeConfig):
    # Replace the default weight quantizer with the custom
    # `FixedRangeQuantizer` algorithm (the original comment about 4-bit
    # quantization was copied from a previous example and did not apply).
    def get_weights_and_quantizers(self, layer):
        # Use custom algorithm defined in `FixedRangeQuantizer` instead of default Quantizer.
        return [(layer.kernel, FixedRangeQuantizer())]
```
Applying the configuration is the same across
the "Experiment with quantization" use cases.
* Apply `tfmot.quantization.keras.quantize_annotate_layer` to the `Dense` layer and pass in the `QuantizeConfig`.
* Use
`tfmot.quantization.keras.quantize_annotate_model` to continue to quantize the rest of the model with the API defaults.
```
model = quantize_annotate_model(tf.keras.Sequential([
# Pass in modified `QuantizeConfig` to modify this `Dense` layer.
quantize_annotate_layer(tf.keras.layers.Dense(20, input_shape=(20,)), ModifiedDenseQuantizeConfig()),
tf.keras.layers.Flatten()
]))
# `quantize_apply` requires mentioning `ModifiedDenseQuantizeConfig` with `quantize_scope`:
with quantize_scope(
{'ModifiedDenseQuantizeConfig': ModifiedDenseQuantizeConfig}):
# Use `quantize_apply` to actually make the model quantization aware.
quant_aware_model = tfmot.quantization.keras.quantize_apply(model)
quant_aware_model.summary()
```
| github_jupyter |
```
import os
import random
import torch
import numpy as np
from torch.nn import functional as F
dataset_dir = "./family"
all_trip_file = os.path.join(dataset_dir, "all.txt")
relations_file = os.path.join(dataset_dir, "relations.txt")
entities_file = os.path.join(dataset_dir, "entities.txt")
device = "cuda"
def read_xxx_to_id(file_path, num_read=None):
    """Map each (stripped) line of *file_path* to a sequential integer id.

    If *num_read* is truthy, a random subset of that many lines is used
    instead of the whole file (result then depends on `random`'s state).
    """
    mapping = {}
    with open(file_path, 'r') as handle:
        all_lines = handle.readlines()
    if num_read:
        all_lines = random.sample(all_lines, num_read)
    for raw in all_lines:
        mapping[raw.strip()] = len(mapping)
    return mapping
def parse_triplets(triplets_file: str,
                   rel2id: dict,
                   ent2id: dict):
    """Read id-triplets (relation, head, tail) from a tab-separated file.

    Each input line is `head<TAB>relation<TAB>tail`.  Lines whose relation
    or entities are missing from the id maps are silently skipped.
    """
    triplets = []
    with open(triplets_file, 'r') as handle:
        for raw_line in handle:
            fields = raw_line.strip().split('\t')
            assert(len(fields) == 3)
            head, rel, tail = fields
            # Keep only triplets fully covered by the id maps.
            if rel in rel2id and head in ent2id and tail in ent2id:
                triplets.append((rel2id[rel], ent2id[head], ent2id[tail]))
    return triplets
```
## Read file and resample
```
num_entities = None
num_relations = None
rel2id = read_xxx_to_id(relations_file, num_relations)
id2rel = {ident: rel for rel, ident in rel2id.items()}
ent2id = read_xxx_to_id(entities_file, num_entities)
id2ent = {ident: ent for ent, ident in ent2id.items()}
all_facts = parse_triplets(all_trip_file, rel2id, ent2id)
# relation to (head, tail)
rel2ht = {rel: [] for rel in id2rel.keys()}
for (r, h, t) in all_facts:
rel2ht[r].append((h, t))
num_rel, num_ent, num_trip = len(rel2id), len(ent2id), len(all_facts)
num_rel, num_ent, num_trip
```
## Compute bifurcation
```
from collections import defaultdict
def get_head_bifur(head_tail_pairs: list, k: int):
    """Group tails by head and measure head-side bifurcation.

    Returns (head->tails map, fraction of heads whose tail list reaches
    exactly *k* entries at some point — i.e. heads with >= *k* tails).
    """
    grouped = defaultdict(list)
    n_bifurcating = 0
    for head, tail in head_tail_pairs:
        grouped[head].append(tail)
        # Fires exactly once per head, the moment it accumulates k tails.
        n_bifurcating += (len(grouped[head]) == k)
    return grouped, n_bifurcating / len(grouped)
def get_tail_bifur(head_tail_pairs: list, k: int):
    """Group heads by tail and measure tail-side bifurcation.

    Returns (tail->heads map, fraction of tails whose head list reaches
    exactly *k* entries at some point — i.e. tails with >= *k* heads).
    """
    grouped = defaultdict(list)
    n_bifurcating = 0
    for head, tail in head_tail_pairs:
        grouped[tail].append(head)
        # Fires exactly once per tail, the moment it accumulates k heads.
        n_bifurcating += (len(grouped[tail]) == k)
    return grouped, n_bifurcating / len(grouped)
max_lambda = 7
backward = False
for rel, pairs in rel2ht.items():
print(f"[{id2rel[rel]}]")
for k in range(2, max_lambda + 1):
head2tails, bifur_head = get_head_bifur(pairs, k)
print(f"bifur_head({k}): {bifur_head:.2f}")
if backward:
tail2heads, bifur_tail = get_tail_bifur(pairs, k)
print(f"bifur_tail({k}): {bifur_tail:.2f}")
print("\n")
```
## Compute macro, micro and comprehensive saturation
```
def get_adjacency_matrices(triplets,
                           num_relations: int,
                           num_entities: int
                           ):
    """Compute adjacency matrix from all triplets
    in preparation for creating sparse matrix in torch.
    """
    # One (indices, values, shape) triple per relation.  Each matrix is
    # seeded with a dummy zero entry at (0, 0) so every relation yields a
    # valid sparse tensor even when it has no triplets.
    matrix = {
        r: ([[0, 0]], [0.], [num_entities, num_entities])
        for r in range(num_relations)
    }
    for triplet in triplets:
        rel = triplet[0]
        head = triplet[1]
        tail = triplet[2]
        value = 1.
        # Edge head -> tail with weight 1 in this relation's matrix.
        matrix[rel][0].append([head, tail])
        matrix[rel][1].append(value)
    for rel, mat in matrix.items():
        # NOTE(review): torch.sparse.FloatTensor is the legacy constructor;
        # torch.sparse_coo_tensor is the modern equivalent.  `device` is a
        # notebook-level global ("cuda") — confirm a GPU is available.
        matrix[rel] = torch.sparse.FloatTensor(
            torch.LongTensor(mat[0]).t(),
            torch.FloatTensor(mat[1]),
            mat[2]
        ).to(device)
    return matrix
# `adj_matrices`: adjacency matrices, ORDER matters!!!
# `head_nodes`: head nodes list
# return: a list of `batch_size` nodes
# `adj_matrices`: adjacency matrices, ORDER matters!!!
# `head_nodes`: head nodes list
# return: a list of `batch_size` nodes
def from_head_hops(adj_matrices: list,
                   head_nodes: list
                   ):
    """Count multi-hop paths from each head node along a relation path.

    Multiplies the transposed adjacency matrices in `adj_matrices` (order
    matters) against one-hot vectors of `head_nodes` and returns a nested
    dict {head: {tail: num_paths}} for every tail reached by >= 1 path.
    """
    # (batch_size, num_entities) one-hot matrix of the query heads.
    # Fix: size the one-hot from the `adj_matrices` argument; the original
    # accidentally read the notebook-global `adj_matrix` dict instead.
    v_x = F.one_hot(torch.LongTensor(head_nodes),
                    adj_matrices[0].size(0)).float().to(device)
    # (num_entities, batch_size): walk the relation path matrix by matrix.
    result = torch.matmul(adj_matrices[0].t(), v_x.t())
    for mat in adj_matrices[1:]:
        result = torch.mm(mat.t(), result)
    # (batch_size, num_entities) path counts, moved to CPU for numpy.
    result = result.t().cpu().numpy()
    indices = np.argwhere(result > 0)
    # {head: {tail: num_paths}}
    ret = {head: {} for head in head_nodes}
    for row, col in indices:
        # (row, col) indexes (position of head in batch, tail entity id).
        ret[head_nodes[row]][col] = result[row, col]
    return ret
adj_matrix = get_adjacency_matrices(all_facts, num_rel, num_ent)
adj_matrix[0], from_head_hops([adj_matrix[1], adj_matrix[2]], list(ent2id.values())[:2])
from itertools import permutations
topk_macro = 10
topk_micro = 10
topk_comp = 10
max_rule_len = 2 # T
relations = list(id2rel.keys())
paths_permut = [(rel1, rel2) for rel1 in relations for rel2 in relations]
len(paths_permut)
from time import time
from collections import defaultdict
start = time()
macro_saturations = {rel: {path: 0. for path in paths_permut} for rel in id2rel.keys()}
tmp_micro_saturations = {rel: {path: {} for path in paths_permut} for rel in id2rel.keys()} # {path: {(head, tail): num_paths}}
micro_saturations = {rel: {path: 0. for path in paths_permut} for rel in id2rel.keys()}
total_paths_pairs = {rel: defaultdict(int) for rel in id2rel.keys()} # {(head, tail): num_total_paths}
# get number of triplets under each relation
num_rel2trip = {rel: len(rel2ht[rel]) for rel in id2rel.keys()}
# get triplets under each relation
rel_head2tails = {rel: defaultdict(list) for rel in id2rel.keys()}
for (r, h, t) in all_facts:
rel_head2tails[r][h].append(t)
for rel in rel_head2tails:
for path in macro_saturations[rel].keys():
matrices = [adj_matrix[r] for r in path]
heads = list(rel_head2tails[rel].keys())
num_paths_from_heads = from_head_hops(matrices, heads)
for head, tails in rel_head2tails[rel].items():
for tail in tails:
if tail in num_paths_from_heads[head]:
macro_saturations[rel][path] += 1.
tmp_micro_saturations[rel][path][(head, tail)] = num_paths_from_heads[head][tail]
total_paths_pairs[rel][(head, tail)] += num_paths_from_heads[head][tail]
macro_saturations[rel][path] /= num_rel2trip[rel]
for path, pairs in tmp_micro_saturations[rel].items():
for pair, num_path in pairs.items():
# `pair`: (head, tail)
micro_saturations[rel][path] += num_path / total_paths_pairs[rel][pair]
if len(tmp_micro_saturations[rel][path]) != 0:
micro_saturations[rel][path] /= num_rel2trip[rel]
print(f"{time() - start}s")
```
### Macro saturation
```
for rel in macro_saturations:
print(f"{id2rel[rel]:=^50}")
sorted_items = sorted(macro_saturations[rel].items(), key=lambda x: x[1], reverse=True)
for i, (path, saturation) in enumerate(sorted_items):
# if i == topk_macro:
if i == 15:
break
print(f"{tuple(id2rel[r] for r in path)}: {saturation:.2f}")
print("\n")
```
### Micro saturation
```
for rel in micro_saturations:
print(f"{id2rel[rel]:=^50}")
sorted_items = sorted(micro_saturations[rel].items(), key=lambda x: x[1], reverse=True)
for i, (path, saturation) in enumerate(sorted_items):
if i == topk_micro:
break
print(f"{tuple(id2rel[r] for r in path)}: {saturation:.2f}")
print("\n")
```
### Comprehensive saturation
```
comp_saturations = {
rel: {} for rel in micro_saturations
}
for rel in micro_saturations:
for path, value in micro_saturations[rel].items():
comp_saturations[rel][path] = value * macro_saturations[rel][path]
for rel in comp_saturations:
print(f"{id2rel[rel]:=^50}")
sorted_items = sorted(comp_saturations[rel].items(), key=lambda x: x[1], reverse=True)
for i, (path, saturation) in enumerate(sorted_items):
if i == topk_comp:
break
print(f"{tuple(id2rel[r] for r in path)}: {saturation:.2f}%")
print("\n")
```
| github_jupyter |
```
import sys
sys.path.append('/'.join(sys.path[0].split('/')[:-1]))
import xarray as xr
import matplotlib.pyplot as plt
import numpy as np
import time
import os
import healpy as hp
import torch
from torch import nn, optim
from torch.utils.data import Dataset, DataLoader
from modules.utils import train_model_2steps_temp, init_device
from modules.data import WeatherBenchDatasetXarrayHealpixTemp
from modules.healpix_models import UNetSphericalTempHealpix, UNetSphericalHealpix
from modules.test import create_iterative_predictions_healpix
from modules.test import compute_rmse_healpix
from modules.plotting import plot_rmses
datadir = "../data/healpix/"
input_dir = datadir + "5.625deg_nearest/"
model_save_path = datadir + "models/"
pred_save_path = datadir + "predictions/"
if not os.path.isdir(model_save_path):
os.mkdir(model_save_path)
if not os.path.isdir(pred_save_path):
os.mkdir(pred_save_path)
train_years = ('1979', '2012')
val_years = ('2013', '2016')
test_years = ('2017', '2018')
nodes = 12*16*16
max_lead_time = 5*24
nb_timesteps = 2
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="2, 4"
gpu = [0, 1]
num_workers = 10
pin_memory = True
nb_epochs = 20
learning_rate = 8e-3
obs = xr.open_mfdataset(pred_save_path + 'observations_nearest.nc', combine='by_coords')
#rmses_weyn = xr.open_dataset(datadir + 'metrics/rmses_weyn.nc')
from modules.data import WeatherBenchDatasetIterative
class WeatherBenchDatasetXarrayHealpixTemp(Dataset):
    """ Dataset used for graph models (1D), where data is loaded from stored numpy arrays.
    Parameters
    ----------
    ds : xarray Dataset
        Dataset containing the input data
    out_features : int
        Number of output features
    delta_t : int
        Temporal spacing between samples in temporal sequence (in hours)
    len_sqce : int
        Length of the input and output (predicted) sequences
    years : tuple(str)
        Years used to split the data
    nodes : float
        Number of nodes each sample has
    max_lead_time : int
        Maximum lead time (in case of iterative predictions) in hours
    load : bool
        If true, load dataset to RAM
    mean : np.ndarray of shape 2
        Mean to use for data normalization. If None, mean is computed from data
    std : np.ndarray of shape 2
        std to use for data normalization. If None, mean is computed from data
    """

    def __init__(self, ds, out_features, delta_t, len_sqce, years, nodes, nb_timesteps,
                 max_lead_time=None, load=True, mean=None, std=None):
        self.delta_t = delta_t
        self.len_sqce = len_sqce
        self.years = years
        self.nodes = nodes
        self.out_features = out_features
        self.max_lead_time = max_lead_time
        self.nb_timesteps = nb_timesteps
        # Stack all data variables into one 'level' axis:
        # resulting shape is (time, node, level).
        self.data = ds.to_array(dim='level', name='Dataset').transpose('time', 'node', 'level')
        self.in_features = self.data.shape[-1]
        # Per-feature statistics over time and nodes, unless supplied.
        self.mean = self.data.mean(('time', 'node')).compute() if mean is None else mean
        self.std = self.data.std(('time', 'node')).compute() if std is None else std
        # Normalize
        self.data = (self.data - self.mean) / self.std
        # Count total number of samples
        total_samples = self.data.shape[0]
        # Reserve (len_sqce+1)*delta_t steps at the end for inputs + targets,
        # plus max_lead_time extra steps when iterative prediction is used.
        if max_lead_time is None:
            self.n_samples = total_samples - (len_sqce+1) * delta_t
        else:
            self.n_samples = total_samples - (len_sqce+1) * delta_t - max_lead_time
        # Create indexes
        # Per sample: [[input time steps, constants time step],
        #              [target time step 1, target time step 2]]
        self.idxs = [[[[sample_idx + delta_t*k for k in range(len_sqce)], sample_idx + delta_t * len_sqce],
                      [sample_idx + delta_t * len_sqce, sample_idx + delta_t * (len_sqce+1)]]
                     for sample_idx in range(self.n_samples)]
        if load:
            print('Loading data into RAM')
            self.data.load()

    def __len__(self):
        # Number of (input sequence, target pair) samples.
        return self.n_samples

    def __getitem__(self, idx):
        """ Returns sample and label corresponding to an index as torch.Tensor objects
        The return tensor shapes are (for the sample and the label): [n_vertex, len_sqce, n_features]
        """
        # X[0]: input sequence flattened to (nodes, in_features*len_sqce);
        # X[1]: the non-predicted ("constant") features at the first target
        #       step — presumably fed back during the two-step rollout.
        X = (torch.tensor(self.data.isel(time=self.idxs[idx][0][0]).values).float(). \
             permute(1, 0, 2).reshape(self.nodes, self.in_features*self.len_sqce),
             torch.tensor(self.data.isel(time=self.idxs[idx][0][1]).values[:, self.out_features:]).float())
        # y: the predicted features at target steps t+1 and t+2.
        y = (torch.Tensor(self.data.isel(time=self.idxs[idx][1][0]).values[:, :self.out_features]).float(),
             torch.Tensor(self.data.isel(time=self.idxs[idx][1][1]).values[:, :self.out_features]).float())
        return X, y
def train_model_2steps_temp(model, device, train_generator, epochs, lr, validation_data, model_filename):
    """Train `model` with a two-step (autoregressive) MSE objective.

    For every batch the model predicts step t+1, the prediction is fed back
    (together with the constant features) to predict t+2, and the two MSE
    losses are summed.  After each epoch the model state is checkpointed.

    Returns (train_losses, val_losses), one entry per epoch.
    """
    criterion = nn.MSELoss()
    optimizer = optim.Adam(model.parameters(), lr=lr, eps=1e-7, weight_decay=0, amsgrad=False)
    in_features = train_generator.dataset.in_features
    train_losses = []
    val_losses = []
    for epoch in range(epochs):
        time1 = time.time()
        val_loss = 0
        train_loss = 0
        model.train()
        for batch_idx, (batch, labels) in enumerate(train_generator):
            # Transfer to GPU
            batch1 = batch[0].to(device)       # input sequence
            constants1 = batch[1].to(device)   # constant features for step 2
            label1 = labels[0].to(device)
            label2 = labels[1].to(device)
            batch_size = batch1.shape[0]
            # Two-step rollout: predict t+1, then shift the input window by
            # one step (drop the oldest frame, append prediction+constants)
            # and predict t+2.
            output1 = model(batch1)
            batch2 = torch.cat((batch1[:, :, in_features:], torch.cat((output1, constants1), dim=2)), dim=2)
            output2 = model(batch2)
            loss = criterion(output1, label1) + criterion(output2, label2)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            train_loss = train_loss + loss.item() * batch_size
        train_loss = train_loss / (len(train_generator.dataset))
        train_losses.append(train_loss)
        model.eval()
        with torch.set_grad_enabled(False):
            for batch, labels in validation_data:
                # Transfer to GPU
                batch1 = batch[0].to(device)
                constants1 = batch[1].to(device)
                label1 = labels[0].to(device)
                label2 = labels[1].to(device)
                batch_size = batch1.shape[0]
                output1 = model(batch1)
                batch2 = torch.cat((batch1[:, :, in_features:], torch.cat((output1, constants1),
                                                                          dim=2)), dim=2)
                output2 = model(batch2)
                val_loss = val_loss + (criterion(output1, label1).item()
                                       + criterion(output2, label2).item()) * batch_size
            val_loss = val_loss / (len(validation_data.dataset))
        val_losses.append(val_loss)
        time2 = time.time()
        # Print stuff
        print('Epoch: {e:3d}/{n_e:3d} - loss: {l:.3f} - val_loss: {v_l:.5f} - time: {t:2f}'
              .format(e=epoch+1, n_e=epochs, l=train_loss, v_l=val_loss, t=time2-time1))
        # Fix: checkpoint the model being trained — the original saved the
        # notebook-global `spherical_unet` regardless of which model was
        # passed in.  The `15+epoch` filename offset continues a previous
        # 15-epoch run — TODO confirm it still applies.
        torch.save(model.state_dict(), model_filename[:-3] + '_epoch' + str(15+epoch) + '.h5')
    return train_losses, val_losses
def create_iterative_predictions_healpix_temp(model, device, dg):
    """Roll `model` forward autoregressively over all lead times.

    At each lead time the model's predictions replace the newest predicted
    features in the input window, constants are advanced one step, and a
    fresh DataLoader is built for the next iteration.  Returns an xarray
    Dataset of denormalized 'z' and 't' predictions indexed by
    (lead_time, time, node) with lat/lon coordinates attached.
    """
    out_feat = dg.dataset.out_features
    train_std = dg.dataset.std.values[:out_feat]
    train_mean = dg.dataset.mean.values[:out_feat]
    delta_t = dg.dataset.delta_t
    len_sqce = dg.dataset.len_sqce
    max_lead_time = dg.dataset.max_lead_time
    initial_lead_time = delta_t * len_sqce
    nodes = dg.dataset.nodes
    nside = int(np.sqrt(nodes/12))
    n_samples = dg.dataset.n_samples
    in_feat = dg.dataset.in_features
    # Lead times
    lead_times = np.arange(delta_t, max_lead_time + delta_t, delta_t)
    # Lat lon coordinates
    out_lon, out_lat = hp.pix2ang(nside, np.arange(nodes), lonlat=True)
    # Actual times
    start = np.datetime64(dg.dataset.years[0], 'h') + np.timedelta64(initial_lead_time, 'h')
    stop = start + np.timedelta64(dg.dataset.n_samples, 'h')
    times = np.arange(start, stop)
    # Constant (non-predicted) input features for every time step.
    constants = np.array(dg.dataset.data.isel(level=slice(out_feat, None)).values)
    dataloader = dg
    predictions = []
    model.eval()
    for lead in lead_times:
        outputs = []
        next_batch_ = []
        states = np.empty((n_samples, nodes, in_feat*len_sqce))
        time1 = time.time()
        for i, (sample, _) in enumerate(dataloader):
            inputs = sample[0].to(device)
            output = model(inputs)
            # Keep the window minus its oldest frame, and the new prediction.
            next_batch_.append(inputs[:, :, in_feat:].detach().cpu().clone().numpy())
            outputs.append(output.detach().cpu().clone().numpy()[:, :, :out_feat])
        next_batch = np.concatenate(next_batch_)
        preds = np.concatenate(outputs)
        # Build the next autoregressive state: shifted window, then the
        # prediction, then the constants valid at the new time step.
        states[:, :, :(len_sqce-1)*in_feat] = next_batch
        states[:, :, (len_sqce-1)*in_feat:(len_sqce-1)*in_feat + out_feat] = preds
        # Fix: use the local in_feat/out_feat derived from the dataset —
        # the original read the notebook globals `in_features`/`out_features`,
        # which are reassigned elsewhere in the notebook.
        states[:, :, -(in_feat - out_feat):] = constants[(len_sqce-1)*delta_t+
                                                         lead:n_samples+(len_sqce-1)*delta_t+lead, :]
        predictions.append(preds * train_std + train_mean)
        # NOTE(review): `batch_size` is a notebook-level global — confirm it
        # is defined before this function runs.
        new_set = WeatherBenchDatasetIterative(states)
        dataloader = DataLoader(new_set, batch_size=batch_size, shuffle=False, num_workers=10)
        time2 = time.time()
    predictions = np.array(predictions)
    das = []
    lev_idx = 0
    for var in ['z', 't']:
        das.append(xr.DataArray(
            predictions[:, :, :, lev_idx],
            dims=['lead_time', 'time', 'node'],
            coords={'lead_time': lead_times, 'time': times, 'node': np.arange(nodes)},
            name=var
        ))
        lev_idx += 1
    prediction_ds = xr.merge(das)
    prediction_ds = prediction_ds.assign_coords({'lat': out_lat, 'lon': out_lon})
    return prediction_ds
z500 = xr.open_mfdataset(f'{input_dir}geopotential_500/*.nc', combine='by_coords').rename({'z':'z500'})
t850 = xr.open_mfdataset(f'{input_dir}temperature_850/*.nc', combine='by_coords').rename({'t':'t850'})
z1000 = xr.open_dataset(f'{input_dir}geopotential/geopotential_5.625deg.nc').sel(level=
1000).rename({'z':'z1000'})
rad = xr.open_mfdataset(f'{input_dir}toa_incident_solar_radiation/*.nc', combine='by_coords')
z500 = z500.isel(time=slice(7, None))
t850 = t850.isel(time=slice(7, None))
z1000 = z1000.isel(time=slice(7, None))
constants = xr.open_dataset(f'{input_dir}constants/constants_5.625deg.nc').rename({'orography' :'orog'})
constants = constants.assign(cos_lon=lambda x: np.cos(np.deg2rad(x.lon)))
constants = constants.assign(sin_lon=lambda x: np.sin(np.deg2rad(x.lon)))
temp = xr.DataArray(np.zeros(z500.dims['time']), coords=[('time', z500.time.values)])
constants, _ = xr.broadcast(constants, temp)
orog = constants['orog']
lsm = constants['lsm']
lats = constants['lat2d']
slt = constants['slt']
cos_lon = constants['cos_lon']
sin_lon = constants['sin_lon']
```
Compute mean and std
```
nside = int(np.sqrt(nodes/12))
out_lon, out_lat = hp.pix2ang(nside, np.arange(nodes), lonlat=True)
# Compute mean and std in the same fashion as in function "preprocess_healpix" from data.py
z1000_mean = z1000.assign_coords({'lat': out_lat, 'lon': out_lon}).mean(('time', 'lat', 'lon'))
z1000_std = z1000.assign_coords({'lat': out_lat, 'lon': out_lon}).std('time').mean(('lat', 'lon'))
z = xr.open_mfdataset(f'{input_dir}geopotential_500/*.nc', combine='by_coords')['z']\
.assign_coords(level=1)
t = xr.open_mfdataset(f'{input_dir}temperature_850/*.nc', combine='by_coords')['t']\
.assign_coords(level=2)
pred = xr.concat([z, t], 'level')
pred.to_dataset('level').rename_vars({1:'z', 2:'t'})
```
Code to save mean and std values if necessary
```python
z1000_mean.to_netcdf(datadir + 'z1000_mean.nc')
z1000_std.to_netcdf(datadir + 'z1000_std.nc')
predictors_mean.to_netcdf(datadir + 'predictors_mean.nc')
predictors_std.to_netcdf(datadir + 'predictors_std.nc')
constants_mean.to_netcdf(datadir + 'constants/const_mean.nc')
constants_std.to_netcdf(datadir + 'constants/const_std.nc')
```
```
#predictors_mean = xr.open_dataarray(datadir + 'predictors_mean.nc')
# --- Evaluation setup: input variables, normalisation statistics, test dataloader ---
# Older normalisation-loading code kept commented for reference; superseded by
# the mean/std files opened below.
#predictors_std = xr.open_dataarray(datadir + 'predictors_std.nc')
#z1000_mean = xr.open_dataarray(datadir + 'z1000_mean.nc')
#z1000_std = xr.open_dataarray(datadir + 'z1000_std.nc')
#const_mean = xr.open_dataarray(input_dir + 'constants/const_mean.nc')
#const_std = xr.open_dataarray(input_dir + 'constants/const_std.nc')
#train_mean = xr.concat((predictors_mean, z1000_mean, constants_mean), dim='level')
#train_std = xr.concat((predictors_std, z1000_std, constants_std), dim='level')
#train_mean = xr.concat((predictors_mean, z1000_mean, constants_mean), dim='node')
#train_std = xr.concat((predictors_std, z1000_std, constants_std), dim='node')
# z500, t850, orog, lats, lsm, slt, rad
in_features = 7
out_features = 2
ds = xr.merge([z500, t850, orog, lats, lsm, slt, rad], compat='override')
ds_train = ds.sel(time=slice(*train_years))
ds_valid = ds.sel(time=slice(*val_years))
ds_test = ds.sel(time=slice(*test_years))
# Per-variable normalisation statistics computed on the training set.
train_mean_ = xr.open_mfdataset(f'{input_dir}mean_train_all_vars.nc').to_array(dim='level')
train_std_ = xr.open_mfdataset(f'{input_dir}std_train_all_vars.nc').to_array(dim='level')
len_sqce = 2
delta_t = 6
description = "all_const_len{}_delta{}".format(len_sqce, delta_t)
model_filename = model_save_path + "spherical_unet_" + description + ".h5"
pred_filename = pred_save_path + "spherical_unet_10days" + description + ".nc"
# Iterate predictions out to 10 days (expressed in hours).
max_lead_time = 10*24
#feature_idx = [0, 1, 3, 4, 5, 6, 7]
feature_idx = list(range(len([z500, t850, orog, lats, lsm, slt, rad])))
# NOTE(review): in_features / out_features / ds are re-assigned here, so the
# assignments a few lines above are redundant.
in_features = len(feature_idx)
out_features = 2
ds = xr.merge([z500, t850, orog, lats, lsm, slt, rad], compat='override')
ds_test = ds.sel(time=slice(*test_years))
#train_mean_ = ds_train.mean(('time','node')).compute()
#train_std_ = ds_train.std('time').mean('node').compute()
#train_mean_ = train_mean[feature_idx]
#train_std_ = train_std[feature_idx]
spherical_unet = UNetSphericalHealpix(N=nodes, in_channels=in_features*len_sqce, out_channels=out_features,
kernel_size=3)
spherical_unet, device = init_device(spherical_unet, gpu=gpu)
# NOTE(review): the state-dict load is commented out, so the predictions below
# come from a randomly initialised model unless weights were loaded elsewhere.
#spherical_unet.load_state_dict(torch.load(model_filename), strict=False)
testing_ds = WeatherBenchDatasetXarrayHealpixTemp(ds=ds_test, out_features=out_features,
len_sqce=len_sqce, delta_t=delta_t, years=test_years,
nodes=nodes, nb_timesteps=nb_timesteps,
mean=train_mean_, std=train_std_,
max_lead_time=max_lead_time, load=False)
batch_size=70
dataloader_test = DataLoader(testing_ds, batch_size=int(0.7*batch_size), shuffle=False,
num_workers=num_workers)
preds = create_iterative_predictions_healpix_temp(spherical_unet, device, dataloader_test)
preds.to_netcdf(pred_filename)
```
# Length of sequence: 2
```
# --- Experiment: sequence length L=2, delta_t = 6 h: train, evaluate, save RMSE ---
batch_size = 70
len_sqce = 2
delta_t = 6
description = "all_const_z1000_len{}_delta{}".format(len_sqce, delta_t)
model_filename = model_save_path + "spherical_unet_" + description + ".h5"
pred_filename = pred_save_path + "spherical_unet_" + description + ".nc"
rmse_filename = datadir + 'metrics/rmse_' + description + '.nc'
# Train and validation data
training_ds = WeatherBenchDatasetXarrayHealpixTemp(ds=ds_train, out_features=out_features,
len_sqce=len_sqce, delta_t=delta_t, years=train_years,
nodes=nodes, nb_timesteps=nb_timesteps,
mean=train_mean_, std=train_std_, load=False)
validation_ds = WeatherBenchDatasetXarrayHealpixTemp(ds=ds_valid, out_features=out_features,
len_sqce=len_sqce, delta_t=delta_t, years=val_years,
nodes=nodes, nb_timesteps=nb_timesteps,
mean=train_mean_, std=train_std_, load=False)
dl_train = DataLoader(training_ds, batch_size=batch_size, shuffle=True, num_workers=num_workers,
pin_memory=pin_memory)
dl_val = DataLoader(validation_ds, batch_size=batch_size*2, shuffle=False, num_workers=num_workers,
pin_memory=pin_memory)
# Model
spherical_unet = UNetSphericalHealpix(N=nodes, in_channels=in_features*len_sqce, out_channels=out_features,
kernel_size=3)
spherical_unet, device = init_device(spherical_unet, gpu=gpu)
# Train model
train_loss, val_loss = train_model_2steps_temp(spherical_unet, device, dl_train, epochs=nb_epochs,
lr=learning_rate, validation_data=dl_val,
model_filename=model_filename)
torch.save(spherical_unet.state_dict(), model_filename)
# Show training losses
plt.plot(train_loss, label='Training loss')
plt.plot(val_loss, label='Validation loss')
plt.xlabel('Epochs')
plt.ylabel('MSE Loss')
plt.legend()
plt.show()
# Free training data before building the (large) test dataset.
del dl_train, dl_val, training_ds, validation_ds
torch.cuda.empty_cache()
'''
# Load optimal model
del spherical_unet
torch.cuda.empty_cache()
optimal_filename = model_filename#[:-3] + '_epoch' + str(np.argmin(val_loss)) + '.h5'
spherical_unet = UNetSphericalHealpix(N=nodes, in_channels=in_features*len_sqce, out_channels=out_features,
kernel_size=3)
spherical_unet, device = init_device(spherical_unet, gpu=gpu)
spherical_unet.load_state_dict(torch.load(optimal_filename), strict=False)'''
# Testing data
# NOTE(review): uses train_mean/train_std here while the datasets above use
# train_mean_/train_std_ — confirm which statistics are intended (train_mean
# may be undefined in a fresh kernel; the loads for it are commented out above).
testing_ds = WeatherBenchDatasetXarrayHealpixTemp(ds=ds_test, out_features=out_features,
len_sqce=len_sqce, delta_t=delta_t, years=test_years,
nodes=nodes, nb_timesteps=nb_timesteps,
mean=train_mean, std=train_std,
max_lead_time=max_lead_time)
dataloader_test = DataLoader(testing_ds, batch_size=int(0.7*batch_size), shuffle=False,
num_workers=num_workers)
# Compute predictions
preds = create_iterative_predictions_healpix_temp(spherical_unet, device, dataloader_test)
preds.to_netcdf(pred_filename)
# Compute and save RMSE
rmse = compute_rmse_healpix(preds, obs).load()
rmse.to_netcdf(rmse_filename)
# Show RMSE
print('Z500 - 0:', rmse.z.values[0])
print('T850 - 0:', rmse.t.values[0])
plot_rmses(rmse, rmses_weyn, lead_time=6)
del spherical_unet, preds, rmse
torch.cuda.empty_cache()
# NOTE(review): the block below duplicates the prediction/RMSE block above,
# but spherical_unet, preds and rmse were just deleted — running these lines
# sequentially raises NameError. Likely a stale duplicated notebook cell.
# Compute predictions
preds = create_iterative_predictions_healpix_temp(spherical_unet, device, dataloader_test)
preds.to_netcdf(pred_filename)
# Compute and save RMSE
rmse = compute_rmse_healpix(preds, obs).load()
rmse.to_netcdf(rmse_filename)
# Show RMSE
print('Z500 - 0:', rmse.z.values[0])
print('T850 - 0:', rmse.t.values[0])
plot_rmses(rmse, rmses_weyn, lead_time=6)
# --- Experiment: sequence length L=2, delta_t = 12 h: train, evaluate, save RMSE ---
len_sqce = 2
delta_t = 12
batch_size = 70
description = "all_const_z1000_len{}_delta{}".format(len_sqce, delta_t)
model_filename = model_save_path + "spherical_unet_" + description + ".h5"
pred_filename = pred_save_path + "spherical_unet_" + description + ".nc"
rmse_filename = datadir + 'metrics/rmse_' + description + '.nc'
# Train and validation data
training_ds = WeatherBenchDatasetXarrayHealpixTemp(ds=ds_train, out_features=out_features,
len_sqce=len_sqce, delta_t=delta_t, years=train_years,
nodes=nodes, nb_timesteps=nb_timesteps,
mean=train_mean, std=train_std)
validation_ds = WeatherBenchDatasetXarrayHealpixTemp(ds=ds_valid, out_features=out_features,
len_sqce=len_sqce, delta_t=delta_t, years=val_years,
nodes=nodes, nb_timesteps=nb_timesteps,
mean=train_mean, std=train_std)
dl_train = DataLoader(training_ds, batch_size=batch_size, shuffle=True, num_workers=num_workers,
pin_memory=pin_memory)
dl_val = DataLoader(validation_ds, batch_size=batch_size*2, shuffle=False, num_workers=num_workers,
pin_memory=pin_memory)
# Model
spherical_unet = UNetSphericalHealpix(N=nodes, in_channels=in_features*len_sqce, out_channels=out_features,
kernel_size=3)
spherical_unet, device = init_device(spherical_unet, gpu=gpu)
# Train model
train_loss, val_loss = train_model_2steps_temp(spherical_unet, device, dl_train, epochs=nb_epochs,
lr=learning_rate, validation_data=dl_val,
model_filename=model_filename)
torch.save(spherical_unet.state_dict(), model_filename)
# Show training losses
plt.plot(train_loss, label='Training loss')
plt.plot(val_loss, label='Validation loss')
plt.xlabel('Epochs')
plt.ylabel('MSE Loss')
plt.legend()
plt.show()
# Free training data before building the test dataset.
del dl_train, dl_val, training_ds, validation_ds
torch.cuda.empty_cache()
'''# Load optimal model
del spherical_unet
torch.cuda.empty_cache()
optimal_filename = model_filename[:-3] + '_epoch' + str(np.argmin(val_loss)) + '.h5'
spherical_unet = UNetSphericalHealpix(N=nodes, in_channels=in_features*len_sqce, out_channels=out_features,
kernel_size=3)
spherical_unet, device = init_device(spherical_unet, gpu=gpu)
spherical_unet.load_state_dict(torch.load(optimal_filename), strict=False)'''
# Testing data
testing_ds = WeatherBenchDatasetXarrayHealpixTemp(ds=ds_test, out_features=out_features,
len_sqce=len_sqce, delta_t=delta_t, years=test_years,
nodes=nodes, nb_timesteps=nb_timesteps,
mean=train_mean, std=train_std,
max_lead_time=max_lead_time)
dataloader_test = DataLoader(testing_ds, batch_size=int(0.7*batch_size), shuffle=False,
num_workers=num_workers)
# Compute predictions
preds = create_iterative_predictions_healpix_temp(spherical_unet, device, dataloader_test)
preds.to_netcdf(pred_filename)
# Compute and save RMSE
rmse = compute_rmse_healpix(preds, obs).load()
rmse.to_netcdf(rmse_filename)
# Show RMSE
print('Z500 - 0:', rmse.z.values[0])
print('T850 - 0:', rmse.t.values[0])
# Plot against the Weyn 2020 baseline; delta_t=12 runs have half as many
# lead-time points as the 6-hourly baseline, hence two different x axes.
f, axs = plt.subplots(1, 2, figsize=(17, 5))
lead_times_ = np.arange(delta_t, max_lead_time + delta_t, delta_t)
lead_times = np.arange(6, max_lead_time + 6, 6)
axs[0].plot(lead_times_, rmse.z.values, label='Spherical')
axs[0].plot(lead_times, rmses_weyn.z.values, label='Weyn 2020')
axs[0].legend()
axs[1].plot(lead_times_, rmse.t.values, label='Spherical')
axs[1].plot(lead_times, rmses_weyn.t.values, label='Weyn 2020')
axs[1].legend()
plt.show()
del spherical_unet, preds, rmse
torch.cuda.empty_cache()
```
# Length of sequence: 4
```
# --- Experiment: sequence length L=4, delta_t = 6 h: train, evaluate, save RMSE ---
batch_size = 100
len_sqce = 4
delta_t = 6
description = "all_const_z1000_len{}_delta{}".format(len_sqce, delta_t)
model_filename = model_save_path + "spherical_unet_" + description + ".h5"
pred_filename = pred_save_path + "spherical_unet_" + description + ".nc"
rmse_filename = datadir + 'metrics/rmse_' + description + '.nc'
# Train and validation data
training_ds = WeatherBenchDatasetXarrayHealpixTemp(ds=ds_train, out_features=out_features,
len_sqce=len_sqce, delta_t=delta_t, years=train_years,
nodes=nodes, nb_timesteps=nb_timesteps,
mean=train_mean, std=train_std)
validation_ds = WeatherBenchDatasetXarrayHealpixTemp(ds=ds_valid, out_features=out_features,
len_sqce=len_sqce, delta_t=delta_t, years=val_years,
nodes=nodes, nb_timesteps=nb_timesteps,
mean=train_mean, std=train_std)
dl_train = DataLoader(training_ds, batch_size=batch_size, shuffle=True, num_workers=num_workers,
pin_memory=pin_memory)
dl_val = DataLoader(validation_ds, batch_size=batch_size*2, shuffle=False, num_workers=num_workers,
pin_memory=pin_memory)
# Model
spherical_unet = UNetSphericalHealpix(N=nodes, in_channels=in_features*len_sqce, out_channels=out_features,
kernel_size=3)
spherical_unet, device = init_device(spherical_unet, gpu=gpu)
# Train model
train_loss, val_loss = train_model_2steps_temp(spherical_unet, device, dl_train, epochs=nb_epochs,
lr=learning_rate, validation_data=dl_val,
model_filename=model_filename)
torch.save(spherical_unet.state_dict(), model_filename)
# Show training losses
plt.plot(train_loss, label='Training loss')
plt.plot(val_loss, label='Validation loss')
plt.xlabel('Epochs')
plt.ylabel('MSE Loss')
plt.legend()
plt.show()
# Free training data before building the test dataset.
del dl_train, dl_val, training_ds, validation_ds
torch.cuda.empty_cache()
'''# Load optimal model
del spherical_unet
torch.cuda.empty_cache()
optimal_filename = model_filename[:-3] + '_epoch' + str(np.argmin(val_loss)) + '.h5'
spherical_unet = UNetSphericalHealpix(N=nodes, in_channels=in_features*len_sqce, out_channels=out_features,
kernel_size=3)
spherical_unet, device = init_device(spherical_unet, gpu=gpu)
spherical_unet.load_state_dict(torch.load(optimal_filename), strict=False)'''
# Testing data
testing_ds = WeatherBenchDatasetXarrayHealpixTemp(ds=ds_test, out_features=out_features,
len_sqce=len_sqce, delta_t=delta_t, years=test_years,
nodes=nodes, nb_timesteps=nb_timesteps,
mean=train_mean, std=train_std,
max_lead_time=max_lead_time)
# NOTE(review): 0.3*batch_size here vs 0.7 in the other experiments —
# presumably reduced to fit memory with len_sqce=4; confirm.
dataloader_test = DataLoader(testing_ds, batch_size=int(0.3*batch_size), shuffle=False,
num_workers=num_workers)
# Compute predictions
preds = create_iterative_predictions_healpix_temp(spherical_unet, device, dataloader_test)
preds.to_netcdf(pred_filename)
# Compute and save RMSE
rmse = compute_rmse_healpix(preds, obs).load()
rmse.to_netcdf(rmse_filename)
# Show RMSE
print('Z500 - 0:', rmse.z.values[0])
print('T850 - 0:', rmse.t.values[0])
plot_rmses(rmse, rmses_weyn, lead_time=6)
del spherical_unet, preds, rmse
torch.cuda.empty_cache()
# --- Experiment: sequence length L=4, delta_t = 12 h (batch_size=100 carried
# over from the previous cell) ---
len_sqce = 4
delta_t = 12
description = "all_const_z1000_len{}_delta{}".format(len_sqce, delta_t)
model_filename = model_save_path + "spherical_unet_" + description + ".h5"
pred_filename = pred_save_path + "spherical_unet_" + description + ".nc"
rmse_filename = datadir + 'metrics/rmse_' + description + '.nc'
# Train and validation data
training_ds = WeatherBenchDatasetXarrayHealpixTemp(ds=ds_train, out_features=out_features,
len_sqce=len_sqce, delta_t=delta_t, years=train_years,
nodes=nodes, nb_timesteps=nb_timesteps,
mean=train_mean, std=train_std)
validation_ds = WeatherBenchDatasetXarrayHealpixTemp(ds=ds_valid, out_features=out_features,
len_sqce=len_sqce, delta_t=delta_t, years=val_years,
nodes=nodes, nb_timesteps=nb_timesteps,
mean=train_mean, std=train_std)
dl_train = DataLoader(training_ds, batch_size=batch_size, shuffle=True, num_workers=num_workers,
pin_memory=pin_memory)
dl_val = DataLoader(validation_ds, batch_size=batch_size*2, shuffle=False, num_workers=num_workers,
pin_memory=pin_memory)
# Model
spherical_unet = UNetSphericalHealpix(N=nodes, in_channels=in_features*len_sqce, out_channels=out_features,
kernel_size=3)
spherical_unet, device = init_device(spherical_unet, gpu=gpu)
# Train model
train_loss, val_loss = train_model_2steps_temp(spherical_unet, device, dl_train, epochs=nb_epochs,
lr=learning_rate, validation_data=dl_val,
model_filename=model_filename)
torch.save(spherical_unet.state_dict(), model_filename)
# Show training losses
plt.plot(train_loss, label='Training loss')
plt.plot(val_loss, label='Validation loss')
plt.xlabel('Epochs')
plt.ylabel('MSE Loss')
plt.legend()
plt.show()
# Free training data before building the test dataset.
del dl_train, dl_val, training_ds, validation_ds
torch.cuda.empty_cache()
'''# Load optimal model
del spherical_unet
torch.cuda.empty_cache()
optimal_filename = model_filename[:-3] + '_epoch' + str(np.argmin(val_loss)) + '.h5'
spherical_unet = UNetSphericalHealpix(N=nodes, in_channels=in_features*len_sqce, out_channels=out_features,
kernel_size=3)
spherical_unet, device = init_device(spherical_unet, gpu=gpu)
spherical_unet.load_state_dict(torch.load(optimal_filename), strict=False)'''
# Testing data
testing_ds = WeatherBenchDatasetXarrayHealpixTemp(ds=ds_test, out_features=out_features,
len_sqce=len_sqce, delta_t=delta_t, years=test_years,
nodes=nodes, nb_timesteps=nb_timesteps,
mean=train_mean, std=train_std,
max_lead_time=max_lead_time)
dataloader_test = DataLoader(testing_ds, batch_size=int(0.7*batch_size), shuffle=False,
num_workers=num_workers)
# Compute predictions
preds = create_iterative_predictions_healpix_temp(spherical_unet, device, dataloader_test)
preds.to_netcdf(pred_filename)
# Compute and save RMSE
rmse = compute_rmse_healpix(preds, obs).load()
rmse.to_netcdf(rmse_filename)
# Show RMSE
print('Z500 - 0:', rmse.z.values[0])
print('T850 - 0:', rmse.t.values[0])
# Plot against the Weyn 2020 baseline; delta_t=12 runs have half as many
# lead-time points as the 6-hourly baseline, hence two different x axes.
f, axs = plt.subplots(1, 2, figsize=(17, 5))
lead_times_ = np.arange(delta_t, max_lead_time + delta_t, delta_t)
lead_times = np.arange(6, max_lead_time + 6, 6)
axs[0].plot(lead_times_, rmse.z.values, label='Spherical')
axs[0].plot(lead_times, rmses_weyn.z.values, label='Weyn 2020')
axs[0].legend()
axs[1].plot(lead_times_, rmse.t.values, label='Spherical')
axs[1].plot(lead_times, rmses_weyn.t.values, label='Weyn 2020')
axs[1].legend()
plt.show()
del spherical_unet, preds, rmse
torch.cuda.empty_cache()
```
# Comparison
```
# --- Comparison of temporal settings: load per-experiment RMSE files and plot ---
filename = datadir+'metrics/rmse_all_const_z1000_len{}_delta{}'
rmse_2_6 = xr.open_dataset(filename.format(2, 6) + '.nc')
rmse_2_12 = xr.open_dataset(filename.format(2, 12) + '.nc')
rmse_4_6 = xr.open_dataset(filename.format(4, 6) + '.nc')
rmse_4_12 = xr.open_dataset(filename.format(4, 12) + '.nc')
rmse_1 = xr.open_dataset(datadir+'metrics/rmse_all_const.nc')
# Lead-time axes: delta_t = 12 h runs have half as many points as the 6 h runs.
lead_times_ = np.arange(12, max_lead_time + 12, 12)
lead_times = np.arange(6, max_lead_time + 6, 6)
f, axs = plt.subplots(1, 2, figsize=(17, 6))
# Label only every 24 h (t % 4 in 6-h steps) to keep the axis readable.
xlabels = [str(t) if t%4 == 0 else '' for t in lead_times]
# Raw strings for the LaTeX labels: '\D' is an invalid escape sequence in a
# plain string literal (DeprecationWarning today, SyntaxError in the future).
axs[0].plot(lead_times, rmse_1.z.values, label=r'$L=1$, $\Delta_t = 6$')
axs[0].plot(lead_times, rmse_2_6.z.values, label=r'$L=2$, $\Delta_t = 6$')
axs[0].plot(lead_times, rmse_4_6.z.values, label=r'$L=4$, $\Delta_t = 6$')
axs[0].legend()
axs[1].plot(lead_times, rmse_1.t.values, label=r'$L=1$, $\Delta_t = 6$')
axs[1].plot(lead_times, rmse_2_6.t.values, label=r'$L=2$, $\Delta_t = 6$')
axs[1].plot(lead_times, rmse_4_6.t.values, label=r'$L=4$, $\Delta_t = 6$')
axs[1].legend()
axs[0].set_xticks(lead_times)
axs[1].set_xticks(lead_times)
axs[0].set_xticklabels(xlabels)
axs[1].set_xticklabels(xlabels)
axs[0].tick_params(axis='both', which='major', labelsize=16)
axs[1].tick_params(axis='both', which='major', labelsize=16)
axs[0].set_xlabel('Lead time [h]', fontsize='18')
axs[1].set_xlabel('Lead time [h]', fontsize='18')
axs[0].set_ylabel('RMSE [$m^2 s^{-2}$]', fontsize='18')
axs[1].set_ylabel('RMSE [K]', fontsize='18')
axs[0].set_title('Z500', fontsize='22')
axs[1].set_title('T850', fontsize='22')
axs[0].legend(fontsize=16, loc='upper left')
axs[1].legend(fontsize=16)
plt.tight_layout()
plt.savefig('temporal_rmse_delta6.eps', format='eps', bbox_inches='tight')
plt.show()
# Second figure: all four L / delta_t combinations on the same axes.
f, axs = plt.subplots(1, 2, figsize=(17, 6))
axs[0].plot(lead_times, rmse_2_6.z.values, label=r'$L=2$, $\Delta_t = 6$')
axs[0].plot(lead_times, rmse_4_6.z.values, label=r'$L=4$, $\Delta_t = 6$')
axs[0].plot(lead_times_, rmse_2_12.z.values, label=r'$L=2$, $\Delta_t = 12$')
axs[0].plot(lead_times_, rmse_4_12.z.values, label=r'$L=4$, $\Delta_t = 12$')
#axs[0].plot(lead_times, rmse_1.z.values, label='Spherical, L=1, delta=6')
#axs[0].plot(lead_times, rmses_weyn.z.values, label='Weyn 2020')
axs[1].plot(lead_times, rmse_2_6.t.values, label=r'$L=2$, $\Delta_t = 6$')
axs[1].plot(lead_times, rmse_4_6.t.values, label=r'$L=4$, $\Delta_t = 6$')
axs[1].plot(lead_times_, rmse_2_12.t.values, label=r'$L=2$, $\Delta_t = 12$')
axs[1].plot(lead_times_, rmse_4_12.t.values, label=r'$L=4$, $\Delta_t = 12$')
#axs[1].plot(lead_times, rmse_1.t.values, label='Spherical, L=1, delta=6')
#axs[1].plot(lead_times, rmses_weyn.t.values, label='Weyn 2020')
axs[0].set_xticks(lead_times)
axs[1].set_xticks(lead_times)
axs[0].set_xticklabels(xlabels)
axs[1].set_xticklabels(xlabels)
axs[0].tick_params(axis='both', which='major', labelsize=16)
axs[1].tick_params(axis='both', which='major', labelsize=16)
axs[0].set_xlabel('Lead time [h]', fontsize='18')
axs[1].set_xlabel('Lead time [h]', fontsize='18')
axs[0].set_ylabel('RMSE [$m^2 s^{-2}$]', fontsize='18')
axs[1].set_ylabel('RMSE [K]', fontsize='18')
axs[0].set_title('Z500', fontsize='22')
axs[1].set_title('T850', fontsize='22')
axs[0].legend(fontsize=16, loc='upper left')
axs[1].legend(fontsize=16)
plt.tight_layout()
plt.savefig('temporal_rmse_all.eps', format='eps', bbox_inches='tight')
plt.show()
```
| github_jupyter |
# Results
```
%matplotlib inline
import torch
import numpy as np
import sys
import matplotlib.pyplot as plt
sys.path.insert(0, '..')
from utils import plot_stroke
from utils.constants import Global
from utils.dataset import HandwritingDataset
from utils.data_utils import data_denormalization, data_normalization, valid_offset_normalization
from models.models import HandWritingPredictionNet, HandWritingSynthesisNet
from generate import generate_unconditional_seq, generate_conditional_sequence
# Run on the first GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Root directory of the handwriting dataset (strokes.npy / sentences.txt).
data_path = '../data/'
```
### Unconditional generation:
```
# --- Unconditional handwriting generation (no priming, no text conditioning) ---
# Fix RNG seeds so the sampled sequence is reproducible.
seed = 224
if seed:
print("seed:",seed)
torch.manual_seed(seed)
np.random.seed(seed)
model_path = '../results/best_model_prediction.pt'
train_dataset = HandwritingDataset(data_path, split='train', text_req=False)
# Number of points to sample in the generated stroke sequence.
seq_len = 400
#prediction
# Higher bias -> lower-variance, cleaner-looking samples.
gen_seq = generate_unconditional_seq(model_path, seq_len, device, bias=10., style=None, prime=False)
# denormalize the generated offsets using train set mean and std
gen_seq = data_denormalization(Global.train_mean, Global.train_std, gen_seq)
# plot the sequence
plot_stroke(gen_seq[0])
```
### Unconditional priming
```
# --- Unconditional generation primed with a handwriting style ---
# Style comes either from a user-supplied file (file_path set) or from a random
# training sample.
seed = 224
if seed:
print("seed:",seed)
torch.manual_seed(seed)
np.random.seed(seed)
model_path = '../results/best_model_prediction.pt'
train_dataset = HandwritingDataset(data_path, split='train', text_req=False)
seq_len = 400
prime = True
bias = 10.
# file_path = '../app/'
file_path = None
if prime and file_path:
# User-provided style: stroke offsets + the text they spell.
style = np.load(file_path + 'style.npy', allow_pickle=True, encoding='bytes').astype(np.float32)
with open(file_path + 'inpText.txt') as file:
texts = file.read().splitlines()
real_text = texts[0]
# plot the sequence
plot_stroke(style, save_name="style.png")
print(real_text)
mean, std, _ = data_normalization(style)
style = torch.from_numpy(style).unsqueeze(0).to(device)
print(style.shape)
elif prime:
# Random training sample as the priming style.
strokes = np.load(data_path + 'strokes.npy',
allow_pickle=True, encoding='bytes')
with open(data_path + 'sentences.txt') as file:
texts = file.read().splitlines()
idx = np.random.randint(0, len(strokes))
print("Prime style index: ", idx)
real_text = texts[idx]
style = strokes[idx]
# plot the sequence
plot_stroke(style, save_name="style_" + str(idx) + ".png")
print(real_text)
mean, std, _ = data_normalization(style)
style = torch.from_numpy(style).unsqueeze(0).to(device)
print(style.shape)
#prediction
gen_seq = generate_unconditional_seq(model_path, seq_len, device, bias, style, prime)
# denormalize the generated offsets using train set mean and std
# NOTE(review): mean/std (and style) are bound only inside the prime branches;
# with prime=False this line would raise NameError.
gen_seq = data_denormalization(mean, std, gen_seq)
# plot the sequence
plot_stroke(gen_seq[0])
```
### Conditional generation:
```
# --- Conditional generation: synthesise handwriting for a given text string ---
# seed = 128
# if seed:
# print("seed:",seed)
# torch.manual_seed(seed)
# np.random.seed(seed)
model_path = '../results/best_model_synthesis.pt'
train_dataset = HandwritingDataset(data_path, split='train', text_req=True)
# print(train_dataset.char_to_id)
# print(train_dataset.id_to_char)
# print(train_dataset.idx_to_char(np.arange(26,32)))
char_seq = "python handwriting synthesis"
bias = 10.
# is_map: also return the attention map (phi) over input characters.
is_map = True
ytext = char_seq + " "
gen_seq, phi = generate_conditional_sequence(
model_path, char_seq, device, train_dataset.char_to_id,
train_dataset.idx_to_char, bias, prime=False, prime_seq=None, real_text=None, is_map=is_map)
if is_map:
# Attention heatmap: characters (y) vs generation time steps (x).
plt.imshow(phi, cmap='viridis', aspect='auto')
plt.colorbar()
plt.xlabel("time steps")
plt.yticks(np.arange(phi.shape[0]), list(ytext), rotation='horizontal')
plt.margins(0.2)
plt.subplots_adjust(bottom=0.15)
plt.show()
gen_seq = data_denormalization(Global.train_mean, Global.train_std, gen_seq)
# plot the sequence
plot_stroke(gen_seq[0])
```
### Conditional priming using style from training data:
```
# --- Conditional generation primed with a style taken from the training data ---
# NOTE(review): relies on model_path and train_dataset set in the previous
# (conditional generation) cell.
# seed = 218#196 198 200 216 218 220 232 241 262 266 288 290 302
# if seed:
# print("seed:",seed)
# torch.manual_seed(seed)
# np.random.seed(seed)
prime = True
bias = 6.
char_seq = "using recurrent neural network"
is_map = True
strokes = np.load(data_path + 'strokes.npy',
allow_pickle=True, encoding='bytes')
with open(data_path + 'sentences.txt') as file:
texts = file.read().splitlines()
# Fixed training sample used as the priming style.
idx = 345#np.random.randint(0, len(strokes))
print("Prime style index: ", idx)
real_text = texts[idx]
style = strokes[idx]
# plot the sequence
plot_stroke(style)
print("Priming text: ", real_text)
mean, std, style = data_normalization(style)
style = torch.from_numpy(style).unsqueeze(0).to(device)
# style = valid_offset_normalization(Global.train_mean, Global.train_std, style[None,:,:])
# style = torch.from_numpy(style).to(device)
print("Priming sequence size: ", style.shape)
# y-axis labels for the attention map: priming text followed by the new text.
ytext = real_text + ' ' + char_seq + " "
gen_seq, phi = generate_conditional_sequence(
model_path, char_seq, device, train_dataset.char_to_id,
train_dataset.idx_to_char, bias, prime, style, real_text, is_map)
if is_map:
plt.imshow(phi, cmap='viridis', aspect='auto')
plt.colorbar()
plt.xlabel("time steps")
plt.yticks(np.arange(phi.shape[0]), list(ytext), rotation='horizontal')
plt.margins(0.2)
plt.subplots_adjust(bottom=0.15)
plt.show()
# denormalize the generated offsets using train set mean and std
print("data denormalization...")
gen_seq = data_denormalization(Global.train_mean, Global.train_std, gen_seq)
# plot the sequence
plot_stroke(gen_seq[0])
```
### Conditional priming using unseen style:
```
# --- Conditional generation primed with an unseen (user-provided) style ---
seed = 122 #213
torch.manual_seed(seed)
np.random.seed(seed)
model_path = '../results/best_model_synthesis.pt'
train_dataset = HandwritingDataset(data_path, split='train', text_req=True)
prime = True
bias = 8.
char_seq = "hello world"
is_map = True
file_path = '../app/static/'
style = np.load(file_path + 'style.npy', allow_pickle=True, encoding='bytes').astype(np.float32)
with open(file_path + 'inpText.txt') as file:
texts = file.read().splitlines()
real_text = texts[0]
print(np.min(style, axis=0))
print(np.max(style, axis=0))
# plot the sequence
plot_stroke(style)
print("Priming text: ", real_text)
mean, std, style = data_normalization(style)
style = torch.from_numpy(style).unsqueeze(0).to(device)
# style = valid_offset_normalization(Global.train_mean, Global.train_std, style[None,:,:])
# style = torch.from_numpy(style).to(device)
print("Priming sequence size: ", style.shape)
ytext = real_text + ' ' + char_seq + " "
gen_seq, phi = generate_conditional_sequence(
model_path, char_seq, device, train_dataset.char_to_id,
train_dataset.idx_to_char, bias, prime, style, real_text, is_map)
if is_map:
plt.imshow(phi, cmap='viridis', aspect='auto')
plt.colorbar()
plt.xlabel("time steps")
plt.yticks(np.arange(phi.shape[0]), list(ytext), rotation='horizontal')
plt.margins(0.2)
plt.subplots_adjust(bottom=0.15)
plt.show()
# denormalize the generated offsets using train set mean and std
print("data denormalization...")
# The primed prefix was normalised with the style's own stats; the generated
# continuation uses the training-set stats, so each segment is denormalised
# separately at the split point `end`.
end = style.shape[1]
gen_seq[:,:end] = data_denormalization(mean, std, gen_seq[:, :end])
gen_seq[:,end:] = data_denormalization(Global.train_mean, Global.train_std, gen_seq[:,end:])
# plot the sequence
print(gen_seq.shape)
plot_stroke(gen_seq[0][:end])
plot_stroke(gen_seq[0,end:])
```
# import os
import pickle
import random
import numpy as np
import svgwrite
from IPython.display import SVG, display
def get_bounds(data, factor):
    """Return (min_x, max_x, min_y, max_y) of the cumulative pen path.

    Columns 1 and 2 of each row of *data* hold x/y offsets; *factor*
    rescales them. The origin (0, 0) is always included in the bounds.
    """
    lo_x = hi_x = lo_y = hi_y = 0
    pos_x = 0
    pos_y = 0
    for row in data:
        # Accumulate offsets into an absolute pen position.
        pos_x += float(row[1]) / factor
        pos_y += float(row[2]) / factor
        lo_x, hi_x = min(lo_x, pos_x), max(hi_x, pos_x)
        lo_y, hi_y = min(lo_y, pos_y), max(hi_y, pos_y)
    return (lo_x, hi_x, lo_y, hi_y)
# old version, where each path is entire stroke (smaller svg size, but
# have to keep same color)
def draw_strokes(data, factor=1, svg_filename='sample.svg'):
    """Render a pen-stroke sequence to an SVG file and display it inline.

    data: rows of (pen_lift, dx, dy); *factor* rescales the offsets.
    The whole drawing is emitted as a single <path>, so it can only use
    one stroke colour.
    """
    lo_x, hi_x, lo_y, hi_y = get_bounds(data, factor)
    dims = (50 + hi_x - lo_x, 50 + hi_y - lo_y)

    dwg = svgwrite.Drawing(svg_filename, size=dims)
    dwg.add(dwg.rect(insert=(0, 0), size=dims, fill='white'))

    lift_pen = 1
    # Start 25px inside the canvas so the drawing is centred with a margin.
    p = "M%s,%s " % (25 - lo_x, 25 - lo_y)
    command = "m"
    for row in data:
        # 'm' = move (pen was lifted), 'l' starts a line run, '' continues one.
        if lift_pen == 1:
            command = "m"
        elif command != "l":
            command = "l"
        else:
            command = ""
        dx = float(row[1]) / factor
        dy = float(-row[2]) / factor
        lift_pen = row[0]
        p += command + str(dx) + "," + str(dy) + " "

    the_color = "blue"
    stroke_width = 1
    dwg.add(dwg.path(p).stroke(the_color, stroke_width).fill("none"))
    dwg.save()
    display(SVG(dwg.tostring()))
| github_jupyter |
```
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
%matplotlib inline
```
## Sample Submission
```
# --- Sanity-check the sample submission against known-bad test images ---
sample_sub = pd.read_csv("./datasets/train_val/sample_submission.csv")
sample_sub.describe()
sample_sub[sample_sub["ImageId"] == "9d3cd0de1.jpg"]
# 14 unwanted test images
unwanted_test_images = ['13703f040.jpg',
'14715c06d.jpg',
'33e0ff2d5.jpg',
'4d4e09f2a.jpg',
'877691df8.jpg',
'8b909bb20.jpg',
'a8d99130e.jpg',
'ad55c3143.jpg',
'c8260c541.jpg',
'd6c7f17c7.jpg',
'dc3e7c901.jpg',
'e44dffe88.jpg',
'ef87bad36.jpg',
'f083256d8.jpg',]
print("Submission ids", len(sample_sub["ImageId"].tolist()))
print("Unwanted", len(unwanted_test_images))
# Check if any unwanted images are in sample submission csv
# (result equals the unique-id count only if none of the unwanted ids appear)
len(np.setdiff1d(sample_sub['ImageId'].unique(), unwanted_test_images, assume_unique=True))
```
## Train Ship Segmentations
```
# --- Load and subsample the training RLE masks (one row per mask instance) ---
train_ship_segmentations_df = pd.read_csv("./datasets/train_val/train_ship_segmentations.csv")
print(train_ship_segmentations_df.count())
train_ship_segmentations_df.describe()
print("Images with no ship")
train_ship_segmentations_df[train_ship_segmentations_df["EncodedPixels"].isnull()].describe()
# Corrupted image
train_ship_segmentations_df.loc[train_ship_segmentations_df["ImageId"] == "6384c3e78.jpg"]
# Remove corrupted image
train_ship_segmentations_df = train_ship_segmentations_df.loc[train_ship_segmentations_df["ImageId"] != "6384c3e78.jpg"]
train_ship_segmentations_df.describe()
train_ship_segmentations_df_null = train_ship_segmentations_df["EncodedPixels"].isnull()
# NOTE(review): despite the name, nulls_df holds rows WITH masks (~isnull),
# so the sample below removes 99% of ship-bearing images, not empty ones —
# this contradicts the original "remove frac % of empty images" comment;
# confirm which behaviour was intended.
nulls_df = train_ship_segmentations_df[~train_ship_segmentations_df_null]
nulls_sample_df = nulls_df.sample(frac=0.99) # samples 99% of non-null (with-mask) rows for removal
train_ship_segmentations_df = train_ship_segmentations_df.loc[~train_ship_segmentations_df["ImageId"].isin(nulls_sample_df["ImageId"])]
# train_ship_segmentations_df = train_ship_segmentations_df.loc[train_ship_segmentations_df["EncodedPixels"].isnull()].sample(frac=0.5)
train_ship_segmentations_df.shape
```
## Actual Submission
```
# --- Compare two submissions: empty-prediction fraction and ships per image ---
submission_1 = pd.read_csv("~/Downloads/submission_20180820T0329.csv")
submission = pd.read_csv("~/Downloads/submission_20180827T0655.csv")
print(submission_1.head())
submission_1.describe()
print(submission.head())
submission.describe()
# Hard-coded empty-row / total-row ratios observed for each submission.
print(70942/99150)
submission_1[submission_1["EncodedPixels"].isnull()].describe()
print(77503/91941)
submission[submission["EncodedPixels"].isnull()].describe()
# One row per mask instance -> count masks ("ships") per image.
submission['ships'] = submission['EncodedPixels'].map(lambda c_row: 1 if isinstance(c_row, str) else 0)
unique_img_ids = submission.groupby('ImageId').agg({'ships': 'sum'}).reset_index()
unique_img_ids['has_ship'] = unique_img_ids['ships'].map(lambda x: 1.0 if x>0 else 0.0)
unique_img_ids['has_ship_vec'] = unique_img_ids['has_ship'].map(lambda x: [x])
# some files are too small/corrupt
# unique_img_ids['file_size_kb'] = unique_img_ids['ImageId'].map(lambda c_img_id:
# os.stat(os.path.join(train_image_dir,
# c_img_id)).st_size/1024)
# unique_img_ids = unique_img_ids[unique_img_ids['file_size_kb']>50] # keep only 50kb files
# unique_img_ids['file_size_kb'].hist()
unique_img_ids['ships'].hist()
# unique_img_ids.sample(10)
# Same per-image ship count for the older submission (overwrites unique_img_ids).
submission_1['ships'] = submission_1['EncodedPixels'].map(lambda c_row: 1 if isinstance(c_row, str) else 0)
unique_img_ids = submission_1.groupby('ImageId').agg({'ships': 'sum'}).reset_index()
unique_img_ids['has_ship'] = unique_img_ids['ships'].map(lambda x: 1.0 if x>0 else 0.0)
unique_img_ids['has_ship_vec'] = unique_img_ids['has_ship'].map(lambda x: [x])
# some files are too small/corrupt
# unique_img_ids['file_size_kb'] = unique_img_ids['ImageId'].map(lambda c_img_id:
# os.stat(os.path.join(train_image_dir,
# c_img_id)).st_size/1024)
# unique_img_ids = unique_img_ids[unique_img_ids['file_size_kb']>50] # keep only 50kb files
# unique_img_ids['file_size_kb'].hist()
unique_img_ids['ships'].hist()
```
## Remove duplicate pixels
```
def rle_encode(img):
    """Run-length encode a binary mask (1 = mask, 0 = background).

    Pixels are scanned column-major (Fortran order, via the transpose) and
    the result is a space-separated string of 1-based (start, length) pairs.
    Returns '' for an all-zero mask.
    """
    flat = np.concatenate(([0], img.T.flatten(), [0]))
    # 1-based indices where the pixel value changes: run starts and run ends.
    boundaries = np.flatnonzero(flat[1:] != flat[:-1]) + 1
    # Turn every second entry (run end) into a run length.
    boundaries[1::2] -= boundaries[::2]
    return ' '.join(map(str, boundaries))
def rle_decode(mask_rle, shape=(768, 768)):
    """Decode a run-length string of 1-based (start, length) pairs.

    mask_rle: space-separated '(start length)' pairs; '' yields an empty mask.
    shape: size of the flat buffer as (rows, cols) before the final transpose
           (runs are column-major, matching rle_encode).
    Returns a uint8 array, 1 = mask, 0 = background.
    """
    tokens = np.asarray(mask_rle.split(), dtype=int)
    starts = tokens[0::2] - 1          # convert to 0-based offsets
    stops = starts + tokens[1::2]
    flat = np.zeros(shape[0] * shape[1], dtype=np.uint8)
    for begin, stop in zip(starts, stops):
        flat[begin:stop] = 1
    return flat.reshape(shape).T
# --- Post-process a submission so no two instance masks of one image overlap
# (later masks lose any pixels already claimed by an earlier mask) ---
sub_df = pd.read_csv("~/Downloads/submission_20180825T0545.csv")
sub_df.describe()
sub_df.head()
# Only images with more than one predicted instance can have overlaps.
multiple_instances_mask = sub_df.duplicated(subset="ImageId", keep=False)
multiple_instances_df = sub_df[multiple_instances_mask]
multiple_instances_df.head()
example_mask = multiple_instances_df["ImageId"] == "443cfdb6a.jpg"
image_id_df = multiple_instances_df[example_mask]
print(image_id_df)
unique_ids_multiple_instances = multiple_instances_df.ImageId.unique()
print("Checking ", len(unique_ids_multiple_instances), "unique images")
# NOTE(review): out_pred_rows only covers multi-instance images; the untouched
# single-instance rows must be re-joined before submitting.
out_pred_rows = []
for image_id in unique_ids_multiple_instances:
# print(image_id)
# Image masks in RLE
img_masks = multiple_instances_df.loc[multiple_instances_df['ImageId'] == image_id, 'EncodedPixels'].tolist()
# print("Initial", img_masks)
# Mask array placeholder
mask_array = np.zeros([768, 768, len(img_masks)],dtype=np.uint8)
# Build mask array
for index, mask in enumerate(img_masks):
mask_array[:,:,index] = rle_decode(mask, [768, 768])
# print("mask_array shape", mask_array.shape)
loop_range = np.array(mask_array).shape[-1] - 1
# print("loop_range", np.arange(loop_range))
# Check for overlap and remove overlapped pixels
# (pairwise over instances i < j; j's overlapping pixels are zeroed in place)
for i in np.arange(loop_range):
# print("i: ", i)
mask = mask_array[:,:,i]
loop_j = np.arange(loop_range-i)+i+1
# print(loop_j)
for j in loop_j:
# print("j: ", j)
next_mask = mask_array[:,:,j]
index_of_overlap = np.logical_and(mask, next_mask)
if any(index_of_overlap.flatten()):
print("OVERLAP ", image_id)
next_mask[index_of_overlap] = 0
# else:
# print("NO OVERLAP")
# Convert back into RLE encoding
re_encoded_to_rle_list = []
for i in np.arange(np.array(mask_array).shape[-1]):
boolean_mask = mask_array[:,:,i]
re_encoded_to_rle = rle_encode(boolean_mask)
re_encoded_to_rle_list.append(re_encoded_to_rle)
# print("---------------------")
# print("Ending", re_encoded_to_rle_list)
if len(re_encoded_to_rle_list) == 0:
out_pred_rows += [{'ImageId': image_id, 'EncodedPixels': None}]
else:
for rle_mask in re_encoded_to_rle_list:
out_pred_rows += [{'ImageId': image_id, 'EncodedPixels': rle_mask}]
no_overlap_df = pd.DataFrame(out_pred_rows)[['ImageId', 'EncodedPixels']]
# print(no_overlap_df)
# --- Scratch cell: sanity-check the overlap-removal logic on toy masks ---
np.arange(2)+1
a1 = np.array([[1,1,0,0],
[1,1,0,0]])
a2 = np.array([[0,0,0,0],
[0,1,1,0]])
a3 = np.array([[0,1,1,0],
[0,0,0,0]])
masks = np.zeros([2,4,3])
masks[:,:,0] = a1
masks[:,:,1] = a2
masks[:,:,2] = a3
masks.shape
# print("Masks", masks)
# NOTE(review): iterating `masks` yields 2-D slices along axis 0 (per-row
# slices of shape (4, 3)), not the per-instance masks stored along the LAST
# axis — confirm this loop exercises what was intended.
for index, mask in enumerate(masks):
for next_mask in masks[index+1:]:
index_of_overlap = np.logical_and(mask, next_mask)
if any(index_of_overlap.flatten()):
next_mask[index_of_overlap] = 0
print(masks)
# NOTE(review): np.logical_and(a1, a2, a3) treats the THIRD positional
# argument as the ufunc `out` array — a3 is overwritten with a1 AND a2; it
# is NOT a three-way AND (that would be np.logical_and.reduce([a1, a2, a3])).
# The subsequent a2[index_of_overlap] therefore uses an integer (not boolean)
# index array.
index_of_overlap = np.logical_and(a1,a2,a3)
index_of_overlap
a2[index_of_overlap] = 0
a3[index_of_overlap] = 0
print(a1)
print(a2)
print(a3)
```
| github_jupyter |
```
%tensorflow_version 2.x
import random as rn
import time
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow import keras
# Loading data: MNIST digits, 28x28 greyscale.
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
height = 28
width = 28
n_channel = 1
# Scale pixel values from [0, 255] to [0, 1].
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
# Add the trailing channel dimension expected by conv layers: (N, H, W, C).
x_train = x_train.reshape(x_train.shape[0], height, width, n_channel)
x_test = x_test.reshape(x_test.shape[0], height, width, n_channel)
def quantise(images, q_levels):
    """Map pixel intensities in [0, 1) to integer levels 0..q_levels-1 (as float32)."""
    bin_edges = np.arange(q_levels) / q_levels
    level_index = np.digitize(images, bin_edges) - 1
    return level_index.astype('float32')
# Quantise the input data in q levels.
q_levels = 4
x_train_quantised = quantise(x_train, q_levels)
x_test_quantised = quantise(x_test, q_levels)

# Creating input stream using tf.data API.  Each element is a triple:
# (input rescaled to [0, 1], integer level targets, class label).
batch_size = 128
train_buf = 60000  # shuffle buffer spans the full training set
train_dataset = tf.data.Dataset.from_tensor_slices((x_train_quantised / (q_levels - 1),
                                                    x_train_quantised.astype('int32'),
                                                    y_train))
train_dataset = train_dataset.shuffle(buffer_size=train_buf)
train_dataset = train_dataset.batch(batch_size)

test_dataset = tf.data.Dataset.from_tensor_slices((x_test_quantised / (q_levels - 1),
                                                   x_test_quantised.astype('int32'),
                                                   y_test))
test_dataset = test_dataset.batch(batch_size)
class MaskedConv2D(tf.keras.layers.Layer):
    """2-D convolution whose kernel is masked for autoregressive models.

    Supports three mask types:
      * 'A' -- horizontal mask excluding the centre pixel (first layer),
      * 'B' -- horizontal mask including the centre pixel (later layers),
      * 'V' -- vertical mask seeing only rows strictly above the centre.
    """

    def __init__(self,
                 mask_type,
                 filters,
                 kernel_size,
                 strides=1,
                 padding='same',
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros'):
        super(MaskedConv2D, self).__init__()

        assert mask_type in {'A', 'B', 'V'}
        self.mask_type = mask_type

        self.filters = filters
        # Normalise a scalar kernel size to an (h, w) pair.
        if isinstance(kernel_size, int):
            kernel_size = (kernel_size, kernel_size)
        self.kernel_size = kernel_size
        self.strides = strides
        self.padding = padding.upper()  # tf.nn.conv2d expects 'SAME'/'VALID'
        self.kernel_initializer = keras.initializers.get(kernel_initializer)
        self.bias_initializer = keras.initializers.get(bias_initializer)

    def build(self, input_shape):
        kernel_h, kernel_w = self.kernel_size

        self.kernel = self.add_weight("kernel",
                                      shape=(kernel_h,
                                             kernel_w,
                                             int(input_shape[-1]),
                                             self.filters),
                                      initializer=self.kernel_initializer,
                                      trainable=True)
        self.bias = self.add_weight("bias",
                                    shape=(self.filters,),
                                    initializer=self.bias_initializer,
                                    trainable=True)

        # Binary mask of shape (kh, kw, in_ch, out_ch), multiplied onto the
        # kernel at every call to enforce the autoregressive ordering.
        mask = np.ones(self.kernel.shape, dtype=np.float32)
        if self.mask_type == 'V':
            # Vertical stack: block the centre row and everything below it.
            mask[kernel_h // 2:, :, :, :] = 0.
        else:
            # Horizontal stack: in the centre row, block taps from the centre
            # rightwards; type 'B' keeps the centre tap (offset of +1).
            mask[kernel_h // 2, kernel_w // 2 + (self.mask_type == 'B'):, :, :] = 0.
            # Block every row below the centre (3 indices broadcast over the
            # remaining out-channel axis of the 4-D mask).
            mask[kernel_h // 2 + 1:, :, :] = 0.

        self.mask = tf.constant(mask, dtype=tf.float32, name='mask')

    def call(self, input):
        # Zero the causally-forbidden kernel taps before convolving.
        masked_kernel = tf.math.multiply(self.mask, self.kernel)
        x = tf.nn.conv2d(input, masked_kernel, strides=[1, self.strides, self.strides, 1], padding=self.padding)
        x = tf.nn.bias_add(x, self.bias)
        return x
class GatedBlock(tf.keras.Model):
    """Gated block with vertical/horizontal stacks and class conditioning.

    Takes [vertical features, horizontal features, integer label] and returns
    the updated (vertical, horizontal) feature pair.
    """

    def __init__(self, mask_type, filters, kernel_size):
        super(GatedBlock, self).__init__(name='')
        self.mask_type = mask_type
        # Vertical stack: NxN conv masked from the centre row downwards.
        self.vertical_conv = MaskedConv2D(mask_type='V', filters=2 * filters, kernel_size=kernel_size)
        # Horizontal stack: 1xN conv masked right of the current pixel.
        self.horizontal_conv = MaskedConv2D(mask_type=mask_type, filters=2 * filters, kernel_size=(1, kernel_size))
        # 1x1 conv feeding vertical-stack features into the horizontal stack.
        self.v_to_h_conv = keras.layers.Conv2D(filters=2 * filters, kernel_size=1)
        self.horizontal_output = keras.layers.Conv2D(filters=filters, kernel_size=1)
        # Label-conditioning projections added to each stack's preactivation.
        self.cond_fc_h = keras.layers.Dense(2 * filters, use_bias=False)
        self.cond_fc_v = keras.layers.Dense(2 * filters, use_bias=False)

    def _gate(self, x):
        # Split channels in half and combine via a tanh * sigmoid gate.
        tanh_preactivation, sigmoid_preactivation = tf.split(x, 2, axis=-1)
        return tf.nn.tanh(tanh_preactivation) * tf.nn.sigmoid(sigmoid_preactivation)

    def call(self, input_tensor):
        # Unpack [vertical features, horizontal features, class label].
        v = input_tensor[0]
        h = input_tensor[1]
        y = input_tensor[2]
        y = tf.one_hot(y, 10)  # 10 MNIST classes

        # Expand the label embedding to (N, 1, 1, 2*filters) so it broadcasts
        # over the spatial dimensions.
        codified_h = tf.expand_dims(tf.expand_dims(self.cond_fc_h(y), 1), 1)
        codified_v = tf.expand_dims(tf.expand_dims(self.cond_fc_v(y), 1), 1)

        horizontal_preactivation = self.horizontal_conv(h)  # 1xN
        vertical_preactivation = self.vertical_conv(v)  # NxN
        v_to_h = self.v_to_h_conv(vertical_preactivation)  # 1x1
        vertical_preactivation = vertical_preactivation + codified_v
        v_out = self._gate(vertical_preactivation)

        # Horizontal stack also receives the vertical stack's information.
        horizontal_preactivation = horizontal_preactivation + v_to_h
        horizontal_preactivation = horizontal_preactivation + codified_h
        h_activated = self._gate(horizontal_preactivation)

        if self.mask_type == 'A':
            # First layer: no residual connection.
            h_out = h_activated
        elif self.mask_type == 'B':
            # Later layers: 1x1 projection plus residual connection.
            h_out = self.horizontal_output(h_activated)
            h_out = h + h_out

        return v_out, h_out
# Build the conditional model: one type-'A' gated block followed by seven
# type-'B' blocks, then two 1x1 convolutions producing q_levels logits per
# channel for every pixel.
inputs = keras.layers.Input(shape=(height, width, n_channel))
labels = keras.layers.Input(shape=(), dtype=tf.int32)
v, h = GatedBlock(mask_type='A', filters=64, kernel_size=7)([inputs, inputs, labels])
for i in range(7):
    v, h = GatedBlock(mask_type='B', filters=64, kernel_size=3)([v, h, labels])
x = keras.layers.Activation(activation='relu')(h)
x = keras.layers.Conv2D(filters=128, kernel_size=1, strides=1)(x)
x = keras.layers.Activation(activation='relu')(x)
x = keras.layers.Conv2D(filters=n_channel * q_levels, kernel_size=1, strides=1)(x)  # shape [N,H,W,DC]
pixelcnn = tf.keras.Model(inputs=[inputs, labels], outputs=x)

# Prepare optimizer and loss function.
lr_decay = 0.9999
learning_rate = 5e-3
# NOTE(review): `lr=` is the legacy Keras keyword (newer releases use
# `learning_rate=`) -- confirm against the TF version in use.
optimizer = tf.keras.optimizers.Adam(lr=learning_rate)
compute_loss = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
@tf.function
def train_step(batch_x, batch_y, batch_label):
    """Run one optimisation step and return the batch loss.

    batch_x: inputs rescaled to [0, 1]; batch_y: integer quantisation-level
    targets; batch_label: integer class labels used for conditioning.
    """
    with tf.GradientTape() as ae_tape:
        logits = pixelcnn([batch_x, batch_label], training=True)
        # Regroup the fused channel axis into per-channel level logits.
        logits = tf.reshape(logits, [-1, height, width, q_levels, n_channel])  # shape [N,H,W,DC] -> [N,H,W,D,C]
        logits = tf.transpose(logits, perm=[0, 1, 2, 4, 3])  # shape [N,H,W,D,C] -> [N,H,W,C,D]
        # Cross-entropy over the q_levels classes per pixel per channel.
        loss = compute_loss(tf.one_hot(batch_y, q_levels), logits)
    gradients = ae_tape.gradient(loss, pixelcnn.trainable_variables)
    # Global-norm clipping to stabilise training.
    gradients, _ = tf.clip_by_global_norm(gradients, 1.0)
    optimizer.apply_gradients(zip(gradients, pixelcnn.trainable_variables))
    return loss
# Training loop: per-iteration exponential learning-rate decay, progress
# print every 100 iterations, and a per-epoch timing summary.
n_epochs = 50
n_iter = int(np.ceil(x_train_quantised.shape[0] / batch_size))
for epoch in range(n_epochs):
    start_epoch = time.time()
    for i_iter, (batch_x, batch_y, batch_label) in enumerate(train_dataset):
        start = time.time()
        # Decay the learning rate a little on every single step.
        optimizer.lr = optimizer.lr * lr_decay
        loss = train_step(batch_x, batch_y, batch_label)
        iter_time = time.time() - start
        if i_iter % 100 == 0:
            print('EPOCH {:3d}: ITER {:4d}/{:4d} TIME: {:.2f} LOSS: {:.4f}'.format(epoch,
                                                                                   i_iter, n_iter,
                                                                                   iter_time,
                                                                                   loss))
    epoch_time = time.time() - start_epoch
    # NOTE(review): with 0-based `epoch` this ETA counts the epoch that just
    # finished, so it overestimates by one epoch_time.
    print('EPOCH {:3d}: TIME: {:.2f} ETA: {:.2f}'.format(epoch,
                                                         epoch_time,
                                                         epoch_time * (n_epochs - epoch)))
# Conditional sampling: generate 9 images of the digit '2', one pixel at a
# time in raster order; each pixel is drawn from the model's distribution
# given the pixels generated so far and the class label.
samples = np.zeros((9, height, width, n_channel), dtype='float32')
samples_labels = (np.ones((9)) * 2).astype('int32')
for i in range(height):
    for j in range(width):
        logits = pixelcnn([samples, samples_labels])
        logits = tf.reshape(logits, [-1, height, width, q_levels, n_channel])  # shape [N,H,W,DC] -> [N,H,W,D,C]
        logits = tf.transpose(logits, perm=[0, 1, 2, 4, 3])  # shape [N,H,W,D,C] -> [N,H,W,C,D]
        # Sample a quantisation level for pixel (i, j) and write it back,
        # rescaled to [0, 1], so later pixels can condition on it.
        next_sample = tf.random.categorical(logits[:, i, j, 0, :], 1)
        samples[:, i, j, 0] = (next_sample.numpy() / (q_levels - 1))[:, 0]

# Show the 9 generated digits in a 3x3 grid.
fig = plt.figure(figsize=(3, 3))
for i in range(9):
    ax = fig.add_subplot(3, 3, i + 1)
    ax.matshow(samples[i, :, :, 0], cmap=matplotlib.cm.binary)
    plt.xticks(np.array([]))
    plt.yticks(np.array([]))
plt.show()
# Same pixel-by-pixel conditional sampling, this time for the digit '5'.
samples = np.zeros((9, height, width, n_channel), dtype='float32')
samples_labels = (np.ones((9)) * 5).astype('int32')
for i in range(height):
    for j in range(width):
        logits = pixelcnn([samples, samples_labels])
        logits = tf.reshape(logits, [-1, height, width, q_levels, n_channel])  # shape [N,H,W,DC] -> [N,H,W,D,C]
        logits = tf.transpose(logits, perm=[0, 1, 2, 4, 3])  # shape [N,H,W,D,C] -> [N,H,W,C,D]
        # Sample pixel (i, j) and feed it back for the next positions.
        next_sample = tf.random.categorical(logits[:, i, j, 0, :], 1)
        samples[:, i, j, 0] = (next_sample.numpy() / (q_levels - 1))[:, 0]

# Show the 9 generated digits in a 3x3 grid.
fig = plt.figure(figsize=(3, 3))
for i in range(9):
    ax = fig.add_subplot(3, 3, i + 1)
    ax.matshow(samples[i, :, :, 0], cmap=matplotlib.cm.binary)
    plt.xticks(np.array([]))
    plt.yticks(np.array([]))
plt.show()
```
| github_jupyter |
# Step 1: Data Acquisition
Series on Designing with Twitter Data
In this tutorial:
* Task 1: Retrieving data from Twitter
* Task 2: Managing data
### Output: tweets.csv -> Step 5
For Education Only
@ Wolf & Jacky - SST - IO - TUDelft
V1.0: April 20, 2020
# Task 1: Retrieving Data from Twitter
The objective of this task is to get you familiar with Twitter REST API. To achieve this, you will first create a data retrieving application. Next, you will use the library tweepy to interact with Twitter and access Tweets.
## 1.1 Data Retrieving Application
To access tweets from Twitter, you need to register an 'application' on Twitter as a Developer before you can use it.
If you do not have one yet, create a Twitter account: https://twitter.com
Then, you need to upgrade this account to a developer account: https://dev.twitter.com
Finally, you can access the application page via the following link: https://dev.twitter.com/apps
By clicking on the blue button 'Create an app', you need to provide some basic information about the application.
After that, you will be able to get the following parameters:
* Consumer key;
* Consumer secret;
* Access token;
* Access token secret.
We need these four parameters to get our code authenticated with OAuth on Twitter. Let's create a .env file in the project folder and write the following 4 lines, replacing YOUR-... with your own keys and secrets.
```
CONSUMER_KEY=YOUR-KEY
CONSUMER_SECRET=YOUR-SECRET
ACCESS_TOKEN=YOUR-TOKEN
ACCESS_TOKEN_SECRET=YOUR-TOKEN-SECRET
```
With this, we will be able to share our code openly without sharing our credentials.
## 1.2 Retrieving Data
It is time to start coding! The first thing we need to do is tell our code where to find our Keys and Secrets. We use the library 'dotenv' to extract this information from the .env file.
```
# Install the library
!pip install python-dotenv

# Load it on the Notebook
from dotenv import load_dotenv
import os
load_dotenv()

# Use it to retrieve our four Twitter parameters from the .env file
consumer_key = os.environ['CONSUMER_KEY']
consumer_secret= os.environ['CONSUMER_SECRET']
access_token=os.environ['ACCESS_TOKEN']
access_token_secret=os.environ['ACCESS_TOKEN_SECRET']
```
For the next step, we use another Python library, tweepy, to facilitate our interaction with Twitter. Let's install and load the library.
```
!pip install tweepy
from tweepy import OAuthHandler, API
import json
```
Once you have the tokens and secrets stored in four variables as above, you can run the following code to establish the connection with Twitter via the OAuthHandler and API object from tweepy.
```
# Authenticate with Twitter via OAuth and build the tweepy API client.
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = API(auth)
```
Let's retrieve the tweets from your own timeline. The following code uses the API object to get tweets from your timeline (function 'home_timeline') and stores them in a variable called public_tweets.
```
# Fetch recent tweets from the authenticated account's home timeline.
public_tweets = api.home_timeline()
```
Printing the results gives us a long JSON structure (starting with _json=), which is not very easy to read.
```
# For each tweet in the result
for tweet in public_tweets:
    # printing the text stored inside the tweet object
    print(tweet.text)
```
## 1.3 Searching per Twitter Id
Our Twitter timeline is a good test, but it does not get us far in the exploration of a given topic. A more effective way is to look for a specific account id (e.g. a person, a company). This time, we use the function 'user_timeline' with a Twitter account id and the number of tweets we want. In the following example we try to pull 20 tweets from Tesla's Twitter account.
```
# Twitter account to look at
twitter_id = "Tesla"
# Number of tweets to pull
tweetCount = 20
# Calling the user_timeline function with our parameters
timeline_results = api.user_timeline(id=twitter_id, count=tweetCount)
# For each tweet of the result
# BUG FIX: the loop iterated `results`, which is never defined (NameError);
# the variable assigned above is `timeline_results`.
for tweet in timeline_results:
    # printing the text stored inside the tweet object
    print(tweet.text)
```
## 1.4 Searching per Query
Finally, going more into the details, we can specify a more specific query on a topic. In the following example, we use the function 'search' to try to pull 20 tweets in english that include the keywords 'prius' and 'car'.
```
# Simple query
query = "prius car"
# Number of tweets to pull
tweetCount = 20
# Language code (follows ISO 639-1 standards)
language = "en"
# Calling the search function with our parameters
query_results = api.search(q=query, lang=language, count=tweetCount)
# For each of the tweets pulled
for tweet in query_results:
    # printing the text stored inside the tweet object
    print(tweet.text)
```
You can find more options and documentation in the [Tweepy documentation](http://docs.tweepy.org/en/latest/api.html#API.search)
# Task 2: Managing Data
In this second task, we will look at the result itself: how it is formatted, what it contains and how to store the tweets.
## 2.1 Navigating the Results
Looking back at the previous example of a query with 'prius' and 'car', let's print the raw JSON result of the query. JSON (JavaScript Object Notation) is a common data structure to exchange information. The function json.dumps helps us handle this structure by formatting it. You can try with and without the parameter 'indent=2' for better readability.
```
# Pull a single tweet and dump its full JSON payload.
query_results = api.search(q="prius car", lang="en", count=1)
for tweet in query_results:
    # printing the raw json; indent=2 pretty-prints it
    print(json.dumps(tweet._json, indent=2))
```
For this query we set the number of tweets to 1 and still, the result is long: there is much more than the text of the tweet. Browsing through the key/values you will certainly spot the creation date (created_at), the unique id of the tweet and the text of the tweet. Further down, a data structure reveals information about the user, including their name, description and so on. Towards the end, there is also information about the number of retweets or favorites. Any of those attributes can be extracted, but keep in mind that not all tweets have all the possible information.
To navigate the results and extract only the values we want, we use the dot '.' to enter an attribute. Here is an example that extracts the profile description of the tweet author. Try to change the code to extract another attribute.
```
test_results = api.search(q="prius car", lang="en", count=5)
for tweet in test_results:
    # printing the profile description of each tweet's author
    print(tweet.user.description)
```
## 2.2 Storing and Retrieving Data from JSON
We can also store the result in a file. To do this, we use the function open() with the name of the file and the option 'a' (append mode) to continuously add at the end of the file.
```
query_results = api.search(q="prius car", lang="en", count=100)

# Open file with option 'a' to 'append' new content at the end of the file.
json_file = open("tweets.json","a")
count=0
for tweet in query_results:
    # write the tweet in the file, one JSON object per line
    json_file.write(json.dumps(tweet._json))
    # Create a new line
    json_file.write('\n')
    count=count+1
json_file.close()
print(count, 'tweets stored as JSON.')
```
## 2.3 Storing and Retrieving Data from CSV
Finally, we can combine the two previous steps (selecting attributes and storing into files) to store our data in a CSV format. CSV stands for Comma Separated Values. It is a common format to store tabular data such as spreadsheets. Instead of storing all the data retrieved from the query, we will select only the ID and the text. Feel free to experiment and store more fields.
```
query_results = api.search(q="prius car", lang="en", count=100)

# Open file with option 'a' to 'append' new content at the end of the file.
csv_file = open("tweets.csv","a")
count=0
for tweet in query_results:
    # Compose a line with the tweet id and the text.
    # The double quotes guard against commas inside the tweet, and all
    # newlines are replaced by spaces.
    # NOTE(review): double quotes inside the tweet text are not escaped --
    # the csv module would handle that robustly.
    line = tweet.id_str + ',"' + tweet.text.replace('\n',' ') + '"\n'
    # Write tweet in file
    csv_file.write(line)
    # Count the number of lines for the end message
    count=count+1
csv_file.close()
print(count, 'tweets stored as CSV.')
```
This file can be used as an input of new data in step 5.
| github_jupyter |
```
import numpy as np
import os
from codeStore import support_fun as spf
import importlib
PWD = os.getcwd()
importlib.reload(spf)
ini_psi_type = 'rand'
def comm_do_calculate(ini_theta, ini_phi, ini_psi, max_t, rtol, atol, eval_dt,
                      calculate_fun, update_fun, table_name, omega_tail, i0):
    """Build the shell command line that launches one do_calculate.py job.

    The job is pinned to CPU slot ``i0`` via ``mpirun --slot-list`` and all
    numerical parameters are forwarded as command-line flags.  The returned
    string ends with ' & \\n\\n' so concatenated commands run in the background.
    """
    command = (
        f'mpirun -np 1 --slot-list {i0:d} python ../../do_calculate.py '
        f' -ini_theta {ini_theta:f} -ini_phi {ini_phi:f} -ini_psi {ini_psi:f}'
        f' -max_t {max_t:f} -rtol {rtol:e} -atol {atol:e} -eval_dt {eval_dt:e}'
        f' -calculate_fun {calculate_fun} -update_fun {update_fun} -table_name {table_name}'
        f' -omega_tail {omega_tail:f}'
        ' & \n\n'
    )
    return command
# max_t, rtol, atol, eval_dt, update_fun = 2000, 1e-6, 1e-9, 1e-3, '5bs'
# calculate_fun = 'do_calculate_ecoli_Petsc4n'
# table_name = 'planeShearRatex_1d'
# omega_tail = 193.66659814
# n_theta, n_phi = 24, 48 # num of cpus and num of jobs
# job_dir = 'ecoliB01_a'
# write_pbs_head = spf.write_pbs_head_newturb
# phi_list = np.linspace(0, 2 * np.pi, n_phi)
# theta_list = np.linspace(0, np.pi, n_theta)
# max_t, rtol, atol, eval_dt, update_fun = 2000, 1e-6, 1e-9, 1e-3, '5bs'
# calculate_fun = 'do_calculate_ecoli_AvrPetsc4n'
# table_name = 'planeShearRatex_1d_avr'
# omega_tail = 193.66659814
# n_theta, n_phi = 24, 48 # num of cpus and num of jobs
# job_dir = 'ecoliB01Avr_a'
# write_pbs_head = spf.write_pbs_head_newturb
# phi_list = np.linspace(0, 2 * np.pi, n_phi)
# theta_list = np.linspace(0, np.pi, n_theta)
# max_t, rtol, atol, eval_dt, update_fun = 10000, 1e-9, 1e-12, 1e-3, '5bs'
# calculate_fun = 'do_calculate_helix_Petsc4n'
# table_name = 'hlxB01_tau1a'
# omega_tail = 0
# n_theta, n_phi = 24, 48 # num of cpus and num of jobs
# job_dir = 'helixB01_a'
# write_pbs_head = spf.write_pbs_head_newturb
# phi_list = np.linspace(0, 2 * np.pi, n_phi)
# theta_list = np.linspace(0, np.pi, n_theta)
# max_t, rtol, atol, eval_dt, update_fun = 10000, 1e-9, 1e-12, 1e-3, '5bs'
# calculate_fun = 'do_calculate_helix_AvrPetsc4n'
# table_name = 'hlxB01_tau1a_avr'
# omega_tail = 0
# n_theta, n_phi = 24, 48 # num of cpus and num of jobs
# job_dir = 'helixB01Avr_a'
# write_pbs_head = spf.write_pbs_head_newturb
# phi_list = np.linspace(0, 2 * np.pi, n_phi)
# theta_list = np.linspace(0, np.pi, n_theta)
# max_t, rtol, atol, eval_dt, update_fun = 10000, 1e-9, 1e-12, 1e-3, '5bs'
# calculate_fun = 'do_calculate_ecoli_passive_Petsc4n'
# table_name = 'planeShearRatex_1d_passive'
# omega_tail = 0
# n_theta, n_phi = 24, 48 # num of cpus and num of jobs
# job_dir = 'ecoliB01_passive_a'
# write_pbs_head = spf.write_pbs_head_newturb
# phi_list = np.linspace(0, 2 * np.pi, n_phi)
# theta_list = np.linspace(0, np.pi, n_theta)
# max_t, rtol, atol, eval_dt, update_fun = 10000, 1e-9, 1e-12, 1e-3, '5bs'
# calculate_fun = 'do_calculate_ecoli_passive_AvrPetsc4n'
# table_name = 'planeShearRatex_1d_passive_avr'
# omega_tail = 0
# n_theta, n_phi = 24, 48 # num of cpus and num of jobs
# job_dir = 'ecoliB01Avr_passive_a'
# write_pbs_head = spf.write_pbs_head_newturb
# phi_list = np.linspace(0, 2 * np.pi, n_phi)
# theta_list = np.linspace(0, np.pi, n_theta)
# max_t, rtol, atol, eval_dt, update_fun = 2000, 1e-6, 1e-9, 1e-3, '5bs'
# calculate_fun = 'do_calculate_ecoli_Petsc4n'
# table_name = 'planeShearRatex_1d_passive_avr'
# omega_tail = 0
# n_theta, n_phi = 24, 48 # num of cpus and num of jobs
# job_dir = 'ecoliB01Avr_passive_a'
# write_pbs_head = spf.write_pbs_head_newturb
# phi_list = np.linspace(0, 2 * np.pi, n_phi)
# theta_list = np.linspace(0, np.pi, n_theta)
# max_t, rtol, atol, eval_dt, update_fun = 10000, 1e-6, 1e-9, 1e-3, '5bs'
# calculate_fun = 'do_calculate_helix_Petsc4n'
# table_name = 'hlxC01_tau1a'
# omega_tail = 0
# n_theta, n_phi = 24, 48 # num of cpus and num of jobs
# job_dir = 'hlxC01_a_psi-0'
# write_pbs_head = spf.write_pbs_head
# ini_psi_type = 0
# phi_list = np.linspace(0, 2 * np.pi, n_phi)
# theta_list = np.linspace(0, np.pi, n_theta)
# max_t, rtol, atol, eval_dt, update_fun = 10000, 1e-12, 1e-15, 1e-3, '5bs'
# calculate_fun = 'do_calculate_helix_Petsc4n'
# table_name = 'hlxC01_tau1a'
# omega_tail = 0
# n_theta, n_phi = 24, 48 # num of cpus and num of jobs
# job_dir = 'hlxC01_a_psi-1.57'
# write_pbs_head = spf.write_pbs_head_newturb
# ini_psi_type = np.pi / 2
# phi_list = np.linspace(0, 2 * np.pi, n_phi)
# theta_list = np.linspace(0, np.pi, n_theta)
# max_t, rtol, atol, eval_dt, update_fun = 10000, 1e-12, 1e-15, 1e-3, '5bs'
# calculate_fun = 'do_calculate_helix_Petsc4n'
# table_name = 'hlxC01_tau1a'
# omega_tail = 0
# job_dir = 'hlxC01_a_psi-0b'
# write_pbs_head = spf.write_pbs_head_newturb
# ini_psi_type = 0
# phi_list = np.linspace(0, 2 * np.pi, 200)[0::2]
# theta_list = np.linspace(0, np.pi, 10)
# max_t, rtol, atol, eval_dt, update_fun = 10000, 1e-12, 1e-15, 1e-3, '5bs'
# calculate_fun = 'do_calculate_helix_Petsc4n'
# table_name = 'hlxC01_tau1a'
# omega_tail = 0
# job_dir = 'hlxC01_a_psi-0c'
# write_pbs_head = spf.write_pbs_head_newturb
# ini_psi_type = 0
# phi_list = np.linspace(0, 2 * np.pi, 200)[1::2]
# theta_list = np.linspace(0, np.pi, 10)
# max_t, rtol, atol, eval_dt, update_fun = 20000, 1e-12, 1e-15, 1e-3, '5bs'
# calculate_fun = 'do_calculate_helix_Petsc4n'
# table_name = 'hlxC01_tau1a'
# omega_tail = 0
# job_dir = 'hlxC01_a_psi-0d'
# write_pbs_head = spf.write_pbs_head_newturb
# ini_psi_type = 0
# phi_list = np.linspace(14 / 8 * np.pi, 18 / 8 * np.pi, 100) % (2 * np.pi)
# print(phi_list)
# theta_list = np.linspace(0, np.pi, 24)
# max_t, rtol, atol, eval_dt, update_fun = 1000, 1e-6, 1e-9, 1e-3, '5bs'
# calculate_fun = 'do_calculate_helix_Petsc4n'
# table_name = 'ecoC01B05_tau1c_passive'
# omega_tail = 0
# job_dir = 'ecoC01B05_passive_psi-0a'
# write_pbs_head = spf.write_pbs_head_newturb
# ini_psi_type = 0
# phi_list = np.linspace(0 * np.pi, 2 * np.pi, 24, endpoint=False) % (2 * np.pi)
# theta_list = np.linspace(0, np.pi, 23)
# max_t, rtol, atol, eval_dt, update_fun = 1000, 1e-6, 1e-9, 1e-3, '5bs'
# calculate_fun = 'do_calculate_ecoli_Petsc4n'
# table_name = 'ecoC01B05_tau1c'
# omega_tail = 193.66659814
# job_dir = 'ecoC01B05_psi-0a'
# write_pbs_head = spf.write_pbs_head_newturb
# ini_psi_type = 0
# phi_list = np.linspace(0 * np.pi, 2 * np.pi, 24, endpoint=False) % (2 * np.pi)
# theta_list = np.linspace(0, np.pi, 23)
# max_t, rtol, atol, eval_dt, update_fun = 2000, 1e-9, 1e-12, 1e-3, '5bs'
# calculate_fun = 'do_calculate_helix_Petsc4n'
# table_name = 'ecoC01B05_tau1c_passive'
# omega_tail = 0
# job_dir = 'ecoC01B05_passive_psi-0b'
# write_pbs_head = spf.write_pbs_head_newturb
# ini_psi_type = 0
# phi_list = np.linspace(0 * np.pi, 2 * np.pi, 24, endpoint=False) % (2 * np.pi)
# theta_list = np.linspace(0, np.pi, 23)
# max_t, rtol, atol, eval_dt, update_fun = 2000, 1e-12, 1e-15, 1e-3, '5bs'
# calculate_fun = 'do_calculate_helix_Petsc4n'
# table_name = 'ecoC01B05_tau1c_passive'
# omega_tail = 0
# job_dir = 'ecoC01B05_passive_psi-0c'
# write_pbs_head = spf.write_pbs_head_newturb
# ini_psi_type = 0
# phi_list = np.linspace(0 * np.pi, 2 * np.pi, 24, endpoint=False) % (2 * np.pi)
# theta_list = np.linspace(0, np.pi, 23)
# max_t, rtol, atol, eval_dt, update_fun = 4000, 1e-12, 1e-15, 1e-3, '5bs'
# calculate_fun = 'do_calculate_helix_Petsc4n'
# table_name = 'ecoC01B05_tau1c_passive'
# omega_tail = 0
# n_theta, n_phi = 24, 48 # num of cpus and num of jobs
# job_dir = 'ecoC01B05_passive_psi-0d'
# write_pbs_head = spf.write_pbs_head_newturb
# ini_psi_type = 0
# phi_list = np.linspace(0, 2 * np.pi, n_phi)
# theta_list = np.linspace(0, np.pi, n_theta)
# max_t, rtol, atol, eval_dt, update_fun = 20000, 1e-9, 1e-12, 1e-3, '5bs'
# calculate_fun = 'do_calculate_helix_Petsc4n'
# table_name = 'hlxC01_tau1a'
# omega_tail = 0
# n_theta, n_phi = 24, 48 # num of cpus and num of jobs
# job_dir = 'hlxC01_a_psi-0e'
# write_pbs_head = spf.write_pbs_head_newturb
# ini_psi_type = 0
# phi_list = np.linspace(0, 2 * np.pi, n_phi)
# theta_list = np.linspace(0, np.pi, n_theta)
# max_t, rtol, atol, eval_dt, update_fun = 2000, 1e-12, 1e-15, 1e-3, '5bs'
# calculate_fun = 'do_calculate_ecoli_Petsc4n'
# table_name = 'ecoC01B03_tau1c'
# omega_tail = 193.66659814
# n_theta, n_phi = 24, 48 # num of cpus and num of jobs
# job_dir = 'ecoC01B03_psi-0a'
# write_pbs_head = spf.write_pbs_head_newturb
# ini_psi_type = 0
# phi_list = np.linspace(0, 2 * np.pi, n_phi)
# theta_list = np.linspace(0, np.pi, n_theta)
# max_t, rtol, atol, eval_dt, update_fun = 2000, 1e-9, 1e-12, 1e-3, '4'
# calculate_fun = 'do_calculate_helix_Petsc4n'
# table_name = 'hlxC01_tau1a'
# omega_tail = 0
# n_theta, n_phi = 24, 20 # num of cpus and num of jobs
# job_dir = 'hlxC01_a_psi-0f'
# write_pbs_head = spf.write_pbs_head
# ini_psi_type = 0
# theta_list = np.linspace(0, np.pi, n_theta)[::2]
# phi_list = np.linspace(0, 2 * np.pi, n_phi)
# max_t, rtol, atol, eval_dt, update_fun = 2000, 1e-9, 1e-12, 1e-4, '4'
# calculate_fun = 'do_calculate_helix_Petsc4n'
# table_name = 'hlxC01_tau1a'
# omega_tail = 0
# n_theta, n_phi = 24, 20 # num of cpus and num of jobs
# job_dir = 'hlxC01_a_psi-0g'
# write_pbs_head = spf.write_pbs_head
# ini_psi_type = 0
# theta_list = np.linspace(0, np.pi, n_theta)[::2]
# phi_list = np.linspace(0, 2 * np.pi, n_phi)[::2]
# max_t, rtol, atol, eval_dt, update_fun = 2000, 1e-12, 1e-15, 1e-3, '5bs'
# calculate_fun = 'do_calculate_ecoli_Petsc4n'
# table_name = 'ecoC01B01_tau1c'
# job_dir = 'ecoC01B01_psi-0a'
# omega_tail = 193.66659814
# n_theta, n_phi = 24, 48 # num of cpus and num of jobs
# write_pbs_head = spf.write_pbs_head_newturb
# ini_psi_type = 0
# phi_list = np.linspace(0, 2 * np.pi, n_phi)
# theta_list = np.linspace(0, np.pi, n_theta)
# max_t, rtol, atol, eval_dt, update_fun = 3000, 1e-12, 1e-15, 1e-3, '5bs'
# calculate_fun = 'do_calculate_ecoli_Petsc4n'
# table_name = 'ecoC01B05_T10'
# job_dir = 'ecoC01B05_T10_psi-0a'
# omega_tail = 10
# n_theta, n_phi = 24, 48 # num of cpus and num of jobs
# write_pbs_head = spf.write_pbs_head
# ini_psi_type = 0
# phi_list = np.linspace(0, 2 * np.pi, n_phi)
# theta_list = np.linspace(0, np.pi, n_theta)
# max_t, rtol, atol, eval_dt, update_fun = 2000, 1e-12, 1e-15, 1e-3, '5bs'
# calculate_fun = 'do_calculate_ecoli_Petsc4n'
# table_name = 'ecoC01B05_T1'
# job_dir = 'ecoC01B05_T1_psi-0a'
# omega_tail = 1
# n_theta, n_phi = 24, 48 # num of cpus and num of jobs
# write_pbs_head = spf.write_pbs_head_newturb
# ini_psi_type = 0
# phi_list = np.linspace(0, 2 * np.pi, n_phi)
# theta_list = np.linspace(0, np.pi, n_theta)
# max_t, rtol, atol, eval_dt, update_fun = 1000, 1e-12, 1e-15, 1e-3, '5bs'
# calculate_fun = 'do_calculate_ecoli_Petsc4n'
# table_name = 'ecoC01B05_T0.001.pickle'
# job_dir = 'ecoC01B05_T0.001_psi-0a'
# omega_tail = 0.001
# n_theta, n_phi = 24, 48 # num of cpus and num of jobs
# write_pbs_head = spf.write_pbs_head_newturb
# ini_psi_type = 0
# phi_list = np.linspace(0, 2 * np.pi, n_phi)
# theta_list = np.linspace(0, np.pi, n_theta)
# max_t, rtol, atol, eval_dt, update_fun = 1000, 1e-12, 1e-15, 1e-3, '5bs'
# calculate_fun = 'do_calculate_ecoli_Petsc4n'
# table_name = 'ecoC01B05_T0.01.pickle'
# job_dir = 'ecoC01B05_T0.01_psi-0a'
# omega_tail = 0.01
# n_theta, n_phi = 24, 48 # num of cpus and num of jobs
# write_pbs_head = spf.write_pbs_head_newturb
# ini_psi_type = 0
# phi_list = np.linspace(0, 2 * np.pi, n_phi)
# theta_list = np.linspace(0, np.pi, n_theta)
# max_t, rtol, atol, eval_dt, update_fun = 3000, 1e-12, 1e-15, 1e-3, '5bs'
# calculate_fun = 'do_calculate_ecoli_Petsc4n'
# table_name = 'ecoC01B05_T0.01.pickle'
# job_dir = 'ecoC01B05_T0.01_psi-0b'
# omega_tail = 0.01
# n_theta, n_phi = 24, 48 # num of cpus and num of jobs
# write_pbs_head = spf.write_pbs_head
# ini_psi_type = 0
# phi_list = np.linspace(0, 2 * np.pi, n_phi)
# theta_list = np.linspace(0, np.pi, n_theta)
# max_t, rtol, atol, eval_dt, update_fun = 5000, 1e-12, 1e-15, 1e-3, '5bs'
# calculate_fun = 'do_calculate_ecoli_Petsc4n'
# table_name = 'ecoC01B05_T0.1.pickle'
# job_dir = 'ecoC01B05_T0.1_psi-0a'
# omega_tail = 0.1
# n_theta, n_phi = 24, 48 # num of cpus and num of jobs
# write_pbs_head = spf.write_pbs_head
# ini_psi_type = 0
# phi_list = np.linspace(0, 2 * np.pi, n_phi)
# theta_list = np.linspace(0, np.pi, n_theta)
# max_t, rtol, atol, eval_dt, update_fun = 1000, 1e-12, 1e-15, 1e-3, '5bs'
# calculate_fun = 'do_calculate_ecoli_Petsc4n'
# table_name = 'ecoC01B05_T0.5.pickle'
# job_dir = 'ecoC01B05_T0.5_psi-0a'
# omega_tail = 0.5
# n_theta, n_phi = 24, 48 # num of cpus and num of jobs
# write_pbs_head = spf.write_pbs_head_newturb
# ini_psi_type = 0
# phi_list = np.linspace(0, 2 * np.pi, n_phi)
# theta_list = np.linspace(0, np.pi, n_theta)
# max_t, rtol, atol, eval_dt, update_fun = 3000, 1e-12, 1e-15, 1e-3, '5bs'
# calculate_fun = 'do_calculate_ecoli_Petsc4n'
# table_name = 'ecoC01B05_T0.5.pickle'
# job_dir = 'ecoC01B05_T0.5_psi-0b'
# omega_tail = 0.5
# n_theta, n_phi = 24, 48 # num of cpus and num of jobs
# write_pbs_head = spf.write_pbs_head
# ini_psi_type = 0
# phi_list = np.linspace(0, 2 * np.pi, n_phi)
# theta_list = np.linspace(0, np.pi, n_theta)
# max_t, rtol, atol, eval_dt, update_fun = 3000, 1e-12, 1e-15, 1e-3, '5bs'
# calculate_fun = 'do_calculate_ecoli_Petsc4n'
# table_name = 'ecoC01B05_tao1_wm0.25.pickle'
# job_dir = 'ecoC01B05_T0.25_psi-0a'
# omega_tail = 0.25
# n_theta, n_phi = 24, 48 # num of cpus and num of jobs
# write_pbs_head = spf.write_pbs_head
# ini_psi_type = 0
# phi_list = np.linspace(0, 2 * np.pi, n_phi)
# theta_list = np.linspace(0, np.pi, n_theta)
# max_t, rtol, atol, eval_dt, update_fun = 3000, 1e-12, 1e-15, 1e-3, '5bs'
# calculate_fun = 'do_calculate_ecoli_Petsc4n'
# table_name = 'ecoC01B05_tao1_wm0.75.pickle'
# job_dir = 'ecoC01B05_T0.75_psi-0a'
# omega_tail = 0.75
# n_theta, n_phi = 24, 48 # num of cpus and num of jobs
# write_pbs_head = spf.write_pbs_head
# ini_psi_type = 0
# phi_list = np.linspace(0, 2 * np.pi, n_phi)
# theta_list = np.linspace(0, np.pi, n_theta)
# max_t, rtol, atol, eval_dt, update_fun = 10000, 1e-12, 1e-15, 1e-3, '5bs'
# calculate_fun = 'do_calculate_ecoli_Petsc4n'
# table_name = 'ecoC01B05_tao1_wm0.75.pickle'
# job_dir = 'ecoC01B05_T0.75_psi-0b'
# omega_tail = 0.75
# n_theta, n_phi = 24, 48 # num of cpus and num of jobs
# write_pbs_head = spf.write_pbs_head_newturb
# ini_psi_type = 0
# phi_list = np.linspace(0, 2 * np.pi, n_phi)
# theta_list = np.linspace(0, np.pi, n_theta)
# Active configuration (the surrounding commented-out blocks are alternative
# parameter sets for other runs): sweep initial (theta, phi) orientations,
# presumably over the unit sphere, with the initial psi fixed at 0.
max_t, rtol, atol, eval_dt, update_fun = 10000, 1e-12, 1e-15, 1e-3, '5bs'
calculate_fun = 'do_calculate_ecoli_Petsc4nPsi'
table_name = 'ecoC01B05_tao1_wm0.75.pickle'
job_dir = 'ecoC01B05_T0.75_psi-0c'
omega_tail = 0.75
n_theta, n_phi = 24, 48  # num of cpus and num of jobs
write_pbs_head = spf.write_pbs_head
ini_psi_type = 0
phi_list = np.linspace(0, 2 * np.pi, n_phi)
theta_list = np.linspace(0, np.pi, n_theta)
# max_t, rtol, atol, eval_dt, update_fun = 5000, 1e-12, 1e-15, 1e-3, '5bs'
# calculate_fun = 'do_calculate_ecoli_Petsc4n'
# table_name = 'ecoC01B05_tao1_wm0.8.pickle'
# job_dir = 'ecoC01B05_T0.8_psi-0a'
# omega_tail = 0.8
# n_theta, n_phi = 24, 48 # num of cpus and num of jobs
# write_pbs_head = spf.write_pbs_head
# ini_psi_type = 0
# phi_list = np.linspace(0, 2 * np.pi, n_phi)
# theta_list = np.linspace(0, np.pi, n_theta)
# max_t, rtol, atol, eval_dt, update_fun = 5000, 1e-12, 1e-15, 1e-3, '5bs'
# calculate_fun = 'do_calculate_ecoli_Petsc4n'
# table_name = 'ecoC01B05_tao1_wm0.9.pickle'
# job_dir = 'ecoC01B05_T0.9_psi-0a'
# omega_tail = 0.9
# n_theta, n_phi = 24, 48 # num of cpus and num of jobs
# write_pbs_head = spf.write_pbs_head
# ini_psi_type = 0
# phi_list = np.linspace(0, 2 * np.pi, n_phi)
# theta_list = np.linspace(0, np.pi, n_theta)
# max_t, rtol, atol, eval_dt, update_fun = 5000, 1e-12, 1e-15, 1e-3, '5bs'
# calculate_fun = 'do_calculate_ecoli_Petsc4n'
# table_name = 'ecoC01B05_tao1_wm0.2.pickle'
# job_dir = 'ecoC01B05_T0.2_psi-0a'
# omega_tail = 0.2
# n_theta, n_phi = 24, 48 # num of cpus and num of jobs
# write_pbs_head = spf.write_pbs_head
# ini_psi_type = 0
# phi_list = np.linspace(0, 2 * np.pi, n_phi)
# theta_list = np.linspace(0, np.pi, n_theta)
# max_t, rtol, atol, eval_dt, update_fun = 5000, 1e-12, 1e-15, 1e-3, '5bs'
# calculate_fun = 'do_calculate_ecoli_Petsc4n'
# table_name = 'ecoC01B05_tao1_wm0.3.pickle'
# job_dir = 'ecoC01B05_T0.3_psi-0a'
# omega_tail = 0.3
# n_theta, n_phi = 24, 48 # num of cpus and num of jobs
# write_pbs_head = spf.write_pbs_head
# ini_psi_type = 0
# phi_list = np.linspace(0, 2 * np.pi, n_phi)
# theta_list = np.linspace(0, np.pi, n_theta)
# max_t, rtol, atol, eval_dt, update_fun = 5000, 1e-12, 1e-15, 1e-3, '5bs'
# calculate_fun = 'do_calculate_ecoli_Petsc4n'
# table_name = 'ecoC01B05_tao1_wm0.4.pickle'
# job_dir = 'ecoC01B05_T0.4_psi-0a'
# omega_tail = 0.4
# n_theta, n_phi = 24, 48 # num of cpus and num of jobs
# write_pbs_head = spf.write_pbs_head
# ini_psi_type = 0
# phi_list = np.linspace(0, 2 * np.pi, n_phi)
# theta_list = np.linspace(0, np.pi, n_theta)
# max_t, rtol, atol, eval_dt, update_fun = 5000, 1e-12, 1e-15, 1e-3, '5bs'
# calculate_fun = 'do_calculate_ecoli_Petsc4n'
# table_name = 'ecoC01B05_tao1_wm0.6.pickle'
# job_dir = 'ecoC01B05_T0.6_psi-0a'
# omega_tail = 0.6
# n_theta, n_phi = 24, 48 # num of cpus and num of jobs
# write_pbs_head = spf.write_pbs_head
# ini_psi_type = 0
# phi_list = np.linspace(0, 2 * np.pi, n_phi)
# theta_list = np.linspace(0, np.pi, n_theta)
# max_t, rtol, atol, eval_dt, update_fun = 5000, 1e-12, 1e-15, 1e-3, '5bs'
# calculate_fun = 'do_calculate_ecoli_Petsc4n'
# table_name = 'ecoC01B05_tao1_wm0.7.pickle'
# job_dir = 'ecoC01B05_T0.7_psi-0a'
# omega_tail = 0.7
# n_theta, n_phi = 24, 48 # num of cpus and num of jobs
# write_pbs_head = spf.write_pbs_head
# ini_psi_type = 0
# phi_list = np.linspace(0, 2 * np.pi, n_phi)
# theta_list = np.linspace(0, np.pi, n_theta)
# max_t, rtol, atol, eval_dt, update_fun = 3000, 1e-12, 1e-15, 1e-3, '5bs'
# calculate_fun = 'do_calculate_ecoli_Petsc4n'
# table_name = 'ecoC01B05_tao1_wm100.pickle'
# job_dir = 'ecoC01B05_T100_psi-0a'
# omega_tail = 100
# n_theta, n_phi = 24, 48 # num of cpus and num of jobs
# write_pbs_head = spf.write_pbs_head
# ini_psi_type = 0
# phi_list = np.linspace(0, 2 * np.pi, n_phi)
# theta_list = np.linspace(0, np.pi, n_theta)
# max_t, rtol, atol, eval_dt, update_fun = 2000, 1e-9, 1e-12, 1e-3, '5bs'
# calculate_fun = 'do_calculate_ecoli_Petsc4n'
# table_name = 'ecoC01B05_tao1_wm100.pickle'
# job_dir = 'ecoC01B05_T100_psi-0b'
# omega_tail = 100
# n_theta, n_phi = 24, 48 # num of cpus and num of jobs
# write_pbs_head = spf.write_pbs_head
# ini_psi_type = 0
# phi_list = np.linspace(0, 2 * np.pi, n_phi)
# theta_list = np.linspace(0, np.pi, n_theta)
# max_t, rtol, atol, eval_dt, update_fun = 2000, 1e-9, 1e-12, 1e-3, '5bs'
# calculate_fun = 'do_calculate_ecoli_Petsc4n'
# table_name = 'ecoC01B05_tao1_wm100.pickle'
# job_dir = 'ecoC01B05_T100_psi-0b'
# omega_tail = 100
# n_theta, n_phi = 24, 48 # num of cpus and num of jobs
# write_pbs_head = spf.write_pbs_head
# ini_psi_type = 0
# phi_list = np.linspace(0, 2 * np.pi, n_phi)
# theta_list = np.linspace(0, np.pi, n_theta)
# Generate one PBS job script per initial phi angle, plus a run.sh that
# submits them all with qsub.
t_path = os.path.join(PWD, job_dir)
# Race-free replacement for the exists()/makedirs() pair.
os.makedirs(t_path, exist_ok=True)
t_name0 = os.path.join(t_path, 'run.sh')
n_pbs = 0  # number of .pbs files generated
with open(t_name0, 'w') as frun:
    frun.write('t_dir=$PWD \n')
    for ini_phi in phi_list:
        job_name = 'ph%5.3f' % ini_phi
        t_name = os.path.join(t_path, '%s.pbs' % job_name)
        with open(t_name, 'w') as fpbs:
            write_pbs_head(fpbs, job_name)
            for i0, ini_theta in enumerate(theta_list):
                # BUG FIX: `ini_psi_type is 'rand'` compared object identity,
                # not value, and is never reliably true; use equality.
                ini_psi = np.random.sample(1) if ini_psi_type == 'rand' else ini_psi_type
                ts = comm_do_calculate(ini_theta, ini_phi, ini_psi, max_t, rtol, atol, eval_dt,
                                       calculate_fun, update_fun, table_name, omega_tail, i0)
                fpbs.write(ts)
            fpbs.write('wait \n\n')
        frun.write('qsub %s.pbs\n\n' % job_name)
        n_pbs += 1
    frun.write('\n')
print(t_path)
print('n_pbs = ', n_pbs)
# Notebook scratch arithmetic, preserved as-is.
10000 / 12
61 * 12 / 10000
```
| github_jupyter |
# Training Neural Networks with Keras
### Goals:
- Intro: train a neural network with high level framework `Keras`
### Dataset:
- Digits: 10 class handwritten digits
- http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_digits.html#sklearn.datasets.load_digits
```
%matplotlib inline
# display figures in the notebook
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_digits
digits = load_digits()
sample_index = 45
plt.figure(figsize=(3, 3))
plt.imshow(digits.images[sample_index], cmap=plt.cm.gray_r,
interpolation='nearest')
plt.title("image label: %d" % digits.target[sample_index]);
```
### Preprocessing
- normalization
- train/test split
```
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
data = np.asarray(digits.data, dtype='float32')
target = np.asarray(digits.target, dtype='int32')
X_train, X_test, y_train, y_test = train_test_split(
data, target, test_size=0.15, random_state=37)
# mean = 0 ; standard deviation = 1.0
scaler = preprocessing.StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
# print(scaler.mean_)
# print(scaler.scale_)
```
Let's display one of the transformed samples (after feature standardization):
```
sample_index = 45
plt.figure(figsize=(3, 3))
plt.imshow(X_train[sample_index].reshape(8, 8),
cmap=plt.cm.gray_r, interpolation='nearest')
plt.title("transformed sample\n(standardization)");
```
The scaler object makes it possible to recover the original sample:
```
plt.figure(figsize=(3, 3))
plt.imshow(scaler.inverse_transform(X_train[sample_index]).reshape(8, 8),
cmap=plt.cm.gray_r, interpolation='nearest')
plt.title("original sample");
print(X_train.shape, y_train.shape)
print(X_test.shape, y_test.shape)
```
# I) Feed Forward NN with Keras
Objectives of this section:
- Build and train a first feedforward network using `Keras`
- https://keras.io/getting-started/sequential-model-guide/
- Experiment with different optimizers, activations, size of layers, initializations
### a) Keras Workflow
To build a first neural network we need to turn the target variable into a vector "one-hot-encoding" representation. Here are the labels of the first samples in the training set encoded as integers:
```
y_train[:3]
```
Keras provides a utility function to convert integer-encoded categorical variables as one-hot encoded values:
```
import keras
from keras.utils.np_utils import to_categorical
Y_train = to_categorical(y_train)
Y_train[:3]
```
We can now build and train our first feed-forward neural network using the high-level API from Keras:
- first we define the model by stacking layers with the right dimensions
- then we define a loss function and plug the SGD optimizer
- then we feed the model the training data for fixed number of epochs
```
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras import optimizers

# Layer sizes: N input features, one hidden layer of H units, K output classes.
N = X_train.shape[1]
H = 100
K = 10

# Single-hidden-layer MLP: tanh hidden activation, softmax output.
model = Sequential()
model.add(Dense(H, input_dim=N))
model.add(Activation("tanh"))
model.add(Dense(K))
model.add(Activation("softmax"))

# NOTE(review): `lr` is the legacy keyword for plain SGD; newer Keras releases
# use `learning_rate` -- confirm against the installed Keras version.
model.compile(optimizer=optimizers.SGD(lr=0.1),
              loss='categorical_crossentropy', metrics=['accuracy'])

model.fit(X_train, Y_train, epochs=15, batch_size=32);
```
### b) Exercises: Impact of the Optimizer
- Try to decrease the learning rate value by 10 or 100. What do you observe?
- Try to increase the learning rate value to make the optimization diverge.
- Configure the SGD optimizer to enable a Nesterov momentum of 0.9
Note that the Keras API documentation is available at:
https://keras.io/
It is also possible to learn more about the parameters of a class by using the question mark: type and evaluate:
```python
optimizers.SGD?
```
in a jupyter notebook cell.
```
# %load solutions/keras_sgd_and_momentum.py
```
- Replace the SGD optimizer by the Adam optimizer from keras and run it
with the default parameters.
- Add another hidden layer and use the "Rectified Linear Unit" for each
hidden layer. Can you still train the model with Adam with its default global
learning rate?
- Bonus: try the Adadelta optimizer (no learning rate to set).
Hint: use `optimizers.<TAB>` to tab-complete the list of implemented optimizers in Keras.
```
# %load solutions/keras_adam_and_adadelta.py
```
### c) Exercises: forward pass and generalization
- Compute predictions on test set using `model.predict_classes(...)`
- Compute average accuracy of the model on the test set
```
# %load solutions/keras_accuracy_on_test_set.py
```
## d) Home assignment: impact of initialization
Let us now study the impact of a bad initialization when training
a deep feed forward network.
By default Keras dense layers use the "Glorot Uniform" initialization
strategy to initialize the weight matrices:
- each weight coefficient is randomly sampled from [-scale, scale]
- scale is proportional to $\frac{1}{\sqrt{n_{in} + n_{out}}}$
This strategy is known to work well to initialize deep neural networks
with "tanh" or "relu" activation functions and then trained with
standard SGD.
To assess the impact of initialization let us plug an alternative init
scheme into a 2 hidden layers networks with "tanh" activations.
For the sake of the example let's use normal distributed weights
with a manually adjustable scale (standard deviation) and see the
impact the scale value:
```
from keras import initializers

# Deliberately small-scale normal initializer, to demonstrate the effect of a
# bad init on training.
normal_init = initializers.RandomNormal(stddev=0.01)

# Two tanh hidden layers followed by softmax.  Note the second hidden layer is
# sized K (the number of classes), not H -- presumably intentional for this
# pedagogical example, but worth confirming.
model = Sequential()
model.add(Dense(H, input_dim=N, kernel_initializer=normal_init))
model.add(Activation("tanh"))
model.add(Dense(K, kernel_initializer=normal_init))
model.add(Activation("tanh"))
model.add(Dense(K, kernel_initializer=normal_init))
model.add(Activation("softmax"))

# NOTE(review): legacy `lr` keyword; newer Keras uses `learning_rate`.
model.compile(optimizer=optimizers.SGD(lr=0.1),
              loss='categorical_crossentropy')

history = model.fit(X_train, Y_train,
                    epochs=10, batch_size=32)
```
#### Questions:
- Try the following initialization schemes and see whether
the SGD algorithm can successfully train the network or
not:
- a very small e.g. `scale=1e-3`
- a larger scale e.g. `scale=1` or `10`
- initialize all weights to 0 (constant initialization)
- What do you observe? Can you find an explanation for those
outcomes?
- Are better solvers such as SGD with momentum or Adam able
to deal better with such bad initializations?
```
# %load solutions/keras_initializations.py
# %load solutions/keras_initializations_analysis.py
```
| github_jupyter |
# Convolutional Neural Network on pixel neighborhoods
This notebook reads the pixel-neighborhood data written out by the Dataflow program of [1_explore.ipynb](./1_explore.ipynb) and trains a simple convnet model on Cloud ML Engine.
```
BUCKET = 'cloud-training-demos-ml'
PROJECT = 'cloud-training-demos'
REGION = 'us-central1'
import os
os.environ['BUCKET'] = BUCKET
os.environ['PROJECT'] = PROJECT
os.environ['REGION'] = REGION
%%bash
gcloud config set project $PROJECT
gcloud config set compute/region $REGION
%%bash
if ! gsutil ls | grep -q gs://${BUCKET}/; then
gsutil mb -l ${REGION} gs://${BUCKET}
fi
```
## Train CNN model locally
```
%%bash
OUTDIR=${PWD}/cnn_trained
DATADIR=${PWD}/preproc/tfrecord
rm -rf $OUTDIR
gcloud ml-engine local train \
--module-name=trainer.train_cnn --package-path=${PWD}/ltgpred/trainer \
-- \
--train_steps=5 --num_eval_records=512 --train_batch_size=1 --num_cores=1 \
--job-dir=$OUTDIR --train_data_path=${DATADIR}/train* --eval_data_path=${DATADIR}/eval*
```
## Training the lightning prediction model on CMLE using GPUs
Let's train on a machine with 4 GPUs.
```
%writefile largemachine.yaml
trainingInput:
scaleTier: CUSTOM
masterType: complex_model_m_gpu
%%bash
OUTDIR=gs://${BUCKET}/lightning/cnn_trained_gpu
DATADIR=gs://$BUCKET/lightning/preproc/tfrecord
JOBNAME=ltgpred_cnn_$(date -u +%y%m%d_%H%M%S)
gsutil -m rm -rf $OUTDIR
gcloud ml-engine jobs submit training $JOBNAME \
--module-name=trainer.train_cnn --package-path=${PWD}/ltgpred/trainer --job-dir=$OUTDIR \
--region=${REGION} --scale-tier=CUSTOM --config=largemachine.yaml \
--python-version=3.5 --runtime-version=1.8 \
-- \
--train_data_path=${DATADIR}/train-* --eval_data_path=${DATADIR}/eval* \
--train_steps=10000 --train_batch_size=256 \
--num_eval_records=128000 --nlayers=5 --dprob=0.05 --ksize=3 --nfil=100
```
The training completed after 20 minutes with this result:
<pre>
Eval results at step 10000: {'global_step': 10000, 'rmse': 0.49927762, 'accuracy': 0.6623125, 'loss': 0.6917048}
</pre>
Training longer (100,000 steps, for 2 hours) didn't seem to do much:
<pre>
Eval results at step 100000: {'loss': 0.6889524, 'accuracy': 0.6721641, 'global_step': 100000, 'rmse': 0.4978987}
</pre>
Increasing the number of layers from 3 to 5 and lowering the dropout to 0.05 didn't help either.
## Training lightning prediction model on CMLE using TPUs
```
%%bash
OUTDIR=gs://${BUCKET}/lightning/cnn_trained
DATADIR=gs://$BUCKET/lightning/preproc/tfrecord
JOBNAME=ltgpred_cnn_$(date -u +%y%m%d_%H%M%S)
gsutil -m rm -rf $OUTDIR
gcloud ml-engine jobs submit training $JOBNAME \
--module-name=trainer.train_cnn --package-path=${PWD}/ltgpred/trainer --job-dir=$OUTDIR \
--region=${REGION} --scale-tier=BASIC_TPU \
--python-version=3.5 --runtime-version=1.8 \
-- \
--train_data_path=${DATADIR}/train* --eval_data_path=${DATADIR}/eval* \
--train_steps=10000 --train_batch_size=1024 \
--num_eval_records=128000 --nlayers=0 --num_cores=32 --use_tpu
```
# When I ran it, training finished with accuracy=???
Copyright 2018 Google Inc. Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License
| github_jupyter |
# Title
**Exercise: B.2 - Best Degree of Polynomial using Cross-validation**
# Description
The aim of this exercise is to find the **best degree** of polynomial based on the MSE values. Further, plot the train and cross-validation error graphs as shown below.
<img src="../img/image3.png" style="width: 500px;">
# Instructions:
- Read the dataset and split into train and validation sets
- Select a max degree value for the polynomial model
- For each degree:
- Perform k-fold cross validation
- Fit a polynomial regression model for each degree to the training data and predict on the validation data
- Compute the train, validation and cross-validation error as MSE values and store in separate lists.
- Print the best degree of the model for both validation and cross-validation approaches.
- Plot the train and cross-validation errors for each degree.
# Hints:
<a href="https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html" target="_blank">pd.read_csv(filename)</a> : Returns a pandas dataframe containing the data and labels from the file data
<a href="https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html" target="_blank">sklearn.train_test_split()</a> : Splits the data into random train and test subsets.
<a href="https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.PolynomialFeatures.html" target="_blank">sklearn.PolynomialFeatures()</a> : Generates a new feature matrix consisting of all polynomial combinations of the features with degree less than or equal to the specified degree
<a href="https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.cross_validate.html" target="_blank">sklearn.cross_validate()</a> : Evaluate metric(s) by cross-validation and also record fit/score times.
<a href="https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html" target="_blank">sklearn.fit_transform()</a> : Fits transformer to X and y with optional parameters fit_params and returns a transformed version of X
<a href="https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html" target="_blank">sklearn.LinearRegression()</a> : LinearRegression fits a linear model
<a href="https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html#sklearn.linear_model.LinearRegression.fit" target="_blank">sklearn.fit()</a> : Fits the linear model to the training data
<a href="https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html#sklearn.linear_model.LinearRegression.predict" target="_blank">sklearn.predict()</a> : Predict using the linear model.
<a href="https://matplotlib.org/3.2.1/api/_as_gen/matplotlib.pyplot.subplots.html" target="_blank">plt.subplots()</a> : Create a figure and a set of subplots
<a href="https://docs.python.org/3/library/operator.html" target="_blank">operator.itemgetter()</a> : Return a callable object that fetches item from its operand
<a href="https://docs.python.org/3.3/library/functions.html#zip" target="_blank">zip()</a> : Makes an iterator that aggregates elements from each of the iterables.
**Note: This exercise is auto-graded and you can try multiple attempts.**
```
#import libraries
%matplotlib inline
import operator
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import cross_validate
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
```
### Reading the dataset
```
#Read the file "dataset.csv" as a dataframe
filename = "dataset.csv"
df = pd.read_csv(filename)
# Assign the values to the predictor and response variables
x = df[['x']].values
y = df.y.values
```
### Train-validation split
```
### edTest(test_random) ###
#Split the data into train and validation sets with 75% for training and with a random_state=1
x_train, x_val, y_train, y_val = train_test_split(___)
```
### Computing the MSE
```
### edTest(test_regression) ###
# To iterate over the range, select the maximum degree of the polynomial
maxdeg = 10
# Create three empty lists to store training, validation and cross-validation MSEs
training_error, validation_error, cross_validation_error = [],[],[]
#Run a for loop through the degrees of the polynomial, fit linear regression, predict y values and calculate the training and testing errors and update it to the list
for d in range(___):
#Compute the polynomial features for the entire data, train data and validation data
x_poly_train = PolynomialFeatures(___).fit_transform(___)
x_poly_val = PolynomialFeatures(___).fit_transform(___)
x_poly = PolynomialFeatures(___).fit_transform(___)
#Get a Linear Regression object
lreg = LinearRegression()
#Perform cross-validation on the entire data with 10 folds and get the mse_scores
mse_score = cross_validate(___)
#Fit model on the training set
lreg.fit(___)
#Predict of the training and validation set
y_train_pred = lreg.predict(___)
y_val_pred = lreg.predict(___)
#Compute the train and validation MSE
training_error.append(mean_squared_error(___))
validation_error.append(mean_squared_error(___))
#Compute the mean of the cross validation error and store in list
#Remember to take into account the sign of the MSE metric returned by the cross_validate function
cross_validation_error.append(___)
```
### Finding the best degree
```
### edTest(test_best_degree) ###
#The best degree with the lowest validation error
min_mse = min(___)
best_degree = validation_error.index(___)
#The best degree with the lowest cross-validation error
min_cross_val_mse = min(___)
best_cross_val_degree = cross_validation_error.index(___)
print("The best degree of the model using validation is",best_degree)
print("The best degree of the model using cross-validation is",best_cross_val_degree)
```
### Plotting the error graph
```
# Plot the errors as a function of increasing d value to visualise the training and validation errors
fig, ax = plt.subplots()

# Plot the training error with labels
ax.plot(range(maxdeg), training_error, label = 'Training error')

# Plot the cross-validation error with labels
ax.plot(range(maxdeg), cross_validation_error, label = 'Cross-Validation error')

# Set the plot labels and legends
ax.set_xlabel('Degree of Polynomial')
ax.set_ylabel('Mean Squared Error')
ax.legend(loc = 'best')
# Log scale: the MSE spans several orders of magnitude across degrees.
ax.set_yscale('log')
plt.show()
```
#### Once you have marked your exercise, run again with Random_state = 0
#### Do you see any change in the results with change in the random state? If so, what do you think is the reason behind it?
Your answer here
| github_jupyter |
```
# from google.colab import drive
# drive.mount('/content/drive')
# # !pip install torch
import torch.nn as nn
import torch.nn.functional as F
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import torch
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
import torch.optim as optim
from matplotlib import pyplot as plt
import random
import copy
import pickle
# Ignore warnings
import warnings
warnings.filterwarnings("ignore")
from numpy import linalg as LA
from tabulate import tabulate
# Ignore warnings
import warnings
warnings.filterwarnings("ignore")
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)
gamma = 0.05
gamma
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
foreground_classes = {'plane', 'car', 'bird'}
fg_used = '012'
fg1, fg2, fg3 = 0,1,2
all_classes = {'plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'}
background_classes = all_classes - foreground_classes
background_classes
trainloader = torch.utils.data.DataLoader(trainset, batch_size=10, shuffle=False)
testloader = torch.utils.data.DataLoader(testset, batch_size=10, shuffle=False)
# Split the (clean) CIFAR-10 train set into foreground (plane/car/bird)
# and background images, keeping the raw class labels.
dataiter = iter(trainloader)
true_train_background_data = []
true_train_background_label = []
true_train_foreground_data = []
true_train_foreground_label = []
batch_size = 10
for i in range(5000):  # 5000 batches * 10 = all 50000 training images
    # BUG FIX: Python-3 iterators have no .next() method (removed from modern
    # PyTorch DataLoader iterators as well); use the builtin next().
    images, labels = next(dataiter)
    for j in range(batch_size):
        if classes[labels[j]] in background_classes:
            true_train_background_data.append(images[j].tolist())
            true_train_background_label.append(labels[j])
        else:
            true_train_foreground_data.append(images[j].tolist())
            true_train_foreground_label.append(labels[j])
true_train_foreground_data = torch.tensor(true_train_foreground_data)
true_train_foreground_label = torch.tensor(true_train_foreground_label)
true_train_background_data = torch.tensor(true_train_background_data)
true_train_background_label = torch.tensor(true_train_background_label)
true_train = trainset.data
train_label = trainset.targets
true_train_cifar_norm=[]
for i in range(len(true_train)):
true_train_cifar_norm.append(LA.norm(true_train[i]))
len(true_train_cifar_norm)
def plot_hist(values):
    """Plot a normalized (density) histogram of the given norm values."""
    plt.hist(values, density=True, bins=200)  # `density=False` would make counts
    # BUG FIX: the axis labels were swapped -- the norm values lie on the
    # x axis; the y axis carries the normalized frequency.
    plt.xlabel('NORM')
    plt.ylabel('Density');
plot_hist(true_train_cifar_norm)
true_train.shape
train = np.reshape(true_train, (50000,3072))
train.shape, true_train.shape
u, s, vh = LA.svd(train, full_matrices= False)
u.shape , s.shape, vh.shape
s
vh
dir = vh[0:10,:]
dir
u1 = dir[0,:]
u2 = dir[1,:]
u3 = dir[2,:]
u1
u2
u3
len(train_label)
def is_equal(x1, x2):
    """Return the number of positions at which *x1* and *x2* agree.

    Both sequences are assumed to have the same length (the original
    indexed by len(x1); zip preserves that behavior for equal lengths).
    """
    return sum(1 for a, b in zip(x1, x2) if a == b)
def add_noise_cifar(train, label, gamma, fg1, fg2, fg3):
    """Perturb foreground-class rows of *train* along the top singular vectors.

    Each flattened image whose label is fg1/fg2/fg3 is shifted (in place) by
    gamma * ||image|| along the module-global direction u1/u2/u3 respectively.
    Background-class rows are left untouched.

    Returns the mutated *train* array.
    """
    cnt = 0  # number of images modified
    for i in range(len(label)):
        # The three classes are mutually exclusive, so elif is equivalent to
        # the original's three independent ifs.  The unused x/y temporaries
        # of the original are dropped.
        if label[i] == fg1:
            train[i] = train[i] + gamma * LA.norm(train[i]) * u1
            cnt += 1
        elif label[i] == fg2:
            train[i] = train[i] + gamma * LA.norm(train[i]) * u2
            cnt += 1
        elif label[i] == fg3:
            train[i] = train[i] + gamma * LA.norm(train[i]) * u3
            cnt += 1
    print("total modified", cnt)
    return train
noise_train = np.reshape(true_train, (50000,3072))
noise_train = add_noise_cifar(noise_train, train_label, gamma , fg1,fg2,fg3)
noise_train_cifar_norm=[]
for i in range(len(noise_train)):
noise_train_cifar_norm.append(LA.norm(noise_train[i]))
plt.hist(noise_train_cifar_norm, density=True, bins=200,label='gamma='+str(gamma)) # `density=False` would make counts
plt.hist(true_train_cifar_norm, density=True, bins=200,label='true')
plt.ylabel('NORM')
plt.xlabel('Data')
plt.legend()
print("remain same",is_equal(noise_train_cifar_norm,true_train_cifar_norm))
noise_train.shape, trainset.data.shape
noise_train = np.reshape(noise_train, (50000,32, 32, 3))
noise_train.shape
trainset.data = noise_train
# Re-split the (now noise-perturbed) train set into foreground / background.
dataiter = iter(trainloader)
background_data = []
background_label = []
foreground_data = []
foreground_label = []
batch_size = 10
for i in range(5000):  # 5000 * batch_size = 50000 data points
    # BUG FIX: use the builtin next(); iterator .next() is Python-2 style and
    # absent from modern PyTorch DataLoader iterators.
    images, labels = next(dataiter)
    for j in range(batch_size):
        if classes[labels[j]] in background_classes:
            background_data.append(images[j].tolist())
            background_label.append(labels[j])
        else:
            foreground_data.append(images[j].tolist())
            foreground_label.append(labels[j])
foreground_data = torch.tensor(foreground_data)
foreground_label = torch.tensor(foreground_label)
background_data = torch.tensor(background_data)
background_label = torch.tensor(background_label)
def imshow(img):
    """Undo the [-1, 1] normalization and display *img* (channels-first)."""
    unnormalized = img / 2 + 0.5  # map [-1, 1] back to [0, 1]
    # Move channels last: (C, H, W) -> (H, W, C) for matplotlib.
    plt.imshow(np.transpose(unnormalized, axes=(1, 2, 0)))
    plt.show()
import random
for i in range(10):
random.seed(i)
a = np.random.randint(0,10000)
img1 = torch.cat((true_train_foreground_data[i], foreground_data[i]),2)
imshow(img1)
def plot_vectors(u1, u2, u3):
    """Reshape each 3072-dim direction vector to a 3x32x32 image and show it.

    Prints the norm of each unnormalized image and draws the three vectors in
    separate figures.  The three copy-pasted blocks of the original are folded
    into a single loop.
    """
    for fignum, (vec, name) in enumerate([(u1, 'u1'), (u2, 'u2'), (u3, 'u3')], start=1):
        img = np.reshape(vec, (3, 32, 32))
        img = img / 2 + 0.5  # unnormalize
        print("vector %s norm" % name, LA.norm(img))
        plt.figure(fignum)
        plt.imshow(np.transpose(img, (1, 2, 0)))
        plt.title("vector %s" % name)
    plt.show()
plot_vectors(u1,u2,u3)
def create_mosaic_img(bg_idx,fg_idx,fg):
    """
    Build one 9-tile mosaic: 8 background images plus 1 foreground image.

    bg_idx : list of indexes of background_data[] to be used as background images in mosaic
    fg_idx : index of image to be used as foreground image from foreground data
    fg : at what position/index foreground image has to be stored out of 0-8

    Returns (image_list, label): a stacked (9, 3, 32, 32) double tensor and the
    raw foreground class label (0, 1 or 2 for this run's foreground classes).
    """
    image_list=[]
    j=0  # walks through bg_idx as the 8 background slots are filled
    for i in range(9):
        if i != fg:
            image_list.append(background_data[bg_idx[j]].type("torch.DoubleTensor"))
            j+=1
        else:
            image_list.append(foreground_data[fg_idx].type("torch.DoubleTensor"))
            label = foreground_label[fg_idx]  # NOTE(review): the old "-7" offset comment was stale; labels here are already 0-2
    #image_list = np.concatenate(image_list ,axis=0)
    image_list = torch.stack(image_list)
    return image_list,label
desired_num = 10000
mosaic_list_of_images =[] # list of mosaic images, each mosaic image is saved as list of 9 images
fore_idx =[] # list of indexes at which foreground image is present in a mosaic image i.e from 0 to 9
mosaic_label=[] # label of mosaic image = foreground class present in that mosaic
for i in range(desired_num):
np.random.seed(35000 + i)
bg_idx = np.random.randint(0,35000,8)
np.random.seed(15000 + i)
fg_idx = np.random.randint(0,15000)
# fg = np.random.randint(0,9)
fg = 0
fore_idx.append(fg)
image_list,label = create_mosaic_img(bg_idx,fg_idx,fg)
mosaic_list_of_images.append(image_list)
mosaic_label.append(label)
del foreground_data
del foreground_label
del background_data
del background_label
# path = "/content/drive/My Drive/Research/testing classify on diff focus nets/give weightage to random image/"
# path = "/content/drive/My Drive/Research/Experiments on CIFAR mosaic/Exp_1_Attention_models_on _90k_mosaic_mini_inception/weights/"
def create_avg_image_from_mosaic_dataset(mosaic_dataset, labels, foreground_index, dataset_number):
    """Average the 9 tiles of each mosaic into a single weighted image.

    The tile at foreground_index[i] gets weight dataset_number/9; each of the
    other 8 tiles gets weight (9 - dataset_number)/(8*9), so the weights sum
    to 1 for every mosaic.

    Args:
        mosaic_dataset: sequence where item i holds 9 stacked (3, 32, 32) tiles.
        labels: mosaic labels; returned unchanged.
        foreground_index: position (0-8) of the foreground tile in each mosaic.
        dataset_number: j in 1..9; the foreground weight is j/9.

    Returns:
        (list of averaged (3, 32, 32) float64 tensors, labels, foreground_index)
    """
    avg_image_dataset = []
    counter = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0])  # how often each position was preferred
    for i in range(len(mosaic_dataset)):
        img = torch.zeros([3, 32, 32], dtype=torch.float64)
        # Seeding is kept for bit-compatibility with earlier runs even though
        # no random draw follows (the original's randint is commented out).
        np.random.seed(dataset_number * 10000 + i)
        give_pref = foreground_index[i]
        for j in range(9):
            if j == give_pref:
                img = img + mosaic_dataset[i][j] * dataset_number / 9
            else:
                img = img + mosaic_dataset[i][j] * (9 - dataset_number) / (8 * 9)
        counter[give_pref] += 1
        avg_image_dataset.append(img)
    # give_pref always equals foreground_index[i], so the original's "correct
    # averaging" count was simply the dataset size; the dead else-branch is removed.
    print("number of correct averaging happened for dataset " + str(dataset_number) + " is " + str(len(mosaic_dataset)))
    print("the averaging are done as ", counter)
    return avg_image_dataset, labels, foreground_index
avg_image_dataset_1 , labels_1, fg_index_1 = create_avg_image_from_mosaic_dataset(mosaic_list_of_images, mosaic_label, fore_idx, 1)
avg_image_dataset_2 , labels_2, fg_index_2 = create_avg_image_from_mosaic_dataset(mosaic_list_of_images, mosaic_label, fore_idx, 2)
avg_image_dataset_3 , labels_3, fg_index_3 = create_avg_image_from_mosaic_dataset(mosaic_list_of_images, mosaic_label, fore_idx , 3)
avg_image_dataset_4 , labels_4, fg_index_4 = create_avg_image_from_mosaic_dataset(mosaic_list_of_images, mosaic_label, fore_idx , 4)
avg_image_dataset_5 , labels_5, fg_index_5 = create_avg_image_from_mosaic_dataset(mosaic_list_of_images, mosaic_label, fore_idx , 5)
avg_image_dataset_6 , labels_6, fg_index_6 = create_avg_image_from_mosaic_dataset(mosaic_list_of_images, mosaic_label, fore_idx , 6)
avg_image_dataset_7 , labels_7, fg_index_7 = create_avg_image_from_mosaic_dataset(mosaic_list_of_images, mosaic_label, fore_idx , 7)
avg_image_dataset_8 , labels_8, fg_index_8 = create_avg_image_from_mosaic_dataset(mosaic_list_of_images, mosaic_label, fore_idx , 8)
avg_image_dataset_9 , labels_9, fg_index_9 = create_avg_image_from_mosaic_dataset(mosaic_list_of_images, mosaic_label, fore_idx, 9)
class MosaicDataset(Dataset):
    """Wraps parallel lists of mosaic images and labels as a torch Dataset."""

    def __init__(self, mosaic_list_of_images, mosaic_label):
        """
        Args:
            mosaic_list_of_images: list of mosaic images, one per sample.
            mosaic_label: list of labels, aligned index-for-index with the images.
        """
        self.mosaic = mosaic_list_of_images
        self.label = mosaic_label
        #self.fore_idx = fore_idx

    def __len__(self):
        # Number of samples in the dataset.
        return len(self.label)

    def __getitem__(self, idx):
        # Returns the (image, label) pair at position idx.
        return self.mosaic[idx] , self.label[idx] #, self.fore_idx[idx]
batch = 256    # DataLoader batch size
# NOTE(review): train_all() defines its own local `epochs = 200`, shadowing this value.
epochs = 300
# training_data = avg_image_dataset_5 #just change this and training_label to desired dataset for training
# training_label = labels_5
# One shuffled training DataLoader per averaged dataset.
traindata_1 = MosaicDataset(avg_image_dataset_1, labels_1 )
trainloader_1 = DataLoader( traindata_1 , batch_size= batch ,shuffle=True)
traindata_2 = MosaicDataset(avg_image_dataset_2, labels_2 )
trainloader_2 = DataLoader( traindata_2 , batch_size= batch ,shuffle=True)
traindata_3 = MosaicDataset(avg_image_dataset_3, labels_3 )
trainloader_3 = DataLoader( traindata_3 , batch_size= batch ,shuffle=True)
traindata_4 = MosaicDataset(avg_image_dataset_4, labels_4 )
trainloader_4 = DataLoader( traindata_4 , batch_size= batch ,shuffle=True)
traindata_5 = MosaicDataset(avg_image_dataset_5, labels_5 )
trainloader_5 = DataLoader( traindata_5 , batch_size= batch ,shuffle=True)
traindata_6 = MosaicDataset(avg_image_dataset_6, labels_6 )
trainloader_6 = DataLoader( traindata_6 , batch_size= batch ,shuffle=True)
traindata_7 = MosaicDataset(avg_image_dataset_7, labels_7 )
trainloader_7 = DataLoader( traindata_7 , batch_size= batch ,shuffle=True)
traindata_8 = MosaicDataset(avg_image_dataset_8, labels_8 )
trainloader_8 = DataLoader( traindata_8 , batch_size= batch ,shuffle=True)
traindata_9 = MosaicDataset(avg_image_dataset_9, labels_9 )
trainloader_9 = DataLoader( traindata_9 , batch_size= batch ,shuffle=True)
# Unshuffled "test" loaders.
# NOTE(review): these wrap the SAME datasets as the train loaders, so the
# reported "test" accuracy is measured on the training data.
testdata_1 = MosaicDataset(avg_image_dataset_1, labels_1 )
testloader_1 = DataLoader( testdata_1 , batch_size= batch ,shuffle=False)
testdata_2 = MosaicDataset(avg_image_dataset_2, labels_2 )
testloader_2 = DataLoader( testdata_2 , batch_size= batch ,shuffle=False)
testdata_3 = MosaicDataset(avg_image_dataset_3, labels_3 )
testloader_3 = DataLoader( testdata_3 , batch_size= batch ,shuffle=False)
testdata_4 = MosaicDataset(avg_image_dataset_4, labels_4 )
testloader_4 = DataLoader( testdata_4 , batch_size= batch ,shuffle=False)
testdata_5 = MosaicDataset(avg_image_dataset_5, labels_5 )
testloader_5 = DataLoader( testdata_5 , batch_size= batch ,shuffle=False)
testdata_6 = MosaicDataset(avg_image_dataset_6, labels_6 )
testloader_6 = DataLoader( testdata_6 , batch_size= batch ,shuffle=False)
testdata_7 = MosaicDataset(avg_image_dataset_7, labels_7 )
testloader_7 = DataLoader( testdata_7 , batch_size= batch ,shuffle=False)
testdata_8 = MosaicDataset(avg_image_dataset_8, labels_8 )
testloader_8 = DataLoader( testdata_8 , batch_size= batch ,shuffle=False)
testdata_9 = MosaicDataset(avg_image_dataset_9, labels_9 )
testloader_9 = DataLoader( testdata_9 , batch_size= batch ,shuffle=False)
class Conv_module(nn.Module):
    """Conv2d -> BatchNorm2d -> ReLU building block.

    Args:
        inp_ch: number of input channels.
        f: number of output filters.
        s: convolution stride.
        k: kernel size.
        pad: zero-padding width.
    """
    def __init__(self, inp_ch, f, s, k, pad):
        super(Conv_module, self).__init__()
        self.inp_ch = inp_ch
        self.f = f
        self.s = s
        self.k = k
        self.pad = pad
        self.conv = nn.Conv2d(inp_ch, f, k, stride=s, padding=pad)
        self.bn = nn.BatchNorm2d(f)
        self.act = nn.ReLU()

    def forward(self, x):
        # convolve, normalize, then rectify
        return self.act(self.bn(self.conv(x)))
class inception_module(nn.Module):
    """Parallel 1x1 and 3x3 convolution branches, concatenated on channels.

    The output has f0 + f1 channels; spatial size is unchanged (the 3x3
    branch uses padding=1).
    """
    def __init__(self, inp_ch, f0, f1):
        super(inception_module, self).__init__()
        self.inp_ch = inp_ch
        self.f0 = f0
        self.f1 = f1
        # both branches read the same input tensor
        self.conv1 = Conv_module(inp_ch, f0, 1, 1, pad=0)
        self.conv3 = Conv_module(inp_ch, f1, 1, 3, pad=1)
        #self.conv1 = nn.Conv2d(3,self.f0,1)
        #self.conv3 = nn.Conv2d(3,self.f1,3,padding=1)

    def forward(self, x):
        branch_1x1 = self.conv1.forward(x)
        branch_3x3 = self.conv3.forward(x)
        #print(branch_1x1.shape, branch_3x3.shape)
        return torch.cat((branch_1x1, branch_3x3), dim=1)
class downsample_module(nn.Module):
    """Spatial downsampling via a strided conv branch plus a max-pool branch.

    forward(x) returns (concatenated output, conv-branch output); the conv
    activation is surfaced separately for inspection by the caller.
    """
    def __init__(self, inp_ch, f):
        super(downsample_module, self).__init__()
        self.inp_ch = inp_ch
        self.f = f
        self.conv = Conv_module(inp_ch, f, 2, 3, pad=0)
        self.pool = nn.MaxPool2d(3, stride=2, padding=0)

    def forward(self, x):
        conv_branch = self.conv(x)
        pool_branch = self.pool(x)
        # channel-wise concat of the two halved-resolution branches
        return torch.cat((conv_branch, pool_branch), dim=1), conv_branch
class inception_net(nn.Module):
    """Small inception-style classifier with 3 output classes.

    Pipeline: stem conv -> 2 inception blocks -> downsample -> 4 inception
    blocks -> downsample -> 2 inception blocks -> 5x5 average pool ->
    linear head over the flattened 236-channel feature map.
    """
    def __init__(self):
        super(inception_net, self).__init__()
        self.conv1 = Conv_module(3, 96, 1, 3, 0)
        self.incept1 = inception_module(96, 32, 32)
        self.incept2 = inception_module(64, 32, 48)
        self.downsample1 = downsample_module(80, 80)
        self.incept3 = inception_module(160, 112, 48)
        self.incept4 = inception_module(160, 96, 64)
        self.incept5 = inception_module(160, 80, 80)
        self.incept6 = inception_module(160, 48, 96)
        self.downsample2 = downsample_module(144, 96)
        self.incept7 = inception_module(240, 176, 60)
        self.incept8 = inception_module(236, 176, 60)
        self.pool = nn.AvgPool2d(5)
        self.linear = nn.Linear(236, 3)

    def forward(self, x):
        x = self.conv1(x)
        x = self.incept2(self.incept1(x))
        # downsample modules also return their conv-branch activation
        x, _act_ds1 = self.downsample1(x)
        x = self.incept6(self.incept5(self.incept4(self.incept3(x))))
        x, _act_ds2 = self.downsample2(x)
        x = self.incept8(self.incept7(x))
        x = self.pool(x)
        # flatten the 1x1x236 feature map before the classifier head
        x = x.view(-1, 1 * 1 * 236)
        return self.linear(x)
def test_all(number, testloader, inc):
    """Evaluate model `inc` on `testloader` and print accuracy for dataset `number`.

    Runs on CUDA; collects per-batch labels/predictions in `out`/`pred`
    (kept for parity with the original, though not returned).
    """
    correct = 0
    total = 0
    out = []
    pred = []
    with torch.no_grad():
        for images, labels in testloader:
            images = images.to("cuda")
            labels = labels.to("cuda")
            out.append(labels.cpu().numpy())
            logits = inc(images)
            _, predicted = torch.max(logits.data, 1)
            pred.append(predicted.cpu().numpy())
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print('Accuracy of the network on the 10000 test dataset %d: %d %%' % (number , 100 * correct / total))
def train_all(trainloader, ds_number, testloader_list):
    """Train a fresh inception_net on `trainloader` (CUDA, SGD + cross-entropy),
    then print training accuracy and accuracy on every loader in
    `testloader_list`. Returns the list of per-epoch mean minibatch losses.
    """
    print("--"*40)
    print("training on data set ", ds_number)
    inc = inception_net().double()
    inc = inc.to("cuda")
    criterion_inception = nn.CrossEntropyLoss()
    optimizer_inception = optim.SGD(inc.parameters(), lr=0.01, momentum=0.9)
    acti = []   # NOTE(review): never filled — presumably for activations; confirm
    loss_curi = []
    # NOTE(review): shadows the module-level `epochs = 300`; training runs
    # for at most 200 epochs here.
    epochs = 200
    for epoch in range(epochs):  # loop over the dataset multiple times
        ep_lossi = []
        running_loss = 0.0
        for i, data in enumerate(trainloader, 0):
            # get the inputs
            inputs, labels = data
            inputs, labels = inputs.to("cuda"),labels.to("cuda")
            # zero the parameter gradients
            optimizer_inception.zero_grad()
            # forward + backward + optimize
            outputs = inc(inputs)
            loss = criterion_inception(outputs, labels)
            loss.backward()
            optimizer_inception.step()
            # print statistics
            running_loss += loss.item()
            if i % 10 == 9:    # print every 10 mini-batches
                print('[%d, %5d] loss: %.3f' %
                      (epoch + 1, i + 1, running_loss / 10))
                ep_lossi.append(running_loss/10)  # loss per minibatch
                running_loss = 0.0
        loss_curi.append(np.mean(ep_lossi))  # loss per epoch
        # early stop once the epoch-mean minibatch loss is small enough
        if (np.mean(ep_lossi) <= 0.05):
            break
    print('Finished Training')
    # torch.save(inc.state_dict(),"train_dataset_"+str(ds_number)+"_"+str(epochs)+".pt")
    # Final accuracy on the training loader.
    correct = 0
    total = 0
    with torch.no_grad():
        for data in trainloader:
            images, labels = data
            images, labels = images.to("cuda"), labels.to("cuda")
            outputs = inc(images)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print('Accuracy of the network on the 10000 train images: %d %%' % ( 100 * correct / total))
    # Cross-evaluate on all nine averaged datasets.
    for i, j in enumerate(testloader_list):
        test_all(i+1, j, inc)
    print("--"*40)
    return loss_curi
# Train one network per averaged dataset; each run also cross-evaluates on
# all nine test loaders. Loss curves are collected for plotting below.
train_loss_all=[]
testloader_list= [ testloader_1, testloader_2, testloader_3, testloader_4, testloader_5, testloader_6,
                   testloader_7, testloader_8, testloader_9]
train_loss_all.append(train_all(trainloader_1, 1, testloader_list))
train_loss_all.append(train_all(trainloader_2, 2, testloader_list))
train_loss_all.append(train_all(trainloader_3, 3, testloader_list))
train_loss_all.append(train_all(trainloader_4, 4, testloader_list))
train_loss_all.append(train_all(trainloader_5, 5, testloader_list))
train_loss_all.append(train_all(trainloader_6, 6, testloader_list))
train_loss_all.append(train_all(trainloader_7, 7, testloader_list))
train_loss_all.append(train_all(trainloader_8, 8, testloader_list))
train_loss_all.append(train_all(trainloader_9, 9, testloader_list))
%matplotlib inline
# Overlay the per-epoch training-loss curves of all nine runs.
for i,j in enumerate(train_loss_all):
    plt.plot(j,label ="dataset "+str(i+1))
plt.xlabel("Epochs")
plt.ylabel("Training_loss")
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
```
| github_jupyter |
```
#import the necessary modules
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
#import scipy
import sklearn
import itertools as it
from itertools import cycle
import os.path as op
import timeit
import json
from matplotlib import animation
import matplotlib.font_manager as font_manager
from collections import namedtuple
#from functools import partial
#from pathlib import Path
# Set plotting style
plt.style.use('seaborn-white')
from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets
#import matplotlib.pyplot as plt
%matplotlib widget
import multiprocessing as m_proc
m_proc.cpu_count()
```
### Now use MDAnalysis to calculate the number of frames in which a central PEG residue and a terminal PEG residue are within 4 Angstroms of BSA (0.9 nm restrained system)
Import MDAnalysis
```
from prot_polymer_analysis import get_protresd_list, aa_frmcount, grptwocnt_aa, gtwo_trjcnt
from prot_polymer_analysis import frac_cont, bavg_frac_cnt, prot_poly_cntmovie, AA_list_org
# Import MDAnalysis
import MDAnalysis as mda
import MDAnalysis.analysis.distances as maa_dist
```
### First table will be total fractional contacts and oligomer occupancy values for each Rg value
#### Distance-based analysis
Find residues that have at least one atom within a cutoff $d = 4.0$ Angstrom near water molecules in BSA/water simulation
Calculate the number of surface bsa residues from a 1 ns BSA/water simulation
```
# Units of Angstroms: contact cutoff distance used throughout this analysis.
dmax = 4.0
def middle_of_band(band_start, band_stop, plot_min=0, plot_max=60):
    """Return the midpoint of [band_start, band_stop] expressed as a fraction
    of the [plot_min, plot_max] axis range (for axes-coordinate text placement).
    """
    midpoint = band_start + (band_stop - band_start) / 2
    return (midpoint - plot_min) / (plot_max - plot_min)
```
# 0.9 nm PEG restrained Rg 100 ns trajectory
Load the Rg = 0.9 nm restrained system (3 PEG N = 20 oligomers/BSA); the code below uses the 0.9 nm PEG files.
```
# Set up the MD Simulation, Make sure you do gmx trjconv -s topol.tpr -f confout.gro -o new_conf.pdb
# -dump 0 -n bsaplga_nk.ndx to generate
# a new pdb file that contains unique chain identifiers
# Universe for the 0.9 nm Rg-restrained PEG/BSA system (topology + trajectory).
u_n20PEG = mda.Universe("0.9nmPEG_res/0.9nm_bsapegonly.pdb", "0.9nmPEG_res/nopbc_0.9pegbsa.xtc")
u_n20PEG
```
Check that we are on the first frame
```
# Confirm we are on the first trajectory frame, and record the frame count.
u_n20PEG.trajectory.frame
pn20_len = len(u_n20PEG.trajectory)
pn20_len
# Select one polymer chain, heavy atoms only
#all_pn20 = u_pn20.select_atoms("(resname sPLG PLG tPLG and segid B) and not type H")
# Select all the PEG residues, heavy atoms only
all_n20PEG = u_n20PEG.select_atoms("resname sPEG PEG tPEG and not type H")
#list(all_n20PEG)
# Select BSA residues, heavy atoms only
prot_09nm = u_n20PEG.select_atoms("protein and not type H")
prot_09nm
```
Calculate AA frame counts for PEG residues, 0.9 nm Rg restraint, 100 ns trajectory
```
#dmax = 4.0, protein group(4653 atoms), plga atom group (543 atoms), took 381.6 s (6 min 36s on 4 cores)
start = 0
end = pn20_len - 1
s_time = timeit.default_timer()
# Per-residue (frame count, occupancy) for BSA residues within dmax of PEG.
h2di_09nm = aa_frmcount(prot_09nm, all_n20PEG, dmax, u_n20PEG, start, end)
timeit.default_timer() - s_time
len(h2di_09nm.keys())
pr_res_PEG = list(prot_09nm.residues)
ss_res_PEG = [str(row) for row in pr_res_PEG]
# Occupancy (value[1]) keyed by residue string.
rkg_n = {key:h2di_09nm[key][1] for key, value in h2di_09nm.items()}
plg_09nm_occ = pd.DataFrame(data=ss_res_PEG, columns=["BSA_des_res"])
plg_09nm_occ['mda_occ_0.9nm'] = plg_09nm_occ['BSA_des_res'].map(rkg_n)
plg_09nm_occ['mda_occ_0.9nm'] = plg_09nm_occ['mda_occ_0.9nm'].replace('nan', np.nan).fillna(0)
plg_09nm_occ['mda_occ_0.9nm'] = plg_09nm_occ['mda_occ_0.9nm'].round(2)
plg_09nm_occ
plg_09nm_occ['mda_occ_0.9nm'][plg_09nm_occ['mda_occ_0.9nm'] != 0]
# Expand the 583 per-residue occupancies onto all 4653 protein atoms so they
# can be written into the PDB occupancy column below.
bsa_r = np.array(list(prot_09nm.resids)) # shape is 4652
m_occ_09r = np.array(list(plg_09nm_occ['mda_occ_0.9nm'])) # shape is 583
m_occ = np.zeros(shape=(4653))
at_ind = np.where(bsa_r[:-1] != bsa_r[1:])[0]   # index of the last atom of each residue
at_in_nw = np.sort(np.append([0,4653],at_ind))
nw_v = 0
for i in range(583):
    b = at_in_nw[i+1] +1
    m_occ[nw_v:b] = m_occ_09r[i]
    nw_v = at_in_nw[i+1] + 1
m_occ[633:646]
```
### Visualize Occupancy on protein
```
# Write the per-atom PEG-contact occupancies into a PDB for visualization.
prot_09nm.occupancies = m_occ
prot_09nm.occupancies
with mda.Writer("prot_09nmpegRes.pdb") as pdb:
    pdb.write(prot_09nm)
# Frame count and occupancy for each residue
#h2di
# Fixed: the original read `len(h2di.keys())`, but `h2di` is never defined in
# this notebook (NameError); the dictionary built above is h2di_09nm.
len(h2di_09nm.keys())
```
### Residue Importance: 0.9 nm restrained
```
# Need to fix function, the residue number are not counting the other 2 PLGA oligomers cuz of same resid number
trjmap_09nmPEG = prot_poly_cntmovie(prot_09nm, all_n20PEG, dmax, u_n20PEG, 0, 10000)
#trj_ppmap_12nm_chC = prot_poly_cntmovie(prot, all_pn20_C, dmax, u_pn20, 0, 10000)
np.save('0.9nmPEG_res.npy', trjmap_09nmPEG) # .npy extension is added if not given
# NOTE(review): this reload reads "1.2nm_res.npy" and overwrites the 0.9 nm
# map just computed and saved above — presumably it should load
# '0.9nmPEG_res.npy'; confirm before rerunning.
trjmap_09nmPEG = np.load("1.2nm_res.npy", allow_pickle=True)
trjmap_09nmPEG[0].shape
np.sum(trjmap_09nmPEG[1000][0])
kj = np.zeros(shape=(10000, 583))  # NOTE(review): scratch array, unused below
kj[:,582].shape
# Total PEG contacts per BSA residue per frame, then summed over all frames.
pp_09nm_ct = np.zeros(shape=(10000, 583))
for i in range(10000):
    for j in range(583):
        pp_09nm_ct[i][j] = np.sum(trjmap_09nmPEG[i][j])
pp_09nmtot = np.zeros(shape=(583))
for i in range(583):
    pp_09nmtot[i] = np.sum(pp_09nm_ct[:,i])
#pp_12nmtot
np.nonzero(pp_09nmtot)
# Bar chart of total contacts vs BSA residue ID.
y_pos = np.arange(583) + 1
wid = np.zeros(shape=583)
wid += 2
#wid
fig = plt.figure(figsize=(12,12))
fig.canvas.layout.width = '800px'
fig.canvas.layout.height = '700px'
plt.bar(y_pos, pp_09nmtot, align='center',width=wid, alpha=1, label='0.9 nm PEG')
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.xlim([0,600])
plt.ylim([0,50000])
plt.legend(fontsize=14)
plt.ylabel(r'Total No. of PEG contacts', fontsize=15)
plt.xlabel(r'BSA Residue ID', fontsize=15)
### Total number of residues that are within 4 angstroms of a PEG oligomer residue within a 100 ns trajectory block
```
# The 20 standard amino-acid three-letter codes.
a_a = ["GLY","ALA","VAL","LEU","ILE","MET","PHE","TRP","PRO","SER","THR","CYS","TYR","ASN","GLN","ASP"
      ,"GLU","LYS","ARG","HIS"]
# This code chunk gets the BSA residues and their corresponding number in a pandas dataframe
red_bsa = []
bh = np.arange(0,584)
for i in range(583):
    b_str = str(list(prot_09nm.residues[i:i+1]))
    if str(bh[i+1]) in b_str:
        red_bsa.append(str(b_str[10:13])+" "+str(bh[i+1]))
pr_res = list(prot_09nm.residues)
ss_res = [str(row) for row in pr_res]
# Frame count (value[0]) keyed by residue string.
rkg = {key:h2di_09nm[key][0] for key, value in h2di_09nm.items()}
plg_09nmaa = pd.DataFrame(data=ss_res, columns=["BSA_des_res"])
plg_09nmaa['mda_plga_frm_0.9nm'] = plg_09nmaa['BSA_des_res'].map(rkg)
plg_09nmaa['BSA_des_res'] = red_bsa
plg_09nmaa['mda_plga_frm_0.9nm'] = plg_09nmaa['mda_plga_frm_0.9nm'].replace('nan', np.nan).fillna(0)
plg_09nmaa.tail()
# Read in data from the output of wrapper.sh, where the frame count is given for each BSA residue that was within
# 4 angstroms of PLGA trimer
wat_data = pd.read_csv('occ_BSA1ns.txt', sep=" ", header=None, usecols=None ,index_col=None)
wat_data.columns = ["BSA_res_no","No. of frames (VMD)"]
wat_data = wat_data.drop("BSA_res_no", axis=1)
pr_res = list(prot_09nm.residues)
ss_res = [str(row) for row in pr_res]
wat_data['BSA_des_res'] = ss_res
wat_data = wat_data[['BSA_des_res',"No. of frames (VMD)"]]
#wat_data.head()
# load MDAnalysis values from MDA_BSA1ns.txt file(129003 atoms SOL group was used to calc. frame counts for txt.
# file)
h2ob_dict = json.load(open("MDA_BSA1ns.txt"))
wat_data['Mda_frames'] = wat_data['BSA_des_res'].map(h2ob_dict)
# From MD Analysis
#Get the count of bsa residues that have 1001 or 1002 frames ( I ran a 1 ns NPT simulation of 1 BSA in water )
#aa_count = pd.DataFrame(data=a_a)
# Count, per amino-acid type, how many residues were in water contact for
# (nearly) every frame — i.e. the surface residues.
c_list = []
for i in range(len(a_a)):
    count = 0
    for index, row in wat_data.iterrows():
        if a_a[i] in row["BSA_des_res"]:
            if row['Mda_frames'] == 1001:
                count += 1
                #c_list.append(str(str(a_a[i])+" "+str(row['No. of frames'])))
            elif row['Mda_frames'] == 1000:
                count += 1
                #c_list.append(str(str(a_a[i])+" "+str(row['No. of frames'])))
    c_list.append(str(str(a_a[i])+" "+str(count)))
#c_list
#c_list
# From VMD
#Get the count of bsa residues that have 1001 or 1002 frames ( I ran a 1 ns NPT simulation of 1 BSA in water )
#aa_count = pd.DataFrame(data=a_a)
# Same surface-residue count as above, but from the VMD-derived frame counts.
vmd_list = []
for i in range(len(a_a)):
    count = 0
    for index, row in wat_data.iterrows():
        if a_a[i] in row["BSA_des_res"]:
            if row["No. of frames (VMD)"] == 1001:
                count += 1
                #c_list.append(str(str(a_a[i])+" "+str(row['No. of frames'])))
            elif row["No. of frames (VMD)"] == 1002:
                count += 1
                #c_list.append(str(str(a_a[i])+" "+str(row['No. of frames'])))
    vmd_list.append(str(str(a_a[i])+" "+str(count)))
# Main difference is that Alanine 583 is counted for all 1001 frames. It seems VMD is unable to calc dist for that res
#vmd_list
#hydrophobic_res = ['ALA', 'ILE', 'LEU', 'VAL', 'GLY', 'PRO','PHE', 'TRP','MET']
#polar_res = ['ASN', 'CYS', 'GLN', 'SER', 'THR','TYR']
#neg_res = ['ASP', 'GLU']
#pos_res = ['ARG', 'HIS', 'LYS']
# aromatic_res = ['PHE', 'TRP', 'TYR', 'HIS']
#all_res = [pos_res, neg_res, polar_res, hydrophobic_res]
# Put the AA count in a pandas dataframe
dg , ji = AA_list_org(c_list)
aa_count = pd.DataFrame(data=dg, index=None, columns=['Amino_acids'])
new_lf = pd.Series(data=ji, index=None)
vmg, vmdj = AA_list_org(vmd_list)
n2lf = pd.Series(data=vmdj, index=None)
aa_count['No_of_surf_res (MDAnalysis)'] = new_lf
aa_count['No_of_surf_res (VMD)'] = n2lf
apl_list = []
# Some residues don't have any contact with the 3 N = 20 PLGA oligomers within 100 ns,
# Put residues that do have contact with BSA in a separate list
for index, r_pl in plg_09nmaa.iterrows():
    if r_pl['mda_plga_frm_0.9nm'] != 0:
        apl_list.append(r_pl['BSA_des_res'])
# This chunk of code gets an AA count from the above list, in order
# to get a total number of residues that contact BSA
cpl_l = []
for index, r_a in aa_count.iterrows():
    count = 0
    for i in range(len(apl_list)):
        if r_a['Amino_acids'] in apl_list[i]:
            count += 1
    cpl_l.append(count)
aa_count['peg_0.9nm_100ns'] = cpl_l
aa_count
# This gives the total number of residues that are within 4 angstroms of a PLGA oligomer residue
# within a 100 ns trajectory block
aa_count['peg_0.9nm_100ns'].sum()
# This gives the total number of residues that are within 4 angstroms of a water molecule
# within a 1 ns trajectory block
aa_count['No_of_surf_res (MDAnalysis)'].sum()
# This gives the total fraction of contacts within the 0.9 nm Rg 100 ns trajectory
aa_count['peg_0.9nm_100ns'].sum()/aa_count['No_of_surf_res (MDAnalysis)'].sum()
```
Calculate mean occupancy and the standard deviation for the 0.9 nm trajectory
Numpy mean and std function was used to calculate mean occupancy and std dev using occ values from aa_frmcount output
```
# Mean occupancy and std deviation over residues that contacted PEG.
ll_mo = [value[1] for key, value in h2di_09nm.items()]
# Fixed label: this cell analyses the 0.9 nm Rg-restrained system (the
# original print said "1.2 nm Rg" and misspelled "Occupancy").
print("Mean Occupancy (0.9 nm Rg): "+str(np.mean(ll_mo)), "Occ. std. dev.: "+str(np.std(ll_mo)))
```
### Calc. fractional contacts for each AA group type
```
# Fractional contacts per amino-acid group for the 0.9 nm system.
# NOTE(review): the variable is named cd_12nm but holds 0.9 nm data.
cd_12nm = frac_cont(h2di_09nm)
cd_12nm
cd = frac_cont(h2di_09nm)
kklh = []
for key, value in cd.items():
    kklh.append(value[1])
# Must substract aromatic residues, since they are already counted
sum(kklh) - cd['Aromatic'][1]
start = 0
end = pn20_len - 1
# NOTE(review): prot, all_pn20 and u_pn20 are not defined in this notebook
# (leftovers from the PLGA analysis) — this call raises NameError as-is.
aa_12nm, l_f12nm = gtwo_trjcnt(prot, all_pn20, dmax, u_pn20, start, end)
aa_12nm
l_f12nm.shape
no_surf = aa_count['No_of_surf_res (MDAnalysis)'].sum()
no_surf
# Block-averaged (5 blocks over 10000 frames) fractional contacts per AA group.
fcnt_rg09nm, prgrp_09nm, aa_matx09nm = bavg_frac_cnt(5, prot_09nm, all_n20PEG, dmax, u_n20PEG, no_surf, 0, 10000)
fcnt_rg09nm
prgrp_09nm
fc_09nm_mean = np.array([np.mean(fcnt_rg09nm['Negative']), np.mean(fcnt_rg09nm['Positive'])
                        ,np.mean(fcnt_rg09nm['Polar']),np.mean(fcnt_rg09nm['Hydrophobic'])
                        , np.mean(fcnt_rg09nm['Aromatic'])])
fc_09nm_mean
fc_09nm_std = np.array([np.std(fcnt_rg09nm['Negative']), np.std(fcnt_rg09nm['Positive'])
                       ,np.std(fcnt_rg09nm['Polar']),np.std(fcnt_rg09nm['Hydrophobic'])
                       , np.std(fcnt_rg09nm['Aromatic'])])
fc_09nm_std
# Bar chart of mean fractional contacts with std-dev error bars.
x_pos = np.arange(5)
aa_types = ["Negative", "Positive", "Polar", "Hydrophobic", "Aromatic"]
fig = plt.figure(figsize=(7,7))
fig.canvas.layout.width = '500px'
fig.canvas.layout.height = '400px'
plt.bar(x_pos, fc_09nm_mean, yerr=fc_09nm_std, ecolor='black',capsize=5, color='c')
plt.title(r'Fractional Contacts 0.9 nm Rg restrained', fontsize=15)
plt.xticks(x_pos, labels=aa_types, fontsize=12)
plt.ylabel(r'Fractional Contacts', fontsize=15)
```
### Total fraction of contacts: averages and std dev calc from 5 20ns blocks
```
# Average of total fraction of contacts
np.mean(fcnt_rg09nm['total_frac'])
# Std Deviation of total fraction of contacts
np.std(fcnt_rg09nm['total_frac'])
```
### Avg no. PLGA residues per BSA AA residue group
```
# NOTE(review): prgrp_1_2nm is not defined in this notebook (the PEG analogue
# computed above is prgrp_09nm) — this cell raises NameError as-is.
prgrp_1_2nm
mean_12nm = np.zeros(shape=5)
std_12nm = np.zeros(shape=5)
count = 0
for key, value in prgrp_1_2nm.items():
    mpl_12nm = []
    var_12nm = []
    for i in prgrp_1_2nm[str(key)].flat:
        mpl_12nm.append(i[0])
        var_12nm.append((i[1])**2)
    # calc frac cont averages
    mean_12nm[count] = np.mean(mpl_12nm)
    # calc frac cont std dev: https://stats.stackexchange.com/questions/25848/how-to-sum-a-standard-deviation
    std_12nm[count] = np.std(mpl_12nm)
    # std_12nm[count] = np.sqrt(np.sum(var_12nm)/5)
    count += 1
mean_12nm
std_12nm
x_pos = np.arange(5)
aa_types = ["Negative", "Positive", "Polar", "Hydrophobic", "Aromatic"]
plt.figure(figsize=(7,7))
plt.bar(x_pos, mean_12nm, yerr=std_12nm, ecolor='black',capsize=5)
plt.title(r'No. of PLGA residues 1.2 nm Rg restrained', fontsize=15)
plt.xticks(x_pos, labels=aa_types, fontsize=12)
plt.ylabel(r'No. of PLGA residues', fontsize=15)
```
### Protein/polymer contact map movie
```
# Build an animated BSA/oligomer contact-map movie (one frame per trajectory frame).
fig = plt.figure(figsize=(10,10))
# Set the axis and the plot titles pp
plt.title("BSA/PLGA contact map 1.2 nm res", fontsize=22, loc='left')
plt.xlabel("PLGA Residue No.", fontsize=22)
plt.ylabel("BSA Residue No.", fontsize=20)
# Set the axis range
plt.ylim(583, 0)
plt.xlim(0, 60)
# Plot bands for each chain
BANDS = (
    (0, 20, "purple", "B"),
    (20, 40, "blue", "C"),
    (40, 60, "green", "D"),
)
text_y = 0.98 # Close to the top
for start, stop, color, band in BANDS:
    plt.axvspan(start, stop,color=color, alpha=0.15)
    text_x = middle_of_band(start,stop)
    plt.text(
        text_x,
        text_y,
        "PLGA chain " + band,
        color=color,
        fontsize=18,
        transform=fig.gca().transAxes,
        horizontalalignment='center',
        verticalalignment='center',
        style='italic',
    )
plt.text(0.93, 1, "Time [ns]:", fontsize=20, transform=fig.gca().transAxes, horizontalalignment='right', verticalalignment='bottom')
# Set tick label size
fig.gca().tick_params(axis='both', which='major', labelsize=20)
ims = []
# NOTE(review): trj_ppmap_12nm is not defined in this notebook (only
# trjmap_09nmPEG / trj_ppmap_15nm are) — this loop raises NameError as-is.
for i in range(10000):
    data = trj_ppmap_12nm[i]
    im = plt.imshow(data, aspect='auto', cmap='Greys')
    t_sim = plt.text(1, 1, str(i/100), fontsize=20, transform=fig.gca().transAxes, horizontalalignment='right', verticalalignment='bottom')
    ims.append([im, t_sim])
ani = animation.ArtistAnimation(fig, ims, blit=True, repeat=False)
ani.save('1.2nm_res.mp4',writer='ffmpeg', fps=50, bitrate=100000)
#plt.tight_layout()
#plt.show()
def com_plga_oligs(g2_atoms, n_mon):
    """Per-monomer centers of mass for the three oligomer chains in g2_atoms.

    The three chains reuse the same resid numbering (1..n_mon), so the atom
    ranges belonging to each copy of residue i+1 are recovered from the
    transitions of the boolean mask (resid == i+1): every mask flip marks a
    run boundary, and each residue occurs as exactly three runs (one per
    chain, in chain order).

    Parameters
    ----------
    g2_atoms : MDAnalysis AtomGroup
        Heavy atoms of the three oligomers, chains stored contiguously.
    n_mon : int
        Number of monomers per chain. (The last-residue edge case was
        previously hard-coded to index 19, i.e. n_mon == 20 only.)

    Returns
    -------
    numpy.ndarray of shape (3 * n_mon, 3): COM coordinates — all monomers of
    chain 1, then chain 2, then chain 3.
    """
    hj_1 = []
    hj_2 = []
    hj_3 = []
    resids = np.asarray(list(g2_atoms.resids))
    for i in range(n_mon):
        mask = np.equal(resids, i + 1).view(np.int8)
        # index just after each mask flip = run start/stop boundary
        bounds = np.where(np.abs(np.diff(mask)) == 1)[0] + 1
        if i == 0:
            # diff() cannot see the leading edge: the first run starts at 0.
            # Fix: the original prepended the 0 *before* the "+= 1" shift,
            # which silently dropped atom 0 from the first monomer's COM.
            bounds = np.concatenate(([0], bounds))
        if i == n_mon - 1:
            # ...nor the trailing edge: the last run ends at the final atom.
            bounds = np.concatenate((bounds, [len(g2_atoms)]))
        # three (start, stop) pairs — one per chain
        rpn20 = bounds.reshape(3, 2)
        hj_1.append(g2_atoms[rpn20[0][0]:rpn20[0][1]].center_of_mass())
        hj_2.append(g2_atoms[rpn20[1][0]:rpn20[1][1]].center_of_mass())
        hj_3.append(g2_atoms[rpn20[2][0]:rpn20[2][1]].center_of_mass())
    oligs_cb = np.concatenate((np.array(hj_1), np.array(hj_2), np.array(hj_3)))
    return oligs_cb
# NOTE(review): all_pn20 is not defined in this notebook (PLGA leftover);
# the PEG equivalent is all_n20PEG — this call raises NameError as-is.
cm_12nmoligs = com_plga_oligs(all_pn20, 20)
#cm_12nmoligs
len(cm_12nmoligs)
```
# 1.1 nm PEG restrained Rg 100 ns trajectory
Load the Rg = 1.1 nm restrained system (3 PEG N = 20 oligomers/BSA); the code below uses the 1.1 nm PEG files.
```
# Set up the MD Simulation
# Universe for the 1.1 nm Rg-restrained PEG/BSA system.
u11nm_n20PEG = mda.Universe("1.1nmPEG_res/1.1nm_bsapegonly.pdb", "1.1nmPEG_res/nopbc_1.1pegbsa.xtc")
u11nm_n20PEG
pn20_11nm = len(u11nm_n20PEG.trajectory)
pn20_11nm
# Select all the PEG residues, heavy atoms only
n20PEG_11nm = u11nm_n20PEG.select_atoms("resname sPEG PEG tPEG and not type H")
n20PEG_11nm
# Select BSA residues, heavy atoms only
prot_11nmPEG = u11nm_n20PEG.select_atoms("protein and not type H")
prot_11nmPEG
```
### Contact Analysis
```
#dmax = 4.0, protein group(4653 atoms), plga atom group (543 atoms), took 381.6 s (6 min 36s on 4 cores)
start = 0
end = pn20_11nm - 1
s_time = timeit.default_timer()
# Per-residue (frame count, occupancy) for the 1.1 nm system.
h2di_11nm = aa_frmcount(prot_11nmPEG, n20PEG_11nm, dmax, u11nm_n20PEG, start, end)
timeit.default_timer() - s_time
#h2di_11nm
len(h2di_11nm.keys())
pr_res_PEG = list(prot_11nmPEG.residues)
ss_res_PEG = [str(row) for row in pr_res_PEG]
rkg_n = {key:h2di_11nm[key][1] for key, value in h2di_11nm.items()}
# Append the 1.1 nm occupancies as a new column of the 0.9 nm dataframe.
plg_09nm_occ['mda_occ_1.1nm'] = plg_09nm_occ['BSA_des_res'].map(rkg_n)
plg_09nm_occ['mda_occ_1.1nm'] = plg_09nm_occ['mda_occ_1.1nm'].replace('nan', np.nan).fillna(0)
plg_09nm_occ['mda_occ_1.1nm'] = plg_09nm_occ['mda_occ_1.1nm'].round(2)
plg_09nm_occ
pr_res11nm = list(prot_11nmPEG.residues)
ss_res11nm = [str(row) for row in pr_res11nm]
rkg_11nm = {key:h2di_11nm[key][0] for key, value in h2di_11nm.items()}
plg_1_1nmaa = pd.DataFrame(data=ss_res11nm, columns=["BSA_des_res"])
plg_1_1nmaa['mda_plga_frm_1.1nm'] = plg_1_1nmaa['BSA_des_res'].map(rkg_11nm)
plg_1_1nmaa['BSA_des_res'] = red_bsa
plg_1_1nmaa['mda_plga_frm_1.1nm'] = plg_1_1nmaa['mda_plga_frm_1.1nm'].replace('nan', np.nan).fillna(0)
plg_1_1nmaa.head()
# Expand the 583 per-residue occupancies onto all 4653 protein atoms.
bsa_r = np.array(list(prot_11nmPEG.resids)) # shape is 4652
m_occ_11nm = np.array(list(plg_09nm_occ['mda_occ_1.1nm'])) # shape is 583
m_occ_11New = np.zeros(shape=(4653))
at_ind = np.where(bsa_r[:-1] != bsa_r[1:])[0]
at_in_nw = np.sort(np.append([0,4653],at_ind))
nw_v = 0
for i in range(583):
    b = at_in_nw[i+1] +1
    m_occ_11New[nw_v:b] = m_occ_11nm[i]
    nw_v = at_in_nw[i+1] + 1
m_occ_11New[0:33]
prot_11nmPEG.occupancies[3089:3099]
# NOTE(review): prot_15nm is not defined in this notebook — this line raises NameError.
list(prot_15nm.atoms[3089:3099])
np.nonzero(m_occ_11New)
```
### Visualize Occupancy on protein
```
# Write the per-atom 1.1 nm PEG-contact occupancies into a PDB for visualization.
prot_11nmPEG.occupancies = m_occ_11New
prot_11nmPEG.occupancies[0:33]
with mda.Writer("prot_11nmpegRes.pdb") as pdb:
    pdb.write(prot_11nmPEG)
```
### Residue Importance: 1.1 nm restrained
```
# Need to fix function, the residue number are not counting the other 2 PLGA oligomers cuz of same resid number
trjmap_11nmPEG = prot_poly_cntmovie(prot_11nmPEG, n20PEG_11nm, dmax, u11nm_n20PEG, 0, 10000)
#trj_ppmap_12nm_chC = prot_poly_cntmovie(prot, all_pn20_C, dmax, u_pn20, 0, 10000)
trj_ppmap_15nm = np.load("1.5nm_res.npy", allow_pickle=True)
np.save('1.1nm_PEGres.npy', trjmap_11nmPEG) # .npy extension is added if not given
trjmap_11nmPEG[0].shape
np.sum(trjmap_11nmPEG[1000][0])
kj = np.zeros(shape=(10000, 583))
kj[:,582].shape
pp_11nm_ct = np.zeros(shape=(10000, 583))
for i in range(10000):
for j in range(583):
pp_11nm_ct[i][j] = np.sum(trjmap_11nmPEG[i][j])
pp_11nmtot = np.zeros(shape=(583))
for i in range(583):
pp_11nmtot[i] = np.sum(pp_11nm_ct[:,i])
#pp_12nmtot
np.nonzero(pp_11nmtot)
y_pos = np.arange(583) + 1
wid = np.zeros(shape=583)
wid += 1.5
#wid
fig = plt.figure(figsize=(12,12))
fig.canvas.layout.width = '800px'
fig.canvas.layout.height = '700px'
plt.bar(y_pos+0.25, pp_11nmtot, align='center',width=wid, color='#562A8B', alpha=0.3, label='1.1 nm PEG')
plt.bar(y_pos, pp_09nmtot, align='center',width=wid, alpha=0.5, color='#1D77CF',label='0.9 nm PEG')
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.xlim([0,600])
plt.ylim([0,70000])
plt.legend(fontsize=14)
plt.ylabel(r'Total No. of PEG contacts', fontsize=15)
plt.xlabel(r'BSA Residue ID', fontsize=15)
```
### Total number of residues that are within 4 angstroms of a PEG oligomer residue within a 100 ns trajectory block
```
apl_15nm = []
# Some residues don't have any contact with the 3 N = 20 PLGA oligomers within 100 ns,
# Put residues that do have contact with BSA in a separate list
# NOTE(review): the *_15nm names below actually hold 1.1 nm PEG data (leftover naming).
for index, r_pl in plg_1_1nmaa.iterrows():
    if r_pl['mda_plga_frm_1.1nm'] != 0:
        apl_15nm.append(r_pl['BSA_des_res'])
# This chunk of code gets an AA count from the above list, in order
# to get a total number of residues that contact BSA
cpl_15nm = []
for index, r_a in aa_count.iterrows():
    count = 0
    for i in range(len(apl_15nm)):
        if r_a['Amino_acids'] in apl_15nm[i]:
            count += 1
    cpl_15nm.append(count)
aa_count['peg_1.1nm_100ns'] = cpl_15nm
#aa_count.drop('No_of_surf_res (VMD)', axis=1, inplace=True)
aa_count
# This gives the total number of residues that are within 4 angstroms of a PLGA oligomer residue
# within a 100 ns trajectory block
aa_count['peg_1.1nm_100ns'].sum()
# This gives the total number of residues that are within 4 angstroms of a water molecule
# within a 1 ns trajectory block
aa_count['No_of_surf_res (MDAnalysis)'].sum()
# This gives the total fraction of contacts within the 1.1 nm Rg 100 ns trajectory
aa_count['peg_1.1nm_100ns'].sum()/aa_count['No_of_surf_res (MDAnalysis)'].sum()
# Mean occupancy and std deviation
# NOTE(review): the printed label misspells "Occupancy" (runtime string left unchanged here).
ll_mo15 = [value[1] for key, value in h2di_11nm.items()]
print("Mean Occpancy (1.1 nm Rg): "+str(np.mean(ll_mo15)), "Occ. std. dev.: "+str(np.std(ll_mo15)))
cd_11nm = frac_cont(h2di_11nm)
cd_11nm
```
### Calc. fractional contacts for each AA group type
```
# Block-averaged (5 blocks) fractional contacts per AA group, 1.1 nm system.
fcnt_rg11nm, prgrp_11nm, aa_matx_11nm = bavg_frac_cnt(5, prot_11nmPEG, n20PEG_11nm, dmax ,u11nm_n20PEG, no_surf, 0, 10000)
fcnt_rg11nm
fc_11nm_mean = np.array([np.mean(fcnt_rg11nm['Negative']), np.mean(fcnt_rg11nm['Positive'])
                        ,np.mean(fcnt_rg11nm['Polar']),np.mean(fcnt_rg11nm['Hydrophobic'])
                        , np.mean(fcnt_rg11nm['Aromatic'])])
fc_11nm_mean
fc_11nm_std = np.array([np.std(fcnt_rg11nm['Negative']), np.std(fcnt_rg11nm['Positive'])
                       ,np.std(fcnt_rg11nm['Polar']),np.std(fcnt_rg11nm['Hydrophobic'])
                       , np.std(fcnt_rg11nm['Aromatic'])])
fc_11nm_std
# Side-by-side bars: 0.9 nm vs 1.1 nm fractional contacts.
x_pos = np.arange(5)
width = 0.35
aa_types = ["Negative", "Positive", "Polar", "Hydrophobic", "Aromatic"]
fig = plt.figure(figsize=(7,7))
fig.canvas.layout.width = '800px'
fig.canvas.layout.height = '700px'
plt.bar(x_pos, fc_09nm_mean, width, yerr=fc_09nm_std, ecolor='black',capsize=5, color='royalblue')
plt.bar(x_pos+width, fc_11nm_mean, width, yerr=fc_11nm_std, ecolor='black',capsize=5, color='c')
plt.title(r'Fractional Contacts Rg restrained', fontsize=15)
plt.xticks(x_pos+width/2, labels=aa_types, fontsize=12)
plt.legend(['Rg = 0.9 nm', 'Rg = 1.1 nm'], frameon=False)
plt.ylabel(r'Fractional Contacts', fontsize=15)
```
### Total fraction of contacts: averages and std dev calc from 5 20 ns blocks
```
# Average and std deviation of the total fraction of contacts (5 blocks).
np.mean(fcnt_rg11nm['total_frac'])
np.std(fcnt_rg11nm['total_frac'])
prgrp_11nm
# matrix containing the avg number of PLGA residues for each block for each amino acid
# Fixed: the original referenced aa_matx_15nm, which is never defined in this
# notebook; the matrix computed above for the 1.1 nm system is aa_matx_11nm.
np.where(aa_matx_11nm[0, 0] != 0)
```
### Avg no. PLGA residues per BSA AA residue group
```
# NOTE(review): prgrp_1_5nm (and the mean_12nm/std_12nm plotted below) come
# from the PLGA notebook and are not defined here — this cell raises
# NameError as-is.
prgrp_1_5nm
mean_15nm = np.zeros(shape=5)
std_15nm = np.zeros(shape=5)
count = 0
for key, value in prgrp_1_5nm.items():
    mpl_15nm = []
    var_15nm = []
    for i in prgrp_1_5nm[str(key)].flat:
        mpl_15nm.append(i[0])
        var_15nm.append((i[1])**2)
    # calc frac cont averages
    mean_15nm[count] = np.mean(mpl_15nm)
    # calc frac cont std dev: https://stats.stackexchange.com/questions/25848/how-to-sum-a-standard-deviation
    std_15nm[count] = np.std(mpl_15nm)
    #std_15nm[count] = np.sqrt(np.sum(var_15nm)/5)
    count += 1
mean_15nm
std_15nm
x_pos = np.arange(5)
width = 0.35
aa_types = ["Negative", "Positive", "Polar", "Hydrophobic", "Aromatic"]
plt.figure(figsize=(7,7))
plt.bar(x_pos, mean_12nm, width, yerr=std_12nm, ecolor='black',capsize=5, color='royalblue')
plt.bar(x_pos+width, mean_15nm, width, yerr=std_15nm, ecolor='black',capsize=5, color='c')
plt.title(r'No. of PLGA residues Rg restrained', fontsize=15)
plt.xticks(x_pos+width/2, labels=aa_types, fontsize=12)
plt.legend(['Rg = 1.2 nm', 'Rg = 1.5 nm'], frameon=False)
plt.ylabel(r'No. of PLGA residues', fontsize=15)
```
### Protein/polymer contact map movie
```
# --- Animated BSA/PLGA contact-map movie for the 1.5 nm restrained run ---
fig = plt.figure(figsize=(10,10))
# Set the axis and the plot titles pp
plt.title("BSA/PLGA contact map 1.5 nm res.", fontsize=22, loc='left')
plt.xlabel("PLGA Residue No.", fontsize=22)
plt.ylabel("BSA Residue No.", fontsize=20)
# Set the axis range (y inverted so BSA residue 1 is at the top)
plt.ylim(583, 0)
plt.xlim(0, 60)
# Plot bands for each chain: (start_residue, stop_residue, color, chain label)
BANDS = (
    (0, 20, "purple", "B"),
    (20, 40, "blue", "C"),
    (40, 60, "green", "D"),
)
text_y = 0.98 # Close to the top
for start, stop, color, band in BANDS:
    # Shade the 20-residue span belonging to this PLGA chain
    plt.axvspan(start, stop,color=color, alpha=0.15)
    # NOTE(review): the label is drawn with transform=transAxes (0-1 fractions);
    # confirm middle_of_band (notebook helper) returns axes-fraction coordinates.
    text_x = middle_of_band(start,stop)
    plt.text(
        text_x,
        text_y,
        "PLGA chain " + band,
        color=color,
        fontsize=18,
        transform=fig.gca().transAxes,
        horizontalalignment='center',
        verticalalignment='center',
        style='italic',
    )
# Static header for the running simulation-time readout (updated per frame below)
plt.text(0.94, 1, "Time [ns]:", fontsize=20, transform=fig.gca().transAxes, horizontalalignment='right', verticalalignment='bottom')
# Set tick label size
fig.gca().tick_params(axis='both', which='major', labelsize=20)
# One (contact-map image, time label) artist pair per frame
ims = []
for i in range(10000):
    data = trj_ppmap_15nm[i]
    im = plt.imshow(data, aspect='auto', cmap='Greys')
    # str(i/100): frame index in ns, assuming 100 frames per ns -- TODO confirm timestep
    t_sim = plt.text(1.03, 1, str(i/100), fontsize=20, transform=fig.gca().transAxes, horizontalalignment='right', verticalalignment='bottom')
    ims.append([im, t_sim])
ani = animation.ArtistAnimation(fig, ims, blit=True, repeat=False)
ani.save('1.5nm_res.mp4', writer='ffmpeg', fps=50, bitrate=100000)
#plt.tight_layout()
#plt.show()
```
# 1.4 nm restrained Rg PEG 100 ns trajectory
### Contact Analysis
```
# --- Contact analysis for the 1.4 nm restrained-Rg PEG 100 ns trajectory ---
# Set up the MD Simulation
u14nm_n20PEG = mda.Universe("1.4nmPEG_res/1.4nm_bsapegonly.pdb", "1.4nmPEG_res/nopbc_1.4pegbsa.xtc")
u14nm_n20PEG
pn20_len14nm = len(u14nm_n20PEG.trajectory)  # number of frames in the trajectory
pn20_len14nm
#Select all the PLGA residues, heavy atoms only
# NOTE(review): the resnames selected are PEG variants (sPEG/PEG/tPEG), though the
# comment above says PLGA -- presumably a leftover from the PLGA notebook.
pn20_14nm = u14nm_n20PEG.select_atoms("resname sPEG PEG tPEG and not type H")
pn20_14nm
# Select BSA residues, heavy atoms only
prot_14nm = u14nm_n20PEG.select_atoms("protein and not type H")
prot_14nm
#dmax = 4.0, protein group(4653 atoms), plga atom group (543 atoms), took 381.6 s (6 min 36s on 4 cores)
start = 0
end = pn20_len14nm - 1
s_time = timeit.default_timer()
# aa_frmcount (notebook helper): per-residue contact data keyed by residue label
h2di_14nm = aa_frmcount(prot_14nm, pn20_14nm, dmax, u14nm_n20PEG, start, end)
timeit.default_timer() - s_time  # wall-clock time of the contact search
#h2di_14nm
h2di_14nm
len(h2di_14nm.keys())
# Build a per-residue table of contact frame counts
pr_res14nm = list(prot_14nm.residues)
ss_res14nm = [str(row) for row in pr_res14nm]
# value index 0 is presumably the contact frame count -- see aa_frmcount's definition
rkg_14nm = {key:h2di_14nm[key][0] for key, value in h2di_14nm.items()}
plg_14nmaa = pd.DataFrame(data=ss_res14nm, columns=["BSA_des_res"])
plg_14nmaa['mda_plga_frm_1.4nm'] = plg_14nmaa['BSA_des_res'].map(rkg_14nm)
# red_bsa: cleaned residue labels defined earlier in the notebook
plg_14nmaa['BSA_des_res'] = red_bsa
# Residues with no contacts map to NaN; treat them as zero-contact
plg_14nmaa['mda_plga_frm_1.4nm'] = plg_14nmaa['mda_plga_frm_1.4nm'].replace('nan', np.nan).fillna(0)
plg_14nmaa.head()
# Extract mean occupancy values
pr_res_14ur = list(prot_14nm.residues)
ss_res_14ur = [str(row) for row in pr_res_14ur]
rkg_14ur = {key:h2di_14nm[key][1] for key, value in h2di_14nm.items()}
# NOTE(review): occupancies are appended as a new column to the 0.9 nm table
# (plg_09nm_occ), which seems to accumulate one column per Rg value -- confirm intent.
plg_09nm_occ['mda_occ_1.4nm'] = plg_09nm_occ['BSA_des_res'].map(rkg_14ur)
plg_09nm_occ['mda_occ_1.4nm'] = plg_09nm_occ['mda_occ_1.4nm'].replace('nan', np.nan).fillna(0)
plg_09nm_occ['mda_occ_1.4nm'] = plg_09nm_occ['mda_occ_1.4nm'].round(2)
plg_09nm_occ
# Broadcast per-residue occupancies to per-atom values (for PDB occupancy output)
bsa_r = np.array(list(prot_14nm.resids)) # shape is 4652
m_occ_14nm = np.array(list(plg_09nm_occ['mda_occ_1.4nm'])) # shape is 583
# NOTE(review): sized 4653 while the comment above says bsa_r has 4652 entries --
# verify the atom count; numpy slicing below clamps, so an off-by-one is silent.
m_occ_14New = np.zeros(shape=(4653))
# Indices where the residue id changes from one atom to the next (residue boundaries)
at_ind = np.where(bsa_r[:-1] != bsa_r[1:])[0]
at_in_nw = np.sort(np.append([0,4653],at_ind))
nw_v = 0
for i in range(583):
    b = at_in_nw[i+1] +1
    # Fill all atoms of residue i with that residue's occupancy
    m_occ_14New[nw_v:b] = m_occ_14nm[i]
    nw_v = at_in_nw[i+1] + 1
np.nonzero(m_occ_14New)
```
### Visualize Occupancy on protein
```
# Store the per-atom occupancy values on the AtomGroup and write a PDB whose
# occupancy column encodes PEG-contact occupancy (for coloring in VMD/PyMOL).
prot_14nm.occupancies = m_occ_14New
prot_14nm.occupancies
with mda.Writer("prot_14nmRes.pdb") as pdb:
    pdb.write(prot_14nm)
```
### Residue Importance: 1.4 nm restrained
```
# Need to fix function, the residue number are not counting the other 2 PLGA oligomers cuz of same resid number
# prot_poly_cntmovie (notebook helper): per-frame BSA x polymer-residue contact matrices
trjmap_14nmPEG = prot_poly_cntmovie(prot_14nm, pn20_14nm, dmax, u14nm_n20PEG, 0, 10000)
#trj_ppmap_12nm_chC = prot_poly_cntmovie(prot, all_pn20_C, dmax, u_pn20, 0, 10000)
np.save('1.4nm_res.npy', trjmap_14nmPEG) # .npy extension is added if not given
# Reload the previously computed 2 nm contact movie (used again in a later section)
trj_ppmap_2nm = np.load('2nm_res.npy', allow_pickle=True)
trj_ppmap_2nm
trjmap_14nmPEG[0].shape
np.sum(trjmap_14nmPEG[1000][0])
# Scratch check of the target array shape
kj = np.zeros(shape=(10000, 583))
kj[:,582].shape
# Per-frame, per-BSA-residue total contact counts (sum over polymer residues)
pp_14nm_ct = np.zeros(shape=(10000, 583))
for i in range(10000):
    for j in range(583):
        pp_14nm_ct[i][j] = np.sum(trjmap_14nmPEG[i][j])
# Total contacts per BSA residue over the whole trajectory
pp_14nmtot = np.zeros(shape=(583))
for i in range(583):
    pp_14nmtot[i] = np.sum(pp_14nm_ct[:,i])
#pp_12nmtot
np.nonzero(pp_14nmtot)
# Normalization constants: grand-total contacts for each Rg run
a_peg = np.sum(pp_14nmtot)
b_peg = np.sum(pp_11nmtot)  # from the earlier 1.1 nm section
c_peg = np.sum(pp_09nmtot)  # from the earlier 0.9 nm section
plt.close('all')
# Overlaid per-residue contact histograms, each normalized by its run's total
y_pos = np.arange(583) + 1  # residue ids start at 1
wid = np.zeros(shape=583)
wid += 1.5  # constant bar width for every residue
#wid
fig = plt.figure(figsize=(12,12))
fig.canvas.layout.width = '800px'
fig.canvas.layout.height = '700px'
plt.bar(y_pos, pp_09nmtot/c_peg, align='center',width=wid, alpha=0.5, color='#1D77CF',label='0.9 nm PEG')
plt.bar(y_pos+0.25, pp_11nmtot/b_peg, align='center',width=wid, color='#562A8B', alpha=0.3, label='1.1 nm PEG')
plt.bar(y_pos+0.3, pp_14nmtot/a_peg, align='center',width=wid, color='#4E4C4D', alpha=0.3, label='1.4 nm PEG')
#plt.bar(y_pos+0.25, pp_11nmtot/b_peg, align='center',width=wid, color='#562A8B', alpha=0.3, label='1.1 nm PEG')
#plt.bar(y_pos, pp_09nmtot/c_peg, align='center',width=wid, alpha=0.5, color='#1D77CF',label='0.9 nm PEG')
plt.title("BSA in water with PEG restrained, 100 ns", fontsize=18)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.xlim([0,600])
plt.ylim([0,0.2])
plt.legend(fontsize=14)
plt.ylabel(r'Normalized Total No. of PEG contacts', fontsize=15)
plt.xlabel(r'BSA Residue ID', fontsize=15)
```
### Total number of residues that are within 4 angstroms of a PLGA oligomer residue within a 100 ns trajectory block
```
# --- Count BSA residues within dmax of the polymer at least once in the 100 ns block ---
apl_14nm = []
# Some residues don't have any contact with the 3 N = 20 PLGA oligomers within 100 ns,
# Put residues that do have contact with BSA in a separate list
for index, r_pl in plg_14nmaa.iterrows():
    if r_pl['mda_plga_frm_1.4nm'] != 0:
        apl_14nm.append(r_pl['BSA_des_res'])
# This chunk of code gets an AA count from the above list, in order
# to get a total number of residues that contact BSA
cpl_14nm = []
for index, r_a in aa_count.iterrows():
    count = 0
    for i in range(len(apl_14nm)):
        # Substring match: residue labels embed the 3-letter amino-acid code
        if r_a['Amino_acids'] in apl_14nm[i]:
            count += 1
    cpl_14nm.append(count)
aa_count['peg_1.4nm_100ns'] = cpl_14nm
#aa_count.drop('No_of_surf_res (VMD)', axis=1, inplace=True)
aa_count
# This gives the total number of residues that are within 4 angstroms of a PLGA oligomer residue
# within a 100 ns trajectory block
aa_count['peg_1.4nm_100ns'].sum()
# This gives the total number of residues that are within 4 angstroms of a water molecule
# within a 1 ns trajectory block
aa_count['No_of_surf_res (MDAnalysis)'].sum()
# This gives the total fraction of contacts within the 1.2 nm Rg 100 ns trajectory
# NOTE(review): this section analyzes the 1.4 nm run; "1.2 nm" above looks like a stale comment.
aa_count['peg_1.4nm_100ns'].sum()/aa_count['No_of_surf_res (MDAnalysis)'].sum()
# Mean occupancy and std deviation
# h2di_14nm maps each residue label to per-residue contact data; index 1 is presumably
# the mean occupancy -- see aa_frmcount's definition. Keys are unused, so iterate values.
ll_mo2nm = [value[1] for value in h2di_14nm.values()]
print("Mean Occupancy (1.4 nm Rg): "+str(np.mean(ll_mo2nm)), "Occ. std. dev.: "+str(np.std(ll_mo2nm)))
# NOTE(review): the two lines below operate on the 2 nm trajectory data (h2di_2nm) and
# look like a leftover from the 2 nm section -- confirm they belong in this 1.4 nm analysis.
cd_2nm = frac_cont(h2di_2nm)
cd_2nm
```
### Calc. fractional contacts for each AA group type
```
# --- Block-averaged fractional contacts per AA group, adding the 1.4 nm run ---
# Same helper as the earlier sections: 5 blocks over frames 0-10000
fcnt_rg14nm, prgrp_14nm, aa_matx_14nm = bavg_frac_cnt(5, prot_14nm, pn20_14nm, dmax, u14nm_n20PEG, no_surf, 0, 10000)
fcnt_rg14nm
# Block-averaged mean fractional contact per AA group (order matches aa_types below)
fc_14nm_mean = np.array([np.mean(fcnt_rg14nm['Negative']), np.mean(fcnt_rg14nm['Positive'])
                         ,np.mean(fcnt_rg14nm['Polar']),np.mean(fcnt_rg14nm['Hydrophobic'])
                         , np.mean(fcnt_rg14nm['Aromatic'])])
fc_14nm_mean
# Standard deviations across the 5 blocks (error bars)
fc_14nm_std = np.array([np.std(fcnt_rg14nm['Negative']), np.std(fcnt_rg14nm['Positive'])
                        ,np.std(fcnt_rg14nm['Polar']),np.std(fcnt_rg14nm['Hydrophobic'])
                        , np.std(fcnt_rg14nm['Aromatic'])])
fc_14nm_std
# Three-series grouped bar chart: 0.9, 1.1 and 1.4 nm restrained-Rg PEG runs
x_pos = np.arange(5)
width = 0.28  # narrower bars so three series fit in each group
aa_types = ["Negative", "Positive", "Polar", "Hydrophobic", "Aromatic"]
fig = plt.figure(figsize=(7,7))
fig.canvas.layout.width = '800px'
fig.canvas.layout.height = '700px'
plt.bar(x_pos, fc_09nm_mean, width, yerr=fc_09nm_std, ecolor='black',capsize=5, color='royalblue')
plt.bar(x_pos+width, fc_11nm_mean, width, yerr=fc_11nm_std, ecolor='black',capsize=5, color='c')
plt.bar(x_pos+(2*width), fc_14nm_mean, width, yerr=fc_14nm_std, ecolor='black',capsize=5, color='lightslategray')
plt.title(r'Fractional Contacts Rg restrained', fontsize=15)
plt.xticks(x_pos+width, labels=aa_types, fontsize=12)  # center labels on the middle bar
plt.ylim(0,0.4)
plt.legend(['Rg = 0.9 nm', 'Rg = 1.1 nm', 'Rg = 1.4 nm'], frameon=False)
plt.ylabel(r'Fractional Contacts', fontsize=15)
```
### Total fraction of contacts: averages and std dev calc from 5 20 ns blocks
```
# Overall fraction of contacts for the 1.4 nm run: mean and std dev over the 5 blocks
np.mean(fcnt_rg14nm['total_frac'])
np.std(fcnt_rg14nm['total_frac'])
# NOTE(review): the displays below reference the 2 nm run (prgrp_2nm / aa_matx_2nm),
# not the 1.4 nm data computed above -- confirm they belong in this section.
prgrp_2nm
# matrix containing the avg number of PLGA residues for each block for each amino acid
np.where(aa_matx_2nm[0] != 0)
```
### Avg no. PLGA residues per BSA AA residue group
```
# --- Average number of PLGA residues in contact per BSA AA group (2 nm run) ---
# prgrp_2nm maps each AA-group name to an array of (mean, std) pairs, one per block
prgrp_2nm
mean_2nm = np.zeros(shape=5)  # one slot per AA group
std_2nm = np.zeros(shape=5)
count = 0
for key, value in prgrp_2nm.items():
    mpl_2nm = []  # per-block means for this AA group
    var_2nm = []  # per-block variances (kept for the alternative std formula below)
    for i in prgrp_2nm[str(key)].flat:
        mpl_2nm.append(i[0])
        var_2nm.append((i[1])**2)
    # calc frac cont averages
    mean_2nm[count] = np.mean(mpl_2nm)
    # calc frac cont std dev: https://stats.stackexchange.com/questions/25848/how-to-sum-a-standard-deviation
    std_2nm[count] = np.std(mpl_2nm)
    #std_2nm[count] = np.sqrt(np.sum(var_2nm)/5)
    count += 1
mean_2nm
std_2nm
mean_15nm
std_15nm
# Three-series grouped bar chart: 1.2, 1.5 and 2 nm restrained-Rg PLGA runs
x_pos = np.arange(5)
width = 0.28  # narrower bars so three series fit in each group
aa_types = ["Negative", "Positive", "Polar", "Hydrophobic", "Aromatic"]
plt.figure(figsize=(7,7))
plt.bar(x_pos, mean_12nm, width, yerr=std_12nm, ecolor='black',capsize=5, color='royalblue')
plt.bar(x_pos+width, mean_15nm, width, yerr=std_15nm, ecolor='black',capsize=5, color='c')
plt.bar(x_pos+(2*width), mean_2nm, width, yerr=std_2nm, ecolor='black',capsize=5, color='lightslategray')
plt.title(r'No. of PLGA residues Rg restrained', fontsize=15)
plt.xticks(x_pos+width, labels=aa_types, fontsize=12)  # center labels on the middle bar
plt.legend(['Rg = 1.2 nm', 'Rg = 1.5 nm', 'Rg = 2 nm'], frameon=False)
plt.ylabel(r'No. of PLGA residues', fontsize=15)
```
### Protein/polymer contact map movie
```
# Sanity checks on the loaded 2 nm contact movie before rendering
np.where(trj_load2nm_res[9920] == 0 )
trj_load2nm_res[9920]
trj_load2nm_res.shape
# --- Animated BSA/PLGA contact-map movie for the 2 nm restrained run ---
fig = plt.figure(figsize=(10,10))
# Set the axis and the plot titles pp
plt.title("BSA/PLGA contact map 2 nm restrained", fontsize=22, loc='left')
plt.xlabel("PLGA Residue No.", fontsize=22)
plt.ylabel("BSA Residue No.", fontsize=20)
# Set the axis range (y inverted so BSA residue 1 is at the top)
plt.ylim(583, 0)
plt.xlim(0, 60)
# Plot bands for each chain: (start_residue, stop_residue, color, chain label)
BANDS = (
    (0, 20, "purple", "B"),
    (20, 40, "blue", "C"),
    (40, 60, "green", "D"),
)
text_y = 0.98 # Close to the top
for start, stop, color, band in BANDS:
    # Shade the 20-residue span belonging to this PLGA chain
    plt.axvspan(start, stop,color=color, alpha=0.15)
    # NOTE(review): the label is drawn with transform=transAxes (0-1 fractions);
    # confirm middle_of_band returns axes-fraction, not data, coordinates.
    text_x = middle_of_band(start,stop)
    plt.text(
        text_x,
        text_y,
        "PLGA chain " + band,
        color=color,
        fontsize=18,
        transform=fig.gca().transAxes,
        horizontalalignment='center',
        verticalalignment='center',
        style='italic',
    )
# Static header for the running simulation-time readout (updated per frame below)
plt.text(0.94, 1, "Time [ns]:", fontsize=20, transform=fig.gca().transAxes, horizontalalignment='right', verticalalignment='bottom')
# Set tick label size
fig.gca().tick_params(axis='both', which='major', labelsize=20)
# One (contact-map image, time label) artist pair per frame
ims = []
for i in range(10000):
    data = trj_ppmap_2nm[i]
    im = plt.imshow(data, aspect='auto', cmap='Greys')
    # str(i/100): frame index in ns, assuming 100 frames per ns -- TODO confirm timestep
    t_sim = plt.text(1.03, 1, str(i/100), fontsize=20, transform=fig.gca().transAxes, horizontalalignment='right', verticalalignment='bottom')
    ims.append([im, t_sim])
ani = animation.ArtistAnimation(fig, ims, blit=True, repeat=False)
ani.save('2nm_res.mp4', writer='ffmpeg', fps=50, bitrate=100000)
#plt.tight_layout()
#plt.show()
```
| github_jupyter |
# Recommending Music Artists with Boltzmann Machines
### Release 1.0 - June, 2020
### Paulo Breviglieri
## Foreword
<p style="text-align: justify">This notebook describes how a simple <b>Boltzmann machine</b> can assist with <b>music artist recommendations</b> based on artist popularity records collected by <b>last.fm</b>, a UK-based music website and app that supplies recommendations based on detailed user profiles constructed from 'scrobbles' - tracks listened to from diverse music streaming apps, like Spotify and SoundHound, and internet radio stations.</p>
<p style="text-align: justify">The case study described herein has an educational nuance and aims at highlighting how a simple <b>unsupervised learning</b> solution addresses a specific problem. Actual commercial recommendation platforms are incredibly more powerful, sophisticated, comprehensive and rely on more advanced artificial intelligence technologies. Positioning the Boltzmann machine developed here as an alternative for a real world implementation is definitely not an intent of this exercise.</p>
<p style="text-align: justify">The author also highlights specific singularities of this notebook not usually found in traditional machine learning exercises:</p>
<ul>
<li style="text-align: justify">Instead of being built upon existing machine learning frameworks, the Boltzmann machine is <b>hard coded as a Python class</b>;</li>
<li style="text-align: justify">In addition to the traditional performance assessment based on accuracy and loss metrics, a <b>subjective test</b> is performed at the end to verify the appropriateness of actual music artist recommendations delivered by the Boltzmann machine to two specific users with different music gender preferences.</li>
</ul>
<p style="text-align: justify">Section 1 includes a summarized, brief mathematical description of Boltzmann machines, not intended to be exhaustive and comprehensive at all. Readers interested only in the practical implementation may skip it.</p>
<p style="text-align: justify">The latest code release may be found in the author's <a href="https://github.com/pcbreviglieri">GitHub</a> repository. Logic enhancement and code forking are welcome and encouraged by the author, provided that this work is properly referenced. Thank you.</p>
## 1. Introduction
### 1.1. Unsupervised learning
<p style="text-align: justify">Unlike supervised learning, where machines are typically fed with pairs of known inputs and corresponding outputs to direct the learning and self-adaptation processes, in unsupervised learning machines are supplied with unlabeled data and are expected to learn probability distributions from inference, with minimum or no human guidance. As a result, unsupervised learning machines have distinct architectures and functional principles. Traditional concepts found in supervised learning, like feedforward propagation and input / output layers, are not applicable - or have different interpretations - in unsupervised learning.</p>
### 1.2. Boltzmann machines
<p style="text-align: justify">Boltzmann machines are unsupervised learning neural networks pertaining to a family of solutions not as widely and popularly explored as others: <b>energy based models</b>. Put simply, in energy based models the system under analysis will always self-adapt to changes in its constituent elements in pursuit of the lowest possible overall compounded energy state. This concept is central in several fields of Physics, from thermodynamics to quantum mechanics.</p>
<p style="text-align: justify">A traditional Boltzmann machine comprises a finite set of neurons interconnected with each other.</p>
<p style="text-align: justify">Neurons - or <b>nodes</b> - are in turn subdivided into two specific sets: <b>visible</b> and <b>hidden</b>, as described in the picture below.</p>
<img src="https://i.imgur.com/u5G1z1L.png" width="500" height="100">
<p style="text-align: justify">In an analogy with the human body, the author likes to think of <b>visible nodes</b> as the neural terminations we have in our eyes, nose, tongue and skin to capture external stimulus such as images, sounds, smells, tastes and sensations like heat, cold and pain. Accordingly, <b>hidden nodes</b> might be interpreted as the set of neurons in our brains responsible to process the information received from the neural terminations and take action - always aiming at self-adapting and conducting the organism to the most appropriate state - the 'lowest energy' state - for a given set of external circumstances.</p>
<p style="text-align: justify">A pictorial example: if the neurons located in the optic nerves on and behind our eyes' retina (the 'visible nodes') experience a sudden increase in external light intensity, this information is quickly delivered to the neurons located in the brain's occipital lobe (the 'hidden nodes') who, in turn, process this information and 'instruct' the eye muscles to constrict pupils, so that the amount of light reaching the retina is reduced and retina cells are preserved. The overall organism optical network is thus taken to a new optimal 'state' that will last until the environment perceived by the visible nodes change again.</p>
<p style="text-align: justify">In a Boltzmann machine, all nodes are connected to each other. However, the adoption of such architecture in practical implementations demands higher processing and memory assignment capabilities, reason why a simplified version of Boltzmann machines - so called Restricted Boltzmann Machines (RBMs) - gained traction and became widely used. In RBMs, connections between visible nodes and hidden nodes are maintained, while connections among visible nodes and connections among hidden nodes are eliminated, as illustrated below.</p>
<img src="https://i.imgur.com/kNI7eLL.png" width="500" height="100">
<p style="text-align: justify">The relationship between visible and hidden nodes is governed by the weights of the corresponding connections. In addition, biases are assigned to both hidden ('a') and visible ('b') nodes.</p>
<img src="https://i.imgur.com/G0DvbUp.png" width="500" height="100">
<p style="text-align: justify">As energy based models in which the objective will always be to self-adapt pursuing the lowest scalar energy state possible, RBMs are governed by one fundamental equation describing the total 'energy' of the network in terms of the values ('states') of visible (v) and hidden (h) nodes along with weights (w) and biases (a, b), all expressed as tensors:</p>
<p style="text-align: center; font-size: 20px">${E(v,h)} = - \sum \limits _{i} a_{i} v_{i} - \sum \limits _{j} b_{j} h_{j} - \sum \limits _{i,j} v_{i} h_{j} w_{ij} $</p>
<p style="text-align: justify">Over training, the RBM weights and biases will be adjusted in order to minimize the overall network energy.</p>
<p style="text-align: justify">Another relevant aspect of RBMs is their nature: RBMs are probabilistic models. Remember that at any moment the RBM will be in a particular state given by the values stored in visible and hidden nodes (neurons), linked by weights and biases. The model will operate based on the probability that a certain state of v and h can be observed. In mathematical terms, such probability will be governed by a joint distribution - the Boltzmann Distribution, after which this type of learning machine is named:</p>
<p style="text-align: center; font-size: 20px">${p(v,h)} = \frac{1}{Z} e^{-E(v,h)}$</p>
<p style="text-align: center">where Z, the 'partition function', is given by:</p>
<p style="text-align: center; font-size: 20px">${Z} = \sum \limits _{v,h} e^{-E(v,h)}$</p>
<p style="text-align: justify"> In Physics, the Boltzmann distribution furnishes the probability of a particle being observed in a given state with energy E. In a RBM, we are interested in the probability to observe a state of v and h based on the overall model energy. As the calculation of the joint Boltzmann probability would be complex in networks with a large numbers of combinations of visible and hidden node values (v and h), the analysis focuses instead on the calculation of the conditional probabilities of hidden nodes being in a particular state <b>given</b> the state of the visible nodes, denoted as <b>p(h|v)</b>, and also the conditional probabilities of visible nodes being in a particular state <b>given</b> the state of the hidden nodes, denoted as <b>p(v|h)</b>:</p>
<p style="text-align: center; font-size: 20px">${p (h | v)} = \prod \limits _{i} {p (h _{i} | v)}$</p>
<p style="text-align: center; font-size: 20px">${p (v | h)} = \prod \limits _{i} {p (v _{i} | h)}$</p>
<p style="text-align: justify">In RBMs the values assigned to neurons (nodes) are <b>binary</b>. In other words, in RBMs we deal with 'activated' and 'non activated' states for both visible and hidden nodes.</p>
<p style="text-align: justify">This fact allows us to derive the conditional probabilities above for the cases of hidden nodes assuming a value equal to 1 (given visible nodes at certain states) and also the conditional probabilities for the cases of visible nodes assuming a value equal to 1 (given hidden nodes at certain states). After applying the Bayes rule to conditional probabilities, we obtain:</p>
<p style="text-align: center; font-size: 20px">${p (h_{j}=1 | v)} = \frac {1}{1 + e^{(- (b_{j} + W_{j} v_{i}))}} = \sigma (b_{j} + \sum \limits _{i} v_{i} w_{ij})$</p>
<p style="text-align: center; font-size: 20px">${p (v_{i}=1 | h)} = \frac {1}{1 + e^{(- (a_{i} + W_{i} h_{j}))}} = \sigma (a_{i} + \sum \limits _{j} h_{j} w_{ij})$</p>
<p style="text-align: justify">where $\sigma$ is our well known sigmoid function!</p>
### 1.3. Training Boltzmann machines
<p style="text-align: justify">RBMs are trained in a very unique, two-step approach. Details may be found in "<em><b>A fast learning algorithm for deep belief nets</b></em>" (G.E. Hinton, S. Osindero, Department of Computer Science, University of Toronto, YW. Teh, Department of Computer Science, National University of Singapore), in which Dr. Hinton and his co-authors describe the use of 'complementary priors' to "derive a fast, greedy algorithm that can learn deep, directed belief networks".</p>
#### 1.3.1. Step 1 - Gibbs sampling
<p style="text-align: justify">This is an interactive process comprising the following steps, as pictured below:</p>
<ul>
<li style="text-align: justify">An input tensor $v_{0}$ containing binary constituent elements (1's and 0's) of a given observation is fed into visible nodes;</li>
<li style="text-align: justify">The activation of hidden nodes, given this input tensor, is predicted via $p(h|v_{0})$</li>
<li style="text-align: justify">A new activation of visible nodes, given the previous activation of hidden nodes, is predicted via $p(v|h)$</li>
<li style="text-align: justify">The two last steps are repeated k times until a last activation of visible nodes $v_{k}$ is predicted.</li>
</ul>
<img src="https://i.imgur.com/iCRhhLs.png" width="500" height="100">
#### 1.3.2. Step 2 - Contrastive divergence
<p style="text-align: justify">Updating weights (and biases) is the primary objective of any neural network training program.</p>
<p style="text-align: justify">In the case of RBMs, the weight tensor is updated through a method called Contrastive Divergence. In a summarized fashion, the activation probabilities of hidden node tensors $h_{0}$ and $h_{k}$ are calculated from visible node tensors $v_{0}$ and $v_{k}$. The difference between the <b>outer products</b> of such activation probabilities with input tensors $v_{0}$ and $v_{k}$ will lead to an updated version of the weight tensor:</p>
<p style="text-align: center; font-size: 20px">$\Delta W = v_{0} \otimes {p (h_{0} | v_{0})} - v_{k} \otimes {p (h_{k} | v_{k})}$</p>
<p style="text-align: justify">At last, a new set of updated weights at step 'm' can be estimated with gradient ascent:</p>
<p style="text-align: center; font-size: 20px">$W_{m} = W_{m-1} + \Delta W$</p>
## 2. Objectives of this deep learning exercise
<p style="text-align: justify">The primary goal of this work is educational, as is the norm in most of the author's notebooks.</p>
<p style="text-align: justify">A Restricted Boltzmann Machine (RBM) is used to provide music artist recommendations to a particular individual based on:</p>
<ul>
<li style="text-align: justify">Music artist popularity records generated by a multitude of platform users and maintained by last.fm;</li>
<li style="text-align: justify">The set of preferred artists enjoyed by one specific individual.</li>
</ul>
<p style="text-align: justify">In other words, the machine will identify the subset of music artists appreciated by a particular user (artists A, B, C and D, for example) and offer a second subset of music artists this particular user might be interested in (artists W, X, Y and Z), based on his/her preferences.</p>
<p style="text-align: justify">Specific expertise will be developed in this exercise, including: </p>
<ul>
<li style="text-align: justify">The construction of a RBM in the form of a Python class that will be later instantiated;</li>
<li style="text-align: justify">The generation of predictions for two specific users, with different musical preferences, in addition to the crude (and cold) performance assessment procedures based on the quantification of error metrics;</li>
<li style="text-align: justify">The use of PyTorch as the deep learning framework of choice.</li>
</ul>
<p style="text-align: justify">A special note on the framework selection. Comparing competitive machine learning frameworks is <em><b>NOT</b></em> an objective herein. Instead, the goal is to highlight that similar frameworks may serve the same purpose regardless of their popularity. PyTorch was adopted in this particular case simply because it offered the author a straightforward coding path that might as well be delivered by other frameworks. No performance assessment guided this selection.</p>
### 2.1. Experimenting an alternative approach with Boltzmann machines
<p style="text-align: justify">As discussed in Section 1.2, Boltzmann machines are probabilistic models. Ideally, visible and hidden nodes would be dynamically activated (1) or not (0), representing an immense multitude of different machine states.</p>
<p style="text-align: justify">However, the last.fm dataset contains total numbers of scrobbles generated per user, per music artist - a direct measure of artist popularity we intend to explore with the RBM. Furthermore, the dataset includes:</p>
<ul>
<li style="text-align: justify">Both heavy users, who have generated dozens, even hundreds of thousands of 'scrobbles' for selected artists, and light users, who produced few 'scrobbles' for few artists. The model must ideally not allow that heavy user preferences overshadow those of light users;</li>
<li style="text-align: justify">For every user, the number of scrobbles per artist may vary from one to thousands. The model shall ideally not allow high scrobble counts per user to drastically overshadow low scrobble counts per user.</li>
</ul>
<p style="text-align: justify">Having said that, instead of feeding the model with simple binary inputs, which would correspond to a set of 'scrobbled' and 'not scrobbled' music artists for a given user, the machine will be supplied with scaled inputs ranging from 0 to 1 for each 'scrobbled' music artist, on a per user basis. For a given user, a scaled number of scrobbles of a given artist close to '0' means that a small number of scrobbles was generated - the user briefly checked on that artist but the return rate was none or very small, a sign of low 'popularity' of that artist for that user. Inversely, a scaled number of scrobbles of a given artist, for a given user, close to '1' means that a large number of scrobbles was collected - the user listened to that artist several times with a high return rate, an indication of high 'popularity' of that artist for that user.</p>
<p style="text-align: justify">The traditional approach (i.e. assigning 1 to 'scrobbled' artists and '0' to 'not scrobbled' artists) would not bring to the analysis how popular a given artist is for a given user. An artist with 1 scrobble would be given the same weight as an artist with, let's say, 1,000 scrobbles. Let's see how the machine performs under these assumptions.</p>
<p style="text-align: justify">The selected approach is portrayed below.</p>
<img src="https://i.imgur.com/mZ8wjUz.png" width="500" height="100">
## 3. The dataset
<p style="text-align: justify">The dataset utilized in this deep learning exercise is a summarized, sanitized subset of the one released at <b>The 2nd International Workshop on Information Heterogeneity and Fusion in Recommender Systems</b> (HetRec 2011), currently hosted at the GroupLens website (<a href="https://grouplens.org/datasets/hetrec-2011/">here</a>).</p>
<p style="text-align: justify">Sanitization included: (a) artist name misspelling correction and standardization; (b) reassignment of artists referenced with two or more artist id's; (c) removal of artists listed as 'unknown' or through their website addresses.</p>
<p style="text-align: justify">The original dataset contains a larger number of files, including tag-related information, in addition to users, artists and scrobble counts. last.fm was contacted by the author and asked for some recent version of this content, in similar format, with no return until June 15th, 2020.</p>
<p style="text-align: justify">Two dataset files were selected and preprocessed for use in this work:</p>
<ol>
<li>'<b>lastfm_user_scrobbles.csv</b>' contains 92,792 scrobble counts ('scrobbles') for 17,493 artists ('artist_id') generated by 1,892 users ('user_id');</li>
<li>'<b>lastfm_artist_list.csv</b>' contains the list of 17,493 artists, referenced by an unique id code ('artist_id'), the same used in the first file.</li>
</ol>
## 4. Initial setup
### 4.1. Importing required libraries
<p style="text-align: justify">Along with traditional libraries imported for tensor manipulation and mathematical operations, <a href="https://pytorch.org/">PyTorch</a> is used in this exercise.</p>
```
# Standard library
import warnings
from datetime import datetime

# Third-party: tensor / dataframe tooling and the deep-learning framework
import numpy as np
import pandas as pd
import torch

# Silence library deprecation chatter in the notebook output
warnings.filterwarnings('ignore')

# Wall-clock reference used to report total notebook runtime at the end
start_time = datetime.now()
```
### 4.2. Hard coding a Boltzmann Machine
<p style="text-align: justify">The creation of machine learning models usually relies on the instantiation of predefined classes provided by frameworks such as PyTorch and Keras. In other words, a 'model' is coded in Python or R as an object - an instance of predefined classes. This allows for the use of the class implicit features and methods, making the coder's task much simpler and the final code itself cleaner and shorter.</p>
<p style="text-align: justify">As anticipated in Section 2, a different approach is proposed here. A Python class named 'RestrictedBoltzmannMachine' will be developed and further instantiated for the creation of a RBM model.</p>
<p style="text-align: justify">The 'RestrictedBoltzmannMachine' class comprises the following elements:</p>
<ol>
<li style="text-align: justify">An initialization module where the inherent tensors for weights and biases are defined;</li>
<li style="text-align: justify">Two methods (used internally by other methods) devoted to Gibbs sampling as described in Section 1.3.1; </li>
<li style="text-align: justify">One method devoted to the model training where, over several epochs and for several batches:</li>
<ul>
<li style="text-align: justify">Contrastive divergence is executed in 10 rounds ;</li>
<li style="text-align: justify">Weight (W) and biases (a and b) tensors are updated; </li>
<li style="text-align: justify">Losses are calculated;</li>
</ul>
<li style="text-align: justify">One method devoted to the model testing where test observations are fed into the RBM and compounded loss metrics calculated;</li>
<li style="text-align: justify">One method devoted to predicting recommendations for one particular observation (last.fm user);</li>
</ol>
<p style="text-align: justify">Please refer to docstrings for information on the machine structure and functionality.</p>
```
class RestrictedBoltzmannMachine():
    """
    Python implementation of a Restricted Boltzmann Machine (RBM) with 'c_nh' hidden nodes and 'c_nv' visible nodes.

    Training uses k-step contrastive divergence (k = 10) with the updates
    applied unscaled (i.e. an implicit learning rate of 1).
    """
    def __init__(self, c_nv, c_nh):
        """
        RBM initialization module where three tensors are defined:
        W - Weight tensor, shape (c_nh, c_nv)
        a - Hidden node bias tensor, shape (1, c_nh)
        b - Visible node bias tensor, shape (1, c_nv)
        ('a' biases the hidden layer -- it is added to activations in sample_h --
        and 'b' biases the visible layer, added in sample_v.)
        a and b are created as two-dimensional tensors to accommodate batches of observations over training.
        """
        self.W = torch.randn(c_nh, c_nv)
        self.a = torch.randn(1, c_nh)
        self.b = torch.randn(1, c_nv)
    def sample_h(self, c_vx):
        """
        Method devoted to Gibbs sampling probabilities of hidden nodes given visible nodes - p (h|v)
        c_vx - Input visible node tensor, shape (batch, c_nv)
        Returns (p(h|v), Bernoulli sample of the hidden layer), both (batch, c_nh).
        """
        c_w_vx = torch.mm(c_vx, self.W.t())
        # expand_as broadcasts the (1, c_nh) bias over the batch dimension.
        c_activation = c_w_vx + self.a.expand_as(c_w_vx)
        c_p_h_given_v = torch.sigmoid(c_activation)
        return c_p_h_given_v, torch.bernoulli(c_p_h_given_v)
    def sample_v(self, c_hx):
        """
        Method devoted to Gibbs sampling probabilities of visible nodes given hidden nodes - p (v|h)
        c_hx - Input hidden node tensor, shape (batch, c_nh)
        Returns (p(v|h), Bernoulli sample of the visible layer), both (batch, c_nv).
        """
        c_w_hx = torch.mm(c_hx, self.W)
        c_activation = c_w_hx + self.b.expand_as(c_w_hx)
        c_p_v_given_h = torch.sigmoid(c_activation)
        return c_p_v_given_h, torch.bernoulli(c_p_v_given_h)
    def train(self, c_nr_observations, c_nr_epoch, c_batch_size, c_train_tensor, c_metric):
        """
        Method through which contrastive divergence-based training is performed.
        c_nr_observations - Number of observations used for training
        c_nr_epoch - Number of training epochs
        c_batch_size - Batch size
        c_train_tensor - Tensor containing training observations
        c_metric - Training performance metric of choice ('MAbsE' for Mean Absolute Error, 'RMSE' for Root Mean Square Error)
        """
        print('Training...')
        for c_epoch in range(1, c_nr_epoch + 1):
            c_start_time = datetime.now()
            print(f'Epoch {str(c_epoch)} of {str(c_nr_epoch)} ', end='')
            c_train_loss = 0
            c_s = 0.
            for c_id_user in range(0, c_nr_observations - c_batch_size, c_batch_size):
                # c_v0 is the positive-phase anchor (the data); c_vk is the
                # Gibbs-chain state that will be resampled k times.
                c_v0 = c_train_tensor[c_id_user:c_id_user+c_batch_size]
                c_vk = c_train_tensor[c_id_user:c_id_user+c_batch_size]
                c_ph0,_ = self.sample_h(c_v0)
                # 10 rounds of alternating Gibbs sampling (CD-10).
                for c_k in range(10):
                    _,c_hk = self.sample_h(c_vk)
                    _,c_vk = self.sample_v(c_hk)
                    # Freeze entries marked negative in the data so they do not
                    # drive learning. NOTE(review): scaled scrobbles here are
                    # >= 0, so this looks like a no-op safeguard inherited from
                    # rating datasets that use -1 for "unrated" -- confirm.
                    c_vk[c_v0<0] = c_v0[c_v0<0]
                c_phk,_ = self.sample_h(c_vk)
                # CD update: positive phase statistics minus negative phase.
                self.W += (torch.mm(c_v0.t(), c_ph0) - torch.mm(c_vk.t(), c_phk)).t()
                self.b += torch.sum((c_v0 - c_vk), 0)
                self.a += torch.sum((c_ph0 - c_phk), 0)
                # Loss is accumulated only over non-negative (observed) entries.
                if c_metric == 'MAbsE':
                    c_train_loss += torch.mean(torch.abs(c_v0[c_v0>=0] - c_vk[c_v0>=0]))
                elif c_metric == 'RMSE':
                    # np.sqrt on a 0-dim torch tensor returns a tensor; works here.
                    c_train_loss += np.sqrt(torch.mean((c_v0[c_v0>=0] - c_vk[c_v0>=0])**2))
                c_s += 1.
            c_end_time = datetime.now()
            c_time_elapsed = c_end_time - c_start_time
            c_time_elapsed = c_time_elapsed.total_seconds()
            print(f'- Loss ({c_metric}): {c_train_loss/c_s:.8f} ({c_time_elapsed:.2f} seconds)')
    def test(self, c_nr_observations, c_train_tensor, c_test_tensor, c_metric):
        """
        Method through which testing is performed.
        c_nr_observations - Number of observations used for testing
        c_train_tensor - Tensor containing training observations
        c_test_tensor - Tensor containing testing observations
        c_metric - Training performance metric of choice ('MAbsE' for Mean Absolute Error, 'RMSE' for Root Mean Square Error)
        """
        print('Testing...')
        c_test_loss = 0
        c_s = 0.
        for c_id_user in range(c_nr_observations):
            # Feed the user's training row through one Gibbs step and compare
            # the reconstruction against the held-out test row.
            c_v = c_train_tensor[c_id_user:c_id_user+1]
            c_vt = c_test_tensor[c_id_user:c_id_user+1]
            # Skip users with no observed entries in the test set.
            if len(c_vt[c_vt>=0]) > 0:
                _,c_h = self.sample_h(c_v)
                _,c_v = self.sample_v(c_h)
                if c_metric == 'MAbsE':
                    c_test_loss += torch.mean(torch.abs(c_vt[c_vt>=0] - c_v[c_vt>=0]))
                elif c_metric == 'RMSE':
                    c_test_loss += np.sqrt(torch.mean((c_vt[c_vt>=0] - c_v[c_vt>=0])**2))
                c_s += 1.
        print(f'Test loss ({c_metric}): {c_test_loss/c_s:.8f}')
    def predict(self, c_visible_nodes):
        """
        Method through which predictions for one specific observation are derived.
        c_visible_nodes - Tensor containing one particular observation (set of values for each visible node)
        Returns the reconstructed visible layer after one hidden/visible Gibbs step.
        """
        c_h_v,_ = self.sample_h(c_visible_nodes)
        c_v_h,_ = self.sample_v(c_h_v)
        return c_v_h
```
### 4.3. Creating purposed functions
<p style="text-align: justify">Two specific customized functions address specific needs:</p>
<ul>
<li style="text-align: justify">'<b><em>convert</em></b>' essentially takes the original last.fm dataset table and produces a tensor where rows will correspond to specific platform users, columns will correspond to individual artists and the cell contents will contain the number of hits a particular artist received from a particular user;</li>
<li style="text-align: justify">'<b><em>preferred_recommended</em></b>' will initially identify and print the top 'x' artists most reverenced by a specific user and, subsequently, print the top 'x' music artists most recommended to this particular user, excluding those who may be already included in the reverenced list (i.e. new recommendations only).</li>
</ul>
```
def convert(f_data, f_nr_observations, f_nr_entities):
    """
    Rearrange a (user, entity, hits) table into a dense per-user matrix.

    Produces a list of lists: one inner list per user (rows), one slot per
    entity (columns), each cell holding that user's hit count for that
    entity (0 when the user never hit the entity).

    f_data - Input table (numpy array) with columns [user_id, entity_id, hits]
    f_nr_observations - Number of observations (users)
    f_nr_entities - Number of entities hit in each observation
    """
    converted = []
    for user in range(1, f_nr_observations + 1):
        mask = f_data[:, 0] == user
        entity_ids = f_data[mask, 1].astype(int)
        hit_counts = f_data[mask, 2]
        row = np.zeros(f_nr_entities)
        # Entity ids are 1-based; columns are 0-based.
        row[entity_ids - 1] = hit_counts
        converted.append(list(row))
    return converted
def preferred_recommended(f_artist_list, f_train_set, f_test_set, f_model, f_user_id, f_top=10):
    """
    Print a user's most-scrobbled artists followed by fresh RBM recommendations.

    f_artist_list - DataFrame mapping artist_id to artist name
    f_train_set - Tensor of training observations (users 1..1514)
    f_test_set - Tensor of testing observations (users 1515 and above)
    f_model - A RBM machine learning model previously instantiated
    f_user_id - The user for which preferred artists will be assessed and recommendations will be provided
    f_top - Number of most preferred and most recommended music artists for user 'f_user_id'
    """
    # Users below id 1515 live in the training tensor, the rest in the test tensor.
    source = f_train_set if f_user_id < 1515 else f_test_set
    user_row = source[f_user_id - 1:f_user_id]
    prediction = f_model.predict(user_row).numpy()
    # Rank the artists the user actually scrobbled, highest first.
    observed = pd.Series(user_row.numpy()[0]).sort_values(ascending=False).iloc[:f_top]
    fan_ids = observed.index.values.tolist()
    print(f'\nUser {f_user_id} is a fan of...\n')
    for fan_id in fan_ids:
        # Tensor column i corresponds to artist_id i + 1.
        print(f_artist_list[f_artist_list.artist_id == fan_id + 1].iloc[0][1])
    ranked_ids = pd.Series(prediction[0]).sort_values(ascending=False).index.values.tolist()
    print(f'\nUser {f_user_id} may be interested in...\n')
    shown = 0
    cursor = 0
    # Walk down the ranked predictions, skipping artists the user already likes.
    while shown < f_top:
        candidate = ranked_ids[cursor]
        if candidate not in fan_ids:
            print(f_artist_list[f_artist_list.artist_id == candidate + 1].iloc[0][1])
            shown += 1
        cursor += 1
```
## 5. Generating 'user versus ratings' tensors for training and testing
<p style="text-align: justify">The scrobbles dataset is originally sorted based on ascending user ids. As generating recommendations for specific users is the ultimate objective of this exercise, it is necessary to maintain user scrobbles grouped. In addition, as roughly 20% of user scrobbles will be segregated in a test set:</p>
<ul>
<li>The training set will include the first 74,254 scrobbles, corresponding to users with 'user_id' ranging from 1 to 1,514;</li>
<li>The test set will include the remaining 18,538 scrobbles, corresponding to users with 'user_id' ranging from 1,515 to 1,892.</li>
</ul>
<p style="text-align: justify">A more sophisticated approach, with a random selection of user groups for the training and test sets, as well as a dynamic segregation of training users allowing for some cross-validation training, would add additional complexity and was not considered in this first release.</p>
```
scrobbles = pd.read_csv('../input/lastfm-music-artist-scrobbles/lastfm_user_scrobbles.csv', header = 0)
scrobbles.head()
```
<p style="text-align: justify">As anticipated in Section 2.1, scrobble counts are scaled on a per user basis with the code below. As some null values may be produced in the case of users who have generated only one scrobble for a given artist (maximum and minimum are the same, leading to a division by zero), the final scaled number of scrobbles of 0.5 is assigned to those users.</p>
```
# Min-max scale scrobble counts per user to [0, 1] so heavy listeners do not
# dominate. Users whose counts are all equal divide by zero -> NaN, which is
# assigned the neutral value 0.5 below.
scrobbles['scrobbles'] = scrobbles.groupby('user_id')[['scrobbles']].apply(lambda x: (x-x.min())/(x.max()-x.min()))
scrobbles['scrobbles'] = scrobbles['scrobbles'].fillna(0.5)
scrobbles.head()
# Fixed row split that keeps each user's scrobbles together:
# rows [0, 74254) cover user_ids 1..1514; the remainder (1515..1892) form the test set.
training_size = 74254
training_set = scrobbles.iloc[:training_size, :] # Until userID = 1514
test_set = scrobbles.iloc[training_size:, :] # Starting at userID = 1515
training_set = training_set.values
test_set = test_set.values
training_set.shape, test_set.shape
# Grid dimensions for the user-by-artist matrix: column 0 is user_id,
# column 1 is artist_id; take the max over both splits.
nr_users = int(max(max(training_set[:,0]), max(test_set[:,0])))
nr_artists = int(max(max(training_set[:,1]), max(test_set[:,1])))
nr_users, nr_artists
```
<p style="text-align: justify">At this point, both training and test sets are subsets of the original dataset, converted into numpy arrays. However, the model will be fed with a rearranged version of these tables, in which users will correspond to rows, artists to columns and the content of each cell will include the number of scrobbles generated by each user, for each artist.</p>
```
training_set = convert(training_set, nr_users, nr_artists)
test_set = convert(test_set, nr_users, nr_artists)
```
<p style="text-align: justify">At last, both sets are converted into PyTorch float tensors:</p>
```
training_set = torch.FloatTensor(training_set)
test_set = torch.FloatTensor(test_set)
```
## 6. Deep learning
### 6.1. Creating the RBM as an instance of a Python object
<p style="text-align: justify">The RBM is defined as having:</p>
<ul>
<li>a number of visible nodes corresponding to the number of music artists (1,793) - one visible node per artist;</li>
<li>a number of hidden nodes arbitrarily defined and tuned;</li>
</ul>
<p style="text-align: justify">Comments on hyperparameter selection, tuning and implications are provided in Section 5.</p>
```
# One visible node per artist; hidden-node count, batch size and epoch count
# were tuned empirically (see the discussion in Section 7).
nv = len(training_set[0])
nh = 100
batch_size = 1
epoch = 50
metric = 'MAbsE'  # 'MAbsE' or 'RMSE'
model = RestrictedBoltzmannMachine(nv, nh)
```
### 6.2. Training & testing the RBM
```
model.train(nr_users, epoch, batch_size, training_set, metric)
model.test(nr_users, training_set, test_set, metric)
```
### 6.3. Providing recommendations
<p style="text-align: justify">In addition to the traditional error metric-based performance assessment, it is of absolute importance to test the model through the generation of real recommendations for specific users. In order to do it wisely, two different users with evidently different music preferences were identified in the test set:</p>
<ul>
<li>user_id # 1515 seems to be a fan of pop music and female muses in particular;</li>
<li>user_id # 1789 seems to prefer progressive and heavy metal rock artists.</li>
</ul>
<p style="text-align: justify">Recommendations are generated for both. The code below lists the 10 most 'scrobbled' music artists for each of these users, followed by the 10 most recommended artists in each case. Results are discussed in Section 5.</p>
```
artist_list = pd.read_csv('../input/lastfm-music-artist-scrobbles/lastfm_artist_list.csv', header = 0)
```
<img src="https://i.imgur.com/vJKuVwM.png" width="500" height="100">
```
preferred_recommended(artist_list, training_set, test_set, model, 1515, 10)
```
<img src="https://i.imgur.com/E9cuCbo.png" width="500" height="100">
```
preferred_recommended(artist_list, training_set, test_set, model, 1789, 10)
```
## 7. Discussion and final remarks
<p style="text-align: justify">The Restricted Boltzmann Machine developed in this unsupervised learning exercise performed quite well from both the objective, error metric-based and the subjective, recommendation quality-based perspectives.</p>
<p style="text-align: justify">Some initial considerations on hyperparameters:</p>
<ul>
<li>Model variations with varied numbers of hidden nodes (25, 50, 100, 200, 500) were tested. Results were satisfactory (i.e. stable minimum losses and recommendations aligned with user profiles) with a minimum of 100 hidden nodes. No significant improvement was verified with larger numbers of hidden nodes;</li>
<li>The model accommodates observation batching for training. However, it has been noted over several simulation rounds that more accurate recommendations were obtained at the end with a batch size of 1;</li>
<li>Error metrics (Mean Absolute Error, or 'MAbsE') stabilize after 30 to 40 training epochs. A final number of 50 training epochs proved sufficient and was considered in the final release. </li>
</ul>
<p style="text-align: justify">Recommendations for the selected users were pretty much aligned with their most evident preferences. It shall though be noted that:</p>
<ul>
<li>The lists of preferred and recommended artists displayed include only the top 10 in each case. However, these lists are long for some users, case in which artists not displayed, but present in the preferred artist list, certainly have a weight on final recommendations;</li>
<li>The scrobble count scaling strategy described in Sections 2.1 and 4 proved effective. Simulations were performed without it, and although error metrics converged as expected, the final recommendations were very much biased with a clear predominance of only the most popular artists in the artist universe.</li>
</ul>
```
# Report total notebook wall-clock time (start_time was captured at the top).
end_time = datetime.now()
print('\nStart time', start_time)
print('End time', end_time)
print('Time elapsed', end_time - start_time)
```
| github_jupyter |
<a href="https://colab.research.google.com/github/dainis-boumber/av/blob/master/pretrain_av.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# ULMFiT + Siamese Network for Sentence Vectors
## Part One: Tokenizing
This notebook will tokenize the sentences from the SNLI dataset for use in the next notebook
### You must have the fastai library installed
```
%reload_ext autoreload
%autoreload 2
%matplotlib inline
from ipyexperiments import *
from fastai.text import *
from fastai import *
from fastai.callbacks import *
from fastai.callback import *
import json
import html
import re
import pickle
import random
import pandas as pd
import numpy as np
from pathlib import Path
import sklearn
from sklearn import model_selection
from functools import partial
from collections import Counter, defaultdict
from pandas.io.json import json_normalize
import numpy as np
import torch
import torch.nn as nn
import torch.utils
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
from torch.utils.data import dataset, dataloader
import torch.optim as optim
import torch.nn.functional as F
from nltk.tokenize import sent_tokenize
import time
import math
import sys
import data
import joblib
# Output locations for tokenized data and trained models.
token_files = './data/PAN14/tokens/'
model_files = './data/PAN14/models/'
# PAN-2014 authorship-verification corpora (English essays).
TRAINDATAPATH = "./data/PAN14/pan14_train_english-essays/"
TESTDATAPATH = "./data/PAN14/pan14_test01_english-essays/"
# Per-problem document file stems: five known-author documents plus the unknown one.
FNAMES = ['known01','known02','known03','known04','known05', 'unknown']
KCOLS=['known01','known02','known03','known04','known05']
LABELCOL="answer"
# NOTE: misspelling of "UNKNOWN" kept as-is -- the name is referenced elsewhere.
UNKOWN="unknown"
BOD = 'x_bod' # beginning-of-doc tag
re1 = re.compile(r' +')  # collapses runs of spaces into one

# Ordered (old, new) literal substitutions applied before HTML unescaping.
# Order matters: '\\n' must be handled before the bare '\\' rule.
_FIXUP_SUBS = [
    ('#39;', "'"), ('amp;', '&'), ('#146;', "'"), ('nbsp;', ' '),
    ('#36;', '$'), ('\\n', "\n"), ('quot;', "'"), ('<br />', "\n"),
    ('\\"', '"'), ('<unk>', 'u_n'), (' @.@ ', '.'), (' @-@ ', '-'),
    ('\\', ' \\ '),
]

def fixup(x):
    """Normalise scraped text: undo common HTML/escape artifacts, unescape
    remaining entities, and collapse repeated spaces."""
    for old, new in _FIXUP_SUBS:
        x = x.replace(old, new)
    return re1.sub(' ', html.unescape(x))
def read_dataset(path):
    """
    Parse one PAN14 corpus directory into a per-problem DataFrame.

    Reads truth.json for the problem list and answers, then walks the
    per-problem sub-directories filling one column per document file
    (known01..known05, unknown).

    path - Corpus root directory (must end with a path separator-friendly form;
           concatenated with '/' below).
    Returns (ds, docs): the indexed problem table and the flat list of all
    document texts in walk order.
    """
    ds=pd.read_json(path+'/truth.json')
    ds=json_normalize(ds['problems'])
    # Pre-create one column per document file so cells can be filled below.
    ds['known01']=None
    ds['known02']=None
    ds['known03']=None
    ds['known04']=None
    ds['known05']=None
    ds['unknown']=None
    ds.set_index('name', drop=True, inplace=True)
    ds=ds[['known01','known02','known03','known04','known05', 'unknown', 'answer']]
    dirs = []
    docs = []
    # os.walk yields the root first (i == 0), where the problem sub-directory
    # names are captured; subsequent iterations visit those directories in the
    # same order, so dirs[i-1] is the problem name for iteration i.
    # NOTE(review): assumes every file inside a problem directory is named
    # after one of the pre-created columns -- confirm no stray files exist.
    for i, x in enumerate(os.walk(path)):
        if i:
            for fname in x[2]:
                with open(path+dirs[i-1]+'/'+fname, 'r') as f:
                    text = f.read().strip()
                # Re-join sentence-tokenised text to normalise whitespace.
                doc = ' '.join(sent_tokenize(text)).strip()
                docs.append(doc)
                # fname[:-4] strips the '.txt' extension -> column name.
                ds.loc[dirs[i-1],fname[:-4]]=doc
        else:
            dirs = x[1]
    return ds, docs
def match_unknowns(path):
    """
    Build the PAN14 problem table augmented with synthetic negative pairs.

    Whenever the same 'unknown' document appears in more than one problem
    (e.g. both a 'Y' and an 'N' case), each known document of the 'N'
    problem is paired with the 'Y' problem's known documents as additional
    answer-'N' rows, enlarging the set of negative examples.

    path - Corpus root directory containing truth.json and problem folders.
    Returns (df, docs): the augmented DataFrame and the flat list of all
    non-empty documents it contains.
    """
    # Delegate parsing to read_dataset. (The original duplicated the whole
    # truth.json/table-building prelude here, then discarded it by calling
    # read_dataset anyway -- dead code removed.)
    ds, _ = read_dataset(path)
    # Group problems that share the same unknown document; only groups with
    # more than one member can yield Y/N recombinations.
    dupes = []
    for _utext, group in ds.groupby(['unknown']):
        if len(group.index) > 1:
            dupes.append(group)
    newrows = pd.DataFrame(columns=['known01','known02','known03','known04','known05', 'unknown'])
    for dupe in dupes:
        dupe.reset_index(drop=True, inplace=True)
        yes = dupe.loc[dupe.answer == "Y"]
        yes.reset_index(drop=True, inplace=True)
        no = dupe.loc[dupe.answer == "N"]
        no.reset_index(drop=True, inplace=True)
        for col in ['known01','known02','known03','known04','known05']:
            # Pair this known column of the 'N' problem with the 'Y' problem's
            # knowns. (The original guarded on `no[col] is not None`, which is
            # vacuously true for a Series; rows with a missing unknown are
            # dropped by the dropna below instead.)
            # pd.concat replaces the deprecated DataFrame.append.
            newrows = pd.concat([newrows,
                                 pd.DataFrame(data={'known01': yes.known01, 'known02': yes.known02,
                                                    'known03': yes.known03, 'known04': yes.known04,
                                                    'known05': yes.known05, 'unknown': no[col],
                                                    'answer': 'N'})],
                                sort=False)
    newrows = newrows.dropna(subset=['unknown'])
    df = pd.concat([ds, newrows])
    docs = []
    for col in FNAMES:
        docs.extend(df[col].tolist())
    docs = [d for d in docs if d is not None]
    return df, docs
def load_sentence_pairs(df):
    """
    Flatten a PAN14 problem table into (known, unknown, label) document pairs.

    Each row contributes up to five pairs -- one per non-None known column --
    all sharing the row's unknown document and its binary label
    (1 for answer 'Y', 0 for 'N').

    df - DataFrame with the KCOLS known columns, the UNKOWN column and LABELCOL.
    Returns a DataFrame with columns 'known', 'unknown', 'label'.
    """
    labels = [1 if label == 'Y' else 0 for label in df[LABELCOL].tolist()]
    unknowns = df[UNKOWN].tolist()
    # Hoisted out of the per-row loop: the original re-materialised every
    # known column via df[col].tolist() once per row (quadratic in rows).
    knowns_by_col = {col: df[col].tolist() for col in KCOLS}
    s0s = []
    s1s = []
    y = []
    for i, label in enumerate(labels):
        for col in KCOLS:
            s0 = knowns_by_col[col][i]
            if s0 is not None:
                s0s.append(s0)
                s1s.append(unknowns[i])
                y.append(label)
    pairs = pd.DataFrame(data={"known": s0s, "unknown": s1s, "label": y})
    return pairs
# Build the augmented training table and the plain test table.
df_train, docs = match_unknowns(TRAINDATAPATH)
df_test, _ = read_dataset(TESTDATAPATH)
# Flatten into (known, unknown, label) pairs. NOTE(review): val and test are
# BOTH derived from the same df_test -- there is no separate validation split.
sentence_pairs_train = load_sentence_pairs(df_train)
sentence_pairs_val = load_sentence_pairs(df_test)
sentence_pairs_test = load_sentence_pairs(df_test)
# Persist the pair tables for the downstream Siamese-network notebook.
joblib.dump(sentence_pairs_train, f'{model_files}traindf.pkl')
joblib.dump(sentence_pairs_val, f'{model_files}valdf.pkl')
joblib.dump(sentence_pairs_test, f'{model_files}testdf.pkl')
# Labels are irrelevant for language-model pretraining; zero them so the
# DataBunch treats every row uniformly.
sentence_pairs_train['label']=0
sentence_pairs_val['label']=0
sentence_pairs_test['label']=0
# Language model data
data_lm = TextLMDataBunch.from_df(model_files, sentence_pairs_train, sentence_pairs_val, sentence_pairs_test,
                                  text_cols=['known', 'unknown'], label_cols=['label'], mark_fields=True,
                                  min_freq=5, bs=64)
data_lm.save()
# First fine-tuning experiment: ULMFiT language model from WT103 weights,
# wrapped in an IPyExperiments context to track GPU/CPU memory.
exp1 = IPyExperimentsPytorch()
data_lm = TextLMDataBunch.load(model_files)
learn = language_model_learner(data_lm, pretrained_model=URLs.WT103_1, drop_mult=0.5, metrics=[accuracy], clip=0.15)
learn.lr_find()
learn.recorder.plot()
# BUG FIX: the original call passed a bare positional `wd` after keyword
# arguments, which is a SyntaxError; dropped it so the learner's default
# weight decay applies.
learn.fit_one_cycle(3, max_lr=slice(5e-04, 5e-02))
learn.save_encoder('5ft_enc1')
# Unfreeze all layer groups and continue at much lower, discriminative rates.
learn.unfreeze()
learn.lr_find()
learn.recorder.plot()
learn.fit(3, lr=slice(5e-6, 5e-5))
# learn.fit(epochs=1, 2e-2,)
learn.recorder.plot_lr(show_moms=False)
learn.fit_one_cycle(6, 3e-3)
learn.save_encoder('4ft_enc2')
learn.load_encoder('4ft_enc2')
learn.fit_one_cycle(2, 5e-6)
learn.fit_one_cycle(12, 5e-6)
learn.save_encoder('4ft_enc3')
learn.fit_one_cycle(6, 3e-3)
learn.save_encoder('4ft_enc4')
del exp1
# Second experiment: heavier dropout (drop_mult=1.0), custom Adam betas and
# live training graphs via the ShowGraph callback.
exp2 = IPyExperimentsPytorch()
data_lm = TextLMDataBunch.load(model_files)
learn = language_model_learner(data_lm, pretrained_model=URLs.WT103_1, drop_mult=1.0, metrics=[accuracy],
                               callback_fns=ShowGraph, opt_func=partial(optim.Adam, betas=(0.7, 0.99)),
                               clip=0.1)
learn.lr_find()
learn.recorder.plot()
learn.lr_find(start_lr=1e-10, end_lr=2)
learn.recorder.plot()
learn.fit_one_cycle(2, 1e-2)
learn.unfreeze()
learn.lr_find(start_lr=1e-10, end_lr=2)
learn.recorder.plot()
learn.fit_one_cycle(6, slice(1e-3))
learn.save_encoder('5ft_enc1')
learn.fit_one_cycle(12, slice(1e-3))
learn.recorder.plot_lr(show_moms=True)
learn.save_encoder('ft_enc3')
learn.load_encoder('ft_enc3')
learn.unfreeze()
learn.save_encoder('ft_enc4')
learn.lr_find(start_lr=1e-8, end_lr=1)
learn.recorder.plot()
learn.clip = 0.1
learn.opt_func = partial(optim.Adam, betas=(0.7, 0.99))
learn.lr_find(start_lr=1e-6, end_lr=1)
learn.recorder.plot()
learn.fit_one_cycle(2, 2e-5)
# BUG FIX: the original `del exp1` would raise NameError -- exp1 was already
# deleted at the end of the first experiment; this context object is exp2.
del exp2
```
| github_jupyter |
<a href="https://qworld.net" target="_blank" align="left"><img src="../qworld/images/header.jpg" align="left"></a>
$ \newcommand{\bra}[1]{\langle #1|} $
$ \newcommand{\ket}[1]{|#1\rangle} $
$ \newcommand{\braket}[2]{\langle #1|#2\rangle} $
$ \newcommand{\dot}[2]{ #1 \cdot #2} $
$ \newcommand{\biginner}[2]{\left\langle #1,#2\right\rangle} $
$ \newcommand{\mymatrix}[2]{\left( \begin{array}{#1} #2\end{array} \right)} $
$ \newcommand{\myvector}[1]{\mymatrix{c}{#1}} $
$ \newcommand{\myrvector}[1]{\mymatrix{r}{#1}} $
$ \newcommand{\mypar}[1]{\left( #1 \right)} $
$ \newcommand{\mybigpar}[1]{ \Big( #1 \Big)} $
$ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $
$ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $
$ \newcommand{\onehalf}{\frac{1}{2}} $
$ \newcommand{\donehalf}{\dfrac{1}{2}} $
$ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $
$ \newcommand{\vzero}{\myvector{1\\0}} $
$ \newcommand{\vone}{\myvector{0\\1}} $
$ \newcommand{\stateplus}{\myvector{ \sqrttwo \\ \sqrttwo } } $
$ \newcommand{\stateminus}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $
$ \newcommand{\myarray}[2]{ \begin{array}{#1}#2\end{array}} $
$ \newcommand{\X}{ \mymatrix{cc}{0 & 1 \\ 1 & 0} } $
$ \newcommand{\I}{ \mymatrix{rr}{1 & 0 \\ 0 & 1} } $
$ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $
$ \newcommand{\Htwo}{ \mymatrix{rrrr}{ \frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} } } $
$ \newcommand{\CNOT}{ \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $
$ \newcommand{\norm}[1]{ \left\lVert #1 \right\rVert } $
$ \newcommand{\pstate}[1]{ \lceil \mspace{-1mu} #1 \mspace{-1.5mu} \rfloor } $
$ \newcommand{\greenbit}[1] {\mathbf{{\color{green}#1}}} $
$ \newcommand{\bluebit}[1] {\mathbf{{\color{blue}#1}}} $
$ \newcommand{\redbit}[1] {\mathbf{{\color{red}#1}}} $
$ \newcommand{\brownbit}[1] {\mathbf{{\color{brown}#1}}} $
$ \newcommand{\blackbit}[1] {\mathbf{{\color{black}#1}}} $
<font style="font-size:28px;" align="left"><b> Problem Set 1 for Probabilistic Systems </b></font>
<br>
_prepared by Abuzer Yakaryilmaz_
<br><br>
<ol>
<li>The vector $ v = \myvector{0.1 \\ x \\ 0.2 \\ y} $ is a probabilistic state. Give three example pairs of $ (x,y) $ such that
<ol>
<li>$ v $ has at least one zero entry,</li>
<li>$ x=y $, and</li>
<li>$ x \neq y $ and $ v $ does not have any zero entry.</li>
</ol><hr>
</li>
<li> We have a probabilistic system with three states, say $s_1$, $s_2$, and $s_3$. The system updates its states with respect to the following transition rules: <br><br>
$
\begin{array}{lclclcl}
s_1 & \xrightarrow{0.1} & s_1 & ~;~ & s_1 & \xrightarrow{0.3} & s_2 \\
s_2 & \xrightarrow{0.4} & s_2 & ~;~ & s_2 & \xrightarrow{0.15} & s_3 \\
s_3 & \xrightarrow{0.35} & s_1 & ~;~ & s_3 & \xrightarrow{0.6} & s_3 \\
\end{array},
$ <br>
where the transition probability from state $ s_i $ to state $ s_j $, say $p$, is denoted as $ s_i \xrightarrow{p} s_j $ for $ 1 \leq i,j \leq 3 $. Write down the complete transition matrix representing the updates of this system. <hr>
</li>
<li> The matrix $ M = \mymatrix{cc}{0.2 & a \\ b & 0.3} $ is a probabilistic operator. If operator $ M $ is applied to probabilistic state $ \myvector{a \\ 1-a} $, then what is the next probabilistic state? <hr>
</li>
<li> We have a probabilistic bit and it is in probabilistic state $ v_0 = \myvector{ 0 \\ 1 } $. Then, we apply the probabilistic operator $ N = \mymatrix{cc}{ 0.4 & 0.2 \\ 0.6 & 0.8 } $ three times and obtain the probabilistic states $ v_1 $, $ v_2 $, and $ v_3 $, respectively. Calculate $ v_1 $, $ v_2 $, and $ v_3 $ <hr>
</li>
<li> We have a probabilistic system with two states. Its initial probabilistic state is $ v = \myvector{ 0.8 \\ 0.2 } $. <br><br>
<ol>
<li>Calculate the probabilistic state if the probabilistic operator $ M = \mymatrix{cc}{ 0.3 & 0.4 \\ x & y } $ is applied once?</li><br>
<li>Calculate the probabilistic state if the probabilistic operator $ M = \mymatrix{cc}{ 0.5 & 0.5 \\ x & y } $ is applied twice?</li>
</ol> <hr>
</li>
<li> The matrix $ M = \mymatrix{ccc}{a & 0.1 & 0.2 \\ 0.3 & b & 0.4 \\ 0.5 & 0.6 & c} $ is a probabilistic operator. What are the values of $ a $, $ b $, and $ c $? <br><br> The vector $ v = \myvector{ b \\ b \\ c } $ is a probabilistic state. Calculate next probabilistic state if $ M $ is applied to $v$? <hr>
</li>
<li> We have a probabilistic system with three states called $ s_1 $, $s_2$, and $s_3$. We define a probabilistic operator $ M $ on this system as follows:
<ul>
<li>When the system is in $ s_1 $, it switches to other states with equal probability.</li>
<li>When the system is in $ s_2 $, it never switches to itself but it switches to the other two states with equal probability.</li>
<li>When the system is in $ s_1 $, it never switches to the states $ s_1 $ and $ s_3 $.</li>
</ul>
Write down all entries of $ M $. <br><br>
What is the next probabilistic state if $ M $ is applied to the probabilistic state $ \myvector{ \frac{1}{6} \\ \frac{1}{2} \\ \frac{1}{3} } $? <hr>
</li>
<li> We have a composite system with two probabilistic bits. What are the vector representations of the states $ 01 $ and $ 10 $ <hr>
</li>
<li> We have a system composed by two probabilistic bits. The probability of being in the state $ 1 $ in the first bit is 0.7, and the probability of being in the state $0 $ in the second bit is $ 0.1 $.
<ol>
<li>What is the probabilistic state of the first bit?</li>
<li>What is the probabilistic state of the second bit?</li>
<li>What is the probabilistic state of the composite system?</li>
</ol> <hr>
</li>
<li> We have a composite system with three probabilistic bits. Its probabilistic state is $ \myvector{ 0 \\ a \\ 2a \\ 0 \\ 3a \\ 0 \\ 3a \\ a } $.
<ol>
<li>What is the probability of being in the state $ 010 $?</li>
<li>What is the probability of being in the state $ 101 $?</li>
</ol><hr>
</li>
<li> We have a composite system with two probabilistic bits. The probabilities of being in the states $ 10 $, $01$, and $00$ are respectively 0.3, 0.2, and 0. What is the probabilistic state of this composite system as a vector? <hr>
</li>
<li>We have a composite system with two probabilistic bits. If the state of the composite system is <br><br>
$$ u = \myvector{0 \\ 0 \\ 0.2 \\ 0.8}, $$ <br>
then what is the probabilistic state of the first bit, say $ u_1 $, and what is the probabilistic state of the second bit, say $ u_2 $? (<i>Hint: $ u = u_1 \otimes u_2 $.</i>) <hr>
</li>
<li>We have three biased coins $ C_1 $, $ C_2 $, and $ C_3 $. The probabilities of getting ``Head'' after tossing coins $ C_1 $, $ C_2 $, and $ C_3 $ are respectively $ 0.2 $, $ 0.1 $, and $ 0.4 $. <br> <br> We define a composite probabilistic system with these coins where the $ i $th bit is represented by coin $ C_i $ for $ 1 \leq i \leq 3 $, and ``Head'' and ``Tail'' are represented by states 0 and 1, respectively.
<ol>
<li>What is the probabilistic state vector of the compose system after tossing all three coins once?</li>
<li>What is the probability of being in states $ 101 $ and $ 010 $ after tossing all three coins once?</li>
</ol><hr>
</li>
<li> We have three probabilistic bits called $ A $, $ B $, and $ C $. The initial probabilistic states of $ A $, $ B $, and $ C $ are respectively
<br><br>
$$ \myvector{0 \\ 1}, ~ \myvector{0.5 \\ 0.5}, \mbox{ and } \myvector{1 \\ 0} . $$
<br>
We apply the following probabilistic operators respectively to $ A $, $ B $, and $ C $:
<br><br>
$$
\mymatrix{cc}{ 0.3 & 0.2 \\ a & b },
\mymatrix{cc}{ 0.2 & 0.3 \\ c & d },
\mbox{ and }
\mymatrix{cc}{ e & f \\ 0.4 & 0.6 }.
$$
Calculate the new probabilistic state of each bit. <br><br>
We combine $ A $, $ B $, and $ C $ as a single composite system in the given order. What are the probabilities of this composite system being in the states $ 011 $, $100$, and $010$?<hr>
</li>
</ol>
| github_jupyter |
ใน ep นี้เราจะเรียนรู้งานที่สำคัญอีกอย่างหนึ่งใน NLP คือ งานแปลภาษาด้วยเครื่อง หรือ Machine Translation โดยใช้โมเดลแบบ Sequence to Sequence
โมเดล Seq2Seq จะประกอบด้วย 2 ฝั่ง เรียกว่า
* Encoder ภายในเป็น โมเดลแบบ RNN ทำหน้าที่รับข้อความภาษาต้นทางมา แล้วแปลงให้อยู่ในรูปของ Vector Representation
* Decoder ภายในเป็น โมเดลแบบ RNN เช่นกัน ทำหน้าที่รับ Vector Representation ไปสร้างเป็นข้อความภาษาปลายทาง ที่ต้องการ
เปรียบได้ง่าย ๆ ว่า เป็นโมเดลแบบ [RNN](https://www.bualabs.com/archives/3103/what-is-rnn-recurrent-neural-network-what-is-gru-gated-recurrent-unit-teach-how-to-build-rnn-gru-with-python-nlp-ep-9/) / [LSTM](https://www.bualabs.com/archives/3087/sentiment-classification-deep-learning-imdb-movie-reviews-positive-negative-deep-neural-network-awd-lstm-ulmfit-nlp-ep-8/) / [GRU](https://www.bualabs.com/archives/3103/what-is-rnn-recurrent-neural-network-what-is-gru-gated-recurrent-unit-teach-how-to-build-rnn-gru-with-python-nlp-ep-9/) 2 ตัว ต่อกัน รวมกันเป็นตัวเดียว มีเทคนิคการสร้างโมเดล อีกหลายแบบ เช่น ใช้ Attention, ป้อนข้อความย้อนหลัง, ป้อนข้อความสองรอบ, โมเดลสองทิศทาง, เพิ่มความลึกของโมเดล, etc. และนำไปประยุกต์ใช้ได้อีกหลายงาน เช่น Machine translation, Speech recognition, Video captioning, etc. จะอธิบายต่อไป
# 0. Install
```
%reload_ext autoreload
%autoreload 2
%matplotlib inline
```
Install Library ที่จำเป็น
```
## Colab
! curl -s https://course.fast.ai/setup/colab | bash
```
เช็ค GPU
```
! nvidia-smi
```
# 1. Import
Import Library ที่จะใช้ ในที่นี้คือ fastai และ fastai.text
```
from fastai import *
from fastai.text import *
from fastai.callbacks import *
from fastai.callbacks.mem import *
# import fastai.utils.collect_env
# fastai.utils.collect_env.show_install()
```
สำหรับ [Google Colab](https://www.bualabs.com/archives/1687/what-is-colab-open-jupyter-notebook-in-github-on-google-colab-create-open-in-colab-button-colab-ep-1/) เราจะกำหนด path ที่เก็บ DataSet และ Mount Google Drive
```
dataset = '26i-giga-fren'
## Google Colab
config_path = Path('/content/drive')
data_path_base = Path('/content/drive/My Drive/datasets/')
# ## VM
# config_path = Path(os.getenv("HOME"))
# data_path_base = Path(os.getenv("HOME")'/datasets/')
data_path = data_path_base/dataset
from google.colab import drive
drive.mount(str(config_path))
```
# 2. Dataset
ในเคสนี้เราจะใช้ giga-fren คือ Giga-word corpus สำหรับแปลภาษาระหว่างภาษาฝรั่งเศส-ภาษาอังกฤษ (French-English) จากงาน WMT2010 รวบรวมโดย Chris Callison-Burch
```
data_path
```
Download ไฟล์มาเก็บไว้ก่อน
```
! wget https://s3.amazonaws.com/fast-ai-nlp/giga-fren.tgz -P '{data_path}'
```
แตกไฟล์ ใส่ path ที่กำหนด
```
! tar xf '{data_path}/giga-fren.tgz' -C '{data_path}'
```
ดูว่ามีไฟล์อะไรบ้าง
```
path = data_path/'giga-fren'
path.ls()
```
ดูตัวอย่างข้อมูล ภาษาอังกฤษ
```
! head '{path}/giga-fren.release2.fixed.en'
```
มี 22520376 บรรทัด
```
! wc -l '{path}/giga-fren.release2.fixed.en'
```
ดูตัวอย่างข้อมูล ภาษาฝรั่งเศส จะเห็นว่าตรงกับด้านบน เป็นคู่ ๆ
```
! head '{path}/giga-fren.release2.fixed.fr'
```
มี 22520376 บรรทัด เท่ากัน
```
! wc -l '{path}/giga-fren.release2.fixed.fr'
```
# 3. Preprocess
เพื่อความง่ายในการเรียน และจะได้เทรนได้เร็ว Dataset ข้อมูลที่ได้มา เราจะมาคัดเฉพาะประโยคคำถามเท่านั้น
```
def readfile(d):
    """Read file `d` (resolved relative to the global `path`) and return a
    list of its lines with surrounding whitespace stripped.

    Uses a context manager so the file handle is closed deterministically
    (the original left it to the garbage collector).
    """
    with open(path/d, encoding='utf-8') as f:
        return [line.strip() for line in f]
# fr = readfile(path/'giga-fren.release2.fixed.fr')
# len(fr)
# en = readfile(path/'giga-fren.release2.fixed.en')
# len(en)
```
ใช้ [RegEx](https://www.bualabs.com/archives/3070/what-is-regular-expression-regex-regexp-teach-how-to-regex-python-nlp-ep-7/) สกัด เอาเฉพาะประโยคคำถามภาษาอังกฤษออกมา ที่ขึ้นต้นด้วย "Wh" และ ลงท้ายด้วย "?" และประโยคภาษาฝรั่งเศสที่คู่กัน ที่ลงท้ายด้วย ?
```
# Raw strings avoid Python's invalid-escape-sequence warning for `\?`.
# English questions must start with "Wh" and end with "?"; the paired French
# line only needs to end with "?".
re_eq = re.compile(r'^(Wh[^?.!]+\?)')
re_fq = re.compile(r'^([^?.!]+\?)')
en_fname = path/'giga-fren.release2.fixed.en'
fr_fname = path/'giga-fren.release2.fixed.fr'
# Lazily walk both files in lockstep; keep only pairs where BOTH sides match.
lines = ((re_eq.search(eq), re_fq.search(fq))
         for eq, fq in zip(open(en_fname, encoding='utf-8'), open(fr_fname, encoding='utf-8')))
qs = [(e.group(), f.group()) for e, f in lines if e and f]
```
ได้มาแล้ว Save ใส่ไฟล์ csv ไว้ก่อน
```
qs = [(q1, q2) for q1, q2 in qs]
df = pd.DataFrame({'fr': [q[1] for q in qs], 'en': [q[0] for q in qs]}, columns=['en', 'fr'])
df.to_csv(path/'questions_easy.csv', index=False)
path.ls()
```
โหลดไฟล์ CSV ขึ้นมา แสดงตัวอย่างข้อมูล ใน DataFrame
```
df = pd.read_csv(path/'questions_easy.csv')
df.head()
df.tail()
```
เพื่อให้ง่ายขึ้นอีก เราจะแปลงเป็นตัวเล็กให้หมด
```
df['en'] = df['en'].apply(lambda x: x.lower())
df['fr'] = df['fr'].apply(lambda x: x.lower())
df.head()
```
เนื่องจากข้อความทั้ง 2 ภาษา มีความยาวไม่เท่ากัน ประกาศฟังก์ชัน เรียงรวมข้อมูล โดยใส่ padding ให้ยาวเท่ากัน โมเดลจะได้ทำงานง่ายขึ้น
```
def seq2seq_collate(samples, pad_idx=1, pad_first=True, backwards=False):
    """Collate (x, y) token-id pairs into two padded LongTensor batches.

    Source and target are padded independently to the longest sequence of
    the batch with `pad_idx`.  `pad_first` places the padding before the
    tokens; with `backwards=True` both tensors are flipped along the
    sequence dimension (and the padding side flips accordingly).
    """
    samples = to_data(samples)
    max_len_x = max(len(s[0]) for s in samples)
    max_len_y = max(len(s[1]) for s in samples)
    # Start from all-padding tensors and copy each sequence into place.
    # (torch.full replaces the original zeros-then-add idiom.)
    res_x = torch.full((len(samples), max_len_x), pad_idx, dtype=torch.long)
    res_y = torch.full((len(samples), max_len_y), pad_idx, dtype=torch.long)
    if backwards: pad_first = not pad_first
    for i, s in enumerate(samples):
        if pad_first:
            res_x[i, -len(s[0]):] = LongTensor(s[0])
            res_y[i, -len(s[1]):] = LongTensor(s[1])
        else:
            # Fixed: the original had a stray trailing colon (`:len(s[0]):`).
            res_x[i, :len(s[0])] = LongTensor(s[0])
            res_y[i, :len(s[1])] = LongTensor(s[1])
    if backwards: res_x, res_y = res_x.flip(1), res_y.flip(1)
    return res_x, res_y
```
ประกาศ Class DataBunch สำหรับ Sequence to Sequence ที่ใช้ Collate ด้านบน และ Sampler แบบ SortishSampler สำหรับ Training Set และ SortSampler สำหรับ Validation Set
SortishSampler คือ เลือกข้อมูลออกมา ตามลำดับความยาวข้อความ ผสมด้วยความ Random เล็กน้อย เหมาะกับใช้สำหรับ Training Set
```
class Seq2SeqDataBunch(TextDataBunch):
    """DataBunch for sequence-to-sequence text data.

    Uses `seq2seq_collate` so source and target batches are padded
    independently, a `SortishSampler` for the training set (roughly sorts
    samples by length, with some randomness, so batches contain sequences
    of similar length) and a deterministic `SortSampler` for the others.
    """
    @classmethod
    def create(cls, train_ds, valid_ds, test_ds=None, path:PathOrStr='.',
               bs:int=32, val_bs:int=None, pad_idx=1, dl_tfms=None,
               pad_first=False, device:torch.device=None, no_check:bool=False,
               backwards:bool=False, **dl_kwargs) -> DataBunch:
        "Assemble the datasets into a seq2seq-aware `DataBunch`."
        datasets = cls._init_ds(train_ds, valid_ds, test_ds)
        val_bs = ifnone(val_bs, bs)
        # Both sides of each sample are padded to the batch max with pad_idx.
        collate_fn = partial(seq2seq_collate, pad_idx=pad_idx,
                             pad_first=pad_first, backwards=backwards)
        # Key = source-sentence length; bs//2 is the sampler's chunk size.
        train_sampler = SortishSampler(datasets[0].x,
                                       key=lambda t: len(datasets[0][t][0].data), bs=bs//2)
        train_dl = DataLoader(datasets[0], batch_size=bs, sampler=train_sampler, drop_last=True, **dl_kwargs)
        dataloaders = [train_dl]
        for ds in datasets[1:]:
            # Validation/test loaders: fully sorted by source length.
            lengths = [len(t) for t in ds.x.items]
            sampler = SortSampler(ds.x, key=lengths.__getitem__)
            dataloaders.append(DataLoader(ds, batch_size=val_bs, sampler=sampler, **dl_kwargs))
        return cls(*dataloaders, path=path, device=device, collate_fn=collate_fn, no_check=no_check)
# SortishSampler??
```
Seq2SeqTextList มี Label เป็น TextList
```
class Seq2SeqTextList(TextList):
    # A TextList whose labels are themselves texts (the target-language
    # sentences) and whose DataBunch applies the seq2seq collate/samplers.
    _bunch = Seq2SeqDataBunch
    _label_cls = TextList
```
กำหนด [Data Pipeline ด้วย Data Block API](https://www.bualabs.com/archives/2693/data-block-api-data-pipeline-machine-learning-supervised-learning-preprocessing-ep-5/)
[Training Set / Validation Set Split](https://www.bualabs.com/archives/532/what-is-training-set-why-train-test-split-training-set-validation-set-test-set/) ด้วยการ Random แล้วให้ข้อมูลตัวอย่าง x เป็นประโยคภาษาฝรั่งเศส จาก DataFrame Column fr และ แปะ Label y เป็นประโยคภาษาอังกฤษ จาก DataFrame Column en
```
src = Seq2SeqTextList.from_df(df, path=path, cols='fr').split_by_rand_pct(seed=42).label_from_df(cols='en', label_cls=TextList)
```
ดูตำแหน่ง Token ที่ความยาว 90%
ได้โดยเฉลี่ยไม่เกิน 28 Token สำหรับภาษาฝรั่งเศส และ 23 Token สำหรับ ภาษาอังกฤษ
```
np.percentile([len(o) for o in src.train.x.items] + [len(o) for o in src.valid.x.items], 90)
np.percentile([len(o) for o in src.train.y.items] + [len(o) for o in src.valid.y.items], 90)
```
เพื่อความง่าย เราจะลบ ประโยคที่ยาวกว่า 30 Token ในภาษาใดภาษาหนึ่ง (ไม่ถึง 10%) ทิ้งไป
```
src = src.filter_by_func(lambda x, y: len(x) > 30 or len(y) > 30)
```
เหลือ 48350 คู่ประโยค
```
len(src.train) + len(src.valid)
```
สร้าง DataBunch เอาไว้เตรียม Feed ให้โมเดล
```
data = src.databunch()
```
Save ไว้ก่อน คราวหน้าจะได้ไม่ต้อง Preprocess ใหม่
```
data.save('databunch-small-questions.pkl')
# data
path
```
โหลด DataBunch ที่เรา [Preprocess](https://www.bualabs.com/archives/2085/what-is-preprocessing-handle-missing-value-fill-na-null-nan-before-feedforward-machine-learning-preprocessing-ep-1/) เตรียมไว้ก่อนหน้านี้ ขึ้นมา
```
data = load_data(path, 'databunch-small-questions.pkl')
```
ดูตัวอย่างข้อมูลใน Batch แรก
```
data.show_batch()
```
# 4. Model
แทนที่จะเทรนแต่ต้นทั้งหมด เราจะใช้ [Transfer Learning](https://www.bualabs.com/archives/926/sentiment-analysis-imdb-movie-review-ulmfit-sentiment-analysis-ep-1/) เพื่อประหยัดเวลา และเพิ่มประสิทธิภาพ แต่ในงานนี้เราจะใช้เฉพาะ [Embedding](https://www.bualabs.com/archives/996/visualization-embedding-deep-learning-model-deep-neural-networks-tensorboard-ep-2/)
## 4.1 Embedding
โดยในส่วนของ Word [Embedding](https://www.bualabs.com/archives/996/visualization-embedding-deep-learning-model-deep-neural-networks-tensorboard-ep-2/) เราจะใช้ Pre-trained Word Vector 300 มิติ n-grams ความยาว 5 character, ขนาด window 5 and 10 negatives จาก FastText ของ Facebook ที่เทรนด้วยข้อมูลจาก Common Crawl และ Wikipedia ด้วยวิธี Continuous Bag of Words (CBOW) จะอธิบายต่อไป
```
! pip install fasttext
```
Import fasttext
```
import fasttext as ft
```
Download [Word Vector](https://www.bualabs.com/archives/996/visualization-embedding-deep-learning-model-deep-neural-networks-tensorboard-ep-2/) ของทั้งสองภาษา
```
# ! wget https://dl.fbaipublicfiles.com/fasttext/vectors-crawl/cc.en.300.bin.gz -P {path}
# ! wget https://dl.fbaipublicfiles.com/fasttext/vectors-crawl/cc.fr.300.bin.gz -P {path}
```
แตกไฟล์ออกมา
```
# ! gunzip {path}/cc.en.300.bin.gz
# ! gunzip {path}/cc.fr.300.bin.gz
```
ใช้ fasttext โหลดไฟล์ที่ดาวน์โหลดมา
```
fr_vecs = ft.load_model(str((path/'cc.fr.300.bin')))
en_vecs = ft.load_model(str((path/'cc.en.300.bin')))
```
ประกาศฟังก์ชัน สร้าง Embedding จาก Pre-trained Vector และเติมค่า Vector คำศัพท์ที่ขาดหายไปด้วย ค่า Random
```
def create_emb(vecs, itos, em_sz=300, multi=1.):
    """Build an `nn.Embedding` initialised from pre-trained fastText vectors.

    Rows for words of `itos` present in `vecs` are copied from the
    pre-trained vectors; out-of-vocabulary rows keep their random
    initialisation.

    Args:
        vecs: loaded fastText model (must provide `get_words` and
            `get_word_vector`).
        itos: index-to-string vocabulary list; index 1 is the padding token.
        em_sz: embedding dimension (must match the fastText vectors).
        multi: unused; kept only for backward compatibility of the signature.
    """
    emb = nn.Embedding(len(itos), em_sz, padding_idx=1)
    wgts = emb.weight.data
    vec_dic = {w: vecs.get_word_vector(w) for w in vecs.get_words()}
    miss = []
    for i, w in enumerate(itos):
        # Only a missing key means "no pre-trained vector"; the original
        # bare `except` would also have hidden real errors (bad shapes,
        # KeyboardInterrupt, ...).
        try:
            wgts[i] = torch.tensor(vec_dic[w])
        except KeyError:
            miss.append(w)
    return emb
```
สร้าง [Embedding](https://www.bualabs.com/archives/996/visualization-embedding-deep-learning-model-deep-neural-networks-tensorboard-ep-2/) สำหรับทั้งสองภาษา (Encoder และ Decoder)
```
emb_enc = create_emb(fr_vecs, data.x.vocab.itos)
emb_dec = create_emb(en_vecs, data.y.vocab.itos)
```
จำนวนคำ และขนาดมิติ
```
emb_enc.weight.size(), emb_dec.weight.size()
model_path = config_path/'My Drive/models'
```
Save Embedding ที่เราสร้างไว้
```
torch.save(emb_enc, model_path/'fr_emb.pth')
torch.save(emb_dec, model_path/'en_emb.pth')
```
fasttext ไม่ได้ใช้แล้ว เราจะทำลายทิ้งไป จะได้ไม่เปลือง Memory
```
del fr_vecs
del en_vecs
gc.collect()
```
โหลด [Embedding](https://www.bualabs.com/archives/996/visualization-embedding-deep-learning-model-deep-neural-networks-tensorboard-ep-2/) ขึ้นมา
```
emb_enc = torch.load(model_path/'fr_emb.pth')
emb_dec = torch.load(model_path/'en_emb.pth')
```
## 4.2 Sequence to Sequence Model
โมเดลของเราจะแบ่งเป็น 2 ส่วนคือ Encoder แปลงข้อความภาษาฝรั่งเศส เป็น Vector และ Decoder ที่จะ Generate ข้อความภาษาอังกฤษ จาก Vector นั้น
เนื่องจากเราไม่ได้ Transfer Learning ใด ๆ ทั้งสิ้นยกเว้น Embedding ทำให้ Layer ของ [GRU](https://www.bualabs.com/archives/3103/what-is-rnn-recurrent-neural-network-what-is-gru-gated-recurrent-unit-teach-how-to-build-rnn-gru-with-python-nlp-ep-9/) และ [Linear](https://www.bualabs.com/archives/1763/what-is-neural-network-how-neural-network-work-build-deep-neural-network-from-scratch-neural-network-ep-1/) จะถูก [Initialize](https://www.bualabs.com/archives/2633/what-is-lsuv-layer-sequential-unit-variance-initialization-difference-kaiming-initialization-convnet-ep-6/) ด้วยค่า Random
```
class Seq2SeqRNN(nn.Module):
    """GRU-based encoder/decoder for sequence-to-sequence translation.

    The encoder embeds the source sentence and runs it through a GRU; the
    GRU's final hidden state is projected to the decoder's embedding size
    and used as the decoder's initial hidden state.  The decoder then
    generates tokens greedily, one step at a time, feeding each prediction
    back as the next input, for at most `out_sl` steps.
    """
    def __init__(self, emb_enc, emb_dec, nh, out_sl, nl=2, bos_idx=0, pad_idx=1):
        super().__init__()
        # nl: GRU layers; nh: encoder hidden size; out_sl: max output length.
        self.nl, self.nh, self.out_sl = nl, nh, out_sl
        self.bos_idx, self.pad_idx = bos_idx, pad_idx
        self.em_sz_enc = emb_enc.embedding_dim
        self.em_sz_dec = emb_dec.embedding_dim
        self.voc_sz_dec = emb_dec.num_embeddings
        self.emb_enc = emb_enc
        self.emb_enc_drop = nn.Dropout(0.15)
        self.gru_enc = nn.GRU(self.em_sz_enc, nh, num_layers=nl,
                              dropout=0.25, batch_first=True)
        # Projects the encoder hidden state to the decoder's hidden size.
        self.out_enc = nn.Linear(nh, self.em_sz_dec, bias=False)
        self.emb_dec = emb_dec
        # Decoder hidden size == decoder embedding size, which makes the
        # weight tying below dimensionally valid.
        self.gru_dec = nn.GRU(self.em_sz_dec, self.em_sz_dec, num_layers=nl,
                              dropout=0.1, batch_first=True)
        self.out_drop = nn.Dropout(0.35)
        self.out = nn.Linear(self.em_sz_dec, self.voc_sz_dec)
        # Weight tying: the output projection shares the decoder embedding
        # matrix (assignment of .data shares storage, not a copy).
        self.out.weight.data = self.emb_dec.weight.data
    def encoder(self, bs, inp):
        # Returns the projected final hidden state, shape (nl, bs, em_sz_dec).
        h = self.initHidden(bs)
        emb = self.emb_enc_drop(self.emb_enc(inp))
        _, h = self.gru_enc(emb, h)
        h = self.out_enc(h)
        return h
    def decoder(self, dec_inp, h):
        # One greedy decoding step: (bs,) token ids -> (bs, vocab) logits.
        emb = self.emb_dec(dec_inp).unsqueeze(1)
        outp, h = self.gru_dec(emb, h)
        outp = self.out(self.out_drop(outp[:, 0]))
        return h, outp
    def forward(self, inp):
        bs, sl = inp.size()
        h = self.encoder(bs, inp)
        # Start every sequence with the beginning-of-sentence token.
        dec_inp = inp.new_zeros(bs).long() + self.bos_idx
        res = []
        for i in range(self.out_sl):
            h, outp = self.decoder(dec_inp, h)
            dec_inp = outp.max(1)[1]
            res.append(outp)
            # Stop early once every sequence in the batch emits padding.
            if (dec_inp==self.pad_idx).all(): break
        return torch.stack(res, dim=1)
    def initHidden(self, bs):
        # one_param gives a parameter of this module so the zeros are created
        # on the same device/dtype as the model.
        return one_param(self).new_zeros(self.nl, bs, self.nh)
```
ลองดู [Shape](https://www.bualabs.com/archives/1749/how-to-pytorch-reshape-squeeze-unsqueeze-flatten-manipulate-shape-high-order-dimensions-tensor-ep-2/) ของ [Tensor](https://www.bualabs.com/archives/1629/what-is-tensor-element-wise-broadcasting-operations-high-order-tensor-numpy-array-matrix-vector-tensor-ep-1/) ข้อมูล 1 Batch
```
xb, yb = next(iter(data.valid_dl))
```
ขนาด Batch Size = 64 ความยาว 30 Token
```
xb.shape
```
สร้าง [Model](https://www.bualabs.com/archives/2703/how-to-read-model-convolutional-neural-network-shape-activation-map-model-architecture-convnet-ep-7/)
```
rnn = Seq2SeqRNN(emb_enc, emb_dec, 256, 30)
rnn
len(xb[0])
```
นำข้อมูล 1 Batch มาผ่าน Encoder ของโมเดล ดู Shape ของ Hidden หรือ Vector Representation ที่จะไปยัง Decoder
```
# (fixed: a stray Thai vowel character before `h` made this line invalid)
h = rnn.encoder(64, xb.cpu())
```
ได้ออกมาเป็น Hidden 2 Layer ด้วย Batch Size = 64 เป็น Vector 300 มิติ
```
h.size()
```
## 4.3 Loss Function
เนื่องจากข้อความ มีความยาวไม่เท่ากัน เราจะใช้ [Loss Function](https://www.bualabs.com/archives/2673/what-is-loss-function-cost-function-error-function-loss-function-how-cost-function-work-machine-learning-ep-1/) แบบปกติไม่ได้ ต้อง Padding ให้เท่ากันก่อนเปรียบเทียบ
```
def seq2seq_loss(out, targ, pad_idx=1):
    "Flattened cross-entropy after padding both tensors to a common length."
    t_len = targ.size(1)
    o_len = out.size(1)
    # The two cases are mutually exclusive: pad whichever side is shorter.
    if o_len < t_len:
        out = F.pad(out, (0, 0, 0, t_len - o_len, 0, 0), value=pad_idx)
    elif o_len > t_len:
        targ = F.pad(targ, (0, o_len - t_len, 0, 0), value=pad_idx)
    return CrossEntropyFlat()(out, targ)
```
เนื่องจากข้อความ มีความยาวไม่เท่ากัน เราจะใช้ [Metric](https://www.bualabs.com/archives/1968/what-is-metrics-confusion-matrix-accuracy-precision-recall-f1-score-difference-metrics-ep-1/) เป็น Accuracy แบบปกติไม่ได้ ต้อง Padding ให้เท่ากันก่อนเปรียบเทียบ
```
def seq2seq_acc(out, targ, pad_idx=1):
    "Token-level accuracy after padding prediction/target to equal length."
    t_len = targ.size(1)
    o_len = out.size(1)
    # Pad whichever side is shorter so positions line up (cases exclusive).
    if o_len < t_len:
        out = F.pad(out, (0, 0, 0, t_len - o_len, 0, 0), value=pad_idx)
    elif o_len > t_len:
        targ = F.pad(targ, (0, o_len - t_len, 0, 0), value=pad_idx)
    preds = out.argmax(dim=2)
    return (preds == targ).float().mean()
```
# 5. Train Model
## 5.1 Train โมเดลที่สร้างไว้ด้านบน
สร้าง [Learner](https://www.bualabs.com/archives/2318/databunch-learner-refactor-neural-network-training-loop-neural-network-ep-9/) จากโมเดลด้านบน เตรียมเทรน
```
learn = Learner(data, rnn, loss_func=seq2seq_loss, callback_fns=[ShowGraph], metrics=[seq2seq_acc])
```
ใช้ [lr_find](https://www.bualabs.com/archives/2377/lr-find-best-learning-rate-train-machine-learning-model-deep-neural-network-callback-neural-network-ep-12/) หา [Learning Rate](https://www.bualabs.com/archives/618/learning-rate-deep-learning-how-to-hyperparameter-tuning-ep-1/) ที่เหมาะสม
```
learn.lr_find()
# Keyword is `suggestion` (not `suggest`), consistent with the later
# lr_find/plot cells in this notebook.
learn.recorder.plot(suggestion=True)
```
ใช้ [Fit One Cycle](https://www.bualabs.com/archives/2452/schedule-hyperparameter-train-machine-learning-deep-neural-network-one-cycle-learning-rate-neural-network-ep-13/) เทรนไป 4 [Epoch](https://www.bualabs.com/archives/618/learning-rate-deep-learning-how-to-hyperparameter-tuning-ep-1/) ได้ผลลัพธ์พอใช้
```
learn.fit_one_cycle(4, 3e-3)
```
แต่ [Accuracy](https://www.bualabs.com/archives/1968/what-is-metrics-confusion-matrix-accuracy-precision-recall-f1-score-difference-metrics-ep-1/) เป็น [Metrics](https://www.bualabs.com/archives/2075/validation-metrics-neural-network-validation-set-train-machine-learning-neural-network-ep-8/) ที่ไม่ค่อยเหมาะสำหรับงาน Translation สักเท่าไร
Save เก็บไว้ก่อน
```
learn.save('26i-gru-acc')
learn.load('26i-gru-acc');
```
## 5.2 Bleu Metric
ในงาน Translation เนื่องจากภาษามนุษย์นั้นดิ้นได้ สมมติให้คน 2 คนแปลข้อความเดียวกัน เป็นไปได้ยากที่จะแปลออกมาเป็นประโยคเดียวกันตรงกันทุกคำ จึงไม่นิยมใช้ Metric ที่เปรียบเทียบคำต่อคำตรง ๆ แบบ [Accuracy](https://www.bualabs.com/archives/1968/what-is-metrics-confusion-matrix-accuracy-precision-recall-f1-score-difference-metrics-ep-1/) แต่นิยมใช้ [Metric](https://www.bualabs.com/archives/2075/validation-metrics-neural-network-validation-set-train-machine-learning-neural-network-ep-8/) ที่เรียกว่า Bleu
Bleu Score คืออะไร แทนที่จะเปรียบเทียบคำต่อคำ เราจะเปรียบเทียบ [Ngram](https://www.bualabs.com/archives/3060/what-is-n-gram-sentiment-classification-imdb-movie-review-naive-bayes-logistic-regression-nlp-ep-6/) แทน จะอธิบายต่อไป
```
class NGram():
    """Hashable wrapper around a token n-gram so n-grams can be counted.

    `max_n` is an upper bound on token ids, used to pack the n-gram into a
    single integer for hashing (digits of a base-`max_n` number).
    """
    def __init__(self, ngram, max_n=50000): self.ngram, self.max_n = ngram, max_n
    def __eq__(self, other):
        # Comparing against a non-NGram is "not equal", not an error; the
        # original raised AttributeError on e.g. `NGram(...) == "x"`.
        if not isinstance(other, NGram): return NotImplemented
        if len(self.ngram) != len(other.ngram): return False
        return bool(np.all(np.array(self.ngram) == np.array(other.ngram)))
    def __hash__(self):
        return int(sum([o * self.max_n**i for i, o in enumerate(self.ngram)]))

def get_grams(x, n, max_n=5000):
    "Return the n-grams of sequence `x` (the raw tokens themselves when n == 1)."
    return x if n==1 else [NGram(x[i:i+n], max_n=max_n) for i in range(len(x)-n+1)]

def get_correct_ngrams(pred, targ, n, max_n=5000):
    "Return (#matching n-grams, #predicted n-grams) between `pred` and `targ`."
    pred_grams, targ_grams = get_grams(pred, n, max_n=max_n), get_grams(targ, n, max_n=max_n)
    pred_cnt, targ_cnt = Counter(pred_grams), Counter(targ_grams)
    # Clipped counts, as in BLEU: each target n-gram can only be matched once.
    return sum([min(c, targ_cnt[g]) for g, c in pred_cnt.items()]), len(pred_grams)
```
สร้าง [Callback คำนวน Metrics](https://www.bualabs.com/archives/2358/callback-machine-learning-example-calculate-metrics-avgstats-callback-recorder-loss-learning-rate-neural-network-ep-11/) ทุก Epoch
```
class CorpusBLEU(Callback):
    """Metric callback computing corpus-level BLEU over an epoch.

    Accumulates 1- to 4-gram precision counts across batches and combines
    them at epoch end as a geometric mean with a brevity penalty.
    """
    def __init__(self, vocab_sz):
        self.vocab_sz = vocab_sz
        self.name = 'bleu'
    def on_epoch_begin(self, **kwargs):
        # Running totals: predicted/target token counts and, per n-gram
        # order (n = 1..4), the number of matching and of predicted n-grams.
        self.pred_len, self.targ_len, self.corrects, self.counts = 0, 0, [0]*4, [0]*4
    def on_batch_end(self, last_output, last_target, **kwargs):
        # Greedy decode: take the arg-max token at each position.
        last_output = last_output.argmax(dim=-1)
        for pred, targ in zip(last_output.cpu().numpy(), last_target.cpu().numpy()):
            self.pred_len += len(pred)
            self.targ_len += len(targ)
            for i in range(4):
                c, t = get_correct_ngrams(pred, targ, i+1, max_n=self.vocab_sz)
                self.corrects[i] += c
                self.counts[i] += t
    def on_epoch_end(self, last_metrics, **kwargs):
        # BLEU = brevity penalty * geometric mean of the 4 n-gram precisions.
        # NOTE(review): raises ZeroDivisionError if an epoch produces zero
        # n-grams of some order — fine for this dataset, but worth knowing.
        precs = [c/t for c, t in zip(self.corrects, self.counts)]
        len_penalty = exp(1 - self.targ_len / self.pred_len) if self.pred_len < self.targ_len else 1
        bleu = len_penalty * ((precs[0] * precs[1] * precs[2] * precs[3]) ** 0.25)
        return add_metrics(last_metrics, bleu)
```
สร้าง Learner ใหม่ จาก Callback ด้านบน
```
learn = Learner(data, rnn, loss_func=seq2seq_loss, metrics=[seq2seq_acc, CorpusBLEU(len(data.y.vocab.itos))], callback_fns=[ShowGraph])
learn.lr_find()
learn.recorder.plot(suggestion=True)
```
เทรนไป 4 Epoch เหมือนเดิม เพิ่มเติมคือ Bleu Score ให้เราเห็นประสิทธิภาพของโมเดลชัดเจนมากขึ้น
```
learn.fit_one_cycle(4, 3e-3)
```
Save ไว้ก่อน
```
learn.save('26i-gru-bleu')
learn.load('26i-gru-bleu');
```
## 6. View Result
ดูแต่ Metrics อย่างเดียว อาจจะไม่เห็นภาพ เราจะมาลองดูผลลัพธ์การแปลของจริงกัน
```
def get_predictions(learn, ds_type=DatasetType.Valid):
    "Run the model over `ds_type` and reconstruct input/target/output texts."
    learn.model.eval()
    inputs, targets, outputs = [], [], []
    with torch.no_grad():
        for xb, yb in progress_bar(learn.dl(ds_type)):
            out = learn.model(xb)
            for x, y, z in zip(xb, yb, out):
                # train_ds is used only for its vocab/reconstruct logic —
                # assumes train and valid share the same vocab (true for the
                # fastai processors used in this notebook; verify if reused).
                inputs.append(learn.data.train_ds.x.reconstruct(x))
                targets.append(learn.data.train_ds.y.reconstruct(y))
                # argmax over the vocab dimension = greedy decoded tokens.
                outputs.append(learn.data.train_ds.y.reconstruct(z.argmax(1)))
    return inputs, targets, outputs
inputs, targets, outputs = get_predictions(learn)
idx = 999
inputs[idx], targets[idx], outputs[idx]
idx = 2222
inputs[idx], targets[idx], outputs[idx]
idx = 3333
inputs[idx], targets[idx], outputs[idx]
idx = 4444
inputs[idx], targets[idx], outputs[idx]
```
ปัญหาคือ ถึงโมเดลจะเริ่มต้นได้ดี แต่แล้วก็จะออกทะเลไปเรื่อย ๆ กลายเป็นคำซ้ำ ๆ ไปจนจบ จะแก้ไขอย่างไรดี
# 5/2 Teacher Forcing
ในการเทรน Decoder ที่เป็น [RNN](https://www.bualabs.com/archives/3103/what-is-rnn-recurrent-neural-network-what-is-gru-gated-recurrent-unit-teach-how-to-build-rnn-gru-with-python-nlp-ep-9/) ตามปกติจะนำ Output มา Feed กลับเป็น Input สำหรับคำต่อไป แต่ถ้าโมเดลยังไม่ค่อยเก่ง Predict Output ออกมาผิด แล้วเรานำ Output ที่ผิดนั้นไป Feed กลับมาเป็น Input ทำให้ Output ต่อ ๆ ไป ผิดเป็นโดมิโนไปหมด วิธีหนึ่งที่จะช่วยให้โมเดล เรียนรู้ได้ดีขึ้น คือ Teacher Forcing
Teacher Forcing คือ การเทรนด้วยแทนที่ จะ Feed Output จากโมเดล เป็น Input อย่างเดียว เราจะ Feed ผสม Output ที่ถูกต้อง (Label) กับ Output ของโมเดล (Prediction) เข้าด้วยกัน ตามสัดส่วนที่กำหนด แล้วค่อย ๆ ปรับสัดส่วนเพิ่ม Output จากโมเดลขึ้นเรื่อย ๆ ลด Label ลง จนเทรนด้วย Output อย่างเดียว
ประกาศ [Callback](https://www.bualabs.com/archives/2348/how-to-apply-callback-in-training-loop-neural-network-flexible-neural-network-ep-10/) สำหรับทำ Teacher Forcing ที่จะสิ้นสุด ตาม end_epoch ที่กำหนด
```
class TeacherForcing(LearnerCallback):
    """Linearly anneal teacher forcing from 1 to 0 over `end_epoch` epochs.

    During training it passes the target batch to the model as a second
    input, so the model's `forward` can substitute ground-truth tokens for
    its own predictions with probability `pr_force`.
    """
    def __init__(self, learn, end_epoch):
        super().__init__(learn)
        self.end_epoch = end_epoch
    def on_batch_begin(self, last_input, last_target, train, **kwargs):
        # Training only: the model receives (input, target) instead of input.
        if train: return {'last_input': [last_input, last_target]}
    def on_epoch_begin(self, epoch, **kwargs):
        # pr_force: 1 at epoch 0, decreasing linearly to 0 by `end_epoch`.
        self.learn.model.pr_force = 1 - epoch/self.end_epoch
```
สร้าง Model ใหม่ ที่ใน Forward จะ Random ทำ Teacher Forcing ตาม pr_force จาก Callback
```
class Seq2SeqRNN_tf(nn.Module):
    """Seq2SeqRNN variant that supports teacher forcing during training.

    `forward` optionally receives the target sequence; with probability
    `pr_force` (scheduled externally by the TeacherForcing callback) the
    ground-truth token is fed as the next decoder input instead of the
    model's own prediction.
    """
    def __init__(self, emb_enc, emb_dec, nh, out_sl, nl=2, bos_idx=0, pad_idx=1):
        super().__init__()
        self.nl, self.nh, self.out_sl = nl, nh, out_sl
        self.bos_idx, self.pad_idx = bos_idx, pad_idx
        # Fix: default to no teacher forcing so forward(inp, targ) works even
        # before/without the TeacherForcing callback setting this attribute
        # (previously an AttributeError).
        self.pr_force = 0.
        self.em_sz_enc = emb_enc.embedding_dim
        self.em_sz_dec = emb_dec.embedding_dim
        self.voc_sz_dec = emb_dec.num_embeddings
        self.emb_enc = emb_enc
        self.emb_enc_drop = nn.Dropout(0.15)
        self.gru_enc = nn.GRU(self.em_sz_enc, nh, num_layers=nl,
                              dropout=0.25, batch_first=True)
        self.out_enc = nn.Linear(nh, self.em_sz_dec, bias=False)
        self.emb_dec = emb_dec
        self.gru_dec = nn.GRU(self.em_sz_dec, self.em_sz_dec, num_layers=nl,
                              dropout=0.1, batch_first=True)
        self.out_drop = nn.Dropout(0.35)
        self.out = nn.Linear(self.em_sz_dec, self.voc_sz_dec)
        # Weight tying: output projection shares the decoder embedding matrix.
        self.out.weight.data = self.emb_dec.weight.data
    def encoder(self, bs, inp):
        # Returns the projected final hidden state, shape (nl, bs, em_sz_dec).
        h = self.initHidden(bs)
        emb = self.emb_enc_drop(self.emb_enc(inp))
        _, h = self.gru_enc(emb, h)
        h = self.out_enc(h)
        return h
    def decoder(self, dec_inp, h):
        # One decoding step: (bs,) token ids -> (bs, vocab) logits.
        emb = self.emb_dec(dec_inp).unsqueeze(1)
        outp, h = self.gru_dec(emb, h)
        outp = self.out(self.out_drop(outp[:, 0]))
        return h, outp
    def forward(self, inp, targ=None):
        bs, sl = inp.size()
        h = self.encoder(bs, inp)
        dec_inp = inp.new_zeros(bs).long() + self.bos_idx
        res = []
        for i in range(self.out_sl):
            h, outp = self.decoder(dec_inp, h)
            dec_inp = outp.max(1)[1]
            res.append(outp)
            # Stop early once every sequence in the batch emits padding.
            if (dec_inp==self.pad_idx).all(): break
            if (targ is not None) and (random.random()<self.pr_force):
                # Teacher forcing: feed the ground-truth token (when the
                # target is long enough) instead of the model's prediction.
                if i >= targ.shape[1]: continue
                dec_inp = targ[:, i]
        return torch.stack(res, dim=1)
    def initHidden(self, bs):
        # one_param gives a parameter of this module so the zeros land on
        # the model's device/dtype.
        return one_param(self).new_zeros(self.nl, bs, self.nh)
```
โหลด Embedding ขึ้นมาใหม่ เตรียมเทรน
```
emb_enc = torch.load(model_path/'fr_emb.pth')
emb_dec = torch.load(model_path/'en_emb.pth')
```
สร้างโมเดล และ Learning ด้วย Callback Teaching Forcing และโมเดลใหม่ ที่รองรับ Teaching Forcing
```
rnn_tf = Seq2SeqRNN_tf(emb_enc, emb_dec, 256, 30)
learn = Learner(data, rnn_tf, loss_func=seq2seq_loss,
metrics=[seq2seq_acc, CorpusBLEU(len(data.y.vocab.itos))],
callback_fns=[ShowGraph, partial(TeacherForcing, end_epoch=3)])
```
ใช้ [lr_find](https://www.bualabs.com/archives/2377/lr-find-best-learning-rate-train-machine-learning-model-deep-neural-network-callback-neural-network-ep-12/) หา [Learning Rate](https://www.bualabs.com/archives/618/learning-rate-deep-learning-how-to-hyperparameter-tuning-ep-1/)
```
learn.lr_find()
learn.recorder.plot(suggestion=True)
```
Schedule Hyperparameter ในการเทรนโมเดล Deep Neural Network ด้วย Learning Rate ไม่คงที่ [Fit One Cycle](https://www.bualabs.com/archives/2452/schedule-hyperparameter-train-machine-learning-deep-neural-network-one-cycle-learning-rate-neural-network-ep-13/)
```
learn.fit_one_cycle(8, max_lr=3e-3)
```
ได้ผลลัพธ์ดีขึ้น เราจะ Save Learner ไว้ก่อน
```
learn.save('26i-gru-tf')
learn.load('26i-gru-tf');
```
# 6/2. View Result
ดูแต่ [Metrics](https://www.bualabs.com/archives/2075/validation-metrics-neural-network-validation-set-train-machine-learning-neural-network-ep-8/) อย่างเดียว อาจจะไม่เห็นภาพ เราจะมาลองดูผลลัพธ์การแปลของจริงกันเปรียบเทียบ
```
inputs, targets, outputs = get_predictions(learn)
idx = 999
inputs[idx], targets[idx], outputs[idx]
idx = 2222
inputs[idx], targets[idx], outputs[idx]
idx = 3333
inputs[idx], targets[idx], outputs[idx]
idx = 4444
inputs[idx], targets[idx], outputs[idx]
```
# 7. สรุป
* เราได้เรียนรู้สถาปัตยกรรมแบบใหม่ Sequence to Sequence ที่ต่อยอดมาจาก [RNN](https://www.bualabs.com/archives/3103/what-is-rnn-recurrent-neural-network-what-is-gru-gated-recurrent-unit-teach-how-to-build-rnn-gru-with-python-nlp-ep-9/) / [LSTM](https://www.bualabs.com/archives/3087/sentiment-classification-deep-learning-imdb-movie-reviews-positive-negative-deep-neural-network-awd-lstm-ulmfit-nlp-ep-8/) / [GRU](https://www.bualabs.com/archives/3103/what-is-rnn-recurrent-neural-network-what-is-gru-gated-recurrent-unit-teach-how-to-build-rnn-gru-with-python-nlp-ep-9/) ที่เหมาะกับใช้ในงาน Machine Translation
* เราได้ [Transfer Learning](https://www.bualabs.com/archives/926/sentiment-analysis-imdb-movie-review-ulmfit-sentiment-analysis-ep-1/) ส่วน Embedding มาจาก fasttext
* เราเทรนโมเดลของเรา ด้วย Seq2SeqRNN และ Seq2SeqRNN + Teacher Forcing ได้ผลลัพธ์ที่ดีที่สุด [Metrics](https://www.bualabs.com/archives/2075/validation-metrics-neural-network-validation-set-train-machine-learning-neural-network-ep-8/) [Accuracy](https://www.bualabs.com/archives/1968/what-is-metrics-confusion-matrix-accuracy-precision-recall-f1-score-difference-metrics-ep-1/) = 0.439311 และ Bleu Score = 0.332833
* เราได้เรียนรู้วิธีเทรน ที่จะเพิ่มประสิทธิภาพของโมเดล เช่น Teacher Forcing จะเห็นว่าโมเดลยังมีช่องให้ปรับปรุงได้อีกมาก เทคนิคและวิธีอื่น ๆ จะอธิบายต่อไป
ตารางสรุป ผลลัพธ์การเทรนโมเดลแบบต่าง ๆ
model | train_loss | valid_loss | seq2seq_acc | bleu
-------------------|----------|----------|----------|----------
seq2seq | 2.823382 | 3.826643 | 0.427117 | 0.322879
\+ teacher forcing | 2.678760 | 3.732396 | 0.439311 | 0.332833
\+ attention | 1.452292 | 3.420485 | 0.498205 | 0.413232
transformer | 1.913152 | 2.349686 | 0.781749 | 0.612880
# Credit
* https://www.youtube.com/watch?v=IfsjMg4fLWQ&list=PLtmWHNX-gukKocXQOkQjuVxglSDYWsSh9&index=12
* https://www.bualabs.com/archives/3103/what-is-recurrent-neural-network-rnn-pytorch-gru-nlp-ep-9/
* https://www.bualabs.com/archives/3087/sentiment-classification-deep-learning-imdb-movie-reviews-positive-negative-deep-neural-network-awd-lstm-ulmfit-nlp-ep-8/
* https://www.bualabs.com/archives/3060/what-is-n-gram-sentiment-classification-imdb-movie-review-naive-bayes-logistic-regression-nlp-ep-6/
* http://opus.nlpl.eu/giga-fren.php
* https://arxiv.org/abs/1409.3215
* https://ieeexplore.ieee.org/document/6795228/
* https://pytorch.org/tutorials/intermediate/seq2seq_translation_tutorial.html
* http://www.statmt.org/wmt15/translation-task.html
* https://fasttext.cc/docs/en/crawl-vectors.html
* https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213
* https://towardsdatascience.com/illustrated-guide-to-lstms-and-gru-s-a-step-by-step-explanation-44e9eb85bf21
* https://github.com/fastai/fastai_docs/blob/master/dev_course/dl2/translation.ipynb
*
```
```
| github_jupyter |
I realised just before releasing version 0.0.22 onto PyPI that signal.iirnotch had some weird boundary effects. Even when I used a pure CF tone and tried to filter out the peak frequency, there were largish peaks at either end of the post-filtered CF tone — and this even when the tone was zero-padded. I then realised this weird behaviour might have to do with the fact that I'm using an IIR filter rather than an FIR filter. Here's an attempt to compare the two and try to fix this problem.
```
import datetime as dt
import matplotlib.pyplot as plt
import numpy as np
import scipy.signal as signal
print(dt.datetime.now())
%matplotlib notebook
# create a pure tone CF
fs = 250000                       # sampling rate, Hz
peak_freq = 100000.0              # CF tone frequency, Hz
durn = 0.05                       # tone duration, s
t = np.linspace(0,durn, int(durn*fs))
cf_tone = np.sin(2*np.pi*peak_freq*t)
# Tukey window (20% taper) to avoid hard onset/offset clicks.
cf_tone *= signal.tukey(cf_tone.size, 0.2)
gap = int(0.001*fs)               # 1 ms worth of extra samples
# -60 dB rms Gaussian noise floor, longer than the tone by `gap` samples.
noise = np.random.normal(0,10**(-60/20),t.size+gap)
tone_w_noise = noise.copy()
# Embed the tone with gap/2 = 125 noise-only samples on each side.
tone_w_noise[125:-125] += cf_tone
# first do the iir notch filtering to
# (iirnotch expects the normalised frequency w0 = f/(fs/2); Q = 1 is a
# very low quality factor, i.e. a broad notch.)
b,a = signal.iirnotch(peak_freq/(fs*0.5),1)
# the raw signal + signal with some noise and gap on L&R side :
ylims = lambda X: plt.ylim(-1, 1)   # fixed y-axis for the waveform panels
# Spectrogram colour floor: 120 dB below the tone's peak amplitude.
dyn_range = 20*np.log10(np.max(abs(cf_tone))) - 120
plt.figure(figsize=(8,4))
plt.subplot(231)
plt.plot(cf_tone)
ylims(0)
plt.title('Tight call selection')
plt.subplot(232)
plt.plot(tone_w_noise)
ylims(0);plt.yticks([])
plt.title('Call selection w 1ms gap')
a2 = plt.subplot(233)
just_noise = np.random.normal(0,10**(-60/20), int(fs*durn))
filtered_noise = signal.lfilter(b,a, just_noise)
plt.plot(filtered_noise)
ylims(0)
a2.yaxis.tick_right()
plt.title('Only -60dBrms noise')
plt.subplot(234)
plt.specgram(cf_tone, Fs=fs, vmin=dyn_range);
plt.subplot(235)
plt.specgram(tone_w_noise, Fs=fs, vmin=dyn_range);
plt.subplot(236)
plt.specgram(just_noise, Fs=fs, vmin=dyn_range);
iir_filtered = {}
iir_filtered['only_tone'] = signal.lfilter(b,a, cf_tone)
iir_filtered['tone_w_gap'] = signal.lfilter(b,a, tone_w_noise)
ylims = lambda X: plt.ylim(-0.005, 0.005)
plt.figure(figsize=(8,4))
plt.subplot(231)
plt.plot(iir_filtered['only_tone'])
ylims(0)
plt.title('Tight call selection')
plt.subplot(232)
plt.plot(iir_filtered['tone_w_gap'])
ylims(0);plt.yticks([])
plt.title('Call selection w 1ms gap')
a2 = plt.subplot(233)
just_noise = np.random.normal(0,10**(-60/20), int(fs*durn))
filtered_noise = signal.lfilter(b,a, just_noise)
plt.plot(filtered_noise)
ylims(0)
a2.yaxis.tick_right()
plt.title('Only -60dBrms noise')
plt.subplot(234)
plt.specgram(iir_filtered['only_tone'], Fs=fs, vmin=dyn_range);
plt.subplot(235)
plt.specgram(iir_filtered['tone_w_gap'], Fs=fs, vmin=dyn_range);
plt.subplot(236)
plt.specgram(filtered_noise, Fs=fs, vmin=dyn_range);
```
### Is there a problem ?
As I see it now, the problem is really only that when the call selection is very tight, there can be false-positive FM detection. In principle, this is not great, as it can then be hard to say when a single non-overlapped call is found. What is the behaviour of an *FIR* filter? Is it more 'normal'? By 'normal' I basically mean that even with very tight call selections this weird false positive shouldn't appear.
```
# get frequency response of standard IIR filter
f, h = signal.freqz(b,a, fs=fs)
# what the filter should ideally look like:
h_ideal = np.zeros(h.size)
h_ideal[np.argmin(abs(f-peak_freq))] = -60
plt.figure()
plt.plot(f, 20*np.log10(abs(h)),label='IIR notch filter response')
plt.plot(f,h_ideal,label='ideally wished response')
plt.legend()
```
### What about *amplifying* the peak frequency AND *notch-filtering* the peak frequency:
```
print('Last cell run at', dt.datetime.now())
```
| github_jupyter |
# Battle of the neighbourhoods
## 1. Introduction Problem
#### Background
Finding neighbourhoods that offer the facilities students desire. Say I am a property developer for student accommodation. I am interested in finding neighbourhoods that are near universities and located near the facilities students would desire (gyms, convenience shops, nightclubs) whilst being affordable. So I will analyse areas surrounding universities to find those that would provide maximal profit whilst remaining appealing to students in the area.
#### Business perspective
While the income yield is a crucial measure of investment properties, the focus of this analysis will be on finding neighbourhoods with appealing amenities. There is a saying that is often preached within real estate agencies: 'location, location, location,' and in this report, the aims will be to find the right location, as that is all that matters.
#### Student perspective
King's College London will be the university investigated. London is known to have some of the highest rents in the world, so obtaining affordable yet characterful accommodation for students is a difficult problem whose solution could yield large financial gains, as ordinary student rents range between £135 and £210 per week [1]. However, due to the extraordinary prices of properties in London, the rent will need to be compared to the values of properties to determine the yield of the investments.
With the soaring cost of living in London, it is no surprise that 44% of students struggle to pay their rent each month [2], with 31% finding their studies affected. Combined with increasing tuition fees, the crippling student debt crisis is a real problem in London and the rest of the UK. How can this keep going on? While attempts are being made to control rents through legislation [3], this option is lengthy and unlikely to be successful, largely due to political opposition. So it is our responsibility to seek more desirable housing to reduce students' costs, helping the next generation pursue the continual goal of learning and self-improvement — as it is today's students who will become tomorrow's inventors, business owners and professors.
## 2. Method
#### Problem
We do not have the perfect conditions for students to find affordable places to live in, but we do have access to vast amounts of data relating to the location of venues in London (using the Foursquare API), the average rental prices in each area (using data from Spare room) and average house prices (using HM land registry).
#### Aim
From the data, we can extract the ideal areas that students would live to live in, by looking at the number of and types of venues in each area. To determine the optimal location, we must first find areas that are within a 30-minute commute of the university (King's College London) and have at least 3 of the following, within a 500-meter range: gym, coffee shop, nightclub, convenience shop. Finding budget areas where these amenities exist could prove to be valuable for students finding affordable accommodation and investors finding high yield properties.
#### Analysis
To find ideal locations for students, areas will be clustered by venue and clusters will be compared to find the ones with the most ideal properties. Clusters with the most ideal areas will be extracted and rental prices will be compared to identify the areas where accommodation is affordable for students.
## 3. Method (Practical)
### 3.1 Preparation
First let's install the necessary packages
```
!conda install -y -q BeautifulSoup4 lxml wget folium xlrd
!conda install -y -q -c conda-forge geopy
print('Packages installed')
```
Now import the necessary packages.
```
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
import requests
```
Download and extract the borough table from Wikipedia. This table contains the coordinates, headquarters, area and population of each London borough. Pandas' read_html function is used to import the HTML data into a Pandas data frame.
```
URL = "https://en.wikipedia.org/wiki/List_of_London_boroughs"
r = requests.get(URL)
a = pd.read_html(r.text)
boroughs = a[0]
boroughs.head()
```
Now let's clean up the data
```
# Parse the 'Co-ordinates' column of the Wikipedia borough table into numeric
# Latitude / Longitude columns. The raw cell holds two representations
# separated by ' / '; we keep the second (decimal degrees with N/E/W letters).
coordinates = boroughs.pop('Co-ordinates').str.split(' / ',expand=True)
coordinate = coordinates[1]
# Split "lat lon" into two columns
coordinate = coordinate.str.split(' ', expand=True)
# Strip the degree symbol and the trailing hemisphere letter from latitude
latitude = coordinate[0].str.split('°',expand = True)
display('As all the latitudes are pointing north, we can keep all the number positive: ', latitude.groupby(1).count())
# Remove the BOM/zero-width marker left over from the HTML scrape
latitude = latitude[0].str.replace('\ufeff','')
# Convert to float
latitude = latitude.astype(float)
# Same split for longitude; here the hemisphere letter matters for sign
longitude = coordinate[1].str.split('°',expand = True)
# East stays positive, West becomes negative
longitude_E = longitude[longitude[1] == 'E'][0].astype(float)
longitude_W = longitude[longitude[1] == 'W'][0].astype(float)*-1
# Recombine the two signed halves (index alignment restores row order on merge)
longitude = pd.concat([longitude_E,longitude_W])
# Attach the numeric coordinates to the borough data frame
boroughs = boroughs.merge(latitude,left_index=True, right_index=True).rename(columns={0:'Latitude'})
boroughs = boroughs.merge(longitude,left_index=True, right_index=True).rename(columns={0:'Longitude'})
boroughs.head()
```
### 3.2 Now let's visualise the areas
Using Folium to display the points
```
from geopy.geocoders import Nominatim # convert an address into latitude and longitude values
address = 'kings college london,uk'
geolocator = Nominatim(user_agent="ny_explorer")
location = geolocator.geocode(address)
latitude = location.latitude
longitude = location.longitude
print('The geograpical coordinate of London are {}, {}.'.format(latitude, longitude))
```
Now, let's visualise each Borough with a point. We are only taking the mid-point of each borough
```
import matplotlib.cm as cm
import matplotlib.colors as colors
import folium
# create map
map_clusters = folium.Map(location=[latitude, longitude], zoom_start=11)
canada_merged1 = boroughs
# add markers to the map
markers_colors = []
for lat, lon, poi in zip(canada_merged1['Latitude'], canada_merged1['Longitude'], canada_merged1['Borough']):
label = folium.Popup('Borough: {}'.format(str(poi)), parse_html=True)
# print(lat,lon)
folium.Marker(
[lat, lon],
popup=label).add_to(map_clusters)
map_clusters
```
### 3.3 Using Google's Distance Matrix API
First we are converting the Coordinates from the data frame to a string where each borough is separated with a '|' and coordinates are separated with a ','. This is to conform to the API's format.
```
# Build the Distance Matrix API origin string "lat,lon|lat,lon|..." (one entry
# per borough) and the single destination string "lat,lon" (King's College).
# str.join replaces the original manual loop with its trailing-'|' strip, and
# the pointless `.iloc[0:]` no-op slices are removed.
boroughs_lat = boroughs['Latitude'].astype('str')
boroughs_long = boroughs['Longitude'].astype('str')
origin_str = '|'.join(
    b_lat + ',' + b_long for b_lat, b_long in zip(boroughs_lat, boroughs_long)
)
destination_str = str(latitude) + ',' + str(longitude)
print(origin_str, destination_str)
```
#### 3.3.1 Now let's call the Google's Distance Matrix API
Here we are finding the time it takes to commute from each borough to Strand, King's College London, by car on April 11, 2019 @ 9:00:00 am, this is to represent a typical morning commute.
```
# SECURITY NOTE: this Google API key is committed in plain text; it should be
# revoked and loaded from an environment variable or config file instead.
key = 'AIzaSyDLeqznwSzPWB0VQr7sIkOB_1l4e1vXgpI'
arrival_time = 1554969600 # This is the time of April 11, 2019 @ 9:00:00 am in Unix time
# Build the Distance Matrix request: pipe-separated origins (one per borough),
# a single destination (King's College London).
url = 'https://maps.googleapis.com/maps/api/distancematrix/json?origins={origin}&destinations={destination}&key={skey}&mode={mode}&arrival_time={a_time}'.format(
    origin = origin_str,
    destination = destination_str,
    mode='driving', # NOTE(review): mode is driving, not public transport; section 3.3.2 explains transit was unreliable
    a_time = arrival_time,
    skey=key
)
abc = requests.get(url).json()
print(url)
# print(abc)
```
#### 3.3.2 Removing boroughs that have a longer than 45 min commute
Let's extract the results and ignore all boroughs that are more than a 45 minute drive from London. Public transport was not considered as the Google API has some problems with finding the time for journeys in public transport.
```
# Extract the human-readable duration (e.g. "37 mins") for each borough
journey_times = []
for item in abc['rows']:
    journey_times.append(item['elements'][0]['duration']['text'])
# Convert to float, so comparisons can be made.
# NOTE(review): this assumes every duration is formatted as "<N> mins"; a
# value such as "1 hour 5 mins" would fail the float conversion — confirm
# all journeys are reported in minutes only.
journey_times = pd.Series(journey_times).str.replace(pat=' mins',repl='').astype(float)
# Keep only boroughs with a commute of 45 minutes or less
acceptable_boroughs = boroughs[journey_times <=45]
acceptable_boroughs = acceptable_boroughs.reset_index()
```
#### 3.3.3 Visualising the boroughs with a less than 45 minute commute
```
import matplotlib.cm as cm
import matplotlib.colors as colors
import folium
# create map
map_clusters = folium.Map(location=[latitude, longitude], zoom_start=11)
borough_merged1 = acceptable_boroughs
# add markers to the map
markers_colors = []
for lat, lon, poi in zip(borough_merged1['Latitude'], borough_merged1['Longitude'], borough_merged1['Borough']):
label = folium.Popup('Borough: {}'.format(str(poi)), parse_html=True)
# print(lat,lon)
folium.Marker(
[lat, lon],
popup=label).add_to(map_clusters)
map_clusters
```
### 3.4 Foursquare API
#### 3.4.1 Create functions to call FourSquare API
```
# Using the Foursquare explore API to find venues around each borough
def getNearbyVenues(names, latitudes, longitudes, limit, radius=500):
    """Query the Foursquare 'explore' endpoint for venues around each borough.

    Parameters
    ----------
    names, latitudes, longitudes : iterables of equal length
        Borough names and the coordinates to search around.
    limit : int
        Maximum number of venues returned per borough.
    radius : int, optional
        Search radius in metres around each coordinate (default 500).

    Returns
    -------
    pandas.DataFrame
        One row per venue: the borough it belongs to ('Neighborhood'),
        the borough coordinates, and the venue name/coordinates/category.
    """
    # SECURITY NOTE: credentials are hard-coded in the source; prefer loading
    # them from environment variables or a config file.
    client_id = '0H0Y52X0LLK1YK4OHZ0HKFQWXWEL1V1QNMDFRWJ24YBQMDVW'
    client_secret = 'AQULU5QWLUH5GJGXDU2AK5S1PTQPNO5LHL2LZFUVVIAVGXXB'
    api_version = '20180604'

    venues_list = []
    for name, lat, lng in zip(names, latitudes, longitudes):
        print(name)  # progress indicator, one line per borough
        # create the API request URL
        url = ('https://api.foursquare.com/v2/venues/explore'
               '?&client_id={}&client_secret={}&v={}&ll={},{}&radius={}&limit={}').format(
            client_id,
            client_secret,
            api_version,
            lat,
            lng,
            radius,
            limit)
        # make the GET request; NOTE: no error handling — a failed request or
        # an unexpected payload shape will raise here
        results = requests.get(url).json()["response"]['groups'][0]['items']
        # keep only the relevant fields for each nearby venue
        venues_list.append([(
            name,
            lat,
            lng,
            v['venue']['name'],
            v['venue']['location']['lat'],
            v['venue']['location']['lng'],
            v['venue']['categories'][0]['name']) for v in results])
    # flatten the per-borough lists into a single DataFrame
    nearby_venues = pd.DataFrame([item for venue_list in venues_list for item in venue_list])
    nearby_venues.columns = ['Neighborhood',
                             'Neighborhood Latitude',
                             'Neighborhood Longitude',
                             'Venue',
                             'Venue Latitude',
                             'Venue Longitude',
                             'Venue Category']
    return nearby_venues
```
#### 3.4.2 Calling the FourSquare API for each borough
```
#
acceptable_boroughs_venues = getNearbyVenues(names=acceptable_boroughs['Borough'],
latitudes=acceptable_boroughs['Latitude'],
longitudes=acceptable_boroughs['Longitude'],
limit=1000,
radius=500
)
```
Quick look at the generated data
```
acceptable_boroughs_venues.head()
```
Find the numbers of venues in each Borough.
```
acceptable_boroughs_venues.groupby('Neighborhood').count()
```
## 4. Analyse venues
### 4.1 Preparation
Here we will cluster the neighborhoods based on the types of venues around each neighborhood.
```
# One-hot encode venue categories: one indicator column per category
borough_onehot = pd.get_dummies(acceptable_boroughs_venues[['Venue Category']], prefix="", prefix_sep="")
# add neighborhood column back to dataframe (dropped by get_dummies)
borough_onehot['Neighborhood'] = acceptable_boroughs_venues['Neighborhood']
# move the neighborhood column (currently last) to the first position
fixed_columns = [borough_onehot.columns[-1]] + list(borough_onehot.columns[:-1])
borough_onehot = borough_onehot[fixed_columns]
# Quick look at the data
borough_onehot.head()
```
And let's examine the new dataframe size.
```
borough_onehot.shape
```
Next, let's group rows by neighborhood and by taking the mean of the frequency of occurrence of each category.
```
borough_grouped = borough_onehot.groupby('Neighborhood').mean().reset_index()
borough_grouped.head()
# Function to sort a DataFrame in descending order for a particular row
def return_most_common_venues(row, num_top_venues):
    """Return the labels of the `num_top_venues` largest entries of `row`.

    The first element of `row` (the 'Neighborhood' name) is ignored; the
    remaining entries are ranked in descending order of value and the top
    labels are returned as a numpy array.
    """
    frequencies = row.iloc[1:]
    ranked = frequencies.sort_values(ascending=False)
    return ranked.head(num_top_venues).index.values
```
Now let's create the new dataframe and display the top 10 venues for each neighborhood.
```
num_top_venues = 10
# Ordinal suffixes for 1st/2nd/3rd; any index beyond 2 raises IndexError
# inside the try and falls through to the generic 'th' suffix below.
indicators = ['st', 'nd', 'rd']
# create column headers, one per ranked venue slot
columns = ['Neighborhood']
for ind in np.arange(num_top_venues):
    try:
        columns.append('{}{} Most Common Venue'.format(ind+1, indicators[ind]))
    except:
        columns.append('{}th Most Common Venue'.format(ind+1))
# create a new dataframe: one row per neighborhood, one column per rank
neighborhoods_venues_sorted = pd.DataFrame(columns=columns)
neighborhoods_venues_sorted['Neighborhood'] = borough_grouped['Neighborhood']
for ind in np.arange(borough_grouped.shape[0]):
    neighborhoods_venues_sorted.iloc[ind, 1:] = return_most_common_venues(borough_grouped.iloc[ind, :], num_top_venues)
neighborhoods_venues_sorted.head()
```
<a id='item4'></a>
### 4.2 Cluster Neighborhoods
Run *k*-means to cluster the neighborhoods into 6 clusters based on the type of venues around each borough.
```
from sklearn.cluster import KMeans
# set number of clusters
kclusters = 6
# Drop the label column so only the venue-frequency features are clustered.
# Keyword form used here: the positional `axis` argument to DataFrame.drop
# was deprecated and removed in pandas 2.0.
borough_grouped_clustering = borough_grouped.drop(columns='Neighborhood')
# run k-means clustering
kmeans = KMeans(n_clusters=kclusters, random_state=0, n_init=100, max_iter=500).fit(borough_grouped_clustering)
# check cluster labels generated for each row in the dataframe
kmeans.labels_[0:10]
```
Let's create a new dataframe that includes the cluster as well as the top 10 venues for each neighborhood.
```
neighborhoods_venues_clusted = pd.DataFrame(neighborhoods_venues_sorted)
# add clustering labels
neighborhoods_venues_clusted.insert(0, 'Cluster Labels', kmeans.labels_)
borough_merged = pd.DataFrame(acceptable_boroughs[['Borough','Latitude','Longitude']])
# merge canada_grouped with canada_data to add latitude/longitude for each neighborhood
borough_merged = neighborhoods_venues_clusted.merge(borough_merged, how ='left', left_on='Neighborhood', right_on = 'Borough')
display(borough_merged.head(5)) # check the last columns!
print(borough_merged.shape)
```
### 4.3 Map out clusters
Now that we have all the venues, let's display each borough labeled and coloured according to its cluster.
```
import matplotlib.cm as cm
import matplotlib.colors as colors
import folium
# create map
map_clusters = folium.Map(location=[latitude, longitude], zoom_start=11)
borough_merged1 = borough_merged[0:1000]
# set color scheme for the clusters
x = np.arange(kclusters)
ys = [i + x + (i*x)**2 for i in range(kclusters)]
colors_array = cm.rainbow(np.linspace(0, 1, len(ys)))
rainbow = [colors.rgb2hex(i) for i in colors_array]
# add markers to the map
markers_colors = []
for lat, lon, poi, cluster in zip(borough_merged1['Latitude'], borough_merged1['Longitude'], borough_merged1['Neighborhood'], borough_merged1['Cluster Labels']):
label = folium.Popup('Post code: {}, Cluster: {}'.format(str(poi), str(cluster)), parse_html=True)
# print(lat,lon)
folium.CircleMarker(
[lat, lon],
radius=5,
popup=label,
color=rainbow[cluster-1],
fill=True,
fill_color=rainbow[cluster-1],
fill_opacity=1).add_to(map_clusters)
map_clusters
```
<a id='item5'></a>
## 5. Examine Clusters
### 5.1 Display each cluster
Now, you can examine each cluster and determine the discriminating venue categories that distinguish each cluster. Based on the defining categories, you can then assign a name to each cluster. I will leave this exercise to you.
#### Cluster 1
It appears that this cluster would be good for people interested in Hockey and coffee
```
cluster1 = borough_merged.loc[borough_merged['Cluster Labels'] == 0, borough_merged.columns[[1] + list(range(5, borough_merged.shape[1]))]]
display(cluster1.head())
print(cluster1.shape)
```
#### Cluster 2
These neighbourhoods would be ideal for those that like parks and women's stores
```
cluster2 = borough_merged.loc[borough_merged['Cluster Labels'] == 1, borough_merged.columns[[1] + list(range(5, borough_merged.shape[1]))]]
display(cluster2.head())
print(cluster2.shape)
```
#### Cluster 3
These neighbourhoods would be ideal for those that like Fast food and women's stores
```
cluster3 = borough_merged.loc[borough_merged['Cluster Labels'] == 2, borough_merged.columns[[1] + list(range(5, borough_merged.shape[1]))]]
display(cluster3.head())
print(cluster3.shape)
```
#### Cluster 4
These neighbourhoods would be ideal for those that like Pizza and Empanada
```
cluster4 = borough_merged.loc[borough_merged['Cluster Labels'] == 3, borough_merged.columns[[1] + list(range(5, borough_merged.shape[1]))]]
display(cluster4.head())
print(cluster4.shape)
```
#### Cluster 5
These neighbourhoods would be ideal for those that like Bars and Women's stores
```
cluster5 = borough_merged.loc[borough_merged['Cluster Labels'] == 4, borough_merged.columns[[1] + list(range(5, borough_merged.shape[1]))]]
display(cluster5.head())
print(cluster5.shape)
```
#### Cluster 6
```
# NOTE: the original reused the name `cluster5` here, silently clobbering the
# Cluster 5 dataframe computed above; use a distinct name for cluster 6.
cluster6 = borough_merged.loc[borough_merged['Cluster Labels'] == 5, borough_merged.columns[[1] + list(range(5, borough_merged.shape[1]))]]
display(cluster6.head())
print(cluster6.shape)
```
### 5.2 Ideal Clusters
Clusters 3 and 5 (cluster labels 2 and 4) appear to be the most suitable for students as they have most of the important amenities for students
```
ideal_clusters = pd.concat([borough_merged.loc[borough_merged['Cluster Labels'] == 2],borough_merged.loc[borough_merged['Cluster Labels'] == 4]])
ideal_clusters
```
The above boroughs were found to be the ideal boroughs for students as they have the most number of venues ideal for students: cafes, Grocery stores, gyms, supermarkets.
### 5.3 Rental prices
Here we find the rental prices for each borough. Average rental prices for a single room was taken from : https://www.gov.uk/government/publications/private-rental-market-in-london-july-2018-to-june-2019.
```
# Read excel file
rental_stats = pd.read_excel('London_rental_statistics.xls',sheet_name='Table 1.2',header=11,skipfooter=42)
```
Cleaning up the data
```
# Remove useless columns
rental_stats.drop(columns='Unnamed: 0', inplace=True)
# Keep only the rows for Bedroom Category = Room
rental_stats = rental_stats[rental_stats['Bedroom Category']=='Room']
display(rental_stats.head())
# Change some Borough names to allow for easy merging
ideal_clusters = ideal_clusters.replace(to_replace='Barking and Dagenham [note 1]',value='Barking and Dagenham')
# Change some Borough names to allow for easy merging
ideal_clusters = ideal_clusters.replace(to_replace='Greenwich [note 2]',value='Greenwich')
# Change some Borough names to allow for easy merging
ideal_clusters = ideal_clusters.replace(to_replace='Hammersmith and Fulham [note 4]',value='Hammersmith and Fulham')
ideal_clusters.head()
```
Now let's merge the cluster dataframe and the rental stats dataframe.
```
ideal_boroughs = pd.merge(left=ideal_clusters[['Cluster Labels','Neighborhood','Latitude','Longitude']],right=rental_stats,left_on='Neighborhood',right_on='Borough')
ideal_boroughs.head()
```
Sort the values by the mean rental cost
```
# Sorting values
ideal_boroughs_sorted = ideal_boroughs.sort_values(by='Mean',ascending=True)
# Display fewer columns to decutter our results
ideal_boroughs_sorted_simplified = ideal_boroughs_sorted[['Borough','Mean','Lower quartile','Median','Upper quartile','Latitude','Longitude','Cluster Labels']]
# Display Data frame
ideal_boroughs_sorted_simplified
# create map
map_clusters = folium.Map(location=[latitude, longitude], zoom_start=11)
borough_merged1 = ideal_boroughs_sorted_simplified
# set color scheme for the clusters
x = np.arange(kclusters)
ys = [i + x + (i*x)**2 for i in range(kclusters)]
colors_array = cm.rainbow(np.linspace(0, 1, len(ys)))
rainbow = [colors.rgb2hex(i) for i in colors_array]
# add markers to the map
markers_colors = []
for lat, lon, poi, cluster, mean in zip(borough_merged1['Latitude'], borough_merged1['Longitude'], borough_merged1['Borough'], borough_merged1['Cluster Labels'], borough_merged1['Mean']):
label = folium.Popup('Post code: {}, Cluster: {}, Mean rent £{}'.format(str(poi), str(cluster), mean), parse_html=True)
# print(lat,lon)
folium.CircleMarker(
[lat, lon],
radius=5,
popup=label,
color=rainbow[cluster-1],
fill=True,
fill_color=rainbow[cluster-1],
fill_opacity=1).add_to(map_clusters)
map_clusters
```
| github_jupyter |
# Structural Analysis of Protein Ensembles
This notebook does the following:
- Creates a protein structural ensemble from the PDB structures prepared in [this notebook](../../1_Download_and_prepare_protein_ensembles/4_Prepare_proteins_using_pdb4amber.ipynb). The file is saved into `.dcd` and `.pdb` format files.
- Structures are aligned using the $C \alpha$ atoms of the residues comprising the protein's pocket.
- The `.pdb` file is used as input for `POVME` to compute the volume and the shape of the protein's pocket.
- Pair-wise RMSD is computed among all protein conformations.
- Classical Multidimensional Scaling Analysis (cMDS) is performed using the RMSD matrix as input.
```
import pandas as pd
import numpy as np
import pytraj as pyt
import nglview as nv
from glob import glob
import matplotlib.pyplot as plt
import seaborn as sns
from prody import *
import sys
sys.path.insert(0, '../..')
from helper_modules.get_cocristalized_ligands \
import get_pocket_ligand, PocketResidues
sns.set(style='ticks', font_scale = 1.2)
```
## Protein information dataframe
```
prot_name = 'hsp90'
df_prot_file = f'../1_Download_and_prepare_protein_ensembles/TABLA_MTDATA_HSP90_298_crys_LIGS_INFO.json'
df_prot = pd.read_json(df_prot_file)
df_prot.head()
df_prot.shape
```
### Only for the HSP90 protein
From the 298 retrieved structures, 2 conformations were not modeled:
```
path_to_prepared = '../1_Download_and_prepare_protein_ensembles/pdb_structures/pdb_prepared/'
pdbids_list = sorted(glob(f'{path_to_prepared}/*'))
pdbids_list = [i.split('/')[-1].split('_')[0] for i in pdbids_list]
# Update selection keeping only those ids which files were modeled and prepared
df_prot = df_prot[df_prot.index.isin(pdbids_list)]
# Save the dataframe
n_confs = df_prot.shape[0]
df_prot_file = f'../1_Download_and_prepare_protein_ensembles/TABLA_MTDATA_HSP90_{n_confs}_crys_LIGS_INFO.json'
df_prot.to_json(df_prot_file)
df_prot.shape
```
## Protein structural ensemble
```
%run ../../helper_modules/protein_subsequences.py
# Create a trajectory file using the protein ensemble
path_to_pdb_files = '../1_Download_and_prepare_protein_ensembles/pdb_structures/pdb_prepared/*'
# Load all protein structures to a single trajectory
crys_traj = pyt.io.iterload(filename = path_to_pdb_files)
print(f' Number of conformations: {crys_traj.n_frames}\n',
f'Number of atoms per frame: {crys_traj.n_atoms}.')
view = nv.show_pytraj(crys_traj)
view.add_representation('licorice')
view
```
### Save protein Ensemble as PDB trajectory
- **Alignment using pocket subsequence**: the PDB format was used to compute the volume using POVME3; check folder `./POVME_results`
```
# Use a reference structure to visualize the pocket residues
prot_name = 'hsp90'
ref_struc_id = '1byq'
ref_ligand_name = 'ADP'
# Use a reference structure to visualize the pocket residues
pkt_info = PocketResidues(ref_struc_id, ref_ligand_name)
pocket_residues_str = pkt_info.get_pocket_residues_as_list().split(' ')
# Merge the values
pocket_residues_str = list(set(pocket_residues_str))
pocket_residues_str = ','.join(pocket_residues_str)
print('Indices of pocket redsidues:', pocket_residues_str)
pkt_str_mask = f":{pocket_residues_str}&(@CA)"
pocket_alg = pyt.align(crys_traj, mask = pkt_str_mask)
# Save the traj: This traj was used to compute POVME, check folder ./POVME_results
pyt.write_traj(f'./PDB_{crys_traj.n_frames}_{prot_name}_POCKET_ALL.pdb',
pocket_alg,
options = 'model',
overwrite = True)
# Save the traj as dcd file to upload to github
pyt.write_traj(f'./PDB_{crys_traj.n_frames}_{prot_name}_POCKET_ALL.dcd',
pocket_alg,
overwrite = True)
```
### Compute the center of mass of the pocket using its residues
```
# Pocket selection mask for pytraj (":<res1>,<res2>,...").
# NOTE(review): pocket_residues_str is already comma-joined above, so the
# replace(' ', ',') call looks like a no-op — confirm against the builder.
mask = f":{pocket_residues_str.replace(' ', ',')}"
center_geo = pyt.center_of_geometry(traj=pocket_alg,
                                    mask=mask, dtype='ndarray',
                                    frame_indices=None)
center_mass = pyt.center_of_mass(traj=pocket_alg,
                                 mask=mask, dtype='ndarray',
                                 frame_indices=None)
# Per-frame centers, averaged over the whole ensemble
print('Center of geometry (mean):',
      np.round(center_geo.mean(axis = 0)))
print('Center of mass (mean):',
      np.round(center_mass.mean(axis = 0)))
view = nv.show_pytraj(pocket_alg)
center = np.round(center_mass.mean(axis = 0), 0).tolist()
center_2 = [2, 9, 24]
view.add_representation('licorice', selection = ':2,4')
view.shape.add_sphere(center_2, [0,0,3,0.2], 12)
view.update_representation(component=1, repr_index = 0, opacity = 0.5)
view
view = pkt_info.visualize_pocket()
view
```
- **Alignment using pocket subsequence**: the PDB format was used to compute the volume using POVME3; check folder `./POVME_results`
```
print(F'PDBID used as reference structure: {ref_struc_id}')
# Use a reference structure to identify the secondary structure residues
header = parsePDB(ref_struc_id, header=True, model=0)
structure = parsePDB(ref_struc_id, folder = './')
sec_sctr = assignSecstr(header, structure.select('protein')).getSecstrs()
sec_sctr_HE = structure.select(F'secondary H E and calpha').getResnums()
sec_sctr_res_list = sec_sctr_HE.tolist()
sec_sctr_res_str = ','.join(map(str, sec_sctr_res_list))
print(F'{len(sec_sctr_res_list)} residues belong \
to the secondary structure of the {prot_name.upper()} protein.')
# Perform the alignment
sec_str_mask = f':{sec_sctr_res_str}&(@CA)'
sec_str_alg = pyt.align(crys_traj, mask = sec_str_mask)
# Save the traj: This traj was used to compute POVME, check folder ./POVME_results
pyt.write_traj(f'./PDB_{crys_traj.n_frames}_{prot_name}_SECSTRUC_ALL.pdb',
sec_str_alg, overwrite = True)
# Save the traj as dcd file to upload to github
pyt.write_traj(f'./PDB_{crys_traj.n_frames}_{prot_name}_SECSTRUC.dcd',
sec_str_alg, overwrite = True)
nv.show_pytraj(sec_str_alg)
```
## Pocket Volume information
- Add Pocket Volume information computed by POVME3 to the protein dataframe
```
pocket_vol_file = './POVME3_results/Pocket_alignment/res_volumes.tabbed.txt'
# Keep only the Volume column from the POVME3 results
df = pd.read_csv(pocket_vol_file, sep='\t',
header = None,
names = ['conf_num', 'volume'])[['volume']]
# Add the volumen information to the main dataframe
df_prot['pocket_volume'] = df.volume.values
ax = df_prot.pocket_volume.plot.hist(bins = 20)
ax.set(title = 'Pocket Volume among conformations',
xlabel = 'Pkt Volume (A^3)')
plt.show()
df_prot
```
## Classical Multidimensional Scaling
```
%run ../../helper_modules/MDS.py
pkt_str_mask
```
#### cMDS: Using RMSD as distance metric
```
# Compute the distance matrices
pair_rmsd_sec = pyt.pairwise_rmsd(traj = crys_traj,
mask = sec_str_mask,
metric ='rms')
pair_rmsd_pkt = pyt.pairwise_rmsd(traj = crys_traj,
mask = pkt_str_mask,
metric ='rms')
# Compute cMDS
mds_sec = cMDS(pair_rmsd_sec)[0]
mds_pkt = cMDS(pair_rmsd_pkt)[0]
mds_sec.shape
mds_sec[:1].shape
```
#### Visualize the projections
```
def plot_subspace(x, y, title = '', **kwargs):
    """Scatter-plot a 2-D cMDS projection.

    Parameters
    ----------
    x, y : array-like
        Coordinates of each conformation in the reduced space.
    title : str, optional
        Figure title.
    **kwargs
        Extra keyword arguments forwarded to seaborn.scatterplot.
    """
    fig, ax = plt.subplots(figsize = (6, 6))
    sns.scatterplot(x = x, y = y, alpha = 0.6,
                    linewidth = 0, color = '#02A298',
                    **kwargs
                    )
    ax.set_title(title)
    ax.set(xlabel = 'Dim. 1', ylabel = 'Dim. 2')
    # dotted cross-hairs through the origin for orientation
    ax.axvline(0, ls = ':', color = 'gray')
    ax.axhline(0, ls = ':', color = 'gray')
    ax.grid()
    plt.show()
# Visualize the projection
x, y = mds_sec[:2]
plot_subspace(x, y,
title = f'{prot_name.upper()}: cMDS using Ca (secondary structure residues)')
# Visualize the projection
x, y = mds_pkt[:2]
plot_subspace(x, y,
title = f'{prot_name.upper()}: cMDS using Ca (pocket residues)')
```
#### cMDS: Using pocket shape similarity
```
# Load POVME3 results
path_povme_results = './POVME3_results/Pocket_alignment/'
vol_tan_mtx = pd.read_csv(f'{path_povme_results}/POVME_Tanimoto_matrix.csv', header=None)
# The following figure shows the
# Tanimoto Similarity among pockets saphes
fig, ax = plt.subplots()
plt.imshow(vol_tan_mtx, cmap = 'Greens')
ax.set(xlabel = 'Protein conformations',
title = "Pocket similarity (shape)\n" + \
"among conformations"
)
plt.colorbar(label = 'Tanimoto Similarity')
plt.show()
# cMDS using Tanimoto
# Compute a dissimilarity matrix
dism_mtx = 1 - vol_tan_mtx
mds_vol_pkt = cMDS(dism_mtx)[0]
# Visualize the projection
x, y = mds_vol_pkt[:2]
plot_subspace(x, y,
title = f"{prot_name.upper()}: cMDS using Pocket's shape similarity")
```
## Save the projections
```
df_dims = pd.DataFrame([mds_sec[0], mds_sec[1],
mds_pkt[0], mds_pkt[1],
mds_vol_pkt[0], mds_vol_pkt[1]]).T
# Set names
colnames = ['mds_sec_x', 'mds_sec_y',
'mds_pkt_x', 'mds_pkt_y',
'mds_vol_pkt_x', 'mds_vol_pkt_y',
]
# Set the names
df_dims.columns = colnames
# Set index
df_dims.index = df_prot.index
# Save to file
df_dims.to_pickle('./df_PROTEINS_DIMS_reduced_TABLE.obj')
```
Finished!
| github_jupyter |
```
# A script to impute missing values for for stand age in forest inventory plots
# load our libraries
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestRegressor
from sklearn import preprocessing
import matplotlib.pyplot as plt
%matplotlib inline
from sklearn.metrics import mean_squared_error
from math import sqrt
# read in the PNW FIA Database, with plot("condition")-level summary data
PNW_FIADB = pd.read_csv("G:/projects/ForestPlanner_2015/Data/Work/IDB_SUMMARY_2015-10-23.csv")
PNW_FIADB.head()
# rename a few columns
PNW_FIADB.rename(columns={'AGE_Calc': 'COND_AGE', 'LATITUDE_FUZZ': 'LAT', 'LONGITUDE_FUZZ': 'LON',
'Calc_SLOPE': 'SLOPE', 'Calc_ASPECT': 'ASPECT', 'ELEV_FT': 'ELEV'}, inplace=True)
# set thte COND_CN field as the dataframe index
PNW_FIADB.set_index("COND_ID", inplace = True)
PNW_FIADB.describe()
# take a look at the histogram of ages
PNW_FIADB.COND_AGE.hist(bins=50)
# some of our columns need to be formatted as factors/categorical variables
# FORTYPCD (Forest Type), SITECLCD (Site Class), SOIL_ROOTING_DEPTH_PNW,
# STND_COND_CD_PNWRS (Stand condition code, e.g., grass-forb, open sawtimber, closed sapling-pole-sawtimber, old-growth)
# STND_STRUC_CD_PNWRS (Stand Structure, e.g., even-aged single story, two-story, uneven-aged, mosaic),
# PHYSCLCD (Soil-Climate Type)
cat_cols = ["STAND_SIZE_CLASS", "SITE_CLASS_FIA", "FOR_TYPE", "FOR_TYPE_SECDRY"]
for col in cat_cols:
    if col in ["FOR_TYPE", "FOR_TYPE_SECDRY"]:
        # nominal categories: forest types carry no natural ordering
        PNW_FIADB[col] = PNW_FIADB[col].astype('category')
    else:
        # ordinal categories: size/site classes are ranked
        PNW_FIADB[col] = PNW_FIADB[col].astype('category').cat.as_ordered()
# How many missing values do we have
print("Out of " + str(len(PNW_FIADB)) + " plots:")
# create a list of fields (other than age) with nulls
hasNaNs = []
for col in PNW_FIADB.columns.values:
print(col + ": " + str(PNW_FIADB[col].isnull().sum()) + " nulls")
if col != "COND_AGE" and PNW_FIADB[col].isnull().sum() >0:
hasNaNs.append(col)
print(hasNaNs)
# See how many plots with missing ages also have missing nulls in other columns
for col in hasNaNs:
print(col, str(PNW_FIADB.COND_AGE.loc[PNW_FIADB[col].isnull()].isnull().sum()))
# columns of important predictor variables
dropNaNcols = ["STAND_SIZE_CLASS", "QMD_TOT_CM", "SumOfBA_FT2_AC", "SumOfBIOM_TR_ABV_GRND_TON", "SumOfVOL_AC_GRS_FT3"]
# We could try to fill in missing values for some predictor variables, or just ignore them
# Let's see whether random forest thinks these values are helpful when we train the model
# on a dataset that drops the plots where values are not missing
# and also drops the columns that aren't predictors (PLT_CN, FORTYPE)
noNaNs = PNW_FIADB.dropna(axis=0, how='any', thresh=None, subset=dropNaNcols, inplace=False)
# Number of plots with complete predictor variables
print(str(len(noNaNs))+ " plots with complete predictor variables")
# Create a random forest training set of data from all plots with ages
train = noNaNs.dropna(axis=0, how='any', thresh=None, subset=["COND_AGE"], inplace=False)
# set parameters for random forest regression
randomforest = RandomForestRegressor(n_estimators = 100, oob_score=True, random_state = 54321)
# train randomforest on plots within the age range we care about
# return the ranked feature importances for that random forest model
# use that model to predict the ages for all plots where ages are known
# (including those outside the age range used to train the model)
# set which data we're using to train the model
# "train" includes all conditions with measured ages and all predictor variables
age_thresh = 150
training_set = train.loc[train.COND_AGE < age_thresh]
# Drop columns of variables that don't seem important as predictors
droplabels = ["SLOPE", "ASPECT", "ELEV", "FOR_TYPE_SECDRY", "MAI", "SITE_CLASS_FIA"]
droplabels.append("COND_AGE")
droplabels.append("PLOT_ID")
droplabels.append("FIA_FOREST_TYPE_NAME")
droplabels.append("GLC_GROUP")
X, y = training_set.drop(labels=droplabels, axis=1), training_set["COND_AGE"]
# train a random forest on the data subset with known ages
randomforest.fit(X,y)
# Gather the feature importances
importances = randomforest.feature_importances_
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for feature in range(len(training_set.drop(labels=droplabels, axis=1).columns.values)):
print("%d. %s (%f)" % (feature + 1, training_set.drop(labels=droplabels, axis=1).columns.values[indices[feature]], importances[indices[feature]]))
# Measures of fit
print("-----")
print("R-squared on training set (vs OOB sample): " + str(randomforest.oob_score_))
# Turn random forest age predictions into a series
RF_ages = pd.Series(randomforest.predict(train.drop(labels=droplabels, axis=1)))
RF_ages.name = "RF_AGE"
# Make a dataframe with measured and RF-predicted ages
RF_preds = pd.concat([train.COND_AGE.reset_index(), RF_ages], axis = 1)
# Calculate RMSE for various age ranges and print it
# this is, on average, how close we are to the actual age (in years)
def RMSE(measured, predicted):
    """Root-mean-squared error between measured and predicted values."""
    mse = mean_squared_error(measured, predicted)
    return sqrt(mse)
print("-----")
print("RMSE for training set: " + str(RMSE(RF_preds.COND_AGE.loc[RF_preds.COND_AGE < age_thresh], RF_preds.RF_AGE.loc[RF_preds.COND_AGE < age_thresh])))
print("Overall RMSE " + str(RMSE(RF_preds.COND_AGE, RF_preds.RF_AGE)))
AgeRanges = [[0,10],[10,25],[25,50],[50,75],[75,100],[100,150],[150,200],[200,300]]
for agerange in AgeRanges:
print("Age Range -> " +str(agerange)),
print("RMSE " + str(RMSE(
RF_preds.COND_AGE.loc[(RF_preds.COND_AGE >= agerange[0]) & (RF_preds.COND_AGE < agerange[1])],
RF_preds.RF_AGE.loc[(RF_preds.COND_AGE >= agerange[0]) & (RF_preds.COND_AGE < agerange[1])])))
# Data to plot
x = RF_preds.COND_AGE.loc[RF_preds.COND_AGE < age_thresh]
y = RF_preds.RF_AGE.loc[RF_preds.COND_AGE < age_thresh]
plt.figure()
plt.grid(True)
plt.scatter(x, y, alpha = 0.03, facecolors='none', edgecolors='r')
plt.title("How well does Random Forest predict stand ages?")
plt.xlabel("Measured Stand Age [yrs]")
plt.ylabel("Predicted Stand Age [yrs]")
plt.xlim(0,age_thresh)
plt.ylim(0,age_thresh)
# calculate and plot bestfit line with np.polyfit
m, b = np.polyfit(x, y, 1)
plt.plot(x, m*x + b, 'r--', alpha = 0.65)
# add a 1:1 line
plt.plot([0, 350], [0, 350], 'k--')
plt.show()
# Predict ages for conditions without ages
# dataframe of conditions without ages, but have all predictor variables
# (x != x is True only for NaN, so this selects rows with missing COND_AGE)
noAges = noNaNs.loc[noNaNs.COND_AGE != noNaNs.COND_AGE]
# RF predictions of ages for conditions without measured ages
RFpred_noAges = pd.Series(randomforest.predict(noAges.drop(labels=droplabels, axis=1))).astype('int')
RFpred_noAges.name = "RF_Age"
# bring predicted ages into the dataframe; reset_index aligns the predictions
# positionally before restoring COND_ID as the index
RFages_imputed = pd.concat([noAges.reset_index(), RFpred_noAges], axis = 1).set_index("COND_ID")
# Overwrite COND_AGE with RF_Age on PNW FIA DB 2011 dataframe
print(str(len(PNW_FIADB)) + " conditions total")
print(str(PNW_FIADB.COND_AGE.isnull().sum()) + " ages missing before impute")
# NOTE(review): chained assignment (.COND_AGE.loc[...] = ...) can trigger
# SettingWithCopyWarning; PNW_FIADB.loc[mask, 'COND_AGE'] = ... is the safe form
PNW_FIADB.COND_AGE.loc[PNW_FIADB.COND_AGE != PNW_FIADB.COND_AGE] = RFages_imputed["RF_Age"]
print(str(PNW_FIADB.COND_AGE.isnull().sum()) + " ages missing after impute")
print(str(len(PNW_FIADB)-PNW_FIADB.COND_AGE.isnull().sum()) + " conditions with ages available")
# write condition unique ID, LAT, LON, slope, aspect, elevation, and stand age to a csv
# based on idb_summary format used in Forest Planner
PNW_FIADB2011_cond_summary = PNW_FIADB[["LAT", "LON", "ELEV", "ASPECT", "SLOPE", "COND_AGE"]].dropna(axis=0, how='any', thresh=None, subset=["COND_AGE"], inplace=False)
PNW_FIADB2011_cond_summary[["COND_AGE"]] = PNW_FIADB2011_cond_summary[["COND_AGE"]].astype(int)
PNW_FIADB2011_cond_summary.to_csv("G:/projects/ForestPlanner_2015/Data/Processed/IDB2pt0_COND_SUMMARY_ages-imputed_2015-11-02.csv", header = True, index = True)
```
| github_jupyter |
# Introduction
Here we give a brief introduction to `openscm_units`.
## The unit registry
``openscm_units.unit_registry`` extends Pint's default unit registry by adding simple climate modelling related units. We'll spare the details here (they can be found in [our documentation](https://openscm-units.readthedocs.io/en/latest/unit_registry.html)), but the short idea is that you can now do all sorts of simple climate modelling related conversions which were previously impossible.
```
# NBVAL_IGNORE_OUTPUT
import traceback
import pandas as pd
import seaborn as sns
from pint.errors import DimensionalityError
from openscm_units import unit_registry
```
## Basics
``openscm_units.unit_registry`` knows about basic units, e.g. 'CO2'.
```
unit_registry("CO2")
```
Standard conversions are now trivial.
```
# convert a CO2 mass quantity to elemental carbon (C)
unit_registry("CO2").to("C")
# a quantity with units attached: Gt of carbon per year
emissions_aus = 0.34 * unit_registry("Gt C / yr")
# same quantity re-expressed in Mt CO2 / yr (displayed by the notebook)
emissions_aus.to("Mt CO2/yr")
```
## Contexts
In general, we cannot simply convert e.g. CO$_2$ emissions into CH$_4$ emissions.
```
# CH4 and CO2 are dimensionally incompatible without a metric context,
# so a direct conversion raises DimensionalityError
try:
    unit_registry("CH4").to("CO2")
except DimensionalityError:
    # limit=0 suppresses the stack frames; only the error message prints
    traceback.print_exc(limit=0, chain=False)
```
However, a number of metrics exist which do allow conversions between GHG species. Pint plus OpenSCM's inbuilt metric conversions allow you to perform such conversions trivially by specifying the `context` keyword.
```
# inside a metric context (here AR4 GWP100) conversions between GHG
# species become possible
with unit_registry.context("AR4GWP100"):
    ch4_ar4gwp100_co2e = unit_registry("CH4").to("CO2")
# display the CO2-equivalent of CH4 under AR4GWP100
ch4_ar4gwp100_co2e
```
## Gas mixtures
Some gases (mainly refrigerants) are actually mixtures of other gases, for example HFC407a (aka R-407A). In general, they can be used like any other gas. Additionally, `openscm_units` provides the ability to split these gases into their constituents.
```
# HFC407a is a gas mixture (a refrigerant blend)
emissions = 20 * unit_registry('kt HFC407a / year')
# the mixture as a whole can still be converted to CO2-equivalent
# within a metric context
with unit_registry.context("AR4GWP100"):
    print(emissions.to('Gg CO2 / year'))
# split the mixture into emissions of its constituent gases
unit_registry.split_gas_mixture(emissions)
```
## Building up complexity
`openscm_units` is meant to be a simple repository which does one thing, but does it well. We encourage you to use it wherever you like (and if you do please let us know via the [issue tracker](https://github.com/openscm/openscm-units/issues)). As an example of something we can do, we can quickly see how GWP100 has changed between assessment reports.
```
# NBVAL_IGNORE_OUTPUT
# Compare GWP100 CO2-equivalence values for a handful of gases across
# three IPCC assessment-report metrics.
gases = ["CO2", "CH4", "N2O", "HFC32", "CFC11"]
gwp_metrics = ["SARGWP100", "AR4GWP100", "AR5GWP100"]

# one record per (metric, gas) pair: the gas's CO2-equivalent magnitude
# evaluated inside that metric's context
records = []
for metric_name in gwp_metrics:
    with unit_registry.context(metric_name):
        for gas in gases:
            records.append(
                {
                    "unit": gas,
                    "metric": metric_name,
                    "value": unit_registry(gas).to("CO2").magnitude,
                }
            )
data = pd.DataFrame(records)

# one bar chart per gas; independent y-axes because magnitudes differ widely
sns.catplot(
    data=data,
    x="metric",
    y="value",
    kind="bar",
    col="unit",
    col_wrap=5,
    sharey=False,
)
```
| github_jupyter |
# SVI Part III: ELBO Gradient Estimators
## Setup
We've defined a Pyro model with observations ${\bf x}$ and latents ${\bf z}$ of the form $p_{\theta}({\bf x}, {\bf z}) = p_{\theta}({\bf x}|{\bf z}) p_{\theta}({\bf z})$. We've also defined a Pyro guide (i.e. a variational distribution) of the form $q_{\phi}({\bf z})$. Here ${\theta}$ and $\phi$ are variational parameters for the model and guide, respectively. (In particular these are _not_ random variables that call for a Bayesian treatment).
We'd like to maximize the log evidence $\log p_{\theta}({\bf x})$ by maximizing the ELBO (the evidence lower bound) given by
$${\rm ELBO} \equiv \mathbb{E}_{q_{\phi}({\bf z})} \left [
\log p_{\theta}({\bf x}, {\bf z}) - \log q_{\phi}({\bf z})
\right]$$
To do this we're going to take (stochastic) gradient steps on the ELBO in the parameter space $\{ \theta, \phi \}$ (see references [1,2] for early work on this approach). So we need to be able to compute unbiased estimates of
$$\nabla_{\theta,\phi} {\rm ELBO} = \nabla_{\theta,\phi}\mathbb{E}_{q_{\phi}({\bf z})} \left [
\log p_{\theta}({\bf x}, {\bf z}) - \log q_{\phi}({\bf z})
\right]$$
How do we do this for general stochastic functions `model()` and `guide()`? To simplify notation let's generalize our discussion a bit and ask how we can compute gradients of expectations of an arbitrary cost function $f({\bf z})$. Let's also drop any distinction between $\theta$ and $\phi$. So we want to compute
$$\nabla_{\phi}\mathbb{E}_{q_{\phi}({\bf z})} \left [
f_{\phi}({\bf z}) \right]$$
Let's start with the easiest case.
## Easy Case: Reparameterizable Random Variables
Suppose that we can reparameterize things such that
$$\mathbb{E}_{q_{\phi}({\bf z})} \left [f_{\phi}({\bf z}) \right]
=\mathbb{E}_{q({\bf \epsilon})} \left [f_{\phi}(g_{\phi}({\bf \epsilon})) \right]$$
Crucially we've moved all the $\phi$ dependence inside of the expectation; $q({\bf \epsilon})$ is a fixed distribution with no dependence on $\phi$. This kind of reparameterization can be done for many distributions (e.g. the normal distribution); see reference [3] for a discussion. In this case we can pass the gradient straight through the expectation to get
$$\nabla_{\phi}\mathbb{E}_{q({\bf \epsilon})} \left [f_{\phi}(g_{\phi}({\bf \epsilon})) \right]=
\mathbb{E}_{q({\bf \epsilon})} \left [\nabla_{\phi}f_{\phi}(g_{\phi}({\bf \epsilon})) \right]$$
Assuming $f(\cdot)$ and $g(\cdot)$ are sufficiently smooth, we can now get unbiased estimates of the gradient of interest by taking a Monte Carlo estimate of this expectation.
## Tricky Case: Non-reparameterizable Random Variables
What if we can't do the above reparameterization? Unfortunately this is the case for many distributions of interest, for example all discrete distributions. In this case our estimator takes a bit more complicated form.
We begin by expanding the gradient of interest as
$$\nabla_{\phi}\mathbb{E}_{q_{\phi}({\bf z})} \left [
f_{\phi}({\bf z}) \right]=
\nabla_{\phi} \int d{\bf z} \; q_{\phi}({\bf z}) f_{\phi}({\bf z})$$
and use the chain rule to write this as
$$ \int d{\bf z} \; \left \{ (\nabla_{\phi} q_{\phi}({\bf z})) f_{\phi}({\bf z}) + q_{\phi}({\bf z})(\nabla_{\phi} f_{\phi}({\bf z}))\right \} $$
At this point we run into a problem. We know how to generate samples from $q(\cdot)$—we just run the guide forward—but $\nabla_{\phi} q_{\phi}({\bf z})$ isn't even a valid probability density. So we need to massage this formula so that it's in the form of an expectation w.r.t. $q(\cdot)$. This is easily done using the identity
$$ \nabla_{\phi} q_{\phi}({\bf z}) =
q_{\phi}({\bf z})\nabla_{\phi} \log q_{\phi}({\bf z})$$
which allows us to rewrite the gradient of interest as
$$\mathbb{E}_{q_{\phi}({\bf z})} \left [
(\nabla_{\phi} \log q_{\phi}({\bf z})) f_{\phi}({\bf z}) + \nabla_{\phi} f_{\phi}({\bf z})\right]$$
This form of the gradient estimator—variously known as the REINFORCE estimator or the score function estimator or the likelihood ratio estimator—is amenable to simple Monte Carlo estimation.
Note that one way to package this result (which is convenient for implementation) is to introduce a surrogate objective function
$${\rm surrogate \;objective} \equiv
\log q_{\phi}({\bf z}) \overline{f_{\phi}({\bf z})} + f_{\phi}({\bf z})$$
Here the bar indicates that the term is held constant (i.e. it is not to be differentiated w.r.t. $\phi$). To get a (single-sample) Monte Carlo gradient estimate, we sample the latent random variables, compute the surrogate objective, and differentiate. The result is an unbiased estimate of $\nabla_{\phi}\mathbb{E}_{q_{\phi}({\bf z})} \left [
f_{\phi}({\bf z}) \right]$. In equations:
$$\nabla_{\phi} {\rm ELBO} = \mathbb{E}_{q_{\phi}({\bf z})} \left [
\nabla_{\phi} ({\rm surrogate \; objective}) \right]$$
## Variance or Why I Wish I Was Doing MLE Deep Learning
We now have a general recipe for an unbiased gradient estimator of expectations of cost functions. Unfortunately, in the more general case where our $q(\cdot)$ includes non-reparameterizable random variables, this estimator tends to have high variance. Indeed in many cases of interest the variance is so high that the estimator is effectively unusable. So we need strategies to reduce variance (for a discussion see reference [4]). We're going to pursue two strategies. The first strategy takes advantage of the particular structure of the cost function $f(\cdot)$. The second strategy effectively introduces a way to reduce variance by using information from previous estimates of
$\mathbb{E}_{q_{\phi}({\bf z})} [ f_{\phi}({\bf z})]$. As such it is somewhat analogous to using momentum in stochastic gradient descent.
### Reducing Variance via Dependency Structure
In the above discussion we stuck to a general cost function $f_{\phi}({\bf z})$. We could continue in this vein (the approach we're about to discuss is applicable in the general case) but for concreteness let's zoom back in. In the case of stochastic variational inference, we're interested in a particular cost function of the form <br/><br/>
$$\log p_{\theta}({\bf x} | {\rm Pa}_p ({\bf x})) +
\sum_i \log p_{\theta}({\bf z}_i | {\rm Pa}_p ({\bf z}_i))
- \sum_i \log q_{\phi}({\bf z}_i | {\rm Pa}_q ({\bf z}_i))$$
where we've broken the log ratio $\log p_{\theta}({\bf x}, {\bf z})/q_{\phi}({\bf z})$ into an observation log likelihood piece and a sum over the different latent random variables $\{{\bf z}_i \}$. We've also introduced the notation
${\rm Pa}_p (\cdot)$ and ${\rm Pa}_q (\cdot)$ to denote the parents of a given random variable in the model and in the guide, respectively. (The reader might worry what the appropriate notion of dependency would be in the case of general stochastic functions; here we simply mean regular ol' dependency within a single execution trace). The point is that different terms in the cost function have different dependencies on the random variables $\{ {\bf z}_i \}$ and this is something we can leverage.
To make a long story short, for any non-reparameterizable latent random variable ${\bf z}_i$ the surrogate objective is going to have a term
$$\log q_{\phi}({\bf z}_i) \overline{f_{\phi}({\bf z})} $$
It turns out that we can remove some of the terms in $\overline{f_{\phi}({\bf z})}$ and still get an unbiased gradient estimator; furthermore, doing so will generally decrease the variance. In particular (see reference [4] for details) we can remove any terms in $\overline{f_{\phi}({\bf z})}$ that are not downstream of the latent variable ${\bf z}_i$ (downstream w.r.t. to the dependency structure of the guide). Note that this general trick—where certain random variables are dealt with analytically to reduce variance—often goes under the name of Rao-Blackwellization.
In Pyro, all of this logic is taken care of automatically by the `SVI` class. In particular as long as we use a `TraceGraph_ELBO` loss, Pyro will keep track of the dependency structure within the execution traces of the model and guide and construct a surrogate objective that has all the unnecessary terms removed:
```python
svi = SVI(model, guide, optimizer, TraceGraph_ELBO())
```
Note that leveraging this dependency information takes extra computations, so `TraceGraph_ELBO` should only be used in the case where your model has non-reparameterizable random variables; in most applications `Trace_ELBO` suffices.
### An Example with Rao-Blackwellization:
Suppose we have a gaussian mixture model with $K$ components. For each data point we: (i) first sample the component distribution $k \in [1,...,K]$; and (ii) observe the data point using the $k^{\rm th}$ component distribution. The simplest way to write down a model of this sort is as follows:
```python
ks = pyro.sample("k", dist.Categorical(probs)
.independent(1))
pyro.sample("obs", dist.Normal(locs[ks], scale)
.independent(1),
obs=data)
```
Since the user hasn't taken care to mark any of the conditional independencies in the model, the gradient estimator constructed by Pyro's `SVI` class is unable to take advantage of Rao-Blackwellization, with the result that the gradient estimator will tend to suffer from high variance. To address this problem the user needs to explicitly mark the conditional independence. Happily, this is not much work:
```python
# mark conditional independence
# (assumed to be along the rightmost tensor dimension)
with pyro.iarange("foo", data.size(-1)):
ks = pyro.sample("k", dist.Categorical(probs))
pyro.sample("obs", dist.Normal(locs[ks], scale),
obs=data)
```
That's all there is to it.
### Aside: Dependency tracking in Pyro
Finally, a word about dependency tracking. Tracking dependency within a stochastic function that includes arbitrary Python code is a bit tricky. The approach currently implemented in Pyro is analogous to the one used in WebPPL (cf. reference [5]). Briefly, a conservative notion of dependency is used that relies on sequential ordering. If random variable ${\bf z}_2$ follows ${\bf z}_1$ in a given stochastic function then ${\bf z}_2$ _may be_ dependent on ${\bf z}_1$ and therefore _is_ assumed to be dependent. To mitigate the overly coarse conclusions that can be drawn by this kind of dependency tracking, Pyro includes constructs for declaring things as independent, namely `irange` and `iarange` ([see the previous tutorial](svi_part_ii.ipynb)). For use cases with non-reparameterizable variables, it is therefore important for the user to make use of these constructs (when applicable) to take full advantage of the variance reduction provided by `SVI`. In some cases it may also pay to consider reordering random variables within a stochastic function (if possible). It's also worth noting that we expect to add finer notions of dependency tracking in a future version of Pyro.
### Reducing Variance with Data-Dependent Baselines
The second strategy for reducing variance in our ELBO gradient estimator goes under the name of baselines (see e.g. reference [6]). It actually makes use of the same bit of math that underlies the variance reduction strategy discussed above, except now instead of removing terms we're going to add terms. Basically, instead of removing terms with zero expectation that tend to _contribute_ to the variance, we're going to add specially chosen terms with zero expectation that work to _reduce_ the variance. As such, this is a control variate strategy.
In more detail, the idea is to take advantage of the fact that for any constant $b$, the following identity holds
$$\mathbb{E}_{q_{\phi}({\bf z})} \left [\nabla_{\phi}
(\log q_{\phi}({\bf z}) \times b) \right]=0$$
This follows since $q(\cdot)$ is normalized:
$$\mathbb{E}_{q_{\phi}({\bf z})} \left [\nabla_{\phi}
\log q_{\phi}({\bf z}) \right]=
\int \!d{\bf z} \; q_{\phi}({\bf z}) \nabla_{\phi}
\log q_{\phi}({\bf z})=
\int \! d{\bf z} \; \nabla_{\phi} q_{\phi}({\bf z})=
\nabla_{\phi} \int \! d{\bf z} \; q_{\phi}({\bf z})=\nabla_{\phi} 1 = 0$$
What this means is that we can replace any term
$$\log q_{\phi}({\bf z}_i) \overline{f_{\phi}({\bf z})} $$
in our surrogate objective with
$$\log q_{\phi}({\bf z}_i) \left(\overline{f_{\phi}({\bf z})}-b\right) $$
Doing so doesn't affect the mean of our gradient estimator but it does affect the variance. If we choose $b$ wisely, we can hope to reduce the variance. In fact, $b$ need not be a constant: it can depend on any of the random choices upstream (or sidestream) of ${\bf z}_i$.
#### Baselines in Pyro
There are several ways the user can instruct Pyro to use baselines in the context of stochastic variational inference. Since baselines can be attached to any non-reparameterizable random variable, the current baseline interface is at the level of the `pyro.sample` statement. In particular the baseline interface makes use of an argument `baseline`, which is a dictionary that specifies baseline options. Note that it only makes sense to specify baselines for sample statements within the guide (and not in the model).
##### Decaying Average Baseline
The simplest baseline is constructed from a running average of recent samples of $\overline{f_{\phi}({\bf z})}$. In Pyro this kind of baseline can be invoked as follows
```python
z = pyro.sample("z", dist.Bernoulli(...),
infer=dict(baseline={'use_decaying_avg_baseline': True,
'baseline_beta': 0.95}))
```
The optional argument `baseline_beta` specifies the decay rate of the decaying average (default value: `0.90`).
#### Neural Baselines
In some cases a decaying average baseline works well. In others using a baseline that depends on upstream randomness is crucial for getting good variance reduction. A powerful approach for constructing such a baseline is to use a neural network that can be adapted during the course of learning. Pyro provides two ways to specify such a baseline (for an extended example see the [AIR tutorial](air.ipynb)).
First the user needs to decide what inputs the baseline is going to consume (e.g. the current datapoint under consideration or the previously sampled random variable). Then the user needs to construct a `nn.Module` that encapsulates the baseline computation. This might look something like
```python
class BaselineNN(nn.Module):
def __init__(self, dim_input, dim_hidden):
super(BaselineNN, self).__init__()
self.linear = nn.Linear(dim_input, dim_hidden)
# ... finish initialization ...
def forward(self, x):
hidden = self.linear(x)
# ... do more computations ...
return baseline
```
Then, assuming the BaselineNN object `baseline_module` has been initialized somewhere else, in the guide we'll have something like
```python
def guide(x): # here x is the current mini-batch of data
pyro.module("my_baseline", baseline_module)
# ... other computations ...
z = pyro.sample("z", dist.Bernoulli(...),
infer=dict(baseline={'nn_baseline': baseline_module,
'nn_baseline_input': x}))
```
Here the argument `nn_baseline` tells Pyro which `nn.Module` to use to construct the baseline. On the backend the argument `nn_baseline_input` is fed into the forward method of the module to compute the baseline $b$. Note that the baseline module needs to be registered with Pyro with a `pyro.module` call so that Pyro is aware of the trainable parameters within the module.
Under the hood Pyro constructs a loss of the form
$${\rm baseline\; loss} \equiv\left(\overline{f_{\phi}({\bf z})} - b \right)^2$$
which is used to adapt the parameters of the neural network. There's no theorem that suggests this is the optimal loss function to use in this context (it's not), but in practice it can work pretty well. Just as for the decaying average baseline, the idea is that a baseline that can track the mean $\overline{f_{\phi}({\bf z})}$ will help reduce the variance. Under the hood `SVI` takes one step on the baseline loss in conjunction with a step on the ELBO.
Note that in practice it can be important to use a different set of learning hyperparameters (e.g. a higher learning rate) for baseline parameters. In Pyro this can be done as follows:
```python
def per_param_args(module_name, param_name):
if 'baseline' in param_name or 'baseline' in module_name:
return {"lr": 0.010}
else:
return {"lr": 0.001}
optimizer = optim.Adam(per_param_args)
```
Note that in order for the overall procedure to be correct the baseline parameters should only be optimized through the baseline loss. Similarly the model and guide parameters should only be optimized through the ELBO. To ensure that this is the case under the hood `SVI` detaches the baseline $b$ that enters the ELBO from the autograd graph. Also, since the inputs to the neural baseline may depend on the parameters of the model and guide, the inputs are also detached from the autograd graph before they are fed into the neural network.
Finally, there is an alternate way for the user to specify a neural baseline. Simply use the argument `baseline_value`:
```python
b = # do baseline computation
z = pyro.sample("z", dist.Bernoulli(...),
infer=dict(baseline={'baseline_value': b}))
```
This works as above, except in this case it's the user's responsibility to make sure that any autograd tape connecting $b$ to the parameters of the model and guide has been cut. Or to say the same thing in language more familiar to PyTorch users, any inputs to $b$ that depend on $\theta$ or $\phi$ need to be detached from the autograd graph with `detach()` statements.
#### A complete example with baselines
Recall that in the [first SVI tutorial](svi_part_i.ipynb) we considered a bernoulli-beta model for coin flips. Because the beta random variable is non-reparameterizable (or rather not easily reparameterizable), the corresponding ELBO gradients can be quite noisy. In that context we dealt with this problem by using a Beta distribution that provides (approximate) reparameterized gradients. Here we showcase how a simple decaying average baseline can reduce the variance in the case where the Beta distribution is treated as non-reparameterized (so that the ELBO gradient estimator is of the score function type). While we're at it, we also use `iarange` to write our model in a fully vectorized manner.
Instead of directly comparing gradient variances, we're going to see how many steps it takes for SVI to converge. Recall that for this particular model (because of conjugacy) we can compute the exact posterior. So to assess the utility of baselines in this context, we setup the following simple experiment. We initialize the guide at a specified set of variational parameters. We then do SVI until the variational parameters have gotten to within a fixed tolerance of the parameters of the exact posterior. We do this both with and without the decaying average baseline. We then compare the number of gradient steps we needed in the two cases. Here's the complete code:
(_Since apart from the use of_ `iarange` _and_ `use_decaying_avg_baseline`, _this code is very similar to the code in parts I and II of the SVI tutorial, we're not going to go through the code line by line._)
```
from __future__ import print_function
import os
import torch
import torch.distributions.constraints as constraints
import pyro
import pyro.distributions as dist
# Pyro also has a reparameterized Beta distribution so we import
# the non-reparameterized version to make our point
from pyro.distributions.testing.fakes import NonreparameterizedBeta
import pyro.optim as optim
from pyro.infer import SVI, TraceGraph_ELBO
import sys
# enable validation (e.g. validate parameters of distributions)
pyro.enable_validation(True)
# this is for running the notebook in our testing framework
smoke_test = ('CI' in os.environ)
# keep the run trivially short on CI; otherwise allow up to 10k SVI steps
max_steps = 2 if smoke_test else 10000
def param_abs_error(name, target):
    """Return the summed absolute error between Pyro param `name` and `target`."""
    return (pyro.param(name) - target).abs().sum().item()
class BernoulliBetaExample(object):
    """Bernoulli-Beta coin-flip experiment for comparing SVI convergence
    with and without a decaying-average baseline on the (deliberately
    non-reparameterized) Beta sample site.
    """

    def __init__(self, max_steps):
        # cap on the number of SVI steps per experiment
        self.max_steps = max_steps
        # hyperparameters of the Beta prior
        self.alpha0 = 10.0
        self.beta0 = 10.0
        # observations: six heads (1s) followed by four tails (0s)
        self.data = torch.zeros(10)
        self.data[0:6].data = torch.ones(6)
        self.n_data = self.data.size(0)
        # parameters of the exact Beta posterior (available via conjugacy)
        self.alpha_n = self.data.sum() + self.alpha0
        self.beta_n = - self.data.sum() + torch.tensor(self.beta0 + self.n_data)
        # starting values for the two variational parameters
        self.alpha_q_0 = 15.0
        self.beta_q_0 = 15.0

    def model(self, use_decaying_avg_baseline):
        """Generative model: Beta prior over coin fairness, Bernoulli likelihood."""
        fairness = pyro.sample("latent_fairness", dist.Beta(self.alpha0, self.beta0))
        # iarange marks the observations as conditionally independent
        # given the fairness, which also vectorizes the likelihood
        with pyro.iarange("data_iarange"):
            pyro.sample("obs", dist.Bernoulli(fairness), obs=self.data)

    def guide(self, use_decaying_avg_baseline):
        """Variational distribution: a non-reparameterized Beta over fairness."""
        # register the two (positively-constrained) variational parameters
        alpha_q = pyro.param("alpha_q", torch.tensor(self.alpha_q_0),
                             constraint=constraints.positive)
        beta_q = pyro.param("beta_q", torch.tensor(self.beta_q_0),
                            constraint=constraints.positive)
        # baseline options attached to the sample site; the flag decides
        # whether the decaying-average baseline is active for this run
        baseline_config = {'use_decaying_avg_baseline': use_decaying_avg_baseline,
                           'baseline_beta': 0.90}
        pyro.sample("latent_fairness", NonreparameterizedBeta(alpha_q, beta_q),
                    infer=dict(baseline=baseline_config))

    def do_inference(self, use_decaying_avg_baseline, tolerance=0.80):
        """Run SVI until both variational parameters are within `tolerance`
        of the exact posterior parameters, or `max_steps` is exhausted."""
        # clear the param store in case we're in a REPL
        pyro.clear_param_store()
        # TraceGraph_ELBO tracks dependency structure, which is needed for
        # score-function gradients with baselines
        optimizer = optim.Adam({"lr": .0005, "betas": (0.93, 0.999)})
        svi = SVI(self.model, self.guide, optimizer, loss=TraceGraph_ELBO())
        print("Doing inference with use_decaying_avg_baseline=%s" % use_decaying_avg_baseline)
        for step in range(self.max_steps):
            svi.step(use_decaying_avg_baseline)
            # progress indicator every 100 steps
            if step % 100 == 0:
                print('.', end='')
                sys.stdout.flush()
            # distance of the current variational parameters from the
            # exact posterior parameters
            alpha_error = param_abs_error("alpha_q", self.alpha_n)
            beta_error = param_abs_error("beta_q", self.beta_n)
            # stop early once we're close to the true posterior
            if alpha_error < tolerance and beta_error < tolerance:
                break
        print("\nDid %d steps of inference." % step)
        print(("Final absolute errors for the two variational parameters " +
               "were %.4f & %.4f") % (alpha_error, beta_error))
# run the experiment with and without the decaying-average baseline
experiment = BernoulliBetaExample(max_steps=max_steps)
for use_baseline in (True, False):
    experiment.do_inference(use_decaying_avg_baseline=use_baseline)
```
**Sample output:**
```
Doing inference with use_decaying_avg_baseline=True
....................
Did 1932 steps of inference.
Final absolute errors for the two variational parameters were 0.7997 & 0.0800
Doing inference with use_decaying_avg_baseline=False
..................................................
Did 4908 steps of inference.
Final absolute errors for the two variational parameters were 0.7991 & 0.2532
```
For this particular run we can see that baselines roughly halved the number of steps of SVI we needed to do. The results are stochastic and will vary from run to run, but this is an encouraging result. This is a pretty contrived example, but for certain model and guide pairs, baselines can provide a substantial win.
## References
[1] `Automated Variational Inference in Probabilistic Programming`,
<br/>
David Wingate, Theo Weber
[2] `Black Box Variational Inference`,<br/>
Rajesh Ranganath, Sean Gerrish, David M. Blei
[3] `Auto-Encoding Variational Bayes`,<br/>
Diederik P Kingma, Max Welling
[4] `Gradient Estimation Using Stochastic Computation Graphs`,
<br/>
John Schulman, Nicolas Heess, Theophane Weber, Pieter Abbeel
[5] `Deep Amortized Inference for Probabilistic Programs`
<br/>
Daniel Ritchie, Paul Horsfall, Noah D. Goodman
[6] `Neural Variational Inference and Learning in Belief Networks`
<br/>
Andriy Mnih, Karol Gregor
| github_jupyter |
# I will be testing out different assumption and tools
```
import librosa
import pywt
# from pyAudioAnalysis import audioBasicIO  # doesn't support mp3
# from pyAudioAnalysis import ShortTermFeatures
import numpy as np

# Get the native sample rate (Hz) of an audio file, for .wav and .mp3.
# Parameter: file path.
wav_sr = librosa.core.get_samplerate('/Users/macbookretina/blues.00042.wav')
mp3_sr = librosa.core.get_samplerate('/Users/macbookretina/073192.mp3')
wav_sr, mp3_sr

# Load an audio file (.wav or .mp3). Parameters:
# path: (string, int, pathlib.Path or file-like object) path to the input file.
# sr: int - target sampling rate
# mono:bool - convert signal to mono
# offset:float - start reading after this time (in seconds)
# duration:float - only load up to this much audio (in seconds)
# dtype:numeric type - data type of y
# res_type:str - resample type
# NOTE: the cell previously called librosa.core.load twice with identical
# arguments; the duplicate (a redundant full decode of the mp3) was removed.
y, sr = librosa.core.load('/Users/macbookretina/073192.mp3', mp3_sr)
sr, y

# Duration of the loaded signal in seconds (len(y) / sr).
duration = librosa.core.get_duration(y, mp3_sr)
duration
```
# Timbral Features
```
# Compute Linear Prediction Coefficients via Burg’s method. Parameters:
# y:np.ndarray - Time series to fit
# order:int > 0 - Order of the linear filter
# Question: How to determine which order to use
#
# Returns: a : np.ndarray of length order + 1 - LP prediction error coefficients, i.e. filter denominator polynomial
lpc = librosa.lpc(y, 3)
lpc
# Compute Spectral Centroid. Parameters:
# y:np.ndarray [shape=(n,)] or None - audio time series
# sr:number > 0 [scalar] - audio sampling rate of y
# S:np.ndarray [shape=(d, t)] or None - (optional) spectrogram magnitude
# n_fft:int > 0 [scalar] - FFT window size
# hop_length:int > 0 [scalar] - hop length for STFT. See librosa.core.stft for details.
# freq:None or np.ndarray [shape=(d,) or shape=(d, t)] - Center frequencies for spectrogram bins. If None, then FFT bin center frequencies are used. Otherwise, it can be a single array of d center frequencies, or a matrix of center frequencies as constructed by librosa.core.ifgram
# win_length:int <= n_fft [scalar] - Each frame of audio is windowed by window(). The window will be of length win_length and then padded with zeros to match n_fft.
# Returns: centroid : np.ndarray [shape=(1, t)] - centroid frequencies
spec_centroid = librosa.feature.spectral_centroid(y, mp3_sr)
# Mean centroid over all frames: a single summary statistic for the clip.
mean_spec_centroid = np.mean(spec_centroid)
mean_spec_centroid
# Example value from a previous run: 1233.136154764339
# Compute Spectral Roll-off. Parameters:
# y:np.ndarray [shape=(n,)] or None - audio time series
# sr:number > 0 [scalar] - audio sampling rate of y
# S:np.ndarray [shape=(d, t)] or None - (optional) spectrogram magnitude
# n_fft:int > 0 [scalar] - FFT window size
# hop_length:int > 0 [scalar] - hop length for STFT. See librosa.core.stft for details.
# freq:None or np.ndarray [shape=(d,) or shape=(d, t)] - Center frequencies for spectrogram bins. If None, then FFT bin center frequencies are used. Otherwise, it can be a single array of d center frequencies, or a matrix of center frequencies as constructed by librosa.core.ifgram
# win_length:int <= n_fft [scalar] - Each frame of audio is windowed by window(). The window will be of length win_length and then padded with zeros to match n_fft.
# Returns: rolloff : np.ndarray [shape=(1, t)] - roll-off frequency for each frame
spec_rolloff = librosa.feature.spectral_rolloff(y, mp3_sr)
mean_spec_rolloff = np.mean(spec_rolloff)
spec_rolloff.shape
# Compute zero crossing rate. Parameters:
# y:np.ndarray [shape=(n,)] - Audio time series
# frame_length:int > 0 - Length of the frame over which to compute zero crossing rates
# hop_length:int > 0 - Number of samples to advance for each frame
# center:bool - If True, frames are centered by padding the edges of y. This is similar to the padding in librosa.core.stft, but uses edge-value copies instead of reflection.
# kwargs:additional keyword arguments
# See librosa.core.zero_crossings
# Returns: zcr : np.ndarray [shape=(1, t)] - `zcr[0, i]` is the fraction of zero crossings in the `i` th frame
zcr = librosa.feature.zero_crossing_rate(y)
mean_zcr = np.mean(zcr)
zcr.shape
# Compute the first 13 mfcc coefficients. Parameters:
# y:np.ndarray [shape=(n,)] or None - audio time series
# sr:number > 0 [scalar] - sampling rate of y
# S:np.ndarray [shape=(d, t)] or None - log-power Mel spectrogram
# n_mfcc: int > 0 [scalar] - number of MFCCs to return
# dct_type:{1, 2, 3} - Discrete cosine transform (DCT) type. By default, DCT type-2 is used.
# norm:None or ‘ortho’ - If dct_type is 2 or 3, setting norm=’ortho’ uses an ortho-normal DCT basis. Normalization is not supported for dct_type=1.
# lifter:number >= 0 - If lifter>0, apply liftering (cepstral filtering) to the MFCCs:
# M[n, :] <- M[n, :] * (1 + sin(pi * (n + 1) / lifter)) * lifter / 2
# Setting lifter >= 2 * n_mfcc emphasizes the higher-order coefficients. As lifter increases, the coefficient weighting becomes approximately linear.
# kwargs:additional keyword arguments
# Arguments to melspectrogram, if operating on time series input
# hop_length:int > 0 - The number of samples to advance between frames.
# mfcc = librosa.feature.mfcc(y=y, sr=sr, hop_length=512, n_mfcc=13)
# Returns: M : np.ndarray [shape=(n_mfcc, t)] - MFCC sequence
mfcc = librosa.feature.mfcc(y=y, sr=mp3_sr, n_mfcc=13)
mfcc_scaled = np.mean(mfcc.T, axis=0)
mfcc_scaled
# Computer log-mel / constant-Q transform of an audio signal. Parameters:
# y:np.ndarray [shape=(n,)] - audio time series
# sr:number > 0 [scalar] - sampling rate of y
# hop_length:int > 0 [scalar] - number of samples between successive CQT columns.
# fmin:float > 0 [scalar] - Minimum frequency. Defaults to C1 ~= 32.70 Hz
# n_bins:int > 0 [scalar] - Number of frequency bins, starting at fmin
# n_bins = 84 by default
# Returns: CQT : np.ndarray [shape=(n_bins, t), dtype=np.complex or np.float] - Constant-Q value each frequency at each time.
cqt = librosa.cqt(y, sr=mp3_sr)
# abs_cqt = np.abs(cqt)
# scaled_abs_cqt = np.mean(abs_cqt.T, axis=0)
# scaled_abs_cqt
cqt.shape, cqt
# Compute Mel-spectrogram. Parameters:
# y:np.ndarray [shape=(n,)] or None - audio time-series
# sr:number > 0 [scalar] - sampling rate of y
# S:np.ndarray [shape=(d, t)] - spectrogram
# n_fft:int > 0 [scalar] - length of the FFT window
# hop_length:int > 0 [scalar] - number of samples between successive frames. See librosa.core.stft
# win_length:int <= n_fft [scalar] - Each frame of audio is windowed by window(). The window will be of length win_length and then padded with zeros to match n_fft.
# If unspecified, defaults to win_length = n_fft.
# window:string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
# - a window specification (string, tuple, or number); see scipy.signal.get_window
# - a window function, such as scipy.signal.hanning
# - a vector or array of length n_fft
# center:boolean
# - If True, the signal y is padded so that frame t is centered at y[t * hop_length].
# - If False, then frame t begins at y[t * hop_length]
# pad_mode:string - If center=True, the padding mode to use at the edges of the signal. By default, STFT uses reflection padding.
# power:float > 0 [scalar] - Exponent for the magnitude melspectrogram. e.g., 1 for energy, 2 for power, etc.
# kwargs:additional keyword arguments
# Mel filter bank parameters. See librosa.filters.mel for details.
# Returns: S : np.ndarray [shape=(n_mels, t)] - Mel spectrogram
mel_spect = librosa.feature.melspectrogram(y=y, sr=mp3_sr)
# mel_spect_p = librosa.power_to_db(mel_spect, ref=np.max) # convert spectrogram to decibel units
mel_spect.shape, mel_spect
# Compute spectral bandwidth
# Parameters:
# y:np.ndarray [shape=(n,)] or None - audio time series
# sr:number > 0 [scalar] - audio sampling rate of y
# S:np.ndarray [shape=(d, t)] or None - (optional) spectrogram magnitude
# n_fft:int > 0 [scalar] - FFT window size
# hop_length:int > 0 [scalar] - hop length for STFT. See librosa.core.stft for details.
# win_length:int <= n_fft [scalar] - Each frame of audio is windowed by window(). The window will be of length
# win_length and then padded with zeros to match n_fft. If unspecified, defaults to win_length = n_fft.
# Returns: bandwidth:np.ndarray [shape=(1, t)] - frequency bandwidth for each frame
spec_bw = librosa.feature.spectral_bandwidth(y=y, sr=mp3_sr)
spec_bw.shape, spec_bw
# Compute spectral contrast. Parameters:
# y:np.ndarray [shape=(n,)] or None - audio time series
# sr:number > 0 [scalar] - audio sampling rate of y
# S:np.ndarray [shape=(d, t)] or None - (optional) spectrogram magnitude
# n_fft:int > 0 [scalar] - FFT window size
# hop_length:int > 0 [scalar] - hop length for STFT. See librosa.core.stft for details.
# win_length:int <= n_fft [scalar] - Each frame of audio is windowed by window(). The window will be of length win_length and then padded with zeros to match n_fft.
# If unspecified, defaults to win_length = n_fft.
# window:string, tuple, number, function, or np.ndarray [shape=(n_fft,)]
# - a window specification (string, tuple, or number); see scipy.signal.get_window
# - a window function, such as scipy.signal.hanning
# - a vector or array of length n_fft
# center:boolean
# - If True, the signal y is padded so that frame t is centered at y[t * hop_length].
# - If False, then frame t begins at y[t * hop_length]
# pad_mode:string - If center=True, the padding mode to use at the edges of the signal. By default, STFT uses reflection padding.
# freq:None or np.ndarray [shape=(d,)] - Center frequencies for spectrogram bins. If None, then FFT bin center frequencies are used. Otherwise, it can be a single array of d center frequencies.
# fmin:float > 0 - Frequency cutoff for the first bin [0, fmin] Subsequent bins will cover [fmin, 2*fmin], [2*fmin, 4*fmin], etc.
# n_bands:int > 1 - number of frequency bands
# quantile:float in (0, 1) - quantile for determining peaks and valleys
# linear:bool
# If True, return the linear difference of magnitudes: peaks - valleys.
# If False, return the logarithmic difference: log(peaks) - log(valleys).
# Returns: contrast:np.ndarray [shape=(n_bands + 1, t)] - each row of spectral contrast values corresponds to a given octave-based frequency
contrast = librosa.feature.spectral_contrast(y=y, sr=mp3_sr)
contrast.shape, contrast
# Compute spectral flux using pyAudioAnalysis' short-term features.
# NOTE(review): the pyAudioAnalysis imports at the top of this notebook are
# commented out, so audioBasicIO / ShortTermFeatures were undefined here and
# this cell raised NameError. Import them locally instead; pyAudioAnalysis
# does not support mp3, so this cell only works on the .wav file.
from pyAudioAnalysis import audioBasicIO
from pyAudioAnalysis import ShortTermFeatures

[Fs, x] = audioBasicIO.read_audio_file("/Users/macbookretina/blues.00042.wav")
# 50 ms windows with a 25 ms hop (both arguments are in samples).
F, f_names = ShortTermFeatures.feature_extraction(x, Fs, 0.050*Fs, 0.025*Fs)
# F, f_names
# len(F) == len(f_names)
# Row 6 of F — presumably the spectral-flux feature; verify against f_names.
F[6]
```
# Rhythmic Features
```
# Compute tempo and beat. Parameters:
# y:np.ndarray [shape=(n,)] or None - audio time series
# sr:number > 0 [scalar] - sampling rate of y
# onset_envelope:np.ndarray [shape=(n,)] or None - (optional) pre-computed onset strength envelope.
# hop_length:int > 0 [scalar] - number of audio samples between successive onset_envelope values
# start_bpm:float > 0 [scalar] - initial guess for the tempo estimator (in beats per minute)
# tightness:float [scalar] - tightness of beat distribution around tempo
# trim:bool [scalar] - trim leading/trailing beats with weak onsets
# bpm:float [scalar] - (optional) If provided, use bpm as the tempo instead of estimating it from onsets.
# prior:scipy.stats.rv_continuous [optional] - An optional prior distribution over tempo. If provided, start_bpm will be ignored.
# units:{‘frames’, ‘samples’, ‘time’} - The units to encode detected beat events in. By default, ‘frames’ are used.
# returns the estimated global tempo (beats/s) and estimated beat event locations in the specified units.
tempo, beats = librosa.beat.beat_track(y=y, sr=mp3_sr)
tempo
beats.shape, beats
# Convert beats to timestamps. Parameters:
# frames: np.ndarray [shape=(n,)] - frame index or vector of frame indices
# sr: number > 0 [scalar] - audio sampling rate
# hop_length : int > 0 [scalar] - number of samples between successive frames
# n_fft : None or int > 0 [scalar] - Optional: length of the FFT window.
# If given, time conversion will include an offset of `n_fft / 2` to counteract windowing effects when using a non-centered STFT.
# Returns: times : np.ndarray [shape=(n,)] - time (in seconds) of each given frame number: `times[i] = frames[i] * hop_length / sr`
beat_timestamps = librosa.frames_to_time(beats, sr=mp3_sr)
beat_timestamps.shape, beat_timestamps
# Compute predominant local pulse (PLP) estimation.
onset_env = librosa.onset.onset_strength(y=y, sr=mp3_sr)
pulse = librosa.beat.plp(onset_envelope=onset_env, sr=mp3_sr)
pulse.shape, pulse
# Compute the tempogram: local autocorrelation of the onset strength envelope. Parameters:
# y : np.ndarray [shape=(n,)] or None - Audio time series.
# sr : number > 0 [scalar] - sampling rate of `y`
# onset_envelope : np.ndarray [shape=(n,) or (m, n)] or None - Optional pre-computed onset strength envelope as provided by `onset.onset_strength`.
# If multi-dimensional, tempograms are computed independently for each band (first dimension).
# hop_length : int > 0 - number of audio samples between successive onset measurements
# win_length : int > 0 - length of the onset autocorrelation window (in frames/onset measurements)
# The default settings (384) corresponds to `384 * hop_length / sr ~= 8.9s`.
# center : bool - If `True`, onset autocorrelation windows are centered. If `False`, windows are left-aligned.
# window : string, function, number, tuple, or np.ndarray [shape=(win_length,)] - A window specification as in `core.stft`.
# norm : {np.inf, -np.inf, 0, float > 0, None} - Normalization mode. Set to `None` to disable normalization.
# Returns: tempogram : np.ndarray [shape=(win_length, n) or (m, win_length, n)] - Localized autocorrelation of the onset strength envelope.
# If given multi-band input (`onset_envelope.shape==(m,n)`) then `tempogram[i]` is the tempogram of `onset_envelope[i]`.
hop_length = 512
oenv = librosa.onset.onset_strength(y=y, sr=mp3_sr, hop_length=hop_length)
tempogram = librosa.feature.tempogram(onset_envelope=oenv, sr=mp3_sr, hop_length=hop_length)
# scaled_tempogram = np.mean(tempogram.T, axis=0)
tempogram.shape, tempogram
# Compute global onset autocorrelation
ac_global = librosa.autocorrelate(oenv, max_size=tempogram.shape[0])
ac_global = librosa.util.normalize(ac_global)
mean_ac_global = np.mean(ac_global)
ac_global.shape, ac_global
# Compute the Fourier tempogram: the short-time Fourier transform of the onset strength envelope. Parameters:
# y : np.ndarray [shape=(n,)] or None - Audio time series.
# sr : number > 0 [scalar] - sampling rate of `y`
# onset_envelope : np.ndarray [shape=(n,)] or None - Optional pre-computed onset strength envelope as provided by `onset.onset_strength`.
# hop_length : int > 0 - number of audio samples between successive onset measurements
# win_length : int > 0 - length of the onset window (in frames/onset measurements) The default settings (384) corresponds to `384 * hop_length / sr ~= 8.9s`.
# center : bool - If `True`, onset windows are centered. If `False`, windows are left-aligned.
# window : string, function, number, tuple, or np.ndarray [shape=(win_length,)] - A window specification as in `core.stft`.
# Returns: tempogram : np.ndarray [shape=(win_length // 2 + 1, n)] - Complex short-time Fourier transform of the onset envelope.
fourier_tempogram = librosa.feature.fourier_tempogram(onset_envelope=oenv, sr=mp3_sr, hop_length=hop_length)
abs_fourier_tempogram = np.abs(fourier_tempogram)
fourier_tempogram.shape, fourier_tempogram
abs_fourier_tempogram.shape, abs_fourier_tempogram
# Compute the auto-correlation tempogram, unnormalized to make comparison easier.
# Consistency fix: use mp3_sr like every other call in this section. Behavior is
# unchanged here since the signal was loaded with target sr=mp3_sr, so sr == mp3_sr.
ac_fourier_tempogram = librosa.feature.tempogram(onset_envelope=oenv, sr=mp3_sr, hop_length=hop_length, norm=None)
mean_ac_fourier_tempogram = np.mean(ac_fourier_tempogram)
ac_fourier_tempogram.shape, ac_fourier_tempogram
```
# Wavelet
```
# Built-in wavelet families (mother wavelets)
pywt.families()
# All built-in wavelets across every family
pywt.wavelist()
# Built-in Daubechies wavelets
pywt.wavelist('db')
# Single level Discrete Wavelet Transform. Parameters:
# data : array_like - Input signal
# wavelet : Wavelet object or name - Wavelet to use
# mode : str, optional - Signal extension mode, see Modes.
# axis: int, optional - Axis over which to compute the DWT. If not given, the last axis is used.
# Returns: (cA, cD) : tuple - Approximation and detail coefficients.
(cA, cD) = pywt.dwt(y, 'db4')
cA.shape, cA, cD.shape, cD
# Compute the maximum useful level of decomposition. Parameters:
# data_len : int - Input data length.
# filter_len : int, str or Wavelet - The wavelet filter length. Alternatively, the name of a discrete wavelet or a Wavelet object can be specified.
# Returns: max_level : int - Maximum level.
pywt.dwt_max_level(len(y), 'db4')
# Compute the maximum level of decomposition for n-dimensional data.
# This returns the maximum number of levels of decomposition suitable for use with wavedec, wavedec2 or wavedecn.
# Parameters:
# shape : sequence of ints - Input data shape.
# wavelet : Wavelet object or name string, or tuple of wavelets - Wavelet to use. This can also be a tuple containing a wavelet to apply along each axis in axes.
# axes : sequence of ints, optional - Axes over which to compute the DWT. Axes may not be repeated.
# Returns: - level : int - Maximum level.
level = pywt.dwtn_max_level(y.shape, 'db4')
level
# Multilevel 1D Discrete Wavelet Transform of data. Parameters:
# data: array_like - Input data
# wavelet : Wavelet object or name string - Wavelet to use
# mode : str, optional - Signal extension mode, see Modes.
# level : int, optional - Decomposition level (must be >= 0). If level is None (default) then it will be calculated
# using the dwt_max_level function.
# axis: int, optional - Axis over which to compute the DWT. If not given, the last axis is used.
# Returns:[cA_n, cD_n, cD_n-1, …, cD2, cD1] : list
# Ordered list of coefficients arrays where n denotes the level of decomposition.
# The first element (cA_n) of the result is approximation coefficients array and the following elements
# (cD_n - cD_1) are details coefficients arrays.
db_coeffs = pywt.wavedec(y, 'db4', level=2)
cA2, cD2, cD1 = db_coeffs
cA2.shape, cA2, cD2.shape, cD2, cD1.shape, cD1
np.var(cA2)
```
| github_jupyter |
# Training a Generative Adversarial Network on MNIST
In this tutorial, we will train a Generative Adversarial Network (GAN) on the MNIST dataset. This is a large collection of 28x28 pixel images of handwritten digits. We will try to train a network to produce new images of handwritten digits.
## Colab
This tutorial and the rest in this sequence are designed to be done in Google colab. If you'd like to open this notebook in colab, you can use the following link.
[](https://colab.research.google.com/github/deepchem/deepchem/blob/master/examples/tutorials/Training_a_Generative_Adversarial_Network_on_MNIST.ipynb)
```
!pip install --pre deepchem
import deepchem
deepchem.__version__
```
To begin, let's import all the libraries we'll need and load the dataset (which comes bundled with Tensorflow).
```
import deepchem as dc
import tensorflow as tf
from deepchem.models.optimizers import ExponentialDecay
from tensorflow.keras.layers import Conv2D, Conv2DTranspose, Dense, Reshape
import matplotlib.pyplot as plot
import matplotlib.gridspec as gridspec
%matplotlib inline
mnist = tf.keras.datasets.mnist.load_data(path='mnist.npz')
images = mnist[0][0].reshape((-1, 28, 28, 1))/255
dataset = dc.data.NumpyDataset(images)
```
Let's view some of the images to get an idea of what they look like.
```
def plot_digits(im):
    """Show the first 16 entries of the 4-D image batch `im` in a 4x4 grayscale grid."""
    plot.figure(figsize=(3, 3))
    cells = gridspec.GridSpec(4, 4, wspace=0.05, hspace=0.05)
    for idx, cell in enumerate(cells):
        axis = plot.subplot(cell)
        axis.set_xticks([])
        axis.set_yticks([])
        axis.imshow(im[idx, :, :, 0], cmap='gray')
plot_digits(images)
```
Now we can create our GAN. Like in the last tutorial, it consists of two parts:
1. The generator takes random noise as its input and produces output that will hopefully resemble the training data.
2. The discriminator takes a set of samples as input (possibly training data, possibly created by the generator), and tries to determine which are which.
This time we will use a different style of GAN called a Wasserstein GAN (or WGAN for short). In many cases, they are found to produce better results than conventional GANs. The main difference between the two is in the discriminator (often called a "critic" in this context). Instead of outputting the probability of a sample being real training data, it tries to learn how to measure the distance between the training distribution and generated distribution. That measure can then be directly used as a loss function for training the generator.
We use a very simple model. The generator uses a dense layer to transform the input noise into a 7x7 image with eight channels. That is followed by two convolutional layers that upsample it first to 14x14, and finally to 28x28.
The discriminator does roughly the same thing in reverse. Two convolutional layers downsample the image first to 14x14, then to 7x7. A final dense layer produces a single number as output. In the last tutorial we used a sigmoid activation to produce a number between 0 and 1 that could be interpreted as a probability. Since this is a WGAN, we instead use a softplus activation. It produces an unbounded positive number that can be interpreted as a distance.
```
class DigitGAN(dc.models.WGAN):
    """Wasserstein GAN for MNIST: 10-d noise -> 28x28x1 images, conv critic."""

    def get_noise_input_shape(self):
        # The generator consumes a 10-dimensional noise vector.
        return (10,)

    def get_data_input_shapes(self):
        # A single data stream of 28x28 single-channel images.
        return [(28, 28, 1)]

    def create_generator(self):
        # Dense -> 7x7x8, then two stride-2 transposed convs upsample
        # to 14x14 and finally 28x28; sigmoid keeps pixels in [0, 1].
        layers = [
            Dense(7*7*8, activation=tf.nn.relu),
            Reshape((7, 7, 8)),
            Conv2DTranspose(filters=16, kernel_size=5, strides=2,
                            activation=tf.nn.relu, padding='same'),
            Conv2DTranspose(filters=1, kernel_size=5, strides=2,
                            activation=tf.sigmoid, padding='same'),
        ]
        return tf.keras.Sequential(layers)

    def create_discriminator(self):
        # Two stride-2 convs downsample to 7x7; softplus yields an
        # unbounded positive output, interpreted as a distance (WGAN critic).
        layers = [
            Conv2D(filters=32, kernel_size=5, strides=2,
                   activation=tf.nn.leaky_relu, padding='same'),
            Conv2D(filters=64, kernel_size=5, strides=2,
                   activation=tf.nn.leaky_relu, padding='same'),
            Dense(1, activation=tf.math.softplus),
        ]
        return tf.keras.Sequential(layers)
gan = DigitGAN(learning_rate=ExponentialDecay(0.001, 0.9, 5000))
```
Now to train it. As in the last tutorial, we write a generator to produce data. This time the data is coming from a dataset, which we loop over 100 times.
One other difference is worth noting. When training a conventional GAN, it is important to keep the generator and discriminator in balance throughout training. If either one gets too far ahead, it becomes very difficult for the other one to learn.
WGANs do not have this problem. In fact, the better the discriminator gets, the cleaner a signal it provides and the easier it becomes for the generator to learn. We therefore specify `generator_steps=0.2` so that it will only take one step of training the generator for every five steps of training the discriminator. This tends to produce faster training and better results.
```
def iterbatches(epochs):
    """Yield feed dicts mapping the GAN's data input to MNIST batches, looping `epochs` times."""
    data_key = gan.data_inputs[0]
    for _ in range(epochs):
        for image_batch in dataset.iterbatches(batch_size=gan.batch_size):
            yield {data_key: image_batch[0]}
gan.fit_gan(iterbatches(100), generator_steps=0.2, checkpoint_interval=5000)
```
Let's generate some data and see how the results look.
```
plot_digits(gan.predict_gan_generator(batch_size=16))
```
Not too bad. Many of the generated images look plausibly like handwritten digits. A larger model trained for a longer time can do much better, of course.
# Congratulations! Time to join the Community!
Congratulations on completing this tutorial notebook! If you enjoyed working through the tutorial, and want to continue working with DeepChem, we encourage you to finish the rest of the tutorials in this series. You can also help the DeepChem community in the following ways:
## Star DeepChem on [GitHub](https://github.com/deepchem/deepchem)
This helps build awareness of the DeepChem project and the tools for open source drug discovery that we're trying to build.
## Join the DeepChem Gitter
The DeepChem [Gitter](https://gitter.im/deepchem/Lobby) hosts a number of scientists, developers, and enthusiasts interested in deep learning for the life sciences. Join the conversation!
| github_jupyter |
# Quantile Regression Demo
#### Import Statements
```
import sys
sys.path.append("../../")
%matplotlib inline
import matplotlib.pyplot as plt
from uq360.algorithms.quantile_regression import QuantileRegression
from uq360.metrics import picp, mpiw
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import StandardScaler
import pandas as pd
import numpy as np
```
#### Load Boston Housing Prices Dataset
```
# NOTE(review): load_boston was deprecated in scikit-learn 1.0 and removed in
# 1.2, so this cell requires scikit-learn < 1.2. Consider
# fetch_openml(name="boston") or fetch_california_housing as replacements.
house_prices_dataset = datasets.load_boston()
house_prices_df = pd.DataFrame(house_prices_dataset['data'])
house_prices_df.columns = house_prices_dataset['feature_names']
# Predictor subset: rooms, crime rate, pupil-teacher ratio, distance to employment centres.
all_features = ['RM','CRIM','PTRATIO', 'DIS']
X = house_prices_df[all_features].values
y = house_prices_dataset['target']
```
#### Train test split of the dataset
```
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
plt.scatter(X_train[:,0], y_train)
plt.xlabel('#of rooms')
plt.ylabel('house price')
plt.title('Training data')
scaler_y = StandardScaler()
y_train = scaler_y.fit_transform(y_train.reshape(-1, 1))
scaler_X = StandardScaler()
X_train = scaler_X.fit_transform(X_train)
X_test = scaler_X.transform(X_test)
```
### Train Quantile Regression
```
# Quantile-regression configuration for the gradient-boosted model;
# alpha is the target coverage level of the prediction interval.
config = {
    "alpha": 0.95,
    "n_estimators": 20,
    "max_depth": 3,
    "learning_rate": 0.1,
    "min_samples_leaf": 20,
    "min_samples_split": 20
}
uq_model = QuantileRegression(model_type='gbr', config=config)
uq_model = uq_model.fit(X_train, y_train.squeeze())

# Predict mean and interval bounds on the (scaled) test set, then map the
# predictions back to the original house-price scale.
y_mean, y_lower, y_upper = uq_model.predict(X_test)
# NOTE(review): newer scikit-learn StandardScaler.inverse_transform expects 2-D
# input; these 1-D calls rely on an older version — confirm the pinned version.
y_mean, y_lower, y_upper = scaler_y.inverse_transform(y_mean), scaler_y.inverse_transform(y_lower), scaler_y.inverse_transform(y_upper)

pred_dict = {'rooms': scaler_X.inverse_transform(X_test)[:,0],
             'y': y_test,
             'y_mean': y_mean,
             'y_upper': y_upper,
             'y_lower': y_lower
            }
pred_df = pd.DataFrame(data=pred_dict)
pred_df_sorted = pred_df.sort_values(by='rooms')

plt.plot(pred_df_sorted['rooms'], pred_df_sorted['y'], 'o', label='ground truth')
plt.plot(pred_df_sorted['rooms'], pred_df_sorted['y_mean'], '-', lw=2, label='mean')
plt.fill_between(pred_df_sorted['rooms'],
                 pred_df_sorted['y_upper'],
                 pred_df_sorted['y_lower'],
                 alpha=0.3, label='total uncertainty')  # fixed label typo ("uncertianty")
plt.legend()
plt.xlabel('#of rooms')
plt.ylabel('house price in $1000')
plt.title('Test data PICP: {:.2f} MPIW: {:.2f}'.format(
    picp(y_test, y_lower, y_upper),
    mpiw(y_lower, y_upper))
)
```
#### Let us now see the effect of increasing the number of estimators of the GBR on the prediction interval calibration.
```
num_estimators_arr = [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]
observed_alphas = np.zeros(len(num_estimators_arr))
observed_widths = np.zeros(len(num_estimators_arr))
rmses = np.zeros(len(num_estimators_arr))
for idx, num_estimators in enumerate(num_estimators_arr):
config["n_estimators"] = num_estimators
uq_model = QuantileRegression(model_type='gbr', config=config)
uq_model = uq_model.fit(X_train, y_train.squeeze())
y_mean, y_lower, y_upper = uq_model.predict(X_test)
y_mean, y_lower, y_upper = scaler_y.inverse_transform(y_mean), scaler_y.inverse_transform(y_lower), scaler_y.inverse_transform(y_upper)
observed_alphas[idx] = picp(y_test, y_lower, y_upper)
observed_widths[idx] = mpiw(y_lower, y_upper)
rmses[idx] = np.sqrt(mean_squared_error(y_test, y_mean))
plt.figure(figsize=(15,3))
plt.subplot(1, 3, 1)
plt.plot(num_estimators_arr, rmses)
plt.ylabel('RMSE')
plt.xlabel('#of estimators')
plt.subplot(1, 3, 2)
plt.plot(num_estimators_arr, observed_alphas)
plt.xlabel('#of estimators')
plt.ylabel('PICP')
plt.subplot(1, 3, 3)
plt.plot(num_estimators_arr, observed_widths)
plt.xlabel('#of estimators')
plt.ylabel('MPIW')
plt.show()
```
We can observe in the plots above that with an increase in the number of estimators used in the Gradient boosted regression model, the RMSE and the average width of predictions (MPIW) reduce, but the calibration or coverage as measured by the PICP metric deteriorates.
| github_jupyter |
##### Copyright 2018 The TF-Agents Authors.
### Get Started
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/agents/blob/master/docs/tutorials/3_policies_tutorial.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/agents/blob/master/docs/tutorials/3_policies_tutorial.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
</table>
```
# Note: If you haven't installed tf-agents yet, run:
try:
%tensorflow_version 2.x
except:
pass
!pip install --upgrade tensorflow-probability
!pip install tf-agents
```
### Imports
```
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import tensorflow as tf
import tensorflow_probability as tfp
import numpy as np
from tf_agents.specs import array_spec
from tf_agents.specs import tensor_spec
from tf_agents.networks import network
from tf_agents.policies import py_policy
from tf_agents.policies import random_py_policy
from tf_agents.policies import scripted_py_policy
from tf_agents.policies import tf_policy
from tf_agents.policies import random_tf_policy
from tf_agents.policies import actor_policy
from tf_agents.policies import q_policy
from tf_agents.policies import greedy_policy
from tf_agents.trajectories import time_step as ts
tf.compat.v1.enable_v2_behavior()
```
# Introduction
In Reinforcement Learning terminology, policies map an observation from the environment to an action or a distribution over actions. In TF-Agents, observations from the environment are contained in a named tuple `TimeStep('step_type', 'discount', 'reward', 'observation')`, and policies map timesteps to actions or distributions over actions. Most policies use `timestep.observation`, some policies use `timestep.step_type` (e.g. to reset the state at the beginning of an episode in stateful policies), but `timestep.discount` and `timestep.reward` are usually ignored.
Policies are related to other components in TF-Agents in the following way. Most policies have a neural network to compute actions and/or distributions over actions from TimeSteps. Agents can contain one or more policies for different purposes, e.g. a main policy that is being trained for deployment, and a noisy policy for data collection. Policies can be saved/restored, and can be used independently of the agent for data collection, evaluation etc.
Some policies are easier to write in Tensorflow (e.g. those with a neural network), whereas others are easier to write in Python (e.g. following a script of actions). So in TF agents, we allow both Python and Tensorflow policies. Moreover, policies written in TensorFlow might have to be used in a Python environment, or vice versa, e.g. a TensorFlow policy is used for training but later deployed in a production python environment. To make this easier, we provide wrappers for converting between python and TensorFlow policies.
Another interesting class of policies are policy wrappers, which modify a given policy in a certain way, e.g. add a particular type of noise, make a greedy or epsilon-greedy version of a stochastic policy, randomly mix multiple policies etc.
# Python Policies
The interface for Python policies is defined in `policies/py_policy.Base`. The main methods are:
```
class Base(object):
    """Abstract interface for Python policies (excerpt of TF-Agents' py_policy.Base).

    Quoted from the library for illustration; maps TimeSteps to PolicyStep
    named tuples. Code is kept verbatim apart from comments.
    """

    @abc.abstractmethod
    def __init__(self, time_step_spec, action_spec, policy_state_spec=()):
        # Specs describing the expected input time steps, the output actions,
        # and the (optional) recurrent policy state.
        self._time_step_spec = time_step_spec
        self._action_spec = action_spec
        self._policy_state_spec = policy_state_spec

    @abc.abstractmethod
    def reset(self, policy_state=()):
        # return initial_policy_state.
        pass

    @abc.abstractmethod
    def action(self, time_step, policy_state=()):
        # return a PolicyStep(action, state, info) named tuple.
        pass

    @abc.abstractmethod
    def distribution(self, time_step, policy_state=()):
        # Not implemented in python, only for TF policies.
        pass

    @abc.abstractmethod
    def update(self, policy):
        # update self to be similar to the input `policy`.
        pass

    @abc.abstractmethod
    def copy(self):
        # return a copy of self.
        pass

    @property
    def time_step_spec(self):
        return self._time_step_spec

    @property
    def action_spec(self):
        return self._action_spec

    @property
    def policy_state_spec(self):
        return self._policy_state_spec
```
The most important method is `action(time_step)` which maps a `time_step` containing an observation from the environment to a PolicyStep named tuple containing the following attributes:
* `action`: The action to be applied to the environment.
* `state`: The state of the policy (e.g. RNN state) to be fed into the next call to action.
* `info`: Optional side information such as action log probabilities.
The `time_step_spec` and `action_spec` are specifications for the input time step and the output action. Policies also have a `reset` function which is typically used for resetting the state in stateful policies. The `copy` function returns a copy of `self` and the `update(new_policy)` function updates `self` towards `new_policy`.
Now, let us look at a couple of examples of python policies.
## Example 1: Random Python Policy
A simple example of a `PyPolicy` is the `RandomPyPolicy` which generates random actions for the given discrete/continuous action_spec. The input `time_step` is ignored.
```
action_spec = array_spec.BoundedArraySpec((2,), np.int32, -10, 10)
my_random_py_policy = random_py_policy.RandomPyPolicy(time_step_spec=None,
action_spec=action_spec)
time_step = None
action_step = my_random_py_policy.action(time_step)
print(action_step)
action_step = my_random_py_policy.action(time_step)
print(action_step)
```
## Example 2: Scripted Python Policy
A scripted policy plays back a script of actions represented as a list of `(num_repeats, action)` tuples. Every time the `action` function is called, it returns the next action from the list until the specified number of repeats is done, and then moves on to the next action in the list. The `reset` method can be called to start executing from the beginning of the list.
```
action_spec = array_spec.BoundedArraySpec((2,), np.int32, -10, 10)
# Script of (num_repeats, action) tuples, played back in order.
action_script = [(1, np.array([5, 2], dtype=np.int32)),
                 (0, np.array([0, 0], dtype=np.int32)),  # Setting `num_repeats` to 0 will skip this action.
                 (2, np.array([1, 2], dtype=np.int32)),
                 (1, np.array([3, 4], dtype=np.int32))]
my_scripted_py_policy = scripted_py_policy.ScriptedPyPolicy(
    time_step_spec=None, action_spec=action_spec, action_script=action_script)
policy_state = my_scripted_py_policy.get_initial_state()
time_step = None
print('Executing scripted policy...')
action_step = my_scripted_py_policy.action(time_step, policy_state)
print(action_step)
# Thread the returned policy state into each subsequent call so the
# scripted policy knows its position in the script.
action_step= my_scripted_py_policy.action(time_step, action_step.state)
print(action_step)
action_step = my_scripted_py_policy.action(time_step, action_step.state)
print(action_step)
print('Resetting my_scripted_py_policy...')
# A fresh initial state restarts playback from the top of the script.
policy_state = my_scripted_py_policy.get_initial_state()
action_step = my_scripted_py_policy.action(time_step, policy_state)
print(action_step)
```
# TensorFlow Policies
TensorFlow policies follow the same interface as Python policies. Let us look at a few examples.
## Example 1: Random TF Policy
A RandomTFPolicy can be used to generate random actions according to a given discrete/continuous `action_spec`. The input `time_step` is ignored.
```
action_spec = tensor_spec.BoundedTensorSpec(
(2,), tf.float32, minimum=-1, maximum=3)
input_tensor_spec = tensor_spec.TensorSpec((2,), tf.float32)
time_step_spec = ts.time_step_spec(input_tensor_spec)
my_random_tf_policy = random_tf_policy.RandomTFPolicy(
action_spec=action_spec, time_step_spec=time_step_spec)
observation = tf.ones(time_step_spec.observation.shape)
time_step = ts.restart(observation)
action_step = my_random_tf_policy.action(time_step)
print('Action:')
print(action_step.action)
```
## Example 2: Actor Policy
An actor policy can be created using either a network that maps `time_steps` to actions or a network that maps `time_steps` to distributions over actions.
### Using an action network
Let us define a network as follows:
```
class ActionNet(network.Network):
    """Maps batched observations to batched actions bounded to [-1, 1].

    A single tanh Dense layer produces one value per action dimension; the
    output is reshaped to match `output_tensor_spec`.
    """

    def __init__(self, input_tensor_spec, output_tensor_spec):
        """Args:
            input_tensor_spec: Spec of the observations fed to the network.
            output_tensor_spec: Spec of the actions the network produces.
        """
        super(ActionNet, self).__init__(
            input_tensor_spec=input_tensor_spec,
            state_spec=(),
            name='ActionNet')
        self._output_tensor_spec = output_tensor_spec
        # Size the layer from the constructor argument, not the module-level
        # `action_spec` global the original relied on — that only worked
        # because the notebook happened to define `action_spec` before
        # instantiating this class.
        self._layers = [
            tf.keras.layers.Dense(
                output_tensor_spec.shape.num_elements(), activation=tf.nn.tanh),
        ]

    def call(self, observations, step_type, network_state):
        # step_type is unused by this stateless network.
        del step_type
        output = tf.cast(observations, dtype=tf.float32)
        for layer in self.layers:
            output = layer(output)
        # Restore the spec'd action shape; tanh already bounds values to [-1, 1].
        actions = tf.reshape(output, [-1] + self._output_tensor_spec.shape.as_list())
        # Scale and shift actions to the correct range if necessary.
        return actions, network_state
```
In TensorFlow most network layers are designed for batch operations, so we expect the input time_steps to be batched, and the output of the network will be batched as well. Also the network is responsible for producing actions in the correct range of the given action_spec. This is conventionally done using e.g. a tanh activation for the final layer to produce actions in [-1, 1] and then scaling and shifting this to the correct range as the input action_spec (e.g. see `tf_agents/agents/ddpg/networks.actor_network()`).
Now, we can create an actor policy using the above network.
```
input_tensor_spec = tensor_spec.TensorSpec((4,), tf.float32)
time_step_spec = ts.time_step_spec(input_tensor_spec)
action_spec = tensor_spec.BoundedTensorSpec((3,),
tf.float32,
minimum=-1,
maximum=1)
action_net = ActionNet(input_tensor_spec, action_spec)
my_actor_policy = actor_policy.ActorPolicy(
time_step_spec=time_step_spec,
action_spec=action_spec,
actor_network=action_net)
```
We can apply it to any batch of time_steps that follow time_step_spec:
```
batch_size = 2
observations = tf.ones([2] + time_step_spec.observation.shape.as_list())
time_step = ts.restart(observations, batch_size)
action_step = my_actor_policy.action(time_step)
print('Action:')
print(action_step.action)
distribution_step = my_actor_policy.distribution(time_step)
print('Action distribution:')
print(distribution_step.action)
```
In the above example, we created the policy using an action network that produces an action tensor. In this case, `policy.distribution(time_step)` is a deterministic (delta) distribution around the output of `policy.action(time_step)`. One way to produce a stochastic policy is to wrap the actor policy in a policy wrapper that adds noise to the actions. Another way is to create the actor policy using an action distribution network instead of an action network as shown below.
### Using an action distribution network
```
class ActionDistributionNet(ActionNet):
    """ActionNet variant that emits a Normal distribution over actions."""

    def call(self, observations, step_type, network_state):
        # Reuse ActionNet's forward pass as the distribution means.
        action_means, network_state = super(ActionDistributionNet, self).call(
            observations, step_type, network_state)
        # Fixed unit standard deviation; only the means are learned.
        action_std = tf.ones_like(action_means)
        return tfp.distributions.Normal(action_means, action_std), network_state
action_distribution_net = ActionDistributionNet(input_tensor_spec, action_spec)
my_actor_policy = actor_policy.ActorPolicy(
time_step_spec=time_step_spec,
action_spec=action_spec,
actor_network=action_distribution_net)
action_step = my_actor_policy.action(time_step)
print('Action:')
print(action_step.action)
distribution_step = my_actor_policy.distribution(time_step)
print('Action distribution:')
print(distribution_step.action)
```
Note that in the above, actions are clipped to the range of the given action spec [-1, 1]. This is because the `clip` constructor argument of `ActorPolicy` is `True` by default. Setting it to `False` will return unclipped actions produced by the network.
Stochastic policies can be converted to deterministic policies using, for example, a GreedyPolicy wrapper which chooses `stochastic_policy.distribution().mode()` as its action, and a deterministic/delta distribution around this greedy action as its `distribution()`.
## Example 3: Q Policy
A Q policy is used in agents like DQN and is based on a Q network that predicts a Q value for each discrete action. For a given time step, the action distribution in the Q Policy is a categorical distribution created using the q values as logits.
```
input_tensor_spec = tensor_spec.TensorSpec((4,), tf.float32)
time_step_spec = ts.time_step_spec(input_tensor_spec)
action_spec = tensor_spec.BoundedTensorSpec((1,),
tf.int32,
minimum=-1,
maximum=1)
num_actions = action_spec.maximum - action_spec.minimum + 1
class QNetwork(network.Network):
    """Minimal Q-network: one Dense layer producing a Q-value per action."""

    def __init__(self, input_tensor_spec, action_spec, num_actions=num_actions, name=None):
        # NOTE(review): the `num_actions` default is captured from the
        # surrounding notebook cell at class-definition time.
        super(QNetwork, self).__init__(
            input_tensor_spec=input_tensor_spec,
            state_spec=(),
            name=name)
        self._layers.append(tf.keras.layers.Dense(num_actions))

    def call(self, inputs, step_type=None, network_state=()):
        # step_type is unused by this stateless network.
        del step_type
        inputs = tf.cast(inputs, tf.float32)
        for layer in self.layers:
            inputs = layer(inputs)
        # Returned values are Q-value logits, one per action.
        return inputs, network_state
batch_size = 2
observation = tf.ones([batch_size] + time_step_spec.observation.shape.as_list())
time_steps = ts.restart(observation, batch_size=batch_size)
my_q_network = QNetwork(
input_tensor_spec=input_tensor_spec,
action_spec=action_spec)
my_q_policy = q_policy.QPolicy(
time_step_spec, action_spec, q_network=my_q_network)
action_step = my_q_policy.action(time_steps)
distribution_step = my_q_policy.distribution(time_steps)
print('Action:')
print(action_step.action)
print('Action distribution:')
print(distribution_step.action)
```
# Policy Wrappers
A policy wrapper can be used to wrap and modify a given policy, e.g. add noise. Policy wrappers are a subclass of Policy (Python/TensorFlow) and can therefore be used just like any other policy.
## Example: Greedy Policy
A greedy wrapper can be used to wrap any TensorFlow policy that implements `distribution()`. `GreedyPolicy.action()` will return `wrapped_policy.distribution().mode()` and `GreedyPolicy.distribution()` is a deterministic/delta distribution around `GreedyPolicy.action()`:
```
my_greedy_policy = greedy_policy.GreedyPolicy(my_q_policy)
action_step = my_greedy_policy.action(time_steps)
print('Action:')
print(action_step.action)
distribution_step = my_greedy_policy.distribution(time_steps)
print('Action distribution:')
print(distribution_step.action)
```
| github_jupyter |
### Note: Just started hacking on this, the code is a mess =D
---
# Comparing Estimators on a few toy datasets
Inspired by this excellent sample from sklearn: http://scikit-learn.org/stable/auto_examples/classification/plot_classifier_comparison.html
```
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import collections
from sklearn.datasets import make_moons, make_circles, make_blobs
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import tensorflow as tf
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
%matplotlib inline
```
Create a few toy datasets for binary classification. 'Blobs' is linearly separable, the others are not.
```
n_samples = 100
random_state = 0
datasets = collections.OrderedDict([
('Blobs', make_blobs(n_samples=n_samples, centers=2, cluster_std=0.5, random_state=random_state)),
('Circles', make_circles(n_samples=n_samples, factor=.5, noise=.03, random_state=random_state)),
('Moons', make_moons(n_samples=n_samples, noise=.03, random_state=random_state))
])
```
Let's plot them. Points from the first class will be colored blue, and the second class will be colored green.
```
figure = plt.figure(figsize=(18, 6))
colors = np.array(["blue", "green"])
i = 0
for name in datasets:
X, y = datasets[name]
i += 1
ax = plt.subplot(1, len(datasets), i)
plt.scatter(X[:, 0], X[:, 1], color=colors[y].tolist())
plt.title(name, fontsize=14)
plt.show()
```
This method creates a number of estimators for us to experiment with. It takes a description of the features to use as a parameter. We'll create this description separately for each dataset, later in the notebook.
```
def make_estimators(feature_columns, n_classes):
    """Build the canned estimators to compare, keyed by display name.

    Args:
        feature_columns: list of tf.feature_column objects describing inputs.
        n_classes: number of target classes.

    Returns:
        OrderedDict mapping estimator name -> estimator instance. Each
        estimator writes checkpoints/summaries under ./graphs/canned/.
    """
    estimators = collections.OrderedDict([
        ('Linear', tf.estimator.LinearClassifier(
            feature_columns=feature_columns,
            n_classes=n_classes,
            model_dir="./graphs/canned/linear"
        )),
        ('Deep', tf.estimator.DNNClassifier(
            hidden_units=[128, 128],
            feature_columns=feature_columns,
            n_classes=n_classes,
            model_dir="./graphs/canned/deep"
        )),
        # Note: the value of this model is when we
        # use different types of feature engineering
        # for the linear and dnn features
        # see the Wide and Deep tutorial on tensorflow.org
        # a non-trivial use-case.
        ('Wide_Deep', tf.estimator.DNNLinearCombinedClassifier(
            dnn_hidden_units=[100, 50],
            linear_feature_columns=feature_columns,
            dnn_feature_columns=feature_columns,
            n_classes=n_classes,
            model_dir="./graphs/canned/wide_n_deep"
        )),
    ])
    return estimators
```
Calling predict on an estimator returns a generator object. For convenience, this method will give us a list of predictions. Here, we're returning the probabilities for each class.
```
def get_predictions(estimator, input_fn):
    """Return the per-example class-probability vectors as a list.

    `Estimator.predict` yields a generator of prediction dicts; this
    materializes the 'probabilities' entry of each one for convenience.
    To collect predicted class indices instead, use:
        [np.argmax(p['probabilities']) for p in estimator.predict(input_fn=input_fn)]
    """
    return [prediction['probabilities']
            for prediction in estimator.predict(input_fn=input_fn)]
```
Let's train each Estimator on each dataset, and record the predictions for each test point, and the evaluation (which contains stats like overall accuracy) as we go.
```
# We'll use these objects to store results.
# Each maps from a tuple of (dataset_name, estimator_name) to the results.
evaluations = {}
predictions = {}
mesh_predictions = {}
# ===
# Parameters
# ===
# Number of training steps for each estimator
steps = 100
# Step size in the mesh
h = .02
for ds_name in datasets:
    # This is the entire dataset
    X, y = datasets[ds_name]
    # Standardize values to 0 mean and unit standard deviation
    X = StandardScaler().fit_transform(X)
    # Split in to train / test
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
    n_features = X_train.shape[1]
    n_classes = len(np.unique(y_train))
    feature_columns = [tf.feature_column.numeric_column('x', shape=n_features)]
    estimators = make_estimators(feature_columns, n_classes)
    # Create a mesh grid.
    # The idea is we'll make a prediction for every coordinate
    # in this space, so we can display them later.
    x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
    y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    for es_name in estimators:
        print("Training", es_name, "on", ds_name, "...")
        estimator = estimators[es_name]
        train_input_fn = tf.estimator.inputs.numpy_input_fn(
            {'x': X_train},
            y_train,
            num_epochs=None,  # Repeat forever
            shuffle=True
        )
        test_input_fn = tf.estimator.inputs.numpy_input_fn(
            {'x': X_test},
            y_test,
            num_epochs=1,  # A single pass over the test set
            shuffle=False
        )
        # An input function for each point on the mesh
        surface_input_fn = tf.estimator.inputs.numpy_input_fn(
            {'x': np.c_[xx.ravel(), yy.ravel()]},
            num_epochs=1,  # A single pass over the mesh points
            shuffle=False
        )
        estimator.train(train_input_fn, steps=steps)
        # evaluate on the test data
        evaluation = estimator.evaluate(test_input_fn)
        # store the evaluation for later
        evaluations[(ds_name, es_name)] = evaluation
        # Make a prediction for every coordinate in the mesh.
        # Use a dedicated name here: the original rebound the module-level
        # `predictions` dict defined above, silently clobbering it.
        mesh_preds = np.array(get_predictions(estimator, input_fn=surface_input_fn))
        # store the mesh predictions for later
        mesh_predictions[(ds_name, es_name)] = mesh_preds
print("Finished")
```
Let's plot the results.
```
n_datasets = len(datasets)
n_estimators = len(estimators)
figure = plt.figure(figsize=(n_datasets * 6, n_estimators * 2))
plot_num = 1
row = 0
for ds_name in datasets:
X, y = datasets[ds_name]
# Standardize values to 0 mean and unit standard deviation
X = StandardScaler().fit_transform(X)
# Split in to train/test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# Step size in the mesh
h = .02
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the dataset
cm = plt.cm.RdBu
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
ax = plt.subplot(len(datasets), len(estimators) + 1, plot_num)
plot_num += 1
if row == 0: ax.set_title("Input data")
# Plot the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright,
edgecolors='k')
# Plot the testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6,
edgecolors='k')
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
col = 1
for es_name in estimators:
evaluation = evaluations[(ds_name,es_name)]
accuracy = evaluation["accuracy"]
ax = plt.subplot(len(datasets), len(estimators) + 1, plot_num)
plot_num += 1
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
Z = mesh_predictions[(ds_name, es_name)][:, 1]
# Put the result into a color plot
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
# Plot also the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright,
edgecolors='k')
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
edgecolors='k', alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
if row == 0: ax.set_title(es_name)
ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % accuracy).lstrip('0'),
size=15, horizontalalignment='right')
col += 1
row += 1
plt.tight_layout()
plt.show()
```
Let's visualize the results in TensorBoard.
| github_jupyter |
##### Copyright 2019 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Save and loading APIs
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/guide/keras/save_and_serialize"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/guide/keras/save_and_serialize.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/guide/keras/save_and_serialize.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/guide/keras/save_and_serialize.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
## Introduction
A Keras model consists of multiple components:
- An architecture, or configuration, which specifies what layers the model contains, and how they're connected.
- A set of weights values (the "state of the model").
- An optimizer (defined by compiling the model).
- A set of losses and metrics (defined by compiling the model or calling `add_loss()` or `add_metric()`).
The Keras API makes it possible to save all of these pieces to disk at once, or to only selectively save some of them:
- Saving everything into a single archive in the TensorFlow SavedModel format (or in the older Keras H5 format). This is the standard practice.
- Saving the architecture / configuration only, typically as a JSON file.
- Saving the weights values only. This is generally used when training the model.
Let's take a look at each of these options: when would you use one or the other? How do they work?
## The short answer to saving & loading
The TL;DR
**Saving a Keras model:**
```python
model = ... # Get model (Sequential, Functional Model, or Model subclass)
model.save('path/to/location')
```
**Loading the model back:**
```python
from tensorflow import keras
model = keras.models.load_model('path/to/location')
```
Now, let's look at the details.
## Setup
```
!pip install -U tf-nightly
import numpy as np
import tensorflow as tf
from tensorflow import keras
```
## Whole-model saving & loading
You can save an entire model to a single artifact. It will include:
- The model's architecture/config
- The model's weight values (which were learned during training)
- The model's compilation information (if `compile()`) was called
- The optimizer and its state, if any (this enables you to restart training where you left)
#### APIs
- `model.save()` or `tf.keras.models.save_model()`
- `tf.keras.models.load_model()`
There are two formats you can use to save an entire model to disk: **the TensorFlow SavedModel format**, and **the older Keras H5 format**. The recommended format is SavedModel. It is the default when you use `model.save()`.
You can switch to the H5 format by:
- Passing `save_format='h5'` to `save()`.
- Passing a filename that ends in `.h5` or `.keras` to `save()`.
### SavedModel format
**Example:**
```
def get_model():
    """Build and compile a minimal single-Dense-layer regression model."""
    model_input = keras.Input(shape=(32,))
    model_output = keras.layers.Dense(1)(model_input)
    simple_model = keras.Model(model_input, model_output)
    # Adam + MSE: enough compile state to demonstrate save/restore of the
    # optimizer alongside the weights.
    simple_model.compile(optimizer='adam', loss='mean_squared_error')
    return simple_model
model = get_model()
# Train the model.
test_input = np.random.random((128, 32))
test_target = np.random.random((128, 1))
model.fit(test_input, test_target)
# Calling `save('my_model')` creates a SavedModel folder `my_model`.
model.save('my_model')
# It can be used to reconstruct the model identically.
reconstructed_model = keras.models.load_model('my_model')
# Let's check:
np.testing.assert_allclose(
model.predict(test_input),
reconstructed_model.predict(test_input))
# The reconstructed model is already compiled and has retained the optimizer
# state, so training can resume:
reconstructed_model.fit(test_input, test_target)
```
#### What the SavedModel contains
Calling `model.save('my_model')` creates a folder named `my_model`, containing the following:
```
!ls my_model
```
The model architecture, and training configuration (including the optimizer, losses, and metrics) are stored in `saved_model.pb`. The weights are saved in the `variables/` directory.
For detailed information on the SavedModel format, see the [SavedModel guide (*The SavedModel format on disk*)](https://www.tensorflow.org/guide/saved_model#the_savedmodel_format_on_disk).
#### How SavedModel handles custom objects
When saving the model and its layers, the SavedModel format stores the class name, **call function**, losses, and weights (and the config, if implemented). The call function defines the computation graph of the model/layer.
In the absence of the model/layer config, the call function is used to create a model that exists like the original model which can be trained, evaluated, and used for inference.
Nevertheless, it is always a good practice to define the `get_config` and `from_config` methods when writing a custom model or layer class. This allows you to easily update the computation later if needed. See the section about [Custom objects](save_and_serialize.ipynb#custom-objects) for more information.
Below is an example of what happens when loading custom layers from the SavedModel format **without** overwriting the config methods.
```
class CustomModel(keras.Model):
    """Subclassed model: a stack of Dense layers, one per entry in `hidden_units`."""

    def __init__(self, hidden_units):
        super(CustomModel, self).__init__()
        # One Dense layer per requested width, applied in order.
        self.dense_layers = [keras.layers.Dense(u) for u in hidden_units]

    def call(self, inputs):
        # Sequentially apply each Dense layer to the running activation.
        x = inputs
        for layer in self.dense_layers:
            x = layer(x)
        return x
model = CustomModel([16, 16, 10])
# Build the model by calling it
input_arr = tf.random.uniform((1, 5))
outputs=model(input_arr)
model.save('my_model')
# Delete the custom-defined model class to ensure that the loader does not have
# access to it.
del CustomModel
loaded = keras.models.load_model('my_model')
np.testing.assert_allclose(loaded(input_arr), outputs)
print("Original model:", model)
print("Loaded model:", loaded)
```
As seen in the example above, the loader dynamically creates a new model class that acts like the original model.
### Keras H5 format
Keras also supports saving a single HDF5 file containing the model's architecture, weights values, and `compile()` information. It is a light-weight alternative to SavedModel.
**Example:**
```
model = get_model()
# Train the model.
test_input = np.random.random((128, 32))
test_target = np.random.random((128, 1))
model.fit(test_input, test_target)
# Calling `save('my_model.h5')` creates a h5 file `my_model.h5`.
model.save('my_h5_model.h5')
# It can be used to reconstruct the model identically.
reconstructed_model = keras.models.load_model('my_h5_model.h5')
# Let's check:
np.testing.assert_allclose(
model.predict(test_input),
reconstructed_model.predict(test_input))
# The reconstructed model is already compiled and has retained the optimizer
# state, so training can resume:
reconstructed_model.fit(test_input, test_target)
```
#### Limitations
Compared to the SavedModel format, there are two things that don't get included in the H5 file:
- **External losses & metrics** added via `model.add_loss()` & `model.add_metric()` are not saved (unlike SavedModel). If you have such losses & metrics on your model and you want to resume training, you need to add these losses back yourself after loading the model. Note that this does not apply to losses/metrics created *inside* layers via `self.add_loss()` & `self.add_metric()`. As long as the layer gets loaded, these losses & metrics are kept, since they are part of the `call` method of the layer.
- The **computation graph of custom objects** such as custom layers is not included in the saved file. At loading time, Keras will need access to the Python classes/functions of these objects in order to reconstruct the model. See [Custom objects](save_and_serialize.ipynb#custom-objects).
## Saving the architecture
The model's configuration (or architecture) specifies what layers the model contains, and how these layers are connected*. If you have the configuration of a model, then the model can be created with a freshly initialized state for the weights and no compilation information.
*Note this only applies to models defined using the functional or Sequential apis, not subclassed models.
### Configuration of a Sequential model or Functional API model
These types of models are explicit graphs of layers: their configuration is always available in a structured form.
#### APIs
- `get_config()` and `from_config()`
- `tf.keras.models.model_to_json()` and `tf.keras.models.model_from_json()`
#### `get_config()` and `from_config()`
Calling `config = model.get_config()` will return a Python dict containing the configuration of the model. The same model can then be reconstructed via `Sequential.from_config(config)` (for a `Sequential` model) or `Model.from_config(config)` (for a Functional API model).
The same workflow also works for any serializable layer.
**Layer example:**
```
layer = keras.layers.Dense(3, activation='relu')
layer_config = layer.get_config()
new_layer = keras.layers.Dense.from_config(layer_config)
```
**Sequential model example:**
```
model = keras.Sequential([keras.Input((32,)), keras.layers.Dense(1)])
config = model.get_config()
new_model = keras.Sequential.from_config(config)
```
**Functional model example:**
```
inputs = keras.Input((32,))
outputs = keras.layers.Dense(1)(inputs)
model = keras.Model(inputs, outputs)
config = model.get_config()
new_model = keras.Model.from_config(config)
```
#### `to_json()` and `tf.keras.models.model_from_json()`
This is similar to `get_config` / `from_config`, except it turns the model into a JSON string, which can then be loaded without the original model class. It is also specific to models, it isn't meant for layers.
**Example:**
```
model = keras.Sequential([keras.Input((32,)), keras.layers.Dense(1)])
json_config = model.to_json()
new_model = keras.models.model_from_json(json_config)
```
### Custom objects
**Models and layers**
The architecture of subclassed models and layers are defined in the methods `__init__` and `call`. They are considered Python bytecode, which cannot be serialized into a JSON-compatible config*.
In order to save/load a model with custom-defined layers, or a subclassed model, you should overwrite the `get_config` and optionally `from_config` methods. Additionally, you should register the custom object so that Keras is aware of it.
*you could try serializing the bytecode (e.g. via `pickle`), but it's completely unsafe and means your model cannot be loaded on a different system.
**Custom functions**
Custom-defined functions (e.g. activation, loss, or initialization) do not need a `get_config` method. The function name is sufficient for loading as long as it is registered as a custom object.
#### Defining the config methods
Specifications:
* `get_config` should return a JSON-serializable dictionary in order to be compatible with the Keras architecture- and model-saving APIs.
* `from_config(config)` (`classmethod`) should return a new layer or model object that is created from the config. The default implementation returns `cls(**config)`.
Example:
```
class CustomLayer(keras.layers.Layer):
def __init__(self, a):
self.var = tf.Variable(a, name='var_a')
def call(self, inputs, training=False):
if training:
return inputs * self.var
else:
return inputs
def get_config(self):
return {'a': self.var.numpy()}
# There's actually no need to define `from_config` here, since returning
# `cls(**config)` is the default behavior.
@classmethod
def from_config(cls, config):
return cls(**config)
layer = CustomLayer(5)
layer.var.assign(2)
serialized_layer = keras.layers.serialize(layer)
new_layer = keras.layers.deserialize(serialized_layer, custom_objects={'CustomLayer': CustomLayer})
```
#### Registering the custom object
Keras keeps a note of which class generated the config. From the example above, `tf.keras.layers.serialize` generates a serialized form of the custom layer:
```
{'class_name': 'CustomLayer', 'config': {'a': 2}}
```
Keras keeps a master list of all built-in layer, model, optimizer, and metric classes, which is used to find the correct class to call `from_config`. If the class can't be found, then an error is raised (`ValueError: Unknown layer`). There are a few ways to register custom classes to this list:
1. Setting `custom_objects` argument in the loading function. (see the example in section above "Defining the config methods")
2. `tf.keras.utils.custom_object_scope` or `tf.keras.utils.CustomObjectScope`
3. `tf.keras.utils.register_keras_serializable`
#### Custom layer and function example
```
class CustomLayer(keras.layers.Layer):
    """Dense-like layer (w.x + b) whose `units` count round-trips via get_config."""

    def __init__(self, units=32, **kwargs):
        super(CustomLayer, self).__init__(**kwargs)
        self.units = units

    def build(self, input_shape):
        # Weights are created lazily, once the input feature size is known.
        self.w = self.add_weight(
            shape=(input_shape[-1], self.units),
            initializer="random_normal",
            trainable=True,
        )
        self.b = self.add_weight(
            shape=(self.units,), initializer="random_normal", trainable=True
        )

    def call(self, inputs):
        return tf.matmul(inputs, self.w) + self.b

    def get_config(self):
        # Include `units` so the layer can be reconstructed from its config.
        config = super(CustomLayer, self).get_config()
        config.update({"units": self.units})
        return config
def custom_activation(x):
    """Squared-tanh activation: tanh(x) ** 2 (always non-negative)."""
    squashed = tf.nn.tanh(x)
    return squashed ** 2
# Make a model with the CustomLayer and custom_activation
inputs = keras.Input((32,))
x = CustomLayer(32)(inputs)
outputs = keras.layers.Activation(custom_activation)(x)
model = keras.Model(inputs, outputs)
# Retrieve the config
config = model.get_config()
# At loading time, register the custom objects with a `custom_object_scope`:
custom_objects = {'CustomLayer': CustomLayer,
'custom_activation': custom_activation}
with keras.utils.custom_object_scope(custom_objects):
new_model = keras.Model.from_config(config)
```
### In-memory model cloning
You can also do in-memory cloning of a model via `tf.keras.models.clone_model()`. This is equivalent to getting the config then recreating the model from its config (so it does not preserve compilation information or layer weights values).
**Example**:
```
with keras.utils.custom_object_scope(custom_objects):
new_model = keras.models.clone_model(model)
```
## Saving & loading only the model's weights values
You can choose to only save & load a model's weights. This can be useful if:
- You only need the model for inference: in this case you won't need to restart training, so you don't need the compilation information or optimizer state.
- You are doing transfer learning: in this case you will be training a new model reusing the state of a prior model, so you don't need the compilation information of the prior model.
### APIs for in-memory weight transfer
Weights can be copied between different objects by using `get_weights` and `set_weights`:
* `tf.keras.layers.Layer.get_weights()`: Returns a list of numpy arrays.
* `tf.keras.layers.Layer.set_weights()`: Sets the model weights to the values in the `weights` argument.
Examples below.
***Transferring weights from one layer to another, in memory***
```
def create_layer():
    """Build a 64-unit ReLU dense layer with its weights already constructed."""
    dense = keras.layers.Dense(64, activation='relu', name='dense_2')
    # Build eagerly so get_weights()/set_weights() work immediately.
    dense.build((None, 784))
    return dense
layer_1 = create_layer()
layer_2 = create_layer()
# Copy weights from layer 1 to layer 2
layer_2.set_weights(layer_1.get_weights())
```
***Transferring weights from one model to another model with a compatible architecture, in memory***
```
# Create a simple functional model
inputs = keras.Input(shape=(784,), name='digits')
x = keras.layers.Dense(64, activation='relu', name='dense_1')(inputs)
x = keras.layers.Dense(64, activation='relu', name='dense_2')(x)
outputs = keras.layers.Dense(10, name='predictions')(x)
functional_model = keras.Model(inputs=inputs, outputs=outputs, name='3_layer_mlp')
# Define a subclassed model with the same architecture
class SubclassedModel(keras.Model):
    """Subclassed three-layer MLP mirroring the functional `3_layer_mlp`."""

    def __init__(self, output_dim, name=None):
        super(SubclassedModel, self).__init__(name=name)
        self.output_dim = output_dim
        self.dense_1 = keras.layers.Dense(64, activation='relu', name='dense_1')
        self.dense_2 = keras.layers.Dense(64, activation='relu', name='dense_2')
        self.dense_3 = keras.layers.Dense(output_dim, name='predictions')

    def call(self, inputs):
        # Chain the three dense layers in order.
        hidden = self.dense_1(inputs)
        hidden = self.dense_2(hidden)
        return self.dense_3(hidden)

    def get_config(self):
        return {'output_dim': self.output_dim, 'name': self.name}
subclassed_model = SubclassedModel(10)
# Call the subclassed model once to create the weights.
subclassed_model(tf.ones((1, 784)))
# Copy weights from functional_model to subclassed_model.
subclassed_model.set_weights(functional_model.get_weights())
assert len(functional_model.weights) == len(subclassed_model.weights)
for a, b in zip(functional_model.weights, subclassed_model.weights):
np.testing.assert_allclose(a.numpy(), b.numpy())
```
***The case of stateless layers***
Because stateless layers do not change the order or number of weights, models can have compatible architectures even if there are extra/missing stateless layers.
```
inputs = keras.Input(shape=(784,), name='digits')
x = keras.layers.Dense(64, activation='relu', name='dense_1')(inputs)
x = keras.layers.Dense(64, activation='relu', name='dense_2')(x)
outputs = keras.layers.Dense(10, name='predictions')(x)
functional_model = keras.Model(inputs=inputs, outputs=outputs, name='3_layer_mlp')
inputs = keras.Input(shape=(784,), name='digits')
x = keras.layers.Dense(64, activation='relu', name='dense_1')(inputs)
x = keras.layers.Dense(64, activation='relu', name='dense_2')(x)
# Add a dropout layer, which does not contain any weights.
x = keras.layers.Dropout(.5)(x)
outputs = keras.layers.Dense(10, name='predictions')(x)
functional_model_with_dropout = keras.Model(inputs=inputs, outputs=outputs, name='3_layer_mlp')
functional_model_with_dropout.set_weights(functional_model.get_weights())
```
### APIs for saving weights to disk & loading them back
Weights can be saved to disk by calling `model.save_weights` in the following formats:
* TensorFlow Checkpoint
* HDF5
The default format for `model.save_weights` is TensorFlow checkpoint. There are two ways to specify the save format:
1. `save_format` argument: Set the value to `save_format="tf"` or `save_format="h5"`.
2. `path` argument: If the path ends with `.h5` or `.hdf5`, then the HDF5 format is used. Other suffixes will result in a TensorFlow checkpoint unless `save_format` is set.
There is also an option of retrieving weights as in-memory numpy arrays. Each API has its pros and cons, which are detailed below.
### TF Checkpoint format
**Example**
```
# Runnable example
sequential_model = keras.Sequential(
[keras.Input(shape=(784,), name='digits'),
keras.layers.Dense(64, activation='relu', name='dense_1'),
keras.layers.Dense(64, activation='relu', name='dense_2'),
keras.layers.Dense(10, name='predictions')])
sequential_model.save_weights('ckpt')
load_status = sequential_model.load_weights('ckpt')
# `assert_consumed` can be used as validation that all variable values have been
# restored from the checkpoint. See `tf.train.Checkpoint.restore` for other
# methods in the Status object.
load_status.assert_consumed()
```
#### Format details
The TensorFlow Checkpoint format saves and restores the weights using object attribute names. For instance, consider the `tf.keras.layers.Dense` layer. The layer contains two weights: `dense.kernel` and `dense.bias`. When the layer is saved to the `tf` format, the resulting checkpoint contains the keys `"kernel"` and `"bias"` and their corresponding weight values. For more information see ["Loading mechanics" in the Checkpoint guide](https://www.tensorflow.org/guide/checkpoint#loading_mechanics).
Note that attribute/graph edge is named after **the name used in parent object, not the name of the variable**. Consider the `CustomLayer` in the example below. The variable `CustomLayer.var` is saved with `"var"` as part of key, not `"var_a"`.
```
class CustomLayer(keras.layers.Layer):
def __init__(self, a):
self.var = tf.Variable(a, name='var_a')
layer = CustomLayer(5)
layer_ckpt = tf.train.Checkpoint(layer=layer).save('custom_layer')
ckpt_reader = tf.train.load_checkpoint(layer_ckpt)
ckpt_reader.get_variable_to_dtype_map()
```
#### Transfer learning example
Essentially, as long as two models have the same architecture, they are able to share the same checkpoint. Example:
```
inputs = keras.Input(shape=(784,), name='digits')
x = keras.layers.Dense(64, activation='relu', name='dense_1')(inputs)
x = keras.layers.Dense(64, activation='relu', name='dense_2')(x)
outputs = keras.layers.Dense(10, name='predictions')(x)
functional_model = keras.Model(inputs=inputs, outputs=outputs, name='3_layer_mlp')
# Extract a portion of the functional model defined in the Setup section.
# The following lines produce a new model that excludes the final output
# layer of the functional model.
pretrained = keras.Model(functional_model.inputs,
functional_model.layers[-1].input,
name='pretrained_model')
# Randomly assign "trained" weights.
for w in pretrained.weights:
w.assign(tf.random.normal(w.shape))
pretrained.save_weights('pretrained_ckpt')
pretrained.summary()
# Assume this is a separate program where only 'pretrained_ckpt' exists.
# Create a new functional model with a different output dimension.
inputs = keras.Input(shape=(784,), name='digits')
x = keras.layers.Dense(64, activation='relu', name='dense_1')(inputs)
x = keras.layers.Dense(64, activation='relu', name='dense_2')(x)
outputs = keras.layers.Dense(5, name='predictions')(x)
model = keras.Model(inputs=inputs, outputs=outputs, name='new_model')
# Load the weights from pretrained_ckpt into model.
model.load_weights('pretrained_ckpt')
# Check that all of the pretrained weights have been loaded.
for a, b in zip(pretrained.weights, model.weights):
np.testing.assert_allclose(a.numpy(), b.numpy())
print('\n','-'*50)
model.summary()
# Example 2: Sequential model
# Recreate the pretrained model, and load the saved weights.
inputs = keras.Input(shape=(784,), name='digits')
x = keras.layers.Dense(64, activation='relu', name='dense_1')(inputs)
x = keras.layers.Dense(64, activation='relu', name='dense_2')(x)
pretrained_model = keras.Model(inputs=inputs, outputs=x, name='pretrained')
# Sequential example:
model = keras.Sequential(
[pretrained_model, keras.layers.Dense(5, name='predictions')])
model.summary()
pretrained_model.load_weights('pretrained_ckpt')
# Warning! Calling `model.load_weights('pretrained_ckpt')` won't throw an error,
# but will *not* work as expected. If you inspect the weights, you'll see that
# none of the weights will have loaded. `pretrained_model.load_weights()` is the
# correct method to call.
```
It is generally recommended to stick to the same API for building models. If you
switch between Sequential and Functional, or Functional and subclassed, etc., then always rebuild the pre-trained model and load the pre-trained weights to that model.
The next question is, how can weights be saved and loaded to different models if the model architectures are quite different? The solution is to use `tf.train.Checkpoint` to save and restore the exact layers/variables. Example:
```
# Create a subclassed model that essentially uses functional_model's first
# and last layers.
# First, save the weights of functional_model's first and last dense layers.
first_dense = functional_model.layers[1]
last_dense = functional_model.layers[-1]
ckpt_path = tf.train.Checkpoint(
dense=first_dense,
kernel=last_dense.kernel,
bias=last_dense.bias).save('ckpt')
# Define the subclassed model.
class ContrivedModel(keras.Model):
    """Model combining a dense layer with explicit kernel/bias variables."""

    def __init__(self):
        super(ContrivedModel, self).__init__()
        self.first_dense = keras.layers.Dense(64)
        # FIX: `add_weight` replaces the deprecated `add_variable` alias
        # (removed in newer TF releases); behavior is identical, and it
        # matches the `add_weight` usage elsewhere in this guide.
        self.kernel = self.add_weight('kernel', shape=(64, 10))
        self.bias = self.add_weight('bias', shape=(10,))

    def call(self, inputs):
        # Dense layer followed by an explicit affine transform.
        x = self.first_dense(inputs)
        return tf.matmul(x, self.kernel) + self.bias
model = ContrivedModel()
# Call model on inputs to create the variables of the dense layer.
_ = model(tf.ones((1, 784)))
# Create a Checkpoint with the same structure as before, and load the weights.
tf.train.Checkpoint(
dense=model.first_dense,
kernel=model.kernel,
bias=model.bias).restore(ckpt_path).assert_consumed()
```
### HDF5 format
The HDF5 format contains weights grouped by layer names. The weights are lists ordered by concatenating the list of trainable weights to the list of non-trainable weights (same as `layer.weights`). Thus, a model can use a hdf5 checkpoint if it has the same layers and trainable statuses as saved in the checkpoint.
**Example**
```
# Runnable example
sequential_model = keras.Sequential(
[keras.Input(shape=(784,), name='digits'),
keras.layers.Dense(64, activation='relu', name='dense_1'),
keras.layers.Dense(64, activation='relu', name='dense_2'),
keras.layers.Dense(10, name='predictions')])
sequential_model.save_weights('weights.h5')
sequential_model.load_weights('weights.h5')
```
Note that changing `layer.trainable` may result in a different
`layer.weights` ordering when the model contains nested layers.
```
class NestedDenseLayer(keras.layers.Layer):
    """Layer that wraps two stacked dense sub-layers."""

    def __init__(self, units, name=None):
        super(NestedDenseLayer, self).__init__(name=name)
        self.dense_1 = keras.layers.Dense(units, name='dense_1')
        self.dense_2 = keras.layers.Dense(units, name='dense_2')

    def call(self, inputs):
        # Feed the first dense layer's output into the second.
        first_out = self.dense_1(inputs)
        return self.dense_2(first_out)
nested_model = keras.Sequential([keras.Input((784,)), NestedDenseLayer(10, 'nested')])
variable_names = [v.name for v in nested_model.weights]
print('variables: {}'.format(variable_names))
print('\nChanging trainable status of one of the nested layers...')
nested_model.get_layer('nested').dense_1.trainable = False
variable_names_2 = [v.name for v in nested_model.weights]
print('\nvariables: {}'.format(variable_names_2))
print('variable ordering changed:', variable_names != variable_names_2)
```
#### Transfer learning example
When loading pretrained weights from HDF5, it is recommended to load the weights into the original checkpointed model, and then extract the desired weights/layers into a new model. Example:
```
def create_functional_model():
    """Construct the 3-layer MLP (784 -> 64 -> 64 -> 10) used in the examples."""
    digits_in = keras.Input(shape=(784,), name='digits')
    hidden = keras.layers.Dense(64, activation='relu', name='dense_1')(digits_in)
    hidden = keras.layers.Dense(64, activation='relu', name='dense_2')(hidden)
    predictions = keras.layers.Dense(10, name='predictions')(hidden)
    return keras.Model(inputs=digits_in, outputs=predictions, name='3_layer_mlp')
functional_model = create_functional_model()
functional_model.save_weights('pretrained_weights.h5')
# In a separate program:
pretrained_model = create_functional_model()
pretrained_model.load_weights('pretrained_weights.h5')
# Create a new model by extracting layers from the original model:
extracted_layers = pretrained_model.layers[:-1]
extracted_layers.append(keras.layers.Dense(5, name='dense_3'))
model = keras.Sequential(extracted_layers)
model.summary()
```
| github_jupyter |
# Quantum algorithm
### Overview
An algorithm generally refers to a procedure that a computer performs in order to solve a certain problem.
A quantum algorithm is an algorithm that is designed to be run on a quantum computer.
It is often described by mathematical expression using bracket symbols, but eventually it is reduced to a quantum circuit using concrete quantum gates.
The aim of quantum algorithms is to perform calculations that are faster than classical computers, or that are virtually impossible with classical computers due to physical constraints (e.g., memory capacity) using quantum computers.
This is achieved by utilizing the superposition and entanglement properties of qubits.
Unlike (classical) bits, $n$ qubits can hold $2^n$ superposition states simultaneously.
At first glance, it is tempting to think that $2^n$ of calculations can be performed in parallel, but unfortunately it is not so simple.
This is because in a quantum computer, only one of the $2^n$ states is probabilistically output by the "measurement" process.
In other words, a quantum computer can hold a huge amount of information internally, but the information it can output is very limited.
Therefore, quantum algorithms need to be designed to be good at obtaining the desired solution with a small number of outputs.
Currently, there are only a very limited number of calculations where quantum algorithms have been shown to be faster than classical algorithms.
Included in that limited number of examples are the famous Shor's and Grover's algorithms.
In the process of learning these algorithms, I think you will be able to grasp the kind of calculations in which a quantum computer is likely to have an advantage over a classical computer.
The discovery of new computations that will make quantum algorithms faster will be the driving force behind the use of quantum computers in the future.
### NISQ algorithm
Current (2021) quantum computers are still small in scale and the impact of noise on computational results cannot be ignored. Such quantum computers are distinguished as Noisy Intermediate-Scale Quantum (NISQ) devices [1].
Quantum algorithms that have been theoretically shown to be faster than classical algorithms, such as those described above, cannot be run on NISQ devices at a practical scale and accuracy.
It is said that it will take several decades to develop a quantum computer that can do this (a large-scale, fault-tolerant quantum computer).
On the other hand, the largest current NISQ devices, as announced in the "quantum supremacy" demonstration, have reached a point where they cannot be efficiently simulated by existing classical computers [2].
Therefore, even if we do not wait for several decades, we expect that there will be calculations that can be performed faster than existing classical computers using NISQ devices.
Such an algorithm is called the NISQ algorithm, and a research and development race is currently underway around the world.
### On learning NISQ and non-NISQ algorithms
In the following tutorials, a distinction will be made between non-NISQ algorithms ("Universal Quantum Algorithm") and NISQ algorithms.
The basic construction of a quantum circuit and the quantum gates used are the same for both.
Compared to non-NISQ algorithms, the NISQ algorithm emphasizes the ability to operate with a small number of qubits and to tolerate errors in computation.
I think that non-NISQ quantum algorithms are better for learning, in terms of seeing how quantum computation can be faster than classical computation.
On the other hand, if you want to focus on practical use in the near future, it is better to focus on the NISQ algorithm.
### References
[1] Bharti, Kishor, et al. "Noisy intermediate-scale quantum (NISQ) algorithms." arXiv preprint arXiv:2101.08448 (2021).
[2] Arute, Frank, et al. "Quantum supremacy using a programmable superconducting processor." Nature 574.7779 (2019): 505-510.
| github_jupyter |
<!--NAVIGATION-->
< [Simple Widget Introduction](03.00-Widget_Basics.ipynb) | [Contents](00.00-index.ipynb) | [Output widgets: leveraging Jupyter's display system](04.01-more-on-output-widget.ipynb) >
# Widgets in the core ipywidgets package
The package `ipywidgets` provides two things:
+ A communication framework between the front end (your browser) and back end (python or other kernel).
+ A set of fundamental user interface elements like buttons and checkboxes.
The next couple of cells create a browser of the available elements. To see more detail about any of the elements click on its title. It will be easier to view both the overview and the detail if you have them open in separate tabs.
```
import ipywidgets as widgets
from widget_org import organized_widgets, list_overview_widget
```
## Instructions
Run the cell below. Click on the name of any widget to see a more detailed example of using the widget.
```
groups = organized_widgets(organize_by='ui')
help_url_base='../reference_guides/complete-ipywidgets-widget-list.ipynb'
list_overview_widget(groups, columns=2, min_width_single_widget=200, help_url_base=help_url_base)
```
## Exercises
You may not have time to finish all of these exercises.
### 1. Fix the example from the previous notebook
The code below is taken from the previous notebook of this tutorial.
Run the code below then try typing a number larger than 10 or smaller than 5 into the text box.
```
slider = widgets.FloatSlider(
value=7.5,
min=5.0,
max=10.0,
step=0.1,
description='Input:',
)
# Create text box to hold slider value
text = widgets.FloatText(description='Value')
# Link slider value and text box value
widgets.link((slider, 'value'), (text, 'value'))
# Put them in a vertical box
widgets.VBox([slider, text])
```
Note the slider has the wrong value! The slider has a minimum and maximum value but the text box doesn't.
Replace the `FloatText` in the code above with a text widget that has a minimum and maximum that matches the slider.
```
# %load solutions/bounded-float-text.py
```
## 2. Two widgets in a box and link them
Put two widgets, the `Play` widget and a widget of your choice that can hold an integer, in a horizontal box.
```
# %load solutions/widgets-in-a-box.py
```
Link the values of the two widgets above so that changing the value of one affects the value of the other.
## 3. Try tabs or accordions
Choose two or more widgets and place them in either different tabs or accordions. Set the name of each tab or accordion to something more meaningful than the default names.
Set which tab or accordion is selected by typing the right code in the cell below (hint, look at the `selected_index` attribute).
<!--NAVIGATION-->
< [Simple Widget Introduction](03.00-Widget_Basics.ipynb) | [Contents](00.00-index.ipynb) | [Output widgets: leveraging Jupyter's display system](04.01-more-on-output-widget.ipynb) >
| github_jupyter |
# Talking Head Anime from a Single Image 2: More Expressive (Manual Poser Tool)
**Instruction**
1. From the main menu, click "Runtime > Change runtime type."
2. Change "Hardware accelerator" to "GPU," and click "Save."
3. Run the four cells below, one by one, in order by clicking the "Play" button to the left of it. Wait for each cell to finish before going to the next one.
4. Scroll down to the end of the last cell, and play with the GUI.
**Constraints on Images**
1. Must be an image of a single humanoid anime character.
2. Must be of size 256x256.
3. The head must be roughly contained in the middle 128x128 middle box.
4. Must have PNG format.
5. Must have an alpha channel.
6. Background pixels must have RGBA=(0,0,0,0). See this link if you do not get clean results.
**Links**
* Github repository: http://github.com/pkhungurn/talking-head-anime-2-demo
* Project writeup: http://pkhungurn.github.io/talking-head-anime-2/
```
# Clone the repository
%cd /content
!git clone https://github.com/pkhungurn/talking-head-anime-2-demo.git
# CD into the repository directory.
%cd /content/talking-head-anime-2-demo
# Download model files
!wget -O data/combiner.pt https://www.dropbox.com/s/at2r3v22xgyoxtk/combiner.pt?dl=0
!wget -O data/eyebrow_decomposer.pt https://www.dropbox.com/s/pbomb5vgens03rk/eyebrow_decomposer.pt?dl=0
!wget -O data/eyebrow_morphing_combiner.pt https://www.dropbox.com/s/yk9m5ok03e0ub1f/eyebrow_morphing_combiner.pt?dl=0
!wget -O data/face_morpher.pt https://www.dropbox.com/s/77sza8qkiwd4qq5/face_morpher.pt?dl=0
!wget -O data/two_algo_face_rotator.pt https://www.dropbox.com/s/ek261g9sspf0cqi/two_algo_face_rotator.pt?dl=0
import torch
FRAME_RATE = 30.0
DEVICE_NAME = 'cuda'
device = torch.device(DEVICE_NAME)
import PIL.Image
import io
from io import StringIO, BytesIO
import IPython.display
import numpy
import ipywidgets
from tha2.util import extract_pytorch_image_from_filelike, convert_output_image_from_torch_to_numpy
import tha2.poser.modes.mode_20
import time
import threading
last_torch_input_image = None
torch_input_image = None
def show_pytorch_image(pytorch_image, output_widget=None):
    """Render a torch RGBA image tensor inline in the notebook.

    `output_widget` is accepted for API compatibility but unused; display
    goes through IPython's normal display mechanism.
    """
    cpu_image = pytorch_image.detach().cpu()
    as_numpy = convert_output_image_from_torch_to_numpy(cpu_image)
    IPython.display.display(PIL.Image.fromarray(as_numpy, mode='RGBA'))
input_image_widget = ipywidgets.Output(
layout={
'border': '1px solid black',
'width': '256px',
'height': '256px'
})
upload_input_image_button = ipywidgets.FileUpload(
accept='.png',
multiple=False,
layout={
'width': '256px'
}
)
output_image_widget = ipywidgets.Output(
layout={
'border': '1px solid black',
'width': '256px',
'height': '256px'
}
)
eyebrow_dropdown = ipywidgets.Dropdown(
options=["troubled", "angry", "lowered", "raised", "happy", "serious"],
value="troubled",
description="Eyebrow:",
)
eyebrow_left_slider = ipywidgets.FloatSlider(
value=0.0,
min=0.0,
max=1.0,
step=0.01,
description="Left:",
readout=True,
readout_format=".2f"
)
eyebrow_right_slider = ipywidgets.FloatSlider(
value=0.0,
min=0.0,
max=1.0,
step=0.01,
description="Right:",
readout=True,
readout_format=".2f"
)
eye_dropdown = ipywidgets.Dropdown(
options=["wink", "happy_wink", "surprised", "relaxed", "unimpressed", "raised_lower_eyelid"],
value="wink",
description="Eye:",
)
eye_left_slider = ipywidgets.FloatSlider(
value=0.0,
min=0.0,
max=1.0,
step=0.01,
description="Left:",
readout=True,
readout_format=".2f"
)
eye_right_slider = ipywidgets.FloatSlider(
value=0.0,
min=0.0,
max=1.0,
step=0.01,
description="Right:",
readout=True,
readout_format=".2f"
)
mouth_dropdown = ipywidgets.Dropdown(
options=["aaa", "iii", "uuu", "eee", "ooo", "delta", "lowered_corner", "raised_corner", "smirk"],
value="aaa",
description="Mouth:",
)
mouth_left_slider = ipywidgets.FloatSlider(
value=0.0,
min=0.0,
max=1.0,
step=0.01,
description="Value:",
readout=True,
readout_format=".2f"
)
mouth_right_slider = ipywidgets.FloatSlider(
value=0.0,
min=0.0,
max=1.0,
step=0.01,
description=" ",
readout=True,
readout_format=".2f",
disabled=True,
)
def update_mouth_sliders(change):
    """Relabel/enable the mouth sliders when the mouth shape changes.

    The corner shapes take independent left/right values; every other shape
    is driven by a single slider, so the right slider is disabled.
    """
    two_sided = mouth_dropdown.value in ("lowered_corner", "raised_corner")
    if two_sided:
        mouth_left_slider.description = "Left:"
        mouth_right_slider.description = "Right:"
        mouth_right_slider.disabled = False
    else:
        mouth_left_slider.description = "Value:"
        mouth_right_slider.description = " "
        mouth_right_slider.disabled = True
mouth_dropdown.observe(update_mouth_sliders, names='value')
iris_small_left_slider = ipywidgets.FloatSlider(
value=0.0,
min=0.0,
max=1.0,
step=0.01,
description="Left:",
readout=True,
readout_format=".2f"
)
iris_small_right_slider = ipywidgets.FloatSlider(
value=0.0,
min=0.0,
max=1.0,
step=0.01,
description="Right:",
readout=True,
readout_format=".2f",
)
iris_rotation_x_slider = ipywidgets.FloatSlider(
value=0.0,
min=-1.0,
max=1.0,
step=0.01,
description="X-axis:",
readout=True,
readout_format=".2f"
)
iris_rotation_y_slider = ipywidgets.FloatSlider(
value=0.0,
min=-1.0,
max=1.0,
step=0.01,
description="Y-axis:",
readout=True,
readout_format=".2f",
)
head_x_slider = ipywidgets.FloatSlider(
value=0.0,
min=-1.0,
max=1.0,
step=0.01,
description="X-axis:",
readout=True,
readout_format=".2f"
)
head_y_slider = ipywidgets.FloatSlider(
value=0.0,
min=-1.0,
max=1.0,
step=0.01,
description="Y-axis:",
readout=True,
readout_format=".2f",
)
neck_z_slider = ipywidgets.FloatSlider(
value=0.0,
min=-1.0,
max=1.0,
step=0.01,
description="Z-axis:",
readout=True,
readout_format=".2f",
)
control_panel = ipywidgets.VBox([
eyebrow_dropdown,
eyebrow_left_slider,
eyebrow_right_slider,
ipywidgets.HTML(value="<hr>"),
eye_dropdown,
eye_left_slider,
eye_right_slider,
ipywidgets.HTML(value="<hr>"),
mouth_dropdown,
mouth_left_slider,
mouth_right_slider,
ipywidgets.HTML(value="<hr>"),
ipywidgets.HTML(value="<center><b>Iris Shrinkage</b></center>"),
iris_small_left_slider,
iris_small_right_slider,
ipywidgets.HTML(value="<center><b>Iris Rotation</b></center>"),
iris_rotation_x_slider,
iris_rotation_y_slider,
ipywidgets.HTML(value="<hr>"),
ipywidgets.HTML(value="<center><b>Head Rotation</b></center>"),
head_x_slider,
head_y_slider,
neck_z_slider,
])
controls = ipywidgets.HBox([
ipywidgets.VBox([
input_image_widget,
upload_input_image_button
]),
control_panel,
ipywidgets.HTML(value=" "),
output_image_widget,
])
poser = tha2.poser.modes.mode_20.create_poser(device)
pose_parameters = tha2.poser.modes.mode_20.get_pose_parameters()
pose_size = poser.get_num_parameters()
last_pose = torch.zeros(1, pose_size).to(device)
iris_small_left_index = pose_parameters.get_parameter_index("iris_small_left")
iris_small_right_index = pose_parameters.get_parameter_index("iris_small_right")
iris_rotation_x_index = pose_parameters.get_parameter_index("iris_rotation_x")
iris_rotation_y_index = pose_parameters.get_parameter_index("iris_rotation_y")
head_x_index = pose_parameters.get_parameter_index("head_x")
head_y_index = pose_parameters.get_parameter_index("head_y")
neck_z_index = pose_parameters.get_parameter_index("neck_z")
def get_pose():
    """Assemble the (1, pose_size) pose tensor from the current GUI widgets.

    Reads the eyebrow/eye/mouth dropdowns and all sliders, writes each value
    into its pose-parameter slot, and returns the tensor on `device`.
    """
    pose = torch.zeros(1, pose_size)

    eyebrow_name = f"eyebrow_{eyebrow_dropdown.value}"
    eyebrow_left_index = pose_parameters.get_parameter_index(f"{eyebrow_name}_left")
    eyebrow_right_index = pose_parameters.get_parameter_index(f"{eyebrow_name}_right")
    pose[0, eyebrow_left_index] = eyebrow_left_slider.value
    pose[0, eyebrow_right_index] = eyebrow_right_slider.value

    eye_name = f"eye_{eye_dropdown.value}"
    eye_left_index = pose_parameters.get_parameter_index(f"{eye_name}_left")
    eye_right_index = pose_parameters.get_parameter_index(f"{eye_name}_right")
    pose[0, eye_left_index] = eye_left_slider.value
    pose[0, eye_right_index] = eye_right_slider.value

    mouth_name = f"mouth_{mouth_dropdown.value}"
    # BUG FIX: the original compared against "mouth_lowered_cornered", which
    # can never match the value built from the "lowered_corner" dropdown
    # option, so the lowered-corner shape fell through to the
    # single-parameter branch.  Compare against the real shape names.
    if mouth_name in ("mouth_lowered_corner", "mouth_raised_corner"):
        mouth_left_index = pose_parameters.get_parameter_index(f"{mouth_name}_left")
        mouth_right_index = pose_parameters.get_parameter_index(f"{mouth_name}_right")
        pose[0, mouth_left_index] = mouth_left_slider.value
        pose[0, mouth_right_index] = mouth_right_slider.value
    else:
        mouth_index = pose_parameters.get_parameter_index(mouth_name)
        pose[0, mouth_index] = mouth_left_slider.value

    pose[0, iris_small_left_index] = iris_small_left_slider.value
    pose[0, iris_small_right_index] = iris_small_right_slider.value
    pose[0, iris_rotation_x_index] = iris_rotation_x_slider.value
    pose[0, iris_rotation_y_index] = iris_rotation_y_slider.value
    pose[0, head_x_index] = head_x_slider.value
    pose[0, head_y_index] = head_y_slider.value
    pose[0, neck_z_index] = neck_z_slider.value
    return pose.to(device)
display(controls)
def update(change):
    """Re-pose the character if the input image or any pose widget changed.

    `change` is the ipywidgets observe payload; it is ignored because the
    function re-reads all widget state directly.
    """
    global last_pose
    global last_torch_input_image
    # Nothing to do until an input image has been uploaded.
    if torch_input_image is None:
        return
    needs_update = False
    if last_torch_input_image is None:
        needs_update = True
    else:
        # Any pixel difference in the input image triggers a re-render.
        if (torch_input_image - last_torch_input_image).abs().max().item() > 0:
            needs_update = True
    pose = get_pose()
    # Any change in the pose vector also triggers a re-render.
    if (pose - last_pose).abs().max().item() > 0:
        needs_update = True
    if not needs_update:
        return
    # Run the poser network and draw the result into the output widget.
    output_image = poser.pose(torch_input_image, pose)[0]
    with output_image_widget:
        output_image_widget.clear_output(wait=True)
        show_pytorch_image(output_image, output_image_widget)
    # Cache current state so the next event can detect changes cheaply.
    last_torch_input_image = torch_input_image
    last_pose = pose
def upload_image(change):
    """Validate the uploaded PNG, store it in `torch_input_image`, preview it.

    Rejects images that are not 256x256 or that lack an alpha channel,
    showing an error message in the input-image widget instead.
    """
    global torch_input_image
    # FileUpload.value maps filename -> file info; with multiple=False this
    # loop effectively picks up the single uploaded file.
    for name, file_info in upload_input_image_button.value.items():
        torch_input_image = extract_pytorch_image_from_filelike(io.BytesIO(file_info['content'])).to(device)
    if torch_input_image is not None:
        c,h,w = torch_input_image.shape
        # The poser networks require exactly 256x256 input.
        if h != 256 or w != 256:
            with input_image_widget:
                input_image_widget.clear_output(wait=True)
                display(ipywidgets.HTML("Image must be 256x256 in size!!!"))
            torch_input_image = None
        # An alpha channel (4 channels, RGBA) is required for clean matting.
        if c != 4:
            with input_image_widget:
                input_image_widget.clear_output(wait=True)
                display(ipywidgets.HTML("Image must have an alpha channel!!!"))
            torch_input_image = None
    if torch_input_image is not None:
        with input_image_widget:
            input_image_widget.clear_output(wait=True)
            show_pytorch_image(torch_input_image, input_image_widget)
    # Re-render the output with the new image (pose widgets unchanged).
    update(None)
upload_input_image_button.observe(upload_image, names='value')
eyebrow_dropdown.observe(update, 'value')
eyebrow_left_slider.observe(update, 'value')
eyebrow_right_slider.observe(update, 'value')
eye_dropdown.observe(update, 'value')
eye_left_slider.observe(update, 'value')
eye_right_slider.observe(update, 'value')
mouth_dropdown.observe(update, 'value')
mouth_left_slider.observe(update, 'value')
mouth_right_slider.observe(update, 'value')
iris_small_left_slider.observe(update, 'value')
iris_small_right_slider.observe(update, 'value')
iris_rotation_x_slider.observe(update, 'value')
iris_rotation_y_slider.observe(update, 'value')
head_x_slider.observe(update, 'value')
head_y_slider.observe(update, 'value')
neck_z_slider.observe(update, 'value')
```
| github_jupyter |
```
data_root = 'data/blizzard'
%matplotlib inline
from matplotlib import pyplot as plt
from os.path import join
from tqdm import tqdm
import numpy as np
import h5py
f = h5py.File(join(data_root, 'samples.hdf5'), 'r')
dataset = f['samples']
plt.plot(dataset[:3*16000])
plt.show()
from keras.layers import merge, Input
from keras.models import Model
from keras.layers.core import Dense, Activation
from keras.layers.convolutional import Convolution1D
from keras.layers.recurrent import LSTM
def generate_samples(data, batch_size, sample_length, discretize_input=False):
    """Endlessly yield (history, target) batches from a 1-D sample stream.

    Each example is ``sample_length`` consecutive samples, with the single
    following sample as a one-hot target over the global ``nb_classes``.
    When ``discretize_input`` is true the history is one-hot encoded too;
    otherwise the raw values are used, scaled into [0, 1] by 255.
    """
    offset = 0
    stride = sample_length + 1
    while True:
        feature_dim = nb_classes if discretize_input else 1
        history = np.zeros((batch_size, sample_length, feature_dim), dtype='float32')
        targets = np.zeros((batch_size, nb_classes), dtype='float32')
        for row in range(batch_size):
            window = data[offset:offset + sample_length]
            if discretize_input:
                # One-hot each timestep: the sample value is the class index.
                for col, value in enumerate(window):
                    history[row, col, value] = 1
            else:
                history[row] = window.reshape(-1, 1)
            # Next sample after the window is the prediction target.
            targets[row, data[offset + sample_length]] = 1
            # Advance by a full window+target, wrapping before the stream end.
            offset = (offset + stride) % (data.shape[0] - stride)
        if not discretize_input:
            history /= 255
        yield (history, targets)
# check out an example batch, make sure the generator is sane
# NOTE(review): `discretize_input` is assigned in a later cell; this cell
# only works after that one has been executed once.
itr = generate_samples(dataset,
                       batch_size=3,
                       sample_length=2,
                       discretize_input=discretize_input)
# Advance the generator past many batches, then inspect one.
for i in range(10000):
    X, y = next(itr)
X, y = next(itr)
# Python 2 print statements (this notebook targets Python 2).
print X.shape, y.shape
print X
print y
# Model / training hyper-parameters.
discretize_input = True
use_residual = True
use_convolutions = True
lstm_activation = 'relu'
conv_activation = 'linear'
nb_classes = 256     # 8-bit audio -> 256 output classes
sample_length = 256  # history window length fed to the network
features = 128
hidden_units = 128
layers = 7 # should be odd
# One-hot channels when discretized, otherwise one raw-valued channel.
if discretize_input:
    inputs = Input(shape=(sample_length, nb_classes))
else:
    inputs = Input(shape=(sample_length, 1))
x = inputs
# Stack pairs of LSTM blocks (optionally with 1x1 convs and residual sums).
# NOTE(review): `(layers - 1) / 2` relies on Python 2 integer division;
# under Python 3 this is a float and range() would raise.
for i in range((layers - 1) / 2):
    x = LSTM(hidden_units, return_sequences=True)(x)
    x = Activation(lstm_activation)(x)
    if use_convolutions:
        # 1x1 convolution mixes channels without touching the time axis.
        x = Convolution1D(features, 1)(x)
        x = Activation(conv_activation)(x)
    y = LSTM(hidden_units, return_sequences=True)(x)
    y = Activation(lstm_activation)(y)
    if use_convolutions:
        y = Convolution1D(features, 1)(y)
        y = Activation(conv_activation)(y)
    if use_residual:
        # Residual connection: sum block input and block output.
        x = merge([x, y], mode='sum')
    else:
        x = y
# Final LSTM collapses the sequence to a single 256-way softmax.
prediction = LSTM(nb_classes, activation='softmax', return_sequences=False)(x)
model = Model(input=inputs, output=prediction)
model.summary()
# NOTE(review): with a multi-class softmax output, 'categorical_crossentropy'
# is the conventional loss -- confirm 'binary_crossentropy' is intentional.
model.compile(loss='binary_crossentropy', optimizer='rmsprop')
batch_size = 32
batches_per_epoch = 100
total_samples = len(dataset) / (sample_length + 1)
samples_per_epoch = batches_per_epoch * batch_size
epochs_per_real_epoch = total_samples / samples_per_epoch
# Python 2 print statements.
print total_samples, 'samples in total'
print batch_size, 'batch_size'
print batches_per_epoch, 'batches_per_epoch'
print samples_per_epoch, 'samples_per_epoch'
print epochs_per_real_epoch, 'epochs for a full pass through the data'
model.fit_generator(generate_samples(dataset, batch_size, sample_length, discretize_input),
                    samples_per_epoch=samples_per_epoch,
                    nb_epoch=epochs_per_real_epoch)
# Sample one window and plot the predicted class distribution for it.
itr = generate_samples(dataset, 1, sample_length, discretize_input)
X, y = next(itr)
prediction = model.predict(X)
plt.plot(prediction.flat)
plt.show()
```
| github_jupyter |
# ADVANCED SQL 2: Advanced Joins and Performance Tuning
We connect to a MySQL server through MySQL Workbench and analyze the parch-and-posey database. This notebook contains the practical exercises for the Udacity course *SQL for Data Analysis*.
```
# we import some required libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from pprint import pprint
import time
print('Done!')
import mysql
from mysql.connector import Error
from getpass import getpass
db_name = 'parch_and_posey'
# Connect to the local MySQL server; credentials are prompted interactively
# so they are never stored in the notebook.
try:
    connection = mysql.connector.connect(host='localhost',
                                         database=db_name,
                                         user=input('Enter UserName:'),
                                         password=getpass('Enter Password:'))
    if connection.is_connected():
        db_Info = connection.get_server_info()
        print("Connected to MySQL Server version ", db_Info)
        # Module-level cursor reused by query_to_df below.
        cursor = connection.cursor()
        cursor.execute("select database();")
        record = cursor.fetchone()
        print("You're connected to database: ", record)
except Error as e:
    print("Error while connecting to MySQL", e)
def query_to_df(query):
    """Run *query* on the module-level MySQL ``cursor`` and return the
    result set as a pandas DataFrame.

    Parameters
    ----------
    query : str
        A complete SQL statement ending with a semicolon.

    Returns
    -------
    pandas.DataFrame with one column per selected field, ``None`` when
    the query yields no rows, or an error-message string when the query
    does not end with ';'.
    """
    st = time.time()
    # Assert Every Query ends with a semi-colon
    try:
        assert query.endswith(';')
    except AssertionError:
        return 'ERROR: Query Must End with ;'
    # Cap the notebook display at 30 rows so large results stay readable.
    pd.set_option('display.max_rows', 30)
    # Process the query, then build the frame in ONE constructor call.
    # (The original appended a one-row frame per result row via pd.concat,
    # which is quadratic in the number of rows and much slower.)
    cursor.execute(query)
    rows = cursor.fetchall()
    if not rows:
        print(f'Query ran for {time.time()-st} secs!')
        return None
    columns = [col[0] for col in cursor.description]
    df = pd.DataFrame(rows, columns=columns)
    print(f'Query ran for {time.time()-st} secs!')
    return df
# Let's see the tables in Parch-and-Posey database
query_to_df(
    'SHOW TABLES;'
)
# Preview the first three rows of every table.
# 1. For the accounts table
query = 'SELECT * FROM accounts LIMIT 3;'
query_to_df(query)
# 2. For the orders table
query = 'SELECT * FROM orders LIMIT 3;'
query_to_df(query)
# 3. For the sales_reps table
query = 'SELECT * FROM sales_reps LIMIT 3;'
query_to_df(query)
# 4. For the web_events table
query = 'SELECT * FROM web_events LIMIT 3;'
query_to_df(query)
# 5. For the region table
query = 'SELECT * FROM region LIMIT 3;'
query_to_df(query)
```
## 1. Inner Join:
<img src='https://video.udacity-data.com/topher/2017/November/5a147488_inner-join/inner-join.png' height=200 width=300>
**Inner Joins** produce results for which the join condition is matched in both tables. It is the default kind of join. Frankly, we can perform the Inner-join with just the key-word Join. SQL automatically does Inner-join on the Join clause.
```
# INNER JOIN: accounts matched with their sales reps; unmatched rows dropped.
query_to_df(
    "SELECT a.*, s.* FROM accounts a INNER JOIN sales_reps s ON a.sales_rep_id=s.id;"
)
```
## 2. Left Join:
<img src='https://video.udacity-data.com/topher/2017/November/5a147484_left-join/left-join.png' height=200 width=300>
**Left Joins** also include unmatched rows from the left table, which is indicated in the “FROM” clause.<br>**NOTE That:** Left-Join and Left-Outer-Join are the same
```
# LEFT JOIN: keep every account, even those without a matching sales rep.
query_to_df(
    "SELECT a.*, s.* FROM accounts a LEFT JOIN sales_reps s ON a.sales_rep_id=s.id;"
)
```
## 3. Right Join:
<img src='https://video.udacity-data.com/topher/2017/November/5a147485_right-join/right-join.png' height=200 width=300>
**Right Joins** are similar to left joins, but include unmatched data from the right table -- the one that’s indicated in the JOIN clause..<br>**NOTE That:** Right-Join and Right-Outer-Join are the same
```
# RIGHT JOIN: keep every sales rep, even those without a matching account.
query_to_df(
    "SELECT a.*, s.* FROM accounts a RIGHT JOIN sales_reps s ON a.sales_rep_id=s.id;"
)
```
## 4. Full-Outer Join:
<img src='https://video.udacity-data.com/topher/2017/November/5a147487_full-outer-join/full-outer-join.png' height=200 width=300>
**Full-Outer Joins** In some cases, you might want to include unmatched rows from both tables being joined. You can do this with a full outer join.<br>**NOTE That:** Full-Outer-Join and Full-Join are the same
A common application of this is when joining two tables on a timestamp. Let’s say you’ve got one table containing the number of item 1 sold each day, and another containing the number of item 2 sold. If a certain date, like January 1, 2018, exists in the left table but not the right, while another date, like January 2, 2018, exists in the right table but not the left:
a left join would drop the row with January 2, 2018 from the result set
a right join would drop January 1, 2018 from the result set
The only way to make sure both January 1, 2018 and January 2, 2018 make it into the results is to do a full outer join. A full outer join returns unmatched records in each table with null values for the columns that came from the opposite table.
If you wanted to return unmatched rows only, which is useful for some cases of data assessment, you can isolate them by adding the following line to the end of the query:
WHERE Table_A.column_name IS NULL OR Table_B.column_name IS NULL
#### Unfortunately MySQL doesn't support a Full-Outer-Join directly... No FULL JOIN clause.
Therefore to simulate a Full-Outer-Join, we'd combine a Left-Join and a Right-Join using a UNION ALL clause like so...
```
"SELECT * FROM accounts a LEFT JOIN orders o ON a.id=o.account_id
UNION ALL
SELECT * FROM accounts a RIGHT JOIN orders o ON a.id=o.account_id
WHERE a.id IS NULL;"
```
```
# Emulate FULL OUTER JOIN in MySQL: LEFT JOIN unioned with the
# right-only rows of a RIGHT JOIN (those where a.id IS NULL).
query_to_df(
"SELECT * FROM accounts a LEFT JOIN orders o ON a.id=o.account_id \
UNION ALL \
SELECT * FROM accounts a RIGHT JOIN orders o ON a.id=o.account_id \
WHERE a.id IS NULL;"
)
```
### Quiz: FULL OUTER JOIN
#### Finding Matched and Unmatched Rows with FULL OUTER JOIN
You’re not likely to use `FULL JOIN (which can also be written as FULL OUTER JOIN)` too often, but the syntax is worth practicing anyway. `LEFT JOIN` and `RIGHT JOIN` each return unmatched rows from one of the tables—`FULL JOIN` returns unmatched rows from both tables. `FULL JOIN` is commonly used in conjunction with aggregations to understand the amount of overlap between two tables.
**Say you're an analyst at Parch & Posey and you want to see:**
* each account who has a sales rep and each sales rep that has an account (all of the columns in these returned rows will be full)
* but also each account that does not have a sales rep and each sales rep that does not have an account (some of the columns in these returned rows will be empty)
This type of question is rare, but `FULL OUTER JOIN` is perfect for it. Write a query with FULL OUTER JOIN to fit the above described Parch & Posey scenario (selecting all of the columns in both of the relevant tables, accounts and sales_reps) then answer the subsequent multiple choice quiz.
```
# Quiz: emulated FULL OUTER JOIN restricted to UNMATCHED rows on either side.
query_to_df(
"SELECT * FROM accounts a LEFT JOIN sales_reps s ON a.sales_rep_id=s.id \
UNION ALL \
SELECT * FROM accounts a RIGHT JOIN sales_reps s ON a.sales_rep_id=s.id \
WHERE a.sales_rep_id IS NULL OR s.id IS NULL;"
)
```
## Joining without an Equals Sign
#### Inequality JOIN:
* One thing to keep in mind is that it's a little bit harder to predict what the results will look like when joining using inequalities. So the result won't give us any hint as to whether we've got the right query. Thus when doing inequality joins, we must pay rapt attention to the query logic being executed.
**EXPERT TIP:**
If you recall from earlier lessons on joins, the `JOIN` clause is evaluated before the `WHERE` clause -- filtering in the `JOIN` clause will eliminate rows before they are joined, while filtering in the `WHERE` clause will leave those rows in and produce some `NULLs`.
**Inequality JOINs**
Inequality operators (**`a.k.a. comparison operators`**) don't only need to be date times or numbers, they also work on strings
**QUIZ:**
Write a query that left joins the accounts table and the sales_reps tables on each sale rep's ID number and joins it using the < comparison operator on accounts.primary_poc and sales_reps.name. Result should have 3 columns:- The account_name, primary_poc and sales_rep name
```
# Inequality join: equality on the rep id PLUS a string comparison
# (primary_poc alphabetically before the rep name).
query_to_df(
"SELECT a.name acct_name, a.primary_poc primary_poc, s.name sales_rep FROM accounts a LEFT JOIN sales_reps s \
ON a.sales_rep_id=s.id AND a.primary_poc < s.name LIMIT 10;"
)
```
### SELF JOIN:
Sometimes, we need to join a table with itself. Most of the time, we'd do this when we want to find two events that both occurred one after the other within a given time frame. For example, if we want to know which accounts made multiple orders within 30 days. We can find this by joining the orders table onto itself with an inequality join.
**Expert Tip**
This comes up pretty commonly in **job interviews**. Self JOIN logic can be pretty tricky -- often with multiple conditional statements. It is important to pause and think through each step when joining a table to itself.
**Note**
One of the most common use cases for self JOINs is in cases where two events occurred, one after another. As you may have noticed, using inequalities in conjunction with self JOINs is common.
#### QUIZ:
Use a `SELF JOIN` on the web_events table to find those events that re-occurred not more than one day after their first occurrence. Add a column for the `channel` variable in both instances of the table in your query
```
# Self join: pair each web event with any LATER event on the same account
# that occurred within one day of it.
query_to_df(
"SELECT w1.account_id 1st_acct_id, w1.occurred_at 1st_occurred, w1.channel 1st_channel, \
w2.account_id 2nd_acct_id, w2.occurred_at 2nd_occurred, w2.channel 2nd_channel FROM web_events w1 \
JOIN web_events w2 ON w1.account_id = w2.account_id AND w2.occurred_at > w1.occurred_at AND \
DATEDIFF(w2.occurred_at, w1.occurred_at) <= 1 ORDER BY 1, 2;"
)
# Change False to True below and run cell to terminate connection
if True and connection.is_connected():
    cursor.close()
    connection.close()
    print(f'Connection Terminated: {record} Database.')
```
| github_jupyter |
# Lesson 11
## 00:00:07 - Blog recap
* Blog on super convergence: [The 1cycle policy](https://sgugger.github.io/the-1cycle-policy.html)
* 5x faster than stepwise approaches.
* Lets you use massively high learning rates (somewhere between 1 and 3).
* Trains at high learning rates for lots of the epochs: loss doesn't improve much but it's doing a lot of searching to find generalisable areas.
* When learning rate is high, momentum is lower.
* Hamel Husain's blog on [sequence-to-sequence data products](https://towardsdatascience.com/how-to-create-data-products-that-are-magical-using-sequence-to-sequence-models-703f86a231f8)
## 00:05:42 - Building a sequence-to-sequence model using machine translation
* Neural translation has surpassed traditional translation techniques as of 2016.
* Path of neural translation similar to image classification in 2012: just surpassed state of the art and now moving past it rapidly.
* Four big wins of Neural MT:
1. End-to-end training: all params are optimised to minimise loss function (less hyperparams)
2. Distributed rep share strength: better exploit word and phrase similarities.
3. Better exploitation of context: NMT can use a much bigger context to translate more accurately.
4. More fluent text generation.
* Models use Bidirectional LSTM with Attention (which is obviously not just useful for machine translation).
## 00:10:05 - Translate French into English
* Basic idea: make it look like a standard NN problem, need 3 things:
1. Data (x, y pairs)
2. Architecture
3. Loss function
* Lots of parallel corpuses (some language -> some other language), especially for European documents.
* For bounding boxes, all interesting stuff is in loss function, for translation, it's all in the arch.
## 00:13:16 - Neural translation walkthrough
* Take a sentence in English and put it through an RNN/Encoder.
* Encoder: piece of NN architecture that takes input and turns into some representation.
* Decoder: take the encoder / RNN output and convert into a sequence of French tokens.
* For translating language, you don't know how many words should be outputted from a sentence.
* Key issue: arbitrary length output which don't correspond to the input length.
## 00:18:19 - RNN revision
* Need to understand Lesson 6 if the lesson doesn't make sense.
* RNN is a standard fully connected network, which takes an input to a linear layer which is fed into another layer and so on. However, it has one key difference: the second layer can also accept and concat another input.
<img src="https://i.gyazo.com/900233717de09d0ac63b4330a2c6b877.gif" width="400px">
* Use the same weight matrix for each of the layer outputs and the same weight matrix for each input.
* The diagram can be refactored to be a for loop:
<img src="https://i.gyazo.com/a53c737b2b3c325112430d9d3ad4b6a5.gif" width="400px">
<img src="https://i.gyazo.com/82ecea54084aab349d420720a0caa647.gif" width="400px">
* The refactoring is basically what makes it an RNN.
* RNNs can be stacked: output of one RNN can be fed into another:
<img src="https://i.gyazo.com/0383008c47d943200ea38423ffcb3071.gif" width="400px">
* Need to be able to write it from scratch to really understand it.
```
%matplotlib inline
%reload_ext autoreload
%autoreload 2
import re
from pathlib import Path
import pickle
from collections import Counter, defaultdict
import numpy as np
from fastai.text import Tokenizer, partition_by_cores
# Working directories and corpus filenames for the WMT giga-fren
# English/French parallel corpus.
PATH = Path('data/translate')
TMP_PATH = PATH / 'tmp'
TMP_PATH.mkdir(exist_ok=True)
fname = 'giga-fren.release2.fixed'
en_fname = PATH / f'{fname}.en'
fr_fname = PATH / f'{fname}.fr'
```
### **Start do not rerun**
```
# Download and unpack the giga-fren parallel corpus (very large; run once).
!wget http://www.statmt.org/wmt10/training-giga-fren.tar --directory-prefix={PATH}
!cd {PATH} && tar -xvf training-giga-fren.tar
!cd {PATH} && gunzip giga-fren.release2.fixed.en.gz && gunzip giga-fren.release2.fixed.fr.gz
```
* Training translation models takes a long time.
* No conceptual difference between 2 and 8 layers: use 2 layers because we think it should be enough.
* Find questions that start with Wh (what, where, when etc) and match with French questions:
```
# English questions: lines starting with a Wh-word; French questions: any
# line ending in '?'.  Both patterns stop at the first sentence terminator.
re_eng_questions = re.compile('^(Wh[^?.!]+\?)')
re_french_questions = re.compile('^([^?.!]+\?)')
en_fh = open(en_fname, encoding='utf-8')
fr_fh = open(fr_fname, encoding='utf-8')
lines = []
# The corpora are line-aligned, so zip pairs each English line with its
# French translation.
for eq, fq in zip(en_fh, fr_fh):
    lines.append((
        re_eng_questions.search(eq),
        re_french_questions.search(fq)
    ))
# Keep only pairs where BOTH sides matched the question pattern, then cache.
questions = [(e.group(), f.group()) for e, f in lines if e and f]
pickle.dump(questions, (PATH / 'fr-en-qs.pkl').open('wb'))
```
### **End do not rerun**
```
# Reload the cached (english, french) question pairs.
questions = pickle.load((PATH / 'fr-en-qs.pkl').open('rb'))
```
* We now have 52k sentence pairs:
```
# Peek at the first pairs and the corpus size.
questions[:5], len(questions)
```
* Separate questions into each language:
```
# Split the pairs into parallel English / French tuples.
en_qs, fr_qs = zip(*questions)
```
### **Start do not rerun**
* Tokenise using English and French tokenizer.
```
# The French tokenizer needs the spaCy model downloaded once.
!python -m spacy download fr
# Tokenize both corpora in parallel across CPU cores.
en_tok = Tokenizer.proc_all_mp(partition_by_cores(en_qs))
fr_tok = Tokenizer.proc_all_mp(partition_by_cores(fr_qs), 'fr')
en_tok[0], fr_tok[0]
```
* Want to find the largest 90th sequence sentences, and make that the max sequence length.
```
# 90th-percentile sentence lengths; sentences of 30+ tokens are dropped.
np.percentile([len(o) for o in en_tok], 90), np.percentile([len(o) for o in fr_tok], 90)
keep = np.array([len(o) < 30 for o in en_tok])
# NOTE(review): both sides are filtered by the ENGLISH length only, which
# keeps the corpora aligned -- confirm this is intentional.
en_tok = np.array(en_tok)[keep]
fr_tok = np.array(fr_tok)[keep]
pickle.dump(en_tok, (PATH / 'en_tok.pkl').open('wb'))
pickle.dump(fr_tok, (PATH / 'fr_tok.pkl').open('wb'))
```
### **End do not rerun**
```
# Reload the cached token lists.
en_tok = pickle.load((PATH / 'en_tok.pkl').open('rb'))
fr_tok = pickle.load((PATH / 'fr_tok.pkl').open('rb'))
```
* Don't need to know a lot of NLP stuff for deep learning on text, but the basics are useful: particularly tokenising.
* 00:28:37 - some students in the study group are trying to build language models for Chinese, need a tokeniser like [sentence piece](https://github.com/google/sentencepiece), since it doesn't have individual words.
* Next, turn tokens into numbers:
```
def toks2ids(tok, pre):
    """Build a vocabulary from tokenized sentences and numericalize them.

    Keeps the 40k most frequent tokens, reserves ids 0-3 for the special
    markers ``_bos_``/``_pad_``/``_eos_``/``_unk``, appends an EOS id (2)
    to every sentence, and caches the arrays and vocab under TMP_PATH.

    Parameters
    ----------
    tok : iterable of token lists (one per sentence)
    pre : str prefix ('en' / 'fr') used for the cache filenames

    Returns
    -------
    (ids, itos, stoi) where unknown tokens map to id 3 via the defaultdict.
    """
    freq = Counter(p for o in tok for p in o)
    itos = [o for o, c in freq.most_common(40000)]
    # Specials go in front so their ids stay stable across vocab sizes.
    itos.insert(0, '_bos_')
    itos.insert(1, '_pad_')
    itos.insert(2, '_eos_')
    itos.insert(3, '_unk')
    stoi = defaultdict(lambda: 3, {v: k for k, v in enumerate(itos)})
    ids = np.array([([stoi[o] for o in p] + [2]) for p in tok])
    np.save(TMP_PATH / f'{pre}_ids.npy', ids)
    # Fix: close the cache file explicitly (the original leaked the handle
    # returned by open()).
    with open(TMP_PATH / f'{pre}_itos.pkl', 'wb') as handle:
        pickle.dump(itos, handle)
    return ids, itos, stoi
# Build vocabularies and id arrays for both languages (cached to TMP_PATH).
en_ids, en_itos, en_stoi = toks2ids(en_tok, 'en')
fr_ids, fr_itos, fr_stoi = toks2ids(fr_tok, 'fr')
def load_ids(pre):
    """Load the cached id arrays and vocab written by ``toks2ids``.

    Returns (ids, itos, stoi); unknown tokens map to id 3.
    """
    ids = np.load(TMP_PATH / f'{pre}_ids.npy')
    # Fix: close the cache file explicitly (the original leaked the handle
    # returned by open()).
    with open(TMP_PATH / f'{pre}_itos.pkl', 'rb') as handle:
        itos = pickle.load(handle)
    stoi = defaultdict(lambda: 3, {v: k for k, v in enumerate(itos)})
    return ids, itos, stoi
# Reload from cache and sanity-check one decoded French sentence.
en_ids, en_itos, en_stoi = load_ids('en')
fr_ids, fr_itos, fr_stoi = load_ids('fr')
[fr_itos[o] for o in fr_ids[0]], len(en_itos), len(fr_itos)
```
## 00:33:01 - Word vectors
* Seq-to-seq with language models hasn't been explored yet in academia: lots of potential papers to be written.
* Word2Vec has been surpassed by a number of newer word vectors: FastText is a good choice.
#### **Start do not rerun**
```
# Install the fastText Python bindings from source (run once).
!pip install git+https://github.com/facebookresearch/fastText.git
```
* Need to also download the fasttext word vectors:
```
#!wget https://s3-us-west-1.amazonaws.com/fasttext-vectors/wiki.en.zip --directory-prefix={PATH}
#!wget https://s3-us-west-1.amazonaws.com/fasttext-vectors/wiki.fr.zip --directory-prefix={PATH}
# Unzip the pretrained English and French fastText vectors.
!cd {PATH} && unzip wiki.en.zip && unzip wiki.fr.zip
```
#### **End do not rerun**
```
import fastText as ft
# Load the full binary fastText models (each takes several GB of RAM).
english_vecs = ft.load_model(str((PATH / 'wiki.en.bin')))
french_vecs = ft.load_model(str((PATH / 'wiki.fr.bin')))
```
* Turn it into a dictionary:
```
def get_vecs(lang, ft_vecs):
    """Extract all fastText vectors for *lang* into a {word: vector} dict,
    cache the dict under PATH, and return it.

    Fixes two bugs in the original: the path string was missing its ``f``
    prefix (so the literal file 'wiki.{lang}.pkl' was used), and the file
    was opened in the default read mode, which makes ``pickle.dump`` fail.
    """
    vecd = {w: ft_vecs.get_word_vector(w) for w in ft_vecs.get_words()}
    with open(PATH / f'wiki.{lang}.pkl', 'wb') as handle:
        pickle.dump(vecd, handle)
    return vecd
```
* Preparing data for PyTorch
```
# Truncate every sentence to the 90th-percentile length of its language.
# Fix: the original sliced the French ids with the undefined name
# `rnlen_90` (typo for `frlen_90`), which raises NameError.
# NOTE(review): `enlen_90`/`frlen_90` are not defined in any visible cell;
# they are presumably computed from the percentiles above -- confirm.
en_ids_tr = np.array([o[:enlen_90] for o in en_ids])
fr_ids_tr = np.array([o[:frlen_90] for o in fr_ids])
enlen_90, frlen_90
def iter_to_numpy(*a):
    """Convert one or more iterables into numpy arrays.

    A single argument returns a bare array; several arguments return a
    list with one array per argument.
    """
    if len(a) == 1:
        return np.array(a[0])
    return [np.array(item) for item in a]
class Seq2SeqDataset(Dataset):
    """Paired (source, target) sequence dataset for the translation model."""
    def __init__(self, x, y):
        # x: source-language id arrays; y: target-language id arrays.
        self.x = x
        self.y = y
    def __getitem__(self, idx):
        # Return the pair as numpy arrays for the default collation.
        return iter_to_numpy(self.x[idx], self.y[idx])
    def __len__(self):
        return len(self.x)
# Random 90/10 train/validation split, seeded for reproducibility.
# Fix: the original indexed the undefined name `es_ids_tr` (typo for
# `en_ids_tr`), which raises NameError.
np.random.seed(42)
trn_keep = np.random.rand(len(en_ids_tr)) > 0.1
en_trn, fr_trn = en_ids_tr[trn_keep], fr_ids_tr[trn_keep]
en_val, fr_val = en_ids_tr[~trn_keep], fr_ids_tr[~trn_keep]
len(en_trn), len(en_val)
# Datasets translate French (x) -> English (y).
trn_ds = Seq2SeqDataset(fr_trn, en_trn)
val_ds = Seq2SeqDataset(fr_val, en_val)
bs = 125
```
| github_jupyter |
```
import torch
import torch.nn as nn
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import numpy as np
import matplotlib.pyplot as plt
import cv2
import re
import os
import copy
from helpers import makedir, find_high_activation_crop
import model
import push
#import train_and_test as tnt
import time
import save
from log import create_logger
from preprocess import mean, std, preprocess_input_function, undo_preprocess_input_function
class PPNet_ensemble(nn.Module):
    """Ensemble wrapper that sums the logits of several ProtoPNets.

    forward(x) returns (summed logits, list of per-network min-distances).
    """
    def __init__(self, ppnets):
        super(PPNet_ensemble, self).__init__()
        # Fix: use nn.ModuleList instead of a plain Python list so the
        # member networks are registered as submodules -- .parameters(),
        # .cuda()/.to() and state_dict() would otherwise silently skip
        # them.  Indexing, len() and iteration behave like a list.
        self.ppnets = nn.ModuleList(ppnets)

    def forward(self, x):
        logits, min_distances_0 = self.ppnets[0](x)
        min_distances = [min_distances_0]
        for i in range(1, len(self.ppnets)):
            logits_i, min_distances_i = self.ppnets[i](x)
            # In-place accumulation avoids allocating a new logits tensor.
            logits.add_(logits_i)
            min_distances.append(min_distances_i)
        return logits, min_distances
##### MODEL AND DATA LOADING
# Pin all work to the first visible GPU.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
# load the models
# provide paths to saved models you want to combine:
# e.g. load_model_paths = ['./saved_models/densenet121/003/30_18push0.8043.pth',
#                          './saved_models/resnet34/002/10_19push0.7920.pth',
#                          './saved_models/vgg19/003/10_18push0.7822.pth']
# MUST NOT BE EMPTY
load_model_paths = []
ppnets = []
epoch_number_strs = []
start_epoch_numbers = []
for load_model_path in load_model_paths:
    # Checkpoint filenames begin with their epoch number (e.g. "30_18push...").
    load_model_name = load_model_path.split('/')[-1]
    epoch_number_str = re.search(r'\d+', load_model_name).group(0)
    epoch_number_strs.append(epoch_number_str)
    start_epoch_number = int(epoch_number_str)
    start_epoch_numbers.append(start_epoch_number)
    print('load model from ' + load_model_path)
    ppnet = torch.load(load_model_path)
    ppnet = ppnet.cuda()
    ppnets.append(ppnet)
ppnet_ensemble = PPNet_ensemble(ppnets)
ppnet_ensemble = ppnet_ensemble.cuda()
ppnet_ensemble_multi = torch.nn.DataParallel(ppnet_ensemble)
# NOTE(review): if load_model_paths is left empty, the next line raises
# IndexError (as warned above).
img_size = ppnets[0].img_size
#ppnet_multi = torch.nn.DataParallel(ppnet)
#img_size = ppnet_multi.module.img_size
#prototype_shape = ppnet.prototype_shape
#max_dist = prototype_shape[1] * prototype_shape[2] * prototype_shape[3]
# load the (test) data
from settings import test_dir
test_batch_size = 100
# Normalize with the dataset statistics imported from preprocess.
normalize = transforms.Normalize(mean=mean,
                                 std=std)
test_dataset = datasets.ImageFolder(
    test_dir,
    transforms.Compose([
        transforms.Resize(size=(img_size, img_size)),
        transforms.ToTensor(),
        normalize,
    ]))
test_loader = torch.utils.data.DataLoader(
    test_dataset, batch_size=test_batch_size, shuffle=True,
    num_workers=4, pin_memory=False)
print('test set size: {0}'.format(len(test_loader.dataset)))
# Print each member network's architecture for inspection.
for ppnet in ppnet_ensemble_multi.module.ppnets:
    print(ppnet)
class_specific = True
# only supports last layer adjustment
def _train_or_test_ppnet_ensemble(model, dataloader, optimizer=None, class_specific=True, use_l1_mask=True,
                                  coefs=None, log=print):
    '''
    Shared train/eval loop for the ensemble; returns accuracy in [0, 1].

    model: the multi-gpu model (DataParallel wrapping a PPNet_ensemble)
    dataloader: yields (image, label) batches
    optimizer: if None, will be test evaluation
    class_specific/use_l1_mask: select how the last-layer L1 penalty is built
    coefs: optional dict of loss-term weights ('crs_ent', 'l1')
    log: logging callable
    '''
    is_train = optimizer is not None
    start = time.time()
    n_examples = 0
    n_correct = 0
    n_batches = 0
    total_cross_entropy = 0
    for i, (image, label) in enumerate(dataloader):
        input = image.cuda()
        target = label.cuda()
        # torch.enable_grad() has no effect outside of no_grad()
        grad_req = torch.enable_grad() if is_train else torch.no_grad()
        with grad_req:
            # nn.Module has implemented __call__() function
            # so no need to call .forward
            output, _ = model(input)
            # compute loss
            cross_entropy = torch.nn.functional.cross_entropy(output, target)
            # Accumulate the L1 norm of every member's last layer.
            # NOTE(review): l1 is created on CPU while each l1_ lives on
            # GPU; in-place add_ across devices may raise on recent torch
            # versions -- confirm on the target version.
            l1 = torch.tensor(0.0)
            if class_specific:
                if use_l1_mask:
                    for ppnet in model.module.ppnets:
                        # Penalize only weights connecting prototypes to the
                        # WRONG class (mask = 1 - class identity).
                        l1_mask = 1 - torch.t(ppnet.prototype_class_identity).cuda()
                        l1_ = (ppnet.last_layer.weight * l1_mask).norm(p=1)
                        l1.add_(l1_)
                else:
                    for ppnet in model.module.ppnets:
                        l1_ = ppnet.last_layer.weight.norm(p=1)
                        l1.add_(l1_)
            else:
                for ppnet in model.module.ppnets:
                    l1_ = ppnet.last_layer.weight.norm(p=1)
                    l1.add_(l1_)
            # evaluation statistics
            _, predicted = torch.max(output.data, 1)
            n_examples += target.size(0)
            n_correct += (predicted == target).sum().item()
            n_batches += 1
            total_cross_entropy += cross_entropy.item()
        # compute gradient and do SGD step
        if is_train:
            # NOTE(review): the class_specific branches below are identical;
            # kept as-is for parity with the single-model training code.
            if class_specific:
                if coefs is not None:
                    loss = (coefs['crs_ent'] * cross_entropy
                            + coefs['l1'] * l1)
                else:
                    loss = cross_entropy + 1e-4 * l1
            else:
                if coefs is not None:
                    loss = (coefs['crs_ent'] * cross_entropy
                            + coefs['l1'] * l1)
                else:
                    loss = cross_entropy + 1e-4 * l1
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        # Free batch tensors promptly to keep GPU memory flat.
        del input
        del target
        del output
        del predicted
    end = time.time()
    log('\ttime: \t{0}'.format(end - start))
    log('\tcross ent: \t{0}'.format(total_cross_entropy / n_batches))
    log('\taccu: \t\t{0}%'.format(n_correct / n_examples * 100))
    last_layer_p1_norm = 0
    for ppnet in model.module.ppnets:
        last_layer_p1_norm += ppnet.last_layer.weight.norm(p=1).item()
    log('\tl1: \t\t{0}'.format(last_layer_p1_norm))
    #p = model.module.prototype_vectors.view(model.module.num_prototypes, -1).cpu()
    #with torch.no_grad():
    #    p_avg_pair_dist = torch.mean(list_of_distances(p, p))
    #log('\tp dist pair: \t{0}'.format(p_avg_pair_dist.item()))
    return n_correct / n_examples
def train_ensemble(model, dataloader, optimizer, class_specific=True, coefs=None, log=print):
    """One optimizer-driven pass over *dataloader*; returns accuracy."""
    assert (optimizer is not None)
    log('\ttrain')
    model.train()
    return _train_or_test_ppnet_ensemble(
        model=model,
        dataloader=dataloader,
        optimizer=optimizer,
        class_specific=class_specific,
        coefs=coefs,
        log=log,
    )
def test_ensemble(model, dataloader, class_specific=True, log=print):
    """Evaluation pass (no optimizer) over *dataloader*; returns accuracy."""
    log('\ttest')
    model.eval()
    return _train_or_test_ppnet_ensemble(
        model=model,
        dataloader=dataloader,
        optimizer=None,
        class_specific=class_specific,
        log=log,
    )
def ensemble_last_only(model, log=print):
    """Freeze everything in every member ppnet except its last layer.

    Used for last-layer-only fine-tuning of the ensemble: backbone
    features, add-on layers and prototype vectors stop receiving
    gradients; only the final classification layer stays trainable.
    """
    for ppnet in model.module.ppnets:
        frozen = list(ppnet.features.parameters())
        frozen += list(ppnet.add_on_layers.parameters())
        for param in frozen:
            param.requires_grad = False
        ppnet.prototype_vectors.requires_grad = False
        for param in ppnet.last_layer.parameters():
            param.requires_grad = True
    log('\tensemble last layer')
#check test accuracy
# Evaluate the assembled ensemble on the held-out test set.
accu = test_ensemble(model=ppnet_ensemble_multi, dataloader=test_loader,
                     class_specific=class_specific, log=print)
```
| github_jupyter |
# RandomForestRegressor with RobustScaler
This Code template is for regression analysis using a RandomForestRegressor based on the Ensemble Learning technique using feature scaling via RobustScaler
### Required Packages
```
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as se
from sklearn.preprocessing import RobustScaler
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
# Suppress sklearn/pandas warnings to keep the template output clean.
warnings.filterwarnings('ignore')
```
### Initialization
Filepath of CSV file
```
#filepath
# Path to the CSV dataset to model (fill in before running).
file_path= ""
```
List of features which are required for model training .
```
#x_values
# Column names to use as model inputs (fill in before running).
features=[]
```
Target variable for prediction.
```
#y_value
# Name of the column to predict (fill in before running).
target=''
```
### Data Fetching
Pandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools.
We will use panda's library to read the CSV file using its storage path.And we use the head function to display the initial row or entry.
```
# Read the dataset and preview the first rows.
df=pd.read_csv(file_path)
df.head()
```
### Feature Selections
It is the process of reducing the number of input variables when developing a predictive model. Used to reduce the number of input variables to both reduce the computational cost of modelling and, in some cases, to improve the performance of the model.
We will assign all the required input features to X and target/outcome to Y.
```
# Split the frame into the input matrix X and the target vector Y.
X = df[features]
Y = df[target]
```
### Data Preprocessing
Since the majority of the machine learning models in the Sklearn library doesn't handle string category data and Null value, we have to explicitly remove or replace null values. The below snippet have functions, which removes the null value if any exists. And convert the string classes data in the datasets by encoding them to integer classes.
```
def NullClearner(df):
if(isinstance(df, pd.Series) and (df.dtype in ["float64","int64"])):
df.fillna(df.mean(),inplace=True)
return df
elif(isinstance(df, pd.Series)):
df.fillna(df.mode()[0],inplace=True)
return df
else:return df
def EncodeX(df):
    """One-hot encode the string/categorical columns of *df*.

    Numeric columns pass through untouched; pd.get_dummies selects the
    columns to encode on its own.
    """
    encoded = pd.get_dummies(df)
    return encoded
```
Calling preprocessing functions on the feature and target set.
```
x=X.columns.to_list()
for i in x:
X[i]=NullClearner(X[i])
X=EncodeX(X)
Y=NullClearner(Y)
X.head()
```
#### Correlation Map
In order to check the correlation between the features, we will plot a correlation matrix. It is effective in summarizing a large amount of data where the goal is to see patterns.
```
# Plot a correlation heatmap of the features; the upper triangle is masked
# so each pairwise correlation appears only once.
f,ax = plt.subplots(figsize=(18, 18))
matrix = np.triu(X.corr())
se.heatmap(X.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax, mask=matrix)
plt.show()
```
### Data Splitting
The train-test split is a procedure for evaluating the performance of an algorithm. The procedure involves taking a dataset and dividing it into two subsets. The first subset is utilized to fit/train the model. The second subset is used for prediction. The main motive is to estimate the performance of the model on new data.
```
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size = 0.2, random_state = 123)
```
### Data Rescaling
RobustScaler removes the median and scales the data according to the quantile range (defaults to IQR: Interquartile Range). The IQR is the range between the 1st quartile (25th percentile) and the 3rd quartile (75th percentile).
We will fit an object of RobustScaler to **train data** then transform the same data via <Code>fit_transform(X_train)</Code> method, following which we will transform **test data** via <Code>transform(X_test)</Code> method.
```
robust_scaler = RobustScaler()
X_train = robust_scaler.fit_transform(X_train)
X_test = robust_scaler.transform(X_test)
```
### Model
A random forest is a meta estimator that fits a number of classifying decision trees on various sub-samples of the dataset and uses averaging to improve the predictive accuracy and control over-fitting. The sub-sample size is controlled with the <code>max_samples</code> parameter if <code>bootstrap=True</code> (default), otherwise the whole dataset is used to build each tree.
#### Model Tuning Parameters
1. n_estimators : int, default=100
> The number of trees in the forest.
2. criterion : {“mae”, “mse”}, default=”mse”
> The function to measure the quality of a split. Supported criteria are “mse” for the mean squared error, which is equal to variance reduction as feature selection criterion, and “mae” for the mean absolute error.
3. max_depth : int, default=None
> The maximum depth of the tree.
4. max_features : {“auto”, “sqrt”, “log2”}, int or float, default=”auto”
> The number of features to consider when looking for the best split:
5. bootstrap : bool, default=True
> Whether bootstrap samples are used when building trees. If False, the whole dataset is used to build each tree.
6. oob_score : bool, default=False
> Whether to use out-of-bag samples to estimate the generalization accuracy.
7. n_jobs : int, default=None
> The number of jobs to run in parallel. fit, predict, decision_path and apply are all parallelized over the trees. <code>None</code> means 1 unless in a joblib.parallel_backend context. <code>-1</code> means using all processors. See Glossary for more details.
8. random_state : int, RandomState instance or None, default=None
> Controls both the randomness of the bootstrapping of the samples used when building trees (if <code>bootstrap=True</code>) and the sampling of the features to consider when looking for the best split at each node (if <code>max_features < n_features</code>).
9. verbose : int, default=0
> Controls the verbosity when fitting and predicting.
```
model = RandomForestRegressor(n_jobs = -1,random_state = 123)
model.fit(X_train, y_train)
```
#### Model Accuracy
We will use the trained model to make a prediction on the test set.Then use the predicted value for measuring the accuracy of our model.
> **score**: The **score** function returns the coefficient of determination <code>R<sup>2</sup></code> of the prediction.
```
print("Accuracy score {:.2f} %\n".format(model.score(X_test,y_test)*100))
```
> **r2_score**: The **r2_score** function computes the proportion of the variance in the target that is explained by our model (the coefficient of determination).
> **mae**: The **mean absolute error** function calculates the total error of our model as the average absolute distance between the real data and the predicted data.
> **mse**: The **mean squared error** function squares the error(penalizes the model for large errors) by our model.
```
y_pred=model.predict(X_test)
print("R2 Score: {:.2f} %".format(r2_score(y_test,y_pred)*100))
print("Mean Absolute Error {:.2f}".format(mean_absolute_error(y_test,y_pred)))
print("Mean Squared Error {:.2f}".format(mean_squared_error(y_test,y_pred)))
```
#### Feature Importances
The Feature importance refers to techniques that assign a score to features based on how useful they are for making the prediction.
```
# Horizontal bar chart of the fitted model's feature importances,
# labelled with the (post-encoding) column names of X.
plt.figure(figsize=(8,6))
n_features = len(X.columns)
plt.barh(range(n_features), model.feature_importances_, align='center')
plt.yticks(np.arange(n_features), X.columns)
plt.xlabel("Feature importance")
plt.ylabel("Feature")
plt.ylim(-1, n_features)
```
#### Prediction Plot
First, we make use of a plot to plot the actual observations, with x_train on the x-axis and y_train on the y-axis.
For the regression line, we will use x_train on the x-axis and then the predictions of the x_train observations on the y-axis.
```
plt.figure(figsize=(14,10))
plt.plot(range(20),y_test[0:20], color = "green")
plt.plot(range(20),model.predict(X_test[0:20]), color = "red")
plt.legend(["Actual","prediction"])
plt.title("Predicted vs True Value")
plt.xlabel("Record number")
plt.ylabel(target)
plt.show()
```
#### Creator: Viraj Jayant, Github: [Profile](https://github.com/Viraj-Jayant)
| github_jupyter |
# Merge facility information
Merge facility data from these 3 sources:
- HCRIS (Healthcare Cost Reporting Information System)
- HIFLD (Homeland Infrastructure Foundation-Level Data)
- DH (Definitive Healthcare)
```
import pandas as pd
import geopandas as gpd
from hscap.geo import spatial_join_facilities
hcris_gdf = gpd.read_file('../data/usa_hospital_beds_hcris2018_v2.geojson', encoding='utf-8')
hifld_gdf = gpd.read_file('../data/hifld_facility_data.geojson', encoding='utf-8')
dh_gdf = gpd.read_file('../data/dh_facility_data.geojson', encoding='utf-8')
```
## Filter facilities with unusable data.
```
dh_gdf = dh_gdf[~dh_gdf['geometry'].isna()]
hifld_gdf = hifld_gdf[hifld_gdf['BEDS'] > 0]
hcris_gdf = hcris_gdf[hcris_gdf['Total Staffed Beds'] > 0.0]
```
Perform the matching on facilities based on a spatial join and similarity score between address and name columns.
```
id_columns = {
'HCRIS': 'Provider Number',
'HIFLD': 'ID',
'DH': 'OBJECTID'
}
similarity_columns = {
'HCRIS': ['HOSP10_Name', 'Street_Addr'],
'HIFLD': ['NAME', 'ADDRESS'],
'DH': ['HOSPITAL_N', 'HQ_ADDRESS']
}
hcris_filtered_gdf = hcris_gdf[['geometry', id_columns['HCRIS']] + similarity_columns['HCRIS']]
hifld_filtered_gdf = hifld_gdf[['geometry', id_columns['HIFLD']] + similarity_columns['HIFLD']]
dh_filtered_gdf = dh_gdf[['geometry', id_columns['DH']] + similarity_columns['DH']]
joined_dh_hcris, db_dh_hcris = spatial_join_facilities(left=dh_filtered_gdf,
right=hcris_filtered_gdf,
lid_property = id_columns['DH'],
rid_property = id_columns['HCRIS'],
lsimilarity_properties = similarity_columns['DH'],
rsimilarity_properties = similarity_columns['HCRIS'],
similarity_weights=[0.6, 0.4],
distance=1000,
merge_unmatched=False)
joined_dh_hifld, db_dh_hifld = spatial_join_facilities(left=dh_filtered_gdf,
right=hifld_filtered_gdf,
lid_property = id_columns['DH'],
rid_property = id_columns['HIFLD'],
lsimilarity_properties = similarity_columns['DH'],
rsimilarity_properties = similarity_columns['HIFLD'],
similarity_weights=[0.6, 0.4],
distance=150,
merge_unmatched=False)
joined_dh_hcris[~joined_dh_hcris['Provider Number'].isnull()]
joined_dh_hifld[joined_dh_hifld['ID'].isnull()]
dh_gdf[dh_gdf['OBJECTID'] == 6579]
dh_gdf[dh_gdf['OBJECTID'] == 4001]
hcris_gdf[hcris_gdf['State'] == 'WA']
hcris_gdf
dh_gdf
joined_dh_hifld[~joined_dh_hifld['ID'].isnull()]
dh_hifld_df = joined_dh_hifld.merge(dh_gdf, on='OBJECTID')
dh_hifld_df = dh_hifld_df.merge(hifld_gdf, on='ID')
dh_hifld_df['DIFF'] = dh_hifld_df['NUM_LICENS'] - dh_hifld_df['BEDS']
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', 100)
dh_hifld_df[dh_hifld_df['DIFF'] > 50]
dh_hifld_df['DIFF'] = dh_hifld_df['NUM_LICENS'] - dh_hifld_df['BEDS']
# Check DF and HCRIS
dh_hcris_df = joined_dh_hcris.merge(dh_gdf, on='OBJECTID')
dh_hcris_df = dh_hcris_df.merge(hcris_gdf, on='Provider Number', how='left')
dh_hcris_df['DIFF'] = dh_hcris_df['NUM_ICU_BE'] - dh_hcris_df['ICU Total Staffed Beds']
dh_hcris_df[dh_hcris_df['DIFF'] > 50]
matched_hcris = set(dh_hcris_df[~dh_hcris_df['Provider Number'].isnull()]['Provider Number'].values)
total_hcris = set(hcris_gdf['Provider Number'])
len(total_hcris - matched_hcris)
hcris_gdf[hcris_gdf['Provider Number'].isin(total_hcris - matched_hcris)]
dh_gdf[dh_gdf['COUNTY_NAM'] == 'Cullman']
db_dh_hcris[db_dh_hcris['left__OBJECTID'] == 86]
db_dh_hcris[db_dh_hcris['right__Provider Number'] == '010023']
```
#### Attempt using full outer join
```
joined_hcris_hifld, db_hcris_hifld = spatial_join_facilities(left=hifld_filtered_gdf,
right=hcris_filtered_gdf,
lid_property = id_columns['HIFLD'],
rid_property = id_columns['HCRIS'],
lsimilarity_properties = similarity_columns['HIFLD'],
rsimilarity_properties = similarity_columns['HCRIS'],
similarity_weights=[0.6, 0.4],
distance=50)
joined_hifld_dh, db_hifld_dh = spatial_join_facilities(left=hifld_filtered_gdf,
right=dh_filtered_gdf,
lid_property = id_columns['HIFLD'],
rid_property = id_columns['DH'],
lsimilarity_properties = similarity_columns['HIFLD'],
rsimilarity_properties = similarity_columns['DH'],
similarity_weights=[0.6, 0.4],
distance=50)
joined_hcris_dh, db_hcris_dh = spatial_join_facilities(left=hcris_filtered_gdf,
right=dh_filtered_gdf,
lid_property = id_columns['HCRIS'],
rid_property = id_columns['DH'],
lsimilarity_properties = similarity_columns['HCRIS'],
rsimilarity_properties = similarity_columns['DH'],
similarity_weights=[0.6, 0.4],
distance=50)
```
## Match validation
Check the transitive matching between the 3 different joins as a sanity check.
```
matched_hcris_hifld = joined_hcris_hifld.dropna()
matched_hifld_dh = joined_hifld_dh.dropna()
matched_hcris_dh = joined_hcris_dh.dropna()
matched_hcris_hifld_dh = \
matched_hcris_hifld.merge(matched_hifld_dh, how='inner', on=id_columns['HIFLD'])
matched_hcris_hifld_dh_hcris = \
matched_hcris_hifld_dh.merge(matched_hcris_dh, how='inner', on=id_columns['DH'])
matched_hcris_hifld_dh_hcris[matched_hcris_hifld_dh_hcris['Provider Number_x'] != matched_hcris_hifld_dh_hcris['Provider Number_y']]
hcris_gdf[hcris_gdf['Provider Number'] == '673041']
hifld_gdf[hifld_gdf['ID'] == 49175092.0]
dh_gdf[dh_gdf['OBJECTID'] == 5182.0]
hcris_gdf[hcris_gdf['Provider Number'] == '452041']
db_hcris_hifld
db_hcris_hifld[db_hcris_hifld['right__Provider Number'] == '673041']
db_hcris_dh
db_hcris_dh[db_hcris_dh['left__Provider Number'] == '450539']
hcris_gdf[hcris_gdf['Provider Number'] == '362004']
hcris_gdf[hcris_gdf['Provider Number'] == '363037']
dh_gdf[dh_gdf['OBJECTID'] == 5329.0]
dh_gdf[dh_gdf['OBJECTID'] == 5928.0]
matched_hifld_dh
hifld_gdf[hifld_gdf['ID'] == 11036301.0]
hcris_gdf[hcris_gdf['Provider Number'] == '010001']
hcris_gdf[hcris_gdf['Provider Number'] == '012010']
joined_hcris_hifld[joined_hcris_hifld['ID'] == 11036301.0]
x = joined_hcris_hifld.drop_duplicates('ID')
x[x['ID'] == 11036301.0]
matched_hcris_hifld
joined_hcris_hifld
matched_hcris_hifld
matched_hcris_dh
hcris_gdf.columns
hcris_gdf.columns
joined_hcris_hifld
joined_hifld_dh
joined_hcris_dh
dh_gdf.columns
hcris_gdf.count()
hifld_gdf.columns
hifld_gdf[hifld_gdf['ADDRESS'].isnull()]
hifld_gdf.count()
dh_gdf.count()
len(dh_gdf['OBJECTID'].values), len(set(dh_gdf['OBJECTID'].values))
dh_gdf['HOSPITAL_N']
import math
math.isnan('adf')
dh_gdf[dh_gdf['geometry'].isna()]
!pip install FuzzyWuzzy
!pip install python-Levenshtein
```
| github_jupyter |
```
# -*- coding: utf-8 -*-
"""
EVCで変換する.
詳細 : https://pdfs.semanticscholar.org/cbfe/71798ded05fb8bf8674580aabf534c4dbb8bc.pdf
Converting by EVC.
Check detail : https://pdfs.semanticscholar.org/cbfe/71798ded05fb8bf8674580aabf534c4dbb8bc.pdf
"""
from __future__ import division, print_function
import os
from shutil import rmtree
import argparse
import glob
import pickle
import time
import numpy as np
from numpy.linalg import norm
from sklearn.decomposition import PCA
from sklearn.mixture import GMM # sklearn 0.20.0から使えない
from sklearn.preprocessing import StandardScaler
import scipy.signal
import scipy.sparse
%matplotlib inline
import matplotlib.pyplot as plt
import IPython
from IPython.display import Audio
import soundfile as sf
import wave
import pyworld as pw
import librosa.display
from dtw import dtw
import warnings
warnings.filterwarnings('ignore')
"""
Parameters
__Mixtured : GMM混合数
__versions : 実験セット
__convert_source : 変換元話者のパス
__convert_target : 変換先話者のパス
"""
# parameters
__Mixtured = 40
__versions = 'pre-stored0.1.1'
__convert_source = 'input/EJM10/V01/T01/TIMIT/000/*.wav'
__convert_target = 'adaptation/EJF01/V01/T01/ATR503/A/*.wav'
# settings
__same_path = './utterance/' + __versions + '/'
__output_path = __same_path + 'output/EJF01/' # EJF01, EJF07, EJM04, EJM05
Mixtured = __Mixtured
pre_stored_pickle = __same_path + __versions + '.pickle'
pre_stored_source_list = __same_path + 'pre-source/**/V01/T01/**/*.wav'
pre_stored_list = __same_path + "pre/**/V01/T01/**/*.wav"
#pre_stored_target_list = "" (not yet)
pre_stored_gmm_init_pickle = __same_path + __versions + '_init-gmm.pickle'
pre_stored_sv_npy = __same_path + __versions + '_sv.npy'
save_for_evgmm_covarXX = __output_path + __versions + '_covarXX.npy'
save_for_evgmm_covarYX = __output_path + __versions + '_covarYX.npy'
save_for_evgmm_fitted_source = __output_path + __versions + '_fitted_source.npy'
save_for_evgmm_fitted_target = __output_path + __versions + '_fitted_target.npy'
save_for_evgmm_weights = __output_path + __versions + '_weights.npy'
save_for_evgmm_source_means = __output_path + __versions + '_source_means.npy'
for_convert_source = __same_path + __convert_source
for_convert_target = __same_path + __convert_target
converted_voice_npy = __output_path + 'sp_converted_' + __versions
converted_voice_wav = __output_path + 'sp_converted_' + __versions
mfcc_save_fig_png = __output_path + 'mfcc3dim_' + __versions
f0_save_fig_png = __output_path + 'f0_converted' + __versions
converted_voice_with_f0_wav = __output_path + 'sp_f0_converted' + __versions
__measure_target = 'adaptation/EJF01/V01/T01/TIMIT/000/*.wav'
for_measure_target = __same_path + __measure_target
mcd_text = __output_path + __versions + '_MCD.txt'
EPSILON = 1e-8
class MFCC:
    """
    Computes mel-frequency cepstral coefficients (MFCC) from a spectrum and
    converts MFCCs back to a spectrum.
    Dynamic-feature (delta) support is still a work in progress.
    ref : http://aidiary.hatenablog.com/entry/20120225/1330179868
    """
    def __init__(self, frequency, nfft=1026, dimension=24, channels=24):
        """
        Set the analysis parameters.
        nfft : number of FFT sample points
        frequency : sampling frequency
        dimension : number of MFCC dimensions
        channels : number of mel filter-bank channels (tied to dimension)
        fscale : frequency axis (positive half of the FFT bins)
        filterbank, fcenters : filter-bank matrix and band centre frequencies
        """
        self.nfft = nfft
        self.frequency = frequency
        self.dimension = dimension
        self.channels = channels
        self.fscale = np.fft.fftfreq(self.nfft, d = 1.0 / self.frequency)[: int(self.nfft / 2)]
        self.filterbank, self.fcenters = self.melFilterBank()
    def hz2mel(self, f):
        """
        Convert frequency (Hz) to the mel scale.
        """
        return 1127.01048 * np.log(f / 700.0 + 1.0)
    def mel2hz(self, m):
        """
        Convert a mel-scale value back to frequency (Hz).
        """
        return 700.0 * (np.exp(m / 1127.01048) - 1.0)
    def melFilterBank(self):
        """
        Build the mel filter bank: triangular filters centred on a
        mel-spaced frequency grid up to the Nyquist frequency.
        """
        fmax = self.frequency / 2
        melmax = self.hz2mel(fmax)
        nmax = int(self.nfft / 2)
        df = self.frequency / self.nfft
        dmel = melmax / (self.channels + 1)
        melcenters = np.arange(1, self.channels + 1) * dmel
        fcenters = self.mel2hz(melcenters)
        indexcenter = np.round(fcenters / df)
        indexstart = np.hstack(([0], indexcenter[0:self.channels - 1]))
        indexstop = np.hstack((indexcenter[1:self.channels], [nmax]))
        filterbank = np.zeros((self.channels, nmax))
        for c in np.arange(0, self.channels):
            # Rising slope of the triangular filter.
            increment = 1.0 / (indexcenter[c] - indexstart[c])
            # np.int_ casts np.arange's float output ([0. 1. 2. ...]) to int
            for i in np.int_(np.arange(indexstart[c], indexcenter[c])):
                filterbank[c, i] = (i - indexstart[c]) * increment
            # Falling slope of the triangular filter.
            decrement = 1.0 / (indexstop[c] - indexcenter[c])
            # np.int_ casts np.arange's float output ([0. 1. 2. ...]) to int
            for i in np.int_(np.arange(indexcenter[c], indexstop[c])):
                filterbank[c, i] = 1.0 - ((i - indexcenter[c]) * decrement)
        return filterbank, fcenters
    def mfcc(self, spectrum):
        """
        Compute MFCCs from a spectrum (log mel-filter-bank energies + DCT).
        """
        mspec = []
        mspec = np.log10(np.dot(spectrum, self.filterbank.T))
        mspec = np.array(mspec)
        # NOTE(review): relies on scipy.fftpack being importable although only
        # scipy.signal / scipy.sparse are imported at the top of the notebook
        # (another import, e.g. librosa's, may pull it in) — confirm.
        return scipy.fftpack.realtransforms.dct(mspec, type=2, norm="ortho", axis=-1)
    def delta(self, mfcc):
        """
        Compute dynamic features from MFCCs.
        Currently frame t is taken as the mean slope between t-1 and t+1.
        """
        mfcc = np.concatenate([
            [mfcc[0]],
            mfcc,
            [mfcc[-1]]
        ])  # pad by repeating the first frame at the start and the last at the end
        delta = None
        for i in range(1, mfcc.shape[0] - 1):
            slope = (mfcc[i+1] - mfcc[i-1]) / 2
            if delta is None:
                delta = slope
            else:
                delta = np.vstack([delta, slope])
        return delta
    def imfcc(self, mfcc, spectrogram):
        """
        Reconstruct a spectrum from MFCCs (inverse DCT followed by spline
        interpolation from the filter-bank centres onto the full axis).
        """
        im_sp = np.array([])
        for i in range(mfcc.shape[0]):
            # Zero-pad the cepstrum up to the number of filter-bank channels.
            mfcc_s = np.hstack([mfcc[i], [0] * (self.channels - self.dimension)])
            mspectrum = scipy.fftpack.idct(mfcc_s, norm='ortho')
            # splrep fits an interpolating spline through the filter-bank centres
            tck = scipy.interpolate.splrep(self.fcenters, np.power(10, mspectrum))
            # splev evaluates that spline on the full frequency axis
            im_spectrogram = scipy.interpolate.splev(self.fscale, tck)
            im_sp = np.concatenate((im_sp, im_spectrogram), axis=0)
        return im_sp.reshape(spectrogram.shape)
def trim_zeros_frames(x, eps=1e-7):
    """Drop (near-)silent frames from a (T, D) feature matrix.

    A frame is kept when the sum of absolute values across its D
    dimensions exceeds *eps*.

    Bug fix: the original zeroed frame energies against a hard-coded 1e-7
    threshold (`s[s < 1e-7] = 0.`) before comparing against *eps*, so a
    caller-supplied eps smaller than 1e-7 was silently ignored.  The
    zeroing step was also redundant; filtering on *eps* alone is enough.
    """
    T, D = x.shape
    s = np.sum(np.abs(x), axis=1)
    return x[s > eps]
def analyse_by_world_with_harverst(x, fs):
    """
    Analyse a waveform with the WORLD vocoder: fundamental frequency (F0),
    spectral envelope, and aperiodicity.
    F0 is estimated with the Harvest algorithm and then refined with
    StoneMask for better accuracy.
    """
    # 4 Harvest with F0 refinement (using Stonemask)
    frame_period = 5
    _f0_h, t_h = pw.harvest(x, fs, frame_period=frame_period)
    f0_h = pw.stonemask(x, _f0_h, t_h, fs)
    sp_h = pw.cheaptrick(x, f0_h, t_h, fs)
    ap_h = pw.d4c(x, f0_h, t_h, fs)
    return f0_h, sp_h, ap_h
def wavread(file):
    """Read a 16-bit PCM wav file.

    Returns (samples, fs): samples scaled to float64 in [-1, 1) and the
    sampling frequency as a float.

    Fix: the original opened the file without a context manager, leaking
    the handle if readframes() raised; `with` guarantees it is closed.
    """
    with wave.open(file, "r") as wf:
        fs = wf.getframerate()
        frames = wf.readframes(wf.getnframes())
    x = np.frombuffer(frames, dtype="int16") / 32768.0
    return x, float(fs)
def preEmphasis(signal, p=0.97):
    """High-frequency emphasis filter applied before MFCC extraction.

    Passing a waveform through this FIR filter (1 - p*z^-1) boosts its
    high-frequency content.
    """
    coefficients = [1.0, -p]
    return scipy.signal.lfilter(coefficients, 1, signal)
def alignment(source, target, path):
    """
    Time-align *target* to *source* along a DTW *path*.

    path[0] holds the source-side indices and path[1] the target-side
    indices of the warping path.  The target frames are re-sampled so the
    result has the same shape as *source*.
    """
    # Align to the source length (e.g. its 814 frames), not the target's.
    # p_p = 0 if source.shape[0] > target.shape[0] else 1
    #shapes = source.shape if source.shape[0] > target.shape[0] else target.shape
    shapes = source.shape
    align = np.array([])
    for (i, p) in enumerate(path[0]):
        if i != 0:
            # Append a target frame only when the source-side index advances,
            # so each source frame maps to exactly one target frame.
            if j != p:
                temp = np.array(target[path[1][i]])
                align = np.concatenate((align, temp), axis=0)
        else:
            # First step of the path: always take the first matched frame.
            temp = np.array(target[path[1][i]])
            align = np.concatenate((align, temp), axis=0)
        j = p  # remember the previous source-side index
    return align.reshape(shapes)
covarXX = np.load(save_for_evgmm_covarXX)
covarYX = np.load(save_for_evgmm_covarYX)
fitted_source = np.load(save_for_evgmm_fitted_source)
fitted_target = np.load(save_for_evgmm_fitted_target)
weights = np.load(save_for_evgmm_weights)
source_means = np.load(save_for_evgmm_source_means)
"""
声質変換に用いる変換元音声と目標音声を読み込む.
"""
timer_start = time.time()
source_mfcc_for_convert = []
source_sp_for_convert = []
source_f0_for_convert = []
source_ap_for_convert = []
fs_source = None
for name in sorted(glob.iglob(for_convert_source, recursive=True)):
print("source = ", name)
x_source, fs_source = sf.read(name)
f0_source, sp_source, ap_source = analyse_by_world_with_harverst(x_source, fs_source)
mfcc_source = MFCC(fs_source)
#mfcc_s_tmp = mfcc_s.mfcc(sp)
#source_mfcc_for_convert = np.hstack([mfcc_s_tmp, mfcc_s.delta(mfcc_s_tmp)])
source_mfcc_for_convert.append(mfcc_source.mfcc(sp_source))
source_sp_for_convert.append(sp_source)
source_f0_for_convert.append(f0_source)
source_ap_for_convert.append(ap_source)
target_mfcc_for_fit = []
target_f0_for_fit = []
target_ap_for_fit = []
for name in sorted(glob.iglob(for_convert_target, recursive=True)):
print("target = ", name)
x_target, fs_target = sf.read(name)
f0_target, sp_target, ap_target = analyse_by_world_with_harverst(x_target, fs_target)
mfcc_target = MFCC(fs_target)
#mfcc_target_tmp = mfcc_target.mfcc(sp_target)
#target_mfcc_for_fit = np.hstack([mfcc_t_tmp, mfcc_t.delta(mfcc_t_tmp)])
target_mfcc_for_fit.append(mfcc_target.mfcc(sp_target))
target_f0_for_fit.append(f0_target)
target_ap_for_fit.append(ap_target)
# 全部numpy.arrrayにしておく
source_data_mfcc = np.array(source_mfcc_for_convert)
source_data_sp = np.array(source_sp_for_convert)
source_data_f0 = np.array(source_f0_for_convert)
source_data_ap = np.array(source_ap_for_convert)
target_mfcc = np.array(target_mfcc_for_fit)
target_f0 = np.array(target_f0_for_fit)
target_ap = np.array(target_ap_for_fit)
print("Load Input and Target Voice time = ", time.time() - timer_start , "[sec]")
def convert(source, covarXX, fitted_source, fitted_target, covarYX, weights, source_means):
    """
    Convert a single source feature frame with the (EV-)GMM.

    E[m] is the conditional mean of the target features given *source*
    under mixture component m; the returned frame is the posterior-
    weighted sum of those conditional means.
    """
    Mixtured = 40
    D = source.shape[0]
    E = np.zeros((Mixtured, D))
    for m in range(Mixtured):
        # Solve covarXX[m] @ xx = (source - mean) instead of inverting.
        xx = np.linalg.solve(covarXX[m], source - fitted_source[m])
        E[m] = fitted_target[m] + np.dot(covarYX[m], xx)
    # Posterior p(m | source) from a GMM over the source space.
    # NOTE(review): sklearn.mixture.GMM was removed after scikit-learn 0.20
    # (see the import comment at the top) — this cell needs an old sklearn.
    px = GMM(n_components = Mixtured, covariance_type = 'full')
    px.weights_ = weights
    px.means_ = source_means
    px.covars_ = covarXX
    posterior = px.predict_proba(np.atleast_2d(source))
    return np.dot(posterior, E)
def calc_std_mean(input_f0):
    """Standard deviation and mean of log-F0 over voiced frames.

    Unvoiced frames (F0 == 0) are excluded before taking the logarithm,
    which avoids log(0) = -inf.  Returns (std, mean) of log(F0), used for
    the F0 conversion.
    """
    voiced = input_f0[input_f0 > 0]
    log_f0 = np.log(voiced)
    return np.std(log_f0), np.mean(log_f0)
"""
距離を測るために,正しい目標音声を読み込む
"""
source_mfcc_for_measure_target = []
source_sp_for_measure_target = []
source_f0_for_measure_target = []
source_ap_for_measure_target = []
for name in sorted(glob.iglob(for_measure_target, recursive=True)):
print("measure_target = ", name)
x_measure_target, fs_measure_target = sf.read(name)
f0_measure_target, sp_measure_target, ap_measure_target = analyse_by_world_with_harverst(x_measure_target, fs_measure_target)
mfcc_measure_target = MFCC(fs_measure_target)
#mfcc_s_tmp = mfcc_s.mfcc(sp)
#source_mfcc_for_convert = np.hstack([mfcc_s_tmp, mfcc_s.delta(mfcc_s_tmp)])
source_mfcc_for_measure_target.append(mfcc_measure_target.mfcc(sp_measure_target))
source_sp_for_measure_target.append(sp_measure_target)
source_f0_for_measure_target.append(f0_measure_target)
source_ap_for_measure_target.append(ap_measure_target)
measure_target_data_mfcc = np.array(source_mfcc_for_measure_target)
measure_target_data_sp = np.array(source_sp_for_measure_target)
measure_target_data_f0 = np.array(source_f0_for_measure_target)
measure_target_data_ap = np.array(source_ap_for_measure_target)
def calc_mcd(source, convert, target):
    """
    Mel-cepstral distortion (MCD) between converted and target features.

    DTW is run between the pre-conversion *source* and *target*; the target
    is then warped onto the source timeline, and the distortion between
    *convert* and that aligned target is returned together with the
    alignment itself.
    """
    dist, cost, acc, path = dtw(source, target, dist=lambda x, y: norm(x-y, ord=1))
    aligned = alignment(source, target, path)
    # NOTE(review): this sums squared differences over all frames without
    # dividing by the frame count — confirm the intended MCD definition.
    return 10.0 / np.log(10) * np.sqrt(2 * np.sum(np.square(aligned - convert))), aligned
"""
変換を行う.
"""
timer_start = time.time()
# 事前に目標話者の標準偏差と平均を求めておく
temp_f = None
for x in range(len(target_f0)):
temp = target_f0[x].flatten()
if temp_f is None:
temp_f = temp
else:
temp_f = np.hstack((temp_f, temp))
target_std, target_mean = calc_std_mean(temp_f)
# 変換
output_mfcc = []
filer = open(mcd_text, 'a')
for i in range(len(source_data_mfcc)):
print("voice no = ", i)
# convert
source_temp = source_data_mfcc[i]
output_mfcc = np.array([convert(source_temp[frame], covarXX, fitted_source, fitted_target, covarYX, weights, source_means)[0] for frame in range(source_temp.shape[0])])
# syntehsis
source_sp_temp = source_data_sp[i]
source_f0_temp = source_data_f0[i]
source_ap_temp = source_data_ap[i]
output_imfcc = mfcc_source.imfcc(output_mfcc, source_sp_temp)
y_source = pw.synthesize(source_f0_temp, output_imfcc, source_ap_temp, fs_source, 5)
np.save(converted_voice_npy + "s{0}.npy".format(i), output_imfcc)
sf.write(converted_voice_wav + "s{0}.wav".format(i), y_source, fs_source)
# calc MCD
measure_temp = measure_target_data_mfcc[i]
mcd, aligned_measure = calc_mcd(source_temp, output_mfcc, measure_temp)
filer.write("MCD No.{0} = {1} , shape = {2}\n".format(i, mcd, source_temp.shape))
# save figure spectram
range_s = output_imfcc.shape[0]
scale = [x for x in range(range_s)]
MFCC_sample_s = [source_temp[x][0] for x in range(range_s)]
MFCC_sample_c = [output_mfcc[x][0] for x in range(range_s)]
MFCC_sample_t = [aligned_measure[x][0] for x in range(range_s)]
plt.subplot(311)
plt.plot(scale, MFCC_sample_s, label="source", linewidth = 1.0)
plt.plot(scale, MFCC_sample_c, label="convert", linewidth = 1.0)
plt.plot(scale, MFCC_sample_t, label="target", linewidth = 1.0, linestyle="dashed")
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=3, mode="expand", borderaxespad=0.)
#plt.xlabel("Flame")
#plt.ylabel("amplitude MFCC")
MFCC_sample_s = [source_temp[x][1] for x in range(range_s)]
MFCC_sample_c = [output_mfcc[x][1] for x in range(range_s)]
MFCC_sample_t = [aligned_measure[x][1] for x in range(range_s)]
plt.subplot(312)
plt.plot(scale, MFCC_sample_s, label="source", linewidth = 1.0)
plt.plot(scale, MFCC_sample_c, label="convert", linewidth = 1.0)
plt.plot(scale, MFCC_sample_t, label="target", linewidth = 1.0, linestyle="dashed")
plt.ylabel("amplitude MFCC")
MFCC_sample_s = [source_temp[x][2] for x in range(range_s)]
MFCC_sample_c = [output_mfcc[x][2] for x in range(range_s)]
MFCC_sample_t = [aligned_measure[x][2] for x in range(range_s)]
plt.subplot(313)
plt.plot(scale, MFCC_sample_s, label="source", linewidth = 1.0)
plt.plot(scale, MFCC_sample_c, label="convert", linewidth = 1.0)
plt.plot(scale, MFCC_sample_t, label="target", linewidth = 1.0, linestyle="dashed")
plt.xlabel("Flame")
plt.savefig(mfcc_save_fig_png + "s{0}.png".format(i) , format='png', dpi=300)
plt.close()
# synthesis with conveted f0
source_std, source_mean = calc_std_mean(source_f0_temp)
std_ratio = target_std / source_std
log_conv_f0 = std_ratio * (source_f0_temp - source_mean) + target_mean
conv_f0 = np.maximum(log_conv_f0, 0)
np.save(converted_voice_npy + "f{0}.npy".format(i), conv_f0)
y_conv = pw.synthesize(conv_f0, output_imfcc, source_ap_temp, fs_source, 5)
sf.write(converted_voice_with_f0_wav + "sf{0}.wav".format(i) , y_conv, fs_source)
# save figure f0
F0_s = [source_f0_temp[x] for x in range(range_s)]
F0_c = [conv_f0[x] for x in range(range_s)]
plt.plot(scale, F0_s, label="source", linewidth = 1.0)
plt.plot(scale, F0_c, label="convert", linewidth = 1.0)
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=2, mode="expand", borderaxespad=0.)
plt.xlabel("Frame")
plt.ylabel("Amplitude")
plt.savefig(f0_save_fig_png + "f{0}.png".format(i), format='png', dpi=300)
plt.close()
filer.close()
print("Make Converted Spectram time = ", time.time() - timer_start , "[sec]")
```
| github_jupyter |
```
%matplotlib inline
```
Computing a covariance matrix
=============================
Many methods in MNE, including source estimation and some classification
algorithms, require covariance estimations from the recordings.
In this tutorial we cover the basics of sensor covariance computations and
construct a noise covariance matrix that can be used when computing the
minimum-norm inverse solution.
```
import os.path as op
import mne
from mne.datasets import sample
```
Let's read in the raw data
```
data_path = sample.data_path()
raw_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(raw_fname)
raw.set_eeg_reference('average', projection=True)
raw.info['bads'] += ['EEG 053'] # bads + 1 more
```
... and then the empty room data
```
raw_empty_room_fname = op.join(
data_path, 'MEG', 'sample', 'ernoise_raw.fif')
raw_empty_room = mne.io.read_raw_fif(raw_empty_room_fname)
```
Warning: processing pipeline for noise covariance calculation must match actual data!
- Filtering
- ICA, SSP
```
raw_empty_room.info['bads'] = [
bb for bb in raw.info['bads'] if 'EEG' not in bb]
raw_empty_room.add_proj(
[pp.copy() for pp in raw.info['projs'] if 'EEG' not in pp['desc']])
```
# What is noise?
- empty room measurement
- resting state (e.g., in the case of evoked)
tmin and tmax for delimiting part of recording as noise
Using empty room measurement
```
noise_cov = mne.compute_raw_covariance(
raw_empty_room, tmin=0, tmax=None)
```
Now that you have the covariance matrix in an MNE-Python object you can
save it to a file with `mne.write_cov`. Later you can read it back
using `mne.read_cov`.
Using resting state measurement
```
events = mne.find_events(raw)
epochs = mne.Epochs(raw, events, event_id=1, tmin=-0.2, tmax=0.5,
baseline=(-0.2, 0.0), decim=3, # we'll decimate for speed
verbose='error') # and ignore the warning about aliasing
```
We use baseline correction so that noise is zero mean.
```
noise_cov_baseline = mne.compute_covariance(epochs, tmax=0)
```
Note that this method also attenuates any activity in your
source estimates that resemble the baseline, if you like it or not.
Plot the covariance matrices
----------------------------
Try setting proj to False to see the effect. Notice that the projectors in
epochs are already applied, so ``proj`` parameter has no effect.
```
noise_cov.plot(raw_empty_room.info, proj=True);
noise_cov_baseline.plot(epochs.info, proj=True);
```
How should I regularize the covariance matrix?
----------------------------------------------
The estimated covariance can be numerically
unstable and tends to induce correlations between estimated source amplitudes
and the number of samples available.
In MNE-Python, regularization is done using advanced regularization methods
described in [1]. For this the 'auto' option can be used. With this
option cross-validation will be used to learn the optimal regularization:
```
noise_cov_reg = mne.compute_covariance(epochs, tmax=0., method='auto',
rank=None)
```
This procedure evaluates the noise covariance quantitatively by how well it
whitens the data using the
negative log-likelihood of unseen data. The final result can also be visually
inspected.
Under the assumption that the baseline does not contain a systematic signal
(time-locked to the event of interest), the whitened baseline signal should
follow a multivariate Gaussian distribution, i.e.,
whitened baseline signals should be between -1.96 and 1.96 at a given time
sample.
Based on the same reasoning, the expected value for the global field power
(GFP) is 1
```
evoked = epochs.average()
evoked.plot_white(noise_cov_reg, time_unit='s', verbose=False);
```
This plot displays both, the whitened evoked signals for each channels and
the whitened GFP. The numbers in the GFP panel represent the estimated rank
of the data, which amounts to the effective degrees of freedom by which the
squared sum across sensors is divided when computing the whitened GFP.
The whitened GFP also helps detecting spurious late evoked components which
can be the consequence of over- or under-regularization.
Now let's compare two regularization methods
```
noise_covs = mne.compute_covariance(
epochs, tmax=0., method=('empirical', 'shrunk'), return_estimators=True,
rank=None)
evoked.plot_white(noise_covs, time_unit='s', verbose=False);
```
This will plot the whitened evoked for the optimal estimator and display the
GFPs for all estimators as separate lines in the related panel.
Finally, let's have a look at the difference between empty room and
event related covariance.
```
evoked_meg = evoked.copy().pick_types(meg=True, eeg=False)
noise_cov_meg = mne.pick_channels_cov(noise_cov_baseline, evoked_meg.ch_names)
noise_cov['method'] = 'empty_room'
noise_cov_meg['method'] = 'baseline'
evoked_meg.plot_white([noise_cov_meg, noise_cov], time_unit='s', verbose=False);
```
Based on the negative log likelihood, the baseline covariance seems more appropriate.
References
----------
[1] Engemann D. and Gramfort A. (2015) Automated model selection in
covariance estimation and spatial whitening of MEG and EEG signals,
vol. 108, 328-342, NeuroImage.
| github_jupyter |
5 Regression on real dataset: air pollution mapping
-------------------
The power of *machine learning* for **regression** can only be manifested by its application to real dataset. For instance, you will never know why the **Random Forest** is such a popular algorithm and how it outperforms the other approaches.
In this challenge, you are going to work with GIS point dataset, where air pollutant NO<sub>2</sub> and some potential predictors (indicators) are provided. While acquiring point measurement of NO<sub>2</sub> at each station, indicators such as road length and population size within a buffer of the station is also obtained. Specifically you will have weekday daytime and nighttime NO<sub>2</sub> point measurements at several stations (point). Road length and population size around each station (point) is indicated with the buffer size. For instance, 'populaiton_3000m' means population size within the 3000m buffer zone around each stations (point).
You will work with the well-known 'pandas' module to handle table-based datasets. You will also learn how to study the functionality of a module by exploring its online open-source documentation.
```
%matplotlib inline # Again, we need to inform this notebook to plot figures in lines, you can ignore this code
# Also, before reading the data you need to first clone the data on Github to our Colab workspace
!git clone https://github.com/jonwangio/uu_ml/tree/main/data
```
### 5.0 Data and preprocessing
```
# First, import the 'pandas' module
import pandas as pd
# Read the *.csv based GIS point dataset using 'pandas'
data_all = pd.read_csv('data/no2.csv') # alternative path: 'Programming-Basics/data/no2.csv'
# Print(data_no2)
data_all.head(5) # Shows the first 5 rows of the DataFrame
# A pandas DataFrame gives an overview of the variable (column) names
data_all.columns
# Extract the daytime NO2 data as a column (a pandas Series)
# Try to figure out what the data type means
daytime_no2 = data_all['weekday_daytime_no2']
# You can further extract the values of the pandas column as a NumPy array. Handy!
no2_val = daytime_no2.values
print(no2_val.shape)
# Or you can extract values from multiple columns at once
# (columns from index 5 onward hold the predictor variables)
variables = data_all.iloc[:,5:].values
print(variables[:5,:]) # Visualize the first 5 rows of the data
```
### Short exercise 5.0
Before analyzing the data, it is always good to examine the data visually. There can be already some visible patterns in the data that can help you to make some assumptions and build models.
```
# A simple way to visualize data is to plot scatter plots and examine one-by-one
# how variables correlate with each other.
# Please try to define a function that helps you visualize this.
# (The answer below is kept inside a triple-quoted string so it is not executed.)
'''
# Answers and hints
def no2_scatter(name1, name2, data):
series1 = data[name1]
series2 = data[name2]
val1 = series1.values
val2 = series2.values
plt.plot(val1, val2,'o')
plt.title('scatter of'+name1+name2)
plt.show()
'''
# Try to call the function to see if it works.
'''
# Answers and hints
no2_scatter('weekday_daytime_no2', 'roadlength_1000m', data_all)
'''
```
Please also visualize simple statistics of the input data, such as the probabilistic distribution of the data. For instance, the following codes provide you a hint to examine data in the column *population_1000m*.
```
from matplotlib import pyplot as plt # Import module for plotting and visualization
import numpy as np
# Pull the population-within-1000m column out as a NumPy array
pop1000 = data_all['population_1000m'].values
print(type(pop1000))
# Summary statistics: min, max, mean, standard deviation
print(np.min(pop1000), np.max(pop1000), np.mean(pop1000), np.std(pop1000))
# Knowing the rough distribution of the data, you may have an idea how to design a histogram to see the details.
# Now you can define the histogram bins from the rough range and step suggested by the mean and standard deviation obtained above.
# Try to examine the statistical distribution of other columns too.
my_bin = list(np.arange(start=0.0,stop=21000.0,step=1000))  # bin edges: 0, 1000, ..., 20000
plt.hist(pop1000, bins = my_bin)
plt.title("histogram")
plt.show()
```
### Short exercise 5.1
Now recall what you experimented with in section 0, and try to fit a simple linear model between any one of the variables and the NO2 data.
```
# Answers and hints
# Define a function to fit a linear model to the dummy data.
def fit(x, y):
    """Fit an ordinary-least-squares linear model M(x) = θ1 + θ2·x.

    Parameters
    ----------
    x : (N, D) np.ndarray of predictor values
    y : (N,) np.ndarray of observed responses

    Returns
    -------
    theta : (D+1,) np.ndarray — intercept followed by slope coefficient(s)
    var : float — mean squared residual (noise-variance estimate)
    """
    # Design matrix X = [1, x]; the leading column of ones models the intercept θ1
    X_mat = np.hstack((np.ones(len(x)).reshape(-1, 1), x))
    # Solve θ* = argmin ||y - X @ θ||²_2 directly with a least-squares solver.
    # IMPROVEMENT: np.linalg.lstsq is numerically stabler than forming and
    # explicitly inverting X'X (the normal equations square the condition
    # number of X and fail outright when X'X is singular).
    theta, *_ = np.linalg.lstsq(X_mat, y, rcond=None)
    # Residual variance: mean squared deviation of the fitted line from y
    var = np.mean(np.square(X_mat @ theta - y))
    return theta, var
def predict(x, theta, var, return_std:bool=False):
    """Evaluate the fitted linear model at new inputs.

    Builds the same design matrix [1, x] used by ``fit`` and returns the
    predictions X @ theta; when ``return_std`` is True, also returns a
    constant predictive standard deviation sqrt(var) broadcast to the
    prediction's shape.
    """
    design = np.hstack((np.ones(len(x)).reshape(-1, 1), x))
    prediction = design @ theta
    if not return_std:
        return prediction
    spread = np.zeros_like(prediction) + np.sqrt(var)
    return prediction, spread
column_name = 'population_3000m' # Select data column to regress NO2 against
# Fit NO2 on the chosen predictor; weights = [intercept, slope]
weights, variance = fit(data_all[column_name].values.reshape(-1,1), data_all['weekday_daytime_no2'].values)
# Dense grid over the predictor's observed range for drawing the fitted line
x_all = np.linspace(data_all[column_name].values.min(), data_all[column_name].values.max(), 500).reshape(-1,1)
no2_predict = predict(x_all, weights, variance)
plt.figure(figsize=(12,5))
plt.xlim([data_all[column_name].values.min(), data_all[column_name].values.max()])
plt.ylim([data_all['weekday_daytime_no2'].values.min(), data_all['weekday_daytime_no2'].values.max()])
plt.title('Data fitted')
plt.plot(data_all[column_name].values, data_all['weekday_daytime_no2'].values,'kx',label='training data')
plt.plot(x_all, no2_predict, '--g', linewidth=.5, label='predicted NO2-population curve')
plt.legend()
# Try to further explore how each variable correlates to the NO2
```
### Short exercise 5.2
Recall linear regression examples in section 1. Please implement **regression** between NO<sub>2</sub> and population as well as road network configuration. Compare how your model perform by using the **training** and **validation/test** manner. Try to interpret the figure plotted.
In the below example, all **variables** are used for a **regularized** regression. You can start your experiment by using one or few of them.
```
# This time, use sklearn modules for data preparation as well
from sklearn.linear_model import Lasso, Ridge
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import StandardScaler
# Extract all independent variables/features
x = data_all.iloc[:,5:].values
# Daytime NO2 as the dependent variable; here you use daytime NO2, but also try to experiment with nighttime values.
y = data_all['weekday_daytime_no2'].values.reshape(-1,1) # Different way of extraction: column name vs. '.iloc'
# Prepare training and test set
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=40)
# As the input variables span a very large range, roughly 10^2~10^4, it is better to standardize the data before use.
# Standardize the independent variables using the sklearn functionality
sc = StandardScaler() # Initiate data standardization functionality
x_train = sc.fit_transform(x_train) # Fit the scaler on training data; apply it to both sets
x_test = sc.transform(x_test)
# Inspect model performance for a range of regularization strengths
RMSE_test = []
RMSE_train = []
for i in range(0, 200, 5):
    # Configure model with different regularization parameter (alpha = i/100, i.e. 0.00 .. 1.95)
    # Also try to experiment with Ridge regression
    # NOTE(review): sklearn discourages Lasso with alpha=0 (the i=0 iteration) — confirm intent
    model = Lasso(alpha=i/100)
    # Then, fit this model to the data.
    model.fit(x_train, y_train)
    RMSE_train.append(np.sqrt(mean_squared_error(y_train, model.predict(x_train))))
    RMSE_test.append(np.sqrt(mean_squared_error(y_test, model.predict(x_test))))
# Plot model performance with different regularization parameter
# NOTE(review): the model fitted above is Lasso, but the title below says 'Ridge' — confirm which is intended
plt.figure(figsize=(12,5))
plt.plot(range(1,len(RMSE_test)+1), RMSE_test,'-or', linewidth=.5, label='Test_Data')
plt.plot(range(1,len(RMSE_train)+1), RMSE_train, '-*g', linewidth=.5, label='Train_Data')
plt.legend()
plt.title('RMSE with respect to models with different Ridge parameter')
plt.xlabel('Model instances')
plt.ylabel('RMSE')
y  # NOTE(review): stray expression — merely displays y in the notebook; likely leftover
# Try to experiment with Ridge with differrent regularization parameter.
```
### Short exercise 5.3
Now please implement the **Decision Tree** and **Random Forest** algorithms and apply them to the NO<sub>2</sub> **regression** task. Fine-tune the model parameters such as the tree depth and number of trees to see how the models perform in comparison to the above *more conventional* **regression** approaches. And which **variable**/**feature** is the most important one for tree/forest based approaches?
```
# You will utilize the Random Forest module provided by 'sklearn'
# There are also many preprocessing modules provided in 'sklearn', such as splitting a dataset for training and validation/testing
from sklearn.model_selection import train_test_split
# First extract all independent variables/features
x = data_all.iloc[:,5:].values
# Daytime NO2 as the dependent variable
y = data_all['weekday_daytime_no2'].values.reshape(-1,1) # Different way of extraction: column name vs. '.iloc'
# Split data into training and test set
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=0)
# Random Forest regression
from sklearn.ensemble import RandomForestRegressor
model_rf = RandomForestRegressor(n_estimators=100)
# NOTE(review): y_train has shape (n, 1); sklearn emits a DataConversionWarning and
# expects a 1-D target (y_train.ravel()) — confirm before relying on the warning-free run
model_rf.fit(x_train, y_train)
y_pred = model_rf.predict(x_test)
# Model evaluation on the held-out test set
from sklearn import metrics
print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred))
print('Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred))
print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
# Variable/feature importance (impurity-based, one score per input column)
importance = model_rf.feature_importances_
# summarize feature importance
feature_name = data_all.columns[5:]
for i,v in enumerate(importance):
    print('Feature: %0d, Score: %.5f' % (i,v))
# plot feature importance
plt.bar(feature_name, importance)
plt.xticks(rotation='vertical')
plt.show()
```
As you have already noticed in *section 5.0*, data in different columns of the original dataset can have significantly different distributions. If we again consider each **variable** as a **feature**, some **features** may be compressed into a narrow range, so it is better to standardize the **variables**/**features** so that all of them are on the same scale before feeding them into *machine learning* algorithms. Let's apply standardization this time to see if improvements can be achieved. How do you interpret the results?
```
# Hint: 'sklearn' also provides modules for preprocessing your data, such as standardization,
# and for training/validation splitting.
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
# First extract all independent variables/features
x = data_all.iloc[:,5:].values
# Daytime NO2 as the dependent variable
y = data_all['weekday_daytime_no2'].values.reshape(-1,1) # Different way of extraction: column name vs. '.iloc'
# Split data into training and test set
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=0)
# Standardize the independent variables
sc = StandardScaler() # Initiate data standardization functionality
x_train = sc.fit_transform(x_train) # Fit the scaler on training data; apply it to both sets
x_test = sc.transform(x_test)
# Random Forest regression (random_state fixed this time for reproducibility)
from sklearn.ensemble import RandomForestRegressor
model_rf = RandomForestRegressor(n_estimators=100, random_state=0)
model_rf.fit(x_train, y_train)
y_pred = model_rf.predict(x_test)
# Model evaluation
# NOTE(review): tree ensembles split on thresholds, so monotonic feature scaling is
# expected to change results little — confirm against the unscaled run above
from sklearn import metrics
print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred))
print('Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred))
print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
# Variable/feature importance
importance = model_rf.feature_importances_
# summarize feature importance
feature_name = data_all.columns[5:]
for i,v in enumerate(importance):
    print('Feature: %0d, Score: %.5f' % (i,v))
# plot feature importance
plt.bar(feature_name, importance)
plt.xticks(rotation='vertical')
plt.show()
```
| github_jupyter |
```
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import seaborn as sns  # NOTE(review): duplicate import of seaborn (harmless)
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
import numpy as np
import os
# Mount Google Drive and switch into the folder that holds the training CSV/XLSX files
from google.colab import drive
drive.mount('/content/drive')
os.chdir("/content/drive/MyDrive/TrainData_6012021/TrainData_6012021/TrainData")
```
**Reading of data:**
```
#customer_df = pd.read_csv('Customerdata_Train.csv')
# Customer master data (one row per customer)
customer_df = pd.read_excel('Customerdata_Train.xlsx')
print(customer_df.shape)
customer_df.head()
customer_df.DateOfRegistration.dtypes
# Email interaction records
email_df = pd.read_csv('Emaildata_Train.csv')
print(email_df.shape)
email_df.head()
email_df.DateOfemail.dtypes
# Training labels / base table
train_df = pd.read_csv('Train.csv')
print(train_df.shape)
train_df.head()
# Transaction (order) records
transaction_df = pd.read_csv('transactionsdata_Train.csv')
print(transaction_df.shape)
transaction_df.head()
```
Merging the Data into one common dataframe based on "Customer ID":
```
# Left-join the tables onto the customer master data, one step at a time.
# Left joins keep every customer even when a satellite table has no match.
df1 = pd.merge(customer_df, train_df, on = 'CustomerID', how = 'left')
print(df1.shape)
df1.head()
df2 = pd.merge(df1, email_df, on = 'CustomerID', how = 'left')
print(df2.shape)
df2.head()
# NOTE(review): one-to-many joins multiply rows per customer — verify df3.shape against expectations
df3 = pd.merge(df2, transaction_df , on = 'CustomerID', how = 'left')
print(df3.shape)
df3.head()
```
We can see there are 2 timestamp columns (suffixed `_x` and `_y` by the merges, belonging to the order and email tables respectively), so we will concatenate each with its corresponding date column:
```
# Merge suffix _x came from the order table: fuse date + time into one datetime column
if 'timestamp_x' in df3.columns:
    df3['DateOfOrder'] = df3['DateOfOrder']+' '+ df3['timestamp_x']
    df3['DateOfOrder'] = pd.to_datetime(df3['DateOfOrder'])
    del df3['timestamp_x']
# Merge suffix _y came from the email table: same treatment
if 'timestamp_y' in df3.columns:
    df3['DateOfemail'] = df3['DateOfemail']+' '+ df3['timestamp_y']
    df3['DateOfemail'] = pd.to_datetime(df3['DateOfemail'])
    del df3['timestamp_y']
# Quick sanity checks: top-10 most frequent customers, dtypes, missing values
df3['CustomerID'].value_counts()[:10].to_dict()
df3.dtypes
df3.isnull().sum()
```
No missing values found in the data.
Now, let's plot the continuous (integer) variables:
```
# Collect the integer-typed columns and box-plot them to spot outliers
column_datatypes = df3.dtypes
continuous_columns = list(column_datatypes[column_datatypes=="int64"].index.values)
sns.boxplot(data=df3[continuous_columns],orient='h')
# Compare upper quantiles of OrderQuantity to gauge how extreme the maximum is
Q90 = df3['OrderQuantity'].quantile(0.90)
Q90
Q100 = df3['OrderQuantity'].quantile(1)
Q99 = df3['OrderQuantity'].quantile(0.99)
print(Q100)
print(Q99)
```
We can see there is an outlier in "OrderQuantity"; in the remaining columns the values are just 0s and 1s.
```
# Cap the single maximum value of OrderQuantity at the 99th percentile (winsorize the top)
df3['OrderQuantity'] = np.where(df3['OrderQuantity']==df3['OrderQuantity'].quantile(1),df3['OrderQuantity'].quantile(0.99),df3['OrderQuantity'])
df3['OrderQuantity'].quantile(1)  # confirm the new maximum
sns.boxplot(data=df3[continuous_columns],orient='h')
# Verify the binary flag columns only contain the expected values
df3['AutomaticRefill'].unique()
df3['DoorstepDelivery'].unique()
df3['OnlineCommunication'].unique()
```
There is an outlier
```
#plt.hist(df3['City'])
#df3['DateOfRegistration'] = pd.to_datetime(df3['DateOfRegistration'], unit='ns')
df3['DateOfRegistration'].dtypes
# Re-derive the column groups after the cleaning steps above
column_datatypes = df3.dtypes
categorical_columns = list(column_datatypes[column_datatypes=="object"].index.values)
continuous_columns = list(column_datatypes[column_datatypes=="int64"].index.values)
categorical_columns
# Frequency tables for each categorical column
df3[categorical_columns[1]].value_counts()
df3[categorical_columns[2]].value_counts()
df3[categorical_columns[3]].value_counts()
df3[categorical_columns[4]].value_counts()
# Cross-tabulations of the target against the email funnel columns
df3.groupby(['RetentionPotential','EmailType'])['CustomerID'].count()
df3[categorical_columns[5]].value_counts()
df3[categorical_columns[6]].value_counts()
df3.groupby(['RetentionPotential','EmailType','MailOpened'])['CustomerID'].count()
df3.groupby(['RetentionPotential','EmailType','MailOpened','MailClicked'])['CustomerID'].count()
df3[df3['MailClicked']=='no'].groupby(['RetentionPotential','EmailType','MailOpened','MailClicked'])['CustomerID'].count()
df3[df3['MailClicked']=='yes'].groupby(['RetentionPotential','EmailType','MailOpened','MailClicked'])['CustomerID'].count()
df3[df3['MailClicked']=='NotApplicable'].groupby(['RetentionPotential','EmailType','MailOpened','MailClicked'])['CustomerID'].count()
# Recode 'NotApplicable' clicks as 'yes'
# NOTE(review): this mapping is a modelling choice — confirm the business rule behind it
df3['MailClicked'] = np.where(df3['MailClicked']=="NotApplicable", "yes",df3['MailClicked'])
df3['MailClicked'].unique()
df3.groupby(['RetentionPotential','EmailType','MailOpened','MailClicked'])['CustomerID'].count()
# Opened-but-not-clicked is also recoded as clicked
df3['MailClicked'] = np.where((df3['MailOpened']=="yes") & (df3['MailClicked']=="no"), "yes",df3['MailClicked'])
df3.groupby(['RetentionPotential','EmailType','MailOpened','MailClicked'])['CustomerID'].count()
df3.DateOfRegistration.dtype
if 'DateOfRegistration' in df3.columns:
df3['registration_year'] = df3['DateOfRegistration'].dt.year
df3['registration_month'] = df3['DateOfRegistration'].dt.month
df3['registration_week'] = df3['DateOfRegistration'].dt.week
df3['registration_day'] = df3['DateOfRegistration'].dt.day
df3['registration_hour'] = df3['DateOfRegistration'].dt.hour
df3['registration_minute'] = df3['DateOfRegistration'].dt.minute
df3['registration_weekday'] = df3['DateOfRegistration'].dt.weekday
del df3['DateOfRegistration']
if 'DateOfOrder' in df3.columns:
df3['Order_year'] = df3['DateOfOrder'].dt.year
df3['Order_month'] = df3['DateOfOrder'].dt.month
df3['Order_week'] = df3['DateOfOrder'].dt.week
df3['Order_day'] = df3['DateOfOrder'].dt.day
df3['Order_hour'] = df3['DateOfOrder'].dt.hour
df3['Order_minute'] = df3['DateOfOrder'].dt.minute
df3['Order_weekday'] = df3['DateOfOrder'].dt.weekday
del df3['DateOfOrder']
if 'DateOfemail' in df3.columns:
df3['Email_year'] = df3['DateOfemail'].dt.year
df3['Email_month'] = df3['DateOfemail'].dt.month
df3['Email_week'] = df3['DateOfemail'].dt.week
df3['Email_day'] = df3['DateOfemail'].dt.day
df3['Email_hour'] = df3['DateOfemail'].dt.hour
df3['Email_minute'] = df3['DateOfemail'].dt.minute
df3['Email_weekday'] = df3['DateOfemail'].dt.weekday
del df3['DateOfemail']
df3.head()
df3.shape
df3.head()
df3.info()
# Label-encode every remaining object (string) column in place.
# NOTE(review): this also encodes the target 'RetentionPotential' before it is split off — confirm that is intended
for i in df3.select_dtypes('object').columns:
    l_enc = LabelEncoder()
    trans_data = l_enc.fit_transform(df3[i])
    df3[i] = trans_data
df3['CustomerID'].value_counts()[:10]
# Separate target and features, then hold out a test split
y = df3['RetentionPotential']
X = df3.drop(['RetentionPotential'],axis = 1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
# Support-vector classifier with default hyperparameters
# NOTE(review): SVC is scale-sensitive and the features are unscaled here — consider StandardScaler; confirm
model = SVC()
model.fit(X_train,y_train)
model.score(X_train,y_train)
model.score(X_test,y_test)
```
| github_jupyter |
```
from dpp_nets.layers.layers import *
import torch
import torch.nn as nn
from collections import OrderedDict
import shutil
import time
import gzip
import os
import json
import numpy as np
from dpp_nets.utils.io import make_embd, make_tensor_dataset, load_tensor_dataset
from dpp_nets.utils.io import data_iterator, load_embd
from torch.autograd import Variable
from torch.utils.data.dataloader import DataLoader
import time
from dpp_nets.my_torch.utilities import pad_tensor
## Data Sets: pre-tokenized beer-review tensors and word embeddings
train_set = torch.load('/Users/Max/data/full_beer/pytorch/annotated_common.pt')
rat_set = torch.load('/Users/Max/data/full_beer/pytorch/annotated.pt')
embd = load_embd('/Users/Max/data/full_beer/pytorch/embeddings.pt')
# Parameters
torch.manual_seed(12)
batch_size = 25
_, max_set_size = train_set.data_tensor.size()
_, embd_dim = embd.weight.size()
hidden_dim = 500
enc_dim = 200
target_dim = 3 # let's choose the first three aspects to learn!
# Baseline: deep-set network over embedded words, sigmoid output
baseline_nets = DeepSetBaseline(embd_dim, hidden_dim, enc_dim, target_dim)
baseline = nn.Sequential(embd, baseline_nets, nn.Sigmoid())
# Model: DPP marginal trainer with L1-style regularization on the kernel
kernel_dim = 200
trainer = MarginalTrainer(embd, hidden_dim, kernel_dim, enc_dim, target_dim)
trainer.reg = 0.1
trainer.reg_mean = 10
trainer.activation = nn.Sigmoid()
train_loader = DataLoader(train_set, batch_size, shuffle=True)
# Need also a training script for RTrainer!!
# REINFORCE variant: kernel net + sampler (3 samples) + prediction net,
# with the embedding incorporated into the trainer
torch.manual_seed(0)
kernel_net = KernelVar(embd_dim, hidden_dim, kernel_dim)
sampler = ReinforceSampler(3)
pred_net = PredNet(embd_dim, hidden_dim, enc_dim, target_dim)
Rtrainer = ReinforceTrainer(embd, kernel_net, sampler, pred_net)
Rtrainer.reg = 0.1
Rtrainer.reg_mean = 10
Rtrainer.activation = nn.Sigmoid()
# Separate learning rates for the kernel and prediction sub-networks
params = [{'params': Rtrainer.kernel_net.parameters(), 'lr': 1e-3},
          {'params': Rtrainer.pred_net.parameters(), 'lr': 1e-4}]
optimizer = torch.optim.Adam(params)
Rtrainer.double()
# Train the REINFORCE model for 20 epochs on the first three aspect targets
for epoch in range(20):
    for t, (review, target) in enumerate(train_loader):
        words = Variable(review)
        target = Variable(target[:,:3]).double()
        loss = Rtrainer(words, target)
        # Backpropagate + parameter updates
        optimizer.zero_grad()
        loss.backward()
        # print(Rtrainer.kernel_net.layer1.weight.grad)
        optimizer.step()
        if not (t+1) % 10:
            print('Loss at it :', t+1, 'is', loss.data[0])
# Scratch cell: demonstrates how a (batch,) target is replicated alpha_iter times
batch_size, alpha_iter, target_dim = 10, 3, 1
target = torch.randn(batch_size)
print(target)
target = target.unsqueeze(1).expand(batch_size, alpha_iter, target_dim).contiguous().view(batch_size * alpha_iter, target_dim)
print(target)
# NOTE(review): 'a' and 'b' are not defined in this chunk — out-of-order notebook cells; confirm before re-running
print(a.size())
print(a.unsqueeze(1).size())
print(b.size())
print(b.unsqueeze(1).size())
# NOTE(review): 'state_dict' is defined two lines below — cells were executed out of order
kernel_net.load_state_dict(state_dict)
kernel_net.layer1.weight
kk = KernelVar(embd_dim, hidden_dim, kernel_dim)
state_dict = kk.state_dict()
kk.layer1.weight
# Actual training loop for the marginal-DPP model
torch.manual_seed(12)
params = [{'params': trainer.kernel_net.parameters(), 'lr': 1e-3},
          {'params': trainer.pred_net.parameters(), 'lr': 1e-4}]
optimizer = torch.optim.Adam(params)
for epoch in range(10):
    for t, (review, target) in enumerate(train_loader):
        review = Variable(review)
        target = Variable(target[:,:3])
        loss = trainer(review, target)
        # Backpropagate + parameter updates
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if not (t+1) % 10:
            print('Loss at it :', t+1, 'is', loss.data[0])
# Actual training loop for the deep-set baseline
# Training: plain MSE against the first three aspect scores
criterion = nn.MSELoss()
lr = 1e-4
optimizer = torch.optim.Adam(baseline_nets.parameters(), lr=lr)
for epoch in range(10):
    for t, (review, target) in enumerate(train_loader):
        target = Variable(target[:,:3])
        words = Variable(review)
        pred = baseline(words)
        loss = criterion(pred, target)
        # Backpropagate + parameter updates
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if not (t+1) % 10:
            print('Loss at it :', t+1, 'is', loss.data[0])
def validate_baseline(val_set, model, criterion):
    """Evaluate *model* on the whole validation set in one forward pass and print the loss."""
    # volatile=True disables gradient tracking (pre-0.4 PyTorch API)
    x = Variable(val_set.data_tensor, volatile=True)
    # Only the first three aspect targets are evaluated, matching training
    y = Variable(val_set.target_tensor[:,:3], volatile=True)
    pred = model(x)
    loss = criterion(pred, y)
    print(loss.data[0])
def validate_model(val_set, model):
    """Print the model's (unregularized) loss on the full validation set."""
    # NOTE(review): this permanently zeroes model.reg as a side effect — the
    # regularization weight is not restored afterwards; confirm intent.
    model.reg = 0
    x = Variable(val_set.data_tensor, volatile=True)
    # The model expects embedded inputs, so embed the raw token ids first
    x = embd(x)
    y = Variable(val_set.target_tensor[:,:3], volatile=True)
    loss = model(x, y)
    print(loss.data[0])
# Cast back to float32 for evaluation, then validate both models on the training set
Rtrainer.float()
validate_model(train_set, Rtrainer)
x = Variable(train_set.data_tensor, volatile=True)
x = embd(x)
y = Variable(train_set.target_tensor[:,:3], volatile=True)
# Switch to a single-sample REINFORCE sampler for deterministic-ish evaluation
sampler = ReinforceSampler(1)
Rtrainer.sampler = sampler
Rtrainer.alpha_iter = 1
validate_baseline(train_set, baseline, nn.MSELoss())
import argparse
import os
import shutil
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.utils.data.dataloader import DataLoader
from dpp_nets.utils.io import make_embd, make_tensor_dataset
from dpp_nets.layers.layers import KernelVar, ReinforceSampler, PredNet, ReinforceTrainer
# Command-line interface for the REINFORCE/VIMCO training script
parser = argparse.ArgumentParser(description='REINFORCE VIMCO Trainer')
parser.add_argument('-a', '--aspect', type=str, choices=['aspect1', 'aspect2', 'aspect3', 'all'],
                    help='what is the target?', required=True)
parser.add_argument('-b', '--batch-size', default=50, type=int,
                    metavar='N', help='mini-batch size (default: 50)')
parser.add_argument('--epochs', default=30, type=int, metavar='N',
                    help='number of total epochs to run')
parser.add_argument('--lr_k', '--learning_rate_k', default=1e-3, type=float,
                    metavar='LRk', help='initial learning rate for kernel net')
parser.add_argument('--lr_p', '--learning_rate_p', default=1e-4, type=float,
                    metavar='LRp', help='initial learning rate for pred net')
parser.add_argument('--reg', type=float, required=True,
                    metavar='reg', help='regularization constant')
parser.add_argument('--reg_mean', type=float, required=True,
                    metavar='reg_mean', help='regularization_mean')
parser.add_argument('--alpha_iter', type=int, required=True,
                    metavar='alpha_iter', help='How many subsets to sample from DPP? At least 2!')
# Pre-training: optional checkpoint names to warm-start the sub-networks
parser.add_argument('--pretrain_kernel', type=str, default="",
                    metavar='pretrain_kernel', help='Give name of pretrain_kernel')
parser.add_argument('--pretrain_pred', type=str, default="",
                    metavar='pretrain_pred', help='Give name of pretrain_pred')
# Train locally or remotely?
parser.add_argument('--remote', type=int,
                    help='training locally or on cluster?', required=True)
# Burnt in Paths..
parser.add_argument('--data_path_local', type=str, default='/Users/Max/data/beer_reviews',
                    help='where is the data folder locally?')
parser.add_argument('--data_path_remote', type=str, default='/cluster/home/paulusm/data/beer_reviews',
                    help='where is the data folder remotely?')
parser.add_argument('--ckp_path_local', type=str, default='/Users/Max/checkpoints/beer_reviews',
                    help='where is the checkpoints folder locally?')
parser.add_argument('--ckp_path_remote', type=str, default='/cluster/home/paulusm/checkpoints/beer_reviews',
                    help='where is the data folder remotely?')
parser.add_argument('--pretrain_path_local', type=str, default='/Users/Max/checkpoints/beer_reviews',
                    help='where is the pre_trained model? locally')
parser.add_argument('--pretrain_path_remote', type=str, default='/cluster/home/paulusm/pretrain/beer_reviews',
                    help='where is the data folder? remotely')
def train(loader, trainer, optimizer):
    """Run one epoch of training: forward, backward, and update per batch."""
    trainer.train()
    for step, (review, target) in enumerate(loader):
        review = Variable(review)
        # 'all' trains on the first three aspect columns jointly; otherwise
        # the single column index is the digit at the end of the aspect name.
        cols = slice(None, 3) if args.aspect == 'all' else int(args.aspect[-1])
        target = Variable(target[:, cols]).type(torch.DoubleTensor)
        loss = trainer(review, target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        print("trained one batch")
def validate(loader, trainer):
    """
    Note, we keep the sampling as before.
    i.e what ever alpha_iter is, we take it.

    Returns the running means of (total loss, prediction loss,
    regularization loss) over all validation batches.
    """
    trainer.eval()
    total_loss = 0.0
    total_pred_loss = 0.0
    total_reg_loss = 0.0
    # i starts at 1 so the incremental-mean update below is well defined
    for i, (review, target) in enumerate(loader, 1):
        review = Variable(review, volatile=True)
        if args.aspect == 'all':
            target = Variable(target[:,:3], volatile=True)
        else:
            target = Variable(target[:,int(args.aspect[-1])], volatile=True)
        # Forward pass populates trainer.loss / .pred_loss / .reg_loss
        trainer(review, target)
        loss = trainer.loss.data[0]
        pred_loss = trainer.pred_loss.data[0]
        reg_loss = trainer.reg_loss.data[0]
        # Incremental (Welford-style) running mean: mean += (x - mean) / i
        delta = loss - total_loss
        total_loss += (delta / i)
        delta = pred_loss - total_pred_loss
        total_pred_loss += (delta / i)
        delta = reg_loss - total_reg_loss
        total_reg_loss += (delta / i)
        # print("validated one batch")
    return total_loss, total_pred_loss, total_reg_loss
def adjust_learning_rate(optimizer, epoch):
    """Decay every parameter group's learning rate by 10x each 10th epoch.

    The decay fires whenever (epoch + 1) is a multiple of 10 — i.e. on
    epochs 9, 19, 29, ... — matching a step schedule with gamma = 0.1.
    """
    if (epoch + 1) % 10 == 0:
        for group in optimizer.param_groups:
            group['lr'] = 0.1 * group['lr']
def log(epoch, loss, pred_loss, reg_loss):
    """Append one epoch's validation losses to a run-specific text log file."""
    string = str.join(" | ", ['Epoch: %d' % (epoch), 'V Loss: %.5f' % (loss),
                              'V Pred Loss: %.5f' % (pred_loss), 'V Reg Loss: %.5f' % (reg_loss)])
    # The log file name encodes the full hyperparameter configuration of this run
    if args.remote:
        destination = os.path.join(args.ckp_path_remote, args.aspect + 'reg' + str(args.reg) + 'reg_mean' + str(args.reg_mean) +
                                   'alpha_iter' + str(args.alpha_iter) + str(args.pretrain_kernel) + str(args.pretrain_pred) + 'reinforce_log.txt')
    else:
        destination = os.path.join(args.ckp_path_local, args.aspect + 'reg' + str(args.reg) + 'reg_mean' + str(args.reg_mean) +
                                   'alpha_iter' + str(args.alpha_iter) + str(args.pretrain_kernel) + str(args.pretrain_pred) + 'reinforce_log.txt')
    with open(destination, 'a') as log:
        log.write(string + '\n')
def save_checkpoint(state, is_best, filename='reinforce_checkpoint.pth.tar'):
    """Persist a training-state dict; optionally copy it to a *_best file.

    state : dictionary that contains valuable information to be saved.
    is_best : when True, also copy the checkpoint to '...reinforce_best.pth.tar'.
    filename : suffix of the checkpoint file name.
    """
    # BUG FIX: the original used str(args.filename), but the argument parser
    # defines no 'filename' option, so every save raised AttributeError.
    # Use this function's own 'filename' parameter instead.
    # The file name prefix encodes the run's hyperparameter configuration.
    prefix = (args.aspect + 'reg' + str(args.reg) + 'reg_mean' + str(args.reg_mean) +
              'alpha_iter' + str(args.alpha_iter) + str(args.pretrain_kernel) + str(args.pretrain_pred))
    if args.remote:
        destination = os.path.join(args.ckp_path_remote, prefix + str(filename))
    else:
        destination = os.path.join(args.ckp_path_local, prefix + str(filename))
    torch.save(state, destination)
    if is_best:
        if args.remote:
            best_destination = os.path.join(args.ckp_path_remote, prefix + 'reinforce_best.pth.tar')
        else:
            best_destination = os.path.join(args.ckp_path_local, prefix + 'reinforce_best.pth.tar')
        shutil.copyfile(destination, best_destination)
# NOTE(review): 'global' at module level is a no-op — likely a leftover from a
# function version of this script
global args, lowest_loss
# Hard-coded CLI string so the script can run inside a notebook
args = parser.parse_args("-a aspect1 --remote 0 --reg 10 --reg_mean 0.1 --alpha_iter 4".split())
lowest_loss = 100 # arbitrary high number as upper bound for loss
### Load data: pick remote (cluster) or local paths
if args.remote:
    # print('training remotely')
    train_path = os.path.join(args.data_path_remote, str.join(".",['reviews', args.aspect, 'train.txt.gz']))
    val_path = os.path.join(args.data_path_remote, str.join(".",['reviews', args.aspect, 'heldout.txt.gz']))
    embd_path = os.path.join(args.data_path_remote, 'review+wiki.filtered.200.txt.gz')
else:
    # print('training locally')
    train_path = os.path.join(args.data_path_local, str.join(".",['reviews', args.aspect, 'train.txt.gz']))
    val_path = os.path.join(args.data_path_local, str.join(".",['reviews', args.aspect, 'heldout.txt.gz']))
    embd_path = os.path.join(args.data_path_local, 'review+wiki.filtered.200.txt.gz')
# Build the embedding layer, vocabulary, and tensor datasets
embd, word_to_ix = make_embd(embd_path)
train_set = make_tensor_dataset(train_path, word_to_ix)
val_set = make_tensor_dataset(val_path, word_to_ix)
print("loaded data")
torch.manual_seed(0)
train_loader = DataLoader(train_set, args.batch_size, shuffle=True)
val_loader = DataLoader(val_set, args.batch_size)
print("loader defined")
### Build model
# Network parameters
embd_dim = embd.weight.size(1)
kernel_dim = 200
hidden_dim = 500
enc_dim = 200
# 'all' predicts three aspects jointly, otherwise a single scalar
if args.aspect == 'all':
    target_dim = 3
else:
    target_dim = 1
# Model
torch.manual_seed(1)
# Add pre-training here...
kernel_net = KernelVar(embd_dim, hidden_dim, kernel_dim)
sampler = ReinforceSampler(args.alpha_iter)
pred_net = PredNet(embd_dim, hidden_dim, enc_dim, target_dim)
# Optionally warm-start each sub-network from a saved state dict
if args.pretrain_kernel:
    if args.remote:
        state_dict = torch.load(args.pretrain_path_remote + args.pretrain_kernel)
    else:
        state_dict = torch.load(args.pretrain_path_local + args.pretrain_kernel)
    kernel_net.load_state_dict(state_dict)
if args.pretrain_pred:
    if args.remote:
        state_dict = torch.load(args.pretrain_path_remote + args.pretrain_pred)
    else:
        state_dict = torch.load(args.pretrain_path_local + args.pretrain_pred)
    pred_net.load_state_dict(state_dict)
# continue with trainer: compose embedding, kernel, sampler, and predictor
trainer = ReinforceTrainer(embd, kernel_net, sampler, pred_net)
trainer.reg = args.reg
trainer.reg_mean = args.reg_mean
trainer.activation = nn.Sigmoid()
trainer.double()
print("created trainer")
# Separate learning rates for the kernel and prediction sub-networks
params = [{'params': trainer.kernel_net.parameters(), 'lr': args.lr_k},
          {'params': trainer.pred_net.parameters(), 'lr': args.lr_p}]
optimizer = torch.optim.Adam(params)
print('set-up optimizer')
### Loop: train, validate, log, and checkpoint each epoch
torch.manual_seed(0)
print("started loop")
for epoch in range(args.epochs):
    adjust_learning_rate(optimizer, epoch)
    train(train_loader, trainer, optimizer)
    loss, pred_loss, reg_loss = validate(val_loader, trainer)
    log(epoch, loss, pred_loss, reg_loss)
    print("logged")
    # Track the best model by prediction loss only (regularization excluded)
    is_best = pred_loss < lowest_loss
    lowest_loss = min(pred_loss, lowest_loss)
    # NOTE(review): key 'epoch:' carries a stray colon, and 'model' says
    # 'Marginal Trainer' although a ReinforceTrainer is saved — confirm.
    save = {'epoch:': epoch + 1,
            'model': 'Marginal Trainer',
            'state_dict': trainer.state_dict(),
            'lowest_loss': lowest_loss,
            'optimizer': optimizer.state_dict()}
    save_checkpoint(save, is_best)
    print("saved a checkpoint")
print('*'*20, 'SUCCESS','*'*20)
```
| github_jupyter |
<a href="https://colab.research.google.com/github/jonkrohn/ML-foundations/blob/master/notebooks/single-point-regression-gradient.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Gradient of a Single-Point Regression
In this notebook, we calculate the gradient of quadratic cost with respect to a straight-line regression model's parameters. We keep the partial derivatives as simple as possible by limiting the model to handling a single data point.
```
import torch
```
Let's use the same data as we did in the [*Regression in PyTorch* notebook](https://github.com/jonkrohn/ML-foundations/blob/master/notebooks/regression-in-pytorch.ipynb) as well as for demonstrating the Moore-Penrose Pseudoinverse in the [*Linear Algebra II* notebook](https://github.com/jonkrohn/ML-foundations/blob/master/notebooks/2-linear-algebra-ii.ipynb):
```
# Eight (x, y) data points; y decreases roughly linearly with x
xs = torch.tensor([0, 1, 2, 3, 4, 5, 6, 7.])
ys = torch.tensor([1.86, 1.31, .62, .33, .09, -.67, -1.23, -1.37])
```
The slope of a line is given by $y = mx + b$:
```
def regression(my_x, my_m, my_b):
    """Evaluate the straight-line model y = m*x + b at the point my_x."""
    return my_b + my_m * my_x
```
Let's initialize $m$ and $b$ with the same "random" near-zero values as we did in the *Regression in PyTorch* notebook:
```
m = torch.tensor([0.9]).requires_grad_()
b = torch.tensor([0.1]).requires_grad_()
```
To keep the partial derivatives as simple as possible, let's move forward with a single instance $i$ from the eight possible data points:
```
i = 7
x = xs[i]
y = ys[i]
x
y
```
**Step 1**: Forward pass
We can flow the scalar tensor $x$ through our regression model to produce $\hat{y}$, an estimate of $y$. Prior to any model training, this is an arbitrary estimate:
```
yhat = regression(x, m, b)
yhat
```
**Step 2**: Compare $\hat{y}$ with true $y$ to calculate cost $C$
In the *Regression in PyTorch* notebook, we used mean-squared error, which averages quadratic cost over multiple data points. With a single data point, here we can use quadratic cost alone. It is defined by: $$ C = (\hat{y} - y)^2 $$
```
def squared_error(my_yhat, my_y):
    """Quadratic cost C = (yhat - y)^2 for a single prediction."""
    residual = my_yhat - my_y
    return residual * residual
C = squared_error(yhat, y)
C
```
**Step 3**: Use autodiff to calculate gradient of $C$ w.r.t. parameters
```
C.backward()
```
The partial derivative of $C$ with respect to $m$ ($\frac{\partial C}{\partial m}$) is:
```
m.grad
```
And the partial derivative of $C$ with respect to $b$ ($\frac{\partial C}{\partial b}$) is:
```
b.grad
```
**Return to *Calculus II* slides here to derive $\frac{\partial C}{\partial m}$ and $\frac{\partial C}{\partial b}$.**
$$ \frac{\partial C}{\partial m} = 2x(\hat{y} - y) $$
```
2*x*(yhat.item()-y)
```
$$ \frac{\partial C}{\partial b} = 2(\hat{y}-y) $$
```
2*(yhat.item()-y)
```
### The Gradient of Cost, $\nabla C$
The gradient of cost, which is symbolized $\nabla C$ (pronounced "nabla C"), is a vector of all the partial derivatives of $C$ with respect to each of the individual model parameters:
$\nabla C = \nabla_p C = \left[ \frac{\partial{C}}{\partial{p_1}}, \frac{\partial{C}}{\partial{p_2}}, \cdots, \frac{\partial{C}}{\partial{p_n}} \right]^T $
In this case, there are only two parameters, $b$ and $m$:
$\nabla C = \left[ \frac{\partial{C}}{\partial{b}}, \frac{\partial{C}}{\partial{m}} \right]^T $
```
gradient = torch.tensor([[b.grad.item(), m.grad.item()]]).T
gradient
```
| github_jupyter |
拼多多2019笔试题。给若干任务,每个任务有完成时间与依赖。现限制单线程工作,求一个使得平均响应时间最小的完成方案。
思路:拓扑排序的变种。
```
# Read the task dependency graph from stdin: n tasks, m dependency edges (1-indexed).
import sys
n, m = map(int, sys.stdin.readline().strip().split())
d_arr = [0] * n  # in-degree of each task
adj_table = {job_idx: list() for job_idx in range(n)}  # task -> tasks that depend on it
ts = list(map(int, sys.stdin.readline().strip().split()))  # completion time of each task
for _ in range(m):
    # each edge line: from-task, to-task (the "to" task depends on the "from" task)
    f, t = map(int, sys.stdin.readline().strip().split())
    if f - 1 not in adj_table:  # NOTE(review): dead check — adj_table was pre-filled for all tasks
        adj_table[f - 1] = list()
    adj_table[f - 1].append(t - 1)
    d_arr[t - 1] += 1
def func(n, d_arr, ts, adj_table):
    """Greedily schedule n tasks on one thread to minimise mean response time.

    At every step, among the tasks whose dependencies have all finished
    (in-degree 0), run the one with the smallest completion time ts[j]
    (shortest-job-first over ready tasks).

    Parameters
    ----------
    n : number of tasks.
    d_arr : in-degree of each task (mutated: decremented as tasks finish).
    ts : completion time of each task.
    adj_table : adjacency list mapping a task to the tasks depending on it.

    Returns the 1-indexed execution order. The result is shorter than n if
    the dependency graph contains a cycle.

    Fixes vs. the original: `if epoch < n` was always true, so a bogus job
    index (-1) was enqueued on the last iteration; the original also abused
    the in-degree array (decrementing a job's own in-degree to -1) as a
    "done" marker — replaced by an explicit done[] array, and a cycle now
    terminates cleanly instead of enqueuing -1.
    """
    done = [False] * n
    res = []
    for _ in range(n):
        # pick the ready (in-degree 0, not yet run) task with the smallest time
        cur_job, cur_min = -1, float("inf")
        for job_idx, d_in in enumerate(d_arr):
            if not done[job_idx] and d_in == 0 and ts[job_idx] < cur_min:
                cur_job, cur_min = job_idx, ts[job_idx]
        if cur_job < 0:  # no runnable task left -> cycle in the graph
            break
        done[cur_job] = True
        res.append(cur_job + 1)  # report 1-indexed
        for post_job in adj_table[cur_job]:
            d_arr[post_job] -= 1
    return res
```
[Network Delay Time](https://leetcode.com/problems/network-delay-time/)。给一个有$N$个节点的有向图,再给出一若干节点之间的有向距离```times```。现从节点$K$发出一条消息,问经过多少时间后网络中的所有节点均已收到消息。索引从$1$开始。
思路:BFS。首先维护一个长度$N$的数组来表示各节点收到消息的时间,用大值初始化,其中```arr[K]=0```。然后开始BFS,所有节点只保留收到消息的最小时间。难点在于如何避免重复访问,该题数据是有环的。设置访问数组是错误的,会漏边;正确的做法是**只将那些更新过时间的邻居节点加入队列**。
```
def networkDelayTime(times, N: int, K: int) -> int:
    """Time for a signal sent from node K to reach all N nodes (1-indexed).

    BFS/SPFA-style relaxation: keep the best known arrival time per node and
    only re-enqueue a neighbour when its arrival time improves — this both
    avoids redundant revisits and handles cyclic graphs.

    Returns -1 if some node is unreachable.

    Improvement: uses collections.deque (O(1) popleft) instead of
    list.pop(0), which is O(n) per dequeue.
    """
    from collections import deque
    INF = 0x7FFFFFFF
    adj_table = {idx: [] for idx in range(1, N + 1)}
    for f, t, time in times:
        adj_table[f].append((t, time))
    res = [INF] * (N + 1)
    res[K] = 0
    q = deque([(K, 0)])  # (node, arrival time at that node)
    while q:
        vis_node, cur_time = q.popleft()
        for t, time in adj_table[vis_node]:
            if cur_time + time < res[t]:  # relax: enqueue only on improvement
                res[t] = cur_time + time
                q.append((t, cur_time + time))
    worst = max(res[1:])
    return worst if worst < INF else -1
```
[Possible Bipartition](https://leetcode.com/problems/possible-bipartition/)。染色问题,给$N$个点,然后给出这些点的连接关系,现有两种颜色,直接相连的点不能同色。判断能否存在染色方案。
思路:BFS。因为只有两种颜色,首先随便选一个点进行染色,那么与它相邻的所有点均只能染另外一种颜色。设置一个颜色数组,记录每一个点的两种颜色,同时还起到一个访问数组的作用。
```
def possibleBipartition(N: int, dislikes) -> bool:
    """Return True if N people (1-indexed) can be split into two groups such
    that no disliking pair ends up in the same group, i.e. the dislike graph
    is bipartite.

    BFS 2-colouring; the colors array doubles as the visited marker.
    Improvement: collections.deque for O(1) popleft (list.pop(0) is O(n)).
    """
    from collections import deque
    adj_table = {idx: [] for idx in range(N)}
    for a, b in dislikes:
        i, j = a - 1, b - 1  # input is 1-indexed
        adj_table[i].append(j)
        adj_table[j].append(i)
    colors = [None] * N
    for idx in range(N):  # restart BFS once per connected component
        if colors[idx] is None:
            colors[idx] = True
            q = deque([idx])
            while q:
                vis_node = q.popleft()
                for neighbor in adj_table[vis_node]:
                    if colors[neighbor] is None:  # uncoloured: flip and enqueue
                        colors[neighbor] = not colors[vis_node]
                        q.append(neighbor)
                    elif colors[neighbor] == colors[vis_node]:  # conflict
                        return False
    return True
```
[Is Graph Bipartite?](https://leetcode.com/problems/is-graph-bipartite/)。给一图,判断该图是否是二分图。二分图即把该图的所有节点分到两个集合中,每个集合中的任意两个节点都不存在边。
思路:染色问题。即判断所有直接相连的点能否染成不同的颜色。维护一个染色数组,初始化为None,然后对图做BFS。因为图可能存在孤岛,所以当队列为空时,需要随即取出一个未染色的点加入队列。
```
def isBipartite(graph) -> bool:
    """Check whether an adjacency-list graph is bipartite (2-colourable).

    BFS 2-colouring, restarted once per connected component. The original
    re-seeded uncoloured nodes from inside the while loop to handle isolated
    components; an outer loop over start nodes is equivalent and simpler.
    Improvement: collections.deque for O(1) popleft.
    """
    from collections import deque
    n = len(graph)
    color_arr = [None] * n
    for start in range(n):  # one BFS per connected component
        if color_arr[start] is not None:
            continue
        color_arr[start] = True
        q = deque([start])
        while q:
            vis_node = q.popleft()
            for neighbor in graph[vis_node]:
                if color_arr[neighbor] is None:  # uncoloured: flip and enqueue
                    color_arr[neighbor] = not color_arr[vis_node]
                    q.append(neighbor)
                elif color_arr[neighbor] == color_arr[vis_node]:  # conflict
                    return False
    return True
```
[Find Eventual Safe States](https://leetcode.com/problems/find-eventual-safe-states/)。以数组的形式给一有向图,要求所有满足以下性质的节点:从该节点出发,无论怎么走都会到达一个最终节点。
思路:该题其实就是逆拓扑排序,首先将整个图反向,然后开始进行拓扑排序,能排序的点即是所求。
```
def eventualSafeNodes(graph):
    """Return, sorted, every node from which all paths reach a terminal node.

    Kahn topological sort on the reversed graph: nodes with out-degree 0 in
    the original graph are safe, and a node becomes safe once all of its
    successors are safe (its remaining out-degree drops to 0).

    Fixes vs. the original: removes the dead statement that decremented the
    popped node's own in-degree (its value was never read again); uses
    collections.deque for O(1) popleft.
    """
    from collections import deque
    n = len(graph)
    rev = {idx: [] for idx in range(n)}  # reversed adjacency: node -> predecessors
    out_deg = [0] * n                    # remaining unsafe successors per node
    for idx, succs in enumerate(graph):
        out_deg[idx] = len(succs)
        for s in succs:
            rev[s].append(idx)
    q = deque(idx for idx in range(n) if out_deg[idx] == 0)
    res = []
    while q:
        node = q.popleft()
        res.append(node)
        for pred in rev[node]:
            out_deg[pred] -= 1
            if out_deg[pred] == 0:  # all successors of pred are now safe
                q.append(pred)
    return sorted(res)
```
[All Nodes Distance K in Binary Tree](https://leetcode.com/problems/all-nodes-distance-k-in-binary-tree/)。给一二叉树与一个目标节点```target```,在二叉树中找到所有跟```target```距离为$K$的节点。
思路:将二叉树转成无向图就好做了,直接以```target```为起点做DFS即可。
```
def distanceK(root, target, K):
graph = dict()
def build_G(graph, root):
if not root:
return
graph.setdefault(root, list())
if root.left:
graph.setdefault(root.left, list())
graph[root].append(root.left)
graph[root.left].append(root)
build_G(graph, root.left)
if root.right:
graph.setdefault(root.right, list())
graph[root].append(root.right)
graph[root.right].append(root)
build_G(graph, root.right)
build_G(graph, root)
res = list()
visited = {ptr: False for ptr in list(graph.keys())}
def dfs(idx, path):
if path == K:
res.append(idx.val)
return
for neighbor in graph[idx]:
if not visited[neighbor]:
visited[idx] = True
dfs(neighbor, path+1)
visited[idx] = False
dfs(target, 0)
return res
```
[二分图最大匹配](https://uoj.ac/problem/78)。给一二分图,有A、B两种节点,给出最大匹配的数量及匹配方案。
思路:匈牙利算法求解最大匹配。
```
import sys
def data_load():
    """Read a bipartite graph from stdin.

    First line: |A|, |B|, edge count. Each following line is an edge "u v"
    with 1-based indices. Returns (n_u, n_v, adj_tab) where adj_tab is kept
    for the B side only (0-based), listing each B node's A-side neighbours.
    """
    header = sys.stdin.readline().strip().split()
    n_u, n_v, n_row = (int(tok) for tok in header)
    adj_tab = dict()
    for b_node in range(n_v):
        adj_tab[b_node] = list()
    for _ in range(n_row):
        u, v = (int(tok) for tok in sys.stdin.readline().strip().split())
        # shift the 1-based input indices down to 0-based
        adj_tab[v - 1].append(u - 1)
    return n_u, n_v, adj_tab
def find_aug_path_dfs(cur_node: int, alter_flag: list, adj_tab: dict) -> bool:
    '''
    DFS search for an augmenting path (Hungarian algorithm).
    :param cur_node: current B-side node; moves only along alternating paths
    :param alter_flag: visited marks along the alternating path (A side only)
    :param adj_tab: adjacency table (B node -> list of A-side neighbours)
    :return: True if an augmenting path was found. Relies on the module-level
             `matchs` array, which maps each A node to its matched B node.
    '''
    for neighbor in adj_tab[cur_node]:  # try every A-side neighbour
        if not alter_flag[neighbor]:  # skip already-visited nodes to avoid cycles
            alter_flag[neighbor] = 1
            # Short-circuit: if the neighbour is unmatched, a longer augmenting
            # path has been found. If it is matched, jump over it to its current
            # match, which keeps cur_node on the same side of the bipartition:
            # unmatched -> matched -> matched -> unmatched (cur_node hops 1 -> 3).
            if (matchs[neighbor] < 0) or (find_aug_path_dfs(matchs[neighbor], alter_flag, adj_tab)):
                matchs[neighbor] = cur_node  # matches are keyed by the A-side node
                return True
    return False
if __name__ == "__main__":
    n_a, n_b, adj_tab = data_load()
    matchs = [-1] * n_a  # matchs[a] = B node matched to A node a; -1 = unmatched
    res = 0
    for i in range(n_b):  # try to match every B-side node in turn
        alter_flag = [0] * n_a  # per-attempt visited marks; only A nodes need tracking
        if find_aug_path_dfs(i, alter_flag, adj_tab):
            res += 1
    print(res)
    for a in range(n_a):
        print(matchs[a] + 1, end=' ')  # convert 0-based indices back to 1-based output
| github_jupyter |
```
import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader
import torch.nn.functional as F
import torch.optim as optim
import torchvision
from torchvision import models
from torchsummary import summary
import albumentations as A
from albumentations.pytorch import ToTensorV2, transforms
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score, accuracy_score
import os
import numpy as np
import pickle
import pandas as pd
import seaborn as sns
import time
from tqdm import tqdm
import cv2
import matplotlib.pyplot as plt
from IPython import display
%matplotlib inline
def plot_samples_on_epoch(samples, nrow=4, chanels=3, size=(12,12)):
    """Tile a batch of image tensors into a grid and display it via see_plot.

    Single-channel grids are rescaled by 255 before display.
    """
    grid = torchvision.utils.make_grid(samples, nrow=nrow)
    image = grid.permute(1, 2, 0)
    if chanels == 1:
        image = image * 255
    see_plot(image, size=size)
def see_plot(pict, size=(6,6)):
    """Display a single picture with matplotlib, using a gray colormap."""
    plt.figure(figsize=size)
    plt.imshow(pict, cmap='gray')
    plt.grid()
    plt.show()
def load_image(video, frame):
    """Read one saved video frame from disk and return it in RGB order
    (OpenCV loads BGR, hence the channel flip)."""
    frame_dir = '/media/laggg/surviv_rl_data/all_videoframes_rgb_96/{}/'.format(video)
    bgr = cv2.imread(frame_dir + 'f_{}.jpg'.format(frame))
    return bgr[:, :, ::-1]
#----------------------------------------------------------------------------------------------------
# info
print('torch_version: {},\ntorchvision_version: {}'.format(torch.__version__,torchvision.__version__))
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
print('device:', device)
#-----------------------------------------------------------------------------------------
import itertools
from sklearn.metrics import confusion_matrix
from matplotlib import pyplot as plt
%matplotlib inline
def plot_confusion_matrix(targets,predictions,classes,
                          normalize=True,title='Confusion matrix',
                          figsize=(4, 4), cmap=plt.cm.Reds):
    """Render a confusion matrix as an annotated heatmap.

    targets / predictions: true and predicted labels; classes: axis tick
    labels. With normalize=True each row is shown as fractions of the true
    class (percentages in the cell annotations).
    """
    conf_matrix = confusion_matrix(y_true=targets, y_pred=predictions)
    if normalize:
        # divide each row by its total so cells are per-true-class fractions
        conf_matrix = conf_matrix.astype('float') / conf_matrix.sum(axis=1)[:, np.newaxis]
        title = 'Normalized ' + title.lower()
    plt.figure(figsize=figsize)
    plt.imshow(conf_matrix, interpolation='nearest', cmap=cmap)
    plt.title(title)
    # plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    fmt = '.2f' if normalize else 'd'
    thresh = conf_matrix.max() / 2.  # threshold for flipping text colour on dark cells
    for i, j in itertools.product(range(conf_matrix.shape[0]), range(conf_matrix.shape[1])):
        if normalize:
            value = '{} %'.format(format(conf_matrix[i, j] * 100, '.2f'))
        else:
            value = format(conf_matrix[i, j], fmt)
        plt.text(j,i,value,horizontalalignment="center",color="white" if conf_matrix[i, j] > thresh else "black")
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.tight_layout()
    plt.show()
```
## Data
```
# Validation dataframe: one row per image, target 1 = stone, 0 = no stone.
val_p = []
val_t = []
path = './final_dataset_stone/val/stone/'
for x in os.listdir(path):
    val_p.append(path+x)
    val_t.append(1)
path = './final_dataset_stone/val/nostone/'
for x in os.listdir(path):
    val_p.append(path+x)
    val_t.append(0)
df_val = pd.DataFrame({'img_path': val_p, 'target': val_t})
print(df_val.shape, np.unique(df_val.target, return_counts=True))
df_val.sample(5)
# Training dataframe (the original comment wrongly said "Val df" here):
train_p = []
train_t = []
path = './final_dataset_stone/train/stone/'
for x in os.listdir(path):
    train_p.append(path+x)
    train_t.append(1)
path = './final_dataset_stone/train/nostone/'
for x in os.listdir(path):
    train_p.append(path+x)
    train_t.append(0)
df_train = pd.DataFrame({'img_path': train_p, 'target': train_t})
print(df_train.shape, np.unique(df_train.target, return_counts=True))
df_train.sample(5)
```
## DataLoader
```
# для аугментации картинки движком
def convrelu(in_channels, out_channels, kernel, padding):
    """Conv2d -> BatchNorm2d -> ReLU block used throughout the UNet decoder."""
    layers = [
        nn.Conv2d(in_channels, out_channels, kernel, padding=padding),
        nn.BatchNorm2d(out_channels),
        nn.ReLU(inplace=True),
    ]
    return nn.Sequential(*layers)
class ResNetUNet_v2(nn.Module):
    """UNet-style generator with a ResNet-18 encoder and a conditioning input.

    forward() takes a tuple `inp = (image, cond)` where `cond` is an
    11-value vector per sample (8-dim direction one-hot + sp + zoom + n —
    see neural_aug). The condition is projected to 512 channels and added to
    the deepest encoder feature map. The final Tanh keeps output pixels in
    [-1, 1].
    """
    def __init__(self, n_class):
        super().__init__()
        # ImageNet-pretrained ResNet-18 backbone, split into sequential stages
        self.base_model = models.resnet18(pretrained=True)
        self.base_layers = list(self.base_model.children())
        self.layer0 = nn.Sequential(*self.base_layers[:3]) # size=(N, 64, x.H/2, x.W/2)
        self.layer0_1x1 = convrelu(64, 64, 1, 0)
        self.layer1 = nn.Sequential(*self.base_layers[3:5]) # size=(N, 64, x.H/4, x.W/4)
        self.layer1_1x1 = convrelu(64, 64, 1, 0)
        self.layer2 = self.base_layers[5]  # size=(N, 128, x.H/8, x.W/8)
        self.layer2_1x1 = convrelu(128, 128, 1, 0)
        self.layer3 = self.base_layers[6]  # size=(N, 256, x.H/16, x.W/16)
        self.layer3_1x1 = convrelu(256, 256, 1, 0)
        self.layer4 = self.base_layers[7]  # size=(N, 512, x.H/32, x.W/32)
        self.layer4_1x1 = convrelu(512, 512, 1, 0)
        self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        # decoder: upsample, concat the matching encoder skip, then convrelu
        self.conv_up3 = convrelu(256 + 512, 512, 3, 1)
        self.conv_up2 = convrelu(128 + 512, 256, 3, 1)
        self.conv_up1 = convrelu(64 + 256, 256, 3, 1)
        self.conv_up0 = convrelu(64 + 256, 128, 3, 1)
        # full-resolution branch applied directly to the raw input
        self.conv_original_size0 = convrelu(3, 64, 3, 1)
        self.conv_original_size1 = convrelu(64, 64, 3, 1)
        self.conv_original_size2 = convrelu(64 + 128, 64, 3, 1)
        self.dropout = nn.Dropout(0.5)
        self.conv_last = nn.Conv2d(64, n_class, 1)
        self.act_last = nn.Tanh()
        # projects the 11-value condition vector to 512 channels: (batch,11) --> (batch,512)
        self.support_conv1 = nn.Conv2d(11, 512, 1)
    def forward(self, inp):
        # inp[0]: image tensor; inp[1]: (batch, 11) conditioning vector
        x_original = self.conv_original_size0(inp[0])
        x_original = self.conv_original_size1(x_original)
        # encoder
        layer0 = self.layer0(inp[0])
        layer1 = self.layer1(layer0)
        layer2 = self.layer2(layer1)
        layer3 = self.layer3(layer2)
        layer4 = self.layer4(layer3)
        # inject the condition into the bottleneck (broadcast over H and W)
        cond = self.support_conv1(torch.unsqueeze(torch.unsqueeze(inp[1], 2), 2))  # (B, 11) --> (B, 512, 1, 1)
        layer4 = self.layer4_1x1(layer4+cond)
        # decoder with skip connections
        x = self.upsample(layer4)
        layer3 = self.layer3_1x1(layer3)
        x = torch.cat([x, layer3], dim=1)
        x = self.conv_up3(x)
        x = self.upsample(x)
        layer2 = self.layer2_1x1(layer2)
        x = torch.cat([x, layer2], dim=1)
        x = self.conv_up2(x)
        x = self.upsample(x)
        layer1 = self.layer1_1x1(layer1)
        x = torch.cat([x, layer1], dim=1)
        x = self.conv_up1(x)
        x = self.upsample(x)
        layer0 = self.layer0_1x1(layer0)
        x = torch.cat([x, layer0], dim=1)
        x = self.conv_up0(x)
        x = self.upsample(x)
        x = torch.cat([x, x_original], dim=1)
        x = self.conv_original_size2(x)
        x = self.dropout(x)
        out = self.conv_last(x)
        out = self.act_last(out)  # squash output to [-1, 1]
        return out
neural_engine = ResNetUNet_v2(3)
neural_engine.load_state_dict(torch.load('../neural_engine/best_models/resunet_v5.pth'))
neural_engine = neural_engine.to(device)
neural_engine.train(False);
def apply_aug(p0, aug):
    """Apply one of 8 dihedral-group augmentations to image p0.

    aug 0-3: rotations by 0/90/180/270 degrees; aug 4: horizontal flip;
    aug 5-7: 90/180/270-degree rotation followed by a horizontal flip.

    Raises ValueError for any other `aug` value (the original fell through
    and crashed with UnboundLocalError on the final `return p`).
    """
    if aug == 0:
        p = p0.copy()
    elif aug == 1:
        p = cv2.rotate(p0, cv2.ROTATE_90_CLOCKWISE)
    elif aug == 2:
        p = cv2.rotate(p0, cv2.ROTATE_180)
    elif aug == 3:
        p = cv2.rotate(p0, cv2.ROTATE_90_COUNTERCLOCKWISE)
    elif aug == 4:
        p = cv2.flip(p0, 1)
    elif aug == 5:
        p = cv2.rotate(p0, cv2.ROTATE_90_CLOCKWISE)
        p = cv2.flip(p, 1)
    elif aug == 6:
        p = cv2.rotate(p0, cv2.ROTATE_180)
        p = cv2.flip(p, 1)
    elif aug == 7:
        p = cv2.rotate(p0, cv2.ROTATE_90_COUNTERCLOCKWISE)
        p = cv2.flip(p, 1)
    else:
        raise ValueError("aug must be an integer in [0, 7], got {!r}".format(aug))
    return p
def rotate_image(image, angle):
    """Rotate `image` by `angle` degrees around its centre, keeping the
    original width/height (rotated corners are clipped)."""
    width_height = image.shape[1::-1]  # OpenCV wants (width, height)
    centre = tuple(np.array(width_height) / 2)
    matrix = cv2.getRotationMatrix2D(centre, angle, 1.0)
    return cv2.warpAffine(image, matrix, width_height, flags=cv2.INTER_LINEAR)
def neural_aug(model,p,d,sp,zoom,n):
    '''
    Synthesise an augmented frame with the neural "engine" model.

    model - in gpu in eval mode
    p - tensor of frame with variables in [-1,1]
    d - direction, one of {1,2,3,4,5,6,7,8}
    sp - sp in current frame, int/float
    zoom - zoom in current frame, one of {1} (the model was trained only for zoom=1)
    n - number of timestamps, one of {1,2,3,4,5,6,7,8,9,10,11,12,13,14}
    '''
    # NOTE(review): relies on the module-level `device`; the /100, /15 and /14
    # divisors presumably match the scaling used during engine training — verify.
    p = torch.clone(p).to(device)
    d = F.one_hot(torch.tensor(d-1), num_classes=8)
    sp = torch.tensor(sp)/100
    zoom = torch.tensor(zoom)/15
    n = torch.tensor(n/14)
    # conditioning vector: 8-dim direction one-hot + sp + zoom + n -> shape (1, 11)
    dd2 = torch.cat([d,
                     sp.unsqueeze(0),
                     zoom.unsqueeze(0),
                     n.unsqueeze(0)]).unsqueeze(0).float().to(device)
    with torch.no_grad():
        p = model((p.unsqueeze(0),dd2))[0]
    return p.detach().cpu()
#=====================================================================================
class Stone_Dataset(Dataset):
    """Dataset of (image tensor, stone/no-stone label) pairs.

    `df` is expected to be df[['img_path', 'target']].values — rows of
    [path, label]. When `augs` is truthy, each sample goes through dihedral
    augmentation, an optional neural-engine augmentation (uses the
    module-level `neural_engine` model), a random rotation, and colour
    jitter, before normalisation to [-1, 1].
    """
    def __init__(self, df, augs=None):
        super().__init__()
        self.df = df
        self.augs = augs
        # normalise pixels to [-1, 1] and convert HWC numpy -> CHW tensor
        self.transform = A.Compose([A.Normalize(mean=(0.5,), std=(0.5,)),
                                    ToTensorV2(transpose_mask=False)])
        self.transform_aug = A.Compose([A.augmentations.transforms.ChannelShuffle(p=0.1),
                                        A.RandomBrightnessContrast(p=0.1)])
        # pairs of opposite movement directions fed to the neural engine
        self.d = {0: [1,5], 1: [2,6], 2: [3,7], 3: [4,8]}
    def __len__(self):
        return len(self.df)
    def __getitem__(self, idx):
        img = cv2.imread(self.df[idx][0])[:,:,::-1]  # BGR -> RGB
        target = self.df[idx][1]
        if self.augs:
            # uniform choice over the 8 dihedral augmentations
            aug = np.random.choice(np.arange(8), p=np.array([0.125]*8))
            img = apply_aug(img, aug)
            # neural-engine augmentation, applied with probability 0.5
            if np.random.randint(10)<5:
                img = self.transform(image=img)['image']
                dd = np.random.randint(4)
                n = np.random.randint(8)+2
                # move "forward then back" with opposite directions so content stays put
                img = neural_aug(neural_engine,img,self.d[dd][0],0,1,n)
                img = neural_aug(neural_engine,img,self.d[dd][1],0,1,n)
                # back from [-1, 1] tensor to uint8-range HWC numpy
                img = (img.permute(1,2,0)+1)/2
                img = img.numpy()*255
            random_angle = np.random.randint(90)
            img = rotate_image(img, random_angle)
            img = self.transform_aug(image=img)['image']
        img = self.transform(image=img)['image']
        return img, target
```
## Model
```
class StoneClassifier(nn.Module):
    """Small CNN classifying 24x24 RGB crops as stone / no-stone.

    forward() returns softmax probabilities of shape (batch, 2).
    NOTE(review): downstream training feeds this output to
    nn.CrossEntropyLoss, which applies log-softmax internally — passing
    probabilities instead of logits is unusual; confirm this is intended.
    """

    def __init__(self):
        super().__init__()
        # three stride-2 convolutions: spatial size 24 -> 12 -> 6 -> 3
        self.conv1 = nn.Conv2d(3, 8, 3, 2, 1)
        self.conv2 = nn.Conv2d(8, 16, 3, 2, 1)
        self.conv3 = nn.Conv2d(16, 32, 3, 2, 1)
        self.fc1 = nn.Linear(32 * 3 * 3, 128)
        self.fc3 = nn.Linear(128, 2)

    def forward(self, x):
        for conv in (self.conv1, self.conv2, self.conv3):
            x = F.relu(conv(x))
        x = torch.flatten(x, 1)
        x = F.relu(self.fc1(x))
        return F.softmax(self.fc3(x), dim=1)
```
## Train
```
def train_model(model, train_iterator, val_iterator, loss_func, optimizer, sheduler, params):
    """Train/validate loop for the stone classifier.

    Appends per-epoch losses and macro-F1 scores to the module-level history
    lists (trainloss_ts, trainmetr_ts, valloss_ts, valmetr_ts) and saves the
    weights with the best validation macro-F1 to 'laggg_stone_classifier.pth'.
    Predictions are thresholded at P(stone) > 0.95 for the F1 computation.
    """
    best_metr = 0
    # crop each frame to the central 24x24 region the classifier expects
    reward_frame_transform = torchvision.transforms.Compose([torchvision.transforms.CenterCrop(24)])
    for i in range(params['EPOCHS']):
        #===========TRAIN=============================
        time.sleep(0.2)  # small pause so progress printing flushes between phases
        model.train(True)
        train_loss = 0.0
        y_pred = []
        y_true = []
        for x in enumerate(train_iterator): #tqdm(train_iterator)
            img = x[1][0].to(params['DEVICE'])
            target = x[1][1].to(params['DEVICE'])
            img = reward_frame_transform(img)
            optimizer.zero_grad()
            output = model(img)
            loss = loss_func(output, target)
            train_loss += loss.item()
            loss.backward()
            optimizer.step()
            # hard 0.95 threshold on the positive-class probability
            y_pred.extend((output[:,1]>0.95).int().detach().cpu().tolist())
            y_true.extend(target.detach().cpu().tolist())
        f1_macro = round(f1_score(y_true, y_pred, average='macro'),4)
        train_loss = round(train_loss / len(train_iterator),4)
        trainloss_ts.append(train_loss)
        trainmetr_ts.append(f1_macro)
        current_lr = optimizer.param_groups[0]['lr']
        sheduler.step()
        #===========VAL================================
        time.sleep(0.2)
        model.train(False)
        val_loss = 0.0
        y_pred = []
        y_true = []
        for x in enumerate(val_iterator):
            img = x[1][0].to(params['DEVICE'])
            target = x[1][1].to(params['DEVICE'])
            img = reward_frame_transform(img)
            with torch.no_grad():
                output = model(img)
            loss = loss_func(output, target)
            val_loss += loss.item()
            y_pred.extend((output[:,1]>0.95).int().detach().cpu().tolist())
            y_true.extend(target.detach().cpu().tolist())
        f1_macro = round(f1_score(y_true, y_pred, average='macro'),4)
        val_loss = round(val_loss / len(val_iterator),4)
        valloss_ts.append(val_loss)
        valmetr_ts.append(f1_macro)
        # checkpoint on best validation macro-F1
        if f1_macro>best_metr:
            best_metr = f1_macro
            torch.save(model.state_dict(), 'laggg_stone_classifier.pth')
        #==========PRINT===========================
        print(f'{i+1}/{params["EPOCHS"]}',
              'lr:',current_lr,'|',
              'train_loss:',trainloss_ts[-1],'|',
              'val_loss:',valloss_ts[-1],'|',
              'train_metr:',trainmetr_ts[-1],'|',
              'val_metr:',valmetr_ts[-1])
# Training setup: hyperparameters, data loaders, model, optimiser, LR schedule.
params = {'EPOCHS': 30,
          'DEVICE': 'cuda:0',
          'BATCH': 8}
train_data = Stone_Dataset(df_train.values, True)
train_dataloader = DataLoader(train_data, batch_size=params['BATCH'], shuffle=True)
test_data = Stone_Dataset(df_val.values, False)
test_dataloader = DataLoader(test_data, batch_size=params['BATCH'], shuffle=True)
# Sanity-check one batch. NOTE: `iter(...).next()` is the removed Python-2
# spelling and raises AttributeError on Python 3 / current PyTorch; the
# builtin next() is the portable form.
img, target = next(iter(train_dataloader))
print(target)
plot_samples_on_epoch((img+1)/2)
model = StoneClassifier().to(params['DEVICE'])
model.load_state_dict(torch.load('laggg_stone_classifier_v1.pth'))
model = model.to(params['DEVICE'])
criterion = nn.CrossEntropyLoss()
opt = optim.Adam(model.parameters(), lr=1e-3)
scheduler = optim.lr_scheduler.StepLR(optimizer=opt, step_size=10, gamma=0.1)
# history buffers consumed by train_model and the loss/metric plots below
trainloss_ts = []
trainmetr_ts = []
valloss_ts = []
valmetr_ts = []
train_model(model, train_dataloader, test_dataloader,
            criterion, opt, scheduler, params)
plt.figure(figsize=(10,3));
plt.plot(trainloss_ts, label='train', linewidth=2)
plt.plot(valloss_ts, label='val', linewidth=2)
plt.title('CE_loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend()
plt.grid()
plt.show();
plt.figure(figsize=(10,3));
plt.plot(trainmetr_ts, label='train', linewidth=2)
plt.plot(valmetr_ts, label='val', linewidth=2)
plt.title('F1_macro')
plt.ylabel('metric')
plt.xlabel('epoch')
plt.legend()
plt.grid()
plt.show();
```
## Validate
```
params = {'EPOCHS': 30,
'DEVICE': 'cuda:0',
'BATCH': 8}
def validate(model, tresh=0.5, confm_normalize=True):
    """Evaluate `model` on the global test_dataloader and plot a confusion matrix.

    tresh: probability threshold on the positive (stone) class.
    Prints both accuracy and macro-F1 (the original computed accuracy into
    `val_acc` but never reported it), then renders the confusion matrix.
    """
    y_pred = []
    y_true = []
    model.eval()
    # crop frames to the central 24x24 region the classifier expects
    reward_frame_transform = torchvision.transforms.Compose([torchvision.transforms.CenterCrop(24)])
    for x in enumerate(test_dataloader):
        img = x[1][0].to(params['DEVICE'])
        target = x[1][1]
        y_true.extend(target.tolist())
        img = reward_frame_transform(img)
        with torch.no_grad():
            r = model(img)[:,1]
        r = (r>tresh).int().detach().cpu().tolist()
        y_pred.extend(r)
    val_acc = round(accuracy_score(y_true, y_pred),4)
    val_f1 = round(f1_score(y_true, y_pred, average='macro'),4)
    print('accuracy:', val_acc)
    print('f1 macro:', val_f1)
    plot_confusion_matrix(y_true, y_pred, range(2), normalize=confm_normalize)
model2 = StoneClassifier()
model2.load_state_dict(torch.load('dfomin_stone_classifier_v2.pth'))
model2 = model2.to(params['DEVICE'])
model3 = StoneClassifier()
model3.load_state_dict(torch.load('laggg_stone_classifier_v1.pth'))
model3 = model3.to(params['DEVICE'])
model1 = StoneClassifier()
model1.load_state_dict(torch.load('laggg_stone_classifier_v2.pth'))
model1 = model1.to(params['DEVICE'])
validate(model2, tresh=0.95)
validate(model3, tresh=0.95)
validate(model1, tresh=0.95)
```
| github_jupyter |
```
from pathlib import Path
import os
import pandas as pd
import nccid_cleaning.etl as etl
from nccid_cleaning import clean_data_df, patient_df_pipeline
```
This notebook can be used to generate CSV files containing patient clinical data, and image metadata for each patient and image file within the NCCID data.
To use these tools you need to provide a `BASE_PATH` that points to the location of the data that has been pulled from the NCCID S3 bucket, where your local directory structure should match the original S3 structure. If you have split the data into training/test/validation sets, each subdirectory should have the same structure as the original S3 bucket and the below pipeline should be run separately for each of the dataset splits.
You can set the local path to your NCCID data below by changing the `DEFAULT_PATH` variable or alternatively set as an environment variable, `NCCID_DATA_DIR` in e.g., `.bashrc`.
```
# Edit this to update your local NCCID data path
DEFAULT_PATH = "/project/data/training"
BASE_PATH = Path(os.getenv("NCCID_DATA_DIR", DEFAULT_PATH))
print(BASE_PATH)
```
## Imaging Metadata
For the imaging metadata, a separate CSV is generated for each imaging modality: X-ray, CT, MRI. Three steps are performed:
<l>
<li> `select_image_files` - traverses the directory tree finding all files of the imaging modality. For X-ray it is recommended to set `select_all = True` to process all available X-ray files. Whereas, for the 3D modalities, CT and MRI, `select_first = True` is recommended to select only the first file of each imaging volume, to speed up run time and reduce redundancy of information. </li>
<li> `ingest_dicom_jsons` - reads the DICOM header information for each file. </li>
<li> `pydicom_to_df` - converts the DICOM metadata into a pandas DataFrame where the rows are images and columns are the DICOM attributes.
</l> <br>
The resulting DataFrames are saved as CSV files in `data/`
```
# subdirectories
XRAY_SUBDIR = "xray-metadata"
CT_SUBDIR = "ct-metadata"
MRI_SUBDIR = "mri-metadata"
# 1. finding image file lists within the subdirs
xray_files = etl.select_image_files(BASE_PATH / XRAY_SUBDIR, select_all=True)
ct_files = etl.select_image_files(BASE_PATH / CT_SUBDIR, select_first=True)
mri_files = etl.select_image_files(BASE_PATH / MRI_SUBDIR, select_first=True)
# 2. process image metadata
xray_datasets = etl.ingest_dicom_jsons(xray_files)
ct_datasets = etl.ingest_dicom_jsons(ct_files)
mri_datasets = etl.ingest_dicom_jsons(mri_files)
# 3. converting to DataFrame
xrays = etl.pydicom_to_df(xray_datasets)
cts = etl.pydicom_to_df(ct_datasets)
mris = etl.pydicom_to_df(mri_datasets)
# check structure of DFs
xrays.head()
# Save as csv
xrays.to_csv("data/xrays.csv")
cts.to_csv("data/cts.csv")
mris.to_csv("data/mris.csv")
```
## Patient Clinical Data
For patient clinical data, the most recent <b>data</b> file (for COVID-positive) or <b>status</b> file (for COVID-negative) is parsed for each patient in the directory tree. The resulting DataFrame is generated using `patient_jsons_to_df`, where rows are patients and columns are data fields. <br>
Three fields that are not in the original jsons files are included in the DataFrame:
<l>
<li> `filename_earliest_date` - the earliest data/status file present for the patient. </li>
<li> `filename_latest_date` - latest data/status file present for the patient. This is the file from which the rest of the patient's data has been pulled. </li>
<li> `filename_covid_status` - indicates if the patient is in the COVID-positive or COVID-negative cohort, based on whether they have ever been submitted with a <b>data</b> file (which is only present for positive patients). </li>
</l>
```
PATIENT_SUBDIR = "data"
# process patient clinical data
patient_files = list(os.walk(BASE_PATH / PATIENT_SUBDIR))
patients = etl.patient_jsons_to_df(patient_files)
patients.head()
```
### Clean and enrich
The cleaning pipeline can be run on the resulting patients DataFrame to improve quality. In addition, missing values in the patient DataFrame for Sex and Age, can be filled using the DICOM image headers. This step generates two new columns `sex_update` and `age_update`, from the cleaned columns `sex`, `age`.
```
# cleaning
patients = clean_data_df(patients, patient_df_pipeline)
# enriching
images = [xrays, cts, mris] # list all image DFs
patients = etl.patient_data_dicom_update(patients, images)
patients.head()
print(f"Sex Unknowns before merging with dicom: {(patients['sex']=='Unknown').sum()}")
print(f"Sex Unknowns after merging with dicom: {(patients['sex_update']=='Unknown').sum()}")
print("------")
print(f"Age NaNs before merging with dicom: {patients['age'].isnull().sum()}")
print(f"Age New after merging with dicom: {patients['age_update'].isnull().sum()}")
# save to csv
patients.to_csv("data/patients.csv")
```
| github_jupyter |
## KNN Classifier
The model predicts the severity of the landslide (or if there will even be one) within the next 2 days, based on weather data from the past 5 days.
Binary Classification yielded a maximum accuracy of 77.53%. Severity Classification (multiple classes) was around 56%.
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
import sklearn
from sklearn.utils import shuffle
import pickle
# df = pd.read_csv("full_dataset_v1.csv")
# df = pd.read_csv("/Users/ishaanjavali/Documents/Science Fair/2020/Code/API/full_dataset_v1.csv")
df = pd.read_csv("dataset.csv")
len(df)
df['severity'].value_counts()
# filter by severity. na is for non-landslide data
# df = df[df['severity'].isin(["medium", "small", "large", "very_large", "na"])]
# Remove -1 slopes
# df = df.loc[~(df.slope == -1)]
print(len(df))
print(df.forest.value_counts())
df['severity'].value_counts()
df = shuffle(df)
df.reset_index(inplace=True, drop=True)
print(len(df))
df
df = df.query("(landslide == 0) | (landslide == 1 & (severity == 'medium' | severity == 'small' | severity == 'large' | severity=='very_large'))")
df
X = df.copy()
y = []
types = set()
for idx, row in X.iterrows():
if row.landslide == 0:
y.append(0)
elif row.severity == 'small':
y.append(1)
elif row.severity == 'medium':
y.append(2)
else:
y.append(3)
types.add(y[-1])
print(types)
columns=[]
for i in range(9, 4, -1):
columns.append('humidity' + str(i))
columns.append('ARI' + str(i))
columns.append('slope')
columns.append('forest2')
columns.append('osm')
X = X[columns]
X
```
## Scaling
```
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2)
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
cnt1 = 0
cnt2 = 0
for i in y_train:
if i == 1:
cnt1 += 1
else:
cnt2 += 1
print(cnt1,cnt2)
```
## Prediction
```
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=17)
knn.fit(X_train, y_train)
from sklearn.metrics import accuracy_score
pred = knn.predict(X_train)
# class_probabilities = knn.predict_proba()
print("ACCURACY:", accuracy_score(pred, y_train))
best = 1
highest = 0
for i in range(1, 130):
knn = KNeighborsClassifier(n_neighbors=i)
knn.fit(X_train, y_train)
pred = knn.predict(X_test)
score = round(accuracy_score(pred, y_test)*10000)/100
print("k =", i, " ACCURACY:", score)
if score > highest:
highest = score
best = i
# Binary: k = 87, 58.9
# 62.4 na/landslide
print("Best k:", best, highest)
```
## Confusion Matrix
```
knn = KNeighborsClassifier(n_neighbors=best)
knn.fit(X_train, y_train)
pred = knn.predict(X_test)
print(accuracy_score(pred, y_test))
print("Best k:", best, highest)
from sklearn.metrics import confusion_matrix
array = confusion_matrix(y_test, pred)
array
import seaborn as sn
import pandas as pd
import matplotlib.pyplot as plt
binary = False
if binary:
df_cm = pd.DataFrame(array, index = [i for i in ["No", "Yes"]],
columns = [i for i in ["No", "Yes"]])
else:
df_cm = pd.DataFrame(array, index = [i for i in ["None", "Small", "Medium", "Large"]],
columns = [i for i in ["None", "Small", "Medium", "Large"]])
plt.figure(figsize = (10,7))
ax = sn.heatmap(df_cm, cmap="Blues", annot=True, annot_kws={"size":35}, fmt='g')
ax.tick_params(axis='both', which='major', labelsize=23)
plt.xlabel('Predicted', fontsize = 35)
# plt.title("KNN Confusion Matrix", fontsize = 50)
plt.ylabel('Actual', fontsize = 40)
plt.savefig("KNN Multi Matrix", bbox_inches="tight")
plt.show()
```
| github_jupyter |
<h2>Team Members</h2>
<h5>Anish Singh 001388384</h5>
<h5>Isha Madan 001393098</h5>
<h5>Neel Shah 001029882 </h5>
<h5>Vardhana Bhatt 001064528</h5>
<h1>Credit Card Fraud Detection</h1>
Detecting fraud transactions is of great importance for any credit card company.
Credit card fraud detection is a typical classification problem and we have tried to focus on analyzing, pre processing and deployment of multiple algorithms.
The goal is to build a classifier that tells if a transaction is a fraud or not.
<img src ="images/creditcard.jpg" width ="1500" height="600"></img>
**Dataset Context¶**
*The data contains 284,807 credit card transactions with 492 fraudulent.*
*Everything except the time and amount has been reduced by a Principle Component Analysis (PCA) for privacy concerns. Features V1, V2, ... V28 are the principal components obtained with PCA, the only features which have not been transformed with PCA are 'Time' and 'Amount'.*
*'Time' contains the seconds elapsed between each transaction.*
* 'Amount' is the transaction Amount*
* 'Class' is the response variable and it takes value 1 in case of fraud and 0
Before applying a PCA transformation, features need to be scaled. So features V1, V2, ... V28 have already been scaled.
**Steps for the Project**
This project will use several different machine learning algorithms. We have also done Hyperparameter tuning and calculated the accuracy for each of them.
1: Data Exploration
2: Data cleaning Visualization
3: Scaling the Dataset
4: Decision Tree Classifier
5: Random Forest Classifier
6: K-Means Clustering
7: DNN
8: DNN with SMOTE
9: Conclusion
**Importing Necessary Libraries**
```
# Getting the necessary libraries
import numpy as np
import pandas as pd
import sklearn
import scipy
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import classification_report,accuracy_score
from sklearn.ensemble import IsolationForest
from sklearn.neighbors import LocalOutlierFactor
from sklearn.svm import OneClassSVM
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from pylab import rcParams
from sklearn.ensemble import RandomForestClassifier
LABELS = ["Normal", "Fraud"]
import itertools
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import confusion_matrix, accuracy_score, precision_score, f1_score, recall_score
from sklearn.model_selection import train_test_split,GridSearchCV
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from imblearn.over_sampling import SMOTE
from collections import Counter
```
**Getting the Dataset**
```
# Mount Google Drive (Colab-only; prompts for authorization).
from google.colab import drive
drive.mount('/content/gdrive')
# Reading the CSV file
# NOTE(review): the Drive is mounted at /content/gdrive but the file is read
# from the filesystem root ('/creditcard.csv') -- confirm this path matches
# where the dataset actually lives in the runtime.
data = pd.read_csv('/creditcard.csv',sep=',')
data.head()
# To Check the shape of the data
data.shape
# To get some insight about the dataset (dtypes, non-null counts, memory)
data.info()
data.head(3)
```
**Exploratory Data Analysis**
```
# To check if the data contains Null values or Not
data.isnull().values.any()
```
**Trying to observe how the distrubution of the Data looks like**
```
count_classes = pd.value_counts(data['Class'], sort = True)
count_classes.plot(kind = 'bar', rot=0)
plt.title("Transaction Class Distribution")
plt.xticks(range(2), LABELS)
plt.xlabel("Class")
plt.ylabel("Frequency")
## Get the Fraud and the normal dataset
fraud = data[data['Class']==1]
normal = data[data['Class']==0]
print(fraud.shape,normal.shape)
```
There are 492 Fraud Transactions and 284,315 Normal Transactions
**Analyzed some more information from the transaction data**
```
fraud.Amount.describe()
normal.Amount.describe()
```
**Summary:**
*The mean transaction amount among fraud cases is 122 USD, and is 88 USD among non-fraud cases. The difference is statistically significant.*
```
f, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
f.suptitle('Amount per transaction by class')
bins = 50
ax1.hist(fraud.Amount, bins = bins)
ax1.set_title('Fraud')
ax2.hist(normal.Amount, bins = bins)
ax2.set_title('Normal')
plt.xlabel('Amount ($)')
plt.ylabel('Number of Transactions')
plt.xlim((0, 20000))
plt.yscale('log')
plt.show();
```
**Checked how fraudulent transactions occur more often during certain time frame**
```
# Scatter of transaction amount vs time, split by class.
# Fix: the original plotted the full `data` frame on BOTH axes, so the
# 'Fraud' and 'Normal' panels were identical; plot the per-class subsets.
# Also drop the stray `plt.figure(figsize=(80,80))` that only created a
# huge blank figure after the real plot.
f, (ax1, ax2) = plt.subplots(2, 1, sharex=True, figsize=(12, 8))
f.suptitle('Time of transaction vs Amount by class')
ax1.scatter(fraud.Time, fraud.Amount)
ax1.set_title('Fraud')
ax2.scatter(normal.Time, normal.Amount)
ax2.set_title('Normal')
plt.xlabel('Time (in Seconds)')
plt.ylabel('Amount')
plt.show()
# Plot how fraud and non-fraud cases are scattered in the V1/V2 plane
plt.scatter(data.loc[data['Class'] == 0]['V1'], data.loc[data['Class'] == 0]['V2'], label="Class #0", alpha=0.5, linewidth=0.15)
plt.scatter(data.loc[data['Class'] == 1]['V1'], data.loc[data['Class'] == 1]['V2'], label="Class #1", alpha=0.5, linewidth=0.15, c='r')
plt.legend()  # labels were passed but never rendered without a legend call
plt.show()
```
**Tried to check how amount and time are related to each other**
```
import seaborn as sns
fig, ax = plt.subplots(1, 2, figsize=(18,4))
# Plot the distribution of 'Time' feature
sns.distplot(data['Time'].values/(60*60), ax=ax[0], color='r')
ax[0].set_title('Distribution of Transaction Time', fontsize=14)
ax[0].set_xlim([min(data['Time'].values/(60*60)), max(data['Time'].values/(60*60))])
sns.distplot(data['Amount'].values, ax=ax[1], color='b')
ax[1].set_title('Distribution of Transaction Amount', fontsize=14)
ax[1].set_xlim([min(data['Amount'].values), max(data['Amount'].values)])
plt.show()
```
**Summary:**
Time: Most transactions happened in the daytime.
We should better scale these two skewed features.
**Transaction Hour**
```
# Plot of transactions in 48 hours
bins = np.linspace(0, 48, 48) #48 hours
plt.hist((normal.Time/(60*60)), bins, alpha=1, density=True, label='Non-Fraud')
plt.hist((fraud.Time/(60*60)), bins, alpha=0.6,density=True, label='Fraud')
plt.legend(loc='upper right')
plt.title("Percentage of transactions by hour")
plt.xlabel("Transaction time from first transaction in the dataset (hours)")
plt.ylabel("Percentage of transactions (%)")
plt.show()
```
**Checked the correlation between the features**
```
# Calculate pearson correlation coefficience
corr = data.corr()
# Plot heatmap of correlation
f, ax = plt.subplots(1, 1, figsize=(24,20))
sns.heatmap(corr, cmap='coolwarm_r', annot_kws={'size':20})
ax.set_title("Imbalanced Correlation Matrix \n (don't use for reference)", fontsize=24)
```
**We can observe that there is no major correlation between columns**
**Scale data**
Here we are scaling the amount column:
```
data.head()
# Standardize the raw 'Amount' column to zero mean / unit variance so it is
# on a comparable scale with the PCA components V1..V28.
scaler = StandardScaler()
# NOTE(review): `df = data` is an alias, NOT a copy -- the in-place drop
# below also removes 'Amount' from `data`. Later cells rely on this shared
# state, so do not change this to `data.copy()` without auditing them.
df=data
df['NormalizedAmount'] = scaler.fit_transform(df['Amount'].values.reshape(-1, 1))
df.drop(['Amount'], inplace=True, axis = 1)
df.head(3)
```
**Split data**
```
# Drop 'Time' so the model sees only the PCA components plus the normalized
# amount. Remember `df` aliases `data`, so `data` loses 'Time' here too.
df.drop(['Time'], inplace=True, axis = 1)
y = data['Class']
X = data.drop(['Class'], axis = 1)
y.head()
# Hold out 30% for testing; fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 0)
X_train.shape, X_test.shape
```
**Decision Tree**
Fit model
```
decision_tree_model = DecisionTreeClassifier()
decision_tree_model.fit(X_train, y_train)
```
**Evaluate model**
```
y_pred = decision_tree_model.predict(X_test)
decision_tree_model.score(X_test, y_test)
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion Matrix',
                          cmap=plt.cm.Blues):
    """Render a confusion matrix as an annotated image.

    Parameters
    ----------
    cm : array of shape (n_classes, n_classes)
        Confusion-matrix counts.
    classes : sequence
        Tick labels for both axes.
    normalize : bool
        If True, convert each row to fractions before plotting.
    title : str
        Plot title.
    cmap : matplotlib colormap
        Colormap for the image.
    """
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    ticks = np.arange(len(classes))
    plt.xticks(ticks, classes, rotation=0)
    plt.yticks(ticks, classes)
    # Annotate every cell; flip text color on the bright half of the colormap.
    cell_fmt = '.2f' if normalize else 'd'
    cutoff = cm.max() / 2.
    for row in range(cm.shape[0]):
        for col in range(cm.shape[1]):
            plt.text(col, row, format(cm[row, col], cell_fmt),
                     horizontalalignment="center",
                     color="white" if cm[row, col] > cutoff else "black")
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.tight_layout()
cm = confusion_matrix(y_test, y_pred.round())
print(cm)
print(accuracy_score(y_test, y_pred.round()))
print(precision_score(y_test, y_pred.round()))
print(recall_score(y_test, y_pred.round()))
print(f1_score(y_test, y_pred.round()))
plot_confusion_matrix(cm, classes = [0, 1], title = 'Confusion Matrix - Test dataset')
```
**Tried to tune some hyperparameters using the GridSearchCV algorithm.**
```
# Tune two hyperparameters; GridSearchCV tries every combination of the
# listed values with 5-fold cross-validation.
grid_param = {
    'criterion': ['gini', 'entropy'],
    'max_depth' : range(2,5,1),
}
# n_jobs=-1 uses all CPU cores for the cross-validated search.
grid_search = GridSearchCV(estimator=decision_tree_model,
                           param_grid=grid_param,
                           cv=5,
                           n_jobs =-1)
grid_search.fit(X_train, y_train)
best_parameters = grid_search.best_params_
print(best_parameters)
grid_search.best_score_
# NOTE(review): the values below are hard-coded rather than taken from
# `best_parameters` (and add splitter='random', which was not searched) --
# confirm they match the printed search result before trusting the comparison.
clf = DecisionTreeClassifier(criterion = 'entropy', max_depth =4, min_samples_leaf= 1, min_samples_split= 2, splitter ='random')
clf.fit(X_train, y_train)
clf.score(X_test, y_test)
```
**This shows how Grid Search CV is useful for choosing the right parameters**
**Random Forest modelling**
Fit model
```
rf_model = RandomForestClassifier(n_estimators = 100)
rf_model.fit(X_train, y_train)
```
**Evaluate model**
```
y_pred = rf_model.predict(X_test)
rf_model.score(X_test, y_test)
cm1 = confusion_matrix( y_test, y_pred.round())
print(cm1)
plot_confusion_matrix(cm, classes = [0, 1], title = 'Confusion Matrix - Test dataset')
print(accuracy_score(y_test, y_pred.round()))
print(precision_score(y_test, y_pred.round()))
print(recall_score(y_test, y_pred.round()))
print(f1_score(y_test, y_pred.round()))
```
**K-Means Clustering**
```
# Split the data into train set and test set
train,test = train_test_split(df,test_size=0.3,random_state=0)
# Get the arrays of features and labels in train dataset
features_train = train.drop(['Class'],axis=1)
features_train = features_train.values
labels_train = pd.DataFrame(train[['Class']])
labels_train = labels_train.values
# Get the arrays of features and labels in test dataset
features_test = test.drop(['Class'],axis=1)
features_test = features_test.values
labels_test = pd.DataFrame(test[["Class"]])
labels_test = labels_test.values
# Normalize the features in both train and test dataset
from sklearn.preprocessing import normalize
features_train = normalize(features_train)
features_test = normalize(features_test)
from sklearn.cluster import KMeans
from sklearn.metrics import confusion_matrix
model = KMeans(n_clusters=2,random_state=0)
model.fit(features_train)
labels_train_predicted = model.predict(features_train)
labels_test_predicted = model.predict(features_test)
from sklearn.metrics import confusion_matrix, precision_score, recall_score, accuracy_score,f1_score
# Calculating confusion matrix for kmeans
print('Confusion Matrix:\n',confusion_matrix(labels_test,labels_test_predicted))
c=confusion_matrix(labels_test,labels_test_predicted)
# Scoring kmeans
print('kmeans_precison_score:', precision_score(labels_test,labels_test_predicted))
print('kmeans_recall_score:', recall_score(labels_test,labels_test_predicted))
print('kmeans_accuracy_score:', accuracy_score(labels_test,labels_test_predicted))
print('kmeans_f1_score:',f1_score(labels_test,labels_test_predicted))
plot_confusion_matrix(c, classes = [0, 1], title = 'Confusion Matrix - Test dataset')
```
We will use Elbow Method to find the best value of K
```
'''1. Elbow method'''
# Fit KMeans and calculate SSE for each *k*
ss_error = {}
for k in range(1, 20):
k_means = KMeans(n_clusters=k, random_state=1)
k_means.fit(df)
ss_error[k] = k_means.inertia_
# Make elbow plot
plt.figure(figsize = (14,10))
plt.title('Elbow plot')
plt.xlabel('Value of k')
plt.ylabel('Sum of squared error')
sns.pointplot(x=list(ss_error.keys()), y=list(ss_error.values()))
```
We will select k=16 as the curve starts becoming stable from there
```
from sklearn.cluster import KMeans
from sklearn.metrics import confusion_matrix
model = KMeans(n_clusters=16,random_state=0)
model.fit(features_train)
labels_train_predicted = model.predict(features_train)
labels_test_predicted = model.predict(features_test)
```
**Lets check Silhoutte score at different values of K**
**Deep neural network**
**Create model**
```
model = Sequential()
#add input layer
model.add(Dense(input_dim = 29, units = 16, activation = 'relu'))
#add 2nd hidden layer
model.add(Dense(units = 24, activation = 'relu'))
#add dropout layer
model.add(Dropout(0.5))
#add 3rd hidden layer
model.add(Dense(units = 20, activation = 'relu'))
#add 4th hidden layer
model.add(Dense(units = 24, activation = 'relu'))
#add ouptut layer
model.add(Dense(units = 1, activation = 'sigmoid'))
model.summary()
```
**Fit model**
```
model.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
history=model.fit(X_train, y_train, batch_size = 15, epochs = 50)
history.history
```
**Model Evaluation**
```
score = model.evaluate(X_test, y_test)
print(score)
y_pred = model.predict(X_test)
y_expect = pd.DataFrame(y_test)
cm = confusion_matrix(y_expect, y_pred.round())
plot_confusion_matrix(cm, classes = (0, 1))
plt.show()
print(accuracy_score(y_test, y_pred.round()))
print(precision_score(y_test, y_pred.round()))
print(recall_score(y_test, y_pred.round()))
print(f1_score(y_test, y_pred.round()))
```
**SMOTE & Model test**
```
# Oversample the minority (fraud) class with SMOTE so both classes are balanced.
# Fix: `fit_sample` was deprecated in imbalanced-learn 0.4 and removed in 0.6;
# `fit_resample` is the supported name.
X_resample, y_resample = SMOTE().fit_resample(X, y)
X_resample.shape
counter = Counter(y_resample)  # class counts after resampling -- should be equal
print(counter)
X_train, X_test, y_train, y_test = train_test_split(X_resample, y_resample, test_size = 0.3)
# Convert to plain arrays for Keras.
X_train = np.array(X_train)
X_test = np.array(X_test)
y_train = np.array(y_train)
y_test = np.array(y_test)
```
**Model test**
```
model.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
model.fit(X_train, y_train, batch_size = 15, epochs = 10)
y_pred = model.predict(X_test)
y_expect = pd.DataFrame(y_test)
cm = confusion_matrix(y_expect, y_pred.round())
plot_confusion_matrix(cm, classes = (0, 1))
plt.show()
print(accuracy_score(y_test, y_pred.round()))
print(precision_score(y_test, y_pred.round()))
print(recall_score(y_test, y_pred.round()))
print(f1_score(y_test, y_pred.round()))
```
**Conclusion** :
**As a summary, we created 5 models, Decision Tree, Random Forest, Kmeans, DNN, DNN with SMOTE. As shown in the Table below, DNN with SMOTE shows better precision and F1 score.**
<table style="width:100%">
<tr>
<th>Algorithm </th>
<th>Precision %</th>
<th>Recall %</th>
<th>F1 %</th>
</tr>
<tr>
<td>Decision Tree</td>
<td>79 %</td>
<td>76 % </td>
<td>77 % </td>
</tr>
<tr>
<td>Random Forest </td>
<td>95 % </td>
<td>78 % </td>
<td>86 % </td>
</tr>
<tr>
<td>K-Means</td>
<td>64 %</td>
<td>17 % </td>
<td>53 % </td>
</tr>
<tr>
<td>DNN</td>
<td>85 % </td>
<td>62 % </td>
<td>72 % </td>
</tr>
<tr>
<td>DNN with SMOTE </td>
<td>99% </td>
<td>99% </td>
<td>99% </td>
</tr>
</table>
*Hence, we will choose DNN with SMOTE as it performs the best.*
---
| github_jupyter |
# RealSim-IFS Tutorial: Generating synthetic MaNGA data
Welcome to the RealSim-IFS tutorial. In this tutorial, we will generate some MaNGA-like synthetic kinematic data using a line-of-sight (LOS) velocity cube for a disk galaxy from the TNG50-1 simulation. The main requirements of RealSim-IFS are the `astropy`, `numpy`, and `progress` packages. Please install these modules before starting this tutorial.
```
import os,sys,time
from realsim_ifs import ifs as rifs
realsim_dir = os.path.dirname(rifs.__file__)
losvd_dir = f'{realsim_dir}/Data/LOSVD'
```
The data used in this tutorial is a LOS velocity distribution cube with 4 km/s velocity resolution in a [-500,500] km/s range and physical spatial resolution of 200 pc/pixel (assuming $h=0.6774$). Plots of mass surface density, velocity, and velocity dispersion are shown below for a random line-of-sight. These calculations assume single-component gaussian velocity distributions -- which is generally a poor assumption but is illustrative. **Input must be organized such that the first axis is the velocity/wavelength dimension and the second and third are the spatial axes.**
### 1. Idealized cubes LOSVD cubes
Use the `losvd_moments` convenience function to compute the first three LOSVD moments (total mass (or intensity), velocity, and velocity dispersion.
```
from astropy.io import fits
import matplotlib.pyplot as plt
import numpy as np
losvd_name = f'{losvd_dir}/losvd_TNG50-1_091_119450_stars_v0__32.fits'
fig,axarr = plt.subplots(1,3,figsize=(15,5))
with fits.open(losvd_name) as hdu:
losvd = hdu[0].data.transpose(2,0,1) # <-- note transpose
print(f'LOSVD cube shape: {losvd.shape}')
hdr = hdu[0].header
kpc_per_pixel = hdr['fov_kpc']/hdr['npixel']
vlim,delv,nvels = hdr['vlim'],hdr['delv'],hdr['nvaxel']
mass_density,vbar,vdisp = rifs.losvd_moments(losvd,kpc_per_pixel,
vlim,delv,nvels)
axarr[0].imshow(mass_density,cmap='bone',vmin=6,vmax=10)
axarr[1].imshow(vbar,cmap='jet',vmin=-150,vmax=150)
axarr[2].imshow(vdisp,cmap='coolwarm',vmin=50,vmax=100)
```
This disk-dominated galaxy is rotating rapidly and has a small but prominent kinematic bulge. Halo stars also have high velocity dispersions. Meanwhile, the disk of the galaxy is dynamically cold.
Note that the velocities and velocity dispersions are defined everywhere on the grid -- even where the masses are arbitrarily small. This is a result of the smoothing of discrete point particles (which represent unresolved stellar populations or the centres of mass of gas parcels) onto the Cartesian spatial grid.
### 2. Setting up the MaNGA observation
Start by generating the atmospheric seeing (atmospheric Point Spread Function; PSF) conditions using the `manga_seeing` function.
```
seed = 12345
seeing_arcsec = rifs.manga_seeing(seed=seed)
print(f'PSF FWHM: {seeing_arcsec:.3f} arcsec')
```
We will mock-observe this galaxy at redshift $z=0.04$ in this example -- which is the median redshift of the MaNGA target list. Alternatively, the `manga_redshift` can also be used, which draws from the MaNGA sample redshift distribution.
```
redshift = 0.04
# redshift = rifs.manga_redshift(seed)
print(f'Redshift: {redshift:0.3f}')
```
### 3. Seeing-convolved data cube
Before reaching any instrumental components, light is blurred by the atmospheric PSF. Since we are assuming that light tracks mass in our mass-weighted LOSVD cubes, the atmospheric PSF is applied spatially in every velocity channel. Therefore, the next step is to convolve the cube with the atmospheric PSF. For this, we use the general `apply_seeing` function (not MaNGA specific). Since the cubes are in physical units and the PSF FHWM is in angular units, the function requires and `astropy.cosmology` instance (default Planck15), and the spatial characteristics of the data. It then uses the angular diameter distance at `redshift`, and the physical resolution of the image to convert the seeing to physical units. Finally, it does the convolution slice-by-slice.
```
from astropy.cosmology import Planck15 as cosmo
losvd_conv = rifs.apply_seeing(losvd,kpc_per_pixel,redshift,
seeing_model='manga',
seeing_fwhm_arcsec=seeing_arcsec,
cosmo=cosmo)
```
The `seeing_model` keyword has two options with `manga` as the default option. In the MaNGA seeing model, the seeing model is the superposition of two Gaussian components. See the doc-string for `apply_seeing` for more details.
The LOSVD moments derived from the seeing-convolved cube is shown below. The grid resolution and units of the cube are unchanged. Only the seeing has changed. So the same keyword arguments are used.
```
mass_density,vbar,vdisp = rifs.losvd_moments(losvd_conv,kpc_per_pixel,
vlim,delv,nvels)
fig,axarr = plt.subplots(1,3,figsize=(15,5))
axarr[0].imshow(mass_density,cmap='bone',vmin=6,vmax=10)
axarr[1].imshow(vbar,cmap='jet',vmin=-150,vmax=150)
axarr[2].imshow(vdisp,cmap='coolwarm',vmin=50,vmax=100)
```
*Note that in general some total mass (intensity) will be lost in the convolution as it is spread beyond the edge of the FOV.*
### 4. Choosing the IFU design
MaNGA has 6 different IFU designs (bundles). Each IFU design and its (dithered exposure) observing pattern can be reproduced with the `manga_ifu` function. The name of each design denotes the number of fibers in the corresponding bundle. MaNGA uses 3 dithered exposures for a complete footprint. In real observations, the dithered exposures would be taken multiple times. But since the astrometric precision for repeat exposures is on the milli-arcsec level, it is unnecessary to take more than 3 exposures for the synthetic data.
```
bundle_names = ['N7','N19','N37','N61','N91','N127']
fig,axarr = plt.subplots(2,3,figsize=(18,12))
axarr = axarr.flatten()
fig.subplots_adjust(hspace=0.2,wspace=0.2)
from matplotlib.patches import Circle
for i,bundle_name in enumerate(bundle_names):
ax = axarr[i]
ax.set_xlim(-20,20) # arcsec
ax.set_ylim(-20,20) # arcsec
ax.text(0.05,0.95,bundle_name,transform=ax.transAxes,
ha='left',va='top',fontsize=20)
(xc_arcsec,yc_arcsec),params = rifs.manga_ifu(bundle_name=bundle_name,
rotation_degrees=0.)
fiber_diameter_arcsec = params['fiber_diameter_arcsec']
core_diameter_arcsec = params['core_diameter_arcsec']
n_observations = params['n_observations']
for i_obs in range(n_observations):
xc_obs,yc_obs = xc_arcsec[:,i_obs],yc_arcsec[:,i_obs]
for xy in zip(xc_obs,yc_obs):
core = Circle(xy=xy, radius=core_diameter_arcsec/2,
transform=ax.transData, edgecolor='None',
facecolor='Grey', alpha=0.3)
ax.add_artist(core)
```
*Note that the N7 design is not generally used for observing and is reserved for calibrations.*
The `manga_ifu` function creates arrays which give the $x$ and $y$ coordinates of each fiber centroid in a MaNGA IFU design. Additionally, the `params` dictionary contains other important/useful properties of the fibers, design, and observing pattern. For example, the `'core_diameter_arcsec'` key gives the common diameter of each fiber core.
We now need to pick which IFU design is most suitable. MaNGA IFUs are assigned such that they cover a certain fraction of the galaxy light. We will pick the IFU which extends to 2.5 times the galaxy half-mass radius (stellar particle [partNum 4] SubhaloHalfMassRadType in the TNG group catalogues). Galaxies in the Secondary MaNGA target sample get this coverage. The FOV of the cube is 6 subhalo half-mass radii. So the best design is the one whose physical size at `redshift` is closest to 5 half-mass radii. In the example cube header, the `'fovunit'` keyword is `'rhalf'` and the `'fovsize'` keyword is 6.
```
# IFU footprints on the sky (arcsec), keyed by bundle name.
footprints = {'N19':12.5,'N37':17.5,'N61':22.5,'N91':27.5,'N127':32.5}
bundle_names = np.array(list(footprints.keys()))
bundle_footprints = np.array(list(footprints.values()))
# Target coverage: 2.5 half-mass radii (Secondary MaNGA sample coverage).
nrhalf = 2.5
# The cube FOV spans hdr['fovsize'] half-mass radii, so recover rhalf in kpc.
rhalf_kpc = hdr['fov_kpc']/hdr['fovsize']
print(f'Half-mass Radius: {rhalf_kpc:.3f} kpc')
from astropy.cosmology import Planck15 as cosmo
# Angular scale at the mock redshift (kpc per arcsec).
kpc_per_arcsec = cosmo.kpc_proper_per_arcmin(z=redshift).value/60.
rhalf_arcsec = rhalf_kpc / kpc_per_arcsec
# Minimum footprint: a diameter spanning +/- nrhalf half-mass radii.
fov_min = 2*nrhalf*rhalf_arcsec
print(f'Half-mass Radius:', f'2 x {nrhalf} x {rhalf_arcsec:.3f} = ',
      f'{fov_min:0.3f} arcsec at z={redshift:0.3f}')
# Find the IFU with the smallest footprint difference, then bump up to the
# first design that fully covers fov_min.
# NOTE(review): if fov_min exceeds the largest footprint (32.5 arcsec, e.g.
# at very low redshift), this while loop walks past the end of the array and
# raises IndexError -- confirm intended behaviour for such targets.
bundle_idx = np.argmin(np.abs(bundle_footprints-fov_min))
while bundle_footprints[bundle_idx]<fov_min:
    bundle_idx+=1
bundle_name = bundle_names[bundle_idx]
print(f'Optimized IFU design: {bundle_name}')
print(f'Footprint: {footprints[bundle_name]} arcsec')
```
### 5. Applying the selected IFU design
Start by setting up the fiber positions and parameters for this design. As mentioned earlier, the `manga_ifu` function creates arrays for the fiber coordinates and a `params` dictionary containing ancillary information about the observation pattern. Note that the core arrays are converted from arcseconds to cube pixel units in this step. Below, the IFU design is shown overlaid on the moment maps.
```
(xc_arcsec,yc_arcsec),params = rifs.manga_ifu(bundle_name=bundle_name,rotation_degrees=0.)
fiber_diameter_arcsec = params['fiber_diameter_arcsec']
core_diameter_arcsec = params['core_diameter_arcsec']
n_observations = params['n_observations']
# coordinate change to convert to pixel
arcsec_per_pixel = kpc_per_pixel/kpc_per_arcsec
xc_pixels = (xc_arcsec/arcsec_per_pixel+losvd_conv.shape[2]/2.).flatten()
yc_pixels = (yc_arcsec/arcsec_per_pixel+losvd_conv.shape[1]/2.).flatten()
core_diameter_pixels = core_diameter_arcsec/arcsec_per_pixel
fig,axarr = plt.subplots(1,3,figsize=(15,5))
axarr[0].imshow(mass_density,cmap='bone',vmin=6,vmax=10)
axarr[1].imshow(vbar,cmap='jet',vmin=-150,vmax=150)
axarr[2].imshow(vdisp,cmap='coolwarm',vmin=50,vmax=100)
for ax in axarr:
for xy in zip(xc_pixels,yc_pixels):
core = Circle(xy=xy, radius=core_diameter_pixels/2,
transform=ax.transData,edgecolor='white',
facecolor='None',lw=0.5)
ax.add_artist(core)
```
Now 'observe' the cube with the selected design using the generalized `ifu_observe` function. This function takes the seeing-convolved cube, and the pixel-coordinates and diameters of the IFU fibers.
```
core_arrays = rifs.ifu_observe(losvd_conv, xc_pixels, yc_pixels,
core_diameter_pixels)
print(core_arrays.shape)
```
The `ifu_observe` function essentially produces data in Row-Stacked Spectra (RSS) file format which will be familiar to MaNGA users. Each row corresponds to a single fiber and contains the line-of-sight velocity distribution captured by that fiber in pixels which are fully or partially within its aperture.
### 6. Spatial reconstruction of fiber measurements
The final step is to reconstruct the fiber measurements onto a Cartesian grid. The `ifu_to_grid` function handles this task. There are many options for this reconstruction which follow from various data reduction pipelines. MaNGA uses the Modified Shepard algorithm. Full details are in the docstring and the paper.
First, the output grid is set up. The largest MaNGA IFU design is 32.5 arcsec in diameter. So a (70,70) grid with 0.5 arcsec per spaxel is guaranteed to encompass all IFU designs. The output grid sizes should be optimized to each IFU design to prevent wasted storage. The core positions and diameters are converted to the coordinate system of this output grid. These new coordinates are then passed to the `ifu_to_grid` function alongside the core arrays from the previous step. The `use_gaussian_weights` keyword causes the function to use the Modified Shepard algorithm in the spatial reconstruction of the fiber intensities. The other option is the Inverse Drizzle Algorithm, which essentially reverses the functionality of `ifu_observe`. For details see the docstring and the paper.
```
manga_grid_dims = (70,70)
manga_arcsec_per_pixel = 0.5
manga_kpc_per_pixel = kpc_per_arcsec*manga_arcsec_per_pixel
manga_xc_pixels = xc_arcsec/manga_arcsec_per_pixel + manga_grid_dims[1]/2.
manga_yc_pixels = yc_arcsec/manga_arcsec_per_pixel + manga_grid_dims[0]/2.
manga_core_diameter_pixels = core_diameter_arcsec/manga_arcsec_per_pixel
losvd_manga,weight_map = rifs.ifu_to_grid(
core_arrays,
manga_xc_pixels,
manga_yc_pixels,
manga_core_diameter_pixels,
grid_dimensions_pixels=manga_grid_dims,
use_gaussian_weights=True,
use_broadcasting=True
)
losvd_manga[losvd_manga==0.]=np.nan
mass_density,vbar,vdisp = rifs.losvd_moments(losvd_manga, manga_kpc_per_pixel,
vlim, delv, nvels)
fig,axarr = plt.subplots(1,3,figsize=(15,5))
axarr[0].imshow(mass_density,cmap='bone',vmin=6,vmax=10)
axarr[1].imshow(vbar,cmap='jet',vmin=-150,vmax=150)
axarr[2].imshow(vdisp,cmap='coolwarm',vmin=50,vmax=100)
```
The resulting output cubes now incorporate the MaNGA observing strategy, footprint, and atmospheric point spread function. There are many other steps that could be added. For example, a line-spread function can be applied after Step 5 to incorporate realistic spectral resolution. Signal-to-noise considerations can be made using mass surface density as a proxy for signal and S/N typical of observations. Etc.
The generalized functions in RealSim-IFS such as `apply_seeing`, `ifu_observe`, and `ifu_to_grid` can be applied to any IFU design and observing strategy including those with fibers of multiple sizes. All that is needed are the fiber coordinate arrays and diameters. Similarly, RealSim-IFS can be used on spectral flux datacubes as this process is mathematically equivalent to LOSVD cubes. This is one of the main rationale for RealSim-IFS -- since it is much closer to the real data.
This concludes the RealSim-IFS MaNGA tutorial. In this tutorial, we mock-observed the target galaxy at redshift $z=0.04$. The N61 design footprint is optimized for capturing the galaxy light out to at least 2.5 half-mass radii. Lowering the redshift will make the galaxy's angular size larger -- requiring a larger design. Setting $z=0.025$ in Section 2 above will result in the N127 design being used. Try it.
### Appendix A: Implementing the SAMI IFU design
The SAMI IFU design has also been given a dedicated function in RealSim-IFS. The plot below shows the footprint of a single SAMI fiber bundle (left) and the footprint of 7 dithered SAMI exposures (right). The SAMI `xc_arr`, `yc_arr`, and `params` can be easily implemented as a replacement for the above MaNGA tutorial. Note that SAMI use the Inverse Drizzle Algorithm in their data reduction pipeline (Sharp et al. 2015).
```
fig,axarr = plt.subplots(1,2,figsize=(12,6))
fig.subplots_adjust(wspace=0.1,)
for ax in axarr:
ax.set_xlim(-12,12)
ax.set_ylim(-12,12)
(xc_arr,yc_arr),params = rifs.sami_ifu(rotation_degrees=0)
common_args = {'radius':params['core_diameter_arcsec']/2.,
'facecolor':'Grey', 'alpha':0.3, 'zorder':0}
ax = axarr[0]
xc,yc = xc_arr[:,0],yc_arr[:,0]
for fiber in range(params['fibers_in_bundle']):
ax.add_artist(Circle(xy=(xc[fiber],yc[fiber]),**common_args))
ax = axarr[1]
idx_center = 30
for exposure in range(params['n_observations']):
xc,yc = xc_arr[:,exposure],yc_arr[:,exposure]
for fiber in range(params['fibers_in_bundle']):
ax.add_artist(Circle(xy=(xc[fiber],yc[fiber]),**common_args))
axarr[0].scatter(xc[idx_center],yc[idx_center],
c='crimson',s=10,zorder=1)
axarr[1].scatter(xc[idx_center],yc[idx_center],
c='crimson',s=10,zorder=1)
```
The crimson markers show the position of the central fiber in each of the 7 dithered exposures.
| github_jupyter |
# Покупка на закрытии и продажа на открытии следующего дня
```
import pandas as pd
import yfinance as yf
import numpy as np
from tqdm.notebook import tqdm
from numpy_ext import rolling_apply
import matplotlib.pyplot as plt
plt.rcParams['lines.linewidth'] = 3
plt.rcParams['axes.grid'] = True
plt.rcParams['grid.linestyle'] = ':'
plt.rcParams['figure.constrained_layout.use'] = True
```
## Загрузка данных
```
start = '1993-01-01'
end = '2020-12-01'
assets = [
'SPY',
]
df = yf.download(assets, start=start, end=end)
#для статистики
stat = pd.DataFrame(columns=[
'Data',
'Start_Date',
'End_Date',
'Return_On_Account',
#'Gross_Profit',
#'Gross_Loss',
'Num_Of_Trades',
'Num_Of_Win_Trades',
'Num_Of_Loss_Trades',
'Percent_Profitable',
'Largest_Win_Trade',
'Largest_Loss_trade',
'Avg_Win_Trade',
'Avg_Loss_Trade',
'Avg_Win_Avg_Loss',
'Avg_Trade',
'Max_Cons_Win',
'Max_Cons_Loss',
'Max_Drawdown',
'Profit_Factor',
'Sharpe_ratio'
])
```
## Подготовка данных
```
def add_buy(df):
    """Add log-return columns for the close-to-open and open-to-close trades.

    Columns added (all log returns, NaN on the first row where no previous
    close exists):
        Close_Open_Buy  -- buy at the previous close, sell at today's open
        Open_Close_Buy  -- log(Open) - log(Close) for the same day
        Close_Open_Sell -- sign-flipped Close_Open_Buy (short side)
        Open_Close_Sell -- sign-flipped Open_Close_Buy (short side)

    Returns the same DataFrame with the four columns attached.
    """
    log_open = np.log(df['Open'])
    log_close = np.log(df['Close'])
    prev_log_close = log_close.shift(1)
    df['Close_Open_Buy'] = log_open - prev_log_close
    df['Open_Close_Buy'] = log_open - log_close
    df['Close_Open_Sell'] = prev_log_close - log_open
    df['Open_Close_Sell'] = log_close - log_open
    return df
df = add_buy(df)
```
## Рассчеты
```
comission = 0.05*2
df['Return_Close_Open_Buy'] = df['Close_Open_Buy'].cumsum().apply(np.exp)
df['Return_Close_Open_Buy_Commiss'] = (df['Close_Open_Buy']-comission/100).cumsum().apply(np.exp)
df['Return_Open_Close_Buy'] = df['Open_Close_Buy'].cumsum().apply(np.exp)
df['Return_Open_Close_Buy_Commiss'] = (df['Open_Close_Buy']-comission/100).cumsum().apply(np.exp)
df['Return_Close_Open_Sell'] = df['Close_Open_Sell'].cumsum().apply(np.exp)
df['Return_Close_Open_Sell_Commiss'] = (df['Close_Open_Sell']-comission/100).cumsum().apply(np.exp)
df['Return_Open_Close_Sell'] = df['Open_Close_Sell'].cumsum().apply(np.exp)
df['Return_Open_Close_Sell_Commiss'] = (df['Open_Close_Sell']-comission/100).cumsum().apply(np.exp)
df[[
'Return_Close_Open_Buy',
'Return_Open_Close_Buy',
]].plot(figsize=(15,7))
plt.axhline(1,linestyle='--')
#plt.savefig('../src/return_spy_buy.png')
df[[
'Return_Close_Open_Buy_Commiss',
'Return_Open_Close_Buy_Commiss',
]].plot(figsize=(15,7))
plt.axhline(1,linestyle='--')
#plt.savefig('../src/return_spy_buy_commiss.png')
df[[
'Return_Close_Open_Sell',
'Return_Open_Close_Sell',
]].plot(figsize=(15,7))
plt.axhline(1,linestyle='--')
#plt.savefig('../src/return_spy_sell.png')
```
## Статистика
```
def calc_stat(df_, stat_, trades, ret, data):
    """Compute performance statistics for one strategy and store them in `stat_`.

    Metric definitions follow the strategy-performance-report glossary at
    https://www.investopedia.com/articles/fundamental-analysis/10/strategy-performance-reports.asp

    Args:
        df_ [dataframe]: daily data; must contain the `trades` and `ret` columns
        stat_ [dataframe]: statistics table; a row labelled `trades` is added/updated
        trades [str]: name of the column holding per-trade log returns
        ret [str]: name of the column holding the cumulative return curve
        data [str]: label of the traded instrument (e.g. 'SPY')

    Returns:
        [dataframe]: `stat_` with the new statistics row added/updated
    """
    start_date = df_[trades].index[0].strftime("%Y-%m-%d")
    end_date = df_[trades].index[-1].strftime("%Y-%m-%d")
    # Final value of the cumulative return curve.
    return_on_account = df_[ret].dropna().iloc[-1]
    num_of_trades = df_[ret].dropna().count()
    num_of_win_trades = (df_[trades]>0).sum()
    num_of_loss_trades = (df_[trades]<0).sum()
    percent_profitable = num_of_win_trades/num_of_trades
    largest_win_trade = df_[df_[trades]>0][trades].max()
    largest_loss_trade = df_[df_[trades]<0][trades].min()
    avg_win_trade = df_[df_[trades]>0][trades].mean()
    avg_loss_trade = df_[df_[trades]<0][trades].mean()
    avg_win_avg_loss = abs(avg_win_trade/avg_loss_trade)
    avg_trade = return_on_account/num_of_trades
    # max_cons_win: longest streak of non-negative trades via the cumsum trick,
    # https://stackoverflow.com/questions/52717996/how-can-i-count-the-number-of-consecutive-trues-in-a-dataframe
    ge_0 = df_[trades].dropna().ge(0)
    ge_0_cumsum = ge_0.cumsum()
    max_cons_win = ge_0_cumsum.sub(ge_0_cumsum.mask(ge_0).ffill().fillna(0)).astype(int).max()
    # max_cons_loss: same trick for the longest streak of non-positive trades.
    le_0 = df_[trades].dropna().le(0)
    le_0_cumsum = le_0.cumsum()
    max_cons_loss = le_0_cumsum.sub(le_0_cumsum.mask(le_0).ffill().fillna(0)).astype(int).max()
    # Largest peak-to-trough decline of the cumulative return curve.
    max_drowdown = df_[ret].dropna().div(df_[ret].dropna().cummax()).subtract(1).min()
    # profit_factor = gross profit / gross loss, from daily equity changes.
    diff = df_[ret].dropna().diff(1)
    gross_profit = diff[diff.ge(0)].sum()
    gross_loss = abs(diff[diff.le(0)].sum())
    profit_factor = gross_profit/gross_loss
    # Annualized Sharpe ratio (252 trading days), see
    # https://quant.stackexchange.com/questions/39839/how-to-calculate-sharpe-ratio-from-returns
    sharpe_ratio = (df_[trades].mean()/df_[trades].std())*np.sqrt(252)
    # Row is indexed by the trade-column name so reruns overwrite the same row.
    #stat_.loc[len(stat_)] = [
    stat_.loc[trades] = [
        data,
        start_date,
        end_date,
        return_on_account,
        num_of_trades,
        num_of_win_trades,
        num_of_loss_trades,
        percent_profitable,
        largest_win_trade,
        largest_loss_trade,
        avg_win_trade,
        avg_loss_trade,
        avg_win_avg_loss,
        avg_trade,
        max_cons_win,
        max_cons_loss,
        max_drowdown,
        profit_factor,
        sharpe_ratio
    ]
    return stat_
# Record statistics for the close->open buy strategy on SPY
stat = calc_stat(df, stat, 'Close_Open_Buy', 'Return_Close_Open_Buy', 'SPY')
stat
```
| github_jupyter |
```
%matplotlib inline
```
Deep Learning with PyTorch
**************************
Deep Learning Building Blocks: Affine maps, non-linearities and objectives
==========================================================================
Deep learning consists of composing linearities with non-linearities in
clever ways. The introduction of non-linearities allows for powerful
models. In this section, we will play with these core components, make
up an objective function, and see how the model is trained.
Affine Maps
~~~~~~~~~~~
One of the core workhorses of deep learning is the affine map, which is
a function $f(x)$ where
\begin{align}f(x) = Ax + b\end{align}
for a matrix $A$ and vectors $x, b$. The parameters to be
learned here are $A$ and $b$. Often, $b$ is referred to
as the *bias* term.
PyTorch and most other deep learning frameworks do things a little
differently than traditional linear algebra. It maps the rows of the
input instead of the columns. That is, the $i$'th row of the
output below is the mapping of the $i$'th row of the input under
$A$, plus the bias term. Look at the example below.
```
# Author: Robert Guthrie

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

torch.manual_seed(1)  # make the randomly-initialized weights reproducible

lin = nn.Linear(5, 3)  # maps from R^5 to R^3, parameters A, b
# data is 2x5. A maps from 5 to 3... can we map "data" under A?
data = torch.randn(2, 5)
print(lin(data))  # yes: each row of data is mapped independently
```
Non-Linearities
~~~~~~~~~~~~~~~
First, note the following fact, which will explain why we need
non-linearities in the first place. Suppose we have two affine maps
$f(x) = Ax + b$ and $g(x) = Cx + d$. What is
$f(g(x))$?
\begin{align}f(g(x)) = A(Cx + d) + b = ACx + (Ad + b)\end{align}
$AC$ is a matrix and $Ad + b$ is a vector, so we see that
composing affine maps gives you an affine map.
From this, you can see that if you wanted your neural network to be long
chains of affine compositions, that this adds no new power to your model
than just doing a single affine map.
If we introduce non-linearities in between the affine layers, this is no
longer the case, and we can build much more powerful models.
There are a few core non-linearities.
$\tanh(x), \sigma(x), \text{ReLU}(x)$ are the most common. You are
probably wondering: "why these functions? I can think of plenty of other
non-linearities." The reason for this is that they have gradients that
are easy to compute, and computing gradients is essential for learning.
For example
\begin{align}\frac{d\sigma}{dx} = \sigma(x)(1 - \sigma(x))\end{align}
A quick note: although you may have learned some neural networks in your
intro to AI class where $\sigma(x)$ was the default non-linearity,
typically people shy away from it in practice. This is because the
gradient *vanishes* very quickly as the absolute value of the argument
grows. Small gradients means it is hard to learn. Most people default to
tanh or ReLU.
```
# In pytorch, most non-linearities are in torch.functional (we have it imported as F)
# Note that non-linearities typically don't have parameters like affine maps do.
# That is, they don't have weights that are updated during training.
data = torch.randn(2, 2)
print(data)
print(F.relu(data))  # negative entries are clamped to zero
```
Softmax and Probabilities
~~~~~~~~~~~~~~~~~~~~~~~~~
The function $\text{Softmax}(x)$ is also just a non-linearity, but
it is special in that it usually is the last operation done in a
network. This is because it takes in a vector of real numbers and
returns a probability distribution. Its definition is as follows. Let
$x$ be a vector of real numbers (positive, negative, whatever,
there are no constraints). Then the i'th component of
$\text{Softmax}(x)$ is
\begin{align}\frac{\exp(x_i)}{\sum_j \exp(x_j)}\end{align}
It should be clear that the output is a probability distribution: each
element is non-negative and the sum over all components is 1.
You could also think of it as just applying an element-wise
exponentiation operator to the input to make everything non-negative and
then dividing by the normalization constant.
```
# Softmax is also in torch.nn.functional
data = torch.randn(5)
print(data)
print(F.softmax(data, dim=0))
print(F.softmax(data, dim=0).sum())  # Sums to 1 because it is a distribution!
print(F.log_softmax(data, dim=0))  # there's also log_softmax
```
Objective Functions
~~~~~~~~~~~~~~~~~~~
The objective function is the function that your network is being
trained to minimize (in which case it is often called a *loss function*
or *cost function*). This proceeds by first choosing a training
instance, running it through your neural network, and then computing the
loss of the output. The parameters of the model are then updated by
taking the derivative of the loss function. Intuitively, if your model
is completely confident in its answer, and its answer is wrong, your
loss will be high. If it is very confident in its answer, and its answer
is correct, the loss will be low.
The idea behind minimizing the loss function on your training examples
is that your network will hopefully generalize well and have small loss
on unseen examples in your dev set, test set, or in production. An
example loss function is the *negative log likelihood loss*, which is a
very common objective for multi-class classification. For supervised
multi-class classification, this means training the network to minimize
the negative log probability of the correct output (or equivalently,
maximize the log probability of the correct output).
Optimization and Training
=========================
So now we can compute a loss function for an instance — what do we do
with that? We saw earlier that Tensors know how to compute gradients
with respect to the things that were used to compute them. Well,
since our loss is a Tensor, we can compute gradients with
respect to all of the parameters used to compute it! Then we can perform
standard gradient updates. Let $\theta$ be our parameters,
$L(\theta)$ the loss function, and $\eta$ a positive
learning rate. Then:
\begin{align}\theta^{(t+1)} = \theta^{(t)} - \eta \nabla_\theta L(\theta)\end{align}
There are a huge collection of algorithms and active research in
attempting to do something more than just this vanilla gradient update.
Many attempt to vary the learning rate based on what is happening at
train time. You don't need to worry about what specifically these
algorithms are doing unless you are really interested. Torch provides
many in the torch.optim package, and they are all completely
transparent. Using the simplest gradient update is the same as the more
complicated algorithms. Trying different update algorithms and different
parameters for the update algorithms (like different initial learning
rates) is important in optimizing your network's performance. Often,
just replacing vanilla SGD with an optimizer like Adam or RMSProp will
boost performance noticeably.
Creating Network Components in PyTorch
======================================
Before we move on to our focus on NLP, lets do an annotated example of
building a network in PyTorch using only affine maps and
non-linearities. We will also see how to compute a loss function, using
PyTorch's built in negative log likelihood, and update parameters by
backpropagation.
All network components should inherit from nn.Module and override the
forward() method. That is about it, as far as the boilerplate is
concerned. Inheriting from nn.Module provides functionality to your
component. For example, it makes it keep track of its trainable
parameters, you can swap it between CPU and GPU with the ``.to(device)``
method, where device can be a CPU device ``torch.device("cpu")`` or CUDA
device ``torch.device("cuda:0")``.
Let's write an annotated example of a network that takes in a sparse
bag-of-words representation and outputs a probability distribution over
two labels: "English" and "Spanish". This model is just logistic
regression.
Example: Logistic Regression Bag-of-Words classifier
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Our model will map a sparse BoW representation to log probabilities over
labels. We assign each word in the vocab an index. For example, say our
entire vocab is two words "hello" and "world", with indices 0 and 1
respectively. The BoW vector for the sentence "hello hello hello hello"
is
\begin{align}\left[ 4, 0 \right]\end{align}
For "hello world world hello", it is
\begin{align}\left[ 2, 2 \right]\end{align}
etc. In general, it is
\begin{align}\left[ \text{Count}(\text{hello}), \text{Count}(\text{world}) \right]\end{align}
Denote this BOW vector as $x$. The output of our network is:
\begin{align}\log \text{Softmax}(Ax + b)\end{align}
That is, we pass the input through an affine map and then do log
softmax.
```
# Tiny train/test corpora: (token list, language label) pairs
data = [("me gusta comer en la cafeteria".split(), "SPANISH"),
        ("Give it to me".split(), "ENGLISH"),
        ("No creo que sea una buena idea".split(), "SPANISH"),
        ("No it is not a good idea to get lost at sea".split(), "ENGLISH")]

test_data = [("Yo creo que si".split(), "SPANISH"),
             ("it is lost on me".split(), "ENGLISH")]

# word_to_ix maps each word in the vocab to a unique integer, which will be its
# index into the Bag of words vector
word_to_ix = {}
for sent, _ in data + test_data:
    for word in sent:
        if word not in word_to_ix:
            word_to_ix[word] = len(word_to_ix)
print(word_to_ix)

VOCAB_SIZE = len(word_to_ix)
NUM_LABELS = 2  # SPANISH / ENGLISH
class BoWClassifier(nn.Module):
    """Logistic-regression classifier over bag-of-words count vectors.

    A single affine layer followed by log-softmax: input is a
    (batch, vocab_size) count tensor, output is (batch, num_labels)
    log-probabilities.
    """

    def __init__(self, num_labels, vocab_size):
        # nn.Module bookkeeping must run before any parameters are registered.
        super(BoWClassifier, self).__init__()
        # The affine map Ax + b: vocab_size inputs -> num_labels scores.
        # Assigning it to an attribute registers its parameters with the module,
        # so model.parameters() will yield A and b. Log-softmax itself has no
        # parameters, so nothing else needs to be created here.
        self.linear = nn.Linear(vocab_size, num_labels)

    def forward(self, bow_vec):
        # Raw scores from the affine map, normalized per row into
        # log-probabilities (dim=1 is the label dimension).
        scores = self.linear(bow_vec)
        return F.log_softmax(scores, dim=1)
def make_bow_vector(sentence, word_to_ix):
    """Return a 1 x |vocab| float tensor of word counts for `sentence`.

    Raises KeyError for tokens not present in `word_to_ix`.
    """
    counts = torch.zeros(len(word_to_ix))
    for token in sentence:
        position = word_to_ix[token]
        counts[position] = counts[position] + 1
    # Reshape to a single-row batch so it can be fed straight to the model.
    return counts.view(1, -1)
def make_target(label, label_to_ix):
    """Wrap the integer index of `label` in a shape-(1,) LongTensor."""
    index = label_to_ix[label]
    return torch.LongTensor([index])
model = BoWClassifier(NUM_LABELS, VOCAB_SIZE)

# the model knows its parameters. The first output below is A, the second is b.
# Whenever you assign a component to a class variable in the __init__ function
# of a module, which was done with the line
# self.linear = nn.Linear(...)
# Then through some Python magic from the PyTorch devs, your module
# (in this case, BoWClassifier) will store knowledge of the nn.Linear's parameters
for param in model.parameters():
    print(param)

# To run the model, pass in a BoW vector
# Here we don't need to train, so the code is wrapped in torch.no_grad()
with torch.no_grad():
    sample = data[0]
    bow_vector = make_bow_vector(sample[0], word_to_ix)
    log_probs = model(bow_vector)
    print(log_probs)
```
Which of the above values corresponds to the log probability of ENGLISH,
and which to SPANISH? We never defined it, but we need to if we want to
train the thing.
```
label_to_ix = {"SPANISH": 0, "ENGLISH": 1}
```
So lets train! To do this, we pass instances through to get log
probabilities, compute a loss function, compute the gradient of the loss
function, and then update the parameters with a gradient step. Loss
functions are provided by Torch in the nn package. nn.NLLLoss() is the
negative log likelihood loss we want. It also defines optimization
functions in torch.optim. Here, we will just use SGD.
Note that the *input* to NLLLoss is a vector of log probabilities, and a
target label. It doesn't compute the log probabilities for us. This is
why the last layer of our network is log softmax. The loss function
nn.CrossEntropyLoss() is the same as NLLLoss(), except it does the log
softmax for you.
```
# Run on test data before we train, just to see a before-and-after
with torch.no_grad():
    for instance, label in test_data:
        bow_vec = make_bow_vector(instance, word_to_ix)
        log_probs = model(bow_vec)
        print(log_probs)

# Print the matrix column corresponding to "creo"
print(next(model.parameters())[:, word_to_ix["creo"]])

loss_function = nn.NLLLoss()  # expects log-probabilities + integer target
optimizer = optim.SGD(model.parameters(), lr=0.1)

# Usually you want to pass over the training data several times.
# 100 is much bigger than on a real data set, but real datasets have more than
# two instances. Usually, somewhere between 5 and 30 epochs is reasonable.
for epoch in range(100):
    for instance, label in data:
        # Step 1. Remember that PyTorch accumulates gradients.
        # We need to clear them out before each instance
        model.zero_grad()

        # Step 2. Make our BOW vector and also we must wrap the target in a
        # Tensor as an integer. For example, if the target is SPANISH, then
        # we wrap the integer 0. The loss function then knows that the 0th
        # element of the log probabilities is the log probability
        # corresponding to SPANISH
        bow_vec = make_bow_vector(instance, word_to_ix)
        target = make_target(label, label_to_ix)

        # Step 3. Run our forward pass.
        log_probs = model(bow_vec)

        # Step 4. Compute the loss, gradients, and update the parameters by
        # calling optimizer.step()
        loss = loss_function(log_probs, target)
        loss.backward()
        optimizer.step()

# Same evaluation as before training, for comparison
with torch.no_grad():
    for instance, label in test_data:
        bow_vec = make_bow_vector(instance, word_to_ix)
        log_probs = model(bow_vec)
        print(log_probs)

# Index corresponding to Spanish goes up, English goes down!
print(next(model.parameters())[:, word_to_ix["creo"]])
```
We got the right answer! You can see that the log probability for
Spanish is much higher in the first example, and the log probability for
English is much higher in the second for the test data, as it should be.
Now you see how to make a PyTorch component, pass some data through it
and do gradient updates. We are ready to dig deeper into what deep NLP
has to offer.
| github_jupyter |
```
%load_ext autoreload
%autoreload 2
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import requests
import re
import statsmodels.api as sm
import statsmodels.formula.api as smf
from fbprophet import Prophet
from fbprophet.plot import add_changepoints_to_plot
from fbprophet.diagnostics import cross_validation
from datetime import datetime, timedelta
import calendar
import holidays
from dateutil.relativedelta import relativedelta
%matplotlib inline
#viz setup
# sns.set(style='whitegrid',font_scale=1.75,rc={"axes.spines.top":False,"axes.spines.right":False, "lines.linewidth": 2.5,'lines.markersize': 10},color_codes=False,palette=sns.color_palette(['#27a3aa','#f76d23','#70d6e3','#ffbb31','#b1c96d','#cce18a','#1c4c5d','#787642']))
sns.set(style='whitegrid',font_scale=1.5,rc={"axes.spines.top":False,"axes.spines.right":False, "lines.linewidth": 2.5,'lines.markersize': 10},color_codes=False,palette=sns.color_palette(['#27a3aa','#f76d23','#70d6e3','#ffbb31','#b1c96d','#cce18a','#1c4c5d','#787642']))
# State codes to query (note AK and HI are not in the list), EIA census-region
# suffixes for the degree-day series, and the two customer sectors
# (residential, commercial).
states = ["AL", "AZ", "AR", "CA", "CO", "CT", "DC", "DE", "FL", "GA",
          "ID", "IL", "IN", "IA", "KS", "KY", "LA", "ME", "MD",
          "MA", "MI", "MN", "MS", "MO", "MT", "NE", "NV", "NH", "NJ",
          "NM", "NY", "NC", "ND", "OH", "OK", "OR", "PA", "RI", "SC",
          "SD", "TN", "TX", "UT", "VT", "VA", "WA", "WV", "WI", "WY"]
regions = ["_ENC","_MAC","_MTN","_NEC","_PAC","PUS","_WNC","_WSC","_ESC","_SAC"]
sectors = ['RES','COM']
# Download monthly electricity sales, customer counts and average price for
# every state x sector from the EIA series API and stack everything into one
# long dataframe (energy_data).
# NOTE(review): the API key is hard-coded into the request URLs — move it to
# an environment variable before sharing this notebook.
idx = 0
for state in states:
    for sector in sectors:
        print("{}-{}".format(state,sector))
        response_consumption = requests.get("http://api.eia.gov/series/?api_key=e45b817b9a5449da30e0b88815d5f119&series_id=ELEC.SALES.{}-{}.M".format(state,sector))
        j_consumption = response_consumption.json()
        tmp_consumption = pd.DataFrame(j_consumption['series'][0]['data'],columns=['month','sales_mkwh'])
        tmp_consumption['state'] = state
        tmp_consumption['sector'] = sector
        response_consumers = requests.get("http://api.eia.gov/series/?api_key=e45b817b9a5449da30e0b88815d5f119&series_id=ELEC.CUSTOMERS.{}-{}.M".format(state,sector))
        j_consumers = response_consumers.json()
        tmp_consumers = pd.DataFrame(j_consumers['series'][0]['data'],columns=['month','consumers'])
        tmp_consumers['state'] = state
        tmp_consumers['sector'] = sector
        response_price = requests.get("http://api.eia.gov/series/?api_key=e45b817b9a5449da30e0b88815d5f119&series_id=ELEC.PRICE.{}-{}.M".format(state,sector))
        j_price = response_price.json()
        tmp_price = pd.DataFrame(j_price['series'][0]['data'],columns=['month','price'])
        tmp_price['state'] = state
        tmp_price['sector'] = sector
        # one row per month with all three series joined
        tmp = tmp_consumption.merge(tmp_consumers,how='left',on=['month','state','sector']).merge(tmp_price,how='left',on=['month','state','sector'])
        if idx == 0:
            energy_data = tmp.copy()
        else:
            energy_data = energy_data.append(tmp)
        idx = idx +1
# Download monthly cooling- and heating-degree-day series (STEO ZWCD/ZWHD)
# for each census region and merge into heating_cooling_days.
idx = 0
for region in regions:
    response_cool = requests.get("http://api.eia.gov/series/?api_key=e45b817b9a5449da30e0b88815d5f119&series_id=STEO.ZWCD{}.M".format(region))
    j_cool = response_cool.json()
    tmp_cool = pd.DataFrame(j_cool['series'][0]['data'],columns=['month','cooling_days'])
    tmp_cool['region'] = region
    response_heat = requests.get("http://api.eia.gov/series/?api_key=e45b817b9a5449da30e0b88815d5f119&series_id=STEO.ZWHD{}.M".format(region))
    j_heat = response_heat.json()
    tmp_heat = pd.DataFrame(j_heat['series'][0]['data'],columns=['month','heating_days'])
    tmp_heat['region'] = region
    tmp = tmp_cool.merge(tmp_heat,how='left',on=['month','region'])
    if idx == 0:
        heating_cooling_days = tmp.copy()
    else:
        heating_cooling_days = heating_cooling_days.append(tmp)
    idx = idx +1
# Revenue per state/sector/month, plus a national ('USA') aggregate row
energy_data['revenue'] = energy_data.sales_mkwh*energy_data.price
country = energy_data.groupby(['month','sector']).sum().reset_index()
country['state'] = 'USA'
country.price = country.revenue/country.sales_mkwh  # sales-weighted national price
energy_data = energy_data.append(country)
# use per customer (sales_mkwh presumably in millions of kWh — hence the 1e6 factor)
energy_data['use_per_capita'] = energy_data.sales_mkwh*1000000/energy_data.consumers
# strip the leading underscore so region codes match the mapping below
heating_cooling_days.region = [re.sub('_','',r) for r in heating_cooling_days.region]
states.extend(['USA'])
# census-region lookup for each state; PUS = whole-US series
state_region_mapping = pd.DataFrame(data={'state': states})
state_region_mapping['region'] = ''
state_region_mapping.loc[state_region_mapping.state.isin(['WA','OR','CA']),'region'] = 'PAC'
state_region_mapping.loc[state_region_mapping.state.isin(['MT','ID','WY','NV','UT','CO','AZ','NM']),'region'] = 'MTN'
state_region_mapping.loc[state_region_mapping.state.isin(['ND','SD','MN','NE','IA','KS','MO']),'region'] = 'WNC'
state_region_mapping.loc[state_region_mapping.state.isin(['OK','TX','AR','LA']),'region'] = 'WSC'
state_region_mapping.loc[state_region_mapping.state.isin(['WI','IL','IN','MI','OH']),'region'] = 'ENC'
state_region_mapping.loc[state_region_mapping.state.isin(['KY','TN','MS','AL']),'region'] = 'ESC'
state_region_mapping.loc[state_region_mapping.state.isin(['WV','MD','DE','VA','NC','SC','GA','FL','DC']),'region'] = 'SAC'
state_region_mapping.loc[state_region_mapping.state.isin(['NY','PA','NJ']),'region'] = 'MAC'
state_region_mapping.loc[state_region_mapping.state.isin(['RI','CT','MA','NH','VT','ME']),'region'] = 'NEC'
state_region_mapping.loc[state_region_mapping.state.isin(['USA']),'region'] = 'PUS'
energy_data = energy_data.merge(state_region_mapping,how='left',on='state')
energy_data= energy_data.merge(heating_cooling_days,how='left',on=['month','region'])
energy_data = energy_data.dropna()
energy_data = pd.concat([energy_data,pd.get_dummies(energy_data.sector)],axis=1)  # RES/COM indicator columns
# time features parsed from the 'YYYYMM' month string
energy_data['time'] = [12*(int(d[0:4])-2008)+int(d[4:6]) for d in energy_data.month]
energy_data['year'] = [int(d[0:4]) for d in energy_data.month]
energy_data['mon'] = [int(d[4:6]) for d in energy_data.month]
def get_season(m):
    """Map a month number (1-12) to its season name.

    Returns None for values outside 1-12 (unchanged from the original).
    """
    if m == 12 or m <= 2:
        return 'winter'
    if 3 <= m <= 5:
        return 'spring'
    if 6 <= m <= 8:
        return 'summer'
    if 9 <= m <= 11:
        return 'fall'
# Season label and a first-of-month datetime for each row
energy_data['season'] = energy_data.mon.apply(get_season)
energy_data.head()
energy_data['date'] = [datetime(y,m,1) for y,m in zip(energy_data.year, energy_data.mon)]
def get_datetime_features(date):
    """Calendar features for the month beginning at `date` (assumed to be
    the first of the month — TODO confirm against the caller).

    Returns:
        tuple: (number of days in the month,
                weekend days + weekday US holidays,
                fraction of the month that is a working weekday)
    """
    month_start = date
    month_end = date + relativedelta(months=1) - relativedelta(days=1)
    month_days = pd.date_range(month_start, month_end)
    ## number of days in month
    num_days = len(month_days)
    ## number of weekend days (Saturday=5, Sunday=6)
    num_weekends = month_days.weekday.isin([5,6]).sum()
    ## US holidays falling on a weekday of this month
    us_holidays = pd.DataFrame(holidays.US(years=date.year).items(),
                               columns=['date','hol'])
    us_holidays['date'] = pd.to_datetime(us_holidays.date)
    in_month = us_holidays.date.dt.month == date.month
    on_weekday = ~us_holidays.date.dt.weekday.isin([5,6])
    num_holidays = len(us_holidays[in_month & on_weekday])
    num_weekends_or_holidays = num_holidays + num_weekends
    ## % of weekdays in month
    pct_weekdays = 1 - (num_holidays + num_weekends) / num_days
    return num_days, num_weekends_or_holidays, pct_weekdays
# Expand the three calendar features into columns, then normalize the target:
# per-customer use divided by the number of days in the month.
energy_data['num_days'], energy_data['num_hols'], energy_data['pct_weekdays'] = zip(*energy_data.date.apply(get_datetime_features))
energy_data['y'] = energy_data.use_per_capita/energy_data.num_days
energy_data.head()
energy_data.tail()
energy_data.date.max()
# NOAA StormEvents bulk detail files, one gzipped CSV per year (2008-2021)
data_urls = ['https://www1.ncdc.noaa.gov/pub/data/swdi/stormevents/csvfiles/StormEvents_details-ftp_v1.0_d2008_c20180718.csv.gz',
             'https://www1.ncdc.noaa.gov/pub/data/swdi/stormevents/csvfiles/StormEvents_details-ftp_v1.0_d2009_c20180718.csv.gz',
             'https://www1.ncdc.noaa.gov/pub/data/swdi/stormevents/csvfiles/StormEvents_details-ftp_v1.0_d2010_c20200922.csv.gz',
             'https://www1.ncdc.noaa.gov/pub/data/swdi/stormevents/csvfiles/StormEvents_details-ftp_v1.0_d2011_c20180718.csv.gz',
             'https://www1.ncdc.noaa.gov/pub/data/swdi/stormevents/csvfiles/StormEvents_details-ftp_v1.0_d2012_c20200317.csv.gz',
             'https://www1.ncdc.noaa.gov/pub/data/swdi/stormevents/csvfiles/StormEvents_details-ftp_v1.0_d2013_c20170519.csv.gz',
             'https://www1.ncdc.noaa.gov/pub/data/swdi/stormevents/csvfiles/StormEvents_details-ftp_v1.0_d2014_c20210120.csv.gz',
             'https://www1.ncdc.noaa.gov/pub/data/swdi/stormevents/csvfiles/StormEvents_details-ftp_v1.0_d2015_c20191116.csv.gz',
             'https://www1.ncdc.noaa.gov/pub/data/swdi/stormevents/csvfiles/StormEvents_details-ftp_v1.0_d2016_c20190817.csv.gz',
             'https://www1.ncdc.noaa.gov/pub/data/swdi/stormevents/csvfiles/StormEvents_details-ftp_v1.0_d2017_c20210120.csv.gz',
             'https://www1.ncdc.noaa.gov/pub/data/swdi/stormevents/csvfiles/StormEvents_details-ftp_v1.0_d2018_c20210716.csv.gz',
             'https://www1.ncdc.noaa.gov/pub/data/swdi/stormevents/csvfiles/StormEvents_details-ftp_v1.0_d2019_c20210604.csv.gz',
             'https://www1.ncdc.noaa.gov/pub/data/swdi/stormevents/csvfiles/StormEvents_details-ftp_v1.0_d2020_c20210716.csv.gz',
             'https://www1.ncdc.noaa.gov/pub/data/swdi/stormevents/csvfiles/StormEvents_details-ftp_v1.0_d2021_c20210716.csv.gz']
# Download and stack the yearly storm-event files
idx=0
for d in data_urls:
    print(d)
    tmp = pd.read_csv(d)
    if idx == 0:
        storm_data = tmp.copy()
    else:
        storm_data = storm_data.append(tmp)
    idx = idx +1

storm_data.EVENT_TYPE.value_counts()
storm_data[storm_data.DAMAGE_PROPERTY == '629.00M']

# keep only the columns needed downstream, one row per event/episode/state
# storm_data_clean = storm_data[storm_data.MAGNITUDE>60].copy()
storm_data_clean = storm_data[['BEGIN_YEARMONTH', 'BEGIN_DAY', 'END_YEARMONTH',
       'END_DAY', 'EPISODE_ID', 'EVENT_ID', 'STATE', 'STATE_FIPS',
       'EVENT_TYPE','MAGNITUDE', 'CATEGORY', 'TOR_F_SCALE',
       'EPISODE_NARRATIVE']].copy()
storm_data_clean = storm_data_clean.drop_duplicates(subset=['EVENT_TYPE','EPISODE_ID','STATE'])
storm_data_clean.tail()
# Full state/territory name -> USPS abbreviation (plus a 'USA' passthrough)
us_state_abbrev = {
    'Alabama': 'AL',
    'Alaska': 'AK',
    'American Samoa': 'AS',
    'Arizona': 'AZ',
    'Arkansas': 'AR',
    'California': 'CA',
    'Colorado': 'CO',
    'Connecticut': 'CT',
    'Delaware': 'DE',
    'District of Columbia': 'DC',
    'Florida': 'FL',
    'Georgia': 'GA',
    'Guam': 'GU',
    'Hawaii': 'HI',
    'Idaho': 'ID',
    'Illinois': 'IL',
    'Indiana': 'IN',
    'Iowa': 'IA',
    'Kansas': 'KS',
    'Kentucky': 'KY',
    'Louisiana': 'LA',
    'Maine': 'ME',
    'Maryland': 'MD',
    'Massachusetts': 'MA',
    'Michigan': 'MI',
    'Minnesota': 'MN',
    'Mississippi': 'MS',
    'Missouri': 'MO',
    'Montana': 'MT',
    'Nebraska': 'NE',
    'Nevada': 'NV',
    'New Hampshire': 'NH',
    'New Jersey': 'NJ',
    'New Mexico': 'NM',
    'New York': 'NY',
    'North Carolina': 'NC',
    'North Dakota': 'ND',
    'Northern Mariana Islands':'MP',
    'Ohio': 'OH',
    'Oklahoma': 'OK',
    'Oregon': 'OR',
    'Pennsylvania': 'PA',
    'Puerto Rico': 'PR',
    'Rhode Island': 'RI',
    'South Carolina': 'SC',
    'South Dakota': 'SD',
    'Tennessee': 'TN',
    'Texas': 'TX',
    'Utah': 'UT',
    'Vermont': 'VT',
    'Virgin Islands': 'VI',
    'Virginia': 'VA',
    'Washington': 'WA',
    'West Virginia': 'WV',
    'Wisconsin': 'WI',
    'Wyoming': 'WY',
    'USA':'USA'
}
# upper-cased keys to match the all-caps STATE column in the NOAA data
us_state_abbrev_caps = {k.upper():v.upper() for k,v in us_state_abbrev.items()}
# Attach two-letter state codes; rows that do not map are dropped
storm_data_clean['state'] = storm_data_clean.STATE.map(us_state_abbrev_caps)
storm_data_clean = storm_data_clean[~storm_data_clean.state.isna()]
storm_data_clean = storm_data_clean.drop(columns='STATE')
# begin/end dates assembled from the YYYYMM + day-of-month columns
storm_data_clean['begin_date'] = [str(y) + str(d).zfill(2) for y,d in zip(storm_data_clean.BEGIN_YEARMONTH,storm_data_clean.BEGIN_DAY)]
storm_data_clean['end_date'] = [str(y) + str(d).zfill(2) for y,d in zip(storm_data_clean.END_YEARMONTH,storm_data_clean.END_DAY)]
storm_data_clean.begin_date = pd.to_datetime(storm_data_clean.begin_date)
storm_data_clean.end_date = pd.to_datetime(storm_data_clean.end_date)
storm_data_clean = storm_data_clean.drop_duplicates(subset='EPISODE_NARRATIVE').sort_values(['state','begin_date'])
# inclusive event duration in days
storm_data_clean['num_days'] = (storm_data_clean.end_date - storm_data_clean.begin_date)
storm_data_clean.num_days = storm_data_clean.num_days.dt.days +1
storm_data_clean = storm_data_clean.drop(columns=['BEGIN_DAY','END_YEARMONTH','END_DAY'])
# (longer candidate list kept for reference)
# events_to_keep = ['Thunderstorm Wind', 'Hail', 'Flash Flood', 'Flood', 'High Wind',
#        'Winter Weather', 'Tornado', 'Winter Storm', 'Heavy Snow', 'Heavy Rain',
#        'Lightning', 'Strong Wind', 'Blizzard', 'Heat', 'Frost/Freeze',
#        'Extreme Cold/Wind Chill', 'Excessive Heat', 'Cold/Wind Chill',
#        'Lake-Effect Snow',
#        'Ice Storm','Tropical Storm', 'Freezing Fog',
#        'Hurricane (Typhoon)',
#        'Hurricane']
# keep temperature/wind events only and bucket each as hot / cold / wind
events_to_keep = [
       'Winter Weather', 'Winter Storm', 'Heavy Snow', 'Blizzard', 'Heat', 'Frost/Freeze',
       'Extreme Cold/Wind Chill', 'Excessive Heat', 'Cold/Wind Chill','Lake-Effect Snow','Ice Storm',
       'Thunderstorm Wind', 'High Wind','Tornado','Heavy Rain','Strong Wind','Tropical Storm', 'Hurricane (Typhoon)', 'Hurricane']
hot_cold_map = {'Winter Weather':'cold', 'Winter Storm':'cold', 'Heavy Snow':'cold', 'Blizzard':'cold', 'Heat':'hot', 'Frost/Freeze':'cold',
       'Extreme Cold/Wind Chill':'cold', 'Excessive Heat':'hot', 'Cold/Wind Chill':'cold','Lake-Effect Snow':'cold','Ice Storm':'cold',
       'Thunderstorm Wind':'wind', 'High Wind':'wind','Tornado':'wind','Heavy Rain':'wind','Strong Wind':'wind','Tropical Storm':'wind',
       'Hurricane (Typhoon)':'wind', 'Hurricane':'wind'}
storm_data_clean = storm_data_clean[storm_data_clean.EVENT_TYPE.isin(events_to_keep)]
storm_data_clean['hot_cold'] = storm_data_clean.EVENT_TYPE.map(hot_cold_map)
# total storm days per state x month x bucket, pivoted to hot/cold/wind columns
storm_data_clean = storm_data_clean.groupby(['state','BEGIN_YEARMONTH','hot_cold']).sum().reset_index()
storm_data_clean = storm_data_clean.rename(columns={'BEGIN_YEARMONTH':'month'})
storm_data_clean.month = storm_data_clean.month.astype(str)
storm_data_clean = storm_data_clean.pivot(index=['state','month'], columns='hot_cold', values='num_days').reset_index().fillna(0)
storm_data_clean['mon'] = [x[4:6] for x in storm_data_clean.month]
storm_data_clean
# impute May 2021 (presumably missing from the source files) with each
# state's historical May average
avg_may = storm_data_clean.groupby(['state','mon']).mean().reset_index()
avg_may = avg_may[avg_may.mon.isin(['05'])]
avg_may['month'] = '2021'
avg_may['month'] = [x+y for x,y in zip(avg_may.month, avg_may.mon)]
avg_may
storm_data_clean = pd.concat([storm_data_clean, avg_may])
storm_data_clean.month.value_counts()
# join storm-day counts onto energy_data; months without storms get 0
energy_data = energy_data.merge(storm_data_clean[['state','month','hot','cold','wind']],how='left',on=['state','month'])
# energy_data.storm_days = energy_data.storm_days.fillna(0)
energy_data.hot = energy_data.hot.fillna(0)
energy_data.cold = energy_data.cold.fillna(0)
energy_data.wind = energy_data.wind.fillna(0)
energy_data.tail()
energy_data.to_csv('../data/energy_data.csv',index=False)
| github_jupyter |
# Import Package
```
import math
import datetime
import numpy as np
import pandas as pd
from entropy import *
from collections import Counter
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from scipy.interpolate import interp1d
import random
import joblib
import sklearn
from sklearn import metrics
import sklearn.model_selection
from xgboost.sklearn import XGBClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import KFold
from sklearn import svm
import lightgbm as lgb
from lightgbm.sklearn import LGBMClassifier
from sklearn.pipeline import Pipeline
# from sklearn.externals import joblib
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
from scipy import interp
from sklearn.metrics import roc_auc_score
from itertools import cycle
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import accuracy_score
import datetime
from sklearn.model_selection import ShuffleSplit
import scipy.io as sio
```
# Load Dataset
```
# Load the 6-posture dataset from the .mat file
matfn = 'posture_6_data.mat'
data = sio.loadmat(matfn)
trainSet = data['x_train']
testSet = data['x_test']
trainLabel = data['y_train']
testLabel = data['y_test']
trainSet = np.array(trainSet )
testSet = np.array(testSet )
np.set_printoptions(threshold=np.inf)
# reshape to (n, 200, 16) windows, keep only the first 8 of 16 channels,
# then flatten back to (n, 1600) feature vectors
trainSet = trainSet.reshape(trainSet.shape[0], 200, 16)
trainSet = trainSet[:,:,0:8]
trainSet = trainSet.reshape(trainSet.shape[0], 200*8)
testSet = testSet.reshape(testSet.shape[0], 200, 16)
testSet = testSet[:,:,0:8]
testSet = testSet.reshape(testSet.shape[0], 200*8)
print("trainSet", trainSet.shape)
print("testSet ", testSet.shape)
trainLabel = np.array(trainLabel).ravel()
testLabel = np.array(testLabel ).ravel()
className = ['no motion', 'sit & stand', 'walk', 'run', 'turn left','turn right']
# replace label with string — from here on the labels are class-name strings,
# not integers
trainLabel = [className[i] for i in trainLabel]
testLabel = [className[i] for i in testLabel ]
print("trainSet", trainSet.shape)
print("testSet ", testSet.shape)
# print("trainLabel", trainLabel.shape)
# print("testLabel ", testLabel.shape)
```
# Random Forest
```
# Random forest over the flattened 200x8 windows; balanced class weights
# compensate for any class imbalance among the 6 postures.
rf = RandomForestClassifier(random_state=0, n_estimators=1000, max_depth=5,
                            n_jobs=4, class_weight="balanced")
starttime = datetime.datetime.now()
rf.fit(trainSet, trainLabel)
endtime = datetime.datetime.now()
rfResult = rf.predict(testSet)
# save the model
joblib.dump(rf, "model/rf_model1.m")
from sklearn.metrics import f1_score
print("F-score: {0:.2f}".format(f1_score(rfResult, testLabel, average='micro')))
print("the accuracy is :", np.mean(rfResult == testLabel))  # typo "accurancy" fixed
print(endtime - starttime)  # training time
lw = 2
proba = rf.predict_proba(testSet)
# BUG FIX: the labels were mapped to strings above ('no motion', ...) and there
# are 6 classes, so binarizing against the integer list [0..4] produced an
# all-zero indicator matrix (silently breaking every ROC below) and n_classes=5
# dropped a class. Binarize against the classifier's own class ordering so the
# indicator columns line up with predict_proba's columns.
testLabel_bi = label_binarize(testLabel, classes=list(rf.classes_))
rfResult_bi = label_binarize(rfResult, classes=list(rf.classes_))
n_classes = len(rf.classes_)
# Per-class one-vs-rest ROC curves plus micro/macro averages for the RF model.
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
    fpr[i], tpr[i], _ = roc_curve(testLabel_bi[:, i], proba[:, i])
    roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(testLabel_bi.ravel(), proba.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
# First aggregate all false positive rates
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
print(all_fpr)
# Then interpolate all ROC curves at these points.
# FIX: scipy's `interp` is a deprecated re-export of np.interp (removed in
# recent SciPy releases); call NumPy directly — numerically identical.
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
    mean_tpr += np.interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= n_classes
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
# Plot all ROC curves
plt.figure(figsize=(5, 5))
plt.plot(fpr["micro"], tpr["micro"],
         label='micro-average ROC curve (area = {0:0.2f})'
               ''.format(roc_auc["micro"]),
         color='deeppink', linestyle=':', linewidth=4)
plt.plot(fpr["macro"], tpr["macro"],
         label='macro-average ROC curve (area = {0:0.2f})'
               ''.format(roc_auc["macro"]),
         color='navy', linestyle=':', linewidth=4)
# NOTE: only 3 colors cycle over n_classes curves, so colors repeat.
colors = cycle(['aqua', 'darkorange', 'cornflowerblue'])
for i, color in zip(range(n_classes), colors):
    plt.plot(fpr[i], tpr[i], color=color, lw=lw,
             label='ROC curve of class {0} (area = {1:0.2f})'
                   ''.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--', lw=lw)  # chance diagonal
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC of Random Forest')
plt.legend(loc="lower right")
plt.savefig('figure/rf_roc.png')
plt.show()
```
# plot_learning_curve function
```
from sklearn.model_selection import learning_curve
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
                        n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
    """Plot training vs. cross-validation score against training-set size.

    Saves the figure to 'figure/<title>.png' and returns the plt module.
    ylim, when given, bounds the plotted score axis.
    """
    plt.figure()
    plt.title(title)
    if ylim is not None:
        plt.ylim(*ylim)
    plt.xlabel("Training examples")
    plt.ylabel("Score")
    sizes, tr_scores, cv_scores = learning_curve(
        estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
    tr_mean = np.mean(tr_scores, axis=1)
    tr_std = np.std(tr_scores, axis=1)
    cv_mean = np.mean(cv_scores, axis=1)
    cv_std = np.std(cv_scores, axis=1)
    plt.grid()
    # Shaded bands show +/- one std across the CV folds.
    plt.fill_between(sizes, tr_mean - tr_std, tr_mean + tr_std,
                     alpha=0.1, color="aquamarine")
    plt.fill_between(sizes, cv_mean - cv_std, cv_mean + cv_std,
                     alpha=0.1, color="dodgerblue")
    plt.plot(sizes, tr_mean, 'o-', color="aquamarine",
             label="Training score")
    plt.plot(sizes, cv_mean, 'o-', color="dodgerblue",
             label="Cross-validation score")
    plt.legend(loc="best")
    plt.savefig('figure/' + title + '.png')
    return plt
```
# Learning Curve
```
# Learning curve of the random forest over 20 shuffled 80/20 splits.
X = trainSet
y = trainLabel
# print(X.shape)
# print(y.shape)
title = "Learning Curve of Random Forest"
cv = ShuffleSplit(n_splits=20, test_size=0.2, random_state=0)
plot_learning_curve(rf, title, X, y, (0.85, 1.01), cv=cv, n_jobs=4)
# confusion matrix
con_mat = confusion_matrix(testLabel, rfResult)
con_mat_norm = con_mat.astype('float') / con_mat.sum(axis=1)[:, np.newaxis]  # normalize each true-class row to sum to 1
con_mat_norm = np.around(con_mat_norm, decimals=2)
# === plot ===
plt.figure(figsize=(5, 5))
sns.heatmap(con_mat_norm, annot=True, cmap='Blues')
plt.ylim(0, 6)  # 6 posture classes -> 6x6 heatmap
plt.xlabel('Predicted labels')
plt.ylabel('True labels')
plt.title('Confusion Matrix of Random Forest')
# save the figure
plt.savefig('figure/rf_confusion_matrix.png')
plt.show()
```
# LBGMClassifier
```
# LightGBM with GOSS boosting on the same flattened windows.
lgbm = LGBMClassifier(objective='multiclass', boosting_type='goss',
                      num_leaves=10, max_depth=-1, n_estimators=50,
                      learning_rate=0.3, subsample_for_bin=800, n_jobs=4)
starttime = datetime.datetime.now()
lgbm.fit(trainSet, trainLabel)
endtime = datetime.datetime.now()
lgbmResult = lgbm.predict(testSet)
# save the model
joblib.dump(lgbm, 'model/lgbm.model')
from sklearn.metrics import f1_score
print("F-score: {0:.2f}".format(f1_score(lgbmResult, testLabel, average='micro')))
print("the accuracy is :", np.mean(lgbmResult == testLabel))  # typo "accurancy" fixed
print(endtime - starttime)  # training time
proba = lgbm.predict_proba(testSet)
# BUG FIX: string labels + 6 classes — binarize against the model's own class
# ordering, not the integer list [0..4] (which produced an all-zero matrix).
testLabel_bi = label_binarize(testLabel, classes=list(lgbm.classes_))
lgbmResult_bi = label_binarize(lgbmResult, classes=list(lgbm.classes_))
n_classes = len(lgbm.classes_)
# Per-class one-vs-rest ROC curves plus micro/macro averages for LightGBM,
# followed by its learning curve.
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
    fpr[i], tpr[i], _ = roc_curve(testLabel_bi[:, i], proba[:, i])
    roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(testLabel_bi.ravel(), proba.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
# First aggregate all false positive rates
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
print(all_fpr)
# FIX: scipy's `interp` is a deprecated re-export of np.interp; use NumPy directly.
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
    mean_tpr += np.interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= n_classes
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
# Plot all ROC curves
plt.figure(figsize=(5, 5))
plt.plot(fpr["micro"], tpr["micro"],
         label='micro-average ROC curve (area = {0:0.2f})'
               ''.format(roc_auc["micro"]),
         color='deeppink', linestyle=':', linewidth=4)
plt.plot(fpr["macro"], tpr["macro"],
         label='macro-average ROC curve (area = {0:0.2f})'
               ''.format(roc_auc["macro"]),
         color='navy', linestyle=':', linewidth=4)
colors = cycle(['aqua', 'darkorange', 'cornflowerblue'])
for i, color in zip(range(n_classes), colors):
    plt.plot(fpr[i], tpr[i], color=color, lw=lw,
             label='ROC curve of class {0} (area = {1:0.2f})'
                   ''.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--', lw=lw)  # chance diagonal
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC of LightGBM')
plt.legend(loc="lower right")
plt.savefig('figure/lgbm_roc.png')
plt.show()
# Learning curve over 20 shuffled 80/20 splits.
X = trainSet
y = trainLabel
title = "Learning Curve of LGBMClassifier"
cv = ShuffleSplit(n_splits=20, test_size=0.2, random_state=0)
plot_learning_curve(lgbm, title, X, y, (0.6, 1.01), cv=cv, n_jobs=4)
plt.show()
# Row-normalized confusion matrix for LightGBM.
con_mat = confusion_matrix(testLabel, lgbmResult)
con_mat_norm = con_mat.astype('float') / con_mat.sum(axis=1)[:, np.newaxis]  # normalize rows
con_mat_norm = np.around(con_mat_norm, decimals=2)
# === plot ===
plt.figure(figsize=(5, 5))
sns.heatmap(con_mat_norm, annot=True, cmap='Blues')
# BUG FIX: there are 6 posture classes, so the heatmap is 6x6; ylim(0, 5)
# cropped a row (the RF section correctly uses ylim(0, 6)).
plt.ylim(0, 6)
plt.title('Confusion Matrix of LGBM')
plt.xlabel('Predicted labels')
plt.ylabel('True labels')
plt.savefig('figure/lgbm_confusion_matrix.png')
plt.show()
```
# xgbBoost
```
# XGBoost on the same flattened windows. NOTE(review): scale_pos_weight is a
# binary-classification knob; its effect in this multiclass setting is dubious —
# kept for backward compatibility.
xgb = XGBClassifier(random_state=0, n_estimators=100, scale_pos_weight=10,
                    learning_rate=0.1, max_depth=6, subsample=0.8,
                    min_child_weight=10)
xgb.fit(trainSet, trainLabel)
xgbResult = xgb.predict(testSet)
# save the model
joblib.dump(xgb, 'model/xgb.model')
from sklearn.metrics import f1_score
print("F-score: {0:.2f}".format(f1_score(xgbResult, testLabel, average='micro')))
print("the accuracy is :", np.mean(xgbResult == testLabel))  # typo "accurancy" fixed
proba = xgb.predict_proba(testSet)
# BUG FIX: string labels + 6 classes — binarize against the fitted class
# ordering, not [0..4] (which yielded an all-zero indicator matrix).
testLabel_bi = label_binarize(testLabel, classes=list(xgb.classes_))
n_classes = len(xgb.classes_)
# Per-class one-vs-rest ROC curves plus micro/macro averages for XGBoost,
# followed by its learning curve.
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
    fpr[i], tpr[i], _ = roc_curve(testLabel_bi[:, i], proba[:, i])
    roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(testLabel_bi.ravel(), proba.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
# First aggregate all false positive rates
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
print(all_fpr)
# FIX: scipy's `interp` is a deprecated re-export of np.interp; use NumPy directly.
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
    mean_tpr += np.interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= n_classes
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
# Plot all ROC curves
plt.figure(figsize=(5, 5))
plt.plot(fpr["micro"], tpr["micro"],
         label='micro-average ROC curve (area = {0:0.2f})'
               ''.format(roc_auc["micro"]),
         color='deeppink', linestyle=':', linewidth=4)
plt.plot(fpr["macro"], tpr["macro"],
         label='macro-average ROC curve (area = {0:0.2f})'
               ''.format(roc_auc["macro"]),
         color='navy', linestyle=':', linewidth=4)
colors = cycle(['aqua', 'darkorange', 'cornflowerblue'])
for i, color in zip(range(n_classes), colors):
    plt.plot(fpr[i], tpr[i], color=color, lw=lw,
             label='ROC curve of class {0} (area = {1:0.2f})'
                   ''.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--', lw=lw)  # chance diagonal
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC of XGBoost')
plt.legend(loc="lower right")
plt.savefig('figure/xgb_roc.png')
plt.show()
# Learning curve over 20 shuffled 80/20 splits.
X = trainSet
y = trainLabel
title = "Learning Curve of xgbBoost"
cv = ShuffleSplit(n_splits=20, test_size=0.2, random_state=0)
plot_learning_curve(xgb, title, X, y, (0.6, 1.01), cv=cv, n_jobs=4)
plt.show()
# Row-normalized confusion matrix for XGBoost.
con_mat = confusion_matrix(testLabel, xgbResult)
con_mat_norm = con_mat.astype('float') / con_mat.sum(axis=1)[:, np.newaxis]  # normalize rows
con_mat_norm = np.around(con_mat_norm, decimals=2)
# === plot ===
plt.figure(figsize=(5, 5))
sns.heatmap(con_mat_norm, annot=True, cmap='Blues')
# BUG FIX: 6 classes -> 6x6 heatmap; ylim(0, 5) cropped a row.
plt.ylim(0, 6)
plt.title('Confusion Matrix of XGBoost')
plt.xlabel('Predicted labels')
plt.ylabel('True labels')
plt.savefig('figure/xgb_confusion_matrix.png')
plt.show()
```
# AdaBoost
```
# AdaBoost over full-depth decision trees (SAMME.R uses class probabilities).
ad = AdaBoostClassifier(DecisionTreeClassifier(), algorithm="SAMME.R",
                        n_estimators=100, learning_rate=0.1, random_state=0)
ad.fit(trainSet, trainLabel)
adResult = ad.predict(testSet)
# save the model
joblib.dump(ad, 'model/ad.model')
from sklearn.metrics import f1_score
print("F-score: {0:.2f}".format(f1_score(adResult, testLabel, average='micro')))
print("the accuracy is :", np.mean(adResult == testLabel))  # typo "accurancy" fixed
proba = ad.predict_proba(testSet)
# BUG FIX: string labels + 6 classes — binarize against the fitted class
# ordering, not [0..4] (which yielded an all-zero indicator matrix).
testLabel_bi = label_binarize(testLabel, classes=list(ad.classes_))
n_classes = len(ad.classes_)
# Per-class one-vs-rest ROC curves plus micro/macro averages for AdaBoost,
# followed by its learning curve.
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
    fpr[i], tpr[i], _ = roc_curve(testLabel_bi[:, i], proba[:, i])
    roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(testLabel_bi.ravel(), proba.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
# First aggregate all false positive rates
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
print(all_fpr)
# FIX: scipy's `interp` is a deprecated re-export of np.interp; use NumPy directly.
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
    mean_tpr += np.interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= n_classes
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
# Plot all ROC curves
plt.figure(figsize=(5, 5))
plt.plot(fpr["micro"], tpr["micro"],
         label='micro-average ROC curve (area = {0:0.2f})'
               ''.format(roc_auc["micro"]),
         color='deeppink', linestyle=':', linewidth=4)
plt.plot(fpr["macro"], tpr["macro"],
         label='macro-average ROC curve (area = {0:0.2f})'
               ''.format(roc_auc["macro"]),
         color='navy', linestyle=':', linewidth=4)
colors = cycle(['aqua', 'darkorange', 'cornflowerblue'])
for i, color in zip(range(n_classes), colors):
    plt.plot(fpr[i], tpr[i], color=color, lw=lw,
             label='ROC curve of class {0} (area = {1:0.2f})'
                   ''.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--', lw=lw)  # chance diagonal
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC of AdaBoost')
plt.legend(loc="lower right")
plt.savefig('figure/ad_roc.png')
plt.show()
# Learning curve over 20 shuffled 80/20 splits.
X = trainSet
y = trainLabel
title = "Learning Curve of AdaBoost"
cv = ShuffleSplit(n_splits=20, test_size=0.2, random_state=0)
plot_learning_curve(ad, title, X, y, (0.4, 1.05), cv=cv, n_jobs=4)
plt.show()
# Row-normalized confusion matrix for AdaBoost.
con_mat = confusion_matrix(testLabel, adResult)
con_mat_norm = con_mat.astype('float') / con_mat.sum(axis=1)[:, np.newaxis]  # normalize rows
con_mat_norm = np.around(con_mat_norm, decimals=2)
# === plot ===
plt.figure(figsize=(5, 5))
sns.heatmap(con_mat_norm, annot=True, cmap='Blues')
# BUG FIX: 6 classes -> 6x6 heatmap; ylim(0, 5) cropped a row.
plt.ylim(0, 6)
plt.title('Confusion Matrix of AdaBoost')
plt.xlabel('Predicted labels')
plt.ylabel('True labels')
plt.savefig('figure/ad_confusion_matrix.png')
plt.show()
```
# GBDT
```
# Gradient-boosted trees with stochastic subsampling.
gbdt = GradientBoostingClassifier(random_state=0, n_estimators=100,
                                  learning_rate=0.1, subsample=0.8)
starttime = datetime.datetime.now()
gbdt.fit(trainSet, trainLabel)
endtime = datetime.datetime.now()
gbdtResult = gbdt.predict(testSet)
# save the model
joblib.dump(gbdt, "model/gbdt_model.h5")
from sklearn.metrics import f1_score
print("F-score: {0:.3f}".format(f1_score(gbdtResult, testLabel, average='micro')))
print("the accuracy is :", np.mean(gbdtResult == testLabel))  # typo "accurancy" fixed
print(endtime - starttime)  # training time
proba = gbdt.predict_proba(testSet)
# BUG FIX: string labels + 6 classes — binarize against the fitted class
# ordering, not [0..4] (which yielded an all-zero indicator matrix).
testLabel_bi = label_binarize(testLabel, classes=list(gbdt.classes_))
lw = 2
n_classes = len(gbdt.classes_)
# Per-class one-vs-rest ROC curves plus micro/macro averages for GBDT,
# followed by its learning curve.
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
    fpr[i], tpr[i], _ = roc_curve(testLabel_bi[:, i], proba[:, i])
    roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(testLabel_bi.ravel(), proba.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
# First aggregate all false positive rates
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
print(all_fpr)
# FIX: scipy's `interp` is a deprecated re-export of np.interp; use NumPy directly.
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
    mean_tpr += np.interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= n_classes
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
# Plot all ROC curves
plt.figure(figsize=(5, 5))
plt.plot(fpr["micro"], tpr["micro"],
         label='micro-average ROC curve (area = {0:0.2f})'
               ''.format(roc_auc["micro"]),
         color='deeppink', linestyle=':', linewidth=4)
plt.plot(fpr["macro"], tpr["macro"],
         label='macro-average ROC curve (area = {0:0.2f})'
               ''.format(roc_auc["macro"]),
         color='navy', linestyle=':', linewidth=4)
colors = cycle(['aqua', 'darkorange', 'cornflowerblue'])
for i, color in zip(range(n_classes), colors):
    plt.plot(fpr[i], tpr[i], color=color, lw=lw,
             label='ROC curve of class {0} (area = {1:0.2f})'
                   ''.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--', lw=lw)  # chance diagonal
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC of GBDT')
plt.legend(loc="lower right")
plt.savefig('figure/gbdt_roc.png')
plt.show()
# Learning curve over 20 shuffled 80/20 splits.
X = trainSet
y = trainLabel
title = "Learning Curve of GBDT"
cv = ShuffleSplit(n_splits=20, test_size=0.2, random_state=0)
plot_learning_curve(gbdt, title, X, y, (0.6, 1.01), cv=cv, n_jobs=4)
plt.show()
# Row-normalized confusion matrix for GBDT.
con_mat = confusion_matrix(testLabel, gbdtResult)
con_mat_norm = con_mat.astype('float') / con_mat.sum(axis=1)[:, np.newaxis]  # normalize rows
con_mat_norm = np.around(con_mat_norm, decimals=2)
# === plot ===
plt.figure(figsize=(5, 5))
sns.heatmap(con_mat_norm, annot=True, cmap='Blues')
# BUG FIX: 6 classes -> 6x6 heatmap; ylim(0, 5) cropped a row.
plt.ylim(0, 6)
plt.title('Confusion Matrix of GBDT')
plt.xlabel('Predicted labels')
plt.ylabel('True labels')
plt.savefig('figure/gbdt_confusion_matrix.png')
plt.show()
```
# SVM
```
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVC

# Linear SVM on standardized features. NOTE(review): this rebinds the name
# `svm` (which was the sklearn.svm module imported at the top) — kept so later
# references still work, but a different name would be cleaner.
svm = Pipeline([("scaler", StandardScaler()),
                ("linear_svc", LinearSVC(C=1, loss="hinge"))])  # list of steps, not a tuple
svm.fit(trainSet, trainLabel)
svmResult = svm.predict(testSet)
# save the model
joblib.dump(svm, "model/svm_model.h5")
from sklearn.metrics import f1_score
print("F-score: {0:.3f}".format(f1_score(svmResult, testLabel, average='micro')))
print("the accuracy is :", np.mean(svmResult == testLabel))  # typo "accurancy" fixed
# LinearSVC has no predict_proba, so the ROC below uses the decision function.
# BUG FIX 1: binarize against the sorted class names — LinearSVC orders its
# decision-function columns by np.unique(y) — not the integer list [0..4],
# which yielded an all-zero matrix for these string labels.
# BUG FIX 2: do not refit here; the pipeline was already fitted above, and a
# second (unseeded) fit could produce a model inconsistent with svmResult.
testLabel_bi = label_binarize(testLabel, classes=sorted(className))
y_score = svm.decision_function(testSet)
lw = 2
n_classes = len(className)
# Per-class one-vs-rest ROC curves plus micro/macro averages for the linear SVM
# (using decision-function scores), followed by its learning curve.
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
    fpr[i], tpr[i], _ = roc_curve(testLabel_bi[:, i], y_score[:, i])
    roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(testLabel_bi.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
# First aggregate all false positive rates
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
print(all_fpr)
# FIX: scipy's `interp` is a deprecated re-export of np.interp; use NumPy directly.
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
    mean_tpr += np.interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= n_classes
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
# Plot all ROC curves
plt.figure(figsize=(5, 5))
plt.plot(fpr["micro"], tpr["micro"],
         label='micro-average ROC curve (area = {0:0.2f})'
               ''.format(roc_auc["micro"]),
         color='deeppink', linestyle=':', linewidth=4)
plt.plot(fpr["macro"], tpr["macro"],
         label='macro-average ROC curve (area = {0:0.2f})'
               ''.format(roc_auc["macro"]),
         color='navy', linestyle=':', linewidth=4)
colors = cycle(['aqua', 'darkorange', 'cornflowerblue'])
for i, color in zip(range(n_classes), colors):
    plt.plot(fpr[i], tpr[i], color=color, lw=lw,
             label='ROC curve of class {0} (area = {1:0.2f})'
                   ''.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--', lw=lw)  # chance diagonal
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC of SVM')
plt.legend(loc="lower right")
plt.savefig('figure/svm_roc.png')
plt.show()
# Learning curve over 20 shuffled 80/20 splits.
X = trainSet
y = trainLabel
title = "Learning Curve of SVM"
cv = ShuffleSplit(n_splits=20, test_size=0.2, random_state=0)
plot_learning_curve(svm, title, X, y, (0.80, 1.01), cv=cv, n_jobs=4)
plt.show()
# Row-normalized confusion matrix for the linear SVM.
con_mat = confusion_matrix(testLabel, svmResult)
con_mat_norm = con_mat.astype('float') / con_mat.sum(axis=1)[:, np.newaxis]  # normalize rows
con_mat_norm = np.around(con_mat_norm, decimals=2)
# === plot ===
plt.figure(figsize=(5, 5))
sns.heatmap(con_mat_norm, annot=True, cmap='Blues')
# BUG FIX: 6 classes -> 6x6 heatmap; ylim(0, 5) cropped a row.
plt.ylim(0, 6)
plt.title('Confusion Matrix of SVM')
plt.xlabel('Predicted labels')
plt.ylabel('True labels')
# save the figure
plt.savefig('figure/svm_confusion_matrix.png')
plt.show()
```
| github_jupyter |
# Chapter 2 End-to-End Machine Learning Project
Welcome to the Machine Learning Housing Corporation! Your first task is to
use California census data to build a model of housing prices in the state. This
data includes metrics such as the population, median income, and median
housing price for each block group in California. Block groups are the
smallest geographical unit for which the US Census Bureau publishes sample
data (a block group typically has a population of 600 to 3,000 people). We
will call them “districts” for short.
### What model to choose:
- it is clearly a typical supervised learning task, since you are given labeled training examples (each instance comes with the expected output, i.e., the district’s median housing price).
- It is also a typical regression task, since you are asked to predict a value. More specifically, this is a *multiple regression* problem, since the system will use multiple features to make a prediction (it will use the district’s population, the median income, etc.).
- It is also a *univariate* regression problem, since we are only trying to predict a single value for each district. If we were trying to predict multiple values per district, it would be a *multivariate* regression problem.
- Finally, there is no continuous flow of data coming into the system, there is no particular need to adjust to changing data rapidly, and the data is small enough to fit in memory, so plain *batch learning* should do just fine.
### Select a Performance Measure
A typical performance measure for regression problems is the Root Mean Square Error (RMSE). It gives an idea of how much error the system typically makes in its predictions, with a higher weight for large errors:
RMSE$(X,h)=\sqrt{\frac{1}{m} \sum \limits_{i=1}^m(h(x^i)-y^i)^2}$
- $m$ is the number of instances in the dataset you are measuring the RMSE on.
- For example, if you are evaluating the RMSE on a validation set of 2,000 districts, then $m = 2,000$.
- $x^i$ is a vector of all the feature values (excluding the label) of the $i$th instance in the dataset, and $y^i$ is its label (the desired output value for that instance)
- $X$ is a matrix containing all the feature values (excluding labels) of all instances in the dataset. There is one row per instance, and the $i$th row is equal to the transpose of $x^i$, noted $(x)^⊺$.
- $h$ is your system’s prediction function, also called a _hypothesis_. When your system is given an instance’s feature vector $x^i$ , it outputs a predicted value $ŷ^i= h(x^i)$ for that instance ($ŷ$ is pronounced “y-hat”).
- For example, if your system predicts that the median housing price in the first district is \\$158,400, then $ŷ^1 = h(x^1) = 158,400$. The prediction error for this district is $ŷ^1 – y^1 = 2,000$.
- RMSE$(X,h)$ is the cost function measured on the set of examples using your hypothesis $h$.
For example, suppose that there are many outlier districts. In that case, you may consider using the mean absolute error (MAE, also called the average absolute deviation:
MAE$(X,h) = \frac{1}{m}\sum \limits_{i=1}^m | h (x^i) - y^i|$
Both the RMSE and the MAE are ways to measure the distance between two vectors: the vector of predictions and the vector of target values.
- Computing the root of a sum of squares (RMSE) corresponds to the Euclidean norm.
- Computing the sum of absolutes (MAE) corresponds to the Manhattan norm.
- The $l_k$ norm of a vector $v$ containing $n$ elements is defined as $||v||_k = (|v_1|^k+|v_2|^k+\dots+|v_n|^k)^\frac{1}{k}$.
- The higher the norm index, the more it focuses on large values and neglects small ones. This is why the RMSE is more sensitive to outliers than the MAE. But when outliers are exponentially rare (like in a bell-shaped curve), the RMSE performs very well and is generally preferred.
**Check the Assumptions!!!**
```
# fetch the data
import os
import tarfile
import urllib
DOWNLOAD_ROOT = "https://raw.githubusercontent.com/ageron/handson-ml2/master/"
HOUSING_PATH = os.path.join("datasets", "housing")
HOUSING_URL = DOWNLOAD_ROOT + "datasets/housing/housing.tgz"
def fetch_housing_data(housing_url: str = HOUSING_URL,
                       housing_path: str = HOUSING_PATH) -> None:
    """Download housing.tgz from *housing_url* and extract it into *housing_path*.

    Creates *housing_path* if needed. Network I/O; raises urllib errors on failure.
    """
    # BUG FIX: the file only does `import urllib`, which does not expose the
    # `urllib.request` submodule; import it explicitly here.
    import urllib.request
    os.makedirs(housing_path, exist_ok=True)
    tgz_path = os.path.join(housing_path, 'housing.tgz')
    urllib.request.urlretrieve(housing_url, tgz_path)
    # Context manager guarantees the archive is closed even if extraction fails.
    with tarfile.open(tgz_path) as housing_tgz:
        housing_tgz.extractall(path=housing_path)
# Load dataset into Pandas
import pandas as pd
def load_housing_data(housing_path: str=HOUSING_PATH):
    """Read housing.csv from *housing_path* into a pandas DataFrame."""
    return pd.read_csv(os.path.join(housing_path, "housing.csv"))
# Download (if needed), load the dataset, and take a first look at its
# columns, dtypes, missing counts, and per-feature distributions.
fetch_housing_data()
df = load_housing_data()
df.head()
df.columns
df.info()
df['ocean_proximity'].value_counts()
df.describe()
%matplotlib inline
import matplotlib.pyplot as plt
df.hist(bins=50, figsize=(20,15))
plt.show()
```
1. First, the median income attribute does not look like it is expressed in US dollars (USD). After checking with the team that collected the data, you are told that the data has been scaled and capped at 15 (actually, 15.0001) for higher median incomes, and at 0.5 (actually, 0.4999) for lower median incomes. The numbers represent roughlytens of thousands of dollars (e.g., 3 actually means about \\$30,000). Working with preprocessed attributes is common in Machine Learning, and it is not necessarily a problem, but you should try to understand how the data was computed.
2. The housing median age and the median house value were also capped. The latter may be a serious problem since it is your target attribute (your labels).
3. Many histograms are tail-heavy: they extend much farther to the right of the median than to the left. This may make it a bit harder for some Machine Learning algorithms to detect patterns. We will try transforming these attributes later on to have more bell-shaped distributions.
## Create a test set
if you look at the test set, you may stumble upon some seemingly interesting pattern in the test data that leads you to select a particular kind of Machine Learning model. When you estimate the generalization error using the test set, your estimate will be too optimistic, and you will launch a system that will not perform as well as expected. This is called _data snooping_ bias.
```
import numpy as np
def split_train_test(data: pd.DataFrame, test_ratio: float,
                     random_state=None) -> tuple:
    """Randomly split *data* into (train_df, test_df).

    test_ratio: fraction of rows routed to the test set.
    random_state: optional int seed making the split reproducible; left as
        None the global NumPy RNG is used, so repeated runs give different
        splits (the weakness discussed in the surrounding text).
    """
    rng = np.random.RandomState(random_state) if random_state is not None else np.random
    shuffled_indices = rng.permutation(len(data))
    # Take the first test_ratio fraction of the shuffled positions as the test set.
    test_set_size = int(len(data) * test_ratio)
    test_indices = shuffled_indices[:test_set_size]
    train_indices = shuffled_indices[test_set_size:]
    return data.iloc[train_indices], data.iloc[test_indices]
# 80/20 split with the hand-rolled splitter (unseeded, so not reproducible).
train_set, test_set = split_train_test(df, 0.2)
print('Train set size:', len(train_set))
print('Test set size:', len(test_set))
```
Well, this works, but it is not perfect: if you run the program again, it will generate a different test set! Over time, you (or your Machine Learning algorithms) will get to see the whole dataset, which is what you want to avoid.
For example, you could compute a hash of each instance’s identifier and put that instance in the test set if the hash is lower than or equal to 20% of the maximum hash value. This ensures that the test set will remain consistent across multiple runs, even if you refresh the dataset. The new test set will contain 20% of the new instances, but it will not contain any instance that was previously in the training set.
```
from zlib import crc32
def test_set_check(identifier: str, test_ratio: int) -> str:
"""Compute a hash for each instance's identifier"""
return crc32(np.int64(identifier)) & 0xffffffff < test_ratio * 2**32
def split_train_test_by_id(data: pd,
                           test_ratio: int,
                           id_column: str) -> pd:
    """
    Hash every instance's identifier and route it to the test set when
    the hash falls below test_ratio of the hash range, yielding a split
    that is stable across runs and dataset refreshes.
    """
    in_test_set = data[id_column].apply(
        lambda id_: test_set_check(id_, test_ratio)
    )
    return data.loc[~in_test_set], data.loc[in_test_set]
# Use the row index as the (fragile) identifier for the hash-based split.
housing_with_id = df.reset_index()
# adds an `index` column
train_set, test_set = split_train_test_by_id(housing_with_id, 0.2, "index")
```
If you use the row index as a unique identifier, you need to make sure that new data gets appended to the end of the dataset and that no row ever gets deleted. If this is not possible, then you can try to use the most stable features to build a unique identifier. For example, a district’s latitude and longitude are guaranteed to be stable for a few million years, so you could combine them into an ID like so:
```
# Build a stabler id from coordinates (longitude*1000 + latitude) and re-split.
housing_with_id["id"] = df["longitude"] * 1000 + df["latitude"]
train_set, test_set = split_train_test_by_id(housing_with_id, 0.2, "id")
```
We can do the same with `Scikit-learn` which provides similar function.
```
from sklearn.model_selection import train_test_split
# Equivalent split via Scikit-Learn, seeded for reproducibility.
train_set, test_set = train_test_split(df, test_size=0.2, random_state=42)
```
Most median income values are clustered around 1.5 to 6 (i.e., \\$15,000–\\$60,000), but some median incomes go far beyond 6. It is important to have a sufficient number of instances in your dataset for each stratum, or else the estimate of a stratum’s importance may be biased.
The following code uses the `pd.cut()` function to create an income category attribute with five categories (labeled from 1 to 5): category 1 ranges from 0 to 1.5 (i.e., less than \\$15,000), category 2 from 1.5 to 3, and so on:
```
# Bucket median income into 5 ordinal categories so the split can be
# stratified on income.
df['income_cat'] = pd.cut(df['median_income'],
                          bins=[0.0, 1.5, 3.0, 4.5, 6.0, np.inf],
                          labels=[1, 2, 3, 4, 5])
df['income_cat'].hist()
# Does stratified sampling based on the income category
from sklearn.model_selection import StratifiedShuffleSplit
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train_index, test_index in split.split(df, df['income_cat']):
    strata_train_set = df.loc[train_index]
    strata_test_set = df.loc[test_index]
# Let's test our results: category proportions should match the full dataset.
strata_test_set["income_cat"].value_counts() / len(strata_test_set)
# Now let's remove income_cat attribute
# so the data is back to its original state.
for set_ in (strata_train_set, strata_test_set):
    set_.drop('income_cat', axis=1, inplace=True)
```
## Discover and Visualize the Data to Gain Insights
* If the training set is very large, you may want to sample an exploration set, to make manipulations easy and fast.
### Visualizing Geographical Data
```
# Low alpha reveals the density of districts on the lat/long scatter plot.
df.plot(kind='scatter',
        x='longitude',
        y='latitude',
        alpha=0.1)
```
Let's look at the housing prices. The radius of each circle represents the district’s population (option **s**), and the color represents the price (option **c**). We will use a predefined color map (option **cmap**) called **jet**, which ranges from blue (low values) to red (high prices):
```
# Circle radius encodes population, color encodes median house value.
df.plot(
    kind='scatter', x='longitude', y='latitude', alpha=0.4,
    s=df['population']/100, label='population', figsize=(10,7),
    c='median_house_value', cmap=plt.get_cmap('jet'), colorbar=True
)
```
### Looking for Correlations
We can easily compute _standard correlation coefficient (Pearson's r)_ between every pair of attributes
```
corr_matrix = df.corr()
corr_matrix['median_house_value'].sort_values(ascending=False)
```
The correlation coefficient only measures linear correlations (“if x goes up, then y generally goes up/down”). It may completely miss out on nonlinear relationships (e.g., “if x is close to 0, then y generally goes up”).
```
# Another way to check for correlation between attributes
# Pairwise scatter plots for the most promising attributes.
from pandas.plotting import scatter_matrix
attributes = ['median_house_value',
              'median_income',
              'total_rooms',
              'housing_median_age']
scatter_matrix(df[attributes], figsize=(12,8))
```
The most promising attribute to predict the median house value is the median income, so let’s zoom in on their correlation scatterplot.
```
df.plot(kind='scatter', x='median_income', y='median_house_value', alpha=0.1)
```
1. The correlation is indeed very strong; you can clearly see the upward trend, and the points are not too dispersed.
2. The price cap that we noticed earlier is clearly visible as a horizontal line at \\$500,000.
3. This plot reveals other less obvious straight lines: a horizontal line around \\$450,000, another around \\$350,000, perhaps one around \\$280,000, and a few more below that.
### Experimenting with Attribute Combinations
We noticed that some attributes have a tail-heavy distribution, so you may want to transform them (computing their logarithm).
Also, let's try out various attribute combinations. Let’s create these new attributes:
```
# Combine raw counts into per-household / per-room ratios, which tend to be
# more informative than the absolute counts.
df["rooms_per_household"] = df["total_rooms"] / df["households"]
df["bedrooms_per_room"] = df["total_bedrooms"] / df["total_rooms"]
df["population_per_household"]= df["population"] / df["households"]
# Now let's look at the correlation matrix again
corr_matrix = df.corr()
corr_matrix['median_house_value'].sort_values(ascending=False)
```
The new **bedrooms_per_room** attribute is much more correlated with the median house value than the total number of rooms or bedrooms.
Apparently houses with a lower bedroom/room ratio tend to be more expensive. This is an iterative process: once you get a prototype up and running, you can analyze its output to gain more insights and come back to this exploration step.
## Prepare the Data for Machine Learning
Let's start by reverting to a clean training set. Also, let's seperate predictors and the labels.
```
# Separate the predictors from the label on the stratified training set.
housing = strata_train_set.drop("median_house_value", axis=1)
housing_labels = strata_train_set["median_house_value"].copy()
```
### Data Cleaning
To deal with missing values in **total_bedrooms** we have three options:
1. Get rid of the corresponding districts.
2. Get rid of the whole attribute.
3. Set the values to some value (zero, the mean, the median, etc.)
Here's how to do the same with Scikit-Learn. First, you need to create a `SimpleImputer` instance, specifying that you want to replace each attribute’s missing values with the median of that attribute:
```
from sklearn.impute import SimpleImputer
imputer = SimpleImputer(strategy='median')
# Since the median can only be computed on numerical attributes, we
# create a copy of the data without the text attribute ocean_proximity
housing_num = housing.drop('ocean_proximity', axis=1)
# Now we fit the imputer instance to the training data
imputer.fit(housing_num)
```
The imputer has simply computed the median of each attribute and stored the result in its `statistics_` instance variable. It is safer to apply
the `imputer` to all the numerical attributes.
```
print('Imputer:', imputer.statistics_)
print('Medians:', housing_num.median().values)
# Transform the training set by replacing missing values with the medians
X = imputer.transform(housing_num)
# The result will be a plain NumPy array containing the transformed features.
housing_tr = pd.DataFrame(
X, columns=housing_num.columns, index=housing_num.index)
```
### SCIKIT-LEARN DESIGN
Scikit-Learn’s API is remarkably well designed.
_Estimators_
- Any object that can estimate some parameters based on a dataset is called an _estimator_ (e.g., an `imputer` is an estimator). The estimation itself is performed by the `fit()` method, and it takes only a dataset as a parameter (or two for supervised learning algorithms; the second dataset contains the labels). Any other parameter needed to guide the estimation process is considered a hyperparameter (such as an imputer ’s `strategy` ), and it must be set as an instance variable (generally via a constructor parameter).
_Transformers_
- Some estimators (such as an imputer ) can also transform a dataset; these are called _transformers_. The transformation is performed by the `transform()` method with the dataset to transform as a parameter. It returns the transformed dataset. This transformation generally relies on the learned parameters, as is the case for an `imputer`. All transformers also have a convenience method called `fit_transform()` that is equivalent to calling `fit()` and then `transform()` (but sometimes `fit_transform()` is optimized and runs much faster).
_Predictors_
- Finally, some estimators, given a dataset, are capable of making predictions; they are called _predictors_. For example, the `LinearRegression` model in the previous chapter was a predictor: given a country’s GDP per capita, it predicted life satisfaction. A predictor has a `predict()` method that takes a dataset of new instances and returns a dataset of corresponding predictions. It also has a `score()` method that measures the quality of the predictions, given a test set (and the corresponding labels, in the case of supervised learning algorithms).
_Inspection_
- All the estimator’s hyperparameters are accessible directly via public instance variables (e.g., `imputer.strategy`), and all the estimator’s learned parameters are accessible via public instance variables with an underscore suffix (e.g., `imputer.statistics_`).
### Handling Text and Categorical Attributes
Let's look at the **ocean_proximity** attribute:
```
housing_cat = housing[['ocean_proximity']]
housing_cat.head(10)
```
Let’s convert these categories from text to numbers. For this, we can use Scikit-Learn’s `OrdinalEncoder` class:
```
from sklearn.preprocessing import OrdinalEncoder
ordinal_encoder = OrdinalEncoder()
housing_cat_encoded = ordinal_encoder.fit_transform(housing_cat)
housing_cat_encoded[:10]
# We can get the list of categories
ordinal_encoder.categories_
```
One issue with this representation is that ML algorithms will assume that two nearby values are more similar than two distant values. This may be fine in some cases (e.g., for ordered categories such as “bad,” “average,” “good,” and“excellent”), but it is obviously not the case for the **ocean_proximity** column.
To fix this issue, a common solution is to create one binary attribute per category: one attribute equal to 1 when the category is “<1H OCEAN” (and 0 otherwise), another attribute equal to 1 when the category is “INLAND” (and 0 otherwise), and so on. This is called _one-hot encoding_, because only one attribute will be equal to 1 (hot), while the others will be 0 (cold).
```
from sklearn.preprocessing import OneHotEncoder
cat_encoder = OneHotEncoder()
housing_cat_1hot = cat_encoder.fit_transform(housing_cat)
housing_cat_1hot
```
After one-hot encoding, we get a matrix with thousands of columns, and the matrix is full of 0s except for a single 1 per row. Using up tons of memory mostly to store zeros would be very wasteful, so instead a SciPy sparse matrix only stores the location of the nonzero elements. You can use it mostly like a normal 2D array, 21 but if you really want to convert it to a (dense) NumPy array, just call the `toarray()` method:
```
housing_cat_1hot.toarray()
cat_encoder.categories_
```
### Custom Transformers
You will want your transformer to work seamlessly with Scikit-Learn functionalities (such as pipelines), and since Scikit-Learn relies on duck typing (not inheritance), all you need to do is create a class and implement three methods: `fit()` (returning `self`), `transform()` , and `fit_transform()`.
You can get the last one for free by simply adding `TransformerMixin` as a base class. If you add `BaseEstimator` as a base class (and avoid \*args and \*\*kwargs in your constructor), you will also get two extra methods (`get_params()` and `set_params()`) that will be useful for automatic hyperparameter tuning.
```
# Example class
from sklearn.base import BaseEstimator, TransformerMixin
rooms_ix, bedrooms_ix, population_ix, households_ix = 3, 4, 5, 6
class CombinedAttributesAdder(BaseEstimator, TransformerMixin):
    """Appends derived ratio columns to a NumPy feature matrix.

    Always adds rooms-per-household and population-per-household;
    optionally adds bedrooms-per-room, gated by the
    ``add_bedrooms_per_room`` hyperparameter so its usefulness can be
    evaluated during hyperparameter search.
    """
    def __init__(self, add_bedrooms_per_room: bool = True) -> None:
        # No *args or **kwargs, so BaseEstimator's get_params()/set_params() work.
        self.add_bedrooms_per_room = add_bedrooms_per_room
    def fit(self, X, y=None):
        # Stateless transformer: nothing to learn from the data.
        return self
    def transform(self, X, y=None):
        # Build the list of extra columns, then stack them after X along axis 1.
        extra_cols = [
            X[:, rooms_ix] / X[:, households_ix],       # rooms per household
            X[:, population_ix] / X[:, households_ix],  # population per household
        ]
        if self.add_bedrooms_per_room:
            extra_cols.append(X[:, bedrooms_ix] / X[:, rooms_ix])
        return np.column_stack([X] + extra_cols)
attr_adder = CombinedAttributesAdder(add_bedrooms_per_room=False)
housing_extra_attribs = attr_adder.transform(housing.values)
```
This hyperparameter will allow you to easily find out whether adding this attribute helps the Machine Learning algorithms or not. More generally, you can add a hyperparameter to gate any data preparation step that you are not 100% sure about.
### Feature Scaling
Machine Learning algorithms don’t perform well when the input numerical attributes have very different scales. Note that scaling the target values is generally not required.
There are two common ways to get all attributes to have the same scale:
_min-max scaling_:
- Min-max scaling (many people call this normalization) is the simplest: values are shifted and rescaled so that they end up ranging from 0 to 1. We do this by subtracting the min value and dividing by the max minus the min. Scikit-Learn provides a transformer called `MinMaxScaler` for this.
_standardization._
- First it subtracts the mean value (so standardized values always have a zero mean), and then it divides by the standard deviation so that the resulting distribution has unit variance. Unlike min-max scaling, standardization does not bound values to a specific range, which may be a problem for some algorithms (e.g., neural networks often expect an input value ranging from 0 to 1). However, standardization is much less affected by outliers. Scikit-Learn provides a transformer called `StandardScaler` for standardization.
As with all the transformations, it is important to fit the scalers to the training data only.
### Transformation Pipelines
Scikit-Learn provides the `Pipeline` class to help with such sequences of transformations.
```
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
num_pipeline = Pipeline([
('imputer', SimpleImputer(strategy='median')),
('attribs_adder', CombinedAttributesAdder()),
('std_dealer', StandardScaler()),
])
housing_num_tr = num_pipeline.fit_transform(housing_num)
```
The `Pipeline` constructor takes a list of name/estimator pairs defining a sequence of steps.
All but the last estimator must be transformers (i.e., they must have a `fit_transform()` method). The names can be anything you like (as long as they are unique and don’t contain double underscores, __).
When you call the pipeline’s `fit()` method, it calls `fit_transform()` sequentially on all transformers, passing the output of each call as the parameter to the next call until it reaches the final estimator, for which it calls the `fit()` method.
It would be more convenient to have a single transformer able to handle all columns, applying the appropriate transformations to each column.
```
from sklearn.compose import ColumnTransformer
# List of numerical column names
num_attribs = list(housing_num)
# List of categorical column names
cat_attribs = ['ocean_proximity']
'''
The constructor requires a list of tuples, where each tuple
contains a name, a transformer, and a list of names (or indices)
of columns that the transformer should be applied to.
'''
full_pipeline = ColumnTransformer([
('num', num_pipeline, num_attribs),
('cat', OneHotEncoder(), cat_attribs),
])
# Create fully transformed dataframe
housing_prepared = full_pipeline.fit_transform(housing)
```
## Select and Train a Model
```
# Train a linear regression model
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(housing_prepared, housing_labels)
# Try Linear model on few instances from the training set
some_data = housing.iloc[:5]
some_labels = housing_labels.iloc[:5]
some_data_prepared = full_pipeline.transform(some_data)
print('Predictions:', lin_reg.predict(some_data_prepared))
print('Labels:', list(some_labels))
# Let's measure model's RMSE
from sklearn.metrics import mean_squared_error
housing_predictions = lin_reg.predict(housing_prepared)
lin_mse = mean_squared_error(housing_labels, housing_predictions)
lin_rmse = mean_squared_error(housing_labels, housing_predictions)
lin_rmse = np.sqrt(lin_mse)
print(lin_rmse)
```
This is better than nothing, but clearly not a great score: most districts’ median_housing_values range between \\$120,000 and \\$265,000, so a typical prediction error of \\$68,628 is not very satisfying. This is an example of a model underfitting the training data.
```
# Let's train a Decision Tree Model
from sklearn.tree import DecisionTreeRegressor
tree_reg = DecisionTreeRegressor()
tree_reg.fit(housing_prepared, housing_labels)
# Let's evaluate on a training set
housing_predictions = tree_reg.predict(housing_prepared)
tree_mse = mean_squared_error(housing_labels, housing_predictions)
tree_rmse = np.sqrt(tree_mse)
print(tree_rmse)
```
Perfect fit? It's more likely that the model has badly overfit the data. As we saw earlier, you don’t want to touch the test set until you are ready to launch a model you are confident about, so you need to use part of the training set for training and part of it for model validation.
### Better Evaluation Using Cross-Validation
A great way to evaluate a Decision Tree model is to use the _K-fold cross-validation_ feature.
```
from sklearn.model_selection import cross_val_score
scores = cross_val_score(tree_reg, housing_prepared, housing_labels,
scoring='neg_mean_squared_error', cv=10)
tree_rmse_scores = np.sqrt(-scores)
```
**WARNING**
Scikit-Learn’s cross-validation features expect a utility function (greater is better) rather than a cost function (lower is better), so the scoring function is actually the opposite of the MSE (i.e., a negative value), which is why the preceding code computes -scores before calculating the square root.
```
# Check the results
def display_scores(scores):
    """Print a cross-validation score array along with its mean and standard deviation."""
    for label, value in (("Scores", scores),
                         ("Mean", scores.mean()),
                         ("Standard Deviation", scores.std())):
        print(f"{label}: {value}")
display_scores(tree_rmse_scores)
```
The cross-validation allows you to get not only an estimate of the performance of your model, but also a measure of how precise this estimate is (i.e., its standard deviation).
The Decision Tree has a score of approximately 71,407, generally ±2,439. You would not have this information if you just used one validation set. But cross-validation comes at the cost of training the model several times, so it is not always possible.
```
# Compute CV for Linear Model
lin_scores = cross_val_score(lin_reg, housing_prepared, housing_labels,
scoring='neg_mean_squared_error', cv=10)
lin_rmse_scores = np.sqrt(-lin_scores)
display_scores(lin_rmse_scores)
```
That’s right: the **Decision Tree** model is overfitting so badly that it performs worse than the **Linear Regression** model.
----
You should save every model you experiment with so that you can come back easily to
any model you want. Make sure you save both the hyperparameters and the trained
parameters, as well as the cross-validation scores and perhaps the actual predictions as
well. This will allow you to easily compare scores across model types, and compare the
types of errors they make. You can easily save Scikit-Learn models by using Python’s
`pickle` module or by using the `joblib` library, which is more efficient at serializing
large NumPy arrays (you can install this library using pip):
### Parameter vs Hyperparameter:
A model _parameter_ is a configuration variable that is internal to the model and whose value can be estimated from data.
- The weights in an artificial neural network.
- The support vectors in a support vector machine.
- The coefficients in a linear regression or logistic regression
A model _hyperparameter_ is a configuration that is external to the model and whose value cannot be estimated from data.
- The learning rate for training a neural network.
- The C and sigma hyperparameters for support vector machines.
- The k in k-nearest neighbors.
If you have to specify a model parameter manually then it is probably a model hyperparameter.
## Fine-Tune Your Model
### Grid Search
To find a great combination of hyperparameter values we can use Scikit's `GridSearchCV` to search for us. All you need to do is tell it which hyperparameters you want it to experiment with and what values to try out, and it will use cross-validation to evaluate all the possible combinations of hyperparameter values.
```
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestRegressor
# Search for the best combination of hyperparameter values
param_grid = [
{'n_estimators': [3, 10, 30], 'max_features': [2, 4, 6, 8]},
{'bootstrap': [False], 'n_estimators': [3, 10], 'max_features': [2, 3, 4]}
]
forest_reg = RandomForestRegressor()
grid_search = GridSearchCV(forest_reg, param_grid, cv=5,
scoring='neg_mean_squared_error',
return_train_score=True,
refit=True)
grid_search.fit(housing_prepared, housing_labels)
'''When you have no idea what value a hyperparameter should have, a simple approach is
to try out consecutive powers of 10 (or a smaller number if you want a more fine-grained
search, as shown in this example with the n_estimators hyperparameter)'''
```
This `param_grid` tells Scikit-Learn to first evaluate all 3 × 4 = 12 combinations of n_estimators and max_features hyperparameter values specified in the first `dict`, then try all 2 × 3 = 6 combinations of hyperparameter values in the second `dict`, but this time with the bootstrap hyperparameter set to False instead of True (which is the default value for this hyperparameter).
The grid search will explore 12 + 6 = 18 combinations of RandomForestRegressor hyperparameter values, and it will train each model 5 times (As specified in `cv` argument). All in all, there will be 18 × 5 = 90 rounds of training! It may take quite a long time.
```
grid_search.best_params_
# Get the best estimator directly
grid_search.best_estimator_
# Evaluation Scores
cvres = grid_search.cv_results_
for mean_score, params in zip(cvres['mean_test_score'], cvres['params']):
print(np.sqrt(-mean_score), params)
```
### Randomized Search
The grid search approach is fine when you are exploring relatively few combinations, like in the previous example, but when the hyperparameter search space is large, it is often preferable to use `RandomizedSearchCV` instead. This class can be used in much the same way as the `GridSearchCV` class, but instead of trying out all possible combinations, it evaluates a given number of random combinations by selecting a random value for each hyperparameter at every iteration. This approach has two main benefits:
### Analyze the Best Models and Their Errors
You will often gain good insights on the problem by inspecting the best models
```
feature_importances = grid_search.best_estimator_.feature_importances_
print(feature_importances)
# Display feature importances next to their attribute names
extra_attribs = ['rooms_per_hhold', 'pop_per_hhold', 'bedrooms_per_room']
cat_encoder = full_pipeline.named_transformers_['cat']
cat_one_hot_attribs = list(cat_encoder.categories_[0])
attributes = num_attribs + extra_attribs + cat_one_hot_attribs
sorted(zip(feature_importances, attributes), reverse=True)
```
With this information, you may want to try dropping some of the less useful features.
### Evaluate System on the Test Set
just get the predictors and the labels from your test set, run your `full_pipeline` to transform the data
(call `transform()` , not `fit_transform()` —you do not want to fit the test set!), and evaluate the final model on the test set:
```
final_model = grid_search.best_estimator_
X_test = strata_test_set.drop('median_house_value', axis=1)
y_test = strata_test_set['median_house_value'].copy()
X_test_prepared = full_pipeline.transform(X_test)
final_predictions = final_model.predict(X_test_prepared)
final_mse = mean_squared_error(y_test, final_predictions)
final_rmse = np.sqrt(final_mse)
print('RMSE:', final_rmse)
```
You may want to know how precise this estimate is. For this you can compute a 95% confidence interval for the generalization error:
```
from scipy import stats
CONFIDENCE = 0.95
squared_errors = (final_predictions - y_test) ** 2
np.sqrt(stats.t.interval(CONFIDENCE, len(squared_errors) - 1,
loc=squared_errors.mean(),
scale=stats.sem(squared_errors)))
```
Now comes the project prelaunch phase: you need to present your solution (highlighting what you have learned, what worked and what did not, what assumptions were made, and what your system’s limitations are), document everything, and create nice presentations with clear visualizations and easy-to-remember statements (e.g., “the median income is the number one predictor of housing prices”). In this California housing example, the final performance of the system is not better than the experts’ price estimates, which were often off by about 20%, but it may still be a good idea to launch it, especially if this frees up some time for the experts so they can work on more interesting and productive tasks.
## Launch, Monitor, and Maintain Your System
If the data keeps evolving, you will need to update your datasets and retrain your model regularly. You should probably automate the whole process as much as possible. Here are a few things you can automate:
- Collect fresh data regularly and label it (e.g., using human raters).
- Write a script to train the model and fine-tune the hyperparameters automatically. This script could run automatically, for example every day or every week, depending on your needs.
- Write another script that will evaluate both the new model and the previous model on the updated test set, and deploy the model to production if the performance has not decreased (if it did, make sure you investigate why).
| github_jupyter |
# 3.1 signac-flow minimal example
## About
This notebook contains a minimal example for running a signac-flow project from scratch.
The example demonstrates how to compare an ideal gas with a lennard jones fluid by calculating a p-V phase diagram.
## Author
Carl Simon Adorf
## Before you start
Make sure you installed signac and signac-flow, e.g., with:
```
conda install -c conda-forge signac
conda install -c glotzer signac-flow
```
```
import signac
import flow
import numpy as np
# Enter the signac project directory
project = signac.init_project('FlowTutorialProject', 'projects/tutorial-signac-flow')
```
We want to generate a pressure-volume (p-V) phase diagram for an ideal gas.
We define a function to calculate the result for a given state point:
```
def V_idg(N, p, kT):
    """Return the ideal-gas volume V = N * kT / p for N particles at pressure p and temperature kT."""
    volume = (N * kT) / p
    return volume
```
We want to use **signac** to manage our data, therefore we define an *operation* which has only the *signac job* as argument:
```
def compute_volume(job):
    """Compute the ideal-gas volume for the job's state point and store it in the job document."""
    state_point = job.statepoint()
    job.document['V'] = V_idg(**state_point)
```
For this demonstration we will specialize a `flow.FlowProject` to manage our simple *workflow*.
The workflow is controlled by two core functions: `labels()` and `next_operation()`:
- The `labels()` function allows us to *label* our jobs and get a good overview of the project *status*. This is especially important once the data space becomes larger and more complex and operations more expensive.
- The `next_operation()` functions helps to automate the workflow by identifying the next required operation for each job.
In this case there is only **one** operation:
```
class MyProject(flow.FlowProject):
    """FlowProject that estimates the ideal-gas volume for each state point."""

    def labels(self, job):
        """Yield status labels for *job*: always 'init', plus 'estimated' once V is stored."""
        yield 'init'
        if 'V' in job.document:
            yield 'estimated'

    def next_operation(self, job):
        """Return the name of the next operation for *job*, or None when the job is complete."""
        # (Dropped an unused `labels = set(self.labels(job))` the original computed here.)
        if 'V' not in job.document:
            return 'compute_volume'
        return None
```
We need to use the `get_project()` *class method* to get a project handle for this special project class.
```
project = MyProject.get_project(root='projects/tutorial-signac-flow')
```
Now it's time to actually generate some data! Let's initialize the data space!
```
for p in np.linspace(0.5, 5.0, 10):
sp = dict(N=1728, kT=1.0, p=p)
project.open_job(sp).init()
```
The `print_status()` function allows to get a quick overview of our project's *status*:
```
project.print_status(detailed=True, parameters=['p'])
```
The next cell will attempt to execute all operations by cycling through jobs and operations until no *next operations* are defined anymore.
We limit the max. number of cycles to prevent accidental infinite loops, the number of cycles is arbitrary.
```
for i in range(3):
for job in project:
for j in range(5):
next_op = project.next_operation(job)
if next_op is None:
break
print('execute', job, next_op)
globals()[next_op](job)
assert next_op != project.next_operation(job)
else:
raise RuntimeError("Reached max. # cycle limit!")
```
Let's double check the project status.
```
project.print_status()
```
After running all operations we can make a brief examination of the collected data.
```
for job in project:
print(job.statepoint()['p'], job.document.get('V'))
```
For a better presentation of the results we need to aggregate all results and sort them by pressure.
```
from matplotlib import pyplot as plt
%matplotlib inline
V = dict()
for job in project:
V[job.statepoint()['p']] = job.document['V']
p = sorted(V.keys())
V = [V[p_] for p_ in p]
print(V)
plt.plot(p, V, label='idG')
plt.xlabel(r'pressure [$\epsilon / \sigma^3$]')
plt.ylabel(r'volume [$\sigma^3$]')
plt.legend()
```
As a final step, we can generate an index of our project data.
You can store this index in a variable or within a database, e.g., for search operations.
```
for doc in project.index():
print(doc)
```
Uncomment and execute the following line to remove all data and start over.
```
#% rm -r projects/tutorial-signac-flow/workspace
```
| github_jupyter |
**Credit card Fraud versus a set of Models**
_This notebook contains an example of running several models against a credit card fraud dataset pulled from Kaggle._
# Setup
First, let's import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures. We also check that Python 3.5 or later is installed (although Python 2.x may work, it is deprecated so we strongly recommend you use Python 3 instead), as well as Scikit-Learn ≥0.20.
```
# Python ≥3.5 is required
import sys
assert sys.version_info >= (3, 5)
# Scikit-Learn ≥0.20 is required
import sklearn
assert sklearn.__version__ >= "0.20"
# Common imports
import numpy as np
import os
# to make this notebook's output stable across runs
np.random.seed(42)
# To plot pretty figures
%matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)
import warnings
warnings.filterwarnings(action="ignore", message="^internal gelsd")
```
# Load Data
I loaded the card fraud data from an S3 bucket on AWS for you.
```
import pandas as pd
cfraud=pd.read_csv("https://s3.amazonaws.com/www.ruxton.ai/creditcard.csv")
cfraud.head()
```
# Some Minimal EDA
```
from string import ascii_letters
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style="white")
# Generate a large random dataset
rs = np.random.RandomState(33)
d = pd.DataFrame(data=rs.normal(size=(100, 26)),
columns=list(ascii_letters[26:]))
# Compute the correlation matrix
corr = cfraud.corr()
# Generate a mask for the upper triangle
mask = np.triu(np.ones_like(corr, dtype=np.bool))
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(11, 9))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0,
square=True, linewidths=.5, cbar_kws={"shrink": .5})
use=list(cfraud.columns.values[[1,2,3,4,5,6,7,9,10,11,12,14,16,17,18,19,28]]) # use all the predictor data for example
print(use)
```
EDA: Before fitting models, you should do EDA. Do you want to add any features as combos of others?
Transform data here
That looks awful. Let's try and identify predictors that are intrinsic to the bank's balance sheet.
That looks better. Now try some other methods like random forest, SVM, xgboost, and decision trees. Try tuning them. Which do you choose?
```
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.model_selection import train_test_split
from imblearn.over_sampling import SMOTE
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from xgboost import XGBClassifier
from sklearn import metrics
h = .02 # step size in the mesh
names = [ "Linear SVM", "Logistic",
"Decision Tree", "Random Forest", "Neural Net", "AdaBoost",
"Naive Bayes", "QDA","XGBoost"]
classifiers = [
SVC(kernel="linear", C=0.025),
LogisticRegression(),
DecisionTreeClassifier(max_depth=5),
RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
MLPClassifier(alpha=1, max_iter=1000),
AdaBoostClassifier(),
GaussianNB(),
QuadraticDiscriminantAnalysis(),
XGBClassifier()]
X, y = make_classification(n_features=5, n_redundant=0, n_informative=2,
random_state=1, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
i=1. # figure counter
# preprocess dataset, split into training and test part
X, y = cfraud[use],cfraud["Class"]
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=.3, random_state=42)
sm = SMOTE(random_state=42,sampling_strategy=.015)
X_train_res, y_train_res = sm.fit_sample(X_train, y_train.ravel())
# iterate over classifiers
for name, clf in zip(names, classifiers):
figure = plt.figure(num=i,figsize=(108, 6))
ax = plt.subplot(1, len(classifiers) + 1, i)
clf.fit(X_train, y_train)
fpr, tpr, _ = metrics.roc_curve(y_test, clf.predict(X_test))
roc_auc = metrics.auc(fpr, tpr)
# Plot of a ROC curve for a specific class
# plt.figure()
lw = 2
plt.plot(fpr, tpr, color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC for '+ name )
plt.legend(loc="lower right")
plt.show()
# Do parallel SMOTE here
figure = plt.figure(num=i,figsize=(108, 6))
ax = plt.subplot(1, len(classifiers) + 1, i)
clf.fit(X_train_res, y_train_res)
fpr, tpr, _ = metrics.roc_curve(y_test, clf.predict(X_test))
roc_auc = metrics.auc(fpr, tpr)
# Plot of a ROC curve for a specific class
# plt.figure()
lw = 2
plt.plot(fpr, tpr, color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('SMOTE: ROC for '+ name )
plt.legend(loc="lower right")
plt.show()
```
| github_jupyter |
<i>Copyright (c) Microsoft Corporation. All rights reserved.</i>
<i>Licensed under the MIT License.</i>
# Training an Image Segmentation Model
In this notebook, we show how to train and evaluate an image segmentation model using a Unet architecture based on the [fast.ai](https://www.fast.ai/) library.
We distinguish between semantic and instance segmentation. Instance segmentation aims to find and segment different objects in an image - we showed how to solve such problems in the [02_mask_rcnn.ipynb](../detection/02_mask_rcnn.ipynb) notebook.
In this notebook, we are interested in semantic segmentation, which classifies each pixel in an image by object class. Using a small dataset of four different beverage packages, we demonstrate how to train and evaluate such models.
<img src="../detection/media/segmentaion_comparison.png" width="1000"/>
## Initialization
```
# Ensure edits to libraries are loaded and plotting is shown in the notebook.
%reload_ext autoreload
%autoreload 2
%matplotlib inline
```
Import all the functions we need.
```
import sys
sys.path.append("../../")
from pathlib import Path
import urllib
import fastai
from fastai.vision import *
import numpy as np
import scrapbook as sb
from utils_cv.common.data import unzip_url
from utils_cv.common.gpu import db_num_workers, which_processor
from utils_cv.segmentation.data import Urls
from utils_cv.segmentation.dataset import read_classes
from utils_cv.segmentation.model import get_ratio_correct_metric, predict, confusion_matrix, print_accuracies
from utils_cv.segmentation.plot import plot_image_and_mask, plot_mask_stats, plot_segmentation, plot_confusion_matrix
print(f"Fast.ai version = {fastai.__version__}")
which_processor()
```
This shows your machine's GPUs (if has any) and the computing device `fastai/torch` is using.
Next, set some model runtime parameters. We use the `unzip_url` helper function to download and unzip the data used in this example notebook. Different default parameters under `MODEL_TYPE` are provided for either a smaller model, or a deeper and wider model, which tends to have higher accuracy but can be a magnitude slower to train and run inference.
It is important to keep the same aspect ration as the original image. Try reducing the `BATCH_SIZE` if the model does not fit into memory.
```
# Path to data; unzip_url downloads and extracts the dataset if not already present.
DATA_PATH = unzip_url(Urls.fridge_objects_path, exist_ok=True)

# Choose between "high_accuracy" and "fast_inference"
MODEL_TYPE = "fast_inference"

# Training parameters
EPOCHS = 10  # set to 20 or higher to improve accuracy further
LEARNING_RATE = 3e-4

# Reject all predictions with confidence lower than this threshold.
# NOTE(review): THRESHOLD is not referenced later in this notebook
# (the scoring section calls predict() with thres=0.2 directly) — confirm intent.
THRESHOLD = None

# Set parameters based on your selected model.
assert MODEL_TYPE in ["high_accuracy", "fast_inference"]
if MODEL_TYPE == "fast_inference":
    # Smaller input resolution and backbone: faster, typically less accurate.
    IM_SIZE = [200, 150]
    ARCHITECTURE = models.resnet18
    BATCH_SIZE = 8
elif MODEL_TYPE == "high_accuracy":
    # Larger input resolution and deeper backbone: more accurate but slower,
    # hence the smaller batch size to fit in memory.
    IM_SIZE = [500, 375]
    ARCHITECTURE = models.resnet50
    BATCH_SIZE = 2
```
---
# Prepare Dataset
In this notebook, we use a toy dataset specified in DATA_PATH which consists of 129 images of 4 classes of beverage containers `{can, carton, milk bottle, water bottle}`. For each image, a pixel-wise ground-truth mask is provided for training and evaluation.
```
DATA_PATH = Path(DATA_PATH)
DATA_PATH.ls()
```
You'll notice that `DATA_PATH` contains two subfolders and one file:
- `/images`
- `/segmentation-masks`
- `classes.txt`
This is a common data structure for image segmentation. The two folders use the same filename to indicate which mask corresponds to which image.
```
im_path = DATA_PATH / "images"
im_paths = sorted(get_image_files(im_path))
im_paths[:3]
anno_path = DATA_PATH / "segmentation-masks"
mask_paths = sorted(get_image_files(anno_path))
mask_paths[:3]
plot_image_and_mask(im_paths[0], mask_paths[0], alpha=0.8)
```
The masks in `segmentation-masks` contain at each pixel the id of the object. In our case, id 0 corresponds to "background", 1 to "can", 2 to "carton", 3 to "milk_bottle" and 4 to "water_bottle". This mapping from id to class name is defined in the file `classes.txt`.
```
classes_path = DATA_PATH / "classes.txt"
classes = read_classes(classes_path)
print(classes)
```
Note that the masks in `segmentation-masks` appear fully black when opening with an image viewer. This is because the maximum pixel-value in our masks is 4 (the id for "water_bottle") however the color white corresponds to a value 255 in PNG files.
When annotating images, some pixels can be ambiguous or time-consuming to annotate (e.g. object boundaries for the [pets](https://www.robots.ox.ac.uk/~vgg/data/pets/) dataset). In such cases, one can label these pixels with the special class "void". Pixels marked as "void" will then be ignored during model evaluation (but still influence training). For more details see the implementation of `get_ratio_correct_metric()` in [utils_cv\segmentation\model.py](../../utils_cv/segmentation/model.py).
# Load Images
For training and validation, we randomly split the data in an `8:2` ratio, holding 80% of the data for training and 20% for validation. One can also create dedicated train-test splits using e.g. fastai's `split_by_folder()` or `split_by_fname_file()` functions.
```
# Given the path to an image, return the path to its ground-truth mask
# (same filename stem, .png extension, in the segmentation-masks folder).
get_gt_filename = lambda x: anno_path / f"{x.stem}.png"

# Load data
data = (
    SegmentationItemList.from_folder(im_path)
    #.split_by_fname_file('../valid.txt') #example how to implement a dedicated split
    .split_by_rand_pct(valid_pct=0.2, seed=10)  # random 80/20 train/validation split
    .label_from_func(get_gt_filename, classes=classes)  # pair each image with its mask
    .transform(get_transforms(), resize_method = ResizeMethod.CROP, size=IM_SIZE, tfm_y=True)  # tfm_y=True applies the same transforms to the mask
    .databunch(bs=BATCH_SIZE, num_workers=db_num_workers())
    .normalize(imagenet_stats)  # normalize with ImageNet statistics to match the pre-trained backbone
)
```
Show the number of images in the training and validation sets.
```
data.batch_stats
```
Show all available classes:
```
print(f"Number of images: {len(data.y.items)}")
print(f"Classes: {data.classes}")
```
We examine some sample data using the `databunch` we created.
```
data.show_batch(rows=2)
```
Finally, some information about the provided ground truth masks. The figure shows the number of pixels for each class within the masks, the number of connected segments for each class, and the size distribution of the segments. Note that because of e.g. occlusions a single object can be split into multiple segments, which is why in our dataset we see some segments with only very few pixels.
```
plot_mask_stats(data, classes, figsize=(24,4), exclude_classes = ['background'])
```
# Train a Model
Our image segmentation model is based on a [UNet](https://docs.fast.ai/vision.models.unet.html) architecture with an [ImageNet](http://www.image-net.org/) pre-trained Resnet-18 backbone.
We use fastai's `unet_learner()` function to obtain a UNet object. Accuracy measures which are computed on the validation set can be specified via the `metrics` parameter.
```
learn = unet_learner(
data,
ARCHITECTURE,
wd=1e-2,
metrics=get_ratio_correct_metric(classes)
)
```
We first mark all CNN layers for training using the [`unfreeze()`](https://docs.fast.ai/basic_train.html#Learner.unfreeze) method, and then use fast.ai's [`fit_one_cycle()`](https://docs.fast.ai/basic_train.html#fit_one_cycle) to train the model.
```
learn.unfreeze()
learn.fit_one_cycle(EPOCHS, LEARNING_RATE)
learn.recorder.plot_losses()
```
Lastly we visualize some of the predictions on the validation set.
```
learn.show_results(rows=1)
```
# Validate the model
This section provides a quantitative analysis of the trained model. We report the overall accuracy (ie ratio of correctly classified pixels), as well as the accuracy for each class.
```
cmat, cmat_norm = confusion_matrix(learn, data.valid_dl)
accs = print_accuracies(cmat, cmat_norm, classes)
```
More insights into the model performance can be obtained from the confusion matrix. For each class, the matrix lists the number of pixels with correctly predicted labels along the diagonal, and incorrect pixels off-diagonal. This allows a detailed inspection which classes the model confuses.
```
plot_confusion_matrix(cmat, cmat_norm, classes)
```
# Scoring
The `predict()` and `plot_segmentation()` functions can be used to run the model on a given image, and to visualize the results respectively.
```
# Download an example image
IM_URL = "https://cvbp.blob.core.windows.net/public/images/cvbp_two_cartons.jpg"
im_path = "example.jpg"
urllib.request.urlretrieve(IM_URL, im_path)
# Run model inference
mask, scores = predict(im_path, learn, thres=0.2)
# Plot results
plot_segmentation(im_path, mask, scores)
```
The number of pixel assigned to each class in the predicted mask can be seen below.
```
for i in range(mask.max()+1):
print(f"Class {classes[i]:>15} has {sum(sum(mask == i)):>6} pixels")
# Preserve some of the notebook outputs
sb.glue("validation_overall_accuracy", accs[0])
sb.glue("validation_class_accuracies", list(accs[1]))
```
## Fine-tuning parameters <a name="finetuning"></a>
Using the provided default parameters, one can get good results across a wide variety of datasets. However, as in most machine learning projects, getting the best possible results for a new dataset often requires tuning the parameters further.
See the image classification [03_training_accuracy_vs_speed.ipynb](../../classification/notebooks/03_training_accuracy_vs_speed.ipynb) notebook for guidelines on optimizing for accuracy, inference speed, or model size for a given dataset. In addition, the notebook also goes through the parameters that typically have the largest impact on the model as well as the parameters that may not be worth modifying.
The notebook [11_exploring_hyperparameters.ipynb](11_exploring_hyperparameters.ipynb) in this directory is provided to help find the parameters with highest image segmentation accuracy. Below is an example where, to identify good default parameters for this repository, we tried different image resolutions and training epochs on six diverse datasets. As can be seen, using 10 epochs with 300x300 pixels resolution, the average accuracy over all datasets is 84%. This compares to an accuracy of 87% when using 20 epochs and 500x500 resolution, however training time increases almost by a factor of 5.
<img src="media/param_sweep.jpg" width="600px" />
# Conclusion
Using the concepts introduced in this notebook, you can bring your own dataset and train a model to detect the per-pixel location of objects of interest for your specific setting.
| github_jupyter |
# Deploying and Making Predictions with a Trained Model
**Learning Objectives**
- Deploy a model on Google CMLE
- Make online and batch predictions with a deployed model
## Introduction
In this notebook, we will deploy the model we trained to predict birthweight and we will use that deployed model to make predictions using our cloud-hosted machine learning model. Cloud ML Engine provides two ways to get predictions from trained models; i.e., online prediction and batch prediction; and we do both in this notebook.
Have a look at this blog post on [Online vs Batch Prediction](https://cloud.google.com/ml-engine/docs/tensorflow/online-vs-batch-prediction) to see the trade-offs of both approaches.
As usual we start by setting our environment variables to reference our Project and Bucket.
```
PROJECT = "cloud-training-demos" # Replace with your PROJECT
BUCKET = "cloud-training-bucket" # Replace with your BUCKET
REGION = "us-central1" # Choose an available region for Cloud MLE
TFVERSION = "1.14" # TF version for CMLE to use
import os
os.environ["BUCKET"] = BUCKET
os.environ["PROJECT"] = PROJECT
os.environ["REGION"] = REGION
os.environ["TFVERSION"] = TFVERSION
%%bash
if ! gsutil ls -r gs://${BUCKET} | grep -q gs://${BUCKET}/babyweight/trained_model/; then
gsutil mb -l ${REGION} gs://${BUCKET}
# copy canonical model if you didn't do previous notebook
gsutil -m cp -R gs://cloud-training-demos/babyweight/trained_model gs://${BUCKET}/babyweight/trained_model
fi
```
## Deploy trained model
Next we'll deploy the trained model to act as a REST web service using a simple gcloud call. To start, we'll check if our model and version already exists and if so, we'll delete them.
```
%%bash
MODEL_NAME="babyweight"
MODEL_VERSION="ml_on_gcp"
# Check to see if the model and version already exist,
# if so, delete them to deploy anew
if gcloud ai-platform models list | grep "$MODEL_NAME \+ $MODEL_VERSION"; then
echo "Deleting the version '$MODEL_VERSION' of model '$MODEL_NAME'"
yes | gcloud ai-platform versions delete ${MODEL_VERSION} --model=$MODEL_NAME
echo "Deleting the model '$MODEL_NAME'"
yes |gcloud ai-platform models delete ${MODEL_NAME}
else
echo "The model '$MODEL_NAME' with version '$MODEL_VERSION' does not exist."
fi
```
We'll now deploy our model. This will take a few minutes. Once the cell below completes, you should be able to see your newly deployed model in the 'Models' portion of the [AI Platform section of the GCP console](https://pantheon.corp.google.com/mlengine/models).
Let's have a look at the contents of the `exporter` bucket to see which model binaries we have. We can deploy a model by specifying any of these locations. To make sure we grab the model trained from the most recent training job, we'll use `tail -1`
```
%%bash
gsutil ls gs://${BUCKET}/babyweight/trained_model/export/exporter/
```
#### **Exercise 1**
After completing the TODOs in the code cell below, we will be able to deploy our saved model to the cloud and make predictions. There are two TODOs below.
- For the first TODO, write a `gcloud` command to create a model called `babyweight`.
- In the second TODO, write a `gcloud` to create a version called `ml_on_gcp`.
Look up the Cloud AI-Platform documentation to remind yourself how to create these commands. You'll need to use the `MODEL_NAME`, `MODEL_VERSION`, `MODEL_LOCATION`, `REGION` and `TFVERSION` provided for you.
```
%%bash
MODEL_NAME="babyweight"
MODEL_VERSION="ml_on_gcp"
MODEL_LOCATION=$(gsutil ls gs://${BUCKET}/babyweight/trained_model/export/exporter/ | tail -1)
echo "Deploying the model '$MODEL_NAME', version '$MODEL_VERSION' from $MODEL_LOCATION"
echo "... this will take a few minutes"
gcloud # TODO: Your code goes here
gcloud # TODO: Your code goes here
```
## Use the deployed model to make online predictions
To make online predictions, we'll send a JSON request to the endpoint of the service to make it predict a baby's weight. The order of the responses are the order of the instances.
#### **Exercise 2**
In the cell below we'll make online predictions with the model we just deployed. In order to do that, we need to set up the right `token` and `api` to create the correct request post at the bottom. Complete the TODOs below. You will need to
- Specify the correct `MODEL_NAME` and `MODEL_VERSION` we want to use for prediction
- Use `GoogleCredentials` library to create an access token
- Create a variable called `api` which specifies the Google prediction API using the Project, model name, and model version
- Add an additional instance for prediction with the following properties
'is_male': 'Unknown',
'mother_age': 29.0,
'plurality': 'Multiple(2+)',
'gestation_weeks': 38
- Create a variable called `response` which will post a request to our model API to make prediction
```
from oauth2client.client import GoogleCredentials
import requests
import json
MODEL_NAME = # TODO: Your code goes here
MODEL_VERSION = # TODO: Your code goes here
token = # TODO: Your code goes here
api = # TODO: Your code goes here
headers = {"Authorization": "Bearer " + token }
data = {
"instances": [
{
"is_male": "True",
"mother_age": 26.0,
"plurality": "Single(1)",
"gestation_weeks": 39
},
{
"is_male": "False",
"mother_age": 29.0,
"plurality": "Single(1)",
"gestation_weeks": 38
},
{
"is_male": "True",
"mother_age": 26.0,
"plurality": "Triplets(3)",
"gestation_weeks": 39
},
# TODO: Your code goes here
]
}
response = # TODO: Your code goes here
print(response.content)
```
## Use model for batch prediction
Batch prediction is commonly used when you want to make thousands to millions of predictions at a time. To perform batch prediction we'll create a file with one instance per line and submit the entire prediction job through a `gcloud` command.
To illustrate this, let's create a file `inputs.json` which has two instances on which we want to predict.
```
%%writefile inputs.json
{"is_male": "True", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
{"is_male": "False", "mother_age": 26.0, "plurality": "Single(1)", "gestation_weeks": 39}
```
#### **Exercise 3**
In the cells below we'll write the `inputs.json` file we just created to our Cloud storage bucket, then submit a batch prediction job to the cloud pointing at that file. We'll also need to specify the output location in GCS where we'd like the final predictions to be deposited. In the TODOs below, you will need to
- Use `gsutil` to copy the `inputs.json` file to the location specified by `INPUT`
- Use `gsutil` to clear out the directory specified by `OUTPUT`. This will ensure that the only things in that location are our predictions
- Complete the `gcloud` command to submit a batch prediction job.
- Specify the values of all the arguments for the `gcloud` command
Have a look at the documentation for submitting batch predictions via `gcloud` to remind yourself of the format.
```
%%bash
INPUT=gs://${BUCKET}/babyweight/batchpred/inputs.json
OUTPUT=gs://${BUCKET}/babyweight/batchpred/outputs
gsutil # TODO: Your code goes here
gsutil # TODO: Your code goes here
gcloud ai-platform # TODO: Your code goes here
--data-format= # TODO: Your code goes here
--region= # TODO: Your code goes here
--input-paths= # TODO: Your code goes here
--output-path= # TODO: Your code goes here
--model= # TODO: Your code goes here
--version= # TODO: Your code goes here
```
Check the [ML Engine jobs submitted to the GCP console](https://pantheon.corp.google.com/mlengine/jobs) to make sure the prediction job has completed, then let's have a look at the results of our predictions.
```
!gsutil ls gs://$BUCKET/babyweight/batchpred/outputs
!gsutil cat gs://$BUCKET/babyweight/batchpred/outputs/prediction.results*
```
Copyright 2017 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License
| github_jupyter |
# Introduction
This notebook walks you through the creation of an LSTM (Long Short Term Memory) model using [TensorFlow](https://www.tensorflow.org/). The model can be used to insert punctuations automatically on paragraphs without punctuations. For example, given:
*i think it is a report which will for the most part be supported by my group*
It produces:
*i think it is a report which will , for the most part , be supported by my group . *
An imaginary usage of the model is for typing --- you can type a bunch of words and let it insert punctuations for you. It may also be used in speech recognition.
The model does not rely on capitalization. All training and prediction data are converted to lowercase during data preparation.
Send any feedback to datalab-feedback@google.com.
# Prepare Data
The training data used are [europarl](http://www.statmt.org/europarl/) and [comtran](http://www.fask.uni-mainz.de/user/rapp/comtrans/) from [NLTK Corpora](http://www.nltk.org/nltk_data/). I think both are extracted from the proceedings of the European Parliament. I chose these two datasets because first they have clean punctuations, and second they are large enough to create a decent model.
```
# Download and unzip data.
!mkdir -p /content/datalab/punctuation/tmp
!mkdir -p /content/datalab/punctuation/data
!mkdir -p /content/datalab/punctuation/datapreped
!wget -q -P /content/datalab/punctuation/tmp/ https://raw.githubusercontent.com/nltk/nltk_data/gh-pages/packages/corpora/europarl_raw.zip
!wget -q -P /content/datalab/punctuation/tmp/ https://raw.githubusercontent.com/nltk/nltk_data/gh-pages/packages/corpora/comtrans.zip
!unzip -q -o /content/datalab/punctuation/tmp/europarl_raw.zip -d /content/datalab/punctuation/tmp
!unzip -q -o /content/datalab/punctuation/tmp/comtrans.zip -d /content/datalab/punctuation/tmp
!cp /content/datalab/punctuation/tmp/europarl_raw/english/* /content/datalab/punctuation/data
# We only need English from `comtran` data. Extract English text only.
# NOTE(review): the loop keeps the first line of every three-line group in
# alignment-en-fr.txt — presumably English / French / alignment triples;
# confirm against the raw file format.
with open('/content/datalab/punctuation/tmp/comtrans/alignment-en-fr.txt', 'r') as f_in, \
     open('/content/datalab/punctuation/data/comtrans.txt', 'w') as f_out:
    num_lines = 0
    for l in f_in.readlines():
        if num_lines == 0:
            f_out.write(l)
        # Cycle the counter 0 -> 1 -> 2 -> 0 across each three-line group.
        num_lines = (0 if num_lines == 2 else num_lines + 1)
"""Prepare data by cleaning up text."""
import glob
import os
from random import randint
import re
import string
def prep_data(corpora_path, out_dir):
    """Clean up raw corpora text and split it into train/validation/test files.

    Sentences are filtered (parenthesized lines, lines without terminal
    punctuation, ellipses are dropped), normalized (quotes and apostrophes
    removed, human names and numbers replaced by placeholder tokens,
    lowercased), grouped three per output line to simulate paragraphs, and
    randomly routed to 'train.txt' / 'valid.txt' / 'test.txt' in out_dir at
    roughly a 78:20:2 train:valid:test ratio.

    Args:
      corpora_path: glob pattern matching the raw corpora files to read.
      out_dir: directory that receives train.txt, valid.txt and test.txt.
    """
    printable = set(string.printable)
    all_corpora_files = glob.glob(corpora_path)
    lines = []
    for corpora_file in all_corpora_files:
        with open(corpora_file, 'r') as f:
            lines += f.readlines()
    dest_train = os.path.join(out_dir, 'train.txt')
    dest_valid = os.path.join(out_dir, 'valid.txt')
    dest_test = os.path.join(out_dir, 'test.txt')
    valid_lines = 0
    test_lines = 0
    train_lines = 0
    with open(dest_train, 'w') as f_train, open(dest_valid, 'w') as f_valid, \
         open(dest_test, 'w') as f_test:
        for l in lines:
            s = l.strip()
            # Remove "bad" sentences.
            if s.endswith(')') and s.startswith('('):
                continue
            if not s.endswith('.') and not s.endswith('!') and not s.endswith('?'):
                continue
            if s.find('...') != -1:
                continue
            # Remove quotes, apostrophes, leading dashes.
            s = re.sub('"', '', s)
            s = re.sub(" ' s ", 's ', s)
            s = re.sub("'", '', s)
            s = re.sub(r'^- ', '', s)
            # Collapse doubled terminal punctuation ("? ." -> "?", "! ." -> "!").
            # BUG FIX: the replacement must be a plain character; '\?' in a
            # re.sub replacement string keeps the backslash literally, which
            # used to inject stray backslashes into the output text.
            s = re.sub(r'\? \.', '?', s)
            s = re.sub(r'\! \.', '!', s)
            # Extract human names to reduce vocab size. There are many names like
            # 'Mrs Plooij-van Gorsel' and 'Mr Cox'. Two-word names are matched
            # first on purpose.
            s = re.sub(r'Mr [\w]+ [A-Z][\w]+ ', '[humanname] ', s)
            s = re.sub(r'Mrs [\w]+ [A-Z][\w]+ ', '[humanname] ', s)
            s = re.sub(r'Mr [\w]+ ', '[humanname] ', s)
            s = re.sub(r'Mrs [\w]+ ', '[humanname] ', s)
            # Remove brackets and contents inside.
            s = re.sub(r'\(.*\) ', '', s)
            s = re.sub(r'\(', '', s)
            s = re.sub(r'\)', '', s)
            # Extract numbers to reduce the vocab size.
            s = re.sub(r'[0-9\.]+ ', '[number] ', s)
            # Replace i.e., p.m., a.m. to reduce confusion on period.
            s = re.sub(r' i\.e\.', ' for example', s)
            s = re.sub(r' p\.m\.', ' pm', s)
            s = re.sub(r' a\.m\.', ' am', s)
            # Remove unprintable characters. BUG FIX: use ''.join instead of a
            # bare filter() call -- on Python 3 filter() returns an iterator,
            # which would break the .lower() call below.
            s = ''.join(c for c in s if c in printable)
            s = s.lower()
            # For every 3 sentences we cut a new line to simulate a paragraph.
            # Produce train/validation/test sets by roughly 78:20:2.
            r = randint(0, 50)
            if r < 10:
                valid_lines += 1
                sep = '\n' if (valid_lines % 3) == 0 else ' '
                f_valid.write(s + sep)
            elif r == 11:
                test_lines += 1
                sep = '\n' if (test_lines % 3) == 0 else ' '
                f_test.write(s + sep)
            else:
                train_lines += 1
                sep = '\n' if (train_lines % 3) == 0 else ' '
                f_train.write(s + sep)
prep_data('/content/datalab/punctuation/data/*', '/content/datalab/punctuation/datapreped')
```
# Training
Some of the code is ported from TensorFlow model [PTB Language Model](https://github.com/tensorflow/models/tree/master/tutorials/rnn/ptb).
```
# We deal with limited punctuations only because of limited training data.
PUNCTUATIONS = (u'.', u',', u'?', u'!', u':')
# `n` means no punctuation.
TARGETS = list(PUNCTUATIONS) + ['n']
# Set vocab size to remove low frequency words. Roughly with 10000 vocab, words with less than three counts are excluded.
VOCAB_SIZE = 10000
"""Helper functions for reading input data."""
import collections
import os
import tensorflow as tf
def read_words(filename):
    """Read words from file.

    Args:
      filename: path to the file to read words from.

    Returns:
      Words split by white space, with '<eos>' markers in place of newlines
      and each punctuation token swapped with the word that follows it.
    """
    with tf.gfile.GFile(filename, "r") as f:
        # NOTE(review): .decode("utf-8") implies this was written for Python 2;
        # on Python 3 GFile opened in "r" mode already returns str -- confirm
        # the target runtime before reuse.
        x = f.read().decode("utf-8").replace("\n", " <eos> ").split()
    # Guarantee the token stream ends with an end-of-sentence marker.
    if x[-1] != '<eos>':
        x.append('<eos>')
    indices = [i for i, w in enumerate(x) if w in PUNCTUATIONS]
    # The next word after a punctuation is an important signal. We switch the punctuation
    # with next word so it can be used as part of the context.
    for i in indices:
        x[i], x[i+1] = x[i+1], x[i]
    return x
def build_vocab(filename):
    """Construct the word-to-index vocabulary from a training data file.

    Words are ranked by descending frequency (ties broken alphabetically).
    Only the VOCAB_SIZE - 1 most frequent words receive their own index; the
    last index is reserved for the unknown-word token '<unk>'.

    Args:
      filename: path to the file to read words from.

    Returns:
      A dict with key being words and value being indices.
    """
    tokens = read_words(filename)
    freq = collections.Counter(tokens)
    # Most frequent first; alphabetical order breaks count ties.
    ranked = sorted(freq.items(), key=lambda item: (-item[1], item[0]))[:VOCAB_SIZE - 1]
    ordered_words, _ = zip(*ranked)
    vocab = {word: idx for idx, word in enumerate(ordered_words)}
    vocab['<unk>'] = VOCAB_SIZE - 1
    return vocab
def file_to_word_and_punc_ids(filename, word_to_id):
    """Produce indices from words in file. x are indices for words, and y are indices for punctuations.

    Args:
      filename: path to the file to read words from.
      word_to_id: the vocab to indices dict.

    Returns:
      A pair. First element is the words indices. Second element is the target punctuation indices.
    """
    x_words = read_words(filename)
    # Out-of-vocabulary words map to the reserved '<unk>' id.
    x_id = [word_to_id[w] if w in word_to_id else word_to_id['<unk>'] for w in x_words]
    target_to_id = {p:i for i, p in enumerate(TARGETS)}
    # The target at each position is derived from the *next* token: the
    # punctuation class when that token is in PUNCTUATIONS, otherwise 'n'.
    # The final position is padded with a dummy token.
    y_words = x_words[1:] + ['padding']
    y_puncts = ['n' if elem not in PUNCTUATIONS else elem for elem in y_words]
    y_id = [target_to_id[p] for p in y_puncts]
    return x_id, y_id
def content_to_word_ids(content, word_to_id):
    """Produce vocabulary indices for the words of a raw string.

    Args:
      content: raw (UTF-8 encoded) text to tokenize; newlines are treated
        as '<eos>' sentence markers.
      word_to_id: the vocab to indices dict.

    Returns:
      Words indices, with out-of-vocabulary words mapped to '<unk>'.
    """
    tokens = content.decode("utf-8").replace("\n", " <eos> ").split()
    # Mirror read_words(): swap each punctuation with the word after it so
    # the following word becomes part of the prediction context.
    punct_positions = [pos for pos, tok in enumerate(tokens) if tok in PUNCTUATIONS]
    for pos in punct_positions:
        tokens[pos], tokens[pos + 1] = tokens[pos + 1], tokens[pos]
    ids = []
    for tok in tokens:
        ids.append(word_to_id[tok] if tok in word_to_id else word_to_id['<unk>'])
    return ids
"""The training model. """
import tensorflow as tf
import json
class TrainingConfig(object):
    """Hyperparameters for building and training the punctuation model."""
    init_scale = 0.1     # uniform range for weight initialization (see train())
    learning_rate = 1.0  # initial SGD learning rate
    max_grad_norm = 5    # global-norm gradient clipping threshold
    num_layers = 2       # number of stacked LSTM layers
    num_steps = 50       # sequence length the RNN is unrolled for
    hidden_size = 150    # LSTM hidden size (also the embedding size)
    max_epoch = 20       # epochs at the full learning rate before decay kicks in
    max_max_epoch = 25   # total number of training epochs
    keep_prob = 0.5      # dropout keep probability used during training
    lr_decay = 0.7       # per-epoch learning-rate decay factor after max_epoch
    batch_size = 100     # number of sequences per batch
class TrainingInput(object):
    """The input data producer."""

    def _make_input_producer(self, raw_data, batch_size, num_steps, name=None):
        # Queue-based producer: reshape the flat id sequence into
        # [batch_size, batch_len] and dequeue consecutive num_steps-wide
        # column slices, one per training step.
        with tf.name_scope(name, "InputProducer"):
            raw_data = tf.convert_to_tensor(raw_data, name="raw_data", dtype=tf.int32)
            data_len = tf.size(raw_data)
            batch_len = data_len // batch_size
            # Drop the tail that does not fill a complete batch.
            data = tf.reshape(raw_data[0 : batch_size * batch_len], [batch_size, batch_len])
            epoch_size = (batch_len - 1) // num_steps
            epoch_size = tf.identity(epoch_size, name="epoch_size")
            # i cycles through 0..epoch_size-1 in order (shuffle=False).
            i = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue()
            x = tf.strided_slice(data, [0, i * num_steps], [batch_size, (i + 1) * num_steps])
            x.set_shape([batch_size, num_steps])
            return x

    def __init__(self, config, data_x, data_y, name=None):
        # Number of producer steps that make up one pass over the data.
        self.epoch_size = ((len(data_x) // config.batch_size) - 1) // config.num_steps
        self.input_data = self._make_input_producer(data_x, config.batch_size, config.num_steps, name=name)
        self.targets = self._make_input_producer(data_y, config.batch_size, config.num_steps, name=name)
class PuctuationModel(object):
    """The Punctuation training/evaluation model.

    A stacked-LSTM network over word embeddings; at each time step it
    predicts a class from TARGETS (a punctuation or 'n' for none).
    """

    def __init__(self, is_training, config, input_):
        """Build the graph.

        Args:
          is_training: if True, apply dropout and add the SGD training ops.
          config: a TrainingConfig instance.
          input_: a TrainingInput supplying input_data and targets batches.
        """
        self._input = input_
        batch_size = config.batch_size
        num_steps = config.num_steps
        size = config.hidden_size

        def lstm_cell():
            return tf.contrib.rnn.BasicLSTMCell(size, forget_bias=0.0, state_is_tuple=True)
        attn_cell = lstm_cell
        if is_training and config.keep_prob < 1:
            # During training, wrap each cell so dropout is applied to its outputs.
            def attn_cell():
                return tf.contrib.rnn.DropoutWrapper(lstm_cell(), output_keep_prob=config.keep_prob)
        cell = tf.contrib.rnn.MultiRNNCell([attn_cell() for _ in range(config.num_layers)], state_is_tuple=True)
        self._initial_state = cell.zero_state(batch_size, tf.float32)
        # Embedding lookup for the input word ids.
        embedding = tf.get_variable("embedding", [VOCAB_SIZE, size], dtype=tf.float32)
        inputs = tf.nn.embedding_lookup(embedding, input_.input_data)
        if is_training and config.keep_prob < 1:
            inputs = tf.nn.dropout(inputs, config.keep_prob)
        # Unroll the RNN statically for num_steps time steps.
        inputs = tf.unstack(inputs, num=num_steps, axis=1)
        outputs, state = tf.contrib.rnn.static_rnn(cell, inputs, initial_state=self._initial_state)
        output = tf.reshape(tf.concat(axis=1, values=outputs), [-1, size])
        # Project the LSTM outputs onto the punctuation classes.
        softmax_w = tf.get_variable("softmax_w", [size, len(TARGETS)], dtype=tf.float32)
        softmax_b = tf.get_variable("softmax_b", [len(TARGETS)], dtype=tf.float32)
        logits = tf.matmul(output, softmax_w) + softmax_b
        self._predictions = tf.argmax(logits, 1)
        self._targets = tf.reshape(input_.targets, [-1])
        # Cross-entropy loss over the flattened batch*num_steps positions,
        # with uniform (all-ones) per-position weights.
        loss = tf.contrib.legacy_seq2seq.sequence_loss_by_example(
            [logits],
            [tf.reshape(input_.targets, [-1])],
            [tf.ones([batch_size * num_steps], dtype=tf.float32)])
        self._cost = cost = tf.reduce_sum(loss) / batch_size
        self._final_state = state
        if not is_training:
            # Evaluation graphs stop here; no optimizer ops are needed.
            return
        self._lr = tf.Variable(0.0, trainable=False)
        tvars = tf.trainable_variables()
        # Clip gradients by global norm to stabilize SGD.
        grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), config.max_grad_norm)
        optimizer = tf.train.GradientDescentOptimizer(self._lr)
        self._train_op = optimizer.apply_gradients(
            zip(grads, tvars),
            global_step=tf.contrib.framework.get_or_create_global_step())
        # The learning rate is changed at runtime through assign_lr().
        self._new_lr = tf.placeholder(tf.float32, shape=[], name="new_learning_rate")
        self._lr_update = tf.assign(self._lr, self._new_lr)

    def assign_lr(self, session, lr_value):
        """Set the learning-rate variable to lr_value within the session."""
        session.run(self._lr_update, feed_dict={self._new_lr: lr_value})

    @property
    def input(self):
        return self._input

    @property
    def initial_state(self):
        return self._initial_state

    @property
    def final_state(self):
        return self._final_state

    @property
    def cost(self):
        return self._cost

    @property
    def predictions(self):
        return self._predictions

    @property
    def targets(self):
        return self._targets

    @property
    def lr(self):
        return self._lr

    @property
    def train_op(self):
        return self._train_op
"""The trainer. """
import numpy as np
def run_epoch(session, model, num_steps, word_to_id, is_eval=False):
    """Runs the model on the given data for one epoch.

    Args:
      session: the tf session to run ops in.
      model: a PuctuationModel (training or evaluation variant).
      num_steps: unrolled sequence length, used to count iterations.
      word_to_id: the vocab dict (not referenced in this function's body;
        kept for interface symmetry with the callers).
      is_eval: when False the train op is also fetched; when True per-class
        accuracies are printed at the end.

    Returns:
      A pair of (perplexity, confusion_matrix).
    """
    costs = 0.0
    iters = 0
    state = session.run(model.initial_state)
    fetches = {
        "cost": model.cost,
        "final_state": model.final_state,
        "predictions": model.predictions,
        "targets": model.targets,
    }
    if is_eval is False:
        fetches["train_op"] = model.train_op
    # Rows index true classes, columns index predicted classes.
    confusion_matrix = np.zeros(shape=(len(TARGETS),len(TARGETS)), dtype=np.int64)
    for step in range(model.input.epoch_size):
        feed_dict = {}
        # Set the state back to model after each run.
        for i, (c, h) in enumerate(model.initial_state):
            feed_dict[c] = state[i].c
            feed_dict[h] = state[i].h
        vals = session.run(fetches, feed_dict)
        cost = vals["cost"]
        state = vals["final_state"]
        targets = vals["targets"]
        predictions = vals['predictions']
        for t, p in zip(targets, predictions):
            confusion_matrix[t][p] += 1
        costs += cost
        iters += num_steps
    if is_eval is True:
        for i, t in enumerate(confusion_matrix):
            print('%s --- total: %d, correct: %d, accuracy: %.3f, ' % (TARGETS[i], sum(t), t[i], float(t[i]) / sum(t)))
    # Costs are calculated as cross-entropy loss.
    # Returns perplexity value (https://en.wikipedia.org/wiki/Perplexity), which is a common measurements on language models.
    return np.exp(costs / iters), confusion_matrix
def train(train_data_path, validation_data_path, save_path):
    """Train the model and save a checkpoint at the end.

    Builds a pair of graphs sharing variables (training with dropout,
    validation without), trains for config.max_max_epoch epochs with
    learning-rate decay, and writes the checkpoint plus the vocabulary
    ('word_to_id.json') next to save_path.

    Args:
      train_data_path: path to the prepared training text.
      validation_data_path: path to the prepared validation text.
      save_path: checkpoint path prefix; its directory is also the
        Supervisor logdir and where word_to_id.json is written.

    Returns:
      The checkpoint file prefix produced by the saver.
    """
    word_to_id = build_vocab(train_data_path)
    train_data_x, train_data_y = file_to_word_and_punc_ids(train_data_path, word_to_id)
    valid_data_x, valid_data_y = file_to_word_and_punc_ids(validation_data_path, word_to_id)
    config = TrainingConfig()
    with tf.Graph().as_default():
        initializer = tf.random_uniform_initializer(-config.init_scale, config.init_scale)
        with tf.name_scope("Train"):
            train_input = TrainingInput(config=config, data_x=train_data_x, data_y=train_data_y, name="TrainInput")
            with tf.variable_scope("Model", reuse=None, initializer=initializer):
                train_model = PuctuationModel(is_training=True, config=config, input_=train_input)
            tf.summary.scalar("Training_Loss", train_model.cost)
            tf.summary.scalar("Learning_Rate", train_model.lr)
        with tf.name_scope("Valid"):
            valid_input = TrainingInput(config=config, data_x=valid_data_x, data_y=valid_data_y, name="ValidInput")
            # reuse=True shares all weights with the training model.
            with tf.variable_scope("Model", reuse=True, initializer=initializer):
                valid_model = PuctuationModel(is_training=False, config=config, input_=valid_input)
            tf.summary.scalar("Validation_Loss", valid_model.cost)
        sv = tf.train.Supervisor(logdir=save_path)
        with sv.managed_session() as session:
            for i in range(config.max_max_epoch):
                # Full learning rate for the first max_epoch epochs, then
                # decay it exponentially per epoch.
                lr_decay = config.lr_decay ** max(i + 1 - config.max_epoch, 0.0)
                train_model.assign_lr(session, config.learning_rate * lr_decay)
                print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(train_model.lr)))
                train_perplexity, _ = run_epoch(session, train_model, config.num_steps, word_to_id)
                print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
                valid_perplexity, _ = run_epoch(session, valid_model, config.num_steps, word_to_id, is_eval=True)
                print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity))
            model_file_prefix = sv.saver.save(session, save_path, global_step=sv.global_step)
            # Persist the vocabulary so evaluation/inference can rebuild the
            # exact same word ids.
            word_to_id_file = os.path.join(os.path.dirname(save_path), 'word_to_id.json')
            with open(word_to_id_file, 'w') as outfile:
                json.dump(word_to_id, outfile)
            return model_file_prefix
# Delete the model directory if it exists so it always trains from beginning.
!rm -r -f /content/datalab/punctuation/model
```
Start training. Training takes about 20 ~ 30 minutes on a n1-standard-1 GCP VM.
```
model_dir = '/content/datalab/punctuation/model'
saved_model_path = model_dir + '/punctuation'
model_file_prefix = train(
'/content/datalab/punctuation/datapreped/train.txt',
'/content/datalab/punctuation/datapreped/valid.txt',
saved_model_path)
```
In epoch 1, the model predicted almost everything to be 'n'. It makes sense because the vast majority of targets is "no punctuation" for each word, so betting on that already gives good overall accuracy, although it is useless.
Starting from epoch 2, it learned to predict some '.'. After epoch 10, it could predict about 50% of ','. Only after epoch 15 it started predicting some '?'. Unfortunately, it never predicted '!' well, probably because the difference between '.' and '!' is very subtle. It also had problems predicting ':', maybe because lack of training instances.
Start a tensorboard instance, and you will see the training/validation loss curves, as well as other stats.
```
# Start a tensorboard to see the curves in Datalab.
from google.datalab.ml import TensorBoard
tb = TensorBoard.start(model_dir)
```
Tensorboard is good but the curves are not saved with notebook. We can use Datalab's library to list and plot events.
```
from google.datalab.ml import Summary
summary = Summary(model_dir)
summary.list_events()
summary.plot(event_names=['Train/Training_Loss', 'Valid/Validation_Loss'])
```
From the curves above, we got the best validation results around step 4000, and then in some runs a little bit over-fitting after.
# Evaluation
At this point, we are done with training, and evaluation starts from a saved checkpoint. We will reuse the `PuctuationModel` defined earlier since evaluation model and training model are mostly the same.
```
"""Run the model with some test data."""
import os
def run_eval(model_file_prefix, test_data_path):
    """Run evaluation on test data.

    Args:
        model_file_prefix: checkpoint path prefix produced by train(); the
            word_to_id.json vocabulary is expected in the same directory.
        test_data_path: path to the prepared test text file.

    Returns:
        The confusion-matrix data (cm_data) produced by run_epoch.
    """
    # The vocabulary must match the one used at training time, so it is
    # loaded from the file saved next to the checkpoint.
    word_to_id_file = os.path.join(os.path.dirname(model_file_prefix), 'word_to_id.json')
    with open(word_to_id_file, 'r') as f:
        word_to_id = json.load(f)
    test_data_x, test_data_y = file_to_word_and_punc_ids(test_data_path, word_to_id)
    # batch_size = num_steps = 1 so evaluation walks the test sequence one
    # token at a time; all other hyperparameters are shared with training.
    eval_config = TrainingConfig()
    eval_config.batch_size = 1
    eval_config.num_steps = 1
    with tf.Graph().as_default():
        with tf.name_scope("Test"):
            test_input = TrainingInput(config=eval_config, data_x=test_data_x, data_y=test_data_y, name="TestInput")
            # Same variable scope name ("Model") as training so checkpoint
            # variables map onto this graph on restore.
            with tf.variable_scope("Model", reuse=None):
                mtest = PuctuationModel(is_training=False, config=eval_config, input_=test_input)
        logdir=os.path.join(os.path.dirname(model_file_prefix), 'eval')
        sv = tf.train.Supervisor(logdir=logdir)
        with sv.managed_session() as session:
            sv.saver.restore(session, model_file_prefix)
            # test_perplexity is computed by run_epoch but only the
            # confusion-matrix data is returned to the caller.
            test_perplexity, cm_data = run_epoch(session, mtest, 1, word_to_id, is_eval=True)
    return cm_data
```
View accuracy and confusion matrix.
```
from google.datalab.ml import ConfusionMatrix
from pprint import pprint
cm_data = run_eval(model_file_prefix, '/content/datalab/punctuation/datapreped/test.txt')
pprint(cm_data.tolist())
cm = ConfusionMatrix(cm_data, TARGETS)
cm.plot()
```
Confusion matrix after removing "no punctuation".
```
cm_data_puncuations = cm_data.tolist()
for i, r in enumerate(cm_data_puncuations):
cm_data_puncuations[i] = r[:-1]
cm_data_puncuations = cm_data_puncuations[:-1]
ConfusionMatrix(cm_data_puncuations, TARGETS[:-1]).plot()
```
Many of the "," are mistakenly predicted as "no punctuation", probably because many times either with or without comma is correct in syntax. There are some confusions between "," and ".", meaning that the model "knows" it is a break in sentence, but mistakenly chose comma or period. 65% of question marks are predicted correctly. For that we can give credits to LSTM model because it can "remember" the beginning of a sentence (which, what, where, etc) even if it is long.
# Prediction
Fun time. Let's try generating some punctuations on test data. We'll need to define a "Prediction Model". It is a simplified training model, with num_steps and batch_size both being 1, and no loss or training ops. But the model is "compatible" with the training model in the sense that they share same variables, and it can load a checkpoint produced in training.
```
import tensorflow as tf
class PredictModel(object):
    """The Prediction model.

    A batch-size-1, single-step version of the training model with no loss
    or training ops. It declares the same variable names as the training
    graph so a training checkpoint can be restored into it.
    """
    def __init__(self, config):
        # One word id per step: shape (batch=1, steps=1).
        self._input = tf.placeholder(shape=[1, 1], dtype=tf.int64)
        size = config.hidden_size
        def lstm_cell():
            return tf.contrib.rnn.BasicLSTMCell(size, forget_bias=0.0, state_is_tuple=True)
        cell = tf.contrib.rnn.MultiRNNCell([lstm_cell() for _ in range(config.num_layers)], state_is_tuple=True)
        # State is carried between calls by the caller (see Predictor).
        self._initial_state = cell.zero_state(1, tf.float32)
        # Variable names ("embedding", "softmax_w", "softmax_b") must match
        # the training graph for checkpoint restore to work.
        embedding = tf.get_variable("embedding", [VOCAB_SIZE, size], dtype=tf.float32)
        inputs = tf.nn.embedding_lookup(embedding, self._input)
        inputs = tf.unstack(inputs, num=1, axis=1)
        outputs, self._final_state = tf.contrib.rnn.static_rnn(cell, inputs, initial_state=self._initial_state)
        output = tf.reshape(tf.concat(axis=1, values=outputs), [-1, size])
        softmax_w = tf.get_variable("softmax_w", [size, len(TARGETS)], dtype=tf.float32)
        softmax_b = tf.get_variable("softmax_b", [len(TARGETS)], dtype=tf.float32)
        logits = tf.matmul(output, softmax_w) + softmax_b
        # Hard argmax over punctuation classes; no loss needed at predict time.
        self._prediction = tf.argmax(logits, 1)
    @property
    def input(self):
        return self._input
    @property
    def initial_state(self):
        return self._initial_state
    @property
    def final_state(self):
        return self._final_state
    @property
    def prediction(self):
        return self._prediction
"""The Predictor that runs the prediction model."""
import json
import os
import random
class Predictor(object):
    """Runs the prediction model to insert punctuation into raw text."""

    def __init__(self, model_file_prefix):
        # Load the training-time vocabulary saved next to the checkpoint.
        word_to_id_file = os.path.join(os.path.dirname(model_file_prefix), 'word_to_id.json')
        with open(word_to_id_file, 'r') as f:
            self._word_to_id = json.load(f)
        # Reverse mapping for id -> word lookups. dict views are not
        # indexable in Python 3, so the old keys()/values().index() trick
        # fails with a TypeError. setdefault keeps the FIRST word seen for
        # a duplicated id, matching the old values().index() behavior.
        self._id_to_word = {}
        for word, word_id in self._word_to_id.items():
            self._id_to_word.setdefault(word_id, word)
        config = TrainingConfig()
        with tf.Graph().as_default():
            with tf.variable_scope("Model", reuse=None):
                self._model = PredictModel(config=config)
            saver = tf.train.Saver()
            self._session = tf.Session()
            saver.restore(self._session, model_file_prefix)

    def _get_predicted_until_punc(self, min_steps, data_x):
        """Feed word ids one at a time; stop at the first non-'n' prediction
        made after min_steps steps. Returns the predictions so far."""
        state = self._session.run(self._model.initial_state)
        fetches = {
            "final_state": self._model.final_state,
            "prediction": self._model.prediction,
        }
        predicted_puncs = []
        step = 0
        for x in data_x:
            # Carry the LSTM state across steps manually, since the model
            # processes a single (batch=1, step=1) token per session.run.
            feed_dict = {}
            for i, (c, h) in enumerate(self._model.initial_state):
                feed_dict[c] = state[i].c
                feed_dict[h] = state[i].h
            feed_dict[self._model.input] = [[x]]
            vals = self._session.run(fetches, feed_dict)
            state = vals["final_state"]
            prediction = vals["prediction"]
            predicted = TARGETS[prediction[0]]
            predicted_puncs.append(predicted)
            step += 1
            if predicted != 'n' and step > min_steps:
                break
        return predicted_puncs

    def _apply_puncts_to_original(self, original, inserted):
        """Copy punctuation tokens found in `inserted` back into `original`
        at the same word positions."""
        current_index = 0
        punc_positions = {}
        for w in inserted.split():
            if w in PUNCTUATIONS:
                punc_positions[current_index] = w
            else:
                current_index += 1
        words = []
        # The trailing '' allows a punctuation mark after the last word.
        for i, w in enumerate(original.split() + ['']):
            if i in punc_positions:
                words.append(punc_positions[i])
            words.append(w)
        return ' '.join(words)

    def predict(self, content):
        """Insert punctuations with given string."""
        content = content.strip().lower()
        # Strip any existing punctuation so the model starts from plain words.
        for p in PUNCTUATIONS:
            content = content.replace(' ' + p, '')
        prediction_source = content
        prediction_result = ''
        content = '<eos> ' + content + ' <eos>'
        min_step = 0
        while True:
            data_x = content_to_word_ids(content, self._word_to_id)
            puncts = self._get_predicted_until_punc(min_step, data_x)
            if len(data_x) == len(puncts):
                # Reached the end of the sequence: attach the final punctuation.
                content = content.replace('. <eos> ', '').replace(' <eos>', ' ' + puncts[-1]) + '\n'
                prediction_result = self._apply_puncts_to_original(prediction_source, content)
                break
            else:
                # Insert the newly predicted punctuation into the word stream
                # and re-run the model from the start of the sequence.
                words1 = [self._id_to_word[data_x[index]] for index in range(len(puncts) - 1)]
                indices = [i for i, w in enumerate(words1) if w in PUNCTUATIONS]
                # Move previously inserted punctuation after the word it follows.
                for i in indices:
                    words1[i], words1[i - 1] = words1[i - 1], words1[i]
                words2 = [self._id_to_word[data_x[index]] for index in range(len(puncts) - 1, len(data_x))]
                all_words = words1 + [puncts[-1]] + words2
                content = ' '.join(all_words)
                min_step = len(puncts)
        return prediction_source, prediction_result

    def predict_from_test_file(self, filename, num_random_lines):
        """Pick num_random_lines random lines from a test file and yield
        (truth, source, predicted) triples.

        (The previous version also counted the file's lines into an unused
        variable via a second open() that was never closed; removed.)
        """
        with open(filename) as f:
            lines = random.sample(f.readlines(), num_random_lines)
        for line in lines:
            line = line.strip().lower()
            source, predicted = self.predict(line)
            yield line, source, predicted

    def close(self):
        """Release the TensorFlow session."""
        self._session.close()
```
Let's play with three paragraphs. First and second are single sentences, the third one contains multiple sentences.
```
predictor = Predictor(model_file_prefix)
sources = [
'i think it is a report which will for the most part be supported by my group',
'so what is the european union doing about it',
'we must work more rapidly towards achieving the targets stipulated ' +
'in the white paper for renewable energy sources as this would bring ' +
'about a massive reduction in greenhouse gases but in common with others ' +
' we too are having to endure the greenhouse effect furthermore we should ' +
'utilise an extraordinary budget line since this is an extraordinarily catastrophic situation',
]
for s in sources:
source, predicted = predictor.predict(s)
print('\n---SOURCE----\n' + source)
print('---PREDICTED----\n' + predicted)
predictor.close()
```
The last prediction is actually somewhat incorrect. It should be:
`we must work more rapidly towards achieving the targets stipulated in the white paper for renewable energy sources , as this would bring about a massive reduction in greenhouse gases . but in common with others , we too are having to endure the greenhouse effect . furthermore , we should utilise an extraordinary budget line , since this is an extraordinarily catastrophic situation .`
It mistakenly predicted the first period where it should be comma. I think we may improve it by showing more words instead of one after the punctuation, or doing it bidirectionally and mix both scores.
Below we try some data outside our test data (test data and training data are generated from the same data). The first two are common conversational questions, and third is from recent european parliament news.
```
predictor = Predictor(model_file_prefix)
sources = [
'how are you',
'where do you see yourself in five years',
'last december the european commission proposed updating the existing customs union with ' +
'turkey and extending bilateral trade relations once negotiations have been completed ' +
'the agreement would still have to be approved by the Parliament before it could enter into force',
]
for s in sources:
source, predicted = predictor.predict(s)
print('\n---SOURCE----\n' + source)
print('---PREDICTED----\n' + predicted)
predictor.close()
```
As a convenience, the predictor can pick random sentences from a test file.
```
predictor = Predictor(model_file_prefix)
for t, s, p in predictor.predict_from_test_file('/content/datalab/punctuation/datapreped/test.txt', 3):
print('\n---SOURCE----\n' + s)
print('---PREDICTED----\n' + p)
print('---TRUTH----\n' + t)
predictor.close()
```
# Clean up
```
TensorBoard.stop(tb)
```
| github_jupyter |
# Quick start guide
## Installation
### Stable
Fri can be installed via the Python Package Index (PyPI).
If you have `pip` installed just execute the command
pip install fri
to get the newest stable version.
The dependencies should be installed and checked automatically.
If you have problems installing please open issue at our [tracker](https://github.com/lpfann/fri/issues/new).
### Development
To install a bleeding edge dev version of `FRI` you can clone the GitHub repository using
git clone git@github.com:lpfann/fri.git
and then check out the `dev` branch: `git checkout dev`.
We use [poetry](https://poetry.eustace.io/) for dependency management.
Run
poetry install
in the cloned repository to install `fri` in a virtualenv.
To check if everything works as intended you can use `pytest` to run the unit tests.
Just run the command
poetry run pytest
in the main project folder
## Using FRI
Now we showcase the workflow of using FRI on a simple classification problem.
### Data
To have something to work with, we need some data first.
`fri` includes a generation method for binary classification and regression data.
In our case we need some classification data.
```
import numpy as np
# fixed Seed for demonstration
STATE = np.random.RandomState(123)
from fri import genClassificationData
```
We want to create a small set with a few features.
Because we want to showcase the all-relevant feature selection, we generate multiple strongly and weakly relevant features.
```
# Generate n samples with 6 features: 2 strongly relevant, 2 weakly
# (redundant) relevant, and the remaining 2 irrelevant.
n = 300
features = 6
strongly_relevant = 2
weakly_relevant = 2
X,y = genClassificationData(n_samples=n,
                            n_features=features,
                            n_strel=strongly_relevant,
                            n_redundant=weakly_relevant,
                            random_state=STATE)
```
The method also prints out the parameters again.
```
X.shape
```
We created a binary classification set with 6 features of which 2 are strongly relevant and 2 weakly relevant.
#### Preprocess
Because our method expects mean centered data we need to standardize it first.
This centers the values around 0 and deviation to the standard deviation
```
from sklearn.preprocessing import StandardScaler
X_scaled = StandardScaler().fit_transform(X)
```
### Model
Now we need to create a Model.
We use the `FRI` module.
```
import fri
```
`fri` provides a convenience class `fri.FRI` to create a model.
`fri.FRI` needs the type of problem as a first argument of type `ProblemName`.
Depending on the Problem you want to analyze pick from one of the available models in `ProblemName`.
```
list(fri.ProblemName)
```
Because we have Classification data we use the `ProblemName.CLASSIFICATION` to instantiate our model.
```
fri_model = fri.FRI(fri.ProblemName.CLASSIFICATION,
loss_slack=0.2,
w_l1_slack=0.2,
random_state=STATE)
fri_model
```
We used no parameters for creation so the defaults are active.
#### Fitting to data
Now we can just fit the model to the data using `scikit-learn` like commands.
```
fri_model.fit(X_scaled,y)
```
The resulting feature relevance bounds are saved in the `interval_` variable.
```
fri_model.interval_
```
If you want to print out the relevance class use the `print_interval_with_class()` function.
```
print(fri_model.print_interval_with_class())
```
The bounds are grouped in 2d sublists for each feature.
To access the relevance bounds for feature 2 we would use
```
fri_model.interval_[2]
```
The relevance classes are saved in the corresponding variable `relevance_classes_`:
```
fri_model.relevance_classes_
```
`2` denotes strongly relevant features, `1` weakly relevant and `0` irrelevant.
#### Plot results
The bounds in numerical form are useful for postprocessing.
If we want a human to look at it, we recommend the plot function `plot_relevance_bars`.
We can also color the bars according to `relevance_classes_`
```
# Import plot function
from fri.plot import plot_relevance_bars
import matplotlib.pyplot as plt
%matplotlib inline
# Create new figure, where we can put an axis on
fig, ax = plt.subplots(1, 1,figsize=(6,3))
# plot the bars on the axis, colored according to fri
out = plot_relevance_bars(ax,fri_model.interval_,classes=fri_model.relevance_classes_)
```
### Setting constraints manually
Our model also allows to compute relevance bounds when the user sets a given range for the features.
We use a dictionary to encode our constraints.
```
preset = {}
```
#### Example
As an example, let us constrain the third from our example to the minimum relevance bound.
```
preset[2] = fri_model.interval_[2, 0]
```
We use the function `constrained_intervals`.
Note: we need to fit the model before we can use this function.
We already did that, so we are fine.
```
const_ints = fri_model.constrained_intervals(preset=preset)
const_ints
```
Feature 3 is set to its minimum (at 0).
How does it look visually?
```
fig, ax = plt.subplots(1, 1,figsize=(6,3))
out = plot_relevance_bars(ax, const_ints)
```
Feature 3 is reduced to its minimum (no contribution).
In turn, its correlated partner feature 4 had to take its maximum contribution.
### Print internal Parameters
If we want to take at internal parameters, we can use the `verbose` flag in the model creation.
```
fri_model = fri.FRI(fri.ProblemName.CLASSIFICATION, verbose=True, random_state=STATE)
fri_model.fit(X_scaled,y)
```
This prints out the parameters of the baseline model
One can also see the best selected hyperparameter according to gridsearch and the training score of the model in `score`.
### Multiprocessing
To enable multiprocessing simply use the `n_jobs` parameter when init. the model.
It expects an integer parameter which defines the amount of processes used.
`n_jobs=-1` uses all available on the CPU.
```
fri_model = fri.FRI(fri.ProblemName.CLASSIFICATION,
n_jobs=-1,
verbose=1,
random_state=STATE)
fri_model.fit(X_scaled,y)
```
| github_jupyter |
########################################################
### This file is used to generate Table 4-6, Fig 2-3 ###
########################################################
- [Forward Problem](#Forward-Problem)
- [Verify Assumption 1](#Verify-Assumption-1)
- [Table 4](#Table-4)
- [Table 5](#Table-5)
- [Verify Lemma 1](#Verify-Lemma-1)
- [Left plot in Figure 2](#Left-plot-in-Figure-2)
- [Verify Theorem 3.1](#Verify-Theorem-3.1)
- [Right plot in Figure 2](#Right-plot-in-Figure-2)
- [Inverse Problem](#Inverse-Problem)
- [Verify Assumption 2](#Verify-Assumption-2)
- [Table 6](#Table-6)
- [Verify Theorem 4.2](#Verify-Theorem-4.2)
- [Figure 3](#Figure-3)
```
import os
import numpy as np
import numpy.polynomial.legendre as leg
from scipy.stats import beta
from scipy.stats import uniform
from scipy.integrate import odeint
from scipy.stats import gaussian_kde as kde
from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets
from matplotlib import pyplot as plt
%matplotlib inline
####### Plot Formatting ######
plt.rc('lines', linewidth = 1.5)
plt.rc('xtick', labelsize = 14)
plt.rc('ytick', labelsize = 14)
plt.rc('legend',fontsize=14)
# plt.rcParams["font.family"] = "serif"
plt.rcParams['axes.labelsize'] = 20
plt.rcParams['axes.titlesize'] = 12
plt.rcParams['lines.markersize'] = 6
plt.rcParams['figure.figsize'] = (8.0, 6.0)
```
## Modified version of Example from Xiu2002
$$ \frac{dy(t)}{dt} = -\lambda y, \ \ y(0)=1 $$
$$ y(t) = e^{-\lambda t} $$
$$QoI = y(0.5)$$
$\lambda\sim U[-1,1]$, $t\in[0,1]$
$\Lambda = [-1,1]$, $\mathcal{D}=[e^{-0.5},e^{0.5}]$
```
def Phi(n):
    """Coefficient vector selecting the Legendre polynomial L_n:
    n zeros followed by a single one."""
    return [0] * n + [1]
def inner2_leg(n):
    """Squared L^2 norm of the Legendre polynomial: <L_n, L_n> = 2/(2n+1)."""
    denominator = 2 * n + 1
    return 2 / denominator
def product3_leg(i, j, l):
    """Return a callable x -> L_i(x) * L_j(x) * L_l(x).

    The coefficient product is computed once here instead of on every call
    of the returned function (the original rebuilt the legmul product
    inside the lambda at each evaluation).
    """
    prod_coeffs = leg.legmul(leg.legmul(Phi(i), Phi(j)), Phi(l))
    return lambda x: leg.legval(x, prod_coeffs)
def inner3_leg(i, j, l):
    """Compute the triple product <Phi_i Phi_j Phi_l> with 20-point
    Gauss-Legendre quadrature (exact for the polynomial integrand at the
    degrees used here).
    """
    x, w = leg.leggauss(20)
    # Build the product polynomial once; the original called product3_leg
    # (an expensive legmul chain) separately for every quadrature node.
    f = product3_leg(i, j, l)
    return sum(f(x[idx]) * w[idx] for idx in range(20))
def ode_system_leg(y, t, P):
    '''P indicates highest order of Polynomial we use'''
    # Galerkin projection of dy/dt = -lambda*y onto the Legendre basis:
    # dy_l/dt = -sum_{i,j} <Phi_i Phi_j Phi_l> k_i y_j / <Phi_l, Phi_l>.
    # NOTE(review): inner3_leg is recomputed at every RHS evaluation; the
    # triple products could be precomputed once outside odeint for speed.
    dydt = np.zeros(P+1)
    for l in range(len(dydt)):
        dydt[l] = -(sum(sum(inner3_leg(i,j,l)*ki_leg[i]*y[j] for j in range(P+1)) for i in range(P+1)))/inner2_leg(l)
    return dydt
P=5
# Legendre expansion of lambda itself: lambda = 0*L_0 + 1*L_1.
ki_leg = [0,1]+[0]*(P-1)
# Solve the Galerkin ODE system for the PCE coefficients y_i(t), t in [0, 1].
sol_leg = odeint(ode_system_leg, [1.0]+[0.0]*P, np.linspace(0,1,101), args=(P,))
def a(i):
    # Coefficient y_i at t = 0.5 (index 50 of the 101-point time grid).
    return sol_leg[:,i][50]
coef = np.array([a(0), a(1), a(2), a(3), a(4), a(5)]) #fixed
def Q(i, x):
    """Evaluate the degree-i PCE surrogate of the QoI at x (uses global coef)."""
    truncated = coef[:(i + 1)]
    return leg.legval(x, truncated)
def Qexact(x):
    """Exact QoI map: y(0.5) = exp(-0.5 * lambda)."""
    return np.exp(-0.5 * x)
#### Use plot to show the difference between the exact and approximate map #####
fig = plt.figure()
def plot_Qn(n):
    # Redraw the exact map Q and the degree-n PCE surrogate Q_n on [-3, 3].
    fig.clear()
    x = np.linspace(-3,3,100)
    y = Qexact(x)
    yn = Q(n, x)
    plt.plot(x,y,linestyle='-.',linewidth=4,label="$Q(\lambda)$")
    plt.plot(x,yn,label='Q_'+str(n)+'$(\lambda)$')
    plt.xlabel('$\Lambda$')
    plt.legend();
# Interactive slider over the PCE degree n.
interact(plot_Qn,
         n = widgets.IntSlider(value=1,min=1,max=5,step=1))
```
## Forward Problem
$\lambda\sim U([-1,1])$, QOI is the value at $t=0.5$ ($y(0.5)$). $Q_n$ defines the Polynomial Chaos expansion with degree $n$.
$$
Q(\lambda)=y(0.5)=\sum\limits_{i=0}^{\infty} y_i(0.5)\Phi_i
$$
$$
Q_n(\lambda)=\sum\limits_{i=0}^n y_i(0.5)\Phi_i
$$
Verify Result of Lemma 2:
$Q_n(\lambda)\to Q(\lambda)$ in $L^p(\Lambda)$, if Assumptions 1 holds and $D_c\subset\mathcal{D}$ being compact, then
\begin{equation}
\pi_{\mathcal{D}}^{Q_n}(q) \to \pi_{\mathcal{D}}^{Q}(q) \text{ in } L^r(D_c)
\end{equation}
Since $\mathcal{D}$ is compact in this problem, we choose $D_c=\mathcal{D}$.
Verify Result of Theorem 3.1:
$Q_n(\lambda)\to Q(\lambda)$ in $L^p(\Lambda)$, if Assumptions 1 holds, $\{\pi_{\mathcal{D}}^{Q_n}\}$ are uniformly integrable in $L^p(\mathcal{D})$, $\mathcal{D}$ is compact, then
\begin{equation}
\pi_{\mathcal{D}}^{Q_n}(Q_n(\lambda)) \to \pi_{\mathcal{D}}^{Q}(Q(\lambda)) \text{ in } L^p(\Lambda)
\end{equation}
### Verify Assumption 1
```
##### Generate data in Table 4 and 5 #####
def assumption1(n,J):
    """Estimate bounds on the degree-n push-forward density from J samples.

    Returns:
        (max |d/dq pi_D^{Q_n}(q)|, max pi_D^{Q_n}(q)) on a grid over [-1, 3],
        both rounded to 2 decimals (Lipschitz bound, sup bound).
    """
    np.random.seed(123456)
    initial_sample = np.random.uniform(-1,1,size = J)
    pfprior_sample_n = Q(n,initial_sample)
    pfprior_dens_n = kde(pfprior_sample_n)
    # np.gradient of the KDE on a fine grid approximates the density's
    # derivative for the Lipschitz estimate.
    x = np.linspace(-1,3,1000)
    return np.round(np.max(np.abs(np.gradient(pfprior_dens_n(x), x))), 2), np.round(np.max(pfprior_dens_n(x)),2)
size_J = [int(1E3), int(1E4), int(1E5)]
degree_n = [1, 2, 3, 4, 5]
Bound_matrix, Lip_Bound_matrix = np.zeros((3, 5)), np.zeros((3, 5))
for i in range(3):
    for j in range(5):
        # assumption1 fits a KDE over J samples, which is expensive; call it
        # once per (n, J) instead of twice as in the original loop.
        lip_bound, sup_bound = assumption1(degree_n[j], size_J[i])
        Lip_Bound_matrix[i, j] = lip_bound
        Bound_matrix[i, j] = sup_bound
```
#### Table 4
```
###########################################
################ Table 4 ##################
###########################################
print('Table 4')
print('Bound under certain n and J values')
print(Bound_matrix)
```
#### Table 5
```
###########################################
################ Table 5 ##################
###########################################
print('Table 5')
print('Lipschitz bound under certain n and J values')
print(Lip_Bound_matrix)
#### Use plot to show the difference between the exact pushforward and approximate pushforward #####
fig=plt.figure()
def plot_pushforward(n,J):
    """Overlay the exact push-forward KDE and the degree-n PCE push-forward KDE."""
    fig.clear()  # the original called fig.clear() twice; one call suffices
    np.random.seed(123456)
    initial_sample = np.random.uniform(-1,1,size = J)
    pfprior_sample = Qexact(initial_sample)
    pfprior_dens = kde(pfprior_sample)
    pfprior_sample_n = Q(n,initial_sample)
    pfprior_dens_n = kde(pfprior_sample_n)
    x = np.linspace(-1,3,1000)
    y = pfprior_dens(x)
    yn = pfprior_dens_n(x)
    plt.plot(x,y,color='r', linestyle='-.', linewidth=4, label="$\pi_{\mathcal{D}}^Q$")
    plt.plot(x,yn,linewidth=2,label="$\pi_{\mathcal{D}}^{Q_{n}}$")
    # Reuse yn instead of evaluating the KDE three more times for the title.
    plt.title('Lipschitz const. = %4.2f and Bound = %2.2f' %(np.max(np.abs(np.gradient(yn, x))),
                                                             np.max(yn)))
    plt.xlabel("$\mathcal{D}$")
    plt.legend()
interact(plot_pushforward,
         n = widgets.IntSlider(value=1,min=1,max=5,step=1),
         J = widgets.IntSlider(value=int(1E3),min=int(1E3),max=int(1E5),step=int(1E3)))
```
### Verify Lemma 1
**Print out Monte Carlo Approximation of $ \|\pi_{\mathcal{D}}^Q(q)-\pi_{\mathcal{D}}^{Q_n}(q)\|_{L^r(\mathcal{D_c})} $ where $r>0$ and $D_c=\mathcal{D}$ because $\mathcal{D}$ is compact.**
```
# Build pi_D^Q and pi_D^{Q,n}; use 10,000 samples of the initial density.
N_kde = int(1E4)
N_mc = int(1E4)
np.random.seed(123456)
initial_sample = np.random.uniform(-1,1,size = N_kde)
pfprior_sample = Qexact(initial_sample)
pfprior_dens = kde(pfprior_sample)
# Cache one fitted KDE per PCE degree n: fitting over N_kde samples is
# expensive and the error loops below evaluate each degree repeatedly.
# initial_sample is fixed after this cell, so the cache stays valid.
_pfprior_kde_by_n = {}
def pfprior_dens_n(n,x):
    """Evaluate the KDE of the degree-n push-forward samples at x."""
    if n not in _pfprior_kde_by_n:
        _pfprior_kde_by_n[n] = kde(Q(n, initial_sample))
    return _pfprior_kde_by_n[n](x)
error_r_D = np.zeros((5,5))
np.random.seed(123456)
qsample = np.random.uniform(np.exp(-0.5),np.exp(0.5),N_mc)
# Hoist the loop-invariant density evaluations out of the double loop
# (the original re-evaluated both KDEs on all N_mc points 25 times).
exact_vals = pfprior_dens(qsample)
approx_vals = [pfprior_dens_n(j + 1, qsample) for j in range(5)]
for i in range(5):  # L^r norm with r = i + 1
    for j in range(5):  # PCE degree n = j + 1
        error_r_D[i,j] = (np.mean((np.abs(exact_vals - approx_vals[j]))**(i+1)))**(1/(i+1))
np.set_printoptions(linewidth=110)
print('L^r error on data space for Forward Problem',end='\n\n')
print(error_r_D)
#### To make it cleaner, create Directory "images" to store all the figures ####
imagepath = os.path.join(os.getcwd(),"images")
os.makedirs(imagepath,exist_ok=True)
```
#### Left plot in Figure 2
```
###########################################
######### The left plot of Fig 2 ##########
###########################################
fig = plt.figure()
plt.xlim([0,6])
marker = ['-D', '-o', '-v', '-s', '-.']
for i in range(5):
    # np.str was an alias for the builtin str and was removed in NumPy 1.20;
    # use the builtin directly.
    plt.semilogy([1,2,3,4,5],error_r_D[i,:],marker[i],label='r = ' + str(i+1))
plt.xlabel('Order of PCE (n)')
plt.ylabel('$L^r$'+' Error in Push-Forward on '+'$\mathcal{D}$')
plt.legend();
# fig.savefig("images/1forward_D_uniform.png")
fig.savefig("images/Fig2(Left).png")
```
### Verify Theorem 3.1
**Print out Monte Carlo Approximation of $ \|\pi_{\mathcal{D}}^Q(Q(\lambda))-\pi_{\mathcal{D}}^{Q_n}(Q_n(\lambda))\|_{L^2(\Lambda)} $**
```
##### Generate data for the right plot of Fig 2 #####
np.random.seed(123456)
lamsample = np.random.uniform(-1,1,size = N_mc)
error_2 = np.zeros(5)
# L^2 error on the parameter space: compare the exact and degree-(i+1)
# push-forward densities composed with their respective QoI maps.
for i in range(5):
    error_2[i] = (np.mean((np.abs(pfprior_dens(Qexact(lamsample)) - pfprior_dens_n(i+1,Q(i+1,lamsample))))**2))**(1/2)
np.set_printoptions(linewidth=110)
print('L^2 error on parameter space for Forward Problem',end='\n\n')
print(error_2)
```
#### Right plot in Figure 2
```
############################################
######### The right plot of Fig 2 ##########
############################################
fig = plt.figure()
plt.xlim([0,6])
# Convergence of the push-forward on the parameter space vs. PCE degree.
plt.semilogy([1,2,3,4,5],error_2,'-s')
plt.xlabel('Order of PCE (n)')
plt.ylabel('$L^2$'+' Error in Push-Forward on '+'$\Lambda$');
# fig.savefig("images/1forward_Lam_uniform.png")
fig.savefig("images/Fig2(Right).png")
```
## Inverse Problem
Initial guess is $\lambda\sim U([-1,1])$.
Observation is $\pi_{\mathcal{D}}\sim Beta(4,4)$ with location and scale parameters chosen to be on $[1,1.25]$.
Verify Result of Theorem 4.2:
$Q_n(\lambda)\to Q(\lambda)$ in $L^p(\Lambda)$, $\pi_{\Lambda}^i\in L^p(\mathcal{D})$. If Assumptions 1, 2 hold, $\{\pi_{\mathcal{D}}^{Q_n}\}$ are uniformly integrable in $L^p(\mathcal{D})$, then
\begin{equation}
\pi_{\Lambda}^{u,n}(\lambda) \to \pi_{\Lambda}^{u}(\lambda) \text{ in } L^p(\Lambda)
\end{equation}
```
def pdf_obs(x):
    """Observed density on the data space: a Beta(4, 4) distribution
    shifted and scaled onto the interval [1, 1.25]."""
    shape_a, shape_b = 4, 4
    return beta.pdf(x, a=shape_a, b=shape_b, loc=1, scale=0.25)
#### Compare the observed density with the push-forward of the initial density #####
fig = plt.figure()
xx = np.linspace(-1,3,1000)
y = pdf_obs(xx)
y_pf = pfprior_dens(xx)
plt.plot(xx,y,label="$\pi_{\mathcal{D}}^{obs}$")
plt.plot(xx,y_pf, label="$\pi_{\mathcal{D}}^{Q(init)}$")
plt.xlabel("$\mathcal{D}$")
plt.legend();
```
### Verify Assumption 2
```
def Meanr(n):
    """Expected ratio E[r] used to verify Assumption 2 (should be close to 1).

    n == 0 uses the exact map Q; n >= 1 uses the degree-n PCE surrogate.
    """
    # NOTE(review): pfprior_sample_n is unused when n == 0.
    pfprior_sample_n = Q(n,initial_sample)
    if n==0:
        r = pdf_obs(pfprior_sample)/pfprior_dens(pfprior_sample)
    else:
        r = pdf_obs(pfprior_sample_n)/pfprior_dens_n(n,pfprior_sample_n)
    return np.mean(r)
def pdf_update(n,x):
    """Evaluate the updated density pi_Lambda^{u,n} at x via a weighted KDE.

    Weights are the ratio r = pi_obs / push-forward evaluated at the
    push-forward samples; n == 0 again denotes the exact map.
    """
    if n==0:
        r = pdf_obs(pfprior_sample)/pfprior_dens(pfprior_sample)
        pdf = kde(initial_sample,weights=r)
    else:
        pfprior_sample_n = Q(n,initial_sample)
        # pfprior_dens_n = kde(pfprior_sample_n)
        r = pdf_obs(pfprior_sample_n)/pfprior_dens_n(n,pfprior_sample_n)
        pdf = kde(initial_sample,weights=r)
    return pdf(x)
# Index 0 is the exact map; indices 1..5 are the PCE degrees (Table 6).
Expect_r = np.zeros(6)
for i in range(6):
    Expect_r[i] = Meanr(i)
```
#### Table 6
```
###########################################
################ Table 6 ##################
###########################################
print('Table 6')
print('Expected ratio for verifying Assumption 2')
print(Expect_r[1:])
#### Compare the initial, exact updated, and approximate updated densities #####
fig=plt.figure()
def plot_update(n):
    fig.clear()
    xx = np.linspace(-1.1,1.1,100)
    plt.plot(xx, uniform.pdf(xx, loc=-1, scale=2), label="Initial Density")
    # n == 0 corresponds to the update computed with the exact map Q.
    plt.plot(xx, pdf_update(0,xx), label="$\pi_{\Lambda}^u$")
    plt.plot(xx, pdf_update(n,xx), label="$\pi_{\Lambda}^{u,n}$, n="+str(n))
    plt.legend()
    plt.xlabel("$\Lambda$")
    plt.title('$\mathbb{E}(r) =$ %3.2f' %(Expect_r[n]));
interact(plot_update,
         n = widgets.IntSlider(value=int(1),min=int(1),max=int(5),step=1))
#### Compare the observed density with the push-forward of each approximate updated density #####
def update_pushforward(n,x):
    """Push-forward of the degree-n updated density: a KDE on the degree-n
    push-forward samples, weighted by the update ratio r."""
    pfprior_sample_n = Q(n,initial_sample)
    r = pdf_obs(pfprior_sample_n)/pfprior_dens_n(n,pfprior_sample_n)
    pdf = kde(pfprior_sample_n,weights=r)
    return pdf(x)
fig = plt.figure()
xx = np.linspace(-1,3,100)
y = pdf_obs(xx)
plt.plot(xx,y,label="$\pi_{\mathcal{D}}^{obs}$")
for i in range(1,6,1):
    y_pf = update_pushforward(i,xx)
    plt.plot(xx,y_pf, label="n="+str(i))
plt.xlabel("$\mathcal{D}$")
plt.legend();
```
### Verify Theorem 4.2
Print out Monte Carlo Approximation of $\|\pi_{\Lambda}^{u,n}(\lambda)-\pi_{\Lambda}^u(\lambda)\|_{L^2(\Lambda)} $
```
##### Generate data for Fig 3 #####
np.random.seed(123456)
lamsample = np.random.uniform(-1,1,size = N_mc)
error_update = np.zeros(5)
# L^2 distance between the exact updated density (n = 0) and each
# degree-(i+1) approximate updated density, estimated by Monte Carlo.
# NOTE(review): pdf_update(0, lamsample) is recomputed in every iteration
# and could be hoisted out of the loop.
for i in range(5):
    error_update[i] = (np.mean((np.abs(pdf_update(0,lamsample) - pdf_update(i+1,lamsample)))**2))**(1/2)
np.set_printoptions(linewidth=110)
print('L^2 Error for Inverse Problem',end='\n\n')
print(error_update)
```
#### Figure 3
```
###########################################
################ Figure 3 #################
###########################################
fig = plt.figure()
plt.xlim([0,6])
# Convergence of the updated density vs. PCE degree.
plt.semilogy([1,2,3,4,5],error_update,'-s')
plt.xlabel('Order of PCE (n)')
plt.ylabel('$L^2$'+' Error in Update');
# fig.savefig("images/1inverse_error_uniform.png")
fig.savefig("images/Fig3.png")
```
| github_jupyter |
# Introduction to Recurrent Neural Networks
(c) Deniz Yuret, 2018
* Objectives: learn about RNNs, the RNN layer, compare with MLP on a tagging task.
* Prerequisites: param, relu, train!, nll, zeroone, Adam
```
# Install any missing dependencies on first run.
# NOTE(review): Pkg.installed() is deprecated in newer Julia releases —
# confirm against the Julia version this notebook targets.
using Pkg
for p in ("Knet","ProgressMeter")
    haskey(Pkg.installed(),p) || Pkg.add(p)
end
```
## The Brown Corpus
To introduce recurrent neural networks (RNNs) we will train a part-of-speech tagger using the [Brown Corpus](https://en.wikipedia.org/wiki/Brown_Corpus). We will train three models: an MLP, a unidirectional RNN, and a bidirectional RNN, and observe significant performance differences.
```
using Knet: Knet
include(Knet.dir("data/nltk.jl"))
(data,words,tags) = brown()
println("The Brown Corpus has $(length(data)) sentences, $(sum(length(p[1]) for p in data)) tokens, with a word vocabulary of $(length(words)) and a tag vocabulary of $(length(tags)).")
```
`data` is an array of `(w,t)` pairs each representing a sentence, where `w` is a sequence of word ids, and `t` is a sequence of tag ids. `words` and `tags` contain the strings for the ids.
```
summary.((data,words,tags))
```
Here is what the first sentence looks like with ids and with strings:
```
ENV["COLUMNS"]=120
(w,t) = first(data)
display(permutedims([w t]))
display(permutedims([words[w] tags[t]]))
```
## Word Embeddings
`data` has each sentence tokenized into an array of words and each word mapped to a `UInt16` id. To use these words as inputs to a neural network we further map each word to a Float32 vector. We will keep these vectors in the columns of a size (X,V) matrix where X is the embedding dimension and V is the vocabulary size. The vectors will be initialized randomly, and trained just like any other network parameter. Let's define an embedding layer for this purpose:
```
using Knet: param
# Embedding layer: a trainable (embedsize, vocabsize) matrix whose columns
# are the word vectors; applying the layer to word ids selects those columns.
struct Embed; w; end
Embed(vocabsize::Int,embedsize::Int) = Embed(param(embedsize,vocabsize))
(e::Embed)(x) = e.w[:,x]
```
This is what the words, word ids and embeddings for a sentence looks like: (note the identical id and embedding for the 2nd and 5th words)
```
embedlayer = Embed(length(words),8)
(w,t) = data[52855]
display(permutedims(words[w]))
display(permutedims(w))
display(embedlayer(w))
```
## Fully connected layers
```
# Fully connected (Linear) layer with optional activation function at the end
struct FC; w; b; f; end
FC(input::Int,output::Int,f=identity) = FC(param(output,input),param(output),f)
# Collapse trailing dimensions so the input becomes a 2-D (features, batch) matrix.
reshape2d(x) = reshape(x,(size(x,1),:))
(fc::FC)(x;o...) = fc.f.(fc.w * reshape2d(x) .+ fc.b)
```
## RNN layers
```
# TODO: define these manually, for now just check out @doc RNN
using Knet: RNN
```
## The three taggers: MLP, RNN, biRNN
```
using Knet: relu
# A chain of layers applied in sequence; keyword args are passed through.
struct Chain; layers; end
Chain(layer1,layer2,layers...)=Chain((layer1,layer2,layers...))
(c::Chain)(x;o...) = (for l in c.layers; x = l(x;o...); end; x)
# Each tag is predicted independently from its own word embedding.
Tagger0(vocab,embed,hidden,output)= # MLP Tagger
    Chain(Embed(vocab,embed),FC(embed,hidden,relu),FC(hidden,output))
# Left-to-right RNN: each tag may depend on the words to its left.
Tagger1(vocab,embed,hidden,output)= # RNN Tagger
    Chain(Embed(vocab,embed),RNN(embed,hidden,rnnType=:relu),FC(hidden,output))
# Bidirectional RNN: each tag may depend on the whole sentence; the FC input
# is 2*hidden because forward and backward states are concatenated.
Tagger2(vocab,embed,hidden,output)= # biRNN Tagger
    Chain(Embed(vocab,embed),RNN(embed,hidden,rnnType=:relu,bidirectional=true),FC(2hidden,output));
```
## Tagger0 (MLP)
This is what Tagger0 looks like. Every tag is predicted independently. The prediction of each tag only depends on the corresponding word.
<img src="https://docs.google.com/drawings/d/e/2PACX-1vTfV4-TB0KwjDbFKpj3rL0tfeApEh9XXaDJ1OF3emNVAmc_-hvgqpEBuA_K0FsNuxymZrv3ztScXxqF/pub?w=378&h=336"/>
## Tagger1 (RNN)
This is what Tagger1 looks like. The RNN layer takes its previous output as an additional input. The prediction of each tag is based on words to the left.
<img src="https://docs.google.com/drawings/d/e/2PACX-1vTaizzCISuSxihPCjndr7xMVwklsrefi9zn7ZArCvsR8fb5V4DGKtusyIzn3Ujp3QbAJgUz1WSlLvIJ/pub?w=548&h=339"/>
## Tagger2 (biRNN)
This is what Tagger2 looks like. There are two RNNs: the forward RNN reads the sequence from left to right, the backward RNN reads it from right to left. The prediction of each tag is dependent on all the words in the sentence.
<img src="https://docs.google.com/drawings/d/e/2PACX-1vQawvnCj6odRF2oakF_TgXd8gLxSsfQP8-2ZdBdEIpfgIyPq0Zp_EF6zcFJf6JlGhfiKQvdVyg-Weq2/pub?w=566&h=335"/>
## Sequence Minibatching
Minibatching is a bit more complicated with sequences compared to simple classification problems, this section can be skipped on a first reading. In addition to the input and minibatch sizes, there is also the time dimension to consider. To keep things simple we will concatenate all sentences into one big sequence, then split this sequence into equal sized chunks. The input to the tagger will be size (B,T) where B is the minibatch size, and T is the chunk size. The input to the RNN layer will be size (X,B,T) where X is the embedding size.
```
BATCHSIZE = 64
SEQLENGTH = 32;
# Concatenate-and-chunk minibatcher for sequence tagging.
# Truncates x and y to a multiple of B, lays each out as a (B, N) matrix
# (B parallel streams of length N), and slices the time axis into (B, T)
# chunks. Returns a vector of (xchunk, ychunk) tuples.
function seqbatch(x,y,B,T)
    N = length(x) ÷ B
    xmat = permutedims(reshape(x[1:N*B], N, B))
    ymat = permutedims(reshape(y[1:N*B], N, B))
    # One (B,T) slice per window; a trailing partial window is dropped.
    return [(xmat[:, t+1:t+T], ymat[:, t+1:t+T]) for t in 0:T:N-T]
end
allw = vcat((x->x[1]).(data)...)
allt = vcat((x->x[2]).(data)...)
d = seqbatch(allw, allt, BATCHSIZE, SEQLENGTH);
```
This may be a bit more clear if we look at an example minibatch:
```
(x,y) = first(d)
words[x]
```
Julia indexing allows us to get the embeddings for this minibatch in one go as an (X,B,T) array where X is the embedding size, B is the minibatch size, and T is the subsequence length.
```
embedlayer = Embed(length(words),128)
summary(embedlayer(x))
```
## Experiments
```
# shuffle and split minibatches into train and test portions
using Random; shuffle!(d)
dtst = d[1:10]
dtrn = d[11:end]
length.((dtrn,dtst))
# For running experiments
using Knet: train!, nll, zeroone, Adam, AutoGrad
import ProgressMeter
# Train `model` for 2500 updates (or load a pretrained copy) and return (model, results).
# file   : JLD2 file used both to save a freshly trained model and to load a cached one.
# model  : a Tagger chain; trained in place on the global dtrn, evaluated on the global dtst.
# o...   : extra keyword options forwarded to train!.
# results is a 2xK matrix: row 1 = nll loss, row 2 = zeroone error, sampled every 100 updates.
function trainresults(file,model; o...)
# Interactive choice: 'y' retrains from scratch, anything else loads/downloads the cache.
if (print("Train from scratch? ");readline()[1]=='y')
results = Float64[]; updates = 0; prog = ProgressMeter.Progress(2500)
# Invoked once per update by train!; records test metrics every 100 updates
# and stops training by returning false after 2500 updates.
function callback(J)
if updates % 100 == 0
push!(results, nll(model,dtst), zeroone(model,dtst))
ProgressMeter.update!(prog, updates)
end
return (updates += 1) <= 2500
end
train!(model, dtrn; callback=callback, optimizer=Adam(), o...)
results = reshape(results,(2,:)) # interleaved (loss, error) pairs -> 2xK matrix
Knet.gc() # release memory before serializing
Knet.save(file,"model",model,"results",results)
else
# Fetch the pretrained model if it is not already cached locally.
isfile(file) || download("http://people.csail.mit.edu/deniz/models/tutorial/$file",file)
model,results = Knet.load(file,"model","results")
end
println(minimum(results,dims=2)) # best loss and best error seen during training
return (model,results)
end
VOCABSIZE = length(words)
EMBEDSIZE = 128
HIDDENSIZE = 128
OUTPUTSIZE = length(tags);
(t0,r0) = trainresults("tagger0.jld2",Tagger0(VOCABSIZE,EMBEDSIZE,HIDDENSIZE,OUTPUTSIZE));
(t1,r1) = trainresults("tagger1.jld2",Tagger1(VOCABSIZE,EMBEDSIZE,HIDDENSIZE,OUTPUTSIZE));
(t2,r2) = trainresults("tagger2.jld2",Tagger2(VOCABSIZE,EMBEDSIZE,HIDDENSIZE,OUTPUTSIZE));
using Plots; default(fmt=:png,ls=:auto,ymirror=true)
plot([r0[2,:], r1[2,:], r2[2,:]]; xlabel="x100 updates", ylabel="error",
ylim=(0,0.15), yticks=0:0.01:0.15, labels=["MLP","RNN","biRNN"])
plot([r0[1,:], r1[1,:], r2[1,:]]; xlabel="x100 updates", ylabel="loss",
ylim=(0,.5), yticks=0:0.1:.5, labels=["MLP","RNN","biRNN"])
```
## Playground
Below, you can type and tag your own sentences:
```
# Word -> id lookup; out-of-vocabulary words map to the id of the last
# vocabulary entry, which serves as the unknown-word token.
wdict=Dict{String,UInt16}(); for (i,w) in enumerate(words); wdict[w]=i; end
unk = UInt16(length(words))
wid(w) = get(wdict,w,unk)
# Tag a whitespace-separated sentence with `tagger`; returns a 2xn matrix whose
# first row holds the words and second row the predicted tags.
function tag(tagger,s::String)
w = permutedims(split(s)) # 1xn row of words: the tagger expects a (B,T) batch
t = tags[(x->x[1]).(argmax(Array(tagger(wid.(w))),dims=1))] # argmax over tag scores at each position
vcat(w,t)
end
tag(t2,readline())
```
| github_jupyter |
Copyright Jana Schaich Borg/Attribution-NonCommercial 4.0 International (CC BY-NC 4.0)
# MySQL Exercise 9: Subqueries and Derived Tables
Now that you understand how joins work, in this lesson we are going to learn how to incorporate subqueries and derived tables into our queries.
Subqueries, which are also sometimes called inner queries or nested queries, are queries that are embedded within the context of another query. The output of a subquery is incorporated into the queries that surround it. Subqueries can be used in SELECT, WHERE, and FROM clauses. When they are used in FROM clauses they create what are called derived tables.
### The main reasons to use subqueries are:
+ Sometimes they are the most logical way to retrieve the information you want
+ They can be used to isolate each logical part of a statement, which can be helpful for troubleshooting long and complicated queries
+ Sometimes they run faster than joins
Some people find subqueries easier to read than joins. However, that is often a result of not feeling comfortable with the concepts behind joins in the first place (I prefer join syntax, so admittedly, that is my preference).
### Subqueries must be enclosed in parentheses. Subqueries have a couple of rules that joins don't:
+ ORDER BY phrases cannot be used in subqueries (although ORDER BY phrases can still be used in outer queries that contain subqueries).
+ Subqueries in SELECT or WHERE clauses that return more than one row must be used in combination with operators that are explicitly designed to handle multiple values, such as the IN operator. Otherwise, subqueries in SELECT or WHERE statements can output no more than 1 row.
### So why would you use subqueries?
Let's look at some examples.
**Start by loading the sql library and database, and making the Dognition database your default database**:
```
%load_ext sql
%sql mysql://studentuser:studentpw@localhost/dognitiondb
%sql USE dognitiondb
```
#### 1) "On the fly calculations" (or, doing calculations as you need them)
One of the main uses of subqueries is to calculate values as you need them. This allows you to use a summary calculation in your query without having to enter the value outputted by the calculation explicitly. A situation when this capability would be useful is if you wanted to see all the records that were greater than the average value of a subset of your data.
Recall one of the queries we wrote in "MySQL Exercise 4: Summarizing your Data" to calculate the average amount of time it took customers to complete all of the tests in the exam_answers table (we had to exclude negative durations from the calculation due to some abnormalities in the data):
```sql
SELECT AVG(TIMESTAMPDIFF(minute,start_time,end_time)) AS AvgDuration
FROM exam_answers
WHERE TIMESTAMPDIFF(minute,start_time,end_time)>0;
```
What if we wanted to look at just the data from rows whose durations were greater than the average, so that we could determine whether there are any features that seem to correlate with dogs taking a longer time to finish their tests? We could use a subquery to calculate the average duration, and then indicate in our SELECT and WHERE clauses that we only wanted to retrieve the rows whose durations were greater than the average. Here's what the query would look like:
```sql
SELECT *
FROM exam_answers
WHERE TIMESTAMPDIFF(minute,start_time,end_time) >
(SELECT AVG(TIMESTAMPDIFF(minute,start_time,end_time)) AS AvgDuration
FROM exam_answers
WHERE TIMESTAMPDIFF(minute,start_time,end_time)>0);
```
You can see that TIMESTAMPDIFF gets compared to the singular average value outputted by the subquery surrounded by parentheses. You can also see that it's easier to read the query as a whole if you indent and align all the clauses associated with the subquery, relative to the main query.
**Question 1: How could you use a subquery to extract all the data from exam_answers that had test durations that were greater than the average duration for the "Yawn Warm-Up" game? Start by writing the query that gives you the average duration for the "Yawn Warm-Up" game by itself (and don't forget to exclude negative values; your average duration should be about 9934):**
```
%%sql
-- Average completion time (in minutes) for the "Yawn Warm-Up" game,
-- excluding the abnormal rows with non-positive durations (expected: ~9934).
SELECT AVG(TIMESTAMPDIFF(minute, start_time, end_time)) AS AvgDuration
FROM exam_answers
WHERE test_name = 'Yawn Warm-Up'
  AND TIMESTAMPDIFF(minute, start_time, end_time) > 0;
```
**Question 2: Once you've verified that your subquery is written correctly on its own, incorporate it into a main query to extract all the data from exam_answers that had test durations that were greater than the average duration for the "Yawn Warm-Up" game (you will get 11059 rows):**
```
%%sql
-- All exam_answers rows whose duration exceeds the average "Yawn Warm-Up"
-- duration computed by the subquery (expected: 11059 rows).
SELECT *
FROM exam_answers
WHERE TIMESTAMPDIFF(minute,start_time,end_time) >
(SELECT AVG(TIMESTAMPDIFF(minute,start_time,end_time)) AS AvgDuration
FROM exam_answers
WHERE TIMESTAMPDIFF(minute,start_time,end_time)>0 AND
test_name='Yawn Warm-Up');
```
Now double check the results you just retrieved by replacing the subquery with "9934"; you should get the same results. It is helpful to get into the habit of including these kinds of quality checks into your query-writing process.
This example shows you how subqueries allow you retrieve information dynamically, rather than having to hard code in specific numbers or names. This capability is particularly useful when you need to build the output of your queries into reports or dashboards that are supposed to display real-time information.
#### 2) Testing membership
Subqueries can also be useful for assessing whether groups of rows are members of other groups of rows. To use them in this capacity, we need to know about and practice the IN, NOT IN, EXISTS, and NOT EXISTS operators.
Recall from MySQL Exercise 2: Selecting Data Subsets Using WHERE that the IN operator allows you to use a WHERE clause to say how you want your results to relate to a list of multiple values. It's basically a condensed way of writing a sequence of OR statements. The following query would select all the users who live in the state of North Carolina (abbreviated "NC") or New York (abbreviated "NY"):
```mysql
SELECT *
FROM users
WHERE state IN ('NC','NY');
```
Notice the quotation marks around the members of the list referred to by the IN statement. These quotation marks are required since the state names are strings of text.
A query that would give an equivalent result would be:
```mysql
SELECT *
FROM users
WHERE state ='NC' OR state ='NY';
```
A query that would select all the users who do NOT live in the state of North Carolina or New York would be:
```mysql
SELECT *
FROM users
WHERE state NOT IN ('NC','NY');
```
**Question 3: Use an IN operator to determine how many entries in the exam_answers tables are from the "Puzzles", "Numerosity", or "Bark Game" tests. You should get a count of 163022.**
```
%%sql
-- Count of exam_answers entries from the Puzzles, Numerosity, or Bark Game
-- tests; IN is shorthand for the equivalent chain of OR comparisons
-- (expected: 163022).
SELECT COUNT(*)
FROM exam_answers
WHERE subcategory_name IN ('Puzzles','Numerosity','Bark Game');
```
**Question 4: Use a NOT IN operator to determine how many unique dogs in the dog table are NOT in the "Working", "Sporting", or "Herding" breeding groups. You should get an answer of 7961.**
```
%%sql
-- Distinct dogs whose breed_group is NOT Working/Sporting/Herding
-- (expected: 7961).
-- NOTE(review): NOT IN also filters out rows whose breed_group is NULL,
-- since NULL NOT IN (...) evaluates to unknown rather than true.
SELECT COUNT(DISTINCT dog_guid)
FROM dogs
WHERE breed_group NOT IN ('Working','Sporting','Herding');
```
EXISTS and NOT EXISTS perform similar functions to IN and NOT IN, but EXISTS and NOT EXISTS can only be used in subqueries. The syntax for EXISTS and NOT EXISTS statements is a little different than that of IN statements because EXISTS is not preceded by a column name or any other expression. The most important difference between EXISTS/NOT EXISTS and IN/NOT IN statements, though, is that unlike IN/NOT IN statements, EXISTS/NOT EXISTS are logical statements. Rather than returning raw data, per se, EXISTS/NOT EXISTS statements return a value of TRUE or FALSE. As a practical consequence, EXISTS statements are often written using an asterisk after the SELECT clause rather than explicit column names. The asterisk is faster to write, and since the output is just going to be a logical true/false either way, it does not matter whether you use an asterisk or explicit column names.
We can use EXISTS and a subquery to compare the users who are in the users table and dogs table, similar to what we practiced previously using joins. If we wanted to retrieve a list of all the users in the users table who were also in the dogs table, we could write:
```sql
SELECT DISTINCT u.user_guid AS uUserID
FROM users u
WHERE EXISTS (SELECT d.user_guid
FROM dogs d
WHERE u.user_guid =d.user_guid);
```
You would get the same result if you wrote:
```sql
SELECT DISTINCT u.user_guid AS uUserID
FROM users u
WHERE EXISTS (SELECT *
FROM dogs d
WHERE u.user_guid =d.user_guid);
```
Essentially, both of these queries say give me all the distinct user_guids from the users table that have a value of "TRUE" in my EXISTS clause. The results would be equivalent to an inner join with GROUP BY query. Now...
**Question 5: How could you determine the number of unique users in the users table who were NOT in the dogs table using a NOT EXISTS clause? You should get the 2226, the same result as you got in Question 10 of MySQL Exercise 8: Joining Tables with Outer Joins.**
```
%%sql
-- Count users that never appear in the dogs table: NOT EXISTS is true only
-- when no dogs row shares the outer user's user_guid (expected: 2226).
SELECT COUNT(DISTINCT u.user_guid) AS NumUsers
FROM users u
WHERE NOT EXISTS (SELECT *
FROM dogs d
WHERE u.user_guid=d.user_guid);
```
#### 3) Accurate logical representations of desired output and Derived Tables
A third situation in which subqueries can be useful is when they simply represent the logic of what you want better than joins.
We saw an example of this in our last MySQL Exercise. We wanted a list of each dog a user in the users table owns, with its accompanying breed information whenever possible. To achieve this, we wrote this query in Question 6:
```sql
SELECT u.user_guid AS uUserID, d.user_guid AS dUserID, d.dog_guid AS dDogID, d.breed
FROM users u LEFT JOIN dogs d
ON u.user_guid=d.user_guid
```
Once we saw the "exploding rows" phenomenon due to duplicate rows, we wrote a follow-up query in Question 7 to assess how many rows would be outputted per user_id when we left joined the users table on the dogs table:
```sql
SELECT u.user_guid AS uUserID, d.user_guid AS dUserID, count(*) AS numrows
FROM users u LEFT JOIN dogs d
ON u.user_guid=d.user_guid
GROUP BY u.user_guid
ORDER BY numrows DESC
```
This same general query without the COUNT function could have been used to output a complete list of all the distinct users in the users table, their dogs, and their dogs' breed information. However, the method we used to arrive at this was not very pretty or logically satisfying. Rather than joining many duplicated rows and fixing the results later with the GROUP BY clause, it would be much more elegant if we could simply join the distinct UserIDs in the first place. There is no way to do that with join syntax, on its own. However, you can use subqueries in combination with joins to achieve this goal.
To complete the join on ONLY distinct UserIDs from the users table, we could write:
```sql
SELECT DistinctUUsersID.user_guid AS uUserID, d.user_guid AS dUserID, count(*) AS numrows
FROM (SELECT DISTINCT u.user_guid
FROM users u) AS DistinctUUsersID
LEFT JOIN dogs d
ON DistinctUUsersID.user_guid=d.user_guid
GROUP BY DistinctUUsersID.user_guid
ORDER BY numrows DESC
```
**Try it yourself:**
```
%%sql
SELECT DistinctUserID.user_guid AS uUserID,
d.user_guid AS dUserID, count(*) AS numrows
FROM (SELECT DISTINCT u.user_guid FROM users u) AS DistinctUserID
LEFT JOIN dogs d ON DistinctUserID.user_guid=d.user_guid
GROUP BY DistinctUserID.user_guid
ORDER BY numrows DESC;
```
**<mark> Queries that include subqueries always run the innermost subquery first, and then run subsequent queries sequentially in order from the innermost query to the outermost query. </mark>**
Therefore, the query we just wrote extracts the distinct user_guids from the users table *first*, and then left joins that reduced subset of user_guids on the dogs table. As mentioned at the beginning of the lesson, since the subquery is in the FROM statement, it actually creates a temporary table, called a derived table, that is then incorporated into the rest of the query.
**There are several important points to notice about the syntax of this subquery.** First, an alias of "DistinctUUsersID" is used to name the results of the subquery. *We are required to give an alias to any derived table we create in subqueries within FROM statements.* Otherwise there would be no way for the database to refer to the multiple columns within the temporary results we create.
Second, *we need to use this alias every time we want to execute a function that uses the derived table.* Remember that the results in which we are interested require a join between the dogs table and the temporary table, not the dogs table and the original users table with duplicates. That means we need to make sure we reference the temporary table alias in the ON, GROUP BY, and SELECT clauses.
Third, relatedly, aliases used within subqueries can refer to tables outside of the subqueries. However, *outer queries cannot refer to aliases created within subqueries unless those aliases are explicitly part of the subquery output*. In other words, if you wrote the first line of the query above as:
```sql
SELECT u.user_guid AS uUserID, d.user_guid AS dUserID, count(*) AS numrows
...
```
the query would not execute because the alias "u" is contained inside the subquery, but is not included in the output. **Go ahead and try it to see what the error message looks like:**
```
%%sql
SELECT u.user_guid AS uUserID, d.user_guid AS dUserID, count(*) AS numrows
FROM (SELECT DISTINCT u.user_guid FROM users u) AS DistinctUserID
LEFT JOIN dogs d ON DistinctUserID.user_guid=d.user_guid
GROUP BY DistinctUserID.user_guid
ORDER BY numrows DESC;
```
A similar thing would happen if you tried to use the alias u in the GROUP BY statement.
Another thing to take note of is that when you use subqueries in FROM statements, the temporary table
you create can have multiple columns in the output (unlike when you use subqueries in outer SELECT clauses). But for that same reason, subqueries in FROM statements can be very computationally intensive. Therefore, it's a good idea to use them sparingly, especially when you have very large data sets.
Overall, subqueries and joins can often be used interchangeably. Some people strongly prefer one approach over another, but there is no consensus about which approach is best. When you are analyzing very large datasets, it's a good idea to test which approach will likely be faster or easier to troubleshoot for your particular application.
# Let's practice some more subqueries!
**Question 6: Write a query using an IN clause and equijoin syntax that outputs the dog_guid, breed group, state of the owner, and zip of the owner for each distinct dog in the Working, Sporting, and Herding breed groups. (You should get 10,254 rows; the query will be a little slower than some of the others we have practiced)**
```
%%sql
-- Distinct dogs in the Working/Sporting/Herding breed groups together with
-- their owner's state and zip (expected: 10,254 rows).
-- Fix: the table is named `dogs`, not `dog`.
SELECT DISTINCT d.dog_guid AS DogID, d.breed_group AS BreedGroup,
       u.state AS State, u.zip AS Zip
FROM users u, dogs d
WHERE d.breed_group IN ('Working','Sporting','Herding')
  AND u.user_guid=d.user_guid;
```
**Question 7: Write the same query as in Question 6 using traditional join syntax.**
```
%%sql
-- Same result as Question 6, written with explicit JOIN ... ON syntax.
-- Fix: the ON clause referenced an undefined alias `sub`; the users table
-- is aliased `u` in this query.
SELECT DISTINCT d.dog_guid AS DogID, d.breed_group AS BreedGroup,
       u.state AS State, u.zip AS Zip
FROM users u JOIN dogs d ON u.user_guid=d.user_guid
WHERE d.breed_group IN ('Working','Sporting','Herding');
```
**Question 8: Earlier we examined unique users in the users table who were NOT in the dogs table. Use a NOT EXISTS clause to examine all the users in the dogs table that are not in the users table (you should get 2 rows in your output).**
```
%%sql
-- Users present in the dogs table but absent from the users table
-- (expected: 2 rows).
-- Fix: the correlated condition must compare user_guid to user_guid;
-- the original compared u.user_guid to d.dog_guid, which makes the
-- NOT EXISTS test meaningless.
SELECT DISTINCT d.user_guid, d.dog_guid
FROM dogs d
WHERE NOT EXISTS (SELECT DISTINCT u.user_guid
                  FROM users u
                  WHERE u.user_guid=d.user_guid);
```
**Question 9: We saw earlier that user_guid 'ce7b75bc-7144-11e5-ba71-058fbc01cf0b' still ends up with 1819 rows of output after a left outer join with the dogs table. If you investigate why, you'll find out that's because there are duplicate user_guids in the dogs table as well. How would you adapt the query we wrote earlier (copied below) to only join unique UserIDs from the users table with unique UserIDs from the dog table?**
Join we wrote earlier:
```sql
SELECT DistinctUUsersID.user_guid AS uUserID, d.user_guid AS dUserID, count(*) AS numrows
FROM (SELECT DISTINCT u.user_guid
FROM users u) AS DistinctUUsersID
LEFT JOIN dogs d
ON DistinctUUsersID.user_guid=d.user_guid
GROUP BY DistinctUUsersID.user_guid
ORDER BY numrows DESC;
```
**Let's build our way up to the correct query. To troubleshoot, let's only examine the rows related to user_guid 'ce7b75bc-7144-11e5-ba71-058fbc01cf0b', since that's the userID that is causing most of the trouble. Rewrite the query above to only LEFT JOIN *distinct* user(s) from the user table whose user_guid='ce7b75bc-7144-11e5-ba71-058fbc01cf0b'. The first two output columns should have matching user_guids, and the numrows column should have one row with a value of 1819:**
```
%%sql
-- Troubleshooting step: left join ONE distinct user_guid from users onto the
-- dogs table; numrows comes out as 1819 because the dogs table itself
-- contains duplicate rows for this user.
SELECT DistinctUUsersID.user_guid AS uUserID,
d.user_guid AS dUserID, count(*) AS numrows
FROM (SELECT DISTINCT u.user_guid
FROM users u
WHERE u.user_guid='ce7b75bc-7144-11e5-ba71-058fbc01cf0b') AS DistinctUUsersID
LEFT JOIN dogs d
ON DistinctUUsersID.user_guid=d.user_guid
GROUP BY DistinctUUsersID.user_guid
ORDER BY numrows DESC;
```
**Question 10: Now let's prepare and test the inner query for the right half of the join. Give the dogs table an alias, and write a query that would select the distinct user_guids from the dogs table (we will use this query as a inner subquery in subsequent questions, so you will need an alias to differentiate the user_guid column of the dogs table from the user_guid column of the users table).**
```
%%sql
-- Inner query for the right half of the join: the DISTINCT user_guids that
-- appear in the dogs table, aliased so they can be told apart from
-- users.user_guid in the outer query.
-- Fix: the question asks for distinct user_guids, not dog_guids.
SELECT DISTINCT DistinctUserArf.user_guid
FROM dogs DistinctUserArf;
```
**Question 11: Now insert the query you wrote in Question 10 as a subquery on the right part of the join you wrote in Question 9. The output should return columns that should have matching user_guids, and 1 row in the numrows column with a value of 1. If you are getting errors, make sure you have given an alias to the derived table you made to extract the distinct user_guids from the dogs table, and double-check that your aliases are referenced correctly in the SELECT and ON statements.**
```
%%sql
-- Join one distinct user from the users table to the distinct user_guids of
-- the dogs table; deduplicating BOTH sides brings numrows down to 1.
SELECT DistinctUUsersID.user_guid AS uUserID,
DistinctUserArf.user_guid AS dUserID, count(*) AS numrows
FROM (SELECT DISTINCT u.user_guid
FROM users u
WHERE u.user_guid='ce7b75bc-7144-11e5-ba71-058fbc01cf0b') AS DistinctUUsersID
LEFT JOIN (SELECT DISTINCT d.user_guid
FROM dogs d) AS DistinctUserArf
ON DistinctUUsersID.user_guid=DistinctUserArf.user_guid
GROUP BY DistinctUUsersID.user_guid
ORDER BY numrows DESC;
```
**Question 12: Adapt the query from Question 11 so that, in theory, you would retrieve a full list of all the DogIDs a user in the users table owns, with its accompanying breed information whenever possible. HOWEVER, BEFORE YOU RUN THE QUERY MAKE SURE TO LIMIT YOUR OUTPUT TO 100 ROWS *WITHIN* THE SUBQUERY TO THE LEFT OF YOUR JOIN.** If you run the query without imposing limits it will take a *very* long time. If you try to limit the output by just putting a limit clause at the end of the outermost query, the database will still have to hold the entire derived tables in memory and join each row of the derived tables before limiting the output. If you put the limit clause in the subquery to the left of the join, the database will only have to join 100 rows of data.
```
%%sql
-- Full distinct-to-distinct join. The LIMIT lives INSIDE the left derived
-- table so the database only joins 100 users instead of holding the entire
-- derived tables in memory before limiting the output.
SELECT DistinctUUsersID.user_guid AS uUserID,
DistinctUserArf.user_guid AS dUserID, count(*) AS numrows
FROM (SELECT DISTINCT u.user_guid
FROM users u
LIMIT 100) AS DistinctUUsersID
LEFT JOIN (SELECT DISTINCT d.user_guid
FROM dogs d) AS DistinctUserArf
ON DistinctUUsersID.user_guid=DistinctUserArf.user_guid
GROUP BY DistinctUUsersID.user_guid
ORDER BY numrows DESC;
```
**Question 13: You might have a good guess by now about why there are duplicate rows in the dogs table and users table, even though most corporate databases are configured to prevent duplicate rows from ever being accepted. To be sure, though, let's adapt this query we wrote above:**
```sql
SELECT DistinctUUsersID.user_guid AS uUserID, d.user_guid AS dUserID, count(*) AS numrows
FROM (SELECT DISTINCT u.user_guid FROM users u) AS DistinctUUsersID
LEFT JOIN dogs d
ON DistinctUUsersID.user_guid=d.user_guid
GROUP BY DistinctUUsersID.user_guid
ORDER BY numrows DESC
```
**Add dog breed and dog weight to the columns that will be included in the final output of your query. In addition, use a HAVING clause to include only UserIDs who would have more than 10 rows in the output of the left join (your output should contain 5 rows).**
```
%%sql
-- Add breed and weight to expose the testing accounts: UserIDs with more than
-- 10 joined rows (expected: 5 rows; per the lesson these are the 190-pound
-- "Shih Tzu" entries Dognition used to mark test data).
SELECT DistinctUUsersID.user_guid AS uUserID, d.user_guid AS dUserID,
d.breed AS Breed, d.weight AS Weight, count(*) AS numrows
FROM (SELECT DISTINCT u.user_guid FROM users u) AS DistinctUUsersID
LEFT JOIN dogs d
ON DistinctUUsersID.user_guid=d.user_guid
GROUP BY DistinctUUsersID.user_guid
HAVING numrows > 10
ORDER BY numrows DESC;
```
You can see that almost all of the UserIDs that are causing problems are Shih Tzus that weigh 190 pounds. As we learned in earlier lessons, Dognition used this combination of breed and weight to code for testing accounts. These UserIDs do not represent real data. These types of testing entries would likely be cleaned out of databases used in large established companies, but could certainly still be present in either new databases that are still being prepared and configured, or in small companies which have not had time or resources to perfect their data storage.
There are not very many incorrect entries in the Dognition database and most of the time these entries will not appreciably affect your queries or analyses. However, you have now seen the effects such entries can have in the rare cases when you need to implement outer joins on tables that have duplicate rows or linking columns with many to many relationships. Hopefully, understanding these rare cases has helped you understand more deeply the fundamental concepts behind joining tables in relational databases.
**Feel free to practice more subqueries below!**
```
%%sql
-- Alternative to Question 6: deduplicate the users table in a derived table
-- before joining, instead of relying on SELECT DISTINCT in the outer query.
SELECT d.dog_guid AS DogID, d.breed_group AS BreedGroup, sub.state AS State, sub.zip AS Zip
FROM (SELECT DISTINCT u.user_guid, u.state, u.zip FROM users u) AS sub
JOIN dogs d ON sub.user_guid=d.user_guid
WHERE d.breed_group IN ('Working','Sporting','Herding');
```
| github_jupyter |
<a href="https://colab.research.google.com/github/cedro3/Toonify-Yourself/blob/master/Toonify_Yourself.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# セットアップ
```
%tensorflow_version 1.x
!git clone https://github.com/cedro3/Toonify-Yourself.git
%cd Toonify-Yourself
!nvcc test_nvcc.cu -o test_nvcc -run
# フォルダーの作成
!mkdir aligned
!mkdir generated
```
# BaseモデルとBlendedモデルのダウンロード
```
# Download the base (FFHQ) model and the blended (toonified) model.
import pretrained_networks
# use my copy of the blended model to save Doron's download bandwidth
# get the original here https://mega.nz/folder/OtllzJwa#C947mCCdEfMCRTWnDcs4qw
#blended_url = "https://drive.google.com/uc?id=1H73TfV5gQ9ot7slSed_l-lim9X7pMRiU"
blended_url = "https://drive.google.com/uc?id=1BRqqHWk_4BjNHLXTrpHkoxmZhGw3CU59"
ffhq_url = "http://d36zk2xti64re0.cloudfront.net/stylegan2/networks/stylegan2-ffhq-config-f.pkl"
# Gs_blended generates the toonified faces; Gs is the original FFHQ generator.
_, _, Gs_blended = pretrained_networks.load_networks(blended_url)
_, _, Gs = pretrained_networks.load_networks(ffhq_url)
```
# 関数の定義
```
# 画像表示
import matplotlib.pyplot as plt
from PIL import Image
import glob
import numpy as np
def display_pic(folder):
    """Show every image matching the glob pattern *folder* in a 10x10 grid.

    Images are sorted by filename and labeled with their index; that index is
    how the rest of the notebook refers to a particular face/latent.
    """
    fig = plt.figure(figsize=(30, 40))
    # sorted(...) replaces the glob + in-place sort of the original.
    files = sorted(glob.glob(folder))
    # NOTE(review): the grid is fixed at 10x10, so more than 100 matching
    # files would fail in add_subplot -- acceptable at this notebook's scale.
    for i, path in enumerate(files):
        img = np.asarray(Image.open(path))
        ax = fig.add_subplot(10, 10, i + 1, xticks=[], yticks=[])
        ax.imshow(img)
        ax.set_xlabel(str(i), fontsize=20)
    plt.show()
    plt.close()
# Display the images generated from the latent vectors `latents`.
import PIL.Image
def display(latents):
# Render each w-space latent (reshaped to (1, 18, 512)) through the blended
# generator and show the results in a 10x10 grid labeled by index.
synthesis_kwargs = dict(output_transform=dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=False), minibatch_size=8)
fig = plt.figure(figsize=(30, 40))
for i in range(len(latents)):
vec = latents[i].reshape(1,18,512)
images = Gs_blended.components.synthesis.run(vec, randomize_noise=False, **synthesis_kwargs)
images = images.transpose((0,2,3,1)) # NCHW -> NHWC for plotting
PIL.Image.fromarray(images[0], 'RGB') # NOTE(review): result is discarded; looks like leftover code
ax = fig.add_subplot(10, 10, i+1, xticks=[], yticks=[])
image_plt = np.array(images[0])
ax.imshow(image_plt)
ax.set_xlabel(str(i), fontsize=20)
plt.show()
plt.close()
# Build a transition GIF that morphs between latents in the order given by `idx`.
import os
def generate_gif(latents, idx):
synthesis_kwargs = dict(output_transform=dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=False), minibatch_size=8)
image_gif = []
os.makedirs('my/gif', exist_ok=True)
# For each consecutive pair in idx, render 20 frames along the straight
# line between the two latents in w space (i/19 runs from 0 to 1).
for j in range(len(idx)-1):
for i in range(20):
latent = latents[idx[j]]+(latents[idx[j+1]]-latents[idx[j]])*i/19
latent = latent.reshape(1, 18, 512)
images = Gs_blended.components.synthesis.run(latent, randomize_noise=False, **synthesis_kwargs)
images = images.transpose((0,2,3,1))
image_one = PIL.Image.fromarray(images[0], 'RGB')
image_gif.append(image_one.resize((256,256)))
# Write all collected frames as one animated GIF (100 ms/frame, looping).
image_gif[0].save('./my/gif/anime.gif', save_all=True, append_images=image_gif[1:],
duration=100, loop=0)
```
# 用意した画像から顔画像を切り取る
rawフォルダーの画像から顔画像を切り取り、alignedフォルダーに保存する
```
# Crop faces from the images in the raw folder and save them into aligned/.
!python align_images.py raw aligned
display_pic('./aligned/*.png')
```
# 潜在変数wを求める
alignedフォルダーの顔画像から潜在変数wを求め、潜在変数wを generated/*.npy に保存し、潜在変数wから生成した画像を generated/*.png に保存する
```
# Project each aligned face into the generator's latent space
# (saves the latents as generated/*.npy and previews as generated/*.png).
!python project_images.py --num-steps 500 aligned generated
display_pic('./generated/*.png')
```
# アニメ顔を生成する
潜在変数(generated/*.npy)を読み込み、blended モデルでアニメ画像を保存(generate/*-toon.png)し、潜在変数はlatentsに保存する。
```
# Generate toonified faces from the projected latents.
import numpy as np
from PIL import Image
import dnnlib
import dnnlib.tflib as tflib
from pathlib import Path
latent_dir = Path("generated")
latents = sorted(latent_dir.glob("*.npy"))
for i, latent_file in enumerate(latents):
latent = np.load(latent_file)
latent = np.expand_dims(latent,axis=0)
synthesis_kwargs = dict(output_transform=dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=False), minibatch_size=8)
images = Gs_blended.components.synthesis.run(latent, randomize_noise=False, **synthesis_kwargs)
Image.fromarray(images.transpose((0,2,3,1))[0], 'RGB').save(latent_file.parent / (f"{latent_file.stem}-toon.jpg"))
if i == 0:
latents = latent
else:
latents = np.concatenate([latents, latent])
# NOTE(review): `latents` is rebound here from the list of .npy paths to the
# stacked latent array used by the rest of the notebook.
# Display the images generated from the collected latents.
display(latents)
```
# トランジションGIFを作成する
潜在変数(latents)の順番(リスト形式)を指定して、トランジションGIFを作成する
```
# Create the transition GIF following the given index order (0 -> 1 -> 2 -> 0).
from IPython.display import Image
generate_gif(latents,[0,1,2,0])
Image('./my/gif/anime.gif', format='png')
```
| github_jupyter |
# COVIDvu <img src='resources/UN-flag.png' align='right'>
COVID-19 model for predicting the spread of coronavirus through populations.
---
# Background
In `covidvu.predict` we apply the [logistic equation](https://en.wikipedia.org/wiki/Logistic_function) to model the spread of COVID-19 cases in a given population. Briefly, the logistic equation may be used to describe the dynamics of a species (coronavirus) which is under the influence of two competing effects: population expansion through pure birth, and competition for resources causing over-crowding. We represent these dynamics mathematically with the following differential equation:
$$\frac{\mathrm{d}X}{\mathrm{d}t} = r X \left(1 - \frac{X}{K} \right)$$
where
- $t=$ time
- $X=$ population size, analogous to the total number of infected individuals
- $r=$ growth rate, which is the rate at which the virus spread if left unimpeded in an infinite-sized population
- $K=$ carrying capacity, which is the total number of infected individuals as $t \rightarrow \infty$ in a finite-sized population given constraints such as hand washing, social isolation, health-care effectiveness etc.
# Data cleaning
We have found that the dynamics of the spread of COVID-19 tends to follow logistic growth once the total number of cases has become more than just a handful. We therefore neglect data where the total number of cases $X \leq 50$, and require at least 10 days of data for this condition to be true before attempting to train a model for a particular region.
# Mathematical model
The general solution to the differential equation above is
$$X(t) = \frac{K}{1+\left(\frac{K-X_0}{X_0}\right)e^{-r t}}$$
where $X_0$ is the initial infected population size. Assuming $K \gg X_0$, we re-cast this equation in the form
$$X(t) = \frac{L}{1+e^{-k(t-t_0)}}$$
where $L=K$, $k=r$, and $t_0=\frac{1}{r} \ln\left(\frac{K-X_0}{X_0}\right) \approx \frac{1}{r}\ln(K/X_0)$, using the assumption $K \gg X_0$.
Let $\hat{X}(t)$ be a time series corresponding to measurements of $X(t)$. We take a log transformation of $X(t)$ for numerical stability, $Y(t) = \ln(X(t)+1)$. Allowing $\theta$ to denote the parameter vector $\theta=(L, k, t_0)$, and $Y_\theta(t)$ the corresponding parametrised curve, we assume that $\hat{Y}(t)$ obeys the following likelihood
$$P(\hat{Y}(t)|\theta) = \prod_t \mathcal{N}(Y_\theta(t), \sigma^2)$$
where $\mathcal{N}(\mu, \sigma^2)$ is a normal distribution with mean $\mu$ and variance $\sigma^2$. In other words, we assume that the mean number of cases follows the logistic equation, with Normally-distributed noise in log space. $Y_\theta(t)$ denotes that $Y(t)$ is parametrized by $\theta$. Defining the error model in log space has the advantage of naturally allowing the size of measurement error to grow in proportion to the mean number of cases, as well as potentially allowing for greater numerical stability for Markov chain Monte Carlo-based inference.
We perform Bayesian inference using a No-U-Turn Sampler (NUTS; Hoffman, 2014) as implemented in [PyStan](https://pystan.readthedocs.io/en/latest/) using the following broad, weakly-informative, model priors:
$$P(\log_{10}{K}) = \mathrm{Unif}(0, 10)$$
$$P(t_0) = \mathrm{HalfNormal}(0, 10^6)$$
$$P(k) = \mathcal{N}(0.5, 0.25)$$
$$P(\sigma) = \mathrm{HalfNormal}(0, 100)$$
where $\mathrm{Unif}(a,b)$ is a uniform distribution between $a$ and $b$, and $\mathrm{HalfNormal}(\mu, \sigma^2)$ is a normal distribution with mean $\mu$ and variance $\sigma^2$, which has been truncated at zero, retaining only positive values.
# Outputs from MCMC
We return approximate central Bayesian confidence intervals and the posterior mean for $X(t)$ in `covidvu.predict.predictLogisticGrowth`. One may interpret the e.g. 95% confidence interval with the following statement:
"Assuming that the above model is the true model describing the dynamics of the spread of COVID-19 through a particular population, and given our model priors, we may say with *approximately* 95% confidence that the true value of $X(t)$ will lie within the confidence bounds".
We use the word "approximate" because Hamiltonian Monte Carlo will only guarantee that the numerically-generated samples from the posterior are true samples in the limit of an infinitely long chain. One may make the approximations more accurate by increasing `nSamples` in `covidvu.predict.predictLogisticGrowth`, and to a lesser extent, `nTune` and `nChains`.
We also return the posterior mean, which is a good point-estimate for the dynamics of COVID-19 given our uncertainty in model parameters, given the observed data.
---
© the COVIDvu Contributors. All rights reserved.
| github_jupyter |
# Generate CovidCareMap Regional Data
This rolls up count values from the facility data based on three regions: county, state, and Hospital Referral Region (HRR).
Most of the work is done in the `covidcaremap.geo` package, in the `sum_per_region` method. See that code for specifics.
## Methods
- Take the facility data, spatially join to regional data, and sum count properties for each region. See note about the calculation of occupancy rates.
- Based on population counts, create "per 1000" versions of each column for each of total population, adult population, and elderly population.
- Save the three aggregated files as GeoJSONs and CSVs.
### Notes on aggregation of occupancy rates
Occupancy rates are a weighted average based on the number of beds (or ICU beds for ICU Occupancy) contributing to the total amount of beds for that aggregation.
If the occupancy rate is NaN, then no beds are contributed to the amount of beds used to weight the aggregation for that facility.
So the occupancy rate $O$ is calculated as
$$O=\frac{\sum_{f\in F}b_{f}o_{f}}{\sum_{f\in F}b_{f}}$$
where $F$ is the set of facilities that have a non-NaN value for occupancy, $o_{f}$ is the occupancy rate for facility $f$, and $b_{f}$ is the bed count for facility $f$.
In some cases HCRIS data reports an occupancy rate that is greater than 1. This is left in the facility-level data as source data error. Any occupancy rate greater than 1 is considered 1 for this calculation.
```
import geopandas as gpd
import pandas as pd
import numpy as np
from covidcaremap.data import (read_facility_gdf,
                               read_us_hrr_gdf,
                               read_us_states_gdf,
                               read_us_counties_gdf,
                               external_data_path,
                               published_data_path)
from covidcaremap.geo import sum_per_region
# Facility-level capacity data; every regional roll-up below aggregates this.
facility_gdf = read_facility_gdf()
```
## By HRR
```
# Roll facility-level counts up to Hospital Referral Regions (HRRs).
hrr_fname = 'us_healthcare_capacity-hrr-CovidCareMap'
hrr_geojson_path = published_data_path(f'{hrr_fname}.geojson')
hrr_csv_path = published_data_path(f'{hrr_fname}.csv')

# HRR boundaries, minus identifier columns we do not publish.
hrr_gdf = read_us_hrr_gdf().drop(columns=['HRR_BDRY_I', 'HRRNUM'])

# Spatially join facilities to HRRs and sum the count columns per region.
hosp_hrr_gdf = sum_per_region(facility_gdf,
                              hrr_gdf,
                              groupby_columns=['HRRCITY'],
                              region_id_column='HRRCITY')

# Publish both a GeoJSON (with geometry) and a CSV (without geometry).
hosp_hrr_gdf.to_file(hrr_geojson_path, driver='GeoJSON')
hosp_hrr_df = hosp_hrr_gdf.drop(columns=['geometry']).sort_values(by='HRRCITY')
hosp_hrr_df.to_csv(hrr_csv_path, index=False)
```
## By County
```
# Roll facility-level counts up to US counties.
county_fname = 'us_healthcare_capacity-county-CovidCareMap'
county_geojson_path = published_data_path(f'{county_fname}.geojson')
county_csv_path = published_data_path(f'{county_fname}.csv')

# County boundaries plus population figures; normalize the FIPS column name.
county_gdf = read_us_counties_gdf().rename(columns={'COUNTY_FIPS': 'fips_code'})
population_columns = ['Population', 'Population (20+)', 'Population (65+)']
filtered_county_gdf = county_gdf[['GEO_ID', 'geometry'] + population_columns]

# Spatially join facilities to counties and sum the count columns per region.
hosp_county_gdf = sum_per_region(facility_gdf,
                                 filtered_county_gdf,
                                 groupby_columns=['GEO_ID'],
                                 region_id_column='GEO_ID')

# County ICU bed counts come straight from the facility roll-up.
hosp_county_gdf['ICU Bed Source'] = 'Facility aggregation'
hosp_county_gdf['ICU Bed Source Last Updated'] = np.nan

# Re-attach the county identifiers and drop the internal GEO_ID join key.
merged_county_gdf = county_gdf[['GEO_ID', 'fips_code', 'State', 'County Name']] \
    .merge(hosp_county_gdf, on='GEO_ID') \
    .drop(columns=['GEO_ID'])
```
### Manual Override - County
There are instances where we get county-level data from a more up-to-date source, or the data is off and can be corrected manually. We place county information into the same format that is constructed here, and include information about why we are overriding the data and the new source of information.
```
# Load the county manual-override table. fips_code is read as a string so
# leading zeros survive.
manual_override_data = pd.read_csv(
    external_data_path('covidcaremap-ushcsc-county-manual-override.csv'),
    dtype={'fips_code': str}
)
# 'records' is the correct orient name; the old 'record' alias is
# deprecated and rejected by modern pandas.
override_dict = manual_override_data.to_dict(orient='records')
override_dict_by_fips_code = {r['fips_code']: r for r in override_dict}

# Columns replaced wholesale by an override row. Use a sorted list (not a
# set) so the .loc column indexer and the value list share a stable order;
# modern pandas no longer accepts sets as indexers.
county_cols = sorted(set(merged_county_gdf.columns) - {'fips_code',
                                                       'ICU Bed Source',
                                                       'ICU Bed Source Last Updated',
                                                       'geometry',
                                                       'GEO_ID'})
for fips_code, entries in override_dict_by_fips_code.items():
    print('Overriding county {} for reason "{}" with new data source {}'.format(
        entries['fips_code'],
        entries['Manual Override Reason'],
        entries['Manual Override New Data Source']
    ))
    county_mask = merged_county_gdf['fips_code'] == fips_code
    if not merged_county_gdf[county_mask].values.any():
        raise Exception('Cannot find county with fips code {}'.format(fips_code))
    merged_county_gdf.loc[county_mask, county_cols] = [entries[col] for col in county_cols]
    # Record where the override came from and when.
    merged_county_gdf.loc[county_mask, 'ICU Bed Source'] = entries['Manual Override New Data Source']
    merged_county_gdf.loc[county_mask, 'ICU Bed Source Last Updated'] = entries['Source Date']
```
Merge with county geometries and write out results.
```
# Restore the GeoDataFrame type (lost by merge) and publish the GeoJSON.
ccm_county_gdf = gpd.GeoDataFrame(merged_county_gdf, crs='EPSG:4326')
ccm_county_gdf.to_file(county_geojson_path, driver='GeoJSON')
# CSV output carries no geometry and is sorted for stable diffs.
ccm_county_df = merged_county_gdf.drop(columns=['geometry']).sort_values(by=['State',
                                                                             'County Name'])
ccm_county_df.to_csv(county_csv_path, index=False)
```
## By State
```
# Roll facility-level counts up to US states.
state_fname = 'us_healthcare_capacity-state-CovidCareMap'
state_geojson_path = published_data_path(f'{state_fname}.geojson')
state_csv_path = published_data_path(f'{state_fname}.csv')

state_gdf = read_us_states_gdf()
filtered_state_gdf = state_gdf[['State',
                                'geometry',
                                'Population',
                                'Population (20+)',
                                'Population (65+)']]

# Drop the facility frame's own 'State' column so the spatial join against
# the state geometries determines the state assignment.
facility_without_state_gdf = facility_gdf.drop(columns=['State'])
hosp_state_gdf = sum_per_region(facility_without_state_gdf,
                                filtered_state_gdf,
                                groupby_columns=['State'],
                                region_id_column='State')

# Re-attach full state names and restore the GeoDataFrame type lost by merge.
hosp_state_gdf = gpd.GeoDataFrame(
    state_gdf[['State', 'State Name']].merge(hosp_state_gdf, on='State'),
    crs='EPSG:4326'
)
# State ICU bed counts come straight from the facility roll-up.
hosp_state_gdf['ICU Bed Source'] = 'Facility aggregation'
hosp_state_gdf['ICU Bed Source Last Updated'] = np.nan
```
### Manual Override - State
There are instances where we get state-level data from a more up-to-date source, or the data is off and can be corrected manually. We place county information into the same format that is constructed here, and include information about why we are overriding the data and the new source of information.
```
# Load the state manual-override table.
manual_override_data = pd.read_csv(
    external_data_path('covidcaremap-ushcsc-state-manual-override.csv'),
    dtype={'State': str}
)
# 'records' is the correct orient name; the old 'record' alias is
# deprecated and rejected by modern pandas.
override_dict = manual_override_data.to_dict(orient='records')
override_dict_by_state = {r['State']: r for r in override_dict}

# Columns replaced wholesale by an override row. Use a sorted list (not a
# set) so the .loc column indexer and the value list share a stable order;
# modern pandas no longer accepts sets as indexers.
state_cols = sorted(set(hosp_state_gdf.columns) - {'State',
                                                   'ICU Bed Source',
                                                   'ICU Bed Source Last Updated',
                                                   'geometry'})
for state_abbrv, entries in override_dict_by_state.items():
    print('Overriding state {} for reason "{}" with new data source {}'.format(
        entries['State'],
        entries['Manual Override Reason'],
        entries['Manual Override New Data Source']
    ))
    state_mask = hosp_state_gdf['State'] == state_abbrv
    if not hosp_state_gdf[state_mask].values.any():
        raise Exception('Cannot find state {}'.format(state_abbrv))
    hosp_state_gdf.loc[state_mask, state_cols] = [entries[col] for col in state_cols]
    # Record where the override came from and when.
    hosp_state_gdf.loc[state_mask, 'ICU Bed Source'] = entries['Manual Override New Data Source']
    hosp_state_gdf.loc[state_mask, 'ICU Bed Source Last Updated'] = entries['Source Date']
```
### Merge ventilator data into state data
```
# Merge the 2010-study ventilator estimates into the state-level table.
vents_df = pd.read_csv(external_data_path('ventilators_by_state.csv'),
                       encoding='utf-8')
vents_df = vents_df.drop(columns=['Location']).rename(columns={'State Abbrv': 'State'})

# Suffix the estimate columns so it is explicit that they are older
# (2010 study) estimates rather than current figures.
_vent_estimate_columns = [
    'Estimated No. Full-Featured Mechanical Ventilators',
    'Estimated No. Full-Featured Mechanical Ventilators per 100,000 Population',
    'Estimated No. Pediatrics-Capable Full-Feature Mechanical Ventilators',
    'Estimated No. Full-Feature Mechanical Ventilators, Pediatrics Capable per 100,000 Population <14 y',
]
vent_renames = {column: column + ' (2010 study estimate)'
                for column in _vent_estimate_columns}
for column in vent_renames:
    assert column in vents_df
vents_df = vents_df.rename(columns=vent_renames)

hosp_state_gdf = hosp_state_gdf.merge(vents_df, on='State')

# Publish both a GeoJSON (with geometry) and a CSV (without geometry).
hosp_state_gdf.to_file(state_geojson_path, driver='GeoJSON')
hosp_state_df = hosp_state_gdf.drop(columns=['geometry']).sort_values(by='State')
hosp_state_df.to_csv(state_csv_path, index=False)
```
| github_jupyter |
# VTK tools
Pygslib use VTK:
- as data format and data converting tool
- to plot in 3D
- as a library with some basic computational geometry functions, for example to know if a point is inside a surface
Some of the functions in VTK were obtained or modified from Adamos Kyriakou at https://pyscience.wordpress.com/
```
import pygslib
import numpy as np
```
## Functions in vtktools
```
help(pygslib.vtktools)
```
## Load a cube defined in an stl file and plot it
STL is a popular mesh format supported by many non-commercial and commercial software packages, for example Paraview, Datamine Studio, etc.
```
# Load the cube mesh from the STL file.
mycube=pygslib.vtktools.loadSTL('../datasets/stl/cube.stl')
# Inspect the loaded data; note that it is a vtkPolyData (Python 2 print).
print mycube
# Create a VTK renderer containing the surface (mycube).
renderer = pygslib.vtktools.polydata2renderer(mycube, color=(1,0,0), opacity=0.50, background=(1,1,1))
# Render the scene from the given camera position.
pygslib.vtktools.vtk_show(renderer, camera_position=(-20,20,20), camera_focalpoint=(0,0,0))
```
## Ray casting to find intersections of a lines with the cube
This is basically how we plan to find points inside solid and to define blocks inside solid
```
# A ray defined by two points, e.g. a block model row or an infinite
# line passing through a drillhole sample.
pSource = [-50.0, 0.0, 0.0]
pTarget = [50.0, 0.0, 0.0]
# Draw the ray to see how it looks.
pygslib.vtktools.addLine(renderer,pSource, pTarget, color=(0, 1, 0))
pygslib.vtktools.vtk_show(renderer) # the camera position was already defined
# Find the coordinates of the ray/surface intersections.
intersect, points, pointsVTK= pygslib.vtktools.vtk_raycasting(mycube, pSource, pTarget)
print "the line intersects? ", intersect==1
print "the line is over the surface?", intersect==-1
# List of coordinates of the intersection points.
print points
# Plot the intersection points by adding them to the renderer.
for p in points:
    pygslib.vtktools.addPoint(renderer, p, radius=0.5, color=(0.0, 0.0, 1.0))
pygslib.vtktools.vtk_show(renderer)
```
### Test line on surface
```
# A ray lying (almost) on the cube surface: same setup as above but offset
# to y=5.01, just outside the face at y=5.
pSource = [-50.0, 5.01, 0]
pTarget = [50.0, 5.01, 0]
# Find the coordinates of the ray/surface intersections.
intersect, points, pointsVTK= pygslib.vtktools.vtk_raycasting(mycube, pSource, pTarget)
print "the line intersects? ", intersect==1
print "the line is over the surface?", intersect==-1
# List of coordinates of the intersection points.
print points
# Draw the ray and the intersection points.
pygslib.vtktools.addLine(renderer,pSource, pTarget, color=(0, 1, 0))
for p in points:
    pygslib.vtktools.addPoint(renderer, p, radius=0.5, color=(0.0, 0.0, 1.0))
pygslib.vtktools.vtk_show(renderer) # the camera position was already defined
# note that there is a tolerance of about 0.01
```
# Finding points
```
# Using the same cube but generating 150 arbitrary random points,
# uniformly distributed over the cube's bounding box [-10, 10]^3.
x = np.random.uniform(-10,10,150)
y = np.random.uniform(-10,10,150)
z = np.random.uniform(-10,10,150)
```
## Find points inside a solid
```
# Select all points inside the solid.
# These two methods are equivalent, but test=4 also works with open surfaces.
inside,p=pygslib.vtktools.pointquering(mycube, azm=0, dip=0, x=x, y=y, z=z, test=1)
inside1,p=pygslib.vtktools.pointquering(mycube, azm=0, dip=0, x=x, y=y, z=z, test=4)
# Points where the two methods disagree (expected: none).
err=inside==inside1
#print inside, tuple(p)
print x[~err]
print y[~err]
print z[~err]
# Prepare the scene: the solid, an x/y/z axis indicator, and the ray
# direction used by the query.
renderer = pygslib.vtktools.polydata2renderer(mycube, color=(1,0,0), opacity=0.70, background=(1,1,1))
# Axis indicator (r->x, g->y, b->z).
pygslib.vtktools.addLine(renderer,[-10,-10,-10], [-7,-10,-10], color=(1, 0, 0))
pygslib.vtktools.addLine(renderer,[-10,-10,-10], [-10,-7,-10], color=(0, 1, 0))
pygslib.vtktools.addLine(renderer,[-10,-10,-10], [-10,-10,-7], color=(0, 0, 1))
# Draw the ray direction returned by the query.
pygslib.vtktools.addLine(renderer, (0.,0.,0.), tuple(p), color=(0, 0, 0))
# Plot selected points (large, blue) and non-selected points (small, green).
for i in range(len(inside)):
    p=[x[i],y[i],z[i]]
    if inside[i]!=0:
        # point is inside the solid
        pygslib.vtktools.addPoint(renderer, p, radius=0.5, color=(0.0, 0.0, 1.0))
    else:
        pygslib.vtktools.addPoint(renderer, p, radius=0.2, color=(0.0, 1.0, 0.0))
# Rotate the view a little.
pygslib.vtktools.vtk_show(renderer, camera_position=(0,0,50), camera_focalpoint=(0,0,0))
```
# Find points over a surface
```
# Select all points over the surface (test = 2).
inside,p=pygslib.vtktools.pointquering(mycube, azm=0, dip=0, x=x, y=y, z=z, test=2)
# Prepare the scene: the solid, an x/y/z axis indicator, and the ray
# direction used by the query.
renderer = pygslib.vtktools.polydata2renderer(mycube, color=(1,0,0), opacity=0.70, background=(1,1,1))
# Axis indicator (r->x, g->y, b->z).
pygslib.vtktools.addLine(renderer,[-10,-10,-10], [-7,-10,-10], color=(1, 0, 0))
pygslib.vtktools.addLine(renderer,[-10,-10,-10], [-10,-7,-10], color=(0, 1, 0))
pygslib.vtktools.addLine(renderer,[-10,-10,-10], [-10,-10,-7], color=(0, 0, 1))
# Draw the ray direction (note the sign flip on p here).
pygslib.vtktools.addLine(renderer, (0.,0.,0.), tuple(-p), color=(0, 0, 0))
# Plot selected points (large, blue) and non-selected points (small, green).
for i in range(len(inside)):
    p=[x[i],y[i],z[i]]
    if inside[i]!=0:
        # point is over the surface
        pygslib.vtktools.addPoint(renderer, p, radius=0.5, color=(0.0, 0.0, 1.0))
    else:
        pygslib.vtktools.addPoint(renderer, p, radius=0.2, color=(0.0, 1.0, 0.0))
# Rotate the view a little.
pygslib.vtktools.vtk_show(renderer, camera_position=(0,0,50), camera_focalpoint=(0,0,0))
```
# Find points below a surface
```
# Select all points below the surface (test = 3) — the original comment
# incorrectly said test = 2.
inside,p=pygslib.vtktools.pointquering(mycube, azm=0, dip=0, x=x, y=y, z=z, test=3)
# Prepare the scene: the solid, an x/y/z axis indicator, and the ray
# direction used by the query.
renderer = pygslib.vtktools.polydata2renderer(mycube, color=(1,0,0), opacity=0.70, background=(1,1,1))
# Axis indicator (r->x, g->y, b->z).
pygslib.vtktools.addLine(renderer,[-10,-10,-10], [-7,-10,-10], color=(1, 0, 0))
pygslib.vtktools.addLine(renderer,[-10,-10,-10], [-10,-7,-10], color=(0, 1, 0))
pygslib.vtktools.addLine(renderer,[-10,-10,-10], [-10,-10,-7], color=(0, 0, 1))
# Draw the ray direction returned by the query.
pygslib.vtktools.addLine(renderer, (0.,0.,0.), tuple(p), color=(0, 0, 0))
# Plot selected points (large, blue) and non-selected points (small, green).
for i in range(len(inside)):
    p=[x[i],y[i],z[i]]
    if inside[i]!=0:
        # point is below the surface
        pygslib.vtktools.addPoint(renderer, p, radius=0.5, color=(0.0, 0.0, 1.0))
    else:
        pygslib.vtktools.addPoint(renderer, p, radius=0.2, color=(0.0, 1.0, 0.0))
# Rotate the view a little.
pygslib.vtktools.vtk_show(renderer, camera_position=(0,0,50), camera_focalpoint=(0,0,0))
```
## Export points to a VTK file
```
# Export the point coordinates plus the inside/outside flag to a VTK file
# ('points') for viewing in an external viewer such as Paraview or MayaVi.
data = {'inside': inside}
pygslib.vtktools.points2vtkfile('points', x,y,z, data)
```
The results can be ploted in an external viewer, for example mayavi or paraview:
<img src="figures/Fig_paraview.png">
| github_jupyter |
# Introduction to Pytorch
PyTorch is a popular deep learning framework that was introduced around one year ago. It has a slightly lower-level API than Keras, but is much easier to use than TensorFlow when it comes to defining custom model. It is very popular among researchers and kagglers - a little less in the industry (yet).
http://pytorch.org/
## Principles
PyTorch essentially provides the user with two main utilies:
- It allows to perform Tensor/vector computation on the GPU with an API similar (but not compatible) to Numpy.
- It records all computation to be able to backpropagate through them. That is, provided a sequence of operations that starts from a tensor $\theta$ to define a scalar $g(\theta)$, it is able to compute
$\nabla_\theta g(\theta)$ exactly, with only one function call.
Typically, $\theta$ will be a parameter of a neural network and $g$ a loss function such as $\ell(f_{\theta}(x), y)$ for supervised learning.
The essential difference with TensorFlow lies in the way $g$ is defined: in PyTorch, every node in the computation graph is created while executing the forward pass, from within the Python interpreter: any Numpy code can be ported to PyTorch easily, and all flow control operations (e.g. loops, if/else, etc.) can be kept untouched. In contrast, TensorFlow requires the user to define a graph more declaratively. This graph is then used internally (i.e. outside the Python interpreter), to compute the predictions, the loss value and its derivatives.
PyTorch takes care of recording everything it needs to do the backpropagation, *on the fly*.
Note that recent versions of TensorFlow (1.5 and later) now come with the [eager mode](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/eager/python/g3doc/guide.md) to make TensorFlow use the define-by-run semantics of PyTorch (and Chainer). However this mode is quite new and still experimental.
## Installation
We will use the CPU only version for the moment. We will assume that you have a working Anaconda environment.
### On Linux:
`conda install pytorch-cpu torchvision -c pytorch`
If you prefer to use pip, see: http://pytorch.org.
### On OSX
`conda install pytorch torchvision -c pytorch`
If you prefer to use pip, see: http://pytorch.org.
### On Windows
`conda install -c peterjc123 pytorch-cpu`
Refer to https://github.com/peterjc123/pytorch-scripts for Windows installation problems.
## A first example
We will define a vector $x \in \mathbb{R}^n$ and computes its norms using PyTorch. For this, we will define our first `Tensor` -- the Tensor object is the central element of PyTorch.
Let us fill this vector with random values
```
import torch
# Fix the RNG seed so the example values are reproducible.
torch.manual_seed(42)
n = 3
# Length-n float32 tensor filled in place with uniform samples from [0, 1).
x = torch.FloatTensor(n).uniform_()
```
`x` is a tensor, which can be manipulated roughly as a Numpy array:
```
x
```
Let's do a small API tour
```
x.shape, x.shape[0]
x * 10
```
We can go back and forth from numpy to PyTorch tensors:
```
x_np = x.numpy()
import numpy as np
A = torch.from_numpy(np.ones((n, n)))
A
```
Note that all tensors are typed and that you can only do operations with tensors of the same type:
```
torch.matmul(A, x)
torch.matmul(A.float(), x)
A.float() @ x
```
Let us now define a norm function that returns the norm of $x$
```
def f(x):
    """Return the Euclidean (L2) norm of *x* along its first dimension."""
    return torch.sum(x * x, dim=0).sqrt()
f(x)
```
We are now interested in computing $\nabla_x f(x) = \frac{x}{|| x ||_2}$.
**Exercise**:
- Show that if $f(x) = || x ||_2$ then $\nabla_x f(x) = \frac{x}{|| x ||_2}$.
Assume we are too lazy to derive the analytical form of the gradient manually. Instead we will use the `autograd` facilities of PyTorch: central to it is the Variable class:
```
from torch.autograd import Variable
```
Variables are typically initialized by wrapping some tensor value:
```
x = Variable(x, requires_grad=True)
x
x.data
```
Let us now compute the norm of f. All PyTorch functions can handle both Variables and Tensors.
```
norm = f(x)
norm
```
We can compute the gradient of this scalar variable with respect to all the variables that were used to compute its value.
The following `.backward()` call will assign `.grad` attributes to all Variables requires in $f$ computation for which a gradient is required.
```
norm.backward()
```
The gradient $\nabla_x f(x)$ can be found in `x.grad`, which is also a Variable
```
x.grad
```
Let us compare it to the result of the evaluation of the analytical expression of the derivative of f(x) given x: $\nabla_x f(x) = \frac{x}{|| x ||_2}$
```
expected_grad = x / f(x)
expected_grad
```
It works ! You now know everything you need to know to use PyTorch. Note that, similar to Keras, PyTorch comes with a number of predefined functions that are useful in network definition. Check out http://pytorch.org/docs/ and tutorials for an overview of the tools you can use.
**Exercises**
- Write the PyTorch code for a function `g` that computes the cosine similarity of two variable vectors with float entries $\mathbf{x}$ and $\mathbf{y}$:
$$g(\mathbf{x}, \mathbf{y}) = \frac{\mathbf{x}^T \mathbf{y}}{|| \mathbf{x} ||_2 || \mathbf{y} ||_2 }$$
- Use `torch.autograd` to compute the derivatives with respect to $\mathbf{x} \in \mathbb{R}^3$ and $\mathbf{y} \in \mathbb{R}^3$ for some values of your choice;
- Compute $\nabla_x g(x, y)$ and $\nabla_y g(x, y)$ for some choice of $\mathbf{x} = \alpha \cdot \mathbf{y}$ with any $\alpha \in \mathbb{R}$. Check that you can get the expected result with Pytorch.
```
def g(x, y):
    """Cosine similarity of two 1-D float vectors.

    Implements g(x, y) = x.y / (||x||_2 ||y||_2), replacing the
    placeholder sum that the original TODO asked to fix.
    """
    return torch.dot(x, y) / (torch.norm(x) * torch.norm(y))
x = Variable(torch.FloatTensor([0, 1, 2]), requires_grad=True)
y = Variable(torch.FloatTensor([3, 0.9, 2.2]), requires_grad=True)
cosine = g(x, y)
cosine
# %load solutions/cosine_autograd.py
# %load solutions/cosine_autograd_colinear.py
```
Let's reinitialize our two variables to non colinear values:
```
x = Variable(torch.FloatTensor([0, 1, 2]), requires_grad=True)
y = Variable(torch.FloatTensor([3, 0.9, 2.2]), requires_grad=True)
```
Execute the following cells several times (use `ctrl-enter`):
```
# One step of gradient ASCENT on the cosine similarity: evaluate g(x, y),
# backpropagate, then move x and y in the direction of their gradients.
cosine = g(x, y)
print(cosine)
cosine.backward()
# In-place updates of the underlying tensors (step size 0.5).
x.data.add_(0.5 * x.grad.data)
y.data.add_(0.5 * y.grad.data)
# Reset gradients so they do not accumulate across executions of this cell.
x.grad.data.zero_()
y.grad.data.zero_()
print("x", x)
print("y", y)
# NOTE(review): with a true cosine implementation of g, x / y should tend
# to a constant vector as x and y become colinear — confirm after the
# exercise above is solved.
print(x / y)
```
What do you observe?
## Comparing gradient descent methods
In this notebook, we will use PyTorch to compare the different gradient methods and a toy 2D examples: we will try to find the minimum of the difference of two Gaussians. PyTorch provides a convenient wrapper named `nn.Module` to define parametrized functions, that we will use along this tutorial.
```
import torch.nn as nn
from torch.nn import Parameter
class Norm(nn.Module):
    """Module computing the p-norm of a vector, with a learnable exponent p."""

    def __init__(self, p=2.):
        super(Norm, self).__init__()
        # The exponent is stored as a 1-element learnable parameter so that
        # gradients w.r.t. p can be computed.
        self.p = Parameter(torch.FloatTensor([p]))

    def forward(self, x):
        # ||x||_p = (sum_i x_i^p)^(1/p), summed over the first dimension.
        powered_sum = torch.pow(x, self.p).sum(dim=0)
        return torch.pow(powered_sum, 1 / self.p)
# Rebuild the example vector and wrap it as a Variable (no gradient needed
# w.r.t. x here; we differentiate w.r.t. the module parameter p instead).
torch.manual_seed(42)
x = torch.FloatTensor(n).uniform_()
x = Variable(x)
# Compute the 3-norm of x and backpropagate to populate norm.p.grad.
norm = Norm(p=3.)
v = norm(x)
v.backward()
```
We can access $\nabla_p(x \to || x ||_p)$ in `norm.p.grad`
```
norm.p.grad
```
We now define a Gaussian operator, along with a generic Gaussian combination. We will not consider the gradient w.r.t the parameters of these modules, hence we specify `requires_grad=False`
```
class Gaussian(nn.Module):
    """Unnormalized 2-D Gaussian density exp(-0.5 (x - m)^T P (x - m))."""

    def __init__(self, precision, mean):
        super(Gaussian, self).__init__()
        assert precision.shape == (2, 2)
        assert mean.shape == (2,)
        # Fixed (non-trainable) parameters of the density.
        self.precision = Parameter(precision, requires_grad=False)
        self.mean = Parameter(mean, requires_grad=False)

    def forward(self, x):
        """Evaluate the density for a 2-vector or a batch of 2-vectors."""
        centered = x - self.mean
        if centered.dim() == 1:
            # Promote a single 2-vector to a batch of one.
            centered = centered.view(1, -1)
        quadratic = torch.sum((centered @ self.precision) * centered, dim=1)
        return torch.exp(-.5 * quadratic)
class GaussianCombination(nn.Module):
    """Weighted sum of unnormalized Gaussian densities."""

    def __init__(self, precisions, means, weights):
        super(GaussianCombination, self).__init__()
        assert len(precisions) == len(means) == len(weights)
        # Register each component so it participates in the module tree.
        self.gaussians = nn.ModuleList(
            Gaussian(precision, mean)
            for precision, mean in zip(precisions, means))
        self.weights = weights

    def forward(self, x):
        total = 0
        for gaussian, weight in zip(self.gaussians, self.weights):
            total = total + weight * gaussian(x)
        return total
```
We now define $f(x) = \exp(-(x- m_1)^T P_1 (x - m_1)) - \exp(-(x- m_2)^T P_2 (x - m_2))$
```
# First Gaussian: positive weight (a "bump") centered at (0, 1).
p1 = torch.FloatTensor([[1, 0], [0, 4]])
m1 = torch.FloatTensor([0, 1])
w1 = 1
# Second Gaussian: negative weight (a "well") centered at (0, -2).
p2 = torch.FloatTensor([[1, -2], [-2, 10]])
m2 = torch.FloatTensor([0, -2])
w2 = - 1
# f(x) = exp(-(x-m1)^T P1 (x-m1)) - exp(-(x-m2)^T P2 (x-m2))
f = GaussianCombination([p1, p2], [m1, m2], [w1, w2])
```
We define a plotting function to visualize $f$. Note the small boilerplate to interface PyTorch with Numpy
```
import matplotlib.pyplot as plt
def plot_function(f, ax):
    """Draw labelled contour lines of f over the square [-3, 3]^2 on ax."""
    x_max, y_max, x_min, y_min = 3, 3, -3, -3
    x = np.linspace(x_min, x_max, 100, dtype=np.float32)
    y = np.linspace(y_min, y_max, 100, dtype=np.float32)
    X, Y = np.meshgrid(x, y)
    # Flatten the grid into a (100*100, 2) batch of 2-D sample points.
    samples = np.concatenate((X[:, :, None], Y[:, :, None]), axis=2)
    samples = samples.reshape(-1, 2)
    samples = Variable(torch.from_numpy(samples), requires_grad=False)
    # Evaluate f on the whole batch at once, then reshape back to the grid.
    Z = f(samples).data.numpy()
    Z = Z.reshape(100, 100)
    CS = ax.contour(X, Y, Z)
    ax.clabel(CS, inline=1, fontsize=10)
fig, ax = plt.subplots(figsize=(8, 8))
plot_function(f, ax)
```
We will now try to minimize $f$ using gradient descent, with optional flavors. For this, we define a minimize function that performs gradient descent, along with a helper class `GradientDescent` that will perform the updates given the gradient
```
class GradientDescent:
    """Minimal vanilla gradient-descent optimizer (mimics the torch.optim API)."""

    def __init__(self, params, lr=0.1):
        self.params = params
        self.lr = lr  # step size

    def step(self):
        """Take one descent step: p <- p - lr * grad(p)."""
        for parameter in self.params:
            parameter.data = parameter.data - self.lr * parameter.grad.data

    def zero_grad(self):
        """Reset accumulated gradients to zero (if any exist yet)."""
        for parameter in self.params:
            if parameter.grad is not None:
                parameter.grad.data.zero_()
def minimize(f, optimizer, max_iter=500, verbose=False):
    """Minimize f with the given optimizer and return the iterate trajectory.

    Works both with the hand-rolled optimizers above (which expose
    ``params``) and with ``torch.optim`` optimizers (``param_groups``).
    Returns a tensor of shape (n_steps, dim) holding the iterates visited
    before convergence; the tensor is empty if the starting point is
    already (near-)stationary.
    """
    if hasattr(optimizer, 'params'):
        [iterate] = optimizer.params
    else:
        # torch.optim optimizers store their parameters in param_groups.
        [iterate] = optimizer.param_groups[0]['params']
    iterate_record = []
    for i in range(max_iter):
        # iterate.grad may be non zero from the previous step; clear it first.
        optimizer.zero_grad()
        value = f(iterate)
        # Compute the gradient of f with respect to the parameters:
        value.backward()
        # iterate.grad now holds grad f(iterate); stop on (near-)zero gradient.
        if float(torch.sum(iterate.grad ** 2)) < 1e-6:
            if verbose:
                print("Converged!")
            break
        # Record the trajectory of the iterates (as a 1-row tensor each).
        iterate_record.append(iterate.data.clone()[None, :])
        if verbose:
            print('Iteration %i: f(x) = %e, x = [%e, %e]'
                  % (i, value, iterate[0], iterate[1]))
        # Perform the parameter update step using the gradient values.
        optimizer.step()
    if not iterate_record:
        # Converged immediately: return an empty (0, dim) trajectory instead
        # of letting torch.cat crash on an empty list.
        return iterate.data.new_zeros((0, iterate.shape[0]))
    return torch.cat(iterate_record, dim=0)
```
Run the minimization algorithm and plot it
```
# Run plain gradient descent on f from a fixed starting point; clone the
# init tensor so `init` itself is not mutated by the optimizer.
init = torch.FloatTensor([0.8, 0.8])
optimizer = GradientDescent([Variable(init.clone(), requires_grad=True)], lr=0.1)
iterate_rec = minimize(f, optimizer, verbose=True)
def plot_trace(iterate_rec, ax, label='', tags=True):
    """Overlay an optimization trajectory on an existing axis.

    ``iterate_rec`` is the (n_steps, 2) tensor returned by ``minimize``.
    When ``tags`` is True, every 10th step is annotated with its index
    (slightly jittered so overlapping labels stay readable).
    """
    trace = iterate_rec.numpy()
    n_steps = len(trace)
    [line] = ax.plot(trace[:, 0], trace[:, 1], linestyle=':',
                     marker='o', markersize=2,
                     label=label + " (%d steps)" % n_steps)
    # Reuse the line's color for the annotation boxes.
    color = plt.getp(line, 'color')
    bbox_props = dict(boxstyle="square,pad=0.3", ec=color, fc='white',
                      lw=1)
    if not tags:
        return
    for step in range(0, n_steps, 10):
        ax.annotate(step, xy=(trace[step, 0], trace[step, 1]),
                    xycoords='data',
                    xytext=(5 + np.random.uniform(-2, 2),
                            5 + np.random.uniform(-2, 2)),
                    textcoords='offset points',
                    bbox=bbox_props)
# Draw the objective twice, side by side: left panel with step-number
# tags on the trajectory, right panel with the bare trajectory only.
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 8))
plot_function(f, ax1)
plot_function(f, ax2)
plot_trace(iterate_rec, ax1, label='gradient_descent')
plot_trace(iterate_rec, ax2, label='gradient_descent', tags=False)
plt.legend();
```
## Exercices
- Try to move the initialization point to the other side of the yellow mountain, for instance at position `[0.8, 1.2]`. What do you observe? How do you explain this?
- Implement the step method of `MomentumGradientDescent`.
- Check that it behaves as `GradientDescent` for `momentum=0`
- Can you find a value of `momentum` that makes it converge faster than gradient descent for this objective function?
- Try to use [torch.optim.Adam](http://pytorch.org/docs/master/optim.html#torch.optim.Adam) in the minimization loop.
- Compare the three trajectories.
```
class MomentumGradientDescent(GradientDescent):
    """Gradient descent with (heavy-ball) momentum.

    Keeps one velocity buffer per parameter and applies the classic
    update:

        v <- momentum * v - lr * grad
        p <- p + v

    With ``momentum=0`` the velocity is exactly ``-lr * grad``, so the
    update reduces to plain ``GradientDescent``.
    """

    def __init__(self, params, lr=0.1, momentum=.9):
        super(MomentumGradientDescent, self).__init__(params, lr)
        self.momentum = momentum
        # One zero-initialized velocity tensor per parameter.
        self.velocities = [param.data.new(param.shape).zero_()
                           for param in params]

    def step(self):
        """One momentum step (was an unimplemented TODO)."""
        for param, velocity in zip(self.params, self.velocities):
            # Update the velocity in place so the buffer in
            # self.velocities keeps accumulating across steps.
            velocity.mul_(self.momentum)
            velocity.sub_(self.lr * param.grad.data)
            param.data = param.data + velocity
# %load solutions/momentum_optimizer.py
fig, ax = plt.subplots(figsize=(8, 8))
plot_function(f, ax)
lr = 0.1
init = torch.FloatTensor([0.8, 0.8])
# Reference trajectory: plain gradient descent from `init`.
optimizer = GradientDescent([Variable(init.clone(), requires_grad=True)], lr=lr)
iterate_rec = minimize(f, optimizer)
plot_trace(iterate_rec, ax, label='gradient descent', tags=False)
# Same starting point and learning rate, but with momentum enabled.
optimizer = MomentumGradientDescent([Variable(init.clone(), requires_grad=True)],
                                    lr=lr, momentum=0.9)
iterate_rec = minimize(f, optimizer)
plot_trace(iterate_rec, ax, label='momentum', tags=False)
# TODO: plot torch.optim.Adam
ax.legend();
```
What do you observe ? Try changing the momentum and the initialization to compare optimization traces.
| github_jupyter |
# Lake model continued
In the previous week you used the lake problem as a means of getting acquainted with the workbench. In this assignment we will continue with the lake problem, focusing explicitly on using it for open exploration. You can use the second part of [this tutorial](https://emaworkbench.readthedocs.io/en/latest/indepth_tutorial/open-exploration.html) for help.
**It is paramount that you are using the lake problem with 100 decision variables, rather than the one found on the website with the separate anthropogenic release decision**
## Apply sensitivity analysis
There is substantial support in the ema_workbench for global sensitivity. For this, the workbench relies on [SALib](https://salib.readthedocs.io/en/latest/) and feature scoring which is a machine learning alternative for global sensitivity analysis.
1. As a start, generate a release policy without any release *(hint: use a Policy instance)*
2. use sobol sampling with the no release policy, and set n to 10,000
3. calculate sobol indices for max_P and reliability based on 10, 100, 1000, and 10,000 experiments. *(hint: sobol is a deterministic sequence, so you can slice results from the data generated in step 2)*
```
from lakemodel_function import lake_problem
from ema_workbench import (Model, RealParameter, ScalarOutcome, Constant)
# NOTE(review): Constant is imported but not used in this cell.
#instantiate the model
lake_model = Model('lakeproblem', function=lake_problem)
lake_model.time_horizon = 100 # used to specify the number of timesteps
#specify uncertainties (ranges follow the standard lake-problem setup)
lake_model.uncertainties = [RealParameter('mean', 0.01, 0.05),
                            RealParameter('stdev', 0.001, 0.005),
                            RealParameter('b', 0.1, 0.45),
                            RealParameter('q', 2.0, 4.5),
                            RealParameter('delta', 0.93, 0.99)]
# set levers, one for each time step
lake_model.levers = [RealParameter(str(i), 0, 0.1) for i in
                     range(lake_model.time_horizon)] # we use time_horizon here
#specify outcomes
lake_model.outcomes = [ScalarOutcome('max_P'),
                       ScalarOutcome('utility'),
                       ScalarOutcome('inertia'),
                       ScalarOutcome('reliability')]
from ema_workbench import MultiprocessingEvaluator, ema_logging, Policy
from ema_workbench.em_framework.evaluators import SOBOL
ema_logging.log_to_stderr(ema_logging.INFO)
# "No release" policy: every per-timestep release lever fixed to 0.
no_release = Policy("norelease", **{l.name:0 for l in lake_model.levers})
n_scenarios = int(1e4)
# Run the Sobol-sampled scenarios under the single no-release policy,
# using all available cores.
with MultiprocessingEvaluator(lake_model) as evaluator:
    experiments, outcomes = evaluator.perform_experiments(n_scenarios, policies=[no_release],
                                                          uncertainty_sampling=SOBOL)
from SALib.analyze import sobol
from ema_workbench.em_framework import get_SALib_problem
# Outcome of interest to analyze.
ooi = 'max_P'
data = outcomes[ooi]
# SALib needs its own description of the uncertain factors.
problem = get_SALib_problem(lake_model.uncertainties)
# Saltelli/Sobol sampling uses N * (2D + 2) model runs for D factors
# (here D = 5, so 12 runs per base sample N). Because the Sobol sequence
# is deterministic, increasingly large prefixes of the results give the
# indices for N = 10, 100, 1000, 10000.
# NOTE(review): the largest slice needs int(1e4) * 12 runs -- confirm
# that perform_experiments above actually produced that many experiments.
SI = []
for i in [1e1, 1e2, 1e3, 1e4]:
    i = int(i) * (2*len(lake_model.uncertainties) + 2)
    y = data[0:i]
    si = sobol.analyze(problem, y)
    SI.append(si)
def plot_si(Si):
    """Bar-plot first-order (S1) and total (ST) Sobol indices with their
    confidence intervals for every uncertain factor.

    NOTE(review): relies on the module-level ``problem`` for the factor
    names -- consider passing it in explicitly.
    """
    # Keep only the indices and confidence bounds that get plotted.
    Si_filter = {k:Si[k] for k in ['ST','ST_conf','S1','S1_conf']}
    Si_df = pd.DataFrame(Si_filter, index=problem['names'])
    sns.set_style('white')
    fig, ax = plt.subplots(1)
    indices = Si_df[['S1','ST']]
    err = Si_df[['S1_conf','ST_conf']]
    # Confidence intervals drawn as symmetric error bars.
    indices.plot.bar(yerr=err.values.T,ax=ax)
    fig.set_size_inches(8,6)
    # Leave room for the rotated x tick labels.
    fig.subplots_adjust(bottom=0.3)
for Si in SI:
plot_si(Si)
plt.show()
```
| github_jupyter |
##### Copyright 2020 Google
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Quantum Chess REST Client
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://quantumai.google/cirq/experiments/quantum_chess/quantum_chess_client"><img src="https://quantumai.google/site-assets/images/buttons/quantumai_logo_1x.png" />View on QuantumAI</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/quantumlib/ReCirq/blob/master/docs/quantum_chess/quantum_chess_client.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/colab_logo_1x.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/quantumlib/ReCirq/blob/master/docs/quantum_chess/quantum_chess_client.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/github_logo_1x.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/ReCirq/docs/quantum_chess/quantum_chess_client.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/download_icon_1x.png" />Download notebook</a>
</td>
</table>
This is a basic client meant to test the server implemented at the end of the [Quantum Chess REST API](./quantum_chess_rest_api.ipynb) documentation. You must run that previous Colab for this one to work.
## Setup
```
# Install the two runtime dependencies on first use (e.g. on Colab).
# `!pip` is IPython shell magic, so this cell only runs in a notebook.
try:
    import recirq
except ImportError:
    !pip install git+https://github.com/quantumlib/ReCirq -q
try:
    import requests
except ImportError:
    !pip install requests -q
```
The server for the Quantum Chess Rest API endpoints should provide you with an ngrok url when you run it. **Paste the url provided by your server in the form below**. If your server is running, the following code should produce the message: "Running Flask on Google Colab!"
```
url = "http://bd626d83c9ec.ngrok.io/" # @param {type:"string"}
!curl -s $url
```
You should be able to see the server output indicating a connection was made.
## Initialization
Make a simple request to initialize a board with the starting occupancy state of all pieces. Using the bitboard format, the initial positions of pieces are given by the hex 0xFFFF00000000FFFF. This initializes all squares in ranks 1, 2, 7, and 8 to be occupied.
```
import requests
# 0xFFFF00000000FFFF is a bitboard (one bit per square, a1 = bit 0):
# ranks 1-2 and 7-8 occupied, i.e. the standard starting position.
init_board_json = {"init_basis_state": 0xFFFF00000000FFFF}
response = requests.post(url + "/quantumboard/init", json=init_board_json)
print(response.content)
```
## Superposition
With the board initialized, you can execute a few moves to see what happens. You can create superposition by executing a split move from b1 to a3 and c3. Watch the server output to see the execution of this move.
```
from recirq.quantum_chess.enums import MoveType, MoveVariant
from recirq.quantum_chess.bit_utils import square_to_bit
# Split move: the knight on b1 moves to a3 AND c3 in superposition.
split_b1_a3_c3 = {
    "square1": square_to_bit("b1"),
    "square2": square_to_bit("a3"),
    "square3": square_to_bit("c3"),
    "type": int(MoveType.SPLIT_JUMP.value),
    "variant": int(MoveVariant.BASIC.value),
}
response = requests.post(url + "/quantumboard/do_move", json=split_b1_a3_c3)
print(response.content)
```
## Entanglement
You can see, in the probabilities returned, a roughly 50/50 split for two of the squares. A pawn two-step move, from c2 to c4, will entangle the pawn on c2 with the piece in superposition on a3 and c3.
```
# Pawn two-step c2 -> c4. square3 is set to 0 here; only split moves
# (see the cell above) involve a third square.
move_c2_c4 = {
    "square1": square_to_bit("c2"),
    "square2": square_to_bit("c4"),
    "square3": 0,
    "type": int(MoveType.PAWN_TWO_STEP.value),
    "variant": int(MoveVariant.BASIC.value),
}
response = requests.post(url + "/quantumboard/do_move", json=move_c2_c4)
print(response.content)
```
## Measurement
The probability distribution returned doesn't show the entanglement, but it still exists in the underlying state. You can see this by doing a move that forces a measurement. An excluded move from d1 to c2 will force a measurement of the c2 square. In the server output you should see the collapse of the state, with c2, c3, c4, and a3 taking definite 0 or 100% probabilities.
```
# Excluded move d1 -> c2: forces a measurement of the c2 square,
# collapsing the entangled state (see the markdown above).
move_d1_c2 = {
    "square1": square_to_bit("d1"),
    "square2": square_to_bit("c2"),
    "square3": 0,
    "type": int(MoveType.JUMP.value),
    "variant": int(MoveVariant.EXCLUDED.value),
}
response = requests.post(url + "/quantumboard/do_move", json=move_d1_c2)
print(response.content)
```
You can see the entanglement correlation by running the following cell a few times. There should be two different outcomes, the first with both c2 and c3 are 100%, and the second with c4 and a3 both 100%.
```
# Undo the measuring move and replay it; running this cell repeatedly
# shows the two correlated outcomes of the entangled state.
response = requests.post(url + "/quantumboard/undo_last_move")
print(response.content)
response = requests.post(url + "/quantumboard/do_move", json=move_d1_c2)
print(response.content)
```
| github_jupyter |
# Median for RDDs, Datasets, and Dataframes
### Getting `spark` up and running
```
classpath.add(
"org.apache.spark" %% "spark-core" % "2.0.2",
"org.apache.spark" %% "spark-sql" % "2.0.2",
"org.apache.spark" %% "spark-mllib" % "2.0.2"
);
import org.apache.spark.sql.{SparkSession, DataFrame, Dataset}
val spark = SparkSession.builder().master("local[*]").getOrCreate()
import spark.implicits._
```
### Creating a `Dataset[Double]`
```
val ds1 = spark.createDataset(Seq(1)).map(_.toDouble)
val ds2 = spark.createDataset(Seq(1, 2)).map(_.toDouble)
val ds3 = spark.createDataset(Seq(1, 2, 3)).map(_.toDouble)
val ds4 = spark.createDataset(Seq(1, 2, 3, 4)).map(_.toDouble)
val ds5 = spark.createDataset(Seq(1, 2, 3, 4, 5)).map(_.toDouble)
```
#### Dataset with odd number of elements
```
val Array(median) = ds5.stat.approxQuantile("value",
Array(0.5),
relativeError = 0.1)
```
This is strange to me. My understanding is that `relativeError=0` is supposed to result in an exact median calculation. I will have to look into this further.
```
val Array(median) = ds5.stat.approxQuantile("value",
Array(0.5),
relativeError = 0)
```
#### Dataset with even number of elements
```
val Array(median) = ds4.stat.approxQuantile("value",
Array(0.5),
relativeError = 0.1)
val Array(median) = ds4.stat.approxQuantile("value",
Array(0.5),
relativeError = 0)
```
#### Dataset of 1 element
```
val Array(median) = ds1.stat.approxQuantile("value",
Array(0.5),
relativeError = 0.1)
val Array(median) = ds1.stat.approxQuantile("value",
Array(0.5),
relativeError = 0)
```
### Exact median calculation with RDDs
This is not an efficient implementation but it works.
```
import org.apache.spark.sql.Dataset

/** Exact median of a `Dataset[Double]`.
  *
  * Not efficient (full sort plus per-index filters over the RDD) but
  * exact, unlike `approxQuantile`. For an even number of elements the
  * two middle values are averaged.
  */
def median(ds: Dataset[Double], column: String = "value"): Double = {
  // Order the dataset
  val dsOrdered = ds.orderBy(column)
  val count = ds.count()
  val dsDouble = dsOrdered.select(column).as[Double]
  // Zip the Dataset with index so we can lookup
  // values by index
  val dsWithIndex = dsDouble.rdd.zipWithIndex()
  if (count % 2 == 0) {
    // Even count: average the two middle elements.
    val left = dsWithIndex
      .filter(_._2 == count / 2 - 1)
      .collect()(0)._1
    val right = dsWithIndex
      .filter(_._2 == count / 2)
      .collect()(0)._1
    (left + right) / 2
  } else {
    // Odd count: take the single middle element.
    dsWithIndex.
      filter(_._2 == count / 2)
      .collect()(0)._1
  }
}
median(ds5)
median(ds4)
median(ds1)
median(ds2)
```
| github_jupyter |

## Data-X: Titanic Survival Analysis
**Authors:** Several public Kaggle Kernels, edits by Alexander Fred Ojala & Kevin Li
<img src="data/Titanic_Variable.png">
# Note
Install the xgboost package in your Python environment:
try:
```
$ conda install py-xgboost
```
```
'''
# You can also install the package by running the line below
# directly in your notebook
''';
#!conda install py-xgboost --y
```
## Import packages
```
# No warnings
import warnings
warnings.filterwarnings('ignore') # Filter out warnings
# data analysis and wrangling
import pandas as pd
import numpy as np
import random as rnd
# visualization
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
# machine learning
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB # Gaussian Naive Bays
from sklearn.linear_model import Perceptron
from sklearn.linear_model import SGDClassifier #stochastic gradient descent
from sklearn.tree import DecisionTreeClassifier
import xgboost as xgb
# Plot styling
sns.set(style='white', context='notebook', palette='deep')
plt.rcParams[ 'figure.figsize' ] = 9 , 5
```
### Define fancy plot to look at distributions
```
# Special distribution plot (will be used later)
def plot_distribution( df , var , target , **kwargs ):
    """Plot kernel-density estimates of `var`, one curve per value of
    `target`, optionally faceted into a grid via the `row` / `col`
    keyword arguments (column names of `df`)."""
    row = kwargs.get( 'row' , None )
    col = kwargs.get( 'col' , None )
    facet = sns.FacetGrid( df , hue=target , aspect=4 , row = row , col = col )
    facet.map( sns.kdeplot , var , shade= True )
    # Clip the x-axis to the observed range of `var`.
    facet.set( xlim=( 0 , df[ var ].max() ) )
    facet.add_legend()
    plt.tight_layout()
```
## References to material we won't cover in detail:
* **Gradient Boosting:** http://blog.kaggle.com/2017/01/23/a-kaggle-master-explains-gradient-boosting/
* **Naive Bayes:** http://scikit-learn.org/stable/modules/naive_bayes.html
* **Perceptron:** http://aass.oru.se/~lilien/ml/seminars/2007_02_01b-Janecek-Perceptron.pdf
## Input Data
```
train_df = pd.read_csv('data/train.csv')
test_df = pd.read_csv('data/test.csv')
combine = [train_df, test_df]
# NOTE! When we change train_df or test_df the objects in combine
# will also change
# (combine is only a pointer to the objects)
# combine is used to ensure whatever preprocessing is done
# on training data is also done on test data
```
# Exploratory Data Analysis (EDA)
We will analyze the data to see how we can work with it and what makes sense.
```
print(train_df.columns.values)
# preview the data
train_df.head(5)
# General data statistics
train_df.describe()
# Data Frame information (null, data type etc)
train_df.info()
```
### Comment on the Data
<div class='alert alert-info'>
`PassengerId` is a random number and thus does not contain any valuable information. `Survived, Passenger Class, Age Siblings Spouses, Parents Children` and `Fare` are numerical values -- so we don't need to transform them, but we might want to group them (i.e. create categorical variables). `Sex, Embarked` are categorical features that we need to map to integer values. `Name, Ticket` and `Cabin` might also contain valuable information.
</div>
# Preprocessing Data
```
# check dimensions of the train and test datasets
print("Shapes Before: (train) (test) = ", \
train_df.shape, test_df.shape)
# Drop columns 'Ticket', 'Cabin', need to do it for both test
# and training
train_df = train_df.drop(['Ticket', 'Cabin'], axis=1)
test_df = test_df.drop(['Ticket', 'Cabin'], axis=1)
combine = [train_df, test_df]
print("Shapes After: (train) (test) =", train_df.shape, test_df.shape)
# Check if there are null values in the datasets
print(train_df.isnull().sum())
print()
print(test_df.isnull().sum())
```
### Hypothesis
The Title of the person is a feature that can predict survival
```
# List example titles in Name column
train_df.Name[:5]
# from the Name column we will extract title of each passenger
# and save that in a column in the dataset called 'Title'
# if you want to match Titles or names with any other expression
# refer to this tutorial on regex in python:
# https://www.tutorialspoint.com/python/python_reg_expressions.htm
# Create new column called title
for dataset in combine:
dataset['Title'] = dataset.Name.str.extract(' ([A-Za-z]+)\.', expand=False)
# Double check that our titles makes sense (by comparing to sex)
pd.crosstab(train_df['Title'], train_df['Sex'])
# same for test set
pd.crosstab(test_df['Title'], test_df['Sex'])
# We see common titles like Miss, Mrs, Mr, Master are dominant, we will
# correct some Titles to standard forms and replace the rarest titles
# with single name 'Rare'
for dataset in combine:
dataset['Title'] = dataset['Title'].\
replace(['Lady', 'Countess','Capt', 'Col', 'Don', 'Dr',\
'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss') #Mademoiselle
dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs') #Madame
# Now that we have more logical titles, and a few groups
# we can plot the survival chance for each title
train_df[['Title', 'Survived']].groupby(['Title']).mean()
# We can also plot it
sns.countplot(x='Survived', hue="Title", data=train_df, order=[1,0])
plt.xticks(range(2),['Made it','Deceased']);
train_df.isnull().sum()
# Title dummy mapping
for dataset in combine:
binary_encoded = pd.get_dummies(dataset.Title)
newcols = binary_encoded.columns
dataset[newcols] = binary_encoded
train_df.head()
train_df = train_df.drop(['Name', 'Title', 'PassengerId'], axis=1)
test_df = test_df.drop(['Name', 'Title'], axis=1)
combine = [train_df, test_df]
train_df.shape, test_df.shape
# Map Sex to binary categories
for dataset in combine:
dataset['Sex'] = dataset['Sex']. \
map( {'female': 1, 'male': 0} ).astype(int)
train_df.head()
```
### Handle missing values for age
We will now guess values of age based on sex (male / female)
and socioeconomic class (1st,2nd,3rd) of the passenger.
The row indicates the sex, male = 0, female = 1
More refined estimate than only median / mean etc.
```
guess_ages = np.zeros((2,3),dtype=int) #initialize
guess_ages
# Fill the NA's for the Age columns
# with "qualified guesses"
for idx,dataset in enumerate(combine):
if idx==0:
print('Working on Training Data set\n')
else:
print('-'*35)
print('Working on Test Data set\n')
print('Guess values of age based on sex and pclass of the passenger...')
for i in range(0, 2):
for j in range(0,3):
guess_df = dataset[(dataset['Sex'] == i) \
&(dataset['Pclass'] == j+1)]['Age'].dropna()
# Extract the median age for this group
# (less sensitive) to outliers
age_guess = guess_df.median()
# Convert random age float to int
guess_ages[i,j] = int(age_guess)
print('Guess_Age table:\n',guess_ages)
print ('\nAssigning age values to NAN age values in the dataset...')
for i in range(0, 2):
for j in range(0, 3):
dataset.loc[ (dataset.Age.isnull()) & (dataset.Sex == i) \
& (dataset.Pclass == j+1),'Age'] = guess_ages[i,j]
dataset['Age'] = dataset['Age'].astype(int)
print()
print('Done!')
train_df.head()
# Split into age bands and look at survival rates
train_df['AgeBand'] = pd.cut(train_df['Age'], 5)
train_df[['AgeBand', 'Survived']].groupby(['AgeBand'], as_index=False)\
.mean().sort_values(by='AgeBand', ascending=True)
# Plot distributions of Age of passangers who survived
# or did not survive
plot_distribution( train_df , var = 'Age' , target = 'Survived' ,\
row = 'Sex' )
# Change Age column to
# map Age ranges (AgeBands) to integer values of categorical type
for dataset in combine:
dataset.loc[ dataset['Age'] <= 16, 'Age'] = 0
dataset.loc[(dataset['Age'] > 16) & (dataset['Age'] <= 32), 'Age'] = 1
dataset.loc[(dataset['Age'] > 32) & (dataset['Age'] <= 48), 'Age'] = 2
dataset.loc[(dataset['Age'] > 48) & (dataset['Age'] <= 64), 'Age'] = 3
dataset.loc[ dataset['Age'] > 64, 'Age']=4
train_df.head()
# Note we could just run
# dataset['Age'] = pd.cut(dataset['Age'], 5,labels=[0,1,2,3,4])
# remove AgeBand from before
train_df = train_df.drop(['AgeBand'], axis=1)
combine = [train_df, test_df]
train_df.head()
```
# Create variable for Family Size
How did the number of people the person traveled with impact the chance of survival?
```
# SibSp = Number of Sibling / Spouses
# Parch = Parents / Children
for dataset in combine:
dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1
# Survival chance with FamilySize
train_df[['FamilySize', 'Survived']].groupby(['FamilySize'], as_index=False).mean().sort_values(by='Survived', ascending=False)
# Plot it, 1 is survived
sns.countplot(x='Survived', hue="FamilySize", data=train_df, order=[1,0]);
# Binary variable if the person was alone or not
for dataset in combine:
dataset['IsAlone'] = 0
dataset.loc[dataset['FamilySize'] == 1, 'IsAlone'] = 1
train_df[['IsAlone', 'Survived']].groupby(['IsAlone'], as_index=False).mean()
# We will only use the binary IsAlone feature for further analysis
train_df = train_df.drop(['Parch', 'SibSp', 'FamilySize'], axis=1)
test_df = test_df.drop(['Parch', 'SibSp', 'FamilySize'], axis=1)
combine = [train_df, test_df]
train_df.head()
# We can also create new features based on intuitive combinations
for dataset in combine:
dataset['Age*Class'] = dataset.Age * dataset.Pclass
train_df.loc[:, ['Age*Class', 'Age', 'Pclass']].head(8)
```
# Port the person embarked from
Let's see how that influences chance of survival
```
# To replace Nan value in 'Embarked', we will use the mode
# in 'Embaraked'. This will give us the most frequent port
# the passengers embarked from
freq_port = train_df.Embarked.dropna().mode()[0]
freq_port
# Fill NaN 'Embarked' Values in the datasets
for dataset in combine:
dataset['Embarked'] = dataset['Embarked'].fillna(freq_port)
train_df[['Embarked', 'Survived']].groupby(['Embarked'], as_index=False).mean().sort_values(by='Survived', ascending=False)
# Let's plot it
sns.countplot(x='Survived', hue="Embarked", data=train_df, order=[1,0]);
# Create categorical dummy variables for Embarked values
for dataset in combine:
binary_encoded = pd.get_dummies(dataset.Embarked)
newcols = binary_encoded.columns
dataset[newcols] = binary_encoded
train_df.head()
# Drop Embarked
for dataset in combine:
dataset.drop('Embarked', axis=1, inplace=True)
```
## Handle continuous values in the Fare column
```
# Fill the NA values in the Fares column with the median
test_df['Fare'].fillna(test_df['Fare'].dropna().median(), inplace=True)
test_df.head()
# q cut will find ranges equal to the quartile of the data
train_df['FareBand'] = pd.qcut(train_df['Fare'], 4)
train_df[['FareBand', 'Survived']].groupby(['FareBand'], as_index=False).mean().sort_values(by='FareBand', ascending=True)
for dataset in combine:
dataset.loc[ dataset['Fare'] <= 7.91, 'Fare'] = 0
dataset.loc[(dataset['Fare'] > 7.91) & \
(dataset['Fare'] <= 14.454), 'Fare'] = 1
dataset.loc[(dataset['Fare'] > 14.454) & \
(dataset['Fare'] <= 31), 'Fare'] = 2
dataset.loc[ dataset['Fare'] > 31, 'Fare'] = 3
dataset['Fare'] = dataset['Fare'].astype(int)
train_df = train_df.drop(['FareBand'], axis=1)
combine = [train_df, test_df]
```
## Finished
```
train_df.head(7)
# All features are approximately on the same scale
# no need for feature engineering / normalization
test_df.head(7)
# Check correlation between features
# (uncorrelated features are generally more powerful predictors)
colormap = plt.cm.viridis
plt.figure(figsize=(12,12))
plt.title('Pearson Correlation of Features', y=1.05, size=15)
sns.heatmap(train_df.astype(float).corr().round(2)\
,linewidths=0.1,vmax=1.0, square=True, cmap=colormap, \
linecolor='white', annot=True);
```
# Next Up: Machine Learning!
Now we will Model, Predict, and Choose algorithm for conducting the classification
Try using different classifiers to model and predict. Choose the best model from:
* Logistic Regression
* KNN
* SVM
* Naive Bayes
* Decision Tree
* Random Forest
* Perceptron
* XGBoost
## Setup Train and Validation Set
```
X = train_df.drop("Survived", axis=1) # Training & Validation data
Y = train_df["Survived"] # Response / Target Variable
# Since we don't have labels for the test data
# this won't be used. It's only for Kaggle Submissions
X_submission = test_df.drop("PassengerId", axis=1).copy()
print(X.shape, Y.shape)
# Split training and test set so that we test on 20% of the data
# Note that our algorithms will never have seen the validation
# data during training. This is to evaluate how good our estimators are.
np.random.seed(1337) # set random seed for reproducibility
from sklearn.model_selection import train_test_split
X_train, X_val, Y_train, Y_val = train_test_split(X, Y, test_size=0.2)
print(X_train.shape, Y_train.shape)
print(X_val.shape, Y_val.shape)
```
## Scikit-Learn general ML workflow
1. Instantiate model object
2. Fit model to training data
3. Let the model predict output for unseen data
4. Compare predicitons with actual output to form accuracy measure
# Logistic Regression
```
logreg = LogisticRegression() # instantiate
logreg.fit(X_train, Y_train) # fit
Y_pred = logreg.predict(X_val) # predict
acc_log = round(logreg.score(X_val, Y_val) * 100, 2) # evaluate
acc_log
# Support Vector Machines
svc = SVC()
svc.fit(X_train, Y_train)
Y_pred = svc.predict(X_val)
acc_svc = round(svc.score(X_val, Y_val) * 100, 2)
acc_svc
knn = KNeighborsClassifier(n_neighbors = 3)
knn.fit(X_train, Y_train)
Y_pred = knn.predict(X_val)
acc_knn = round(knn.score(X_val, Y_val) * 100, 2)
acc_knn
# Perceptron
perceptron = Perceptron()
perceptron.fit(X_train, Y_train)
Y_pred = perceptron.predict(X_val)
acc_perceptron = round(perceptron.score(X_val, Y_val) * 100, 2)
acc_perceptron
# XGBoost
gradboost = xgb.XGBClassifier(n_estimators=1000)
gradboost.fit(X_train, Y_train)
Y_pred = gradboost.predict(X_val)
acc_perceptron = round(gradboost.score(X_val, Y_val) * 100, 2)
acc_perceptron
# Random Forest
random_forest = RandomForestClassifier(n_estimators=1000)
random_forest.fit(X_train, Y_train)
Y_pred = random_forest.predict(X_val)
acc_random_forest = round(random_forest.score(X_val, Y_val) * 100, 2)
acc_random_forest
# Look at importnace of features for random forest
def plot_model_var_imp( model , X , y ):
    """Plot the ten MOST important features of a fitted tree-ensemble
    model and print its training accuracy.

    `model` must expose `feature_importances_` and `score` (e.g. a
    fitted RandomForestClassifier); `X` is the feature DataFrame and
    `y` the target used for the accuracy printout.
    """
    imp = pd.DataFrame(
        model.feature_importances_ ,
        columns = [ 'Importance' ] ,
        index = X.columns
    )
    imp = imp.sort_values( [ 'Importance' ] , ascending = True )
    # BUG FIX: after an ascending sort, imp[:10] selected the ten LEAST
    # important features. Take the last ten instead; with a horizontal
    # bar plot the most important feature then appears at the top.
    imp[ -10: ].plot( kind = 'barh' )
    print ('Training accuracy Random Forest:',model.score( X , y ))
plot_model_var_imp(random_forest, X_train, Y_train)
# How to create a Kaggle submission:
Y_submission = random_forest.predict(X_submission)
submission = pd.DataFrame({
"PassengerId": test_df["PassengerId"],
"Survived": Y_submission
})
submission.to_csv('titanic.csv', index=False)
```
# Legacy code (not used anymore)
```python
# Map title string values to numbers so that we can make predictions
title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5}
for dataset in combine:
dataset['Title'] = dataset['Title'].map(title_mapping)
dataset['Title'] = dataset['Title'].fillna(0)
# Handle missing values
train_df.head()
```
```python
# Drop the unnecessary Name column (we have the titles now)
train_df = train_df.drop(['Name', 'PassengerId'], axis=1)
test_df = test_df.drop(['Name'], axis=1)
combine = [train_df, test_df]
train_df.shape, test_df.shape
```
```python
# Create categorical dummy variables for Embarked values
for dataset in combine:
dataset['Embarked'] = dataset['Embarked'].map( {'S': 0, 'C': 1, 'Q': 2} ).astype(int)
train_df.head()
```
| github_jupyter |
# Scientific Computing with Python (Second Edition)
# Chapter 14
We start by importing all from Numpy. As explained in Chapter 01 the examples are written assuming this import is initially done.
```
from numpy import *
```
## 14.1 File handling
### 14.1.1 Interacting with files
```
# creating a new file object from an existing file
myfile = open('measurement.dat','r')
print(myfile.read())
myfile.close() # closes the file object
with open('measurement.dat','r') as myfile:
print(myfile.read())
myfile = open('a_file.dat','w')
myfile.write('some data')
a = 1/0
myfile.write('other data')
myfile.close()
with open('a_file.dat','w') as myfile:
myfile.write('some data')
a = 1/0
myfile.write('other data')
```
### 14.1.2 Files are iterables
```
with open('temp.dat','r') as myfile:
for line in myfile:
data = line.split(';')
print(f'time {data[0]} sec temperature {data[1]} C')
data = 'aa;bb;cc;dd;ee;ff;gg'
data.split(';') # ['aa', 'bb', 'cc', 'dd', 'ee', 'ff', 'gg']
data = 'aa bb cc dd ee ff gg'
data.split(' ') # ['aa', 'bb', 'cc', 'dd', 'ee', 'ff', 'gg']
with open('temp.dat','r') as myfile:
data = list(myfile)
data
```
### 14.1.3 File modes
```
with open('file3.dat','a') as myfile:
myfile.write('something new\n')
```
## 14.2 NumPy methods
### 14.2.1 savetxt
```
x = range(100) # 100 integers
savetxt('test.txt',x,delimiter=',') # use comma instead of space
savetxt('test.txt',x,fmt='%d') # integer format instead of float with e
```
### 14.2.3 loadtxt
```
filename = 'test.txt'
data = loadtxt(filename)
(data == x).all()
```
## 14.3 Pickling
```
import pickle
with open('file.dat','wb') as myfile:
a = random.rand(20,20)
b = 'hello world'
pickle.dump(a,myfile) # first call: first object
pickle.dump(b,myfile) # second call: second object
import pickle
with open('file.dat','rb') as myfile:
numbers = pickle.load(myfile) # restores the array
text = pickle.load(myfile) # restores the string
a = [1,2,3,4]
pickle.dumps(a) # returns a bytes object
b = {'a':1,'b':2}
pickle.dumps(b) # returns a bytes object
```
## 14.4 Shelves
```
from contextlib import closing
import shelve as sv
# opens a data file (creates it before if necessary)
with closing(sv.open('datafile')) as data:
A = array([[1,2,3],[4,5,6]])
data['my_matrix'] = A # here we created a key
from contextlib import closing
import shelve as sv
with closing(sv.open('datafile')) as data: # opens a data file
A = data['my_matrix'] # here we used the key
```
## 14.5 Reading and writing Matlab data files
You need Matlab to generate a *.mat file. We skip this part here.
## 14.6 Reading and writing images
```
import PIL.Image as pil # imports the Pillow module
# read image to array
im=pil.open("test.jpg")
print(im.size) # (275, 183)
# Number of pixels in horizontal and vertical directions
# resize image
im_big = im.resize((550, 366))
im_big_gray = im_big.convert("L") # Convert to grayscale
im_array=array(im)
print(im_array.shape)
print(im_array.dtype) # unint 8
# write result to new image file
im_big_gray.save("newimage.jpg")
```
| github_jupyter |
# Optical Detectors
**Scott Prahl**
**Aug 2021**
---
*If* `` ofiber `` *is not installed, uncomment the following cell (i.e., delete the initial #) and execute it with* `` shift-enter ``. *Afterwards, you may need to restart the kernel/runtime before the module will import successfully.*
```
#!pip install --user ofiber
# Jupyterlite support for ofiber
# (top-level `await` is allowed inside Jupyter/Jupyterlite cells)
try:
    import micropip
    await micropip.install("ofiber")
except ModuleNotFoundError:
    pass
try:
    import numpy as np
    import matplotlib.pyplot as plt
    import scipy
    import ofiber
except ModuleNotFoundError:
    print('ofiber is not installed. To install, uncomment and run the cell above.')
    print('Once installation is successful, rerun this cell again.')
# to make graphs a bit better
%config InlineBackend.figure_format='retina'
plt.style.use('seaborn-talk')  # NOTE(review): this style name was removed in newer matplotlib; may need 'seaborn-v0_8-talk'
```
## Analog and digital modulation
### Amplitude Modulation
Ghatak Figure 13.2
```
# Carrier, modulation, and resulting AM waveforms over 10 s (Ghatak fig 13.2).
t=np.linspace(0,10,500)
fcarrier = 2*np.pi*2    # carrier angular frequency (2 Hz)
fmod = 2*np.pi*0.15     # modulation angular frequency (0.15 Hz)
plt.subplots(3,1,figsize=(8,8))
plt.subplot(3,1,1)
plt.plot(t, np.sin(fcarrier*t))
plt.title('Amplitude Modulation, Ghatak fig 13.2')
plt.ylabel('Carrier Signal')
plt.subplot(3,1,2)
plt.plot(t, np.sin(fmod*t))
plt.ylabel('Modulation Signal')
plt.subplot(3,1,3)
plt.plot(t, np.sin(fmod*t)*np.sin(fcarrier*t))
# dotted curves show the +/- modulation envelope
plt.plot(t, np.sin(fmod*t),':b')
plt.plot(t, -np.sin(fmod*t),':b')
plt.ylabel('AM Signal')
plt.xlabel('Time')
plt.show()
```
### Frequency Modulation
Figure 13.3
```
# Carrier, modulation, and resulting FM waveforms (Ghatak fig 13.3).
t=np.linspace(0,10,500)
fcarrier = 2*np.pi*2    # carrier angular frequency (2 Hz)
fmod = 2*np.pi*0.15     # modulation angular frequency (0.15 Hz)
plt.subplots(3,1,figsize=(8,8))
plt.subplot(3,1,1)
plt.plot(t, np.sin(fcarrier*t))
plt.title('Frequency Modulation, Ghatak fig 13.3')
plt.ylabel('Carrier Signal')
plt.subplot(3,1,2)
plt.plot(t, np.sin(fmod*t))
plt.ylabel('Modulation Signal')
plt.subplot(3,1,3)
# instantaneous frequency varies with the modulation signal
ff = fcarrier + np.sin(fmod*t)
plt.plot(t, np.sin(ff*t))
plt.ylabel('FM Signal')
plt.xlabel('Time')
plt.show()
```
### Digital Sampling
Ghatak Figure 13.4. Pulse amplitude modulation. Encoding
```
def eqn13_1(omega,t):
    """Composite analog test signal of Ghatak Eq. 13.1, scaled for plotting.

    Parameters
    ----------
    omega : base angular frequency.
    t : time, a scalar or numpy array.

    Returns
    -------
    15*(4 + sin(wt) + sin(1.5wt) + cos(2wt) + cos(3wt) + sin(4wt)),
    with the same shape as `t`.
    """
    # Accumulate into `total` rather than shadowing the builtin `sum`.
    total = 4 + np.sin(omega*t) + np.sin(1.5*omega*t)
    total += np.cos(2*omega*t) + np.cos(3*omega*t) + np.sin(4*omega*t)
    return 15*total
t=np.linspace(0,4*np.pi,500)        # dense time axis for the analog curve
tpulse = np.linspace(0,4*np.pi,17)  # 17 sample instants for PAM
omega=1
plt.subplots(4,1,figsize=(8,8))
plt.subplot(4,1,1)
plt.title('Ghatak fig 13.4')
plt.plot(t, eqn13_1(omega,t))
plt.vlines(tpulse,[0], eqn13_1(omega,tpulse))
plt.ylim(0,120)
plt.ylabel('Signal')
plt.xlim(-0.5,6)
plt.subplot(4,1,2)
plt.vlines(tpulse,[0], eqn13_1(omega,tpulse))
plt.ylim(0,120)
plt.ylabel('PAM Signal')
plt.xlim(-0.5,6)
plt.subplot(4,1,3)
plt.vlines(tpulse,[0], eqn13_1(omega,tpulse))
# Label each pulse with its rounded decimal value.
# (Note: this loop rebinds `t` from the time array to a scalar.)
for i in range(len(tpulse)):
    t = tpulse[i]
    y = eqn13_1(omega,t)
    plt.annotate("%.0f"%y,(t,y),ha='center',va='bottom')
plt.ylabel('PCM Digital')
plt.ylim(0,120)
plt.xlim(-0.5,6)
plt.subplot(4,1,4)
plt.vlines(tpulse,[0], eqn13_1(omega,tpulse))
# Label each pulse with its binary representation.
for i in range(len(tpulse)):
    t = tpulse[i]
    y = eqn13_1(omega,t)
    s = bin(int(y))
    plt.annotate(s[2:],(t,y),ha='center',va='bottom')  # s[2:] strips the '0b' prefix
plt.ylabel('PCM Binary')
plt.ylim(0,120)
plt.xlim(-0.5,6)
plt.show()
```
### Pulse Code Modulation
Return-to-zero and non-return-to-zero. Like Ghatak figure 13.7
```
# Return-to-zero vs non-return-to-zero encoding of a bit stream (fig 13.7).
bits = [1,0,1,1,0,1,0,1,0,0,1,0,0,1]
data = np.repeat(bits, 2)             # two half-bit samples per bit
clock = 1 - np.arange(len(data)) % 2  # alternating 1,0 clock at twice the bit rate
t = 0.5 * np.arange(len(data))
plt.subplots(figsize=(8,8))
plt.step(t, clock + 4, 'r', linewidth = 2, where='post')       # clock trace (offset +4)
plt.step(t, data, 'r', linewidth = 2, where='post')            # NRZ trace
plt.step(t, data*clock + 2, 'r', linewidth = 2, where='post')  # RZ trace (offset +2)
plt.ylim([-1,6])
for tbit, bit in enumerate(bits):
    plt.text(tbit+0.1, 1.5, str(bit))
plt.annotate('clock',xy=(14,4))
plt.annotate('NRZ',xy=(14,0))
plt.annotate('RZ',xy=(14,2))
plt.gca().axis('off')
plt.title('Pulse Code Modulation (fig 13.7)')
plt.xlim(0,16)
plt.show()
```
## Noise in Detection Processes
### Shot Noise
$$
\left\langle i_{NS}^2\right\rangle = 2 q I \Delta f
$$
There is always some dark current $I_d$ that arises from thermal processes.
$$
\left\langle i_{NS}^2 \right\rangle = 2 q (I + I_d) \Delta f
$$
$$
I_d \approx 1-10\mbox{ nA in Silicon}
$$
$$
I_d \approx 50-500\mbox{ nA in Germanium}
$$
$$
I_d \approx 1-20\mbox{ nA in InGaAs}
$$
Ghatak Example 13.3
```
# Ghatak Example 13.3: shot noise for a 1 uW optical signal.
sensitivity = 0.65 # A/W
optical_signal = 1e-6 # W
I = optical_signal * sensitivity # A
print("The photo induced current is %.2f uA"%(I*1e6))
Idark = 1e-9       # dark current (A)
bandwidth = 100e6  # Hz
ns = ofiber.shot_noise(I,Idark,bandwidth)
print("The shot noise is %.2f nA"%(ns*1e9))
```
### Thermal Noise
$$
\left\langle i_{NT}^2\right\rangle = \frac{ 4kT \Delta f}{R_{load}}
$$
Ghatak Example 13.4
```
# Ghatak Example 13.4: thermal (Johnson) noise of the load resistor.
T = 300 # K
Rload = 1000 # Ohms
bandwidth = 100e6  # Hz
nt = ofiber.thermal_noise(T,Rload,bandwidth)
print("The thermal noise is %.1f nA"%(nt*1e9))
```
### Signal to Noise Ratio
$$
\mbox{SNR} = \frac{\mbox{average signal power}}{\mbox{total noise power}}
$$
Average signal power is
$$
P_{ave} = \eta P_{optical}
$$
where $\eta$ is the responsivity of the detector.
$$
\mbox{SNR} = \frac{\eta^2 P_{ave}^2}{ 2q(I+I_d)\Delta f + 4kT\Delta f/R_{load} }
$$
The minimum detectable optical power corresponds to the situation when the signal power and the noise power are equal.
**This optical power is referred to as the noise equivalent power or NEP**
$$
\mbox{NEP} = \frac{1}{\eta} \sqrt{ 2qI_d + \frac{4kT}{R_{load}} }
$$
in units of watts/sqrt(Hz)
Ghatak Example 13.5
```
# Ghatak Example 13.5: noise equivalent power, with and without thermal noise.
responsivity = 0.65 # A/W
Idark = 1e-9 # A
T = 300 # K
rload = 1000 # Ohms
nep = ofiber.NEP(responsivity,rload,Idark,T)
print("The noise equivalent power is %.1f pW/Hz**0.5"%(nep*1e12))
nep = ofiber.NEP(responsivity,rload,Idark,0)  # T=0 removes the thermal contribution
print("The noise equivalent power is %.1f fW/Hz**0.5 (no thermal noise)"%(nep*1e15))
# Plot NEP*sqrt(bandwidth) over frequency for both cases.
responsivity = 0.65 # A/W
Idark = 1e-9 # A
T = 300 # K
rload = 1000 # Ohms
f = np.geomspace(1,1e9)
nep = ofiber.NEP(responsivity,rload,Idark,T)
plt.loglog(f,nep*np.sqrt(f))
nep = ofiber.NEP(responsivity,rload,Idark,0)
plt.loglog(f,nep*np.sqrt(f))
plt.ylabel("NEP in Watts")
plt.xlabel("Frequency (Hz)")
plt.show()
```
## Relative noise magnitudes for photodetector
Ghatak Example 13.6
```
# Ghatak Example 13.6: compare shot, dark-current, and thermal noise
# for a detector at a fixed 100 MHz bandwidth.
responsivity = 0.65 # A/W
Idark = 1e-9 # A
BW = 100e6 # Hz
T = 300 # K
rload = 1000 # Ohms
P = 500e-9 # W
I = P * responsivity # A
print('The photoinduced current is %.1f nA'%(I*1e9))
noise1 = ofiber.shot_noise(I,0,BW)
print('Shot noise due to induced current is %.1f nA'%(noise1*1e9))
noise2 = ofiber.shot_noise(0,Idark,BW)
print('Shot noise due to dark current is %.1f nA'%(noise2*1e9))
noise3 = ofiber.thermal_noise(T,rload,BW)
print('Thermal noise is %.1f nA'%(noise3*1e9))
# independent noise sources add in quadrature
noise = np.sqrt(noise1**2 + noise2**2 + noise3**2)
print('Total noise is %.1f nA'%(noise*1e9))
snr = I**2/noise**2
print('The SNR is %.0f or %.0f dB'%(snr, 10*np.log10(snr)))
# Repeat as a sweep over bandwidth and plot the three contributions.
responsivity = 0.65 # A/W
Idark = 1e-9 # A
BW = 100e6 # Hz
T = 300 # K
rload = 1000 # Ohms
P = 500e-9 # W
I = P * responsivity # A
BW = np.geomspace(1,1e9)  # overrides the scalar BW above
nep = ofiber.NEP(responsivity,rload,Idark,T)  # NOTE(review): computed but never used below
noise1 = ofiber.shot_noise(I,0,BW)
noise2 = ofiber.shot_noise(0,Idark,BW)
noise3 = ofiber.thermal_noise(T,rload,BW)
noise = np.sqrt(noise1**2 + noise2**2 + noise3**2)
plt.loglog(BW,noise1,label="induced noise")
plt.loglog(BW,noise2,label="dark current noise")
plt.loglog(BW,noise3,label="thermal noise")
plt.ylabel("Noise (A)")
plt.xlabel("Bandwidth (Hz)")
plt.legend()
plt.show()
```
## Noise in an Avalanche Photodiode
Ghatak Example 13.7
```
# Ghatak Example 13.7: noise in an avalanche photodiode with gain M=50.
responsivity = 0.65 # A/W
Idark = 1e-9 # A
BW = 100e6 # Hz
T = 300 # K
rload = 1000 # Ohms
P = 500e-9 # W
M = 50   # avalanche gain
x = 0.0  # excess-noise exponent (idealized; shot noise scales as M**(2+x))
I = M * P * responsivity # A
print('The photoinduced current is %.1f uA'%(I*1e6))
noise1 = ofiber.shot_noise(I,0,BW,M=M,x=x)
print('Shot noise due to induced current is %.1f nA'%(noise1*1e9))
noise2 = ofiber.shot_noise(0,Idark,BW,M=M,x=x)
print('Shot noise due to dark current is %.1f nA'%(noise2*1e9))
noise3 = ofiber.thermal_noise(T,rload,BW)
print('Thermal noise is %.1f nA'%(noise3*1e9))
noise = np.sqrt(noise1**2+noise2**2+noise3**2)
snr = I**2/noise**2
print('The SNR is %.0f or %.0f dB'%(snr, 10*np.log10(snr)))
# Repeat as a bandwidth sweep and plot the three contributions.
responsivity = 0.65 # A/W
Idark = 1e-9 # A
BW = 100e6 # Hz
T = 300 # K
rload = 1000 # Ohms
P = 500e-9 # W
M = 50
x = 0.0
I = M * P * responsivity # A
BW = np.geomspace(1,1e9)  # overrides the scalar BW above
nep = ofiber.NEP(responsivity,rload,Idark,T)  # NOTE(review): computed but never used below
noise1 = ofiber.shot_noise(I,0,BW,M=M,x=x)
noise2 = ofiber.shot_noise(0,Idark,BW,M=M,x=x)
noise3 = ofiber.thermal_noise(T,rload,BW)
noise = np.sqrt(noise1**2 + noise2**2 + noise3**2)
plt.loglog(BW,noise1,label="induced noise")
plt.loglog(BW,noise2,label="dark current noise")
plt.loglog(BW,noise3,label="thermal noise")
plt.ylabel("Noise (A)")
plt.xlabel("Bandwidth (Hz)")
plt.legend()
plt.show()
```
### Amplification for Maximum SNR
$$
\left\langle i_{NS}^2\right\rangle = M^{2+x} 2 q(\eta P_{optical}+I_d) \Delta f
$$
Silicon APD $x\approx0.3$
InGaAs APD $x\approx0.7$
Germanium APD $x\approx1$
$$
\mbox{SNR} = \frac{M^2\eta^2 P_{optical}^2}{ 2 qM^{2+x} (\eta P_{optical}+I_d) \Delta f + 4kT/R_{load} \Delta f}
$$
```
# SNR versus APD gain for three excess-noise exponents.  The original cell
# repeated the identical computation verbatim for x = 0.3, 0.7, 1.0; the
# repetition is folded into a single loop.
responsivity = 0.65 # A/W
Idark = 1e-9        # A
BW = 100e6          # Hz
T = 300             # K
rload = 1000        # Ohms
P = 500e-9          # W
M = np.linspace(1,60)         # range of avalanche gain factors
I = M * P * responsivity      # photocurrent at each gain (A)

# One SNR curve per excess-noise exponent
# (x ~ 0.3 silicon, 0.7 InGaAs, 1.0 germanium -- see text above).
for x in (0.3, 0.7, 1.0):
    noise1 = ofiber.shot_noise(I,0,BW,M=M,x=x)        # shot noise from signal current
    noise2 = ofiber.shot_noise(0,Idark,BW,M=M,x=x)    # shot noise from dark current
    noise3 = ofiber.thermal_noise(T,rload,BW)         # gain-independent thermal noise
    noise = np.sqrt(noise1**2+noise2**2+noise3**2)    # quadrature sum
    snr = I**2/noise**2
    plt.plot(M,snr)
    ii=np.argmax(snr)                                 # gain that maximizes SNR
    plt.annotate('x=%.1f, M=%.0f'%(x,M[ii]),xy=(M[ii],snr[ii]),va='bottom',ha='left')

plt.xlabel('APD Gain Factor')
plt.ylabel('SNR')
plt.show()
```
### Silicon APD
Ghatak Example 13.8
```
# Ghatak Example 13.8: silicon APD (x=0.3) -- SNR at the optimum gain
# compared with no avalanche gain (M=1).
responsivity = 0.65 # A/W
Idark = 1e-9 # A
BW = 100e6 # Hz
T = 300 # K
rload = 1000 # Ohms
P = 100e-9 # W
x = 0.3   # excess-noise exponent for silicon
I = P * responsivity # A
M = ofiber.best_APD_gain(I,rload,Idark,x,T)  # gain that maximizes SNR
print('For M=%.1f'%M)
I = M * P * responsivity # A
print('The photoinduced current is %.3f uA'%(I*1e6))
noise1 = ofiber.shot_noise(I,0,BW,M=M,x=x)
print('Shot noise due to induced current is %.1f nA'%(noise1*1e9))
noise2 = ofiber.shot_noise(0,Idark,BW,M=M,x=x)
print('Shot noise due to dark current is %.1f nA'%(noise2*1e9))
noise3 = ofiber.thermal_noise(T,rload,BW)
print('Thermal noise is %.1f nA'%(noise3*1e9))
noise = np.sqrt(noise1**2+noise2**2+noise3**2)
snr = I**2/noise**2
print('The SNR is %.1f or %.1f dB'%(snr, 10*np.log10(snr)))
print()
# Same calculation without avalanche gain, for comparison.
M = 1
print('For M=%d'%M)
I = M * P * responsivity # A
print('The photoinduced current is %.3f uA'%(I*1e6))
noise1 = ofiber.shot_noise(I,0,BW,M=M,x=x)
print('Shot noise due to induced current is %.1f nA'%(noise1*1e9))
noise2 = ofiber.shot_noise(0,Idark,BW,M=M,x=x)
print('Shot noise due to dark current is %.1f nA'%(noise2*1e9))
noise3 = ofiber.thermal_noise(T,rload,BW)
print('Thermal noise is %.1f nA'%(noise3*1e9))
noise = np.sqrt(noise1**2+noise2**2+noise3**2)
snr = I**2/noise**2
print('The SNR is %.1f or %.1f dB'%(snr, 10*np.log10(snr)))
```
### InGaAs APD
Ghatak Example 13.9
```
# Ghatak Example 13.9: InGaAs APD (x=0.7) at optimal gain vs M=1.
# NOTE(review): BW is not defined in this cell -- it relies on a value
# (100 MHz) left over from a previous cell; confirm before running standalone.
responsivity = 0.6 # A/W
Idark = 0 # A
T = 300 # K
rload = 1000 # Ohms
P = 500e-9 # W
x = 0.7   # excess-noise exponent for InGaAs
I = P * responsivity
M = ofiber.best_APD_gain(I,rload,Idark,x,T)  # gain that maximizes SNR
print('Optimal APD gain is M=%.1f'%M)
print('For M=%d'%M)
I = M * P * responsivity # A
print('The photoinduced current is %.3f uA'%(I*1e6))
noise1 = ofiber.shot_noise(I,0,BW,M=M,x=x)
print('Shot noise due to induced current is %.1f nA'%(noise1*1e9))
noise2 = ofiber.shot_noise(0,Idark,BW,M=M,x=x)
print('Shot noise due to dark current is %.1f nA'%(noise2*1e9))
noise3 = ofiber.thermal_noise(T,rload,BW)
print('Thermal noise is %.1f nA'%(noise3*1e9))
noise = np.sqrt(noise1**2+noise2**2+noise3**2)
snr = I**2/noise**2
print('The SNR is %.1f or %.1f dB'%(snr, 10*np.log10(snr)))
print()
# Same calculation without avalanche gain, for comparison.
M = 1
print('For M=%d'%M)
I = M * P * responsivity # A
print('The photoinduced current is %.3f uA'%(I*1e6))
noise1 = ofiber.shot_noise(I,0,BW,M=M,x=x)
print('Shot noise due to induced current is %.1f nA'%(noise1*1e9))
noise2 = ofiber.shot_noise(0,Idark,BW,M=M,x=x)
print('Shot noise due to dark current is %.1f nA'%(noise2*1e9))
noise3 = ofiber.thermal_noise(T,rload,BW)
print('Thermal noise is %.1f nA'%(noise3*1e9))
noise = np.sqrt(noise1**2+noise2**2+noise3**2)
snr = I**2/noise**2
print('The SNR is %.1f or %.1f dB'%(snr, 10*np.log10(snr)))
```
### Germanium Detector
Ghatak Example 13.10
```
# Ghatak Example 13.10: germanium APD (x=1.0) at optimal gain vs M=1.
# NOTE(review): BW is not defined in this cell -- it relies on a value
# (100 MHz) left over from a previous cell; confirm before running standalone.
responsivity = 0.45 # A/W
Idark = 0 # A
T = 300 # K
rload = 1000 # Ohms
P = 100e-9 # W
x = 1.0   # excess-noise exponent for germanium
I = P * responsivity
M = ofiber.best_APD_gain(I,rload,Idark,x,T)  # gain that maximizes SNR
print('Optimal APD gain is M=%.1f'%M)
print('For M=%d'%M)
I = M * P * responsivity # A
print('The photoinduced current is %.3f uA'%(I*1e6))
noise1 = ofiber.shot_noise(I,0,BW,M=M,x=x)
print('Shot noise due to induced current is %.1f nA'%(noise1*1e9))
noise2 = ofiber.shot_noise(0,Idark,BW,M=M,x=x)
print('Shot noise due to dark current is %.1f nA'%(noise2*1e9))
noise3 = ofiber.thermal_noise(T,rload,BW)
print('Thermal noise is %.1f nA'%(noise3*1e9))
noise = np.sqrt(noise1**2+noise2**2+noise3**2)
snr = I**2/noise**2
print('The SNR is %.1f or %.1f dB'%(snr, 10*np.log10(snr)))
print()
# Same calculation without avalanche gain, for comparison.
M = 1
print('For M=%d'%M)
I = M * P * responsivity # A
print('The photoinduced current is %.3f uA'%(I*1e6))
noise1 = ofiber.shot_noise(I,0,BW,M=M,x=x)
print('Shot noise due to induced current is %.1f nA'%(noise1*1e9))
noise2 = ofiber.shot_noise(0,Idark,BW,M=M,x=x)
print('Shot noise due to dark current is %.1f nA'%(noise2*1e9))
noise3 = ofiber.thermal_noise(T,rload,BW)
print('Thermal noise is %.1f nA'%(noise3*1e9))
noise = np.sqrt(noise1**2+noise2**2+noise3**2)
snr = I**2/noise**2
print('The SNR is %.1f or %.1f dB'%(snr, 10*np.log10(snr)))
```
## Bit Error Rate (BER)
$$
\mbox{BER} = \frac{\mbox{bits read erroneously in a duration t}}{\mbox{total number of bits received in duration t}}
$$
and
$$
\mbox{BER} = \frac{1}{2}\left[1-\mbox{erf}\left(\frac{\sqrt{\mbox{SNR}}}{2\sqrt{2}} \right)\right]
$$
Ghatak Fig 13.12
```
# Ghatak Fig 13.12: bit error rate vs SNR; the dotted line marks the
# commonly quoted BER = 1e-9 threshold.
snr = np.linspace(0,25,50) # dB
ber = ofiber.BER_at_SNR(10**(snr/10))  # convert dB to a linear ratio first
plt.plot([0,25],[1e-9,1e-9],':k')
plt.semilogy(snr,ber)
plt.xlabel('SNR (dB)')
plt.ylabel('BER')
plt.ylim(1e-15,1e0)
plt.show()
```
## Example 13.11
```
# Ghatak Example 13.11: minimum detectable (thermal-noise-limited) optical
# power for a target BER, at two bitrates.
C = 1e-12 # Farads
T = 300 # K
responsivity = 0.5 # A/W
ber = 1e-12 #
snr = ofiber.SNR_at_BER(ber)  # SNR needed to achieve the target BER
bitrate = 100e6 # bits/second
pmin = ofiber.thermal_min_power(bitrate,responsivity,C,T,snr)
dbm = 10*np.log10(pmin/1e-3)  # W -> dBm
print('for a bitrate of 100Mb the minimum optical power is %.2f uW or %.1f dBm'%(pmin*1e6,dbm))
bitrate = 1e9 # bits/second
snr = ofiber.SNR_at_BER(ber)
pmin = ofiber.thermal_min_power(bitrate,responsivity,C,T,snr)
dbm = 10*np.log10(pmin/1e-3)
print('for a bitrate of 1Gb the minimum optical power is %.2f uW or %.1f dBm'%(pmin*1e6,dbm))
```
## Example 13.12
```
# Ghatak Example 13.12.
# NOTE(review): this cell is byte-identical to Example 13.11 above; the
# intended parameters for 13.12 probably differ -- verify against the text.
C = 1e-12 # Farads
T = 300 # K
responsivity = 0.5 # A/W
ber = 1e-12 #
snr = ofiber.SNR_at_BER(ber)
bitrate = 100e6 # bits/second
pmin = ofiber.thermal_min_power(bitrate,responsivity,C,T,snr)
dbm = 10*np.log10(pmin/1e-3)  # W -> dBm
print('for a bitrate of 100Mb the minimum optical power is %.2f uW or %.1f dBm'%(pmin*1e6,dbm))
bitrate = 1e9 # bits/second
snr = ofiber.SNR_at_BER(ber)
pmin = ofiber.thermal_min_power(bitrate,responsivity,C,T,snr)
dbm = 10*np.log10(pmin/1e-3)
print('for a bitrate of 1Gb the minimum optical power is %.2f uW or %.1f dBm'%(pmin*1e6,dbm))
```
## Quantum baseline
```
# Quantum-limited minimum power for 1 Gb/s at 1550 nm and BER 1e-9.
bitrate = 1e9 # bits/second
lambda0 = 1550e-9 # m
ber = 1e-9
pquant = ofiber.quantum_min_power(bitrate,ber,lambda0)
dbm = 10*np.log10(pquant/1e-3)  # W -> dBm
print('for a bitrate of 1Gb the minimum optical power is %.2f nW or %.1f dBm'%(pquant*1e9,dbm))
```
| github_jupyter |
# Softmax exercise
*Complete and hand in this completed worksheet (including its outputs and any supporting code outside of the worksheet) with your assignment submission. For more details see the [assignments page](http://vision.stanford.edu/teaching/cs231n/assignments.html) on the course website.*
This exercise is analogous to the SVM exercise. You will:
- implement a fully-vectorized **loss function** for the Softmax classifier
- implement the fully-vectorized expression for its **analytic gradient**
- **check your implementation** with numerical gradient
- use a validation set to **tune the learning rate and regularization** strength
- **optimize** the loss function with **SGD**
- **visualize** the final learned weights
```
import random
import numpy as np
from cs231n.data_utils import load_CIFAR10
import matplotlib.pyplot as plt
%matplotlib inline
# default plotting configuration for the whole notebook
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000):
    """
    Load the CIFAR-10 dataset from disk and perform preprocessing to prepare
    it for the linear classifier. These are the same steps as we used for the
    SVM, but condensed to a single function.
    """
    # Load the raw CIFAR-10 data
    cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
    X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
    # subsample the data: validation rows come right after the training rows
    mask = range(num_training, num_training + num_validation)
    X_val = X_train[mask]
    y_val = y_train[mask]
    mask = range(num_training)
    X_train = X_train[mask]
    y_train = y_train[mask]
    mask = range(num_test)
    X_test = X_test[mask]
    y_test = y_test[mask]
    # Preprocessing: reshape the image data into rows (one flattened image per row)
    X_train = np.reshape(X_train, (X_train.shape[0], -1))
    X_val = np.reshape(X_val, (X_val.shape[0], -1))
    X_test = np.reshape(X_test, (X_test.shape[0], -1))
    # Normalize the data: subtract the mean image (computed on training data only)
    mean_image = np.mean(X_train, axis = 0)
    X_train -= mean_image
    X_val -= mean_image
    X_test -= mean_image
    # add bias dimension and transform into columns (note the final transpose)
    X_train = np.hstack([X_train, np.ones((X_train.shape[0], 1))]).T
    X_val = np.hstack([X_val, np.ones((X_val.shape[0], 1))]).T
    X_test = np.hstack([X_test, np.ones((X_test.shape[0], 1))]).T
    return X_train, y_train, X_val, y_val, X_test, y_test
# Invoke the above function to get our data and report the array shapes.
# Fixed: the original used Python 2 print statements, which are a
# SyntaxError under Python 3; converted to print() calls.
X_train, y_train, X_val, y_val, X_test, y_test = get_CIFAR10_data()
print('Train data shape: ', X_train.shape)
print('Train labels shape: ', y_train.shape)
print('Validation data shape: ', X_val.shape)
print('Validation labels shape: ', y_val.shape)
print('Test data shape: ', X_test.shape)
print('Test labels shape: ', y_test.shape)
```
## Softmax Classifier
Your code for this section will all be written inside **cs231n/classifiers/softmax.py**.
```
# First implement the naive softmax loss function with nested loops.
# Open the file cs231n/classifiers/softmax.py and implement the
# softmax_loss_naive function.
# Fixed: Python 2 print statements converted to Python 3 print() calls.
from cs231n.classifiers.softmax import softmax_loss_naive
import time

# Generate a random softmax weight matrix and use it to compute the loss.
W = np.random.randn(10, 3073) * 0.0001
loss, grad = softmax_loss_naive(W, X_train, y_train, 0.0)

# As a rough sanity check, our loss should be something close to -log(0.1).
print('loss: %f' % loss)
print('sanity check: %f' % (-np.log(0.1)))
```
## Inline Question 1:
Why do we expect our loss to be close to -log(0.1)? **Explain briefly.**
**Your answer:**
* Because we only have 10 classes, with current setting, the softmax result should be similar to __random guess a class in those 10 classes__ . And this turns out to be near 0.10
```
# Complete the implementation of softmax_loss_naive and implement a (naive)
# version of the gradient that uses nested loops.
# Fixed: Python 2 print statements converted to Python 3 print() calls.
loss, grad = softmax_loss_naive(W, X_train, y_train, 0.0)

# As we did for the SVM, use numeric gradient checking as a debugging tool.
# The numeric gradient should be close to the analytic gradient.
from cs231n.gradient_check import grad_check_sparse
f = lambda w: softmax_loss_naive(w, X_train, y_train, 0.0)[0]
grad_numerical = grad_check_sparse(f, W, grad, 10)

# Now that we have a naive implementation of the softmax loss function and its gradient,
# implement a vectorized version in softmax_loss_vectorized.
# The two versions should compute the same results, but the vectorized version should be
# much faster.
tic = time.time()
loss_naive, grad_naive = softmax_loss_naive(W, X_train, y_train, 0.00001)
toc = time.time()
print('naive loss: %e computed in %fs' % (loss_naive, toc - tic))

from cs231n.classifiers.softmax import softmax_loss_vectorized
tic = time.time()
loss_vectorized, grad_vectorized = softmax_loss_vectorized(W, X_train, y_train, 0.00001)
toc = time.time()
print('vectorized loss: %e computed in %fs' % (loss_vectorized, toc - tic))

# As we did for the SVM, we use the Frobenius norm to compare the two versions
# of the gradient.
grad_difference = np.linalg.norm(grad_naive - grad_vectorized, ord='fro')
print('Loss difference: %f' % np.abs(loss_naive - loss_vectorized))
print('Gradient difference: %f' % grad_difference)
# Use the validation set to tune hyperparameters (regularization strength and
# learning rate). You should experiment with different ranges for the learning
# rates and regularization strengths; if you are careful you should be able to
# get a classification accuracy of over 0.35 on the validation set.
# Fixed: Python 2 print statements converted to Python 3 print() calls.
from cs231n.classifiers import Softmax
results = {}          # (lr, reg) -> (train accuracy, val accuracy)
best_val = -1         # best validation accuracy seen so far
best_softmax = None   # classifier that achieved best_val
learning_rates = [5e-7, 1e-7, 5e-6, 1e-6]
regularization_strengths = [5e4, 1e5]
################################################################################
# TODO:                                                                        #
# Use the validation set to set the learning rate and regularization strength. #
# This should be identical to the validation that you did for the SVM; save    #
# the best trained softmax classifer in best_softmax.                          #
################################################################################
import sys
verbose = True
# Grid search over every (learning rate, regularization) pair.
for lr in learning_rates:
    for reg in regularization_strengths:
        if verbose: sys.stdout.write("Training with hyper parameter learning rate: %e, regularization: %e\n"
                                     % ( lr, reg ))
        softmax = Softmax()
        loss_hist = softmax.train(X_train, y_train, learning_rate=lr, reg=reg,
                                  num_iters=1500, verbose=False)
        y_train_pred = softmax.predict(X_train)
        training_accuracy = np.mean(y_train == y_train_pred)
        y_val_pred = softmax.predict(X_val)
        val_accuracy = np.mean(y_val == y_val_pred)
        results[lr, reg] = (training_accuracy, val_accuracy)
        if val_accuracy > best_val:
            best_val = val_accuracy
            best_softmax = softmax

# Print out results.
for lr, reg in sorted(results):
    train_accuracy, val_accuracy = results[(lr, reg)]
    print('lr %e reg %e train accuracy: %f val accuracy: %f' % (
        lr, reg, train_accuracy, val_accuracy))
print('best validation accuracy achieved during cross-validation: %f' % best_val)

# evaluate on test set
# Evaluate the best svm on test set
y_test_pred = best_softmax.predict(X_test)
test_accuracy = np.mean(y_test == y_test_pred)
print('softmax on raw pixels final test set accuracy: %f' % (test_accuracy, ))
# Visualize the learned weights for each class.
# Fixed: `xrange` (Python 2 only) replaced with `range`.
w = best_softmax.W[:,:-1] # strip out the bias
w = w.reshape(10, 32, 32, 3)
w_min, w_max = np.min(w), np.max(w)
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
for i in range(10):
    plt.subplot(2, 5, i + 1)
    # Rescale the weights to be between 0 and 255
    wimg = 255.0 * (w[i].squeeze() - w_min) / (w_max - w_min)
    plt.imshow(wimg.astype('uint8'))
    plt.axis('off')
    plt.title(classes[i])
```
| github_jupyter |
# Exploring advanced Symbol options in Magics
This notebook will help you discover lots of posibilities for plotting symbols on your maps in Magics.
**Msymbol** controls how symbols are plotted on our maps. Here we can set things like symbol type, size, colour, style, and many more things.
List of all **msymbol** parameters you can find [in Magics documentation](https://confluence.ecmwf.int/display/MAGP/Symbol "Symbol parameters")
Magics has built in collection of meteorological symbols and all you have to do is give it WMO code. Full list of WMO codes and descriptions for present/past weather, sky cover, pressure tendency and clouds you can find [on Wikipedia](https://commons.wikimedia.org/wiki/Weather_map "Weather map symbols").
Here are Magics names for meteorological symbols:
|marker type | names |
|---------|--------|
| present weather | ww_00,.., ww_99 |
| past weather | W_0,.., W_9 |
| low clouds | CL_1,.., CL_9 |
| medium clouds | CM_1,.., CM_9 |
| high clouds | CH_1,.., CH_9 |
| type of cloud| C_0,.., C_9|
| total amount all clouds | N_0,.., N_9 |
| atmospheric pressure tendency | a_0,.., a_9 |
Here is list of built in symbol indices:

At ECMWF
#### From your workstation:
module swap (or load) Magics/new
jupyter notebook
load this notebook
### Import Magics and define non Symbol paramters
For start let's import Magics and define some **non symbol** parameters. We will try not to change these much in the rest of the notebook.
Both cartesian and two geographical projections are defined at the begining.
```
import Magics.macro as magics
import numpy as np
#Setting the cartesian view
cartesian_projection = magics.mmap(
subpage_y_position = 2.,
subpage_background_colour = 'black',
subpage_map_projection = 'cartesian',
subpage_x_axis_type = 'regular',
subpage_y_axis_type = 'regular',
subpage_x_min = 0.,
subpage_x_max = 100.,
subpage_y_min = 0.,
subpage_y_max = 100.)
#Vertical axis
vertical = magics.maxis(
axis_orientation = "vertical",
axis_type = "regular",
axis_tick_label_height = 0.4,
axis_tick_label_colour = 'navy',
axis_grid = "on",
axis_grid_colour = "white",
axis_grid_thickness = 1,
axis_grid_line_style = "dot")
#Horizontal axis
horizontal = magics.maxis(
axis_orientation = "horizontal",
axis_type = "regular",
axis_minor_tick = "on",
axis_grid = "on",
axis_tick_label_height = 0.4,
axis_grid_colour = "grey",
axis_grid_thickness = 1,
axis_grid_line_style = "dot")
#Geographical projection (named library area)
central_europe = magics.mmap(
superpage_background_colour = 'black',
subpage_map_library_area = "on",
subpage_map_area_name = "central_europe"
)
#Geographical projection (explicit polar-stereographic corners)
north_america = magics.mmap(
subpage_upper_right_longitude = -15.00,
subpage_upper_right_latitude = 30.00,
subpage_lower_left_latitude = -5.00,
subpage_map_vertical_longitude = -100.00,
subpage_lower_left_longitude = -140.00,
subpage_map_projection = "polar_stereographic")
#Coastlines (shared by the geographic plots below)
coast = magics.mcoast(
map_coastline_colour = "RGB(0.8,0.8,0.8,0.5)",
map_coastline_resolution = "medium",
map_coastline_thickness = 1,
map_coastline_land_shade = "on",
map_coastline_land_shade_colour = "RGB(0.25,0.25,0.25)",
map_coastline_sea_shade = "on",
map_coastline_sea_shade_colour = "black",
map_grid_line_style = "dash",
map_label_height = 0.35,
map_grid_colour = "RGB(0.8,0.8,0.8,0.5)")
#Legend
legend = magics.mlegend(legend_display_type = "continuous")
# Different meteorological parameters we will plot in this notebook
WW = magics.mgeo(geo_input_file_name = "WW.gpt") # Present weather
airep = magics.mgeo(geo_input_file_name = "airep.geo") # Observed temperature from aircrafts
tcc = magics.mgeo(geo_input_file_name = "tcc.gpt") # Total cloud amount
```
### Plotting Present weather symbols on a map
To plot different meteorological symbols for present weather and colour them accordingly we are using the **advanced table mode**.
```
# Colour for each present-weather (WW) code 0..99; "none" suppresses the symbol.
colour_list = ["none", "none", "none", "none", "cream", "cream", "cream", "cream",
"cream", "cream","yellow", "yellow", "yellow", "red", "kelly_green",
"kelly_green", "kelly_green", "red", "red", "red", "kelly_green",
"kelly_green", "white", "white", "red", "kelly_green", "white",
"red", "yellow", "red", "cream", "cream", "cream", "cream", "cream",
"cream", "white", "white", "white", "white", "yellow", "yellow",
"yellow", "yellow", "yellow", "yellow", "yellow", "yellow", "yellow",
"yellow", "kelly_green", "kelly_green", "kelly_green", "kelly_green",
"kelly_green", "kelly_green", "red", "red", "kelly_green",
"kelly_green", "kelly_green", "kelly_green", "kelly_green",
"kelly_green", "kelly_green", "kelly_green", "red", "red", "white",
"white", "white", "white","white", "white", "white", "white", "red",
"red", "red", "orange", "kelly_green", "kelly_green", "kelly_green",
"white", "white","white", "white", "red", "red", "red", "red", "red",
"red", "red", "red", "red", "red", "red", "red", "red"]
# One level per WMO present-weather code (0..99).
level_list = [0.,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,
25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,
47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,
69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,
91,92,93,94,95,96,97,98,99]
# Built-in Magics marker names ww_00..ww_99 matching the WMO codes above.
symb_name_list = ['ww_00', 'ww_01', 'ww_02', 'ww_03', 'ww_04', 'ww_05', 'ww_06', 'ww_07',
'ww_08', 'ww_09', 'ww_10', 'ww_11', 'ww_12', 'ww_13', 'ww_14', 'ww_15',
'ww_16', 'ww_17', 'ww_18', 'ww_19', 'ww_20', 'ww_21', 'ww_22', 'ww_23',
'ww_24', 'ww_25', 'ww_26', 'ww_27', 'ww_28', 'ww_29', 'ww_30', 'ww_31',
'ww_32', 'ww_33', 'ww_34', 'ww_35', 'ww_36', 'ww_37', 'ww_38', 'ww_39',
'ww_40', 'ww_41', 'ww_42', 'ww_43', 'ww_44', 'ww_45', 'ww_46', 'ww_47',
'ww_48', 'ww_49', 'ww_50', 'ww_51', 'ww_52', 'ww_53', 'ww_54', 'ww_55',
'ww_56', 'ww_57', 'ww_58', 'ww_59', 'ww_60', 'ww_61', 'ww_62', 'ww_63',
'ww_64', 'ww_65', 'ww_66', 'ww_67', 'ww_68', 'ww_69', 'ww_70', 'ww_71',
'ww_72', 'ww_73', 'ww_74', 'ww_75', 'ww_76', 'ww_77', 'ww_78', 'ww_79',
'ww_80', 'ww_81', 'ww_82', 'ww_83', 'ww_84', 'ww_85', 'ww_86', 'ww_87',
'ww_88', 'ww_89', 'ww_90', 'ww_91', 'ww_92', 'ww_93', 'ww_94', 'ww_95',
'ww_96', 'ww_97', 'ww_98', 'ww_99']
# "Advanced table" mode: picks a marker name, colour, and height per value.
symbol_plotting = magics.msymb(
symbol_advanced_table_colour_method = "list",
symbol_advanced_table_height_method = "calculate",
symbol_advanced_table_height_min_value = 1.,
symbol_advanced_table_height_max_value = 2.,
symbol_type = "marker",
symbol_table_mode = "advanced",
symbol_marker_mode = "name",
symbol_advanced_table_selection_type = "list",
symbol_advanced_table_colour_list = colour_list,
symbol_advanced_table_level_list = level_list,
symbol_advanced_table_marker_name_list = symb_name_list)
magics.plot(central_europe, coast, WW, symbol_plotting)
```
### Plotting Present weather symbols in cartesian projection
Let's see what built in meteorological symbols look like on a graph. Here we are using the same symbol_plotting definition as in previous cell.
```
# Plot present-weather symbols on the cartesian projection, reusing
# `symbol_plotting` from the previous cell.
# Fixed: the input object was named `input`, shadowing the builtin.
x = [3.,6,9,12,15,18,21,24,27,30,33,36,39,42,45,48,51,54,57,60,63,66,69,72,75,78,81,84,87,90,93,96,99]
values = [17.,41,55,18,76,21,72,22,96,83,52,24,26,95,27,59,28,
29,30,93,36,89,38,39,42,44,65,47,51,77,53,54,19,57]
y = [30.,60,40,90,70,95,80,50,70,40,60,30,50,5,30,10,30.,60,40,90,70,95,80,50,70,40,60,30,50,5,30,10]
# NOTE(review): x has 33 entries but values has 34 and y has 32 --
# confirm Magics tolerates (or truncates) mismatched input lengths here.
symbol_input = magics.minput(
    input_x_values = x,
    input_y_values = y,
    input_values = values)
magics.plot(cartesian_projection, vertical, horizontal, symbol_input, symbol_plotting)
```
### Total cloud amount symbols on geographical map
Let's see how we can plot total amount of clouds. We have this information stored in geopoints file, which is just an ascii file with a little header.
In order to plot different symbols on a map we need to use **advanced table mode**. In this mode we can control markers' shape, colour, size etc.
Also notice, that symbols can have a legend too.
```
# Cloud-cover symbols N_0..N_9, coloured from grey to black with increasing
# cover; levels are cloud-amount percentages.
tcc_symb = magics.msymb(
symbol_advanced_table_height_method = "calculate",
symbol_advanced_table_height_min_value = 1.,
symbol_advanced_table_height_max_value = 1.,
symbol_type = "marker",
legend = "on",
symbol_table_mode = "advanced",
symbol_marker_mode = "name",
symbol_advanced_table_selection_type = "list",
symbol_advanced_table_colour_method = "list",
symbol_advanced_table_colour_list = ['#696969','#5c5c5c','#525252','#464646','#3a3a3a',
'#303030','#262626','#1c1c1c','#101010','#000000'],
symbol_advanced_table_level_list = [0.0,0.1,12.5,25,37.5,50.0,62.5,75.0,87.5,100],
symbol_advanced_table_marker_name_list = ['N_0','N_1','N_2','N_3','N_4',
'N_5','N_6','N_7','N_8','N_9'])
# Custom legend text in octas (user_text_only replaces the automatic labels).
symb_legend = magics.mlegend(
legend_user_lines = ["No clouds","1/8 or less", "2/8", "3/8", "4/8", "5/8", "6/8", "7/8", "8/8"],
legend_box_mode = "positional",
legend_text_composition = "user_text_only",
legend_text_colour = "charcoal",
legend_text_font_size = 0.5,
legend_column_count = 3,
legend_box_y_position = 17.,
legend_box_x_position = -1.1,
legend_box_x_length = 25.00,
legend_box_y_length = 3.30)
magics.plot(central_europe, tcc, tcc_symb, magics.mcoast(), symb_legend)
```
### Plotting observational data from aircrafts
**airep.geo** file contains observational AIREP data from aircrafts. Here we are using the same symbol for all the points but change their colour and size with temperature. Notice that we can use continuous legend with symbols too, which is nice when we plot all the same symbols.
```
# AIREP aircraft observations: one marker shape for all points, with colour
# and size varying continuously with temperature (5-degree intervals).
airep_symbol = magics.msymb(
legend = "on",
symbol_type = "marker",
symbol_table_mode = "advanced",
symbol_advanced_table_selection_type = "interval",
symbol_advanced_table_interval = 5.,
symbol_advanced_table_min_level_colour = "lavender",
symbol_advanced_table_max_level_colour = "violet",
symbol_advanced_table_colour_direction = "clockwise",
symbol_advanced_table_height_method = "calculate",
symbol_advanced_table_height_min_value = 0.1,
symbol_advanced_table_height_max_value = 0.8,
symbol_marker_index = 15 )
magics.plot(north_america, coast, airep, airep_symbol, legend)
```
| github_jupyter |
<a href="https://colab.research.google.com/github/keithvtls/Numerical-Method-Activities/blob/main/Midterm/58015_MidtermExam_Yon.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
## Members:
D.M. Fajardo © 2021
I.J. Timbungco © 2021
M.A. Rodriguez © 2021
N.K. Vitales © 2021
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn import datasets
from sklearn.linear_model import LinearRegression
from sklearn import metrics
from sklearn import linear_model
from sklearn.metrics import mean_squared_error, r2_score
```
### **System of Linear Equations**
## Problem:
The currents running through an electrical system are given by the following system of equations. The three currents, I1, I2, and I3, are measured in amps. Solve the system to find the currents in this circuit. [[1](http://www.algebralab.org/practice/practice.aspx?file=Word_ApplicationProblemFor3EquationsWith3Variables.xml)]
$$
I_1 + 2I_2 - I_3 = 0.425 \\
3I_1 - I_2 + 2I_3 = 2.225\\
5I_1 + I_2 + 2I_3 = 3.775\\
$$
Standard Bracketed Form:
$$ \left\{
\begin{array}\\
I_1 + 2I_2 - I_3 = 0.425 \\
3I_1 - I_2 + 2I_3 = 2.225\\
5I_1 + I_2 + 2I_3 = 3.775\\
\end{array}
\right.$$
$$\left[\begin{array}{ccc|c}
1&2&-1&0.425\\
3&-1&2&2.225\\
5&1&2&3.775\\
\end{array}\right]$$
Linear Combination Form (Matrix):
$$
\begin{bmatrix}1&2&-1\\3&-1&2\\5&1&2\end{bmatrix} \cdot \begin{bmatrix}I_1\\I_2\\I_3\end{bmatrix} = \begin{bmatrix}0.425\\2.225\\3.775\end{bmatrix}
$$
### Programmed Algorithm:
```
# Coefficient matrix of the circuit's system of linear equations (LHS).
X = np.array([
    [1, 2, -1],
    [3, -1, 2],
    [5, 1, 2]
])
# Right-hand-side vector of measured values.
Y = np.array([
    [0.425],
    [2.225],
    [3.775]
])
# Solve X @ I = Y for the three branch currents (shape (3, 1)).
currents = np.linalg.solve(X, Y)
# Iterate a flattened 1-D view: calling float() on a size-1 ndarray
# (as the original `float(currents[i])` did) is deprecated since
# NumPy 1.25 and slated for removal.
for i, amps in enumerate(currents.ravel(), start=1):
    print(f'I{i} = {round(float(amps), 2)} A')
```
### Vector Visualization:
```
# Plot the three solved currents as connected markers.
plt.figure(figsize=(5,5))
plt.plot(currents, 'o-')
plt.title('Vector Visualization')
plt.ylabel('Current')
# Hide x-axis ticks/labels: the x positions are just array indices
# with no physical meaning here.
plt.tick_params(bottom=False,
labelbottom=False)
plt.ylim(0.35,0.8)
plt.grid()
plt.show()
```
### **Curve Fitting**
### Problem:
Investigating whether patients with high blood pressure also tend to have high blood sugar levels — a possible indicator of diabetes.
```
# Load the scikit-learn diabetes dataset and wrap its features in a DataFrame.
diabetes_data = datasets.load_diabetes()
diabetes_data.keys()
diabetes = pd.DataFrame(diabetes_data.data, columns=diabetes_data.feature_names)
diabetes.describe()
print(diabetes_data.DESCR)
# Single-feature regression of 's6' on blood pressure ('bp').
# NOTE(review): presumably 's6' is the blood-sugar column referenced in the
# problem statement — confirm against the printed DESCR.
X = diabetes['bp'].values.reshape(-1,1)
y = diabetes['s6'].values.reshape(-1,1)
regr = linear_model.LinearRegression()
regr.fit(X, y)
# w0 = intercept, w1 = slope of the fitted line.
print('w0:', regr.intercept_)
print('w1:', regr.coef_)
```
### Multiple Linear Regression
```
# Hold out 20% of the rows for testing; fixed random_state for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state=1)
model = LinearRegression()
model.fit(X_train, y_train)
# Summarise the fitted parameters: one weight per feature plus the intercept.
model_summary = pd.DataFrame(['bp'], columns=['Features'])
model_summary['Weights Raw'] = model.coef_
# DataFrame.append() was deprecated in pandas 1.4 and removed in 2.0;
# build a one-row frame and concatenate instead.
intercept_row = pd.DataFrame([{'Features': 'Intercept', 'Weights Raw': float(model.intercept_)}])
model_summary = pd.concat([model_summary, intercept_row], ignore_index=True)
model_summary
preds = model.predict(X_test)
out = pd.DataFrame({'Actual': y_test.flatten(), 'Predicted': preds.flatten()})
# Predicted-vs-actual scatter: points near the diagonal indicate a good fit.
plt.figure(figsize=(5*2,3*2))
plt.title('Predictions', fontsize=30)
plt.scatter(y_test, preds, s = 256, alpha=0.4)
plt.xlabel('Ground Truth', fontsize=20)
plt.ylabel('Prediction', fontsize=20)
plt.show()
# Held-out data (blue) with the model's predictions drawn in red.
plt.figure(figsize=(5*2,3*2))
plt.scatter(X_test, y_test, s = 256, color='blue', alpha=0.5)
plt.plot(X_test, preds, color='red', linewidth=2)
plt.show()
```
### Normal Equation
```
# Ordinary least squares via the normal equation:
#   theta = (X^T X)^{-1} X^T y
# where the design matrix has a leading column of ones for the intercept.
n = len(diabetes['bp'])                               # number of observations
x_bias = np.ones((n, 1))                              # intercept column of ones
x_new = np.append(x_bias, np.reshape(X, (n, 1)), axis=1)   # design matrix [1 | x]
x_new_transpose = x_new.T
x_new_transpose_dot_x_new = x_new_transpose.dot(x_new)     # X^T X (Gram matrix)
temp_1 = np.linalg.inv(x_new_transpose_dot_x_new)          # (X^T X)^{-1}
temp_2 = x_new_transpose.dot(y)                            # X^T y
theta = temp_1.dot(temp_2)                                 # closed-form coefficients
Intercept = theta[0]
Slope = theta[1]
print("Intercept:",Intercept)
print("Slope:",Slope)
#Predicting values:
def predict_value(input_feature, slope, intercept):
    """Evaluate the fitted line: returns slope * input_feature + intercept."""
    return intercept + slope * input_feature
# Predict s6 for a feature value of 3 using the normal-equation coefficients.
# NOTE(review): the variable is named `bmi` but the fitted feature is blood
# pressure ('bp') — looks like a copy-paste slip in the name; confirm intent.
bmi =3
prediction =predict_value(bmi,Slope,Intercept)
print(prediction)
#Plotting the regression Line:
plt.figure(figsize=(5*2,3*2))
plt.scatter(X,y, alpha=0.5)
plt.xlabel('bp')
plt.ylabel('s6')
# Draw the fitted line y = Slope*x + Intercept over the scatter.
plt.plot(X,Slope*X+Intercept, color="red")
```
### Polynomial curve
```
def linear_regressor(X, y):
    """Closed-form simple linear regression.

    Returns (w0, w1) — the intercept and slope of the least-squares
    line y = w1*x + w0.
    """
    X = np.array(X)
    y = np.array(y)
    n = X.size
    sum_sq = np.sum(X ** 2)
    cross = np.sum(X * y)
    denom = sum_sq - n * X.mean() ** 2
    w0 = (y.mean() * sum_sq - X.mean() * cross) / denom
    w1 = (cross - X.mean() * np.sum(y)) / denom
    return w0, w1
# Fit the regression on the bp/s6 data and report the fitted line.
w0,w1 = linear_regressor(X,y)
print("Linear Regression Equation: y = {:.3f}x + {:.3f}".format(w1, w0))
def show_regline(X, y, w1, w0):
    """Scatter the data and overlay the regression line y = w1*x + w0."""
    lo = X.min() - 1
    hi = X.max() + 1
    xs = np.linspace(lo, hi)
    plt.figure(figsize=(5, 5))
    plt.grid()
    plt.scatter(X, y, alpha=0.5)
    plt.plot(xs, w1 * xs + w0, c='red')
    plt.show()
show_regline(X,y,w1,w0)
def lin_reg(val, w0, w1):
    """Apply the fitted linear model: returns w1*val + w0."""
    return w0 + w1 * val
print(lin_reg(10, w0, w1))
# Extrapolate: append synthetic points generated from the fitted line for
# x = 10..15, then re-plot the enlarged dataset with the same line.
X_new, y_new = X.copy(), y.copy()
for i in range(10,16):
    X_new = np.insert(X_new,-1, i)
    y_new = np.insert(y_new,-1, lin_reg(i,w0,w1))
show_regline(X_new, y_new, w1, w0)
# Reload the bp/s6 columns, then build a synthetic cubic dataset with
# Gaussian noise to demonstrate how a straight line fits a curved trend.
X = diabetes['bp'].values.reshape(-1,1)
y = diabetes['s6'].values.reshape(-1,1)
X_1 = np.arange(0, 20,1)
y_1 = X_1 - 2 * (X_1 ** 2) + 0.5 * (X_1 ** 3) + np.random.normal(-3, 3, 20)
def show_regline(X, y, w1, w0):
    """Scatter (X, y) and overlay the regression line y = w1*x + w0.

    Bug fix: the original body plotted the module-level globals X_1/y_1
    instead of its own X/y parameters, silently ignoring its arguments.
    Behavior at the existing call site (which passes X_1, y_1) is unchanged.
    """
    x_min, x_max = X.min() - 1, X.max() + 1
    linex = np.linspace(x_min, x_max)
    liney = w1 * linex + w0
    plt.figure(figsize=(5, 5))
    plt.grid()
    # Use the parameters, not globals, so the helper works for any dataset.
    plt.scatter(X, y, s=256, color='blue', alpha=0.5)
    plt.plot(linex, liney, c='red')
    plt.show()
def linear_regressor(X, y):
    """Closed-form simple linear regression: returns (intercept w0, slope w1)."""
    xs, ys = np.array(X), np.array(y)
    count = xs.size
    mean_x = xs.mean()
    s_xx = np.sum(xs ** 2)
    s_xy = np.sum(xs * ys)
    scale = s_xx - count * mean_x ** 2
    intercept = (ys.mean() * s_xx - mean_x * s_xy) / scale
    slope = (s_xy - mean_x * np.sum(ys)) / scale
    return intercept, slope
# Fit both the real (bp/s6) and synthetic cubic datasets.
w0,w1 = linear_regressor(X,y)
w0_q,w1_q = linear_regressor(X_1, y_1)
# Bug fix: show_regline's signature is (X, y, w1, w0) — slope before
# intercept — but the original call passed (w0_q, w1_q), swapping the two
# coefficients and drawing the wrong line.
show_regline(X_1, y_1, w1_q, w0_q)
```
### References:
[1] AlgebraLAB (2021) [**AlgebraLAB: Word Problem Exercises: Applications of 3 Equations with 3 Variables**](http://www.algebralab.org/practice/practice.aspx?file=Word_ApplicationProblemFor3EquationsWith3Variables.xml)
[2] D.J Lopez, "Curve Fitting," in Numerical Method, 2021: [**Curve Fitting Techniques**](https://github.com/dyjdlopez/numeth2021/blob/main/Week%209-13%20-%20Curve%20Fitting%20Techniques/NuMeth_4_Curve_Fitting.ipynb)
[3] D.J Lopez, "Applied Linear Regression," in Numerical Method, 2021: [**Linear Regression**](https://github.com/dyjdlopez/numeth2021/blob/main/Week%209-13%20-%20Curve%20Fitting%20Techniques/NuMeth_4_5_Applied_Linear_Regression.ipynb)
[4] Pratik Shukla (2020): [**Pratik Shukla: Implementation of Simple Linear Regression Using Normal Equation (Matrices)**](https://medium.com/@shuklapratik22/implementation-of-simple-linear-regression-using-normal-equation-matrices-f9021c3590da)
| github_jupyter |
### CIS 9 ###
### Numpy, Basic Statistics -- Solution ###
Reading:
<br>- Python Data Science Handbook: Ch 2 except Fancy Indexing, Structured Arrays sections
<br>- Think Stats: Ch 1; Ch 2 up to and including Variance; Ch 3: Percentiles
To use numpy, we first need to import the `numpy` module:
```
import numpy as np
```
**Initializing an array, size of an array**
```
# 1. 1D array from list
oneD = np.array([1, 2, 3, 4])
print(oneD)
# print the size of the array?
print(oneD.shape)
# 2. 2D array from list of lists
twoD = np.array([[1,2,3],[4,5.1,6],[7,8,9],[10,11,12]])
print(twoD)
# print the size of the array?
print(twoD.shape)
# A 2D or higher dimension array must have the same number of elements across each dimension.
# Example: for a 2D array, all rows must have the same number of elements and
# all columns must have the same number of elements
# 3. array of int zeros
zeros = np.zeros(8, dtype=int)
print(zeros)
# what data type does the array store?
# we give numpy the type Python int, which gets converted to np.int32
# how to have the array store numpy's int?
# put numpy type directly: dtype=np.int32 or np.int8, np.int16, etc.
# 4. array of zeros
floatZeros = np.zeros((2,2))
print(floatZeros)
# what's the default data type for numpy? numpy's float64
print(type(floatZeros))
print(type(floatZeros[0]))
print(type(floatZeros[0,0]))
# 5. array of ones
ones = np.ones(3)
print(ones)
# 6. array of same values
filled = np.full((2,3),-2)
print(filled)
# what is (2,3)? the shape of the array, 2 rows x 3 cols
# 7. array of integers, in a range
countingNums = np.arange(1,11)
print(countingNums)
every2 = np.arange(10,0,-2)
print(every2)
# are the upper and lower limits of np.arange() the same
# as Python's range()? yes
# 8. array of float random numbers, always between 0 and 1
randNums = np.random.random((3,2))
print(randNums)
# 9. array of 80 int random numbers from 10 to 19
intRandNums = np.random.randint(10, 20, 80)
print(intRandNums)
print(intRandNums.shape, '\n')
# create a 3 rows x 4 cols array of random integers from 10 to 19?
# print the array and the shape?
intRandNums2 = np.random.randint(10, 20, (3,4))
print(intRandNums2)
print(intRandNums2.shape)
# 10. array from csv file
import csv
import random
with open("sample.csv", "w", newline='') as f :
writer = csv.writer(f)
for i in range(3) :
writer.writerow([random.randint(1,11) for i in range(4)])
data = np.loadtxt("sample.csv", delimiter=",")
print(data.shape)
print(data,'\n')
data = np.loadtxt("sample.csv", delimiter=",", dtype=np.int8)
print(data.shape)
print(data, '\n')
with open("sample.txt", "w") as f :
f.write("one two three")
data = np.genfromtxt("sample.txt", dtype=str) # another way
print(data)
with open("sample.txt", "w") as f :
f.write("1,2,3")
# read sample.txt into a numpy array with 3 integer elements and print the array?
data = np.genfromtxt("sample.txt", dtype=int, delimiter=',')
print(data)
```
**Array indexing**
```
# 11. numeric indexing
arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
print(arr, '\n')
print(arr[2], '\n')
print(arr[2,3], '\n')
print(arr[-1], '\n')
print(arr[-2,-2])
# 12. slice indexing
arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
print(arr, '\n')
print(arr[:2], '\n')
print(arr[1:,1:3], '\n')
# we can also mix integer indexing with slice indexing,
# however, this will yield an array of lower rank than the original array
print(arr[-1,:3], '\n') # 1D, lower rank than original arr
print(arr[:-2,1:-1]) # 2D, same rank as arr
# 13. Each time we create a slice of an array we get a view into the same array.
# We're not creating a new array, so modifying it will modify the original array.
arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
print(arr)
view = arr[:2,1:3]
print(view)
view[0,0] = 100
print(view)
print(arr, '\n')
# to actually make a copy, use the copy() method:
copy = arr[:2,1:3].copy()
copy[0,0] = -1
print(copy)
print(arr)
# copying takes up memory so using a view is preferable if:
# a) data is for analysis only (no modification needed)
# or b) if data need to be changed but the original array must remain the same
# 14. index with an array
arr = np.array([1,12,3,4,8,5])
print(arr)
print(arr[[0,2,5]])
index = [0,1,2]
print(arr[index])
# 15. boolean indexing
arr = np.array([[1,12,3,4], [15,6,7,10], [2,20,8,1]])
print(arr, '\n')
print(arr[arr<10], '\n')
print(arr[arr%2==1], '\n')
# describe what the last print statement will print for any general array?
# print all elements that are odd numbers
```
**Changing array shape**
```
# 16. change the shape of an array, as long as the new shape has the same number of elements
arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]]) # 12 elements
newArr1 = arr.reshape((6,2))
newArr2 = arr.reshape((12,))
print(arr, '\n')
print(newArr1,'\n')
print(newArr2)
# will the following will work? why or why not?
# newArr3 = arr.reshape((1,)) no, only room for 1 value
# newArr4 = arr.reshape((2,5)) no, only room for 10 values
# 17. transpose a 2D array (matrix)
arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
print(arr)
print(arr.T, '\n')
arr = np.array([[1,2,3]])
print(arr)
print(arr.T)
# 18. reverse a 1D array:
arr = np.array([1,2,3,4,5])
print(arr)
print(arr[::-1])
```
**Array math**
<br>Take advantage of numpy's ufuncs below and do not loop. Do the reading so you can see the speed difference between unfuncs and loops.
```
# 19. basic math functions operate element-wise on arrays, and are available both as operator overloads
# and as functions in the numpy module:
# Note: the `np.float` alias was deprecated in NumPy 1.20 and removed in
# NumPy 1.24 — use the explicit `np.float64` dtype (or the builtin `float`).
x = np.array([[1,2],[3,4]], dtype=np.float64)
y = np.array([[5,6],[7,8]], dtype=np.float64)
print(x + y)
print(np.add(x, y))
# 20. other arithmetic operations between 2 arrays
print(x - y)
print(x * y)
print(x / y)
print(x // y)
print(x % y)
# 21. arithmetic operations between an array and a scalar
x = np.array([[1,2],[3,4]], dtype=int)
print(x - 1)
print(x * 2)
print(x / 3)
print(x // 4)
print(x % 3)
print(x ** 2)
# 22. square root
print(np.sqrt(x), '\n')
# absolute value
print(np.abs([-1,2]), '\n')
```
**Aggregate functions:**
```
# 23. Math
#sum
arr = np.array([[1,2],[3,4]])
print(np.sum(arr))
print(np.sum(arr, axis=0))
print(np.sum(arr, axis=1), '\n')
# axis=0 means columns, axis=1 means rows
# min
print(arr.min())
print(arr.min(0))
print(arr.min(1), '\n')
# describe what the 3 statements above print? Don't copy the output, explain what the output is
# max
print(arr.max())
print(arr.max(axis=0))
print(arr.max(1), '\n')
# 24. Statistics — aggregate over the whole array, per column (axis=0),
# or per row (axis=1).
arr = np.array([[1,4,-3,2], [7,-1,3,8]])
# mean: central tendency
print(arr.mean())
print(arr.mean(axis=0))
print(arr.mean(1), '\n')
# variance: spread
# standard deviation: spread of the values around the mean
print(arr.std())
print(arr.std(axis=0))
print(arr.std(1), '\n')
# median: mid-point
print(np.median(arr))
print(np.median(arr, axis=0))
print(np.median(arr,1), '\n')
# percentile rank: percentage of values that are less than or equal to a given value
# percentile: value with a given percentile rank
print(np.percentile(arr,75))
print(np.percentile(arr,25))
```
**Broadcasting**
```
# 25. broadcasting or extending an array happens during computation between 2 arrays of
# different sizes, as long as the 2 arrays have specific dimensions that can be matched
arr = np.array([[1,2,3], [4,5,6]])
print(arr + 2) # the 2 is broadcasted to: [ [2,2,2], [2,2,2]] so it can be added to arr
arr1 = np.array([[1,2,3]])
arr2 = np.array([[1], [2], [3]])
print(arr1.shape, arr2.shape) # (1,3) (3,1)
print(arr1 + arr2, '\n') # [[1,2,3], [[1,1,1],
# [1,2,3], + [2,2,2],
# [1,2,3]] [3,3,3]]
arr3 = np.array([1,2])
print(arr2.shape, arr3.shape) # (3,1) (2,)
print(arr2 + arr3, '\n') # [[1,1], [[1,2],
# [2,2], + [1,2],
# [3,3]] [1,2]]
print(arr1.shape, arr3.shape) # (1,3) (2,)
print(arr1 + arr3[:,np.newaxis], '\n') # shape becomes: (1,3) (2,1)
# [[1,2,3]] + [[1], => [[1,2,3], [[1,1,1],
# [2]] [1,2,3]] + [2,2,2]]
# FYI only: more advanced broadcasting
arr4 = np.array([[1,2],[-1,-2]])
print(arr1.shape, arr4.shape) # (1,3) (2,2)
print(arr1[:,np.newaxis,:] + arr4[:,:,np.newaxis], '\n')
# (1,3) => (1,1,3) (2,2) => (2,2,1)
# or: print(arr1[...,np.newaxis,:] + arr4[...,np.newaxis])
```
**Boolean operations**
```
# 26. checking data in an array
arr = np.array([[1,2,-2],[-3,1,0]])
print(arr)
print("---###1---")
print(arr<0)
print("---###2---")
print(np.sum(arr<0))
print("---###3---")
print(np.sum(arr<0,axis=0), '\n')
print("---###4---")
# describe the output the last 2 print statements above?
# don't just copy the output for your answer.
# next to last statement: print number of elements that are less than 0
# last statement: print number of elements that are less than 0, for
# each column
print(np.any(arr<0))
print(np.all(arr<0), '\n')
print(np.all(arr<0,axis=1))
print(np.all(arr<0,axis=0))
```
**Sorting**
```
# 27. sort values in the array
arr = np.array([5,-2,0,2,-1,-2,4])
print(np.sort(arr),'\n')
arr = np.array([[2,0,-1],[1,8,3],[7,1,0]])
print(np.sort(arr), '\n')
print(np.sort(arr, axis=0), '\n')
print(np.sort(arr, axis=1), '\n')
# which axis is the default when no axis is specified?
# axis=1, or by row
```
**Get index values**
```
# 28. get the index of the sorted values
arr = np.array([5,-2,0,2,-1,-2,4])
print(np.argsort(arr))
ind = np.argsort(arr)
# print(ind)
print(arr[ind],'\n')
arr = np.array([[2,0,-1],[1,8,3],[7,1,0]])
print(np.argsort(arr), '\n')
ind = np.argsort(arr)
# print the smallest value of each row by using arr and ind?
print("smallest value of each row:", arr[0,ind[0,0]], arr[1,ind[1,0]], arr[2,ind[2,0]])
# the answer above is the "brute force" approach, typing out each individual index
# so that you can see which ones they are.
# the following are good answers from the class, which are shorter and preferred:
print("smallest value of each row:", arr[np.arange(3),ind[:,0]])
print("smallest value of each row:", arr[[0,1,2],ind[:,0]]) # similar to the one above
print("smallest value of each row:", arr[ind == 0])
# there are a couple variations of this one, which is a little longer and
# doesn't take full advantage of numpy's capability to access array elements
print("smallest value of each row:", [arr[i,ind[i,0]] for i in range(arr.shape[0])])
# 29. get the indices that match a boolean condition
arr = np.array([5,-2,0,2,-1,-2,4, -3,1])
print(np.where(arr>0))
ind = np.where(arr>0)
# print the positive values in arr by using ind?
print("positive values:", arr[ind])
```
| github_jupyter |
<script async src="https://www.googletagmanager.com/gtag/js?id=UA-59152712-8"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
gtag('js', new Date());
gtag('config', 'UA-59152712-8');
</script>
# Start-to-Finish Example: Head-On Black Hole Collision
## Author: Zach Etienne
### Formatting improvements courtesy Brandon Clark
## This module implements a basic numerical relativity code to merge two black holes in *spherical coordinates*
### Here we place the black holes initially on the $z$-axis, so the entire simulation is axisymmetric about the $\phi$-axis. Not sampling in the $\phi$ direction greatly speeds up the simulation.
**Notebook Status:** <font color = green><b> Validated </b></font>
**Validation Notes:** This module has been validated to exhibit convergence to zero of the Hamiltonian constraint violation at the expected order to the exact solution *after a short numerical evolution of the initial data* (see [plots at bottom](#convergence)), and all quantities have been validated against the [original SENR code](https://bitbucket.org/zach_etienne/nrpy).
### NRPy+ Source Code for this module:
* [BSSN/BrillLindquist.py](../edit/BSSN/BrillLindquist.py); [\[**tutorial**\]](Tutorial-ADM_Initial_Data-Brill-Lindquist.ipynb): Brill-Lindquist initial data; sets all ADM variables in Cartesian basis:
* [BSSN/ADM_Exact_Spherical_or_Cartesian_to_BSSNCurvilinear.py](../edit/BSSN/ADM_Exact_Spherical_or_Cartesian_to_BSSNCurvilinear.py); [\[**tutorial**\]](Tutorial-ADM_Initial_Data-Converting_Exact_ADM_Spherical_or_Cartesian_to_BSSNCurvilinear.ipynb): Spherical/Cartesian ADM$\to$Curvilinear BSSN converter function, for which exact expressions are given for ADM quantities.
* [BSSN/BSSN_ID_function_string.py](../edit/BSSN/BSSN_ID_function_string.py): Sets up the C code string enabling initial data be set up in a point-by-point fashion
* [BSSN/BSSN_constraints.py](../edit/BSSN/BSSN_constraints.py); [\[**tutorial**\]](Tutorial-BSSN_constraints.ipynb): Hamiltonian constraint in BSSN curvilinear basis/coordinates
* [BSSN/BSSN_RHSs.py](../edit/BSSN/BSSN_RHSs.py); [\[**tutorial**\]](Tutorial-BSSN_time_evolution-BSSN_RHSs.ipynb): Generates the right-hand sides for the BSSN evolution equations in singular, curvilinear coordinates
* [BSSN/BSSN_gauge_RHSs.py](../edit/BSSN/BSSN_gauge_RHSs.py); [\[**tutorial**\]](Tutorial-BSSN_time_evolution-BSSN_gauge_RHSs.ipynb): Generates the right-hand sides for the BSSN gauge evolution equations in singular, curvilinear coordinates
## Introduction:
Here we use NRPy+ to generate the C source code necessary to set up initial data for two black holes (Brill-Lindquist, [Brill & Lindquist, Phys. Rev. 131, 471, 1963](https://journals.aps.org/pr/abstract/10.1103/PhysRev.131.471); see also Eq. 1 of [Brandt & Brügmann, arXiv:gr-qc/9711015v1](https://arxiv.org/pdf/gr-qc/9711015v1.pdf)). Then we use it to generate the RHS expressions for [Method of Lines](https://reference.wolfram.com/language/tutorial/NDSolveMethodOfLines.html) time integration based on an [explicit Runge-Kutta fourth-order scheme](https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods) (RK4 is chosen below, but multiple options exist).
The entire algorithm is outlined as follows, with links to the relevant NRPy+ tutorial notebooks listed at each step:
1. Allocate memory for gridfunctions, including temporary storage for the Method of Lines time integration
* [**NRPy+ tutorial on Method of Lines algorithm**](Tutorial-Method_of_Lines-C_Code_Generation.ipynb).
1. Set gridfunction values to initial data
* [**NRPy+ tutorial on Brill-Lindquist initial data**](Tutorial-ADM_Initial_Data-Brill-Lindquist.ipynb)
* [**NRPy+ tutorial on validating Brill-Lindquist initial data**](Tutorial-Start_to_Finish-BSSNCurvilinear-Setting_up_Exact_Initial_Data.ipynb).
1. Next, integrate the initial data forward in time using the Method of Lines coupled to a Runge-Kutta explicit timestepping algorithm:
1. At the start of each iteration in time, output the Hamiltonian constraint violation
* [**NRPy+ tutorial on BSSN constraints**](Tutorial-BSSN_constraints.ipynb).
1. At each RK time substep, do the following:
1. Evaluate BSSN RHS expressions
* [**NRPy+ tutorial on BSSN right-hand sides**](Tutorial-BSSN_time_evolution-BSSN_RHSs.ipynb) ([**BSSN Introduction Notebook**](Tutorial-BSSN_formulation.ipynb))
* [**NRPy+ tutorial on BSSN gauge condition right-hand sides**](Tutorial-BSSN_time_evolution-BSSN_gauge_RHSs.ipynb)
1. Apply singular, curvilinear coordinate boundary conditions [*a la* the SENR/NRPy+ paper](https://arxiv.org/abs/1712.07658)
* [**NRPy+ tutorial on setting up singular, curvilinear boundary conditions**](Tutorial-Start_to_Finish-Curvilinear_BCs.ipynb)
1. Enforce constraint on conformal 3-metric: $\det{\bar{\gamma}_{ij}}=\det{\hat{\gamma}_{ij}}$
* [**NRPy+ tutorial on enforcing $\det{\bar{\gamma}_{ij}}=\det{\hat{\gamma}_{ij}}$ constraint**](Tutorial-BSSN_enforcing_determinant_gammabar_equals_gammahat_constraint.ipynb)
1. Repeat above steps at two numerical resolutions to confirm convergence to zero.
<a id='toc'></a>
# Table of Contents
$$\label{toc}$$
This notebook is organized as follows
1. [Step 1](#initializenrpy): Set core NRPy+ parameters for numerical grids and reference metric
1. [Step 1.a](#cfl) Output needed C code for finding the minimum proper distance between grid points, needed for [CFL](https://en.wikipedia.org/w/index.php?title=Courant%E2%80%93Friedrichs%E2%80%93Lewy_condition&oldid=806430673)-limited timestep
1. [Step 2](#adm_id): Import Brill-Lindquist ADM initial data C function from the [`BSSN.BrillLindquist`](../edit/BSSN/BrillLindquist.py) NRPy+ module
1. [Step 3](#bssn): Output C code for BSSN spacetime solve
1. [Step 3.a](#bssnrhs): Output C code for BSSN RHS expressions
1. [Step 3.b](#hamconstraint): Output C code for Hamiltonian constraint
1. [Step 3.c](#enforce3metric): Enforce conformal 3-metric $\det{\bar{\gamma}_{ij}}=\det{\hat{\gamma}_{ij}}$ constraint
1. [Step 3.d](#ccodegen): Generate C code kernels for BSSN expressions, in parallel if possible
1. [Step 3.e](#cparams_rfm_and_domainsize): Output C codes needed for declaring and setting Cparameters; also set `free_parameters.h`
1. [Step 4](#bc_functs): Set up boundary condition functions for chosen singular, curvilinear coordinate system
1. [Step 5](#mainc): `BrillLindquist_Playground.c`: The Main C Code
1. [Step 6](#compileexec): Compile generated C codes & perform the black hole collision calculation
1. [Step 7](#visualize): Visualize the output!
1. [Step 7.a](#installdownload): Install `scipy` and download `ffmpeg` if they are not yet installed/downloaded
1. [Step 7.b](#genimages): Generate images for visualization animation
1. [Step 7.c](#genvideo): Generate visualization animation
1. [Step 8](#convergence): Plot the numerical error, and confirm that it converges to zero with increasing numerical resolution (sampling)
1. [Step 9](#latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file
<a id='initializenrpy'></a>
# Step 1: Set core NRPy+ parameters for numerical grids and reference metric \[Back to [top](#toc)\]
$$\label{initializenrpy}$$
```
# Step P1: Import needed NRPy+ core modules:
from outputC import lhrh,outCfunction,outC_function_dict # NRPy+: Core C code output module
import finite_difference as fin # NRPy+: Finite difference C code generation module
import NRPy_param_funcs as par # NRPy+: Parameter interface
import grid as gri # NRPy+: Functions having to do with numerical grids
import reference_metric as rfm # NRPy+: Reference metric support
import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface
import shutil, os, sys, time # Standard Python modules for multiplatform OS-level functions, benchmarking
import pickle # Standard Python module for bytewise transfer of data between modules
# Step P2: Create C code output directory:
Ccodesdir = os.path.join("BSSN_Two_BHs_Collide_Ccodes/")
# First remove C code output directory if it exists
# Courtesy https://stackoverflow.com/questions/303200/how-do-i-remove-delete-a-folder-that-is-not-empty
# !rm -r ScalarWaveCurvilinear_Playground_Ccodes
shutil.rmtree(Ccodesdir, ignore_errors=True)
# Then create a fresh directory
cmd.mkdir(Ccodesdir)
# Step P3: Create executable output directory:
outdir = os.path.join(Ccodesdir,"output/")
cmd.mkdir(outdir)
# Step 1: Set the spatial dimension parameter
# to three (BSSN is a 3+1 decomposition
# of Einstein's equations), and then read
# the parameter as DIM.
par.set_parval_from_str("grid::DIM",3)
DIM = par.parval_from_str("grid::DIM")
# Step 1.a: Enable SIMD-optimized code?
# I.e., generate BSSN and Ricci C code kernels using SIMD-vectorized
# compiler intrinsics, which *greatly improve the code's performance*,
# though at the expense of making the C-code kernels less
# human-readable.
# * Important note in case you wish to modify the BSSN/Ricci kernels
# here by adding expressions containing transcendental functions
# (e.g., certain scalar fields):
# Note that SIMD-based transcendental function intrinsics are not
# supported by the default installation of gcc or clang (you will
# need to use e.g., the SLEEF library from sleef.org, for this
# purpose). The Intel compiler suite does support these intrinsics
# however without the need for external libraries.
enable_SIMD = True
# Step 2: Set some core parameters, including CoordSystem MoL timestepping algorithm,
# FD order, floating point precision, and CFL factor:
# Choices are: Spherical, SinhSpherical, SinhSphericalv2, Cylindrical, SinhCylindrical,
# SymTP, SinhSymTP
CoordSystem = "Spherical"
# Step 2.a: Set defaults for Coordinate system parameters.
# These are perhaps the most commonly adjusted parameters,
# so we enable modifications at this high level.
# domain_size sets the default value for:
# * Spherical's params.RMAX
# * SinhSpherical*'s params.AMAX
# * Cartesians*'s -params.{x,y,z}min & .{x,y,z}max
# * Cylindrical's -params.ZMIN & .{Z,RHO}MAX
# * SinhCylindrical's params.AMPL{RHO,Z}
# * *SymTP's params.AMAX
domain_size = 7.5 # Needed for all coordinate systems.
# sinh_width sets the default value for:
# * SinhSpherical's params.SINHW
# * SinhCylindrical's params.SINHW{RHO,Z}
# * SinhSymTP's params.SINHWAA
sinh_width = 0.4 # If Sinh* coordinates chosen
# sinhv2_const_dr sets the default value for:
# * SinhSphericalv2's params.const_dr
# * SinhCylindricalv2's params.const_d{rho,z}
sinhv2_const_dr = 0.05 # If Sinh*v2 coordinates chosen
# SymTP_bScale sets the default value for:
# * SinhSymTP's params.bScale
SymTP_bScale = 0.5 # If SymTP chosen
# Step 2.b: Set the order of spatial and temporal derivatives;
# the core data type, and the CFL factor.
# RK_method choices include: Euler, "RK2 Heun", "RK2 MP", "RK2 Ralston", RK3, "RK3 Heun", "RK3 Ralston",
# SSPRK3, RK4, DP5, DP5alt, CK5, DP6, L6, DP8
RK_method = "RK4"
FD_order = 4 # Finite difference order: even numbers only, starting with 2. 12 is generally unstable
REAL = "double" # Best to use double here.
default_CFL_FACTOR= 0.5 # (GETS OVERWRITTEN WHEN EXECUTED.) In pure axisymmetry (symmetry_axes = 2 below) 1.0 works fine. Otherwise 0.5 or lower.
# Step 3: Generate Runge-Kutta-based (RK-based) timestepping code.
# As described above the Table of Contents, this is a 3-step process:
# 3.A: Evaluate RHSs (RHS_string)
# 3.B: Apply boundary conditions (post_RHS_string, pt 1)
# 3.C: Enforce det(gammabar) = det(gammahat) constraint (post_RHS_string, pt 2)
import MoLtimestepping.C_Code_Generation as MoL
from MoLtimestepping.RK_Butcher_Table_Dictionary import Butcher_dict
RK_order = Butcher_dict[RK_method][1]
cmd.mkdir(os.path.join(Ccodesdir,"MoLtimestepping/"))
MoL.MoL_C_Code_Generation(RK_method,
RHS_string = """
Ricci_eval(&rfmstruct, ¶ms, RK_INPUT_GFS, auxevol_gfs);
rhs_eval(&rfmstruct, ¶ms, auxevol_gfs, RK_INPUT_GFS, RK_OUTPUT_GFS);""",
post_RHS_string = """
apply_bcs_curvilinear(¶ms, &bcstruct, NUM_EVOL_GFS, evol_gf_parity, RK_OUTPUT_GFS);
enforce_detgammahat_constraint(&rfmstruct, ¶ms, RK_OUTPUT_GFS);\n""",
outdir = os.path.join(Ccodesdir,"MoLtimestepping/"))
# Step 4: Set the coordinate system for the numerical grid
par.set_parval_from_str("reference_metric::CoordSystem",CoordSystem)
rfm.reference_metric() # Create ReU, ReDD needed for rescaling B-L initial data, generating BSSN RHSs, etc.
# Step 5: Set the finite differencing order to FD_order (set above).
par.set_parval_from_str("finite_difference::FD_CENTDERIVS_ORDER", FD_order)
enable_FD_functions = True
par.set_parval_from_str("finite_difference::enable_FD_functions", enable_FD_functions)
# Step 6: If enable_SIMD==True, then copy SIMD/SIMD_intrinsics.h to $Ccodesdir/SIMD/SIMD_intrinsics.h
# Otherwise just paste a #define SIMD_IS_DISABLED to that file.
cmd.mkdir(os.path.join(Ccodesdir,"SIMD"))
if enable_SIMD == True:
shutil.copy(os.path.join("SIMD","SIMD_intrinsics.h"),os.path.join(Ccodesdir,"SIMD/"))
else:
with open(os.path.join(Ccodesdir,"SIMD","SIMD_intrinsics.h"), "w") as file:
file.write("#define SIMD_IS_DISABLED\n")
# Step 7: Set the direction=2 (phi) axis to be the symmetry axis; i.e.,
# axis "2", corresponding to the i2 direction.
# This sets all spatial derivatives in the phi direction to zero.
par.set_parval_from_str("indexedexp::symmetry_axes","2")
```
<a id='cfl'></a>
## Step 1.a: Output needed C code for finding the minimum proper distance between grid points, needed for [CFL](https://en.wikipedia.org/w/index.php?title=Courant%E2%80%93Friedrichs%E2%80%93Lewy_condition&oldid=806430673)-limited timestep \[Back to [top](#toc)\]
$$\label{cfl}$$
In order for our explicit-timestepping numerical solution to the scalar wave equation to be stable, it must satisfy the [CFL](https://en.wikipedia.org/w/index.php?title=Courant%E2%80%93Friedrichs%E2%80%93Lewy_condition&oldid=806430673) condition:
$$
\Delta t \le \frac{\min(ds_i)}{c},
$$
where $c$ is the wavespeed, and
$$ds_i = h_i \Delta x^i$$
is the proper distance between neighboring gridpoints in the $i$th direction (in 3D, there are 3 directions), $h_i$ is the $i$th reference metric scale factor, and $\Delta x^i$ is the uniform grid spacing in the $i$th direction:
```
# Output the find_timestep() function to a C file.
rfm.out_timestep_func_to_file(os.path.join(Ccodesdir,"find_timestep.h"))
# In the parallel C codegen below, the
def pickled_outC_function_dict(outC_function_dict):
    """Serialize a C-function dictionary into a flat list of pickled blobs.

    Layout: [count, name_0, body_0, name_1, body_1, ...], each element
    pickled individually so the stream can be rebuilt entry by entry.
    """
    blobs = [pickle.dumps(len(outC_function_dict))]
    for name, body in outC_function_dict.items():
        blobs.extend((pickle.dumps(name), pickle.dumps(body)))
    return blobs
```
<a id='adm_id'></a>
# Step 2: Import Brill-Lindquist ADM initial data C function from the [`BSSN.BrillLindquist`](../edit/BSSN/BrillLindquist.py) NRPy+ module \[Back to [top](#toc)\]
$$\label{adm_id}$$
The [`BSSN.BrillLindquist`](../edit/BSSN/BrillLindquist.py) NRPy+ module does the following:
1. Set up Brill-Lindquist initial data [ADM](https://en.wikipedia.org/wiki/ADM_formalism) quantities in the **Cartesian basis**, as [documented here](Tutorial-ADM_Initial_Data-Brill-Lindquist.ipynb).
1. Convert the ADM **Cartesian quantities** to **BSSN quantities in the desired Curvilinear basis** (set by reference_metric::CoordSystem), as [documented here](Tutorial-ADM_Initial_Data-Converting_ADMCartesian_to_BSSNCurvilinear.ipynb).
1. Sets up the standardized C function for setting all BSSN Curvilinear gridfunctions in a pointwise fashion, as [written here](../edit/BSSN/BSSN_ID_function_string.py), and returns the C function as a Python string.
```
import BSSN.BrillLindquist as bl
def BrillLindquistID():
    """Generate the Brill-Lindquist initial-data C function and write it to
    Ccodesdir/initial_data.h.

    Returns the pickled outC_function_dict (see pickled_outC_function_dict),
    so the result survives the round trip through a multiprocessing worker.
    """
    print("Generating optimized C code for Brill-Lindquist initial data. May take a while, depending on CoordSystem.")
    t_start = time.time()
    # Registers the "initial_data" C function in outC_function_dict:
    bl.BrillLindquist()
    with open(os.path.join(Ccodesdir, "initial_data.h"), "w") as outfile:
        outfile.write(outC_function_dict["initial_data"])
    t_end = time.time()
    print("(BENCH) Finished BL initial data codegen in "+str(t_end-t_start)+" seconds.")
    return pickled_outC_function_dict(outC_function_dict)
```
<a id='bssn'></a>
# Step 3: Output C code for BSSN spacetime solve \[Back to [top](#toc)\]
$$\label{bssn}$$
<a id='bssnrhs'></a>
## Step 3.a: Output C code for BSSN RHS expressions \[Back to [top](#toc)\]
$$\label{bssnrhs}$$
```
import BSSN.BSSN_RHSs as rhs
import BSSN.BSSN_gauge_RHSs as gaugerhs
# Set the *covariant*, second-order Gamma-driving shift condition
par.set_parval_from_str("BSSN.BSSN_gauge_RHSs::ShiftEvolutionOption", "GammaDriving2ndOrder_Covariant")
print("Generating symbolic expressions for BSSN RHSs...")
start = time.time()
# Enable rfm_precompute infrastructure, which results in
# BSSN RHSs that are free of transcendental functions,
# even in curvilinear coordinates, so long as
# ConformalFactor is set to "W" (default).
cmd.mkdir(os.path.join(Ccodesdir,"rfm_files/"))
par.set_parval_from_str("reference_metric::enable_rfm_precompute","True")
par.set_parval_from_str("reference_metric::rfm_precompute_Ccode_outdir",os.path.join(Ccodesdir,"rfm_files/"))
# Evaluate BSSN + BSSN gauge RHSs with rfm_precompute enabled:
import BSSN.BSSN_quantities as Bq
# Keep the Ricci tensor symbolic in the RHSs for now; it is computed in a
# separate kernel (Ricci_eval) and read back via auxevol gridfunctions.
par.set_parval_from_str("BSSN.BSSN_quantities::LeaveRicciSymbolic","True")
rhs.BSSN_RHSs()
gaugerhs.BSSN_gauge_RHSs()
# We use betaU as our upwinding control vector:
Bq.BSSN_basic_tensors()
betaU = Bq.betaU
import BSSN.Enforce_Detgammahat_Constraint as EGC
enforce_detg_constraint_symb_expressions = EGC.Enforce_Detgammahat_Constraint_symb_expressions()
# Next compute Ricci tensor
par.set_parval_from_str("BSSN.BSSN_quantities::LeaveRicciSymbolic","False")
Bq.RicciBar__gammabarDD_dHatD__DGammaUDD__DGammaU()
# Now register the Hamiltonian as a gridfunction.
H = gri.register_gridfunctions("AUX","H")
# Then define the Hamiltonian constraint and output the optimized C code.
import BSSN.BSSN_constraints as bssncon
bssncon.BSSN_constraints(add_T4UUmunu_source_terms=False)
# Now that we are finished with all the rfm hatted
# quantities in generic precomputed functional
# form, let's restore them to their closed-
# form expressions.
par.set_parval_from_str("reference_metric::enable_rfm_precompute","False") # Reset to False to disable rfm_precompute.
rfm.ref_metric__hatted_quantities()
end = time.time()
print("(BENCH) Finished BSSN symbolic expressions in "+str(end-start)+" seconds.")
# Extra #include needed by the generated kernels when FD helper functions
# are emitted to their own file (used by the codegen functions below).
includes = None
if enable_FD_functions:
    includes = ["finite_difference_functions.h"]
def BSSN_RHSs():
    """Generate the C kernel rhs_eval(), evaluating all BSSN evolution-equation
    right-hand sides at interior grid points, and write it to
    Ccodesdir/rhs_eval.h.

    Reads module-level state set up earlier in the notebook: the symbolic RHS
    expressions (rhs, gaugerhs), the upwinding control vector betaU,
    enable_SIMD, includes, and Ccodesdir.

    Returns the pickled outC_function_dict (see pickled_outC_function_dict),
    so results survive the round trip through a multiprocessing worker.
    """
    print("Generating C code for BSSN RHSs in "+par.parval_from_str("reference_metric::CoordSystem")+" coordinates.")
    start = time.time()
    # Construct the left-hand sides and right-hand-side expressions for all BSSN RHSs
    lhs_names = ["alpha", "cf", "trK"]
    rhs_exprs = [gaugerhs.alpha_rhs, rhs.cf_rhs, rhs.trK_rhs]
    for i in range(3):
        lhs_names.append("betU"+str(i))
        rhs_exprs.append(gaugerhs.bet_rhsU[i])
        lhs_names.append("lambdaU"+str(i))
        rhs_exprs.append(rhs.lambda_rhsU[i])
        lhs_names.append("vetU"+str(i))
        rhs_exprs.append(gaugerhs.vet_rhsU[i])
        for j in range(i, 3):  # symmetric rank-2 tensors: upper triangle only
            lhs_names.append("aDD"+str(i)+str(j))
            rhs_exprs.append(rhs.a_rhsDD[i][j])
            lhs_names.append("hDD"+str(i)+str(j))
            rhs_exprs.append(rhs.h_rhsDD[i][j])
    # Sort the lhss list alphabetically, and rhss to match.
    # This ensures the RHSs are evaluated in the same order
    # they're allocated in memory:
    lhs_names, rhs_exprs = [list(x) for x in zip(*sorted(zip(lhs_names, rhs_exprs), key=lambda pair: pair[0]))]
    # Declare the list of lhrh's (idiomatic zip instead of an index loop):
    BSSN_evol_rhss = [lhrh(lhs=gri.gfaccess("rhs_gfs", lhs_name), rhs=rhs_expr)
                      for lhs_name, rhs_expr in zip(lhs_names, rhs_exprs)]
    # Set up the C function for the BSSN RHSs
    # Set outputC and loop parameters for BSSN_RHSs C function.
    outC_params = "outCverbose=False"
    loopoptions = "InteriorPoints,enable_rfm_precompute"
    if enable_SIMD:  # idiomatic truth test (was: "== True")
        loopoptions += ",enable_SIMD"
        outC_params += ",enable_SIMD=True"
    desc = "Evaluate the BSSN RHSs"
    name = "rhs_eval"
    outCfunction(
        outfile=os.path.join(Ccodesdir, name+".h"), includes=includes, desc=desc, name=name,
        params="""rfm_struct *restrict rfmstruct,const paramstruct *restrict params,
const REAL *restrict auxevol_gfs,const REAL *restrict in_gfs,REAL *restrict rhs_gfs""",
        body=fin.FD_outputC("returnstring", BSSN_evol_rhss, params=outC_params,
                            upwindcontrolvec=betaU),
        loopopts=loopoptions)
    end = time.time()
    print("(BENCH) Finished BSSN_RHS C codegen in " + str(end - start) + " seconds.")
    return pickled_outC_function_dict(outC_function_dict)
def Ricci():
    """Generate the C kernel Ricci_eval(), evaluating the six independent
    components of the (symmetric) conformal Ricci tensor RbarDD at interior
    grid points, and write it to Ccodesdir/Ricci_eval.h.

    Results are stored to the auxevol gridfunctions, from which rhs_eval()
    reads them back (the RHSs were generated with LeaveRicciSymbolic=True).

    Returns the pickled outC_function_dict (see pickled_outC_function_dict),
    so results survive the round trip through a multiprocessing worker.
    """
    print("Generating C code for Ricci tensor in "+par.parval_from_str("reference_metric::CoordSystem")+" coordinates.")
    start = time.time()
    # Set up the C function for the Ricci tensor
    # Set outputC and loop parameters for Ricci tensor function.
    outC_params = "outCverbose=False"
    loopoptions = "InteriorPoints,enable_rfm_precompute"
    if enable_SIMD:  # idiomatic truth test (was: "== True")
        loopoptions += ",enable_SIMD"
        outC_params += ",enable_SIMD=True"
    desc = "Evaluate the Ricci tensor"
    name = "Ricci_eval"
    outCfunction(
        outfile=os.path.join(Ccodesdir, name+".h"), includes=includes, desc=desc, name=name,
        params="""rfm_struct *restrict rfmstruct,const paramstruct *restrict params,
const REAL *restrict in_gfs,REAL *restrict auxevol_gfs""",
        body=fin.FD_outputC("returnstring",
                            [lhrh(lhs=gri.gfaccess("auxevol_gfs","RbarDD00"),rhs=Bq.RbarDD[0][0]),
                             lhrh(lhs=gri.gfaccess("auxevol_gfs","RbarDD01"),rhs=Bq.RbarDD[0][1]),
                             lhrh(lhs=gri.gfaccess("auxevol_gfs","RbarDD02"),rhs=Bq.RbarDD[0][2]),
                             lhrh(lhs=gri.gfaccess("auxevol_gfs","RbarDD11"),rhs=Bq.RbarDD[1][1]),
                             lhrh(lhs=gri.gfaccess("auxevol_gfs","RbarDD12"),rhs=Bq.RbarDD[1][2]),
                             lhrh(lhs=gri.gfaccess("auxevol_gfs","RbarDD22"),rhs=Bq.RbarDD[2][2])],
                            params=outC_params),
        loopopts=loopoptions)
    end = time.time()
    print("(BENCH) Finished Ricci C codegen in " + str(end - start) + " seconds.")
    return pickled_outC_function_dict(outC_function_dict)
```
<a id='hamconstraint'></a>
## Step 3.b: Output C code for Hamiltonian constraint \[Back to [top](#toc)\]
$$\label{hamconstraint}$$
Next output the C code for evaluating the Hamiltonian constraint [(**Tutorial**)](Tutorial-BSSN_constraints.ipynb). In the absence of numerical error, this constraint should evaluate to zero. However, it does not, owing to numerical error (typically truncation and roundoff). We will therefore measure the Hamiltonian constraint violation to gauge the accuracy of our simulation, and ultimately determine whether errors are dominated by numerical finite-differencing (truncation) error, as expected.
```
def Hamiltonian():
    """Generate the C kernel Hamiltonian_constraint(), which evaluates the
    Hamiltonian-constraint violation H (a diagnostic; zero in the continuum
    limit) at interior grid points, writing it to
    Ccodesdir/Hamiltonian_constraint.h.

    Returns the pickled outC_function_dict (see pickled_outC_function_dict).
    """
    t0 = time.time()
    print("Generating optimized C code for Hamiltonian constraint. May take a while, depending on CoordSystem.")
    # Kernel body: store H to the aux_gfs array at each interior point.
    kernel_body = fin.FD_outputC("returnstring",
                                 lhrh(lhs=gri.gfaccess("aux_gfs", "H"), rhs=bssncon.H),
                                 params="outCverbose=False")
    name = "Hamiltonian_constraint"
    outCfunction(
        outfile=os.path.join(Ccodesdir, name + ".h"), includes=includes,
        desc="Evaluate the Hamiltonian constraint", name=name,
        params="""rfm_struct *restrict rfmstruct,const paramstruct *restrict params,
REAL *restrict in_gfs, REAL *restrict aux_gfs""",
        body=kernel_body,
        loopopts="InteriorPoints,enable_rfm_precompute")
    t1 = time.time()
    print("(BENCH) Finished Hamiltonian C codegen in " + str(t1 - t0) + " seconds.")
    return pickled_outC_function_dict(outC_function_dict)
```
<a id='enforce3metric'></a>
## Step 3.c: Enforce conformal 3-metric $\det{\bar{\gamma}_{ij}}=\det{\hat{\gamma}_{ij}}$ constraint \[Back to [top](#toc)\]
$$\label{enforce3metric}$$
Then enforce conformal 3-metric $\det{\bar{\gamma}_{ij}}=\det{\hat{\gamma}_{ij}}$ constraint (Eq. 53 of [Ruchlin, Etienne, and Baumgarte (2018)](https://arxiv.org/abs/1712.07658)), as [documented in the corresponding NRPy+ tutorial notebook](Tutorial-BSSN_enforcing_determinant_gammabar_equals_gammahat_constraint.ipynb)
Applying curvilinear boundary conditions should affect the initial data at the outer boundary, and will in general cause the $\det{\bar{\gamma}_{ij}}=\det{\hat{\gamma}_{ij}}$ constraint to be violated there. Thus after we apply these boundary conditions, we must always call the routine for enforcing the $\det{\bar{\gamma}_{ij}}=\det{\hat{\gamma}_{ij}}$ constraint:
```
def gammadet():
    """Generate the C code enforcing the det(gammabar) = det(gammahat)
    constraint (written by EGC into Ccodesdir).

    Returns the pickled outC_function_dict (see pickled_outC_function_dict).
    """
    t0 = time.time()
    print("Generating optimized C code for gamma constraint. May take a while, depending on CoordSystem.")
    # Emit the C function enforcing det(gammahat) = det(gammabar):
    EGC.output_Enforce_Detgammahat_Constraint_Ccode(
        Ccodesdir, exprs=enforce_detg_constraint_symb_expressions)
    t1 = time.time()
    print("(BENCH) Finished gamma constraint C codegen in " + str(t1 - t0) + " seconds.")
    return pickled_outC_function_dict(outC_function_dict)
```
<a id='ccodegen'></a>
## Step 3.d: Generate C code kernels for BSSN expressions, in parallel if possible \[Back to [top](#toc)\]
$$\label{ccodegen}$$
```
# Step 3.d: Generate C code kernels for BSSN expressions, in parallel if possible;
# Step 3.d.i: Create a list of functions we wish to evaluate in parallel (if possible)
funcs = [BrillLindquistID, BSSN_RHSs, Ricci, Hamiltonian, gammadet]
# pickled_outC_func_dict stores outC_function_dict from all
# the subprocesses in the following parallel codegen
pickled_outC_func_dict = []
try:
    if os.name == 'nt':
        # It's a mess to get working in Windows, so we don't bother. :/
        # https://medium.com/@grvsinghal/speed-up-your-python-code-using-multiprocessing-on-windows-and-jupyter-or-ipython-2714b49d6fac
        raise Exception("Parallel codegen currently not available in certain environments, e.g., Windows")
    # Step 3.d.ii: Import the multiprocessing module.
    import multiprocessing
    # Step 3.d.iii: Define master function for parallelization.
    # Note that lambdifying this doesn't work in Python 3
    def master_func(arg):
        return funcs[arg]()
    # Step 3.d.iv: Evaluate list of functions in parallel if possible;
    #              otherwise fallback to serial evaluation:
    pool = multiprocessing.Pool()
    pickled_outC_func_dict.append(pool.map(master_func, range(len(funcs))))
    pool.terminate()
    pool.join()
except Exception:
    # Was a bare "except:", which would also swallow KeyboardInterrupt and
    # SystemExit. "except Exception" still catches the deliberate raise above
    # and any multiprocessing failure, preserving the serial fallback.
    # Steps 3.d.ii-iv, alternate: As fallback, evaluate functions in serial.
    # This will happen on Android and Windows systems
    for func in funcs:
        func()
    pickled_outC_func_dict = []  # Reset, as pickling/unpickling unnecessary for serial codegen (see next line)
# Step 3.d.v Output functions for computing all finite-difference stencils
if enable_FD_functions and len(pickled_outC_func_dict) > 0:
    # First unpickle pickled_outC_func_dict. Each worker returned the flat
    # list [pickle(count), pickle(name0), pickle(body0), ...] produced by
    # pickled_outC_function_dict().
    outCfunc_dict = {}
    for WhichFunc in pickled_outC_func_dict[0]:
        i = 0
        num_elements = pickle.loads(WhichFunc[i]); i += 1
        for _ in range(num_elements):  # loop index unused (was "lst")
            funcname = pickle.loads(WhichFunc[i+0])
            funcbody = pickle.loads(WhichFunc[i+1]); i += 2
            outCfunc_dict[funcname] = funcbody
    # Then store the unpickled outCfunc_dict to outputC's outC_function_dict
    outC_function_dict.update(outCfunc_dict)
if enable_FD_functions:
    # Finally generate finite_difference_functions.h
    fin.output_finite_difference_functions_h(path=Ccodesdir)
```
<a id='cparams_rfm_and_domainsize'></a>
## Step 3.e: Output C codes needed for declaring and setting Cparameters; also set `free_parameters.h` \[Back to [top](#toc)\]
$$\label{cparams_rfm_and_domainsize}$$
Based on declared NRPy+ Cparameters, first we generate `declare_Cparameters_struct.h`, `set_Cparameters_default.h`, and `set_Cparameters[-SIMD].h`.
Then we output `free_parameters.h`, which sets initial data parameters, as well as grid domain & reference metric parameters, applying `domain_size` and `sinh_width`/`SymTP_bScale` (if applicable) as set above
```
# Step 3.e: Output C codes needed for declaring and setting Cparameters; also set free_parameters.h
# Step 3.e.i: Generate declare_Cparameters_struct.h, set_Cparameters_default.h, and set_Cparameters[-SIMD].h
par.generate_Cparameters_Ccodes(os.path.join(Ccodesdir))
# Step 3.e.ii: Set free_parameters.h
# (The triple-quoted string below is C source written verbatim to the file.)
with open(os.path.join(Ccodesdir,"free_parameters.h"),"w") as file:
    file.write("""
// Set free-parameter values.
// Set free-parameter values for BSSN evolution:
params.eta = 1.0;
// Set free parameters for the (Brill-Lindquist) initial data
params.BH1_posn_x = 0.0; params.BH1_posn_y = 0.0; params.BH1_posn_z =+0.5;
params.BH2_posn_x = 0.0; params.BH2_posn_y = 0.0; params.BH2_posn_z =-0.5;
params.BH1_mass = 0.5; params.BH2_mass = 0.5;\n""")
# Append to $Ccodesdir/free_parameters.h reference metric parameters based on generic
# domain_size,sinh_width,sinhv2_const_dr,SymTP_bScale,
# parameters set above.
rfm.out_default_free_parameters_for_rfm(os.path.join(Ccodesdir,"free_parameters.h"),
                                        domain_size,sinh_width,sinhv2_const_dr,SymTP_bScale)
# Step 3.e.iii: Generate set_Nxx_dxx_invdx_params__and__xx.h:
rfm.set_Nxx_dxx_invdx_params__and__xx_h(Ccodesdir)
# Step 3.e.iv: Generate xx_to_Cart.h, which contains xx_to_Cart() for
#              (the mapping from xx->Cartesian) for the chosen
#              CoordSystem:
rfm.xx_to_Cart_h("xx_to_Cart","./set_Cparameters.h",os.path.join(Ccodesdir,"xx_to_Cart.h"))
# Step 3.e.v: Generate declare_Cparameters_struct.h, set_Cparameters_default.h, and set_Cparameters[-SIMD].h
# NOTE(review): this repeats Step 3.e.i above; presumably the second call
# regenerates the headers to pick up any Cparameters registered by the rfm
# calls in between -- confirm whether it is actually required.
par.generate_Cparameters_Ccodes(os.path.join(Ccodesdir))
```
<a id='bc_functs'></a>
# Step 4: Set up boundary condition functions for chosen singular, curvilinear coordinate system \[Back to [top](#toc)\]
$$\label{bc_functs}$$
Next apply singular, curvilinear coordinate boundary conditions [as documented in the corresponding NRPy+ tutorial notebook](Tutorial-Start_to_Finish-Curvilinear_BCs.ipynb)
```
# Generate the C functions and parity-condition tables that apply boundary
# conditions on the chosen singular, curvilinear coordinate system's grid;
# output lands in Ccodesdir/boundary_conditions/.
import CurviBoundaryConditions.CurviBoundaryConditions as cbcs
cbcs.Set_up_CurviBoundaryConditions(os.path.join(Ccodesdir,"boundary_conditions/"),Cparamspath=os.path.join("../"))
```
<a id='mainc'></a>
# Step 5: `BrillLindquist_Playground.c`: The Main C Code \[Back to [top](#toc)\]
$$\label{mainc}$$
```
# Part P0: Define REAL, set the number of ghost cells NGHOSTS (from NRPy+'s FD_CENTDERIVS_ORDER),
# and set the CFL_FACTOR (which can be overwritten at the command line)
# NOTE(review): assumes FD_order, REAL (a C type name string, e.g. "double"),
# and default_CFL_FACTOR were defined in an earlier cell -- not visible here.
with open(os.path.join(Ccodesdir,"BSSN_Playground_REAL__NGHOSTS__CFL_FACTOR.h"), "w") as file:
    file.write("""
// Part P0.a: Set the number of ghost cells, from NRPy+'s FD_CENTDERIVS_ORDER
#define NGHOSTS """+str(int(FD_order/2)+1)+"""
// Part P0.b: Set the numerical precision (REAL) to double, ensuring all floating point
// numbers are stored to at least ~16 significant digits
#define REAL """+REAL+"""
// Part P0.c: Set the CFL Factor. Can be overwritten at command line.
REAL CFL_FACTOR = """+str(default_CFL_FACTOR)+";")
%%writefile $Ccodesdir/BrillLindquist_Playground.c
// Step P0: Define REAL and NGHOSTS; and declare CFL_FACTOR. This header is generated in NRPy+.
#include "BSSN_Playground_REAL__NGHOSTS__CFL_FACTOR.h"
#include "rfm_files/rfm_struct__declare.h"
#include "declare_Cparameters_struct.h"
// All SIMD intrinsics used in SIMD-enabled C code loops are defined here:
#include "SIMD/SIMD_intrinsics.h"
#ifdef SIMD_IS_DISABLED
// Algorithm for upwinding, SIMD-disabled version.
// *NOTE*: This upwinding is backwards from
// usual upwinding algorithms, because the
// upwinding control vector in BSSN (the shift)
// acts like a *negative* velocity.
#define UPWIND_ALG(UpwindVecU) UpwindVecU > 0.0 ? 1.0 : 0.0
#endif
// Step P1: Import needed header files
#include "stdio.h"
#include "stdlib.h"
#include "math.h"
#include "time.h"
#include "stdint.h" // Needed for Windows GCC 6.x compatibility
#ifndef M_PI
#define M_PI 3.141592653589793238462643383279502884L
#endif
#ifndef M_SQRT1_2
#define M_SQRT1_2 0.707106781186547524400844362104849039L
#endif
#define wavespeed 1.0 // Set CFL-based "wavespeed" to 1.0.
// Step P2: Declare the IDX4S(gf,i,j,k) macro, which enables us to store 4-dimensions of
// data in a 1D array. In this case, consecutive values of "i"
// (all other indices held to a fixed value) are consecutive in memory, where
// consecutive values of "j" (fixing all other indices) are separated by
// Nxx_plus_2NGHOSTS0 elements in memory. Similarly, consecutive values of
// "k" are separated by Nxx_plus_2NGHOSTS0*Nxx_plus_2NGHOSTS1 in memory, etc.
#define IDX4S(g,i,j,k) \
( (i) + Nxx_plus_2NGHOSTS0 * ( (j) + Nxx_plus_2NGHOSTS1 * ( (k) + Nxx_plus_2NGHOSTS2 * (g) ) ) )
#define IDX4ptS(g,idx) ( (idx) + (Nxx_plus_2NGHOSTS0*Nxx_plus_2NGHOSTS1*Nxx_plus_2NGHOSTS2) * (g) )
#define IDX3S(i,j,k) ( (i) + Nxx_plus_2NGHOSTS0 * ( (j) + Nxx_plus_2NGHOSTS1 * ( (k) ) ) )
#define LOOP_REGION(i0min,i0max, i1min,i1max, i2min,i2max) \
for(int i2=i2min;i2<i2max;i2++) for(int i1=i1min;i1<i1max;i1++) for(int i0=i0min;i0<i0max;i0++)
#define LOOP_ALL_GFS_GPS(ii) _Pragma("omp parallel for") \
for(int (ii)=0;(ii)<Nxx_plus_2NGHOSTS_tot*NUM_EVOL_GFS;(ii)++)
// Step P3: Set UUGF and VVGF macros, as well as xx_to_Cart()
#include "boundary_conditions/gridfunction_defines.h"
// Step P4: Set xx_to_Cart(const paramstruct *restrict params,
// REAL *restrict xx[3],
// const int i0,const int i1,const int i2,
// REAL xCart[3]),
// which maps xx->Cartesian via
// {xx[0][i0],xx[1][i1],xx[2][i2]}->{xCart[0],xCart[1],xCart[2]}
#include "xx_to_Cart.h"
// Step P5: Defines set_Nxx_dxx_invdx_params__and__xx(const int EigenCoord, const int Nxx[3],
// paramstruct *restrict params, REAL *restrict xx[3]),
// which sets params Nxx,Nxx_plus_2NGHOSTS,dxx,invdx, and xx[] for
// the chosen Eigen-CoordSystem if EigenCoord==1, or
// CoordSystem if EigenCoord==0.
#include "set_Nxx_dxx_invdx_params__and__xx.h"
// Step P6: Include basic functions needed to impose curvilinear
// parity and boundary conditions.
#include "boundary_conditions/CurviBC_include_Cfunctions.h"
// Step P7: Implement the algorithm for upwinding.
// *NOTE*: This upwinding is backwards from
// usual upwinding algorithms, because the
// upwinding control vector in BSSN (the shift)
// acts like a *negative* velocity.
//#define UPWIND_ALG(UpwindVecU) UpwindVecU > 0.0 ? 1.0 : 0.0
// Step P8: Include function for enforcing detgammabar constraint.
#include "enforce_detgammahat_constraint.h"
// Step P9: Find the CFL-constrained timestep
#include "find_timestep.h"
// Step P10: Declare function necessary for setting up the initial data.
// Step P10.a: Define BSSN_ID() for BrillLindquist initial data
// Step P10.b: Set the generic driver function for setting up BSSN initial data
#include "initial_data.h"
// Step P11: Declare function for evaluating Hamiltonian constraint (diagnostic)
#include "Hamiltonian_constraint.h"
// Step P12: Declare rhs_eval function, which evaluates BSSN RHSs
#include "rhs_eval.h"
// Step P13: Declare Ricci_eval function, which evaluates Ricci tensor
#include "Ricci_eval.h"
// main() function:
// Step 0: Read command-line input, set up grid structure, allocate memory for gridfunctions, set up coordinates
// Step 1: Set up initial data to an exact solution
// Step 2: Start the timer, for keeping track of how fast the simulation is progressing.
// Step 3: Integrate the initial data forward in time using the chosen RK-like Method of
// Lines timestepping algorithm, and output periodic simulation diagnostics
// Step 3.a: Output 2D data file periodically, for visualization
// Step 3.b: Step forward one timestep (t -> t+dt) in time using
// chosen RK-like MoL timestepping algorithm
// Step 3.c: If t=t_final, output conformal factor & Hamiltonian
// constraint violation to 2D data file
// Step 3.d: Progress indicator printing to stderr
// Step 4: Free all allocated memory
int main(int argc, const char *argv[]) {
    paramstruct params;
#include "set_Cparameters_default.h"

    // Step 0a: Read command-line input, error out if nonconformant
    if((argc != 4 && argc != 5) || atoi(argv[1]) < NGHOSTS || atoi(argv[2]) < NGHOSTS || atoi(argv[3]) < 2 /* FIXME; allow for axisymmetric sims */) {
        fprintf(stderr,"Error: Expected three command-line arguments: ./BrillLindquist_Playground Nx0 Nx1 Nx2,\n");
        fprintf(stderr,"where Nx[0,1,2] is the number of grid points in the 0, 1, and 2 directions.\n");
        fprintf(stderr,"Nx[] MUST BE larger than NGHOSTS (= %d)\n",NGHOSTS);
        exit(1);
    }
    // Optional 5th argument overrides the CFL factor.
    if(argc == 5) {
        CFL_FACTOR = strtod(argv[4],NULL);
        if(CFL_FACTOR > 0.5 && atoi(argv[3])!=2) {
            fprintf(stderr,"WARNING: CFL_FACTOR was set to %e, which is > 0.5.\n",CFL_FACTOR);
            fprintf(stderr," This will generally only be stable if the simulation is purely axisymmetric\n");
            fprintf(stderr," However, Nx2 was set to %d>2, which implies a non-axisymmetric simulation\n",atoi(argv[3]));
        }
    }
    // Step 0b: Set up numerical grid structure, first in space...
    const int Nxx[3] = { atoi(argv[1]), atoi(argv[2]), atoi(argv[3]) };
    if(Nxx[0]%2 != 0 || Nxx[1]%2 != 0 || Nxx[2]%2 != 0) {
        fprintf(stderr,"Error: Cannot guarantee a proper cell-centered grid if number of grid cells not set to even number.\n");
        fprintf(stderr," For example, in case of angular directions, proper symmetry zones will not exist.\n");
        exit(1);
    }
    // Step 0c: Set free parameters, overwriting Cparameters defaults
    //          by hand or with command-line input, as desired.
#include "free_parameters.h"
    // Step 0d: Uniform coordinate grids are stored to *xx[3]
    REAL *xx[3];
    // Step 0d.i: Set bcstruct
    bc_struct bcstruct;
    {
        int EigenCoord = 1;
        // Step 0d.ii: Call set_Nxx_dxx_invdx_params__and__xx(), which sets
        //             params Nxx,Nxx_plus_2NGHOSTS,dxx,invdx, and xx[] for the
        //             chosen Eigen-CoordSystem.
        set_Nxx_dxx_invdx_params__and__xx(EigenCoord, Nxx, &params, xx);
        // Step 0d.iii: Set Nxx_plus_2NGHOSTS_tot
#include "set_Cparameters-nopointer.h"
        const int Nxx_plus_2NGHOSTS_tot = Nxx_plus_2NGHOSTS0*Nxx_plus_2NGHOSTS1*Nxx_plus_2NGHOSTS2;
        // Step 0e: Find ghostzone mappings; set up bcstruct
#include "boundary_conditions/driver_bcstruct.h"
        // Step 0e.i: Free allocated space for xx[][] array
        for(int i=0;i<3;i++) free(xx[i]);
    }
    // Step 0f: Call set_Nxx_dxx_invdx_params__and__xx(), which sets
    //          params Nxx,Nxx_plus_2NGHOSTS,dxx,invdx, and xx[] for the
    //          chosen (non-Eigen) CoordSystem.
    int EigenCoord = 0;
    set_Nxx_dxx_invdx_params__and__xx(EigenCoord, Nxx, &params, xx);
    // Step 0g: Set all C parameters "blah" for params.blah, including
    //          Nxx_plus_2NGHOSTS0 = params.Nxx_plus_2NGHOSTS0, etc.
#include "set_Cparameters-nopointer.h"
    const int Nxx_plus_2NGHOSTS_tot = Nxx_plus_2NGHOSTS0*Nxx_plus_2NGHOSTS1*Nxx_plus_2NGHOSTS2;
    // Step 0h: Time coordinate parameters
    const REAL t_final = domain_size; /* Final time is set so that at t=t_final,
                                       * data at the origin have not been corrupted
                                       * by the approximate outer boundary condition */
    // Step 0i: Set timestep based on smallest proper distance between gridpoints and CFL factor
    REAL dt = find_timestep(&params, xx);
    //fprintf(stderr,"# Timestep set to = %e\n",(double)dt);
    int N_final = (int)(t_final / dt + 0.5); // The number of points in time.
                                             // Add 0.5 to account for C rounding down
                                             // typecasts to integers.
    int output_every_N = (int)((REAL)N_final/800.0);
    if(output_every_N == 0) output_every_N = 1;
    // Step 0j: Error out if the number of auxiliary gridfunctions outnumber evolved gridfunctions.
    //          This is a limitation of the RK method. You are always welcome to declare & allocate
    //          additional gridfunctions by hand.
    if(NUM_AUX_GFS > NUM_EVOL_GFS) {
        fprintf(stderr,"Error: NUM_AUX_GFS > NUM_EVOL_GFS. Either reduce the number of auxiliary gridfunctions,\n");
        fprintf(stderr," or allocate (malloc) by hand storage for *diagnostic_output_gfs. \n");
        exit(1);
    }
    // Step 0k: Allocate memory for gridfunctions
#include "MoLtimestepping/RK_Allocate_Memory.h"
    REAL *restrict auxevol_gfs = (REAL *)malloc(sizeof(REAL) * NUM_AUXEVOL_GFS * Nxx_plus_2NGHOSTS_tot);
    // Step 0l: Set up precomputed reference metric arrays
    // Step 0l.i: Allocate space for precomputed reference metric arrays.
#include "rfm_files/rfm_struct__malloc.h"
    // Step 0l.ii: Define precomputed reference metric arrays.
    {
#include "set_Cparameters-nopointer.h"
#include "rfm_files/rfm_struct__define.h"
    }
    // Step 1: Set up initial data to an exact solution
    initial_data(&params, xx, y_n_gfs);
    // Step 1b: Apply boundary conditions, as initial data
    //          are sometimes ill-defined in ghost zones.
    //          E.g., spherical initial data might not be
    //          properly defined at points where r=-1.
    apply_bcs_curvilinear(&params, &bcstruct, NUM_EVOL_GFS,evol_gf_parity, y_n_gfs);
    enforce_detgammahat_constraint(&rfmstruct, &params, y_n_gfs);
    // Step 2: Start the timer, for keeping track of how fast the simulation is progressing.
#ifdef __linux__ // Use high-precision timer in Linux.
    struct timespec start, end;
    clock_gettime(CLOCK_REALTIME, &start);
#else // Resort to low-resolution, standards-compliant timer in non-Linux OSs
    // http://www.cplusplus.com/reference/ctime/time/
    time_t start_timer,end_timer;
    time(&start_timer); // Resolution of one second...
#endif
    // Step 3: Integrate the initial data forward in time using the chosen RK-like Method of
    //         Lines timestepping algorithm, and output periodic simulation diagnostics
    for(int n=0;n<=N_final;n++) { // Main loop to progress forward in time.
        // Step 3.a: Output 2D data file periodically, for visualization
        if(n%100 == 0) {
            // Evaluate Hamiltonian constraint violation
            Hamiltonian_constraint(&rfmstruct, &params, y_n_gfs, diagnostic_output_gfs);
            char filename[100];
            sprintf(filename,"out%d-%08d.txt",Nxx[0],n);
            FILE *out2D = fopen(filename, "w");
            LOOP_REGION(NGHOSTS,Nxx_plus_2NGHOSTS0-NGHOSTS,
                        NGHOSTS,Nxx_plus_2NGHOSTS1-NGHOSTS,
                        NGHOSTS,Nxx_plus_2NGHOSTS2-NGHOSTS) {
                const int idx = IDX3S(i0,i1,i2);
                REAL xx0 = xx[0][i0];
                REAL xx1 = xx[1][i1];
                REAL xx2 = xx[2][i2];
                REAL xCart[3];
                xx_to_Cart(&params,xx,i0,i1,i2,xCart);
                fprintf(out2D,"%e %e %e %e\n",
                        xCart[1],xCart[2],
                        y_n_gfs[IDX4ptS(CFGF,idx)],log10(fabs(diagnostic_output_gfs[IDX4ptS(HGF,idx)])));
            }
            fclose(out2D);
        }
        // Step 3.b: Step forward one timestep (t -> t+dt) in time using
        //           chosen RK-like MoL timestepping algorithm
#include "MoLtimestepping/RK_MoL.h"
        // Step 3.c: If t=t_final, output conformal factor & Hamiltonian
        //           constraint violation to 2D data file
        // NOTE(review): condition fires at n==N_final-1, i.e., one step before
        // the last loop iteration -- confirm against the "t=t_final" comment.
        if(n==N_final-1) {
            // Evaluate Hamiltonian constraint violation
            Hamiltonian_constraint(&rfmstruct, &params, y_n_gfs, diagnostic_output_gfs);
            char filename[100];
            sprintf(filename,"out%d.txt",Nxx[0]);
            FILE *out2D = fopen(filename, "w");
            const int i0MIN=NGHOSTS; // In spherical, r=Delta r/2.
            const int i1mid=Nxx_plus_2NGHOSTS1/2;
            const int i2mid=Nxx_plus_2NGHOSTS2/2;
            LOOP_REGION(NGHOSTS,Nxx_plus_2NGHOSTS0-NGHOSTS,
                        NGHOSTS,Nxx_plus_2NGHOSTS1-NGHOSTS,
                        NGHOSTS,Nxx_plus_2NGHOSTS2-NGHOSTS) {
                REAL xx0 = xx[0][i0];
                REAL xx1 = xx[1][i1];
                REAL xx2 = xx[2][i2];
                REAL xCart[3];
                xx_to_Cart(&params,xx,i0,i1,i2,xCart);
                int idx = IDX3S(i0,i1,i2);
                fprintf(out2D,"%e %e %e %e\n",xCart[1],xCart[2], y_n_gfs[IDX4ptS(CFGF,idx)],
                        log10(fabs(diagnostic_output_gfs[IDX4ptS(HGF,idx)])));
            }
            fclose(out2D);
        }
        // Step 3.d: Progress indicator printing to stderr
        // Step 3.d.i: Measure average time per iteration
#ifdef __linux__ // Use high-precision timer in Linux.
        clock_gettime(CLOCK_REALTIME, &end);
        const long long unsigned int time_in_ns = 1000000000L * (end.tv_sec - start.tv_sec) + end.tv_nsec - start.tv_nsec;
#else // Resort to low-resolution, standards-compliant timer in non-Linux OSs
        time(&end_timer); // Resolution of one second...
        REAL time_in_ns = difftime(end_timer,start_timer)*1.0e9+0.5; // Round up to avoid divide-by-zero.
#endif
        // NOTE(review): at n==0 the divisions by n below divide by zero; with
        // IEEE arithmetic the n==0 progress line prints inf/nan (harmless),
        // but a "n > 0" guard would be cleaner.
        const REAL s_per_iteration_avg = ((REAL)time_in_ns / (REAL)n) / 1.0e9;
        const int iterations_remaining = N_final - n;
        const REAL time_remaining_in_mins = s_per_iteration_avg * (REAL)iterations_remaining / 60.0;
        const REAL num_RHS_pt_evals = (REAL)(Nxx[0]*Nxx[1]*Nxx[2]) * 4.0 * (REAL)n; // 4 RHS evals per gridpoint for RK4
        const REAL RHS_pt_evals_per_sec = num_RHS_pt_evals / ((REAL)time_in_ns / 1.0e9);
        // Step 3.d.ii: Output simulation progress to stderr
        if(n % 10 == 0) {
            fprintf(stderr,"%c[2K", 27); // Clear the line
            fprintf(stderr,"It: %d t=%.2f dt=%.2e | %.1f%%; ETA %.0f s | t/h %.2f | gp/s %.2e\r", // \r is carriage return, move cursor to the beginning of the line
                    n, n * (double)dt, (double)dt, (double)(100.0 * (REAL)n / (REAL)N_final),
                    (double)time_remaining_in_mins*60, (double)(dt * 3600.0 / s_per_iteration_avg), (double)RHS_pt_evals_per_sec);
            fflush(stderr); // Flush the stderr buffer
        } // End progress indicator if(n % 10 == 0)
    } // End main loop to progress forward in time.
    fprintf(stderr,"\n"); // Clear the final line of output from progress indicator.
    // Step 4: Free all allocated memory
#include "rfm_files/rfm_struct__freemem.h"
#include "boundary_conditions/bcstruct_freemem.h"
#include "MoLtimestepping/RK_Free_Memory.h"
    free(auxevol_gfs);
    for(int i=0;i<3;i++) free(xx[i]);
    return 0;
}
```
<a id='compileexec'></a>
# Step 6: Compile generated C codes & perform the black hole collision calculation \[Back to [top](#toc)\]
$$\label{compileexec}$$
To aid in the cross-platform-compatible (with Windows, MacOS, & Linux) compilation and execution, we make use of `cmdline_helper` [(**Tutorial**)](Tutorial-cmdline_helper.ipynb).
```
import cmdline_helper as cmd
# NOTE(review): CFL_FACTOR=1.0 (> 0.5) is passed to the executable below; the
# C code warns this is only stable for purely axisymmetric runs -- which holds
# here since Nx2=2.
CFL_FACTOR=1.0
cmd.C_compile(os.path.join(Ccodesdir,"BrillLindquist_Playground.c"),
              os.path.join(outdir,"BrillLindquist_Playground"),compile_mode="optimized")
# Alternative: custom compile string (kept for reference):
# cmd.C_compile(os.path.join(Ccodesdir,"BrillLindquist_Playground.c"),
#               os.path.join(outdir,"BrillLindquist_Playground"),compile_mode="custom",
#               custom_compile_string="gcc -O2 -g -march=native "+
#               os.path.join(Ccodesdir,"BrillLindquist_Playground.c")+
#               " -o "+os.path.join(outdir,"BrillLindquist_Playground")+" -lm")
# Change to output directory
os.chdir(outdir)
# Clean up existing output files
cmd.delete_existing_files("out*.txt")
cmd.delete_existing_files("out*.png")
# Run executable at two resolutions (args: Nx0 Nx1 Nx2 CFL_FACTOR), enabling
# the convergence check in a later step:
cmd.Execute("BrillLindquist_Playground", "72 12 2 "+str(CFL_FACTOR))
cmd.Execute("BrillLindquist_Playground", "96 16 2 "+str(CFL_FACTOR))
# Return to root directory
os.chdir(os.path.join("../../"))
# The commented-out blocks below generate shell scripts sweeping icc/gcc
# optimization-flag combinations for benchmarking (kept for reference):
# with open("compilescript", "w") as file:
#     count=0
#     for custom_compile_string0 in ["-O2","-O",""]:
#         for custom_compile_string1 in ["","-fp-model fast=2 -no-prec-div"]:
#             for custom_compile_string2 in ["","-qopt-prefetch=3","-qopt-prefetch=4"]:
#                 for custom_compile_string3 in ["","-unroll"]:
#                     for custom_compile_string4 in ["","-qoverride-limits"]:
#                         exc= "BL"+custom_compile_string0+custom_compile_string1.replace(" ","")+custom_compile_string2+custom_compile_string3+custom_compile_string4
#                         ccs = "icc -qopenmp -xHost "+custom_compile_string0+" "+custom_compile_string1+" "+custom_compile_string2+" "+custom_compile_string3+" "+custom_compile_string4+" BSSN_Two_BHs_Collide_Ccodes/BrillLindquist_Playground.c -o "+exc
#                         file.write(ccs+" &\n")
#                         if count>0 and count%16==0:
#                             file.write("wait\n")
#                         count += 1
#     file.write("wait\n")
# with open("compilescriptgcc", "w") as file:
#     count=0
#     for custom_compile_string0 in ["-Ofast","-O2","-O3","-O",""]:
#         for custom_compile_string1 in ["-fopenmp"]:
#             for custom_compile_string2 in ["","-march=native"]:
#                 for custom_compile_string3 in ["","-funroll-loops","-funroll-all-loops"]:
#                     for custom_compile_string4 in [""]:
#                         exc= "BL"+custom_compile_string0+custom_compile_string1+custom_compile_string2+custom_compile_string3+custom_compile_string4
#                         ccs = "gcc "+custom_compile_string0+" "+custom_compile_string1+" "+custom_compile_string2+" "+custom_compile_string3+" "+custom_compile_string4+" BSSN_Two_BHs_Collide_Ccodes/BrillLindquist_Playground.c -o "+exc
#                         file.write(ccs+" -lm &\n")
#                         if count>0 and count%16==0:
#                             file.write("wait\n")
#                         count += 1
#     file.write("wait\n")
print("(BENCH) Finished this code cell.")
```
<a id='visualize'></a>
# Step 7: Visualize the output! \[Back to [top](#toc)\]
$$\label{visualize}$$
In this section we will generate a movie, plotting the conformal factor of these initial data on a 2D grid, such that darker colors imply stronger gravitational fields. Hence, we see the two black holes initially centered at $z/M=\pm 0.5$, where $M$ is an arbitrary mass scale (conventionally the [ADM mass](https://en.wikipedia.org/w/index.php?title=ADM_formalism&oldid=846335453) is chosen), and our formulation of Einstein's equations adopts $G=c=1$ [geometrized units](https://en.wikipedia.org/w/index.php?title=Geometrized_unit_system&oldid=861682626).
<a id='installdownload'></a>
## Step 7.a: Install `scipy` and download `ffmpeg` if they are not yet installed/downloaded \[Back to [top](#toc)\]
$$\label{installdownload}$$
Note that if you are not running this within `mybinder`, but on a Windows system, `ffmpeg` must be installed using a separate package (on [this site](http://ffmpeg.org/)), or if running Jupyter within Anaconda, use the command: `conda install -c conda-forge ffmpeg`.
```
# Install scipy (needed for griddata interpolation in the next cell) if missing.
!pip install scipy > /dev/null
# "which ffmpeg" succeeds (and $? echoes as "0") only if ffmpeg is on the PATH.
check_for_ffmpeg = !which ffmpeg >/dev/null && echo $?
if check_for_ffmpeg != ['0']:
    print("Couldn't find ffmpeg, so I'll download it.")
    # Courtesy https://johnvansickle.com/ffmpeg/
    !wget http://astro.phys.wvu.edu/zetienne/ffmpeg-static-amd64-johnvansickle.tar.xz
    !tar Jxf ffmpeg-static-amd64-johnvansickle.tar.xz
    print("Copying ffmpeg to ~/.local/bin/. Assumes ~/.local/bin is in the PATH.")
    !mkdir ~/.local/bin/
    !cp ffmpeg-static-amd64-johnvansickle/ffmpeg ~/.local/bin/
    print("If this doesn't work, then install ffmpeg yourself. It should work fine on mybinder.")
```
<a id='genimages'></a>
## Step 7.b: Generate images for visualization animation \[Back to [top](#toc)\]
$$\label{genimages}$$
Here we loop through the data files output by the executable compiled and run in [the previous step](#mainc), generating a [png](https://en.wikipedia.org/wiki/Portable_Network_Graphics) image for each data file.
**Special thanks to Terrence Pierre Jacques. His work with the first versions of these scripts greatly contributed to the scripts as they exist below.**
```
## VISUALIZATION ANIMATION, PART 1: Generate PNGs, one per frame of movie ##
import numpy as np
from scipy.interpolate import griddata
import matplotlib.pyplot as plt
from matplotlib.pyplot import savefig
from IPython.display import HTML
import matplotlib.image as mgimg
import glob
import sys
from matplotlib import animation
# Collect the periodic 2D output files from the higher-resolution (96^3) run:
globby = glob.glob(os.path.join(outdir,'out96-00*.txt'))
file_list = []
for x in sorted(globby):
    file_list.append(x)
# Plot window (in units of M), symmetric about the origin:
bound=1.4
pl_xmin = -bound
pl_xmax = +bound
pl_ymin = -bound
pl_ymax = +bound
for filename in file_list:
    fig = plt.figure()
    # Columns: Cartesian y, z, conformal factor, log10|Hamiltonian violation|.
    # NOTE(review): Ham is loaded but not plotted in this cell.
    x,y,cf,Ham = np.loadtxt(filename).T #Transposed for easier unpacking
    plotquantity = cf
    plotdescription = "Numerical Soln."
    plt.title("Black Hole Head-on Collision (conf factor)")
    plt.xlabel("y/M")
    plt.ylabel("z/M")
    # Resample the scattered curvilinear-grid samples onto a uniform 300x300 grid:
    grid_x, grid_y = np.mgrid[pl_xmin:pl_xmax:300j, pl_ymin:pl_ymax:300j]
    points = np.zeros((len(x), 2))
    for i in range(len(x)):
        # Zach says: No idea why x and y get flipped...
        points[i][0] = y[i]
        points[i][1] = x[i]
    # NOTE(review): the nearest-neighbor result ("grid") appears unused here --
    # only the cubic interpolation is plotted; confirm no later cell needs it
    # before removing.
    grid = griddata(points, plotquantity, (grid_x, grid_y), method='nearest')
    gridcub = griddata(points, plotquantity, (grid_x, grid_y), method='cubic')
    im = plt.imshow(gridcub, extent=(pl_xmin,pl_xmax, pl_ymin,pl_ymax))
    ax = plt.colorbar()
    ax.set_label(plotdescription)
    savefig(os.path.join(filename+".png"),dpi=150)
    plt.close(fig)
    # Single-line progress indicator (ESC[2K clears the line; \r rewinds it):
    sys.stdout.write("%c[2K" % 27)
    sys.stdout.write("Processing file "+filename+"\r")
    sys.stdout.flush()
```
<a id='genvideo'></a>
## Step 7.c: Generate visualization animation \[Back to [top](#toc)\]
$$\label{genvideo}$$
In the following step, [ffmpeg](http://ffmpeg.org) is used to generate an [mp4](https://en.wikipedia.org/wiki/MPEG-4) video file, which can be played directly from this Jupyter notebook.
```
## VISUALIZATION ANIMATION, PART 2: Combine PNGs to generate movie ##
# https://stackoverflow.com/questions/14908576/how-to-remove-frame-from-matplotlib-pyplot-figure-vs-matplotlib-figure-frame
# https://stackoverflow.com/questions/23176161/animating-pngs-in-matplotlib-using-artistanimation
# Frameless figure with axes spanning the full canvas, so each PNG fills the
# entire video frame.
fig = plt.figure(frameon=False)
ax = fig.add_axes([0, 0, 1, 1])
ax.axis('off')

# ArtistAnimation expects one list of artists per frame.
myimages = []
for i in range(len(file_list)):
    img = mgimg.imread(file_list[i]+".png")
    imgplot = plt.imshow(img)
    myimages.append([imgplot])

ani = animation.ArtistAnimation(fig, myimages, interval=100, repeat_delay=1000)
plt.close()
# Writing mp4 requires ffmpeg (installed/downloaded in Step 7.a).
ani.save(os.path.join(outdir,'BH_Head-on_Collision.mp4'), fps=5,dpi=150)

## VISUALIZATION ANIMATION, PART 3: Display movie as embedded HTML5 (see next cell) ##
# https://stackoverflow.com/questions/18019477/how-can-i-play-a-local-video-in-my-ipython-notebook
# Embed video based on suggestion:
# https://stackoverflow.com/questions/39900173/jupyter-notebook-html-cell-magic-with-python-variable
HTML("""
<video width="480" height="360" controls>
<source src=\""""+os.path.join(outdir,"BH_Head-on_Collision.mp4")+"""\" type="video/mp4">
</video>
""")
```
<a id='convergence'></a>
# Step 8: Plot the numerical error, and confirm that it converges to zero with increasing numerical resolution (sampling) \[Back to [top](#toc)\]
$$\label{convergence}$$
```
# Load the final-time Nr=96 output; columns are the two position coordinates
# (plotted as x/M and z/M below), the conformal factor, and the quantity
# plotted as log_{10}|Ham| in the figure title.
x96,y96,valuesCF96,valuesHam96 = np.loadtxt(os.path.join(outdir,'out96.txt')).T #Transposed for easier unpacking

# Plotting window (units of M) and a uniform 100x100 interpolation grid.
pl_xmin = -2.5
pl_xmax = +2.5
pl_ymin = -2.5
pl_ymax = +2.5
grid_x, grid_y = np.mgrid[pl_xmin:pl_xmax:100j, pl_ymin:pl_ymax:100j]

# Pack the irregular sample coordinates into an (N,2) array for griddata.
points96 = np.zeros((len(x96), 2))
for i in range(len(x96)):
    points96[i][0] = x96[i]
    points96[i][1] = y96[i]

# NOTE(review): the two conformal-factor interpolants are immediately
# overwritten by the Hamiltonian-constraint interpolants on the next two
# lines; only the latter are used below.
grid96 = griddata(points96, valuesCF96, (grid_x, grid_y), method='nearest')
grid96cub = griddata(points96, valuesCF96, (grid_x, grid_y), method='cubic')
grid96 = griddata(points96, valuesHam96, (grid_x, grid_y), method='nearest')
grid96cub = griddata(points96, valuesHam96, (grid_x, grid_y), method='cubic')

# fig, ax = plt.subplots()
plt.clf()
plt.title("96x16 Num. Err.: log_{10}|Ham|")
plt.xlabel("x/M")
plt.ylabel("z/M")
fig96cub = plt.imshow(grid96cub.T, extent=(pl_xmin,pl_xmax, pl_ymin,pl_ymax))
cb = plt.colorbar(fig96cub)

# Load the lower-resolution Nr=72 run and interpolate its constraint data
# onto the same uniform grid.
x72,y72,valuesCF72,valuesHam72 = np.loadtxt(os.path.join(outdir,'out72.txt')).T #Transposed for easier unpacking
points72 = np.zeros((len(x72), 2))
for i in range(len(x72)):
    points72[i][0] = x72[i]
    points72[i][1] = y72[i]
grid72 = griddata(points72, valuesHam72, (grid_x, grid_y), method='nearest')

# Difference grid, plus 1-D slices taken along the grid row j==49
# (the row nearest the y=0 axis of the 100-point grid).
griddiff_72_minus_96 = np.zeros((100,100))
griddiff_72_minus_96_1darray = np.zeros(100*100)
gridx_1darray_yeq0 = np.zeros(100)
grid72_1darray_yeq0 = np.zeros(100)
grid96_1darray_yeq0 = np.zeros(100)
count = 0
for i in range(100):
    for j in range(100):
        griddiff_72_minus_96[i][j] = grid72[i][j] - grid96[i][j]
        griddiff_72_minus_96_1darray[count] = griddiff_72_minus_96[i][j]
        if j==49:
            gridx_1darray_yeq0[i] = grid_x[i][j]
            # Shift the Nr=72 curve by log10((72/96)^4): if the error is
            # 4th-order convergent, the shifted curve overlaps the Nr=96 one.
            grid72_1darray_yeq0[i] = grid72[i][j] + np.log10((72./96.)**4)
            grid96_1darray_yeq0[i] = grid96[i][j]
        count = count + 1

plt.clf()
fig, ax = plt.subplots()
plt.title("4th-order Convergence, at t/M=7.5 (post-merger; horiz at x/M=+/-1)")
plt.xlabel("x/M")
plt.ylabel("log10(Relative error)")
ax.plot(gridx_1darray_yeq0, grid96_1darray_yeq0, 'k-', label='Nr=96')
ax.plot(gridx_1darray_yeq0, grid72_1darray_yeq0, 'k--', label='Nr=72, mult by (72/96)^4')
ax.set_ylim([-8.5,0.5])
legend = ax.legend(loc='lower right', shadow=True, fontsize='x-large')
legend.get_frame().set_facecolor('C1')
plt.show()
```
<a id='latex_pdf_output'></a>
# Step 9: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](#toc)\]
$$\label{latex_pdf_output}$$
The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename
[Tutorial-Start_to_Finish-BSSNCurvilinear-Two_BHs_Collide.pdf](Tutorial-Start_to_Finish-BSSNCurvilinear-Two_BHs_Collide.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)
```
# Convert this notebook to a LaTeX-formatted PDF in the NRPy+ root tutorial
# directory (see Step 9 above for the output filename).
import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface
cmd.output_Jupyter_notebook_to_LaTeXed_PDF("Tutorial-Start_to_Finish-BSSNCurvilinear-Two_BHs_Collide")
```
| github_jupyter |
```
# import the necessary packages
import argparse
import os

import cv2
import imutils
import numpy as np
from imutils import paths
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
# NOTE: sklearn.cross_validation was removed in scikit-learn 0.20;
# train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
def image_to_feature_vector(image, size=(128, 128)):
    """Resize `image` to a fixed spatial size and return its raw pixel
    intensities as a flat 1-D feature vector."""
    resized = cv2.resize(image, size)
    return resized.flatten()
def extract_color_histogram(image, bins=(32, 32, 32)):
    """Compute a flattened 3-D color histogram of `image` in HSV space,
    with `bins` bins per channel, normalized in an OpenCV-version-aware way."""
    hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    channel_ranges = [0, 180, 0, 256, 0, 256]
    hist = cv2.calcHist([hsv_image], [0, 1, 2], None, bins, channel_ranges)
    if imutils.is_cv2():
        # OpenCV 2.4.x: cv2.normalize returns the normalized histogram.
        hist = cv2.normalize(hist)
    else:
        # OpenCV 3+: normalize in place (destination passed explicitly).
        cv2.normalize(hist, hist)
    return hist.flatten()
# construct the argument parse and parse the arguments:
# -d/--dataset: directory of input images; -k/--neighbors: k for k-NN below.
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", required=True,
                help="path to input dataset")
ap.add_argument("-k", "--neighbors", type=int, default=1,
                help="# of nearest neighbors for classification")
args = vars(ap.parse_args())

# grab the list of images that we'll be describing
print("[INFO] handling images...")
imagePaths = list(paths.list_images(args["dataset"]))

# initialize the raw pixel intensities matrix, the features matrix,
# and labels list
rawImages = []
features = []
labels = []

# loop over the input images
for (i, imagePath) in enumerate(imagePaths):
    # load the image and extract the class label
    # our images were named as labels.image_number.format
    image = cv2.imread(imagePath)
    # get the labels from the name of the images by extracting the string before "."
    label = imagePath.split(os.path.sep)[-1].split(".")[0]
    # extract raw pixel intensity "features",
    # followed by a color histogram to characterize the color distribution
    # of the pixels in the image
    pixels = image_to_feature_vector(image)
    hist = extract_color_histogram(image)
    # add them to the raw images, features, and labels matrices
    rawImages.append(pixels)
    features.append(hist)
    labels.append(label)
    # show an update every 200 images, and for the last image
    if i > 0 and ((i + 1)% 200 == 0 or i ==len(imagePaths)-1):
        print("[INFO] processed {}/{}".format(i+1, len(imagePaths)))

# show some information on the memory consumed by the raw images
# matrix and features matrix
rawImages = np.array(rawImages)
features = np.array(features)
labels = np.array(labels)
print("[INFO] pixels matrix: {:.2f}MB".format(
    rawImages.nbytes / (1024 * 1000.0)))
print("[INFO] features matrix: {:.2f}MB".format(
    features.nbytes / (1024 * 1000.0)))

# partition the data into training and testing splits, using 85%
# of the data for training and the remaining 15% for testing
(trainRI, testRI, trainRL, testRL) = train_test_split(
    rawImages, labels, test_size=0.15, random_state=42)
(trainFeat, testFeat, trainLabels, testLabels) = train_test_split(
    features, labels, test_size=0.15, random_state=42)
# ---------------------------------------------------------------------------
# Evaluate three classifier families (k-NN, MLP, SVC), each on both feature
# sets: raw pixel intensities and HSV color histograms.
# ---------------------------------------------------------------------------

# k-NN on raw pixels
print("\n")
print("[INFO] evaluating raw pixel accuracy...")
model = KNeighborsClassifier(n_neighbors=args["neighbors"])
model.fit(trainRI, trainRL)
acc = model.score(testRI, testRL)
print("[INFO] k-NN classifier: k=%d" % args["neighbors"])
print("[INFO] raw pixel accuracy: {:.2f}%".format(acc * 100))

# k-NN on color histograms
print("\n")
print("[INFO] evaluating histogram accuracy...")
model = KNeighborsClassifier(n_neighbors=args["neighbors"])
model.fit(trainFeat, trainLabels)
acc = model.score(testFeat, testLabels)
print("[INFO] k-NN classifier: k=%d" % args["neighbors"])
print("[INFO] histogram accuracy: {:.2f}%".format(acc * 100))

# neural network on raw pixels
print("\n")
print("[INFO] evaluating raw pixel accuracy...")
model = MLPClassifier(hidden_layer_sizes=(50,), max_iter=1000, alpha=1e-4,
                      solver='sgd', tol=1e-4, random_state=1,
                      learning_rate_init=.1)
model.fit(trainRI, trainRL)
acc = model.score(testRI, testRL)
print("[INFO] neural network raw pixel accuracy: {:.2f}%".format(acc * 100))

# neural network on color histograms
print("\n")
print("[INFO] evaluating histogram accuracy...")
model = MLPClassifier(hidden_layer_sizes=(50,), max_iter=1000, alpha=1e-4,
                      solver='sgd', tol=1e-4, random_state=1,
                      learning_rate_init=.1)
model.fit(trainFeat, trainLabels)
acc = model.score(testFeat, testLabels)
print("[INFO] neural network histogram accuracy: {:.2f}%".format(acc * 100))

# SVC on raw pixels
print("\n")
print("[INFO] evaluating raw pixel accuracy...")
model = SVC(max_iter=1000,class_weight='balanced')
model.fit(trainRI, trainRL)
acc = model.score(testRI, testRL)
print("[INFO] SVM-SVC raw pixel accuracy: {:.2f}%".format(acc * 100))

# SVC on color histograms
print("\n")
print("[INFO] evaluating histogram accuracy...")
model = SVC(max_iter=1000,class_weight='balanced')
model.fit(trainFeat, trainLabels)
acc = model.score(testFeat, testLabels)
print("[INFO] SVM-SVC histogram accuracy: {:.2f}%".format(acc * 100))
```
| github_jupyter |
# Introducción: Computación científica y ciencia de datos
*Ciencia: Scientia: Conocimiento*
## Los paradigmas de la ciencia:
<img src="https://www.researchgate.net/profile/Marcio_Costa13/publication/331216708/figure/fig4/AS:728393284870152@1550673900958/The-four-science-paradigms-empirical-theoretical-computational-and-data-driven-Each.png" width="600">
- El primer paradigma es basado en observación y experimentación
- El segundo paradigma es el teórico: se basa en probar modelos y refutar hipótesis
- El tercer paradigma es basado en simulaciones: Probamos modelos y analizamos fenómenos utilizando simulaciones computacionales
- El [cuarto paradigma](https://en.wikipedia.org/wiki/The_Fourth_Paradigm) es basado en los datos: Entender fenómenos en base a datos masivos generados por sensores o simulaciones
## Tercer paradigma: Computación Científica
Hoy en día muchos trabajos científicos e ingenieriles incluyen al menos uno de los siguientes aspectos
- cálculos numéricos
- simulaciones
- modelamiento computacional
- análisis de datos
En resumen
> El computador se ha vuelto esencial para hacer ciencia
y por ende
> El software se ha vuelto esencial para hacer ciencia
En este escenario surge
> La **computación científica** es la disciplina que se encarga de desarrollar la teoría y las técnicas necesarias para resolver problemas matemáticos complejos de ciencias e ingeniería de forma eficiente
**Ejemplo:** Simulación de la unión de dos agujeros negros utilizando datos de LIGO
<img src="https://2.bp.blogspot.com/-h3SA26JUbB4/Vt86wugg1rI/AAAAAAAAzWE/DYuiVN4B8QA/s400/Two-Black-Holes-Merge-into-One.gif" width="500">
## Cuarto paradigma: Ciencia de los Datos
Los avances tecnológicos han permitido la generación y captura de una enorme cantidad de datos
Considere por ejemplo las Redes sociales, el internet de las cosas o los proyectos smart-city
Esto también ocurre en las ciencias
- Simulaciones con cada vez mayor nivel de resolución
- Telescopios que cubren mayores áreas del cielo y con mayor profundidad
- Digitalización de exámenes médicos
El término [*Big Data*](https://www.ibmbigdatahub.com/infographic/four-vs-big-data) se ocupa para describir el escenario tecnológico actual respecto a la abundancia, altas tasas de generación y variedad de datos
En este escenario surge:
> La **ciencia de datos** es la disciplina que busca extraer conocimiento (información) a partir de datos (masivos)
**Ejemplo:** [Super-resolución en imágenes astronómicas usando modelo adversario generativo entrenado en datos de SDSS](https://academic.oup.com/mnrasl/article/467/1/L110/2931732)
<img src="https://scx1.b-cdn.net/csz/news/800/2017/1-neuralnetwor.png" width="500">
**Competencias**
La computación científica y la ciencia de los datos combinan competencias de
- Ciencias de la computación: Diseñar algoritmos eficientes y escalables para procesar grandes bases de datos
- Matemáticas aplicadas y estadística: Diseñar modelos que caractericen y resuman los datos
**Interdiscipina**
La computación científica y la ciencia de los datos son paradigmas naturalmente **interdisciplinares**
> Se debe colaborar con expertos del dominio para formular preguntas científicas e interpretar los resultados de nuestras rutinas computacionales
**Reproducibilidad**
> Un buen software científico debe permitir la replicación y reproducibilidad de los resultados de los experimentos
## ¿Qué podemos esperar de este curso?
En este curso aprenderemos a manejar librerías de computación científica basadas en el lenguaje de programación **Python**
## ¿Qué es Python?
Python es un lenguaje de programación interpretado y multi-paradigma liberado en 1991 y ampliamente usado en ciencia de los datos
¿Por qué?
- Enfoque en la simpleza, lo cual resulta en código más fácil de mantener y desarrollo más rápido
- Menor curva de aprendizaje
- Rico ecosistema de librerías abiertas para computo científico: [*Numpy*](http://www.numpy.org/), [*Scipy*](https://www.scipy.org/), [*Pandas*](https://pandas.pydata.org/), entre otras que serán estudiadas en este curso
¿Y que hay del desempeño de mis algoritmos versus lenguajes de programación de más bajo nivel?
1. Trade-off entre tiempo de cómputo y horas hombre de programación
1. Librerías de cómputo científico de Python: Están compiladas con librerías optimizadas de bajo nivel (*e.g.* BLAS, LAPACK)
1. Si la función que necesito no está en las librerias: Integrar Python con lenguajes de bajo nivel (*e.g.* C++, Fortran)
## ¿Python2 o Python3?
- Python 2.7 sigue existiendo (y "compitiendo" con Python 3)
- [Un 95% de las librerías ha sido portado a Python 3](https://wiki.python.org/moin/Python2orPython3)
- Python 2 ya no se sigue desarrollando
- En este curso usaremos **Python 3** como lenguaje oficial
| github_jupyter |
```
# Report the Keras version bundled with the installed TensorFlow.
import tensorflow
tensorflow.keras.__version__
```
# Overfitting and underfitting
This notebook contains the code samples found in Chapter 3, Section 6 of [Deep Learning with Python](https://www.manning.com/books/deep-learning-with-python?a_aid=keras&a_bid=76564dff). Note that the original text features far more content, in particular further explanations and figures: in this notebook, you will only find source code and related comments.
----
In all the examples we saw in the previous chapter -- movie review sentiment prediction, topic classification, and house price regression --
we could notice that the performance of our model on the held-out validation data would always peak after a few epochs and would then start
degrading, i.e. our model would quickly start to _overfit_ to the training data. Overfitting happens in every single machine learning
problem. Learning how to deal with overfitting is essential to mastering machine learning.
The fundamental issue in machine learning is the tension between optimization and generalization. "Optimization" refers to the process of
adjusting a model to get the best performance possible on the training data (the "learning" in "machine learning"), while "generalization"
refers to how well the trained model would perform on data it has never seen before. The goal of the game is to get good generalization, of
course, but you do not control generalization; you can only adjust the model based on its training data.
At the beginning of training, optimization and generalization are correlated: the lower your loss on training data, the lower your loss on
test data. While this is happening, your model is said to be _under-fit_: there is still progress to be made; the network hasn't yet
modeled all relevant patterns in the training data. But after a certain number of iterations on the training data, generalization stops
improving, validation metrics stall then start degrading: the model is then starting to over-fit, i.e. it is starting to learn patterns
that are specific to the training data but that are misleading or irrelevant when it comes to new data.
To prevent a model from learning misleading or irrelevant patterns found in the training data, _the best solution is of course to get
more training data_. A model trained on more data will naturally generalize better. When that is no longer possible, the next best solution
is to modulate the quantity of information that your model is allowed to store, or to add constraints on what information it is allowed to
store. If a network can only afford to memorize a small number of patterns, the optimization process will force it to focus on the most
prominent patterns, which have a better chance of generalizing well.
The process of fighting overfitting in this way is called _regularization_. Let's review some of the most common regularization
techniques, and let's apply them in practice to improve our movie classification model from the previous chapter.
Note: in this notebook we will be using the IMDB test set as our validation set. It doesn't matter in this context.
Let's prepare the data using the code from Chapter 3, Section 5:
```
from tensorflow.keras.datasets import imdb
import numpy as np
# Load the IMDB dataset, keeping only the 10,000 most frequent words.
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)
def vectorize_sequences(sequences, dimension=10000):
    """Multi-hot encode `sequences`: return a (len(sequences), dimension)
    float matrix with 1.0 at every index present in each sequence."""
    encoded = np.zeros((len(sequences), dimension))
    row = 0
    for word_indices in sequences:
        # Fancy indexing sets all listed positions of this row to 1.0 at once.
        encoded[row, word_indices] = 1.
        row += 1
    return encoded
# Our vectorized training data
x_train = vectorize_sequences(train_data)
# Our vectorized test data
x_test = vectorize_sequences(test_data)
# Our vectorized labels, as 0/1 floats for binary crossentropy
y_train = np.asarray(train_labels).astype('float32')
y_test = np.asarray(test_labels).astype('float32')
```
# Fighting overfitting
## Reducing the network's size
The simplest way to prevent overfitting is to reduce the size of the model, i.e. the number of learnable parameters in the model (which is
determined by the number of layers and the number of units per layer). In deep learning, the number of learnable parameters in a model is
often referred to as the model's "capacity". Intuitively, a model with more parameters will have more "memorization capacity" and therefore
will be able to easily learn a perfect dictionary-like mapping between training samples and their targets, a mapping without any
generalization power. For instance, a model with 500,000 binary parameters could easily be made to learn the class of every digits in the
MNIST training set: we would only need 10 binary parameters for each of the 50,000 digits. Such a model would be useless for classifying
new digit samples. Always keep this in mind: deep learning models tend to be good at fitting to the training data, but the real challenge
is generalization, not fitting.
On the other hand, if the network has limited memorization resources, it will not be able to learn this mapping as easily, and thus, in
order to minimize its loss, it will have to resort to learning compressed representations that have predictive power regarding the targets
-- precisely the type of representations that we are interested in. At the same time, keep in mind that you should be using models that have
enough parameters that they won't be underfitting: your model shouldn't be starved for memorization resources. There is a compromise to be
found between "too much capacity" and "not enough capacity".
Unfortunately, there is no magical formula to determine what the right number of layers is, or what the right size for each layer is. You
will have to evaluate an array of different architectures (on your validation set, not on your test set, of course) in order to find the
right model size for your data. The general workflow to find an appropriate model size is to start with relatively few layers and
parameters, and start increasing the size of the layers or adding new layers until you see diminishing returns with regard to the
validation loss.
Let's try this on our movie review classification network. Our original network was as such:
```
from tensorflow.keras import models
from tensorflow.keras import layers
# Baseline IMDB classifier: two 16-unit ReLU hidden layers, sigmoid output.
original_model = models.Sequential([
    layers.Dense(16, activation='relu', input_shape=(10000,)),
    layers.Dense(16, activation='relu'),
    layers.Dense(1, activation='sigmoid'),
])
original_model.compile(optimizer='rmsprop',
                       loss='binary_crossentropy',
                       metrics=['acc'])
```
Now let's try to replace it with this smaller network:
```
# Lower-capacity variant: same depth, but only 4 units per hidden layer.
smaller_model = models.Sequential([
    layers.Dense(4, activation='relu', input_shape=(10000,)),
    layers.Dense(4, activation='relu'),
    layers.Dense(1, activation='sigmoid'),
])
smaller_model.compile(optimizer='rmsprop',
                      loss='binary_crossentropy',
                      metrics=['acc'])
```
Here's a comparison of the validation losses of the original network and the smaller network. The dots are the validation loss values of
the smaller network, and the crosses are the initial network (remember: a lower validation loss signals a better model).
```
# Train both models for 20 epochs; the IMDB test set is used as a stand-in
# validation set (as noted earlier in this notebook).
original_hist = original_model.fit(x_train, y_train,
                                   epochs=20,
                                   batch_size=512,
                                   validation_data=(x_test, y_test))
smaller_model_hist = smaller_model.fit(x_train, y_train,
                                       epochs=20,
                                       batch_size=512,
                                       validation_data=(x_test, y_test))

# Per-epoch validation losses recorded by Keras during fit().
epochs = range(1, 21)
original_val_loss = original_hist.history['val_loss']
smaller_model_val_loss = smaller_model_hist.history['val_loss']

import matplotlib.pyplot as plt

# b+ is for "blue cross"
plt.plot(epochs, original_val_loss, 'b+', label='Original model')
# "bo" is for "blue dot"
plt.plot(epochs, smaller_model_val_loss, 'bo', label='Smaller model')
plt.xlabel('Epochs')
plt.ylabel('Validation loss')
plt.legend()
plt.show()
```
As you can see, the smaller network starts overfitting later than the reference one (after 6 epochs rather than 4) and its performance
degrades much more slowly once it starts overfitting.
Now, for kicks, let's add to this benchmark a network that has much more capacity, far more than the problem would warrant:
```
# Heavily over-parameterized variant: two 512-unit hidden layers.
bigger_model = models.Sequential()
bigger_model.add(layers.Dense(512, activation='relu', input_shape=(10000,)))
bigger_model.add(layers.Dense(512, activation='relu'))
bigger_model.add(layers.Dense(1, activation='sigmoid'))
bigger_model.compile(optimizer='rmsprop',
                     loss='binary_crossentropy',
                     metrics=['acc'])
# Same training regime as the other models, for a fair comparison.
bigger_model_hist = bigger_model.fit(x_train, y_train,
                                     epochs=20,
                                     batch_size=512,
                                     validation_data=(x_test, y_test))
```
Here's how the bigger network fares compared to the reference one. The dots are the validation loss values of the bigger network, and the
crosses are the initial network.
```
# Compare validation-loss curves: bigger network (dots) vs original (crosses).
bigger_model_val_loss = bigger_model_hist.history['val_loss']
plt.plot(epochs, original_val_loss, 'b+', label='Original model')
plt.plot(epochs, bigger_model_val_loss, 'bo', label='Bigger model')
plt.xlabel('Epochs')
plt.ylabel('Validation loss')
plt.legend()
plt.show()
```
The bigger network starts overfitting almost right away, after just one epoch, and overfits much more severely. Its validation loss is also
more noisy.
Meanwhile, here are the training losses for our two networks:
```
# Compare *training* losses of the two networks (same marker convention).
original_train_loss = original_hist.history['loss']
bigger_model_train_loss = bigger_model_hist.history['loss']
plt.plot(epochs, original_train_loss, 'b+', label='Original model')
plt.plot(epochs, bigger_model_train_loss, 'bo', label='Bigger model')
plt.xlabel('Epochs')
plt.ylabel('Training loss')
plt.legend()
plt.show()
```
As you can see, the bigger network gets its training loss near zero very quickly. The more capacity the network has, the quicker it will be
able to model the training data (resulting in a low training loss), but the more susceptible it is to overfitting (resulting in a large
difference between the training and validation loss).
## Adding weight regularization
You may be familiar with _Occam's Razor_ principle: given two explanations for something, the explanation most likely to be correct is the
"simplest" one, the one that makes the least amount of assumptions. This also applies to the models learned by neural networks: given some
training data and a network architecture, there are multiple sets of weights values (multiple _models_) that could explain the data, and
simpler models are less likely to overfit than complex ones.
A "simple model" in this context is a model where the distribution of parameter values has less entropy (or a model with fewer
parameters altogether, as we saw in the section above). Thus a common way to mitigate overfitting is to put constraints on the complexity
of a network by forcing its weights to only take small values, which makes the distribution of weight values more "regular". This is called
"weight regularization", and it is done by adding to the loss function of the network a _cost_ associated with having large weights. This
cost comes in two flavors:
* L1 regularization, where the cost added is proportional to the _absolute value of the weights coefficients_ (i.e. to what is called the
"L1 norm" of the weights).
* L2 regularization, where the cost added is proportional to the _square of the value of the weights coefficients_ (i.e. to what is called
the "L2 norm" of the weights). L2 regularization is also called _weight decay_ in the context of neural networks. Don't let the different
name confuse you: weight decay is mathematically the exact same as L2 regularization.
In Keras, weight regularization is added by passing _weight regularizer instances_ to layers as keyword arguments. Let's add L2 weight
regularization to our movie review classification network:
```
from tensorflow.keras import regularizers
# Same architecture as the baseline, with an L2 penalty (factor 0.001) on
# each hidden layer's kernel weights added to the training loss.
l2_model = models.Sequential([
    layers.Dense(16, kernel_regularizer=regularizers.l2(0.001),
                 activation='relu', input_shape=(10000,)),
    layers.Dense(16, kernel_regularizer=regularizers.l2(0.001),
                 activation='relu'),
    layers.Dense(1, activation='sigmoid'),
])
l2_model.compile(optimizer='rmsprop',
                 loss='binary_crossentropy',
                 metrics=['acc'])
```
`l2(0.001)` means that every coefficient in the weight matrix of the layer will add `0.001 * weight_coefficient_value` to the total loss of
the network. Note that because this penalty is _only added at training time_, the loss for this network will be much higher at training
than at test time.
Here's the impact of our L2 regularization penalty:
```
# Train the L2-regularized model and compare its validation loss (dots)
# against the unregularized baseline (crosses).
l2_model_hist = l2_model.fit(x_train, y_train,
                             epochs=20,
                             batch_size=512,
                             validation_data=(x_test, y_test))
l2_model_val_loss = l2_model_hist.history['val_loss']
plt.plot(epochs, original_val_loss, 'b+', label='Original model')
plt.plot(epochs, l2_model_val_loss, 'bo', label='L2-regularized model')
plt.xlabel('Epochs')
plt.ylabel('Validation loss')
plt.legend()
plt.show()
```
As you can see, the model with L2 regularization (dots) has become much more resistant to overfitting than the reference model (crosses),
even though both models have the same number of parameters.
As alternatives to L2 regularization, you could use one of the following Keras weight regularizers:
```
from tensorflow.keras import regularizers
# L1 regularization: penalty proportional to the absolute value of the weights
regularizers.l1(0.001)
# L1 and L2 regularization applied at the same time
regularizers.l1_l2(l1=0.001, l2=0.001)
```
## Adding dropout
Dropout is one of the most effective and most commonly used regularization techniques for neural networks, developed by Hinton and his
students at the University of Toronto. Dropout, applied to a layer, consists of randomly "dropping out" (i.e. setting to zero) a number of
output features of the layer during training. Let's say a given layer would normally have returned a vector `[0.2, 0.5, 1.3, 0.8, 1.1]` for a
given input sample during training; after applying dropout, this vector will have a few zero entries distributed at random, e.g. `[0, 0.5,
1.3, 0, 1.1]`. The "dropout rate" is the fraction of the features that are being zeroed-out; it is usually set between 0.2 and 0.5. At test
time, no units are dropped out, and instead the layer's output values are scaled down by a factor equal to the dropout rate, so as to
compensate for the fact that more units are active than at training time.
Consider a Numpy matrix containing the output of a layer, `layer_output`, of shape `(batch_size, features)`. At training time, we would be
zero-ing out at random a fraction of the values in the matrix:
```
# At training time: we drop out 50% of the units in the output.
# np.random.randint(0, high=2, ...) draws a 0/1 mask with the same shape
# as the activations; multiplying by it zeroes the masked units.
# (NumPy has no np.randint -- the function lives in np.random.)
layer_output *= np.random.randint(0, high=2, size=layer_output.shape)
```
At test time, we would be scaling the output down by the dropout rate. Here we scale by 0.5 (because we were previously dropping half the
units):
```
# At test time: scale the activations by 0.5 to compensate for twice as
# many units being active as there were during training.
layer_output *= 0.5
```
Note that this process can be implemented by doing both operations at training time and leaving the output unchanged at test time, which is
often the way it is implemented in practice:
```
# At training time: drop 50% of the units AND rescale the survivors, so
# the test-time forward pass needs no change ("inverted dropout").
# (Fixed: NumPy has no np.randint -- it is np.random.randint.)
layer_output *= np.random.randint(0, high=2, size=layer_output.shape)
# Note that we are scaling *up* rather than scaling *down* in this case
layer_output /= 0.5
```
This technique may seem strange and arbitrary. Why would this help reduce overfitting? Geoff Hinton has said that he was inspired, among
other things, by a fraud prevention mechanism used by banks -- in his own words: _"I went to my bank. The tellers kept changing and I asked
one of them why. He said he didn’t know but they got moved around a lot. I figured it must be because it would require cooperation
between employees to successfully defraud the bank. This made me realize that randomly removing a different subset of neurons on each
example would prevent conspiracies and thus reduce overfitting"_.
The core idea is that introducing noise in the output values of a layer can break up happenstance patterns that are not significant (what
Hinton refers to as "conspiracies"), which the network would start memorizing if no noise was present.
In Keras you can introduce dropout in a network via the `Dropout` layer, which gets applied to the output of the layer right before it, e.g.:
```
# Dropout with rate 0.5: during training, half of the previous layer's
# output features are zeroed out at random.
model.add(layers.Dropout(0.5))
```
Let's add two `Dropout` layers in our IMDB network to see how well they do at reducing overfitting:
```
# IMDB classifier with a Dropout(0.5) layer after each hidden Dense layer.
dpt_model = models.Sequential([
    layers.Dense(16, activation='relu', input_shape=(10000,)),
    layers.Dropout(0.5),
    layers.Dense(16, activation='relu'),
    layers.Dropout(0.5),
    layers.Dense(1, activation='sigmoid'),
])
dpt_model.compile(optimizer='rmsprop',
                  loss='binary_crossentropy',
                  metrics=['acc'])
dpt_model_hist = dpt_model.fit(x_train, y_train,
                               epochs=20,
                               batch_size=512,
                               validation_data=(x_test, y_test))
```
Let's plot the results:
```
# Compare validation-loss curves: crosses = original (unregularized)
# model, dots = dropout-regularized model.
dropout_val_loss = dpt_model_hist.history['val_loss']
plt.plot(epochs, original_val_loss, 'b+', label='Original model')
plt.plot(epochs, dropout_val_loss, 'bo', label='Dropout-regularized model')
plt.xlabel('Epochs')
plt.ylabel('Validation loss')
plt.legend()
plt.show()
```
Again, a clear improvement over the reference network.
To recap: here are the most common ways to prevent overfitting in neural networks:
* Getting more training data.
* Reducing the capacity of the network.
* Adding weight regularization.
* Adding dropout.
| github_jupyter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.