code stringlengths 2.5k 150k | kind stringclasses 1 value |
|---|---|
# Modeling - Titanic Challenge
```
# Install the Kaggle API client (-q: quiet, suppress install output)
!pip install kaggle -q
# Create the config directory the Kaggle client expects
!mkdir -p ~/.kaggle
# Copy the API token (root path used on Google Colab / Docker)
!cp /kaggle.json ~/.kaggle/
!ls ~/.kaggle
# Restrict token permissions so the client does not warn about
# world-readable credentials (was duplicated in the original cell)
!chmod 600 ~/.kaggle/kaggle.json
# Sanity check: prints the CLI help if the client is installed correctly
!kaggle
```
## Cargamos las librerías
```
# data analysis and wrangling
import pandas as pd
import numpy as np
import random as rnd
from scipy.stats import norm, skew
from scipy import stats
import xlrd, xdrlib
# visualization
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
# data mining
#from sklearn.impute import KNNImputer, MissingIndicator, SimpleImputer
from sklearn import impute
#from sklearn_pandas import categorical_imputer, CategoricalImputer
from sklearn.pipeline import make_pipeline, make_union, Pipeline
from sklearn import preprocessing
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
# machine learning
from sklearn import linear_model
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.svm import LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import Perceptron
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.neural_network import MLPClassifier
## scikit modeling libraries
from sklearn.ensemble import (RandomForestClassifier, AdaBoostClassifier,
GradientBoostingClassifier, ExtraTreesClassifier,
VotingClassifier)
from sklearn.model_selection import (GridSearchCV, cross_val_score, cross_val_predict,
StratifiedKFold, learning_curve)
## Load metrics for predictive modeling
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
#from sklearn.feature_selection import RFE, rfe
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import mean_absolute_error, mean_squared_error
## Warnings and other tools
import itertools
import warnings
warnings.filterwarnings("ignore")
```
***
## 5. Model, predict and solve the problem.
### 5.1 Modelos de clasificación standalone
### 5.2 Modelos de clasificación con CV (Cross Validation)
#### 5.2.1 - K-fold Cross Validation
#### **5.2.2 - Ajustes de parámetros del CV**
#### 5.2.3 - Ajuste de Hyperparámetros (modelos ensemble)
## 5.2.1 - k-fold Cross Validation
K-Folds cross-validator
https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.KFold.html?highlight=k%20fold#sklearn.model_selection.KFold
Provides train/test indices to split data in train/test sets. Split dataset into k consecutive folds (without shuffling by default).
Each fold is then used once as a validation while the k - 1 remaining folds form the training set.
### - Validation Set
Al entrenar un modelo, el uso de diferentes parámetros puede conducir a una solución muy diferente. Con el fin de evitar limitarnos a minimizar el error, que no siempre es un buen punto de partida, creamos un **conjunto de validación**, que nos sirve para validar la selección de parámetros. Difiere del **conjunto de prueba o test**, que sirve solo para validar la calidad del modelo.

```
# Load the previously prepared training and testing datasets
df_test = pd.read_csv("testingDF.csv")
df_test.head(5)
df_train = pd.read_csv("trainDF.csv")
df_train.head(5)
# Import train.csv only to extract the dependent variable, Survived
y = pd.read_csv("train.csv")
y_train = y['Survived']
y_train.head(5)
# Keep the test-set passenger IDs for the submission file
test = pd.read_csv("test.csv")
ids = test['PassengerId']
ids.head(5)
# 80/20 split of the training file so the model can be validated properly
X_train, X_test, y_train, y_test = train_test_split(df_train, y_train,
                                                    test_size=0.20,
                                                    random_state=666)
# Second split carves out a validation set for parameter selection
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train,
                                                  test_size=0.20,
                                                  random_state=666)
X_train.shape
X_val.shape
y_train.shape
y_val.shape
## PART 2: n_jobs = 10 // n_splits = 10
# Cross-validation iterator (stratified folds preserve the class balance)
K_fold = StratifiedKFold(n_splits=10)
# Fixed random state so the model comparison is reproducible
random_state = 17
# Containers used to run a shared k-fold over several estimators
models = []
cv_results = []
cv_means = []
cv_std = []
# Part II
# Build the candidate models, leaving most parameters at their defaults
models.append(KNeighborsClassifier())
models.append(AdaBoostClassifier(DecisionTreeClassifier(random_state=random_state),
                                 random_state=random_state, learning_rate=0.1))
models.append(DecisionTreeClassifier(random_state=random_state))
models.append(RandomForestClassifier(random_state=random_state))
models.append(ExtraTreesClassifier(random_state=random_state))
models.append(SVC(random_state=random_state))
models.append(GradientBoostingClassifier(random_state=random_state))
models.append(LogisticRegression(random_state=random_state))
models.append(LinearDiscriminantAnalysis())
models.append(MLPClassifier(random_state=random_state))
# Cross-validate every model on the training split
for model in models:
    cv_results.append(cross_val_score(model, X_train, y_train,
                                      scoring='accuracy',
                                      cv=K_fold,
                                      n_jobs=10,
                                      verbose=2))
# Summarize each model's CV scores (mean and standard deviation)
for cv_result in cv_results:
    cv_means.append(cv_result.mean())
    cv_std.append(cv_result.std())
# Collect the summary statistics in a dataframe
cv_frame = pd.DataFrame(
    {
        "CrossValMeans": cv_means,
        "CrossValErrors": cv_std,  # fixed label typo (was "CrossValErros")
        "Algorithms": [
            "KNeighboors",
            "AdaBoost",
            "DecisionTree",
            "RandomForest",
            "ExtraTrees",
            "SVC",
            "GradientBoosting",
            "LogisticRegression",
            "LinearDiscriminantAnalysis",
            "MultipleLayerPerceptron"
        ]
    }
)
# Bar chart of mean CV accuracy per algorithm (xerr = CV std dev).
# NOTE: seaborn >= 0.12 requires x/y as keyword arguments; the original
# positional call raises a TypeError on current versions.
cv_plot = sns.barplot(x="CrossValMeans", y="Algorithms", data=cv_frame,
                      palette="husl", orient='h', **{'xerr': cv_std})
cv_plot.set_xlabel("Mean Accuracy")
cv_plot = cv_plot.set_title("CV Scores")
```
***
## 5.2.2 - Realizamos la tarea de micro-ajuste a los parámetros
***
`Linear Discriminant Analysis`
https://es.wikipedia.org/wiki/An%C3%A1lisis_discriminante_lineal
https://scikit-learn.org/stable/modules/generated/sklearn.discriminant_analysis.LinearDiscriminantAnalysis.html?highlight=linear#sklearn.discriminant_analysis.LinearDiscriminantAnalysis
```
# Linear Discriminant Analysis, k-fold = 10
LDA_Model = LinearDiscriminantAnalysis()
LDA_scores = cross_val_score(LDA_Model, X_train, y_train, cv=K_fold,
                             n_jobs=4, scoring='accuracy')
# Show the per-fold scores and the mean accuracy (%)
print(LDA_scores)
round(np.mean(LDA_scores)*100, 2)
# Linear Discriminant Analysis, k-fold = 15
LDA_Model = LinearDiscriminantAnalysis()
LDA_scores15 = cross_val_score(LDA_Model, X_train, y_train, cv=15,
                               n_jobs=4, scoring='accuracy')
print(LDA_scores15)
round(np.mean(LDA_scores15)*100, 2)
# Linear Discriminant Analysis, k-fold = 5
LDA_Model = LinearDiscriminantAnalysis()
LDA_scores5 = cross_val_score(LDA_Model, X_train, y_train, cv=5,
                              n_jobs=4, scoring='accuracy')
print(LDA_scores5)
round(np.mean(LDA_scores5)*100, 2)
# Hyper-parameter tuning of the LDA estimator via grid search
LDA = LinearDiscriminantAnalysis()
# Grid of candidate values to iterate over, n values per parameter
lda_param_grid = {
    "solver": ['svd', 'lsqr', 'eigen'],
    "tol": [0.0001, 0.0002, 0.0003]
}
# Build the grid-search model with k-fold = 15.
# FIX: the original read "n_jobs=," (missing value), which is a
# SyntaxError; 4 matches the other cross_val_score calls above.
gsLDA = GridSearchCV(LDA, param_grid=lda_param_grid, cv=15,
                     scoring='accuracy', n_jobs=4, verbose=2)
# Run the fit/search
gsLDA.fit(X_train, y_train)
# Best estimator found by the search
LDA_best = gsLDA.best_estimator_
# Best cross-validated score
gsLDA.best_score_
# Inspect the winning parameters and the full CV results table
LDA_best.get_params()
gsLDA.cv_results_
```
## Aplicamos el resultado final del modelo obtenido y lo aplicamos primero al Validation y luego al Testing dataset
```
# Aplicamos el modelo al validación
data_val = gsLDA.predict(<dataset de validación>)
X_val.shape # 143
df_test.shape #418
# Sucesivamente hay que aplicarlo al testing dataset
prediction = gsLDA.predict(df_test)
# Los resultados de nuestro modelo aplicado al dataset de test
submission = pd.DataFrame({
'PassengerId' : ids,
'Survived' : prediction
})
submission.to_csv('titanic_model.gsLDA.csv', index=False)
submission.head(15)
# Submission to Kaggle
!kaggle competitions submit titanic -f 'titanic_model.gsLDA.csv' -m "Modelo ML utilizado tuning LDA"
!kaggle competitions submissions titanic
!kaggle competitions leaderboard titanic -s
```
***
## Ajuste de parámetros de otros estimadores
| github_jupyter |
# Data structuring, part 2
### The Pandas way
*Andreas Bjerre-Nielsen*
## Recap
*What do we know about explanatory plotting?*
- ...
- ...
*What do we know about exploratory plotting?*
- ...
## Motivation
*Reminder: Why do we want to learn data structuring?*
- ..
- ..
## Agenda
We will learn about new data types
1. [string data](#String-data)
1. [temporal data](#Temporal-data)
1. [missing data](#Missing-data)
1. [useful tools](#Useful-tools)
## Loading the software
```
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
%matplotlib inline
```
# String data
## String operations vectorized (1)
*Quiz: Which operators could work for string?*
Operators **+**, **+=**. Example:
```
# Build a Series of first names; '+' concatenates element-wise,
# broadcasting the scalar suffix to every entry.
names = ['Andreas', 'Snorre', 'Ulf']
str_ser1 = pd.Series(names)
str_ser1 + ' works @ SODAS'
```
## String operations vectorized (2)
Addition also works for two series
```
# adding two series together is also possible
str_ser2 = pd.Series(['Bjerre-Nielsen', 'Ralund', 'Aslak'])
# str_ser1 + str_ser2
```
## String operations vectorized (3)
The powerful .str has several powerful methods e.g. `contains`, `capitalize`. Example:
```
str_ser1.str.upper()
# str_ser1.str.contains('M')
```
## String operations vectorized (4)
The .str methods include slicing - example:
```
str_ser2.str[0]
```
## String operations vectorized (5)
Many more `str` methods in pandas,
- most basic strings methods translate directly
- see Table 7-5 in PDA for an overview
## Categorical data type (1)
*Are string columns smart for storage and speed?*
No, sometimes it is better to convert to categorical data type:
- use categorical when many characters and repeated.
## Categorical data type (2)
*How do we convert to categorical?*
```
# Education levels, listed from lowest to highest rank
edu_cats = ['Secondary school', 'High school', 'B.Sc. Political Science']
# Sample observations (two 'High school' entries)
edu_list = ['B.Sc. Political Science', 'Secondary school',
            'High school', 'High school']
str_ser3 = pd.Series(edu_list)
# Option 1: explicit, ordered categorical built from the known categories
cats = pd.Categorical(str_ser3, categories=edu_cats, ordered=True)
cat_ser = pd.Series(cats, index=str_ser3)
# Option 2: quick conversion without an explicit order
cat_ser2 = str_ser3.astype('category')
```
## Categorical data type (3)
*How do we work with categorical data?*
- Using the `cat` attribute of series. Has a few methods. E.g. `.cat.codes`
```
print(cat_ser)
print()
print(cat_ser.cat.codes)
```
*Why categorical?*
-
-
# Temporal data
## Temporal data type (1)
*Why is time so fundamental?*
Every measurement made by humans was made at a point in time, therefore it has a "timestamp".
## Temporal data type (2)
*How are timestamps measured?*
1. Datetime (ISO 8601): standard calendar
- year, month, day: minute, second, miliseconds etc. [timezone]
- comes as strings in raw data
2. Epoch time: seconds since January 1, 1970 - 00:00, GMT.
- nanoseconds in pandas
## Temporal data type (3)
*Does Pandas store it in a smart way?*
Pandas has native support for temporal data combining datetime and epoch time.
```
# Compact ISO date strings (YYYYMMDD) parsed into datetime64 values
date_strings = ['20170101', '20170727', '20170803', '20171224']
str_ser4 = pd.Series(date_strings)
dt_ser1 = pd.to_datetime(str_ser4)
print(dt_ser1)
```
## Time series (1)
*Why is temporal data powerful?*
We can easily make and plot time series.
```
T = 1000
data = {v:np.cumsum(np.random.randn(T)) for v in ['A', 'B']}
data['time'] = pd.date_range(start='20150101', freq='D', periods=T)
ts_df = pd.DataFrame(data)
ts_df.set_index('time').plot()
```
## Time series (2)
*Why is pandas good at time data?*
It handles irregular data well:
- missing values;
- duplicate entries.
It has specific tools for resampling and interpolating data
- See 11.3, 11.5, 11.6 in PDA book.
## Datetime variables (1)
*What other uses might time data have?*
We can extract data from datetime columns. These columns have the `dt` attribute and its sub-methods. Example:
```
dt_ser2 = ts_df.time
#dt_ser2.dt.day.head(3)
#dt_ser2.dt.month.head(3)
```
## Datetime variables (2)
The `dt` sub-methods include `year`, `weekday`, `hour`, `second`.
*To note:* Your temporal data may need conversion. `dt` includes `tz_localize` and `tz_convert` which does that.
## Datetime variables (3)
*Quiz: What are you to do if get time data with numbers of around 1-2 billion?*
It is likely to be epoch time measured in seconds. We can convert it as follows:
```
pd.to_datetime([123512321,2132321321], unit='s')
```
# Missing data
## Missing data type (1)
*Which data type have we not covered yet?*
Missing data, i.e. empty observations.
- In python: `None`
- In pandas: numpy's 'Not a Number', abbreviated `NaN` or `nan`
## Missing data type (2)
*What does a DataFrame with missing data look like?*
```
# A small frame with missing entries; both np.nan and None are stored as NaN
nan_data = [
    [1, np.nan, 3],
    [4, 5, None],
    [7, 8, 9],
]
nan_df = pd.DataFrame(nan_data, columns=['A', 'B', 'C'])
print(nan_df)
```
## Handling missing data
*What options do we have when working with missing data?*
1. Ignore the problem
2. Drop missing data: columns and/or rows
3. Fill in the blanks
4. If time and money permits: collect the data or new data
## Removing missing data (1)
*How do we remove data?*
Using the `dropna` method.
```
nan_df.dropna() # subset=['B'], axis=1
```
## Filling missing data (1)
*How do we fill observations with a constant?*
```
nan_df.fillna(0)
```
Note: we can also select missing `isnull` and the replace values using `loc`.
## Filling missing data (2)
*Are there other methods?*
Yes, many methods:
- Filling sorted temporal data, see `ffill`, `bfill`
- Filling with a model
- e.g. linear interpolation, by mean of nearest observations etc.
- `sklearn` in next week can impute data
# Useful tools
## Duplicates in data (1)
*What does it mean there are duplicates in the data?*
...
## Duplicates in data (2)
*How do we drop duplicates?*
```
str_ser3.drop_duplicates()
```
## Duplicates in data (3)
*How do we use duplicates?*
Tomorrow morning we will get introduced to groupby which can be used to compute various statistics (e.g. mean, median)
## Binning numerical data
*Can we convert our numerical data to bins in a smart way?*
Yes, two methods are useful:
- `cut` which divides data by user specified bins
- `qcut` which divides data by user specified quantiles (e.g. median, q=0.5)
```
# One million standard-normal draws, cut at the 95th percentile so the
# data falls into two quantile bins (bottom 95% / top 5%)
sample_size = 10 ** 6
x = pd.Series(np.random.normal(size=sample_size))
cat_ser3 = pd.qcut(x, q=[0, .95, 1])
cat_ser3.cat.categories
```
## Other tricks
Pandas is packed with smart tools
- we can create dummy variables from categorical with `pd.get_dummies`
- we can combine many of the tools we have learned with `groupby` (tomorrow)
# The end
[Return to agenda](#Agenda)
| github_jupyter |
# Ramp Optimization Examples
This notebook outlines an example to optimize the ramp settings for a few different types of observations.
In these types of optimizations, we must consider observations constraints such as saturation levels, SNR requirements, and limits on acquisition time.
**Note**: The reported acquisition time does not include observatory and instrument-level overheads, such as slew times, filter changes, script compilations, etc. It only includes detector readout times (including reset frames).
```
# Import the usual libraries
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# Enable inline plotting at lower left
%matplotlib inline
import pynrc
from pynrc import nrc_utils
from pynrc.nrc_utils import S, jl_poly_fit
from pynrc.pynrc_core import table_filter
pynrc.setup_logging('WARNING', verbose=False)
from astropy.table import Table
# Progress bar
from tqdm.auto import tqdm, trange
```
## Example 1: M-Dwarf companion (imaging vs coronagraphy)
We want to observe an M-Dwarf companion (K=18 mag) in the vicinity of a brighter F0V (K=13 mag) in the F430M filter. Assume the M-Dwarf flux is not significantly impacted by the brighter PSF (ie., in the background limited regime). In this scenario, the F0V star will saturate much more quickly compared to the fainter companion, so it limits which ramp settings we can use.
We will test a couple different types of observations (direct imaging vs coronagraphy).
```
# Get stellar spectra and normalize at K-Band
# The stellar_spectrum convenience function creates a Pysynphot spectrum
bp_k = S.ObsBandpass('k')
sp_M2V = pynrc.stellar_spectrum('M2V', 18, 'vegamag', bp_k)#, catname='ck04models')
sp_F0V = pynrc.stellar_spectrum('F0V', 13, 'vegamag', bp_k)#, catname='ck04models')
# Initiate a NIRCam observation
nrc = pynrc.NIRCam(filter='F430M', wind_mode='WINDOW', xpix=160, ypix=160)
# Set some observing constraints
# Let's assume we want photometry on the primary to calibrate the M-Dwarf for direct imaging
# - Set well_frac_max=0.75
# Want a SNR~100 in the F430M filter
# - Set snr_goal=100
res = nrc.ramp_optimize(sp_M2V, sp_bright=sp_F0V, snr_goal=100, well_frac_max=0.75, verbose=True)
# Print the Top 2 settings for each readout pattern
res2 = table_filter(res, 2)
print(res2)
# Do the same thing, but for coronagraphic mask instead
nrc = pynrc.NIRCam(filter='F430M', image_mask='MASK430R', pupil_mask='CIRCLYOT',
wind_mode='WINDOW', xpix=320, ypix=320)
# We assume that longer ramps will give us the best SNR for time
patterns = ['MEDIUM8', 'DEEP8']
res = nrc.ramp_optimize(sp_M2V, sp_bright=sp_F0V, snr_goal=100,
patterns=patterns, even_nints=True)
# Take the Top 2 settings for each readout pattern
res2 = table_filter(res, 2)
print(res2)
```
**RESULTS**
Based on these two comparisons, it looks like direct imaging is much more efficient in getting to the requisite SNR. In addition, direct imaging gives us a photometric comparison source that is inaccessible when occulting the primary with the coronagraph masks. **Of course, this assumes the companion exists in the background limit as opposed to the contrast limit.**
## Example 2: Exoplanet Coronagraphy
We want to observe GJ 504 for an hour in the F444W filter using the MASK430R coronagraph.
- What is the optimal ramp settings to maximize the SNR of GJ 504b?
- What is the final background sensitivity limit?
```
# Get stellar spectra and normalize at K-Band
# The stellar_spectrum convenience function creates a Pysynphot spectrum
bp_k = pynrc.bp_2mass('ks')
sp_G0V = pynrc.stellar_spectrum('G0V', 4, 'vegamag', bp_k)
# Choose a representative planet spectrum
planet = pynrc.planets_sb12(atmo='hy3s', mass=8, age=200, entropy=8, distance=17.5)
sp_pl = planet.export_pysynphot()
# Renormalize to F360M = 18.8
bp_l = pynrc.read_filter('F360M') #
sp_pl = sp_pl.renorm(18.8, 'vegamag', bp_l)
# Initiate a NIRCam observation
nrc = pynrc.NIRCam(filter='F444W', pupil_mask='CIRCLYOT', image_mask='MASK430R',
wind_mode='WINDOW', xpix=320, ypix=320)
# Set even_nints=True assume 2 roll angles
res = nrc.ramp_optimize(sp_pl, sp_bright=sp_G0V, tacq_max=3600, tacq_frac=0.05,
even_nints=True, verbose=True)
# Take the Top 2 settings for each readout pattern
res2 = table_filter(res, 2)
print(res2)
# The SHALLOWs, DEEPs, and MEDIUMs are very similar for SNR and efficiency.
# Let's go with SHALLOW2 for more GROUPS & INTS
# MEDIUM8 would be fine as well.
nrc.update_detectors(read_mode='SHALLOW2', ngroup=10, nint=70)
keys = list(nrc.multiaccum_times.keys())
keys.sort()
for k in keys:
print("{:<10}: {: 12.5f}".format(k, nrc.multiaccum_times[k]))
# Background sensitivity (5 sigma)
sens_dict = nrc.sensitivity(nsig=5, units='vegamag', verbose=True)
```
## Example 3: Single-Object Grism Spectroscopy
Similar to the above, but instead we want to obtain a slitless grism spectrum of a K=12 mag M0V dwarf. Each grism resolution element should have SNR~100.
```
# M0V star normalized to K=12 mags
bp_k = S.ObsBandpass('k')
sp_M0V = pynrc.stellar_spectrum('M0V', 12, 'vegamag', bp_k)
nrc = pynrc.NIRCam(filter='F444W', pupil_mask='GRISMR', wind_mode='STRIPE', ypix=128)
# Set a minimum of 10 integrations to be robust against cosmic rays
# Also set a minimum of 10 groups for good ramp sampling
res = nrc.ramp_optimize(sp_M0V, snr_goal=100, nint_min=10, ng_min=10, verbose=True)
# Print the Top 2 settings for each readout pattern
res2 = table_filter(res, 2)
print(res2)
# Let's say we choose SHALLOW4, NGRP=10, NINT=10
# Update detector readout
nrc.update_detectors(read_mode='SHALLOW4', ngroup=10, nint=10)
keys = list(nrc.multiaccum_times.keys())
keys.sort()
for k in keys:
print("{:<10}: {: 12.5f}".format(k, nrc.multiaccum_times[k]))
# Print final wavelength-dependent SNR
# For spectroscopy, the snr_goal is the median over the bandpass
snr_dict = nrc.sensitivity(sp=sp_M0V, forwardSNR=True, units='mJy', verbose=True)
```
**Mock observed spectrum**
Create a series of ramp integrations based on the current NIRCam settings. The gen_exposures() function creates a series of mock observations in raw DMS format by default. By default, it's point source objects centered in the observing window.
```
# Ideal spectrum and wavelength solution
wspec, imspec = nrc.calc_psf_from_coeff(sp=sp_M0V, return_hdul=False, return_oversample=False)
# Resize to detector window
nx = nrc.det_info['xpix']
ny = nrc.det_info['ypix']
# Shrink/expand nx (fill value of 0)
# Then shrink to a size excluding wspec=0
# This assumes simulated spectrum is centered
imspec = nrc_utils.pad_or_cut_to_size(imspec, (ny,nx))
wspec = nrc_utils.pad_or_cut_to_size(wspec, nx)
# Add simple zodiacal background
im_slope = imspec + nrc.bg_zodi()
# Create a series of ramp integrations based on the current NIRCam settings
# Output is a single HDUList with 10 INTs
# Ignore detector non-linearity to return output in e-/sec
kwargs = {
'apply_nonlinearity' : False,
'apply_flats' : False,
}
res = nrc.simulate_level1b('M0V Target', 0, 0, '2023-01-01', '12:00:00',
im_slope=im_slope, return_hdul=True, **kwargs)
res.info()
tvals = nrc.Detector.times_group_avg
header = res['PRIMARY'].header
data_all = res['SCI'].data
slope_list = []
for data in tqdm(data_all):
ref = pynrc.ref_pixels.NRC_refs(data, header, DMS=True, do_all=False)
ref.calc_avg_amps()
ref.correct_amp_refs()
# Linear fit to determine slope image
cf = jl_poly_fit(tvals, ref.data, deg=1)
slope_list.append(cf[1])
# Create a master averaged slope image
slopes_all = np.array(slope_list)
slope_sim = slopes_all.mean(axis=0) * nrc.Detector.gain
fig, ax = plt.subplots(1,1, figsize=(12,3))
ax.imshow(slope_sim, vmin=0, vmax=10)
fig.tight_layout()
ind = wspec>0
# Estimate background emission and subtract from slope_sim
bg = np.median(slope_sim[:,~ind])
slope_sim -= bg
ind = wspec>0
plt.plot(wspec[ind], slope_sim[63,ind])
# Extract 2 spectral x 5 spatial pixels
# First, cut out the central 5 pixels
wspec_sub = wspec[ind]
sh_new = (5, len(wspec_sub))
slope_sub = nrc_utils.pad_or_cut_to_size(slope_sim, sh_new)
slope_sub_ideal = nrc_utils.pad_or_cut_to_size(imspec, sh_new)
# Sum along the spatial axis
spec = slope_sub.sum(axis=0)
spec_ideal = slope_sub_ideal.sum(axis=0)
spec_ideal_rebin = nrc_utils.frebin(spec_ideal, scale=0.5, total=False)
# Build a quick RSRF from extracted ideal spectral slope
sp_M0V.convert('mjy')
rsrf = spec_ideal / sp_M0V.sample(wspec_sub*1e4)
# Rebin along spectral direction
wspec_rebin = nrc_utils.frebin(wspec_sub, scale=0.5, total=False)
spec_rebin_cal = nrc_utils.frebin(spec/rsrf, scale=0.5, total=False)
# Expected noise per extraction element
snr_interp = np.interp(wspec_rebin, snr_dict['wave'], snr_dict['snr'])
_spec_rebin = spec_ideal_rebin / snr_interp
_spec_rebin_cal = _spec_rebin / nrc_utils.frebin(rsrf, scale=0.5, total=False)
fig, ax = plt.subplots(1,1, figsize=(12,8))
ax.plot(sp_M0V.wave/1e4, sp_M0V.flux, label='Input Spectrum')
ax.plot(wspec_rebin, spec_rebin_cal, alpha=0.7, label='Extracted Observation')
ax.errorbar(wspec_rebin, spec_rebin_cal, yerr=_spec_rebin_cal, zorder=3,
fmt='none', label='Expected Error Bars', alpha=0.7, color='C2')
ax.set_ylim([0,10])
ax.set_xlim([3.7,5.1])
ax.set_xlabel('Wavelength ($\mu m$)')
ax.set_ylabel('Flux (mJy)')
ax.set_title('Simulated Spectrum')
ax.legend(loc='upper right');
```
## Example 4: Exoplanet Transit Spectroscopy
Let's say we want to observe an exoplanet transit using NIRCam grisms in the F322W2 filter.
We assume a 2.1-hour transit duration for a K6V star (K=8.4 mag).
```
nrc = pynrc.NIRCam('F322W2', pupil_mask='GRISM0', wind_mode='STRIPE', ypix=64)
# K6V star at K=8.4 mags
bp_k = S.ObsBandpass('k')
sp_K6V = pynrc.stellar_spectrum('K6V', 8.4, 'vegamag', bp_k)
# Constraints
well = 0.5 # Keep well below 50% full
tacq = 2.1*3600. # 2.1 hour transit duration
ng_max = 30 # Transit spectroscopy allows for up to 30 groups per integrations
nint_max = int(1e6) # Effectively no limit on number of integrations
# Let's bin the spectrum to R~100
# dw_bin is a passable parameter for specifiying spectral bin sizes
R = 100
dw_bin = (nrc.bandpass.avgwave() / 10000) / R
res = nrc.ramp_optimize(sp_K6V, tacq_max=tacq, nint_max=nint_max,
ng_min=10, ng_max=ng_max, well_frac_max=well,
dw_bin=dw_bin, verbose=True)
# Print the Top 2 settings for each readout pattern
res2 = table_filter(res, 2)
print(res2)
# Even though BRIGHT1 has a slight efficiency preference over RAPID
# and BRIGHT2, we decide to choose RAPID, because we are convinced
# that saving all data (and no coadding) is a better option.
# If APT informs you that the data rates or total data shorage is
# an issue, you can select one of the other options.
# Update to RAPID, ngroup=30, nint=700 and plot PPM
nrc.update_detectors(read_mode='RAPID', ngroup=30, nint=700)
snr_dict = nrc.sensitivity(sp=sp_K6V, dw_bin=dw_bin, forwardSNR=True, units='Jy')
wave = np.array(snr_dict['wave'])
snr = np.array(snr_dict['snr'])
# Let assume bg subtraction of something with similar noise
snr /= np.sqrt(2.)
ppm = 1e6 / snr
# NOTE: We have up until now neglected to include a "noise floor"
# which represents the expected minimum achievable ppm from
# unknown systematics. To first order, this can be added in
# quadrature to the calculated PPM.
noise_floor = 30 # in ppm
ppm_floor = np.sqrt(ppm**2 + noise_floor**2)
plt.plot(wave, ppm, marker='o', label='Calculated PPM')
plt.plot(wave, ppm_floor, marker='o', label='PPM + Noise Floor')
plt.xlabel('Wavelength ($\mu m$)')
plt.ylabel('Noise Limit (PPM)')
plt.xlim([2.4,4.1])
plt.ylim([20,100])
plt.legend()
```
## Example 5: Extended Source
Expect some faint galaxies of 25 ABMag/arcsec^2 in our field. What is the best we can do with 10,000 seconds of acquisition time?
```
# Detection bandpass is F200W
nrc = pynrc.NIRCam(filter='F200W')
# Flat spectrum (in photlam) with ABMag = 25 in the NIRCam bandpass
sp = pynrc.stellar_spectrum('flat', 25, 'abmag', nrc.bandpass)
res = nrc.ramp_optimize(sp, is_extended=True, tacq_max=10000, tacq_frac=0.05, verbose=True)
# Print the Top 2 settings for each readout pattern
res2 = table_filter(res, 2)
print(res2)
# MEDIUM8 10 10 looks like a good option
nrc.update_detectors(read_mode='MEDIUM8', ngroup=10, nint=10, verbose=True)
# Calculate flux/mag for various nsigma detection limits
tbl = Table(names=('Sigma', 'Point (nJy)', 'Extended (nJy/asec^2)',
'Point (AB Mag)', 'Extended (AB Mag/asec^2)'))
tbl['Sigma'].format = '.0f'
for k in tbl.keys()[1:]:
tbl[k].format = '.2f'
for sig in [1,3,5,10]:
snr_dict1 = nrc.sensitivity(nsig=sig, units='nJy', verbose=False)
snr_dict2 = nrc.sensitivity(nsig=sig, units='abmag', verbose=False)
tbl.add_row([sig, snr_dict1[0]['sensitivity'], snr_dict1[1]['sensitivity'],
snr_dict2[0]['sensitivity'], snr_dict2[1]['sensitivity']])
tbl
```
| github_jupyter |
<font size="+5">#05 | Transforming Basic Objects into the Powerful DataFrame</font>
<div class="alert alert-warning">
<ul>
<li>
<b>Python</b> + <b>Data Science</b> Tutorials in ↓
<ul>
<li>
<a href="https://www.youtube.com/c/PythonResolver?sub_confirmation=1"
>YouTube</a
>
</li>
<li>
<a href="https://blog.pythonresolver.com/">Blog</a>
</li>
<li>
<a href="https://github.com/jsulopz/00-python-resolver-discipline">GitHub</a>
</li>
</ul>
</li>
<li>
Author: <a href="https://twitter.com/jsulopz"><b>@jsulopz</b></a>
</li>
</ul>
</div>
<a href="https://colab.research.google.com/github/jsulopz/resolving-python-data-science/blob/main/05_Transforming%20Basic%20Objects%20into%20the%20Powerful%20DataFrame/05_transform-to-dataframe_session.ipynb">
<img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/>
</a>
# The Starting *Thing*
https://github.com/jsulopz/data/blob/main/best_tennis_players_stats.json
# All that glitters is not gold
- Not all objects can call the same functions
- Even though they may store the same information
## `list`
- [ ] Create a list with your math `grades`
- [ ] Compute the mean
- You cannot do the mean
- [ ] But, could you `sum()` and `len()`?
- [ ] And divide the sum by the number of players?
- [ ] Isn't it an `object` that could calculate the `mean()`?
<div class="alert alert-info">
<b>Tip</b>
<br>Programming is the <i>Art of Possibilities</i>
<br>If you think there is an easier way to do things,
<br>probably someone has already create something for it
<br>Apply the <a href="https://github.com/jsulopz/python-resolver-discipline/blob/main/01_Code%20of%20Discipline/01_Sketch%20the%20Result.md">Python Resolver Discipline</a> to look for the function in Google ↓
<br><i>"python list mean"</i>
</div>
## `Series`
- [ ] Use the `.` + `[tab]` key to see the `functions/methods` of the object
# Store the information in Python `objects` for the Best Tennis Players
- income
- titles
- grand slams
- turned professional
- wins
- losses
## Create a `dictionary` for Roger Federer
## Create a `dictionary` for Rafa Nadal
```
rafa = {'income': 127, #!
'titles': 90,
'grand slams': 21,
'turned professional': 2001,
'wins': 1038,
'losses': 209}
rafa
```
## Create a `dictionary` for Novak Djokovic
## How much wealth did all of them earned?
- Let's put all of them into a `list`
- [ ] And `sum()` the `income`
```
list_best_players = [roger, rafa, nole] #!
list_best_players
```
- The `sum()` is not an action
- that a simple object `list` can perform.
- [ ] Could we convert the list into a
- more powerful object `DataFrame`
- that could compute the `sum()`?
1. [ ] Access the `income` column
2. [ ] and compute the `sum()`
- [ ] Which type of `instance` is the table?
- [ ] What else can we do with this `instance`?
# Can we select specific parts of the `DataFrame`?
- Remember that an `instance` is a **structure of data**
- In other words, it may contain **more objects**
```
df_best_players #!
```
## Names of rows `index`
## Names of `columns`
## Number of rows & columns `shape`
## Just the `values` within the `DataFrame`
# The Uncovered Solution
| github_jupyter |
```
%run ../wikiqa/init.ipynb
import numpy as np
import pandas as pd
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import to_categorical
import matchzoo as mz
from matchzoo.contrib.models.esim import ESIM
def load_filtered_data(preprocessor, data_type):
    """Load a WikiQA split, preprocess it, and filter out unusable rows.

    Filtering removes (1) examples whose left or right text is empty after
    preprocessing and (2) questions with no positive answer at all (they
    contribute nothing to a ranking loss). The surviving examples are
    shuffled in place.

    Args:
        preprocessor: a matchzoo BasicPreprocessor. It is fitted on the
            'train' split and reused (transform only) for 'dev'/'test'.
        data_type: one of 'train', 'dev', 'test'.

    Returns:
        (X, Y, preprocessor) where X is a dict of aligned feature arrays
        and Y is the label array, both filtered and shuffled.
    """
    assert data_type in ['train', 'dev', 'test']
    data_pack = mz.datasets.wiki_qa.load_data(data_type, task='ranking')
    if data_type == 'train':
        X, Y = preprocessor.fit_transform(data_pack).unpack()
    else:
        X, Y = preprocessor.transform(data_pack).unpack()

    # Drop examples where either side is empty after preprocessing.
    new_idx = []
    for i in range(Y.shape[0]):
        if X["length_left"][i] == 0 or X["length_right"][i] == 0:
            continue
        new_idx.append(i)
    new_idx = np.array(new_idx)
    print("Removed empty data. Found ", (Y.shape[0] - new_idx.shape[0]))

    for k in X.keys():
        X[k] = X[k][new_idx]
    Y = Y[new_idx]

    # Keep only questions that have at least one positive label.
    pos_idx = (Y == 1)[:, 0]
    # Use a set for O(1) membership; the original scanned a numpy array per
    # query (O(n*m)). Result is identical for scalar question ids.
    pos_qid = set(X["id_left"][pos_idx])
    keep_idx_bool = np.array([qid in pos_qid for qid in X["id_left"]])
    keep_idx = np.arange(keep_idx_bool.shape[0])
    keep_idx = keep_idx[keep_idx_bool]
    print("Removed questions with no pos label. Found ", (keep_idx_bool == 0).sum())

    print("shuffling...")
    np.random.shuffle(keep_idx)
    for k in X.keys():
        X[k] = X[k][keep_idx]
    Y = Y[keep_idx]

    return X, Y, preprocessor
# Hyperparameters for data preparation and training.
fixed_length_left = 10    # padded token length of the left text (question)
fixed_length_right = 40   # padded token length of the right text (candidate answer)
batch_size = 32
epochs = 5

# prepare data: fit the preprocessor on train, reuse it (transform only) on dev.
preprocessor = mz.preprocessors.BasicPreprocessor(fixed_length_left=fixed_length_left,
                                                  fixed_length_right=fixed_length_right,
                                                  remove_stop_words=False,
                                                  filter_low_freq=10)
train_X, train_Y, preprocessor = load_filtered_data(preprocessor, 'train')
val_X, val_Y, _ = load_filtered_data(preprocessor, 'dev')
# Evaluation is run on the dev split. (The original comment mentioned the
# "quora dataset", but this notebook loads WikiQA — the test split is simply
# not used here.)
pred_X, pred_Y = val_X, val_Y
# pred_X, pred_Y, _ = load_filtered_data(preprocessor, 'test')

# Map the fitted vocabulary to pretrained GloVe vectors; out-of-vocabulary
# terms are initialized to 0.
# NOTE(review): `glove_embedding` is expected to come from ../wikiqa/init.ipynb
# (%run at the top of this notebook) — confirm.
embedding_matrix = glove_embedding.build_matrix(preprocessor.context['vocab_unit'].state['term_index'], initializer=lambda: 0)

# First run: ESIM configured as a *ranking* task.
model = ESIM()
model.params['task'] = mz.tasks.Ranking()
model.params['mask_value'] = 0                 # padding id to mask out
model.params['input_shapes'] = [[fixed_length_left, ],
                                [fixed_length_right, ]]
model.params['lstm_dim'] = 300
model.params['embedding_input_dim'] = preprocessor.context['vocab_size']
model.params['embedding_output_dim'] = 300     # matches GloVe 300-d vectors
model.params['embedding_trainable'] = False    # keep pretrained embeddings frozen
model.params['dropout_rate'] = 0.5
model.params['mlp_num_units'] = 300
model.params['mlp_num_layers'] = 0
model.params['mlp_num_fan_out'] = 300
model.params['mlp_activation_func'] = 'tanh'
# NOTE(review): `lr` is the legacy Keras argument name; newer versions
# spell it `learning_rate`.
model.params['optimizer'] = Adam(lr=4e-4)
model.guess_and_fill_missing_params()
model.build()
model.compile()
model.backend.summary()  # text summary of the underlying Keras model

model.load_embedding_matrix(embedding_matrix)
# Evaluate all ranking metrics on the dev set once per epoch.
evaluate = mz.callbacks.EvaluateAllMetrics(model,
                                           x=pred_X,
                                           y=pred_Y,
                                           once_every=1,
                                           batch_size=len(pred_Y))
history = model.fit(x = [train_X['text_left'],
                         train_X['text_right']],
                    y = train_Y,
                    validation_data = (val_X, val_Y),
                    batch_size = batch_size,
                    epochs = epochs,
                    callbacks=[evaluate]
                    )
# Second run: the same architecture as a 2-class *classification* task.
classification_task = mz.tasks.Classification(num_classes=2)
classification_task.metrics = 'acc'

model = ESIM()
model.params['task'] = classification_task
model.params['mask_value'] = 0
model.params['input_shapes'] = [[fixed_length_left, ],
                                [fixed_length_right, ]]
model.params['lstm_dim'] = 300
model.params['embedding_input_dim'] = preprocessor.context['vocab_size']
model.params['embedding_output_dim'] = 300
model.params['embedding_trainable'] = False
model.params['dropout_rate'] = 0.5
model.params['mlp_num_units'] = 300
model.params['mlp_num_layers'] = 0
model.params['mlp_num_fan_out'] = 300
model.params['mlp_activation_func'] = 'tanh'
model.params['optimizer'] = Adam(lr=4e-4)
model.guess_and_fill_missing_params()
model.build()
model.compile()
model.backend.summary()  # text summary of the underlying Keras model

# NOTE(review): this callback is created *before* train_Y/val_Y are converted
# to one-hot below, so it evaluates with pred_Y still in the original
# (ranking-style) label format — confirm this is intended.
evaluate = mz.callbacks.EvaluateAllMetrics(model,
                                           x=pred_X,
                                           y=pred_Y,
                                           once_every=1,
                                           batch_size=len(pred_Y))

# One-hot encode labels for the 2-class softmax output.
train_Y = to_categorical(train_Y)
val_Y = to_categorical(val_Y)

model.load_embedding_matrix(embedding_matrix)
history = model.fit(x = [train_X['text_left'],
                         train_X['text_right']],
                    y = train_Y,
                    validation_data = (val_X, val_Y),
                    batch_size = batch_size,
                    epochs = epochs,
                    callbacks=[evaluate]
                    )
model.evaluate(val_X, val_Y)
```
| github_jupyter |
# How to separate your credentials, secrets, and configurations from your source code with environment variables
## <a id="intro"></a>Introduction
A modern application often deals with credentials, secrets, and configurations to connect to other services such as an authentication service, database, cloud services, or microservices. It is not a good idea to keep your username, password, and other credentials hard-coded in your source code, as your credentials may leak when you share or publish the application. You would need to delete or comment out those credentials before sharing the code, which adds extra work for you — and eventually, you may forget to do it.
Service configurations such as API endpoints and database URLs should not be embedded in the source code either. The reason is that every time you change or update the configurations, you need to modify the code, which may lead to more errors.
How should we solve this issue?
### <a id=""></a>Store config in the environment
The [Twelve-Factor App methodology](https://12factor.net/) is one of the most influential patterns for designing scalable software-as-a-service applications. The methodology's [3rd factor](https://12factor.net/config) (aka the Config principle) states that configuration information should be kept as environment variables and injected into the application at runtime.
>The twelve-factor app stores config in environment variables (often shortened to env vars or env). Env vars are easy to change between deploys without changing any code; unlike config files, there is little chance of them being checked into the code repo accidentally; and unlike custom config files, or other config mechanisms such as Java System Properties, they are a language- and OS-agnostic standard.
### Introduction to .ENV file and dotenv
The dotenv method lets the application loads variables from a ```.env``` file into environment/running process the same way as the application load variables from environment variables. The application can load or modify the environment variables from the OS and ```.env``` file with a simple function call.
[dotenv](https://github.com/bkeepers/dotenv) is a library that originates from [Ruby](https://www.ruby-lang.org/en/) developers (especially the [Ruby on Rails](https://rubyonrails.org/) framework) and has been widely adopted and ported to many programming languages such as [python-dotenv](https://github.com/theskumar/python-dotenv), [dotenv-java](https://github.com/cdimascio/dotenv-java), [Node.js](https://github.com/motdotla/dotenv), etc.
The ```.env``` file is a simple text file locates at the root of the project with a key-value pair setting as the following:
```
# DB
DB_USER=User
DB_PASSWORD=MyPassword
# Cloud
CLOUD_URL=192.168.1.1
```
**Caution**:
You *should not* share this ```.env``` file to your peers or commit/push it to the version control. You should add the file to the ```.gitignore``` file to avoid adding it to a version control or public repo accidentally.
This notebook application demonstrates how to use the python-dotenv library to store and read the [Refinitiv Data Platform (RDP) APIs](https://developers.refinitiv.com/en/api-catalog/refinitiv-data-platform/refinitiv-data-platform-apis) credentials and configurations, and then request RDP data.
### <a id="whatis_rdp"></a>What is Refinitiv Data Platform (RDP) APIs?
The [Refinitiv Data Platform (RDP) APIs](https://developers.refinitiv.com/en/api-catalog/refinitiv-data-platform/refinitiv-data-platform-apis) provide various Refinitiv data and content for developers via easy to use Web based API.
RDP APIs give developers seamless and holistic access to all of the Refinitiv content such as Historical Pricing, Environmental Social and Governance (ESG), News, Research, etc and commingled with their content, enriching, integrating, and distributing the data through a single interface, delivered wherever they need it. The RDP APIs delivery mechanisms are the following:
* Request - Response: RESTful web service (HTTP GET, POST, PUT or DELETE)
* Alert: delivery is a mechanism to receive asynchronous updates (alerts) to a subscription.
* Bulks: deliver substantial payloads, like the end of day pricing data for the whole venue.
* Streaming: deliver real-time delivery of messages.
This example project is focusing on the Request-Response: RESTful web service delivery method only.
For more detail regarding Refinitiv Data Platform, please see the following APIs resources:
- [Quick Start](https://developers.refinitiv.com/en/api-catalog/refinitiv-data-platform/refinitiv-data-platform-apis/quick-start) page.
- [Tutorials](https://developers.refinitiv.com/en/api-catalog/refinitiv-data-platform/refinitiv-data-platform-apis/tutorials) page.
- [RDP APIs: Introduction to the Request-Response API](https://developers.refinitiv.com/en/api-catalog/refinitiv-data-platform/refinitiv-data-platform-apis/tutorials#introduction-to-the-request-response-api) page.
- [RDP APIs: Authorization - All about tokens](https://developers.refinitiv.com/en/api-catalog/refinitiv-data-platform/refinitiv-data-platform-apis/tutorials#authorization-all-about-tokens) page.
## Importing Libraries
The first step is importing all required libraries including the python-dotenv, requests, Pandas, etc.
```
import os
from dotenv import load_dotenv
import requests
import pandas as pd
import numpy as np
```
You should save a text file with **filename** `.env` or Environment Variables having the following configurations:
```
# RDP Core Credentials
RDP_USER=<Your RDP username>
RDP_PASSWORD=<Your RDP password>
RDP_APP_KEY=<Your RDP appkey>
# RDP Core Endpoints
RDP_BASE_URL = https://api.refinitiv.com
RDP_AUTH_URL=/auth/oauth2/v1/token
RDP_ESG_URL=/data/environmental-social-governance/v2/views/scores-full
```
You can use the python-dotenv library in IPython environment such as Jupyter Notebook or Jupyter Lab by executing the following Magic statements.
```
# Load the dotenv IPython extension; %dotenv then reads a .env file into the
# process environment.
%load_ext dotenv
# Use find_dotenv to locate the file
%dotenv
```
By default, it will use find_dotenv to search for a .env file in a current directory location. Please note that the OS/System's environment variables always override ```.env``` configurations by default as the following example.
Let's test with the ```USERNAME``` value which will be loaded from your System's environment variables.
```
%load_ext dotenv
%dotenv
# USERNAME comes from the OS environment here: OS variables override .env
# values by default, demonstrating the precedence described above.
print('User: ', os.getenv('USERNAME'))
```
Next, the notebook application uses ```os.getenv``` statement to get RDP APIs Auth service endpoint and user's RDP credentials configurations from environment.
```
# Get RDP Token service information from Environment Variables.
# NOTE(review): os.getenv returns None when a variable is unset, so
# base_URL + ... would raise TypeError if the .env file is missing — confirm
# the variables are defined before running.
base_URL = os.getenv('RDP_BASE_URL')                  # e.g. https://api.refinitiv.com
auth_endpoint = base_URL + os.getenv('RDP_AUTH_URL')  # OAuth2 token endpoint

# Get RDP Credentials information from Environment Variables
username = os.getenv('RDP_USER')
password = os.getenv('RDP_PASSWORD')
app_key = os.getenv('RDP_APP_KEY')
```
Refinitiv Data Platform entitlement check is based on OAuth 2.0 specification. The first step of an application workflow is to get a token from RDP Auth Service, which will allow access to the protected resource, i.e. data REST API's.
We create the RDP Auth Service request message with additional variables in the next step.
```
# RDP Auth Services request message variables
client_secret = ''   # public (password-grant) client: no client secret
scope = 'trapi'
auth_obj = None      # will hold the token-response JSON after authentication

# -- Init and Authenticate Session
# OAuth 2.0 password-grant request body for the RDP Auth Service.
auth_request_msg = {
    'username': username ,
    'password': password ,
    'grant_type': "password",
    'scope': scope,
    # Kick out any other session already using the same credentials.
    'takeExclusiveSignOnControl': "true"
}
```
Now notebook is ready to send the HTTP request message with the *requests* library.
It keeps the response JSON message which contains the RDP Access Token information in the *auth_obj* variable.
```
# Authentication with RDP Auth Service.
# BUG FIX: if requests.post raised, `response` was never assigned and the
# status check below crashed with NameError; initialize it and guard the check.
response = None
try:
    response = requests.post(auth_endpoint,
                             headers={'Accept': 'application/json'},
                             data=auth_request_msg,
                             auth=(app_key, client_secret))
except Exception as exp:
    print('Caught exception: %s' % str(exp))

if response is not None and response.status_code == 200:  # HTTP Status 'OK'
    print('Authentication success')
    auth_obj = response.json()  # keep the token payload for later requests
elif response is not None:
    print('RDP authentication result failure: %s %s' % (response.status_code, response.reason))
    print('Text: %s' % (response.text))
```
After the application received the Access Token (an authorization token) from RDP Auth Service, all subsequent REST API calls will use this token to get the data. The application needs to input Access Token via *Authorization* HTTP request message header as shown below.
- Header:
* Authorization = ```Bearer <RDP Access Token>```
Please notice *the space* between the ```Bearer``` and ```RDP Access Token``` values.
The next step is requesting ESG (Environmental, Social, and Governance) data from RDP. We use the ESG scores-full API endpoint which provides full coverage of Refinitiv's proprietary ESG Scores with full history for consumers as an example API.
We get the RDP ESG Service API endpoint from a ```.env``` file.
```
# Get the RDP ESG service endpoint from Environment Variables.
esg_url = base_URL + os.getenv('RDP_ESG_URL')

# ESG Score Full request messages variables
universe = 'TSLA.O'          # instrument to query
payload = {'universe': universe}
esg_object = None            # will hold the ESG response JSON

# Request data for ESG Score Full Service.
# BUG FIX: if requests.get raised, `response` was never assigned and the
# status check below crashed with NameError; initialize it and guard the check.
response = None
try:
    response = requests.get(esg_url,
                            headers={'Authorization': 'Bearer {}'.format(auth_obj['access_token'])},
                            params=payload)
except Exception as exp:
    print('Caught exception: %s' % str(exp))

if response is not None and response.status_code == 200:  # HTTP Status 'OK'
    print('Receive ESG Data from RDP APIs success')
    #print(response.json())
    esg_object = response.json()
elif response is not None:
    print('RDP APIs: ESG data request failure: %s %s' % (response.status_code, response.reason))
    print('Text: %s' % (response.text))
```
Once we receive the ESG data from RDP, we can convert it from a JSON object to a Pandas DataFrame with the following steps:
1. Gets the data and column name from JSON object and then re-constructs it as a new map object.
2. Converts the JSON's data field to the numpy array.
3. Create a new Pandas Dataframe from the numpy data array and headers map.
```
# Re-shape the RDP JSON payload into a Pandas DataFrame:
# column titles come from the 'headers' list, rows from the 'data' array.
headers = esg_object['headers']
# Extract each column title with a list comprehension.
titles = [header['title'] for header in headers]
dataArray = np.array(esg_object['data'])
df = pd.DataFrame(data=dataArray, columns=titles)
df.head()
```
## Plotting Graph
Then we can plot a graph of the ESG Dataframe object using [matplotlib library](https://matplotlib.org/).
```
# Import matplotlib
from matplotlib import pyplot as plt
```
The ESG Data from RDP contains a lot of information, so we will create a new Dataframe object from the original Dataframe to compare only *ESG Score*, *ESG Combined Score* and *ESG Controversies Score* fields.
```
# Narrow the full ESG response down to the identifier, date, and the three
# score columns we want to compare in the plot.
df_plot=pd.DataFrame(df,columns=['Instrument','Period End Date','ESG Score','ESG Combined Score','ESG Controversies Score'])
df_plot.head()
```
The data for the Y-axis is the *Period End Date* field and the X-axis is the ESG scores fields. We want to display only the year (eg "2017", "2016") therefore we reformat the data in "Period End Date" column using below codes.
```
# Keep only the year part of 'Period End Date' (e.g. '2017-12-31' -> '2017').
df_plot['Period End Date']= df_plot['Period End Date'].str.split('-').str[0]
df_plot.head(2)
```
Then sort data as ascending order.
```
# Sort oldest-to-newest so the x-axis reads chronologically.
df_plot.sort_values('Period End Date', ascending=True, inplace=True)

# Plotting a Graph
fig = plt.figure()
plt.ticklabel_format(style='plain')
plt.title('%s ESG Data' % (universe), color='black', fontsize='x-large')
# Reuse the axes object instead of calling fig.gca() a second time
# (the original assigned `ax` and then never used it).
ax = fig.gca()
df_plot.plot(kind='line', ax=ax, x='Period End Date',
             y=['ESG Score', 'ESG Combined Score', 'ESG Controversies Score'],
             figsize=(14, 7))
plt.show()
```
## Conclusion
The above code shows that you do not need to change the code if the RDP credentials or service endpoint is changed (example update the API version). We can just update the configurations in a ```.env``` file (or System environment variables) and re-run the application.
| github_jupyter |
# Preprocessing the ABA spatial transcriptomics data
```
# Importing libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from morphontogeny.functions.IO import nifti_to_array
from sklearn.preprocessing import StandardScaler
# List of the genes used in this analysis are taken from:
# Bohland et al., 2010: https://doi.org/10.1016/j.ymeth.2009.09.001

# Loading the list of top genes,
# which is included in this repo at files/list_top_genes.csv.
# Renamed from `list` to avoid shadowing the Python builtin.
gene_list = pd.read_csv("files/list_top_genes.csv")
# Keep only the rows flagged for use in the analysis.
list_top = gene_list[gene_list['Used in analysis?'] == 1]
# All gene expression files are downloaded using the tools provided by
# Allen Institute in R language (not provided here) and converted to CSV format.

# Make the E(v,g) matrix of ISH data for top genes:
# voxels stored in rows, genes as columns
# (but some of the genes in the list can't be found in the ABA dataset).
# Using a numpy MEMMAP since the full matrix is too large for RAM.
# NOTE(review): `gene` is assumed to be one previously loaded expression
# vector whose length is the voxel count — confirm.
# BUG FIX: shape is (voxels, genes) to match the column-wise fill below;
# the original had the two dimensions swapped.
topgenes_mmp = np.memmap('data/genes_mmp.mymemmap',
                         dtype='float32', mode='w+',
                         shape=(gene.shape[0], list_top.shape[0]))
for i in range(list_top.shape[0]):
    try:
        # BUG FIX: list_top is a filtered DataFrame, so its index labels are
        # not 0..n-1; use positional access (.iloc) rather than label access.
        gene_id = list_top['ID'].iloc[i]
        topgenes_mmp[:, i] = np.genfromtxt('data/' + gene_id + '.csv',
                                           delimiter=',', skip_header=1,
                                           usecols=range(1, 2379)).flatten()
    except Exception as exc:
        # Previously a silent bare `except`; at least report which gene
        # failed (missing CSVs leave the column as zeros).
        print('Skipping gene at position %d: %s' % (i, exc))

# To deal with -1s (missing voxels) in the data:
# the basic method is to convert them to 0s, or impute them (recommended).
# Changing -1s to 0s
X_pos = np.where(topgenes_mmp < 0, 0, topgenes_mmp)

# Standardizing features by removing the mean and scaling to unit variance
X_std = StandardScaler().fit_transform(X_pos)
# Masking the data for half brain,
# using the NIFTI files for the Allen Mouse Brain Average in 200 um res.
# (provided in the 'files' folder).

# Loading the neuroanatomy
anat_arr = nifti_to_array('files/allen_annot200.nii')

# Halving by Neuroanatomy.
# BUG FIX: the source array is `anat_arr`; the original referenced an
# undefined name `anat_3D`.
anat_half = np.zeros_like(anat_arr)
anat_half[:, :29, :] = anat_arr[:, :29, :]

# Vectorizing and getting indices of non-zero values.
anat_half_vec = anat_half.flatten()
# BUG FIX: np.nonzero returns a *tuple* of arrays (which has no .shape);
# np.flatnonzero gives the flat integer indices directly.
half_indices = np.flatnonzero(anat_half_vec)
anat_half_masked = anat_half_vec[half_indices]

# Saving indices to file (the indices file is provided in this repo).
np.save('files/half_indices.npy', half_indices)

# Half-brain-masked gene expression data: one fancy-index selection replaces
# the original per-column loop (same result).
half_arr = X_std[half_indices, :]

# Standardizing the matrix
half_mask_pos_std = StandardScaler().fit_transform(half_arr)
# Masking the data for whole brain,
# using the NIFTI files for the Allen Mouse Brain Average in 200 um res.
# (provided in the 'files' folder).

# Loading the neuroanatomy.
# BUG FIX: `nifti_to_vector` is never imported (only nifti_to_array is);
# flatten the returned array instead.
anat_vec = nifti_to_array('files/allen_annot200.nii').flatten()

# Getting flat integer indices of non-zero values.
# BUG FIX: np.nonzero returns a tuple of arrays; np.flatnonzero gives the
# flat indices directly (the original later called .shape on the tuple).
indices = np.flatnonzero(anat_vec)

# Masking the anatomy
anat_masked = anat_vec[indices]

# Saving indices to file (the indices file is provided in this repo).
np.save('files/mask_indices.npy', indices)

# Whole-brain-masked gene expression data (fancy index replaces the loop).
mask_arr = X_std[indices, :]

# Standardizing the matrix
mask_pos_std = StandardScaler().fit_transform(mask_arr)
```
| github_jupyter |
```
# Load the Filter2D+Dilate overlay bitstream onto the PL (FPGA fabric).
from pynq import Overlay
bs = Overlay("/usr/local/lib/python3.6/dist-packages/pynq_cv/overlays/xv2Filter2DDilate.bit")
bs.download()

# Route contiguous-memory allocation through the overlay's allocator library;
# the PL kernels require physically contiguous buffers.
from pynq import Xlnk
Xlnk.set_allocator_library('/usr/local/lib/python3.6/dist-packages/pynq_cv/overlays/xv2Filter2DDilate.so')
mem_manager = Xlnk()

import pynq_cv.overlays.xv2Filter2DDilate as xv2
import numpy as np
import cv2
import time

# Read the test image and convert to single-channel grayscale for filtering.
img = cv2.imread('/home/xilinx/jupyter_notebooks/eeve.jpeg')
imgY = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
print("Size of imgY is ",imgY.shape);
height, width, channels = img.shape
def _kernel3x3(rows, divisor=1.0):
    """Build a float32 3x3 convolution kernel, optionally normalized."""
    return np.array(rows, np.float32) / divisor

# Bank of named 3x3 convolution kernels to choose from.
kernel_g = {
    'average blur': _kernel3x3([[1.0, 1.0, 1.0],
                                [1.0, 1.0, 1.0],
                                [1.0, 1.0, 1.0]], 9),
    'gaussian blur': _kernel3x3([[1.0, 2.0, 1.0],
                                 [2.0, 4.0, 2.0],
                                 [1.0, 2.0, 1.0]], 16),
    'sobel ver': _kernel3x3([[1.0, 0.0, -1.0],
                             [2.0, 0.0, -2.0],
                             [1.0, 0.0, -1.0]]),
    'sobel hor': _kernel3x3([[1.0, 2.0, 1.0],
                             [0.0, 0.0, 0.0],
                             [-1.0, -2.0, -1.0]]),
    'laplacian': _kernel3x3([[0.0, 1.0, 0.0],
                             [1.0, -4.0, 1.0],
                             [0.0, 1.0, 0.0]]),
    'sharpen': _kernel3x3([[-1.0, -1.0, -1.0],
                           [-1.0, 9.0, -1.0],
                           [-1.0, -1.0, -1.0]]),
    'sharpen1': _kernel3x3([[0.0, -1.0, 0.0],
                            [-1.0, 5.0, -1.0],
                            [0.0, -1.0, 0.0]]),
}
# The kernel actually applied in the benchmark below.
kernel = kernel_g['sharpen1']
# Number of filter passes to average over in the timing comparison below.
numberOfIterations=50
# Destination buffer for the CPU (software) result.
dstSW = np.ones((height,width),np.uint8);
xFimgY = mem_manager.cma_array((height,width),np.uint8) #allocated physically contiguous numpy array
xFimgY[:] = imgY[:] # copy source data into the contiguous buffer
xFdst = mem_manager.cma_array((height,width),np.uint8) #allocated physically contiguous numpy array
# Baseline: run OpenCV's filter2D on the ARM CPU numberOfIterations times.
startSW=time.time()
for i in range(numberOfIterations):
    cv2.filter2D(imgY,-1,kernel,dst=dstSW,borderType=cv2.BORDER_CONSTANT) #filter2D on ARM
stopSW=time.time()
print("Start SW loop = ", (stopSW - startSW))
print("SW frames per second: ", ((numberOfIterations) / (stopSW - startSW)))

# Same workload offloaded to the programmable logic (FPGA) via the overlay.
startPL=time.time()
for i in range(numberOfIterations):
    xv2.filter2D(xFimgY,-1,kernel,dst=xFdst,borderType=cv2.BORDER_CONSTANT) #filter2D offloaded to PL, working on physically contiguous numpy arrays
stopPL=time.time()
print("Start HW loop = ", (stopPL - startPL))
print("PL frames per second: ", ((numberOfIterations) / (stopPL - startPL)))
# Render the software result; the bare expressions below only display
# output when run interactively in a notebook.
import PIL.Image
image = PIL.Image.fromarray(dstSW)
image
xFdst
dstSW
imgY
kernel
# len() of a 2-D array is its row count (height), not the pixel count.
print(len(imgY))
print(len(dstSW))
print(len(xFdst))
```
| github_jupyter |
# Training a Custom TensorFlow.js Audio Model
In this notebook, we show how to train a custom audio model based on the model topology of the
[TensorFlow.js Speech Commands model](https://www.npmjs.com/package/@tensorflow-models/speech-commands).
The training is done in Python by using a set of audio examples stored as .wav files.
The trained model is convertible to the
[TensorFlow.js LayersModel](https://js.tensorflow.org/api/latest/#loadLayersModel) format for
inference and further fine-tuning in the browser.
It may also be converted to the [TFLite](https://www.tensorflow.org/lite) format
for inference on mobile devices.
This example uses a small subset of the
[Speech Commands v0.02](https://arxiv.org/abs/1804.03209) dataset, and builds
a model that detects two English words ("yes" and "no") against background noises. But the methodology demonstrated here is general and can be applied to
other sounds, as long as they are stored in the same .wav file format as in this example.
## Data format
The training procedure in this notebook makes the following assumption about the raw audio data:
1. The root data directory contains a number of folders. The name of each folder is the name
of the audio class. You can select any subset of the folders (i.e., classes) to train the
model on.
2. Within each folder, there are a number of .wav files. Each .wav file corresponds to an
example. Each .wav file is mono (single-channel) and has the typical pulse-code modulation
(PCM) encoding. The duration of each wave file should be 1 second or slightly longer.
3. There can be a special folder called "_background_noise_" that contains .wav files for
audio samples that fall into the background noise class. Each of these .wav files can be
much longer than 1 second in duration. This notebook contains code that extracts 1-second
snippets from these .wav files
The Speech Commands v0.3 dataset used in this notebook meets these data format requirements.
```
!pip install librosa tensorflowjs
import glob
import json
import os
import random
import librosa
import matplotlib.pyplot as plt
import numpy as np
from scipy import signal
from scipy.io import wavfile
import tensorflow as tf
import tensorflowjs as tfjs
import tqdm
print(tf.__version__)
print(tfjs.__version__)
# Download the pretrained TensorFlow.js Speech Commands model (topology,
# metadata, and the two weight shards) plus the associated audio
# preprocessing model, then unpack the latter.
!mkdir -p /tmp/tfjs-sc-model
!curl -o /tmp/tfjs-sc-model/metadata.json -fsSL https://storage.googleapis.com/tfjs-models/tfjs/speech-commands/v0.3/browser_fft/18w/metadata.json
!curl -o /tmp/tfjs-sc-model/model.json -fsSL https://storage.googleapis.com/tfjs-models/tfjs/speech-commands/v0.3/browser_fft/18w/model.json
!curl -o /tmp/tfjs-sc-model/group1-shard1of2 -fSsL https://storage.googleapis.com/tfjs-models/tfjs/speech-commands/v0.3/browser_fft/18w/group1-shard1of2
!curl -o /tmp/tfjs-sc-model/group1-shard2of2 -fsSL https://storage.googleapis.com/tfjs-models/tfjs/speech-commands/v0.3/browser_fft/18w/group1-shard2of2
!curl -o /tmp/tfjs-sc-model/sc_preproc_model.tar.gz -fSsL https://storage.googleapis.com/tfjs-models/tfjs/speech-commands/conversion/sc_preproc_model.tar.gz
!cd /tmp/tfjs-sc-model/ && tar xzvf sc_preproc_model.tar.gz

# Download Speech Commands v0.02 dataset. The dataset contains 30+ word and
# sound categories, but we will only use a subset of them.
!mkdir -p /tmp/speech_commands_v0.02
!curl -o /tmp/speech_commands_v0.02/speech_commands_v0.02.tar.gz -fSsL http://download.tensorflow.org/data/speech_commands_v0.02.tar.gz
!cd /tmp/speech_commands_v0.02 && tar xzf speech_commands_v0.02.tar.gz
# Load the preprocessing model, which transforms audio waveform into
# spectrograms (2D image-like representation of sound).
# This preprocessing model replicates WebAudio's AnalyzerNode.getFloatFrequencyData
# (https://developer.mozilla.org/en-US/docs/Web/API/AnalyserNode/getFloatFrequencyData).
# It performs short-time Fourier transform (STFT) using a length-2048 Blackman
# window. It operates on mono audio at the 44100-Hz sample rate.
preproc_model_path = '/tmp/tfjs-sc-model/sc_preproc_model'
preproc_model = tf.keras.models.load_model(preproc_model_path)
preproc_model.summary()
# The last input dimension is the waveform length the model expects.
preproc_model.input_shape
# Create some constants to be used later.

# Target sampling rate. It is required by the audio preprocessing model.
TARGET_SAMPLE_RATE = 44100
# The specific audio tensor length expected by the preprocessing model.
EXPECTED_WAVEFORM_LEN = preproc_model.input_shape[-1]

# Where the Speech Commands v0.02 dataset has been downloaded.
DATA_ROOT = "/tmp/speech_commands_v0.02"
# Class folders to train on: noise snippets plus the words "no" and "yes";
# class index = position in this tuple.
WORDS = ("_background_noise_snippets_", "no", "yes")

# Unlike word examples, the noise samples in the Speech Commands v0.02 dataset
# are not divided into 1-second snippets. Instead, they are stored as longer
# recordings. Therefore we need to cut them up into 1-second snippet .wav
# files.
noise_wav_paths = glob.glob(os.path.join(DATA_ROOT, "_background_noise_", "*.wav"))
snippets_dir = os.path.join(DATA_ROOT, "_background_noise_snippets_")
os.makedirs(snippets_dir, exist_ok=True)
def extract_snippets(wav_path, snippet_duration_sec=1.0, out_dir=None):
    """Cut a long 16-bit PCM .wav recording into consecutive fixed-length snippets.

    Snippets are written as "<basename>_<start-sample>.wav" files. Any
    trailing partial snippet (shorter than the requested duration) is dropped.

    Args:
        wav_path: Path to the source .wav file (must be int16 PCM).
        snippet_duration_sec: Duration of each snippet in seconds.
        out_dir: Directory to write snippets into. Defaults to the
            module-level `snippets_dir` (backward-compatible behavior).
    """
    if out_dir is None:
        out_dir = snippets_dir  # module-level default from the data-prep cell
    basename = os.path.basename(os.path.splitext(wav_path)[0])
    sample_rate, xs = wavfile.read(wav_path)
    assert xs.dtype == np.int16
    n_samples_per_snippet = int(snippet_duration_sec * sample_rate)
    i = 0
    while i + n_samples_per_snippet < len(xs):
        snippet_wav_path = os.path.join(out_dir, "%s_%.5d.wav" % (basename, i))
        snippet = xs[i : i + n_samples_per_snippet].astype(np.int16)
        wavfile.write(snippet_wav_path, sample_rate, snippet)
        i += n_samples_per_snippet
# Split every long background-noise recording into 1-second snippets.
for noise_wav_path in noise_wav_paths:
    print("Extracting snippets from %s..." % noise_wav_path)
    extract_snippets(noise_wav_path, snippet_duration_sec=1.0)
def resample_wavs(dir_path, target_sample_rate=44100):
    """Resample the .wav files in an input directory to a given sampling rate.

    The resampled waveforms are written to .wav files in the same directory
    with file names that end in "_<rate>hz.wav". Files already carrying that
    suffix are skipped, so the function is safe to re-run.

    44100 Hz is the sample rate required by the preprocessing model. It is also
    the most widely supported sample rate among web browsers and mobile devices.
    For example, see:
    https://developer.mozilla.org/en-US/docs/Web/API/AudioContextOptions/sampleRate
    https://developer.android.com/ndk/guides/audio/sampling-audio

    Args:
        dir_path: Path to a directory that contains .wav files.
        target_sample_rate: Target sampling rate in Hz.
    """
    wav_paths = glob.glob(os.path.join(dir_path, "*.wav"))
    resampled_suffix = "_%shz.wav" % target_sample_rate
    for i, wav_path in tqdm.tqdm(enumerate(wav_paths)):
        if wav_path.endswith(resampled_suffix):
            continue
        sample_rate, xs = wavfile.read(wav_path)
        xs = xs.astype(np.float32)
        # BUG FIX: resample to the requested `target_sample_rate`; the
        # original used the module-level TARGET_SAMPLE_RATE constant, so the
        # parameter was silently ignored and the written file's declared rate
        # could disagree with its actual rate.
        xs = librosa.resample(xs, sample_rate, target_sample_rate).astype(np.int16)
        resampled_path = os.path.splitext(wav_path)[0] + resampled_suffix
        wavfile.write(resampled_path, target_sample_rate, xs)
# Resample every class folder (including the noise snippets) to 44.1 kHz.
for word in WORDS:
    word_dir = os.path.join(DATA_ROOT, word)
    assert os.path.isdir(word_dir)
    resample_wavs(word_dir, target_sample_rate=TARGET_SAMPLE_RATE)
@tf.function
def read_wav(filepath):
    """Decode a .wav file into a [1, num_samples] float32 waveform tensor."""
    file_contents = tf.io.read_file(filepath)
    return tf.expand_dims(tf.squeeze(tf.audio.decode_wav(
        file_contents,
        desired_channels=-1,
        desired_samples=TARGET_SAMPLE_RATE).audio, axis=-1), 0)

@tf.function
def filter_by_waveform_length(waveform, label):
    """Keep only waveforms strictly longer than the model's expected length."""
    return tf.size(waveform) > EXPECTED_WAVEFORM_LEN

@tf.function
def crop_and_convert_to_spectrogram(waveform, label):
    """Crop to EXPECTED_WAVEFORM_LEN and run the preprocessing model to get a
    spectrogram; the leading batch dimension is squeezed away."""
    cropped = tf.slice(waveform, begin=[0, 0], size=[1, EXPECTED_WAVEFORM_LEN])
    return tf.squeeze(preproc_model(cropped), axis=0), label

@tf.function
def spectrogram_elements_finite(spectrogram, label):
    """True iff every spectrogram element is finite (no NaN/Inf)."""
    return tf.math.reduce_all(tf.math.is_finite(spectrogram))
def get_dataset(input_wav_paths, labels):
    """Build a tf.data.Dataset of (spectrogram, label) pairs.

    Each element is a 2-tuple of
      - a z-normalized float32 spectrogram of shape [43, 232, 1], and
      - a scalar int32 class index.

    Args:
        input_wav_paths: Input audio .wav file paths as a list of strings.
        labels: Integer class indices, one per path (same length as
            `input_wav_paths`).

    Returns:
        The filtered, preprocessed tf.data.Dataset described above.
    """
    path_ds = tf.data.Dataset.from_tensor_slices(input_wav_paths)
    label_ds = tf.data.Dataset.from_tensor_slices(labels)
    # Decode waveforms and pair each with its label, then: keep only
    # waveforms long enough to crop, convert them to spectrograms via the
    # preprocessing model, and drop any example with non-finite elements.
    return (
        tf.data.Dataset.zip((path_ds.map(read_wav), label_ds))
        .filter(filter_by_waveform_length)
        .map(crop_and_convert_to_spectrogram)
        .filter(spectrogram_elements_finite)
    )
# Enumerate all resampled .wav files per class; class index = position in WORDS.
input_wav_paths_and_labels = []
for i, word in enumerate(WORDS):
    wav_paths = glob.glob(os.path.join(DATA_ROOT, word, "*_%shz.wav" % TARGET_SAMPLE_RATE))
    print("Found %d examples for class %s" % (len(wav_paths), word))
    labels = [i] * len(wav_paths)
    input_wav_paths_and_labels.extend(zip(wav_paths, labels))
# Shuffle paths and labels together so the later train/validation split is unbiased.
random.shuffle(input_wav_paths_and_labels)
input_wav_paths, labels = ([t[0] for t in input_wav_paths_and_labels],
                           [t[1] for t in input_wav_paths_and_labels])
dataset = get_dataset(input_wav_paths, labels)
# Show some example spectrograms for inspection.
fig = plt.figure(figsize=(40, 100))
dataset_iter = iter(dataset)
num_spectrograms_to_show = 10
for i in range(num_spectrograms_to_show):
    ax = fig.add_subplot(1, num_spectrograms_to_show, i + 1)
    spectrogram, label = next(dataset_iter)
    spectrogram = spectrogram.numpy()
    label = label.numpy()
    # Transpose + flip so frequency increases upward along the y-axis.
    plt.imshow(np.flipud(np.squeeze(spectrogram, -1).T), aspect=0.2)
    ax.set_title("Example of \"%s\"" % WORDS[label])
    ax.set_xlabel("Time frame #")
    if i == 0:
        ax.set_ylabel("Frequency bin #")
# The dataset is small enough for typical host RAM or GPU memory, so
# materialize it once into numpy arrays for better training performance:
#   - xs: the audio features (normalized spectrograms)
#   - ys: the labels (class indices)
print(
    "Loading dataset and converting data to numpy arrays. "
    "This may take a few minutes...")
xs_and_ys = list(dataset)
features = []
class_indices = []
for spectrogram, label in xs_and_ys:
    features.append(spectrogram)
    class_indices.append(label)
xs = np.stack(features)
ys = np.stack(class_indices)
print("Done.")
tfjs_model_json_path = '/tmp/tfjs-sc-model/model.json'

# Load the pretrained Speech Commands model. Weights are loaded along with
# the topology because we do NOT train from scratch: we perform transfer
# learning on top of the pretrained model.
orig_model = tfjs.converters.load_keras_model(tfjs_model_json_path, load_weights=True)

# Remove the top Dense layer and add a new Dense layer of which the output
# size fits the number of sound classes we care about.
model = tf.keras.Sequential(name="TransferLearnedModel")
for layer in orig_model.layers[:-1]:
    model.add(layer)
model.add(tf.keras.layers.Dense(units=len(WORDS), activation="softmax"))

# Freeze all but the last layer of the model. The last layer will be fine-tuned
# during transfer learning.
for layer in model.layers[:-1]:
    layer.trainable = False

model.compile(optimizer="sgd", loss="sparse_categorical_crossentropy", metrics=["acc"])
model.summary()
# Train the model.
model.fit(xs, ys, batch_size=256, validation_split=0.3, shuffle=True, epochs=50)
# Convert the model to TensorFlow.js Layers model format.
tfjs_model_dir = "/tmp/tfjs-model"
tfjs.converters.save_keras_model(model, tfjs_model_dir)
# Create the metadata.json file.
metadata = {"words": ["_background_noise_"] + WORDS[1:], "frameSize": model.input_shape[-2]}
with open(os.path.join(tfjs_model_dir, "metadata.json"), "w") as f:
json.dump(metadata, f)
!ls -lh /tmp/tfjs_model
```
To deploy this model to the web, you can use the
[speech-commands NPM package](https://www.npmjs.com/package/@tensorflow-models/speech-commands).
The model.json and metadata.json should be hosted together with the two weights (.bin) files in the same HTTP/HTTPS directory.
Then the custom model can be loaded in JavaScript with:
```js
import * as tf from '@tensorflow/tfjs';
import * as speechCommands from '@tensorflow-models/speech-commands';
const recognizer = speechCommands.create(
'BROWSER_FFT',
null,
'http://test.com/my-audio-model/model.json', // URL to the custom model's model.json
'http://test.com/my-audio-model/metadata.json' // URL to the custom model's metadata.json
);
```
```
# Convert the model to TFLite.
# We need to combine the preprocessing model and the newly trained 3-class model
# so that the resultant model will be able to perform STFT and spectrogram
# calculation on mobile devices (i.e., without web browser's WebAudio).
combined_model = tf.keras.Sequential(name='CombinedModel')
combined_model.add(preproc_model)
combined_model.add(model)
# Build against a raw-waveform input of the expected length.
combined_model.build([None, EXPECTED_WAVEFORM_LEN])
combined_model.summary()
tflite_output_path = '/tmp/tfjs-sc-model/combined_model.tflite'
converter = tf.lite.TFLiteConverter.from_keras_model(combined_model)
# NOTE(review): SELECT_TF_OPS is presumably needed because the preprocessing
# layers use TF ops with no TFLite builtin equivalent -- confirm.
converter.target_spec.supported_ops = [
    tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS
]
with open(tflite_output_path, 'wb') as f:
    f.write(converter.convert())
print("Saved tflite file at: %s" % tflite_output_path)
```
| github_jupyter |
```
# designed to be run after 03-clinical_variables_final. this notebook does some data cleaning/processing. run before -___ notebook.
## cleans many aspects of the raw clinical variables.
## collapses and formats all of the various categorical variables into discrete variables as well.
import pandas as pd
import matplotlib.pyplot as plt
import os
from pathlib import Path
import seaborn as sns
import numpy as np
import glob
from sklearn.externals.joblib import Memory
memory = Memory(cachedir='/tmp', verbose=0)
#@memory.cache above any def fxn.
%matplotlib inline
plt.style.use('ggplot')
from notebook.services.config import ConfigManager
cm = ConfigManager()
cm.update('livereveal', {
'width': 1024,
'height': 768,
'scroll': True,
})
%load_ext autotime
#patients of interest from rotation_cohort_generation
from parameters import final_pt_df_v, date, repository_path
#patients of interest from rotation_cohort_generation
final_pt_df2 = final_pt_df_v
del(final_pt_df_v)
patients= list(final_pt_df2['subject_id'].unique())
hadm_id= list(final_pt_df2['hadm_id'].unique())
icustay_id= list(final_pt_df2['icustay_id'].unique())
icustay_id= [int(x) for x in icustay_id]
final_pt_df2['final_bin'].value_counts()
```
# extracting clinical data for our patients
## IMPORTANT, USE THIS TO TUNE TIMEWINDOW OF EXTRACTION AND FOLDER TO SAVE IN
#NOTE ON MY DF NAMING CONVENTION:
Originally, when I coded this workbook, it was for 72-hour time windows, so every dataframe name ended in _72. This was changed on 6/5/19 and made more generalizable by finding the name of each corresponding df in the df list and using that variable.
```
from parameters import lower_window, upper_window, folder, date, time_col, time_var, patient_df, save_boolean
```
### begin pipeline:
# changing my code structure to be a dictionary of dataframes
```
#folder to save files to:
save_path= str(repository_path)+'/data/cleaned/'
#folder=None

def save_df(df, df_name='default', save_path=save_path, add_subfolder=False):
    """
    Save *df* as CSV to <save_path>/<folder>/<date>_<df_name>_prepped.csv,
    creating the <folder> subdirectory if it does not exist.

    df: dataframe (or anything pd.DataFrame accepts) to save.
    df_name: basename used in the output file name; callers always pass this
        explicitly. NOTE(review): the 'default' branch stringifies the whole
        dataframe (its repr), which is not a usable file name -- confirm
        before relying on the default.
    save_path: root output directory (defaults to the module-level value).
    add_subfolder: currently unused; kept for interface compatibility.

    Uses the module-level `folder` and `date` imported from parameters.
    """
    if df_name == 'default':
        df_name = "%s" % (df)
    address = save_path + '%s/' % (folder)
    if not os.path.exists(address):
        print(address)
        os.makedirs(address)
    pd.DataFrame(df).to_csv(Path(address + '%s_%s_prepped.csv' % (date, df_name)))
# BUGFIX: removed a stray no-op expression (`save_path+'%s/'%(folder)`) that
# built a string and discarded it.
##folder with all clinical variable csv's
allFiles = glob.glob(str(repository_path)+ '/data/raw/%s/'%(folder) + "{}_*.csv".format(date))
allFiles

# Dataframe names are the part of each file name between the date prefix and
# the .csv suffix. Load every CSV into a dict keyed by that name.
# (Replaces a manual integer-counter loop with zip -- same pairing, same order.)
df_list = [path.split('{}_'.format(date))[1].split('.csv')[0] for path in allFiles]
dfs = {name: pd.read_csv(path, index_col=0) for name, path in zip(df_list, allFiles)}
df_list
#assigning the appropriate name to each df in a flexible way.
# Each *_df variable holds the KEY (name) of the matching dataframe in `dfs`.
def _first_df_name(substring):
    """Return the first name in df_list containing substring (IndexError if none)."""
    return [s for s in df_list if substring in s][0]

bg_df = _first_df_name('bg')
cancer_elix_df = _first_df_name('cancer')
uti_df = _first_df_name('uti')
vent_df = _first_df_name('vent')
vitals_df = _first_df_name('vitals')
vaso_df = _first_df_name('vaso')
pt_info_df = _first_df_name('pt_info')
gcs_df = _first_df_name('gcs')
sum_elix_df = _first_df_name('sum_elix')
sofa_df = _first_df_name('sofa')
weight_df = _first_df_name('weight')
labs_df = _first_df_name('labs')
height_df = _first_df_name('height')
rrt_df = _first_df_name('rrt')
# attach the cohort anchor time t_0 (from final_pt_df2) to every dataframe that
# lacks it, joining on icustay_id when available, otherwise on hadm_id.
for name in df_list:
    cols = list(dfs[name])
    if 't_0' not in cols and 'icustay_id' in cols:
        dfs[name] = pd.merge(dfs[name], final_pt_df2[['icustay_id','t_0']], how='left')
    elif 't_0' not in cols and 'hadm_id' in cols:
        dfs[name] = pd.merge(dfs[name], final_pt_df2[['hadm_id','t_0']], how='left')
    else:
        print("false")

# quick sanity check: show the resulting column set of every frame
for name in df_list:
    print(name, ':', list(dfs[name]))
from parameters import time_var, value_fill, delta_fill, uom_fill
def yn_convert(df, #df in format where each row corresponds to a test, and a patient can have many rows
               label_fill, # value that will be filled to na's
               pt= final_pt_df2,
               time_var=time_var,#'t_0', #
               value_fill=value_fill,#0,
               delta_fill=delta_fill,# pd.to_timedelta('0 days'),
               uom_fill=uom_fill):#'y/n'):
    """
    Collapse (binarize) a long-format dataframe -- one row per test, many rows
    per patient -- into one row per icustay_id, where `value` is 1 if the
    patient has ANY non-NA value within the clinical time window (the window is
    enforced upstream at data collection) and `value_fill` (default 0) otherwise.

    Parameters
    ----------
    df : long-format frame with columns icustay_id, value, label, uom, delta.
    label_fill : label written into the `label` column for filled (absent) rows.
    pt : per-patient cohort frame supplying one row per icustay_id.
    time_var : column in `pt` holding the anchor time of the clinical window.
    value_fill : value assigned when a patient has no observation (default 0).
    delta_fill : time delta assigned to filled rows.
    uom_fill : unit-of-measure string forced onto every row (default 'y/n').

    Returns
    -------
    One row per icustay_id with a binary `value` (1 = any observation present).
    """
    # left-join every cohort icustay_id against the observations; icustays with
    # no observation come back with NaN value/label/uom/delta and are treated
    # as negative below.
    yn_df = pd.merge(pt[['icustay_id', time_var]],
                     df[['icustay_id','value','label','uom','delta']],
                     left_on= 'icustay_id',
                     right_on= 'icustay_id',
                     how='left')
    yn_df['value']= yn_df['value'].fillna(value_fill) # absent observation -> negative
    yn_df.loc[yn_df.loc[:,'value']!=value_fill, 'value']= 1 # squash all present values into a binary 1 = yes
    yn_df['delta']= yn_df['delta'].fillna(delta_fill)
    yn_df['delta']= pd.to_timedelta(yn_df['delta']) # normalize delta dtype (filled rows get time = 0)
    yn_df['uom']= yn_df['uom'].fillna(uom_fill)
    yn_df.loc[yn_df.loc[:,'uom']!=uom_fill, 'uom']= uom_fill
    yn_df['label']= yn_df['label'].fillna(label_fill)
    # added 12-12-19: dedupe to truly one row per icustay_id, keeping the most
    # positive (value desc) and earliest (delta asc) row.
    yn_df= yn_df.sort_values(['value','delta'], ascending=[False, True]).drop_duplicates(subset='icustay_id',keep='first')
    return(yn_df)
```
## vaso dose
```
# rename starttime -> charttime and label -> vaso_type for consistency with the
# other long-format clinical frames.
dfs[vaso_df]= dfs[vaso_df].rename(
    columns={'starttime':'charttime','label':'vaso_type'})
len(dfs[vaso_df])
dfs[vaso_df].head()
# drop rows dosed in units/hour (also appearing as 'Uhr'): that dosing reflects
# a different clinical use of vasopressin.
dfs[vaso_df] = dfs[vaso_df].loc[dfs[vaso_df].loc[:,'rate_uom']!= 'units/hour',:]
dfs[vaso_df] = dfs[vaso_df].loc[dfs[vaso_df].loc[:,'rate_uom']!= 'Uhr',:]
len(dfs[vaso_df])
dfs[vaso_df]['rate_uom'].unique()
```
#### removing outliers/extreme values
```
# Use transform to add a column back to the orig df from a groupby aggregation;
# transform returns a Series with its index aligned to the orig df.
def vaso_outlier_removal(df):
    """
    Drop rows whose vaso_rate is almost certainly erroneous.

    A row is removed only when BOTH hold:
      * vaso_rate exceeds the drug's literature-derived plausible maximum
        ('high_value', see notes), AND
      * vaso_rate is >= 3 standard deviations above its (vaso_type, rate_uom)
        group mean.

    Returns the filtered dataframe; the helper columns std/mean/high_value are
    left in place and dropped by the caller.

    BUGFIX: previously this function ignored its `df` argument and read/mutated
    the global dfs[vaso_df]; it now operates on the argument. It also pushed
    string columns through the std/mean transforms (relying on pandas'
    since-removed nuisance-column dropping); the numeric vaso_rate column is
    now selected explicitly.
    """
    rate_by_group = df.groupby(['vaso_type', 'rate_uom'])['vaso_rate']
    df['std'] = rate_by_group.transform(lambda x: x.std())
    df['mean'] = rate_by_group.transform(lambda x: x.mean())
    # highest rates one might plausibly see clinically; above this is likely
    # erroneous (values taken from the literature, see notes)
    normal_high_value = pd.DataFrame({
        'vaso_type': ['dobutamine', 'dopamine', 'epinephrine', 'norepinephrine',
                      'vasopressin', 'phenylephrine'],
        'high_value': [40, 20, 0.5, 1, 0.1, 2],
    })
    df = pd.merge(df, normal_high_value, left_on='vaso_type', right_on='vaso_type')
    keep = ~((df['vaso_rate'] > df['high_value']) &
             ((df['vaso_rate'] - df['mean']) >= (3 * df['std'])))
    return df[keep]
# remove outlier doses, then standardize to the common long format
dfs[vaso_df]= vaso_outlier_removal(dfs[vaso_df])
len(dfs[vaso_df]) #52976 ->49340
dfs[vaso_df]['vaso_type'].unique()
#standardizing names, dropping unneeded columns for analysis
dfs[vaso_df]= dfs[vaso_df].drop(['vaso_amount', 'amount_uom','std','mean','high_value'], axis=1)
dfs[vaso_df]= dfs[vaso_df].rename(index=str, columns={'vaso_rate': 'value', 'rate_uom':'uom','vaso_type':'label'})
dfs[vaso_df]['label'].unique()
dfs[vaso_df].head()
# split the vaso frame into one frame per drug
epinephrine_df= dfs[vaso_df][dfs[vaso_df]['label']=='epinephrine']
norepinephrine_df= dfs[vaso_df][dfs[vaso_df]['label']=='norepinephrine']
phenylephrine_df= dfs[vaso_df][dfs[vaso_df]['label']=='phenylephrine']
vasopressin_df= dfs[vaso_df][dfs[vaso_df]['label']=='vasopressin']
dopamine_df= dfs[vaso_df][dfs[vaso_df]['label']=='dopamine']
dobutamine_df= dfs[vaso_df][dfs[vaso_df]['label']=='dobutamine']
# build an "any vasoactive" frame covering all six drugs
all_vaso=pd.concat([epinephrine_df, norepinephrine_df,phenylephrine_df, vasopressin_df,dopamine_df,dobutamine_df ], sort=False)#.sort_values(['icustay_id','delta','label','source'], ascending=True)
# sort so that a patient's most positive and earliest rows are kept and
# everything else dropped.
all_vaso= all_vaso.sort_values(['value','delta'], ascending=[False, True]).drop_duplicates(subset='icustay_id',keep='first')
all_vaso['label']='any_vasoactives'
#(could be transferred to a different spreadsheet for collapsing values)
# y/n convert: collapse each per-drug frame (and the combined frame) down to
# "does the patient have any dose in the time window?" -- one row per icustay_id
epinephrine_df=yn_convert(epinephrine_df, label_fill='epinephrine', pt= final_pt_df2, value_fill=0, delta_fill=0, uom_fill='y/n', time_var=time_var)
norepinephrine_df=yn_convert(norepinephrine_df, label_fill='norepinephrine', pt= final_pt_df2, value_fill=0, delta_fill=0, uom_fill='y/n', time_var=time_var)
phenylephrine_df=yn_convert(phenylephrine_df, label_fill='phenylephrine', pt= final_pt_df2, value_fill=0, delta_fill=0, uom_fill='y/n', time_var=time_var)
vasopressin_df=yn_convert(vasopressin_df, label_fill='vasopressin', pt= final_pt_df2, value_fill=0, delta_fill=0, uom_fill='y/n', time_var=time_var)
dopamine_df=yn_convert(dopamine_df, label_fill='dopamine', pt= final_pt_df2, value_fill=0, delta_fill=0, uom_fill='y/n', time_var=time_var)
dobutamine_df=yn_convert(dobutamine_df, label_fill='dobutamine', pt= final_pt_df2, value_fill=0, delta_fill=0, uom_fill='y/n', time_var=time_var)
all_vaso=yn_convert(all_vaso, label_fill='any_vasoactives', pt= final_pt_df2, value_fill=0, delta_fill=0, uom_fill='y/n', time_var=time_var)
# spot-check one icustay
norepinephrine_df[norepinephrine_df['icustay_id']==299654.0]
all_vaso['icustay_id'].nunique()
#all_vaso=all_vaso.sort_values(['value','delta'], ascending=[False, True]).drop_duplicates(subset='icustay_id',keep='first')
len(all_vaso)
# persist each collapsed frame and free the memory
save_df(all_vaso, df_name='all_vaso')
del(all_vaso)
save_df(epinephrine_df, df_name='epinephrine')
del(epinephrine_df)
save_df(norepinephrine_df, df_name='norepinephrine')
del(norepinephrine_df)
save_df(phenylephrine_df, df_name='phenylephrine')
del(phenylephrine_df)
save_df(vasopressin_df, df_name='vasopressin')
del(vasopressin_df)
save_df(dopamine_df, df_name='dopamine')
del(dopamine_df)
save_df(dobutamine_df, df_name='dobutamine')
del(dobutamine_df)
del(dfs[vaso_df])
```
## ELIX
```
#convert cancer elix to y/n (one row per icustay_id), then persist and free both
# elixhauser frames.
dfs[cancer_elix_df]= yn_convert(dfs[cancer_elix_df], label_fill=0, pt= final_pt_df2, value_fill=0, delta_fill=0, uom_fill='y/n', time_var=time_var)
save_df(dfs[cancer_elix_df], df_name='cancer_elix')
del(dfs[cancer_elix_df])
# sum_elix is already one row per stay; save as-is
save_df(dfs[sum_elix_df], 'sum_elix')
del(dfs[sum_elix_df])
```
## vitals -
```
dfs[vitals_df].head()
dfs[vitals_df].loc[:,'vitalid'].unique()
dfs[bg_df].loc[dfs[bg_df].loc[:,'label']=='fio2_chartevents',:]#unique()
# standardize column names to the common long format (uom / label / value)
dfs[vitals_df] = dfs[vitals_df].rename(index=str, columns={"valueuom":"uom","vitalid":'label', 'valuenum':'value'}) #change valueom to uom
dfs[vitals_df] = dfs[vitals_df].loc[dfs[vitals_df]['label'].notnull(),:]#.count() #removing null values
# normalize unit spellings
dfs[vitals_df].loc[dfs[vitals_df].loc[:,'uom']=='BPM','uom']='bpm'
#overall the values are extremely similar and are likely the same thing
#i will combine them.
dfs[vitals_df].loc[
    (dfs[vitals_df]['label']=='RespRate') &
    (dfs[vitals_df]['uom']=='bpm'),'uom']='insp/min'
dfs[vitals_df].loc[
    (dfs[vitals_df]['label']=='TempC') &
    (dfs[vitals_df]['uom']=='?C'),'uom']='Deg. C'
# NOTE(review): the two TempF rules below relabel the uom to 'Deg. C' but the
# values are not converted from Fahrenheit here -- presumably the F->C value
# conversion happens downstream; confirm before using temperature values.
dfs[vitals_df].loc[
    (dfs[vitals_df]['label']=='TempF') &
    (dfs[vitals_df]['uom']=='Deg. F'),'uom']='Deg. C'
dfs[vitals_df].loc[
    (dfs[vitals_df]['label']=='TempF') &
    (dfs[vitals_df]['uom']=='?F'),'uom']='Deg. C'
# merge both temperature labels into a single 'temperature' label
dfs[vitals_df].loc[
    (dfs[vitals_df]['label']=='TempF'),'label']='temperature'
dfs[vitals_df].loc[
    (dfs[vitals_df]['label']=='TempC'),'label']='temperature'
```
- glucose max
- glucose min
- diasBP min
- heartrate min
- meanart pressure min
- RespRate min
- SYSbp min
- TEMPC min
### most likely erroneous value removal
```
#erroneous value cutoff summary
## setting a conservative threshold for erroneous values to not skew my data.
## Each rule NaNs out physiologically implausible readings; the NaN rows are
## dropped at the end of this cell.
# NOTE(review): the first (icustay-specific, >99999) glucose rule is subsumed by
# the global >99998 rule right below it -- likely redundant.
(dfs[vitals_df].loc[(dfs[vitals_df].loc[:,'icustay_id']==228393.0) &
                    (dfs[vitals_df].loc[:,'label']=='Glucose') &
                    (dfs[vitals_df].loc[:,'value']>99999), 'value'])=np.nan
(dfs[vitals_df].loc[(dfs[vitals_df].loc[:,'label']=='Glucose') &
                    (dfs[vitals_df].loc[:,'value']>99998), 'value'])=np.nan
(dfs[vitals_df].loc[(dfs[vitals_df].loc[:,'label']=='Glucose') &
                    (dfs[vitals_df].loc[:,'value']<15), 'value'])=np.nan
(dfs[vitals_df].loc[(dfs[vitals_df].loc[:,'label']=='DiasBP') &
                    (dfs[vitals_df].loc[:,'value']<15), 'value'])=np.nan
(dfs[vitals_df].loc[(dfs[vitals_df].loc[:,'label']=='HeartRate') &
                    (dfs[vitals_df].loc[:,'value'].between(1,29)), 'value'])=np.nan
(dfs[vitals_df].loc[(dfs[vitals_df].loc[:,'label']=='RespRate') &
                    (dfs[vitals_df].loc[:,'value']<4), 'value'])=np.nan
(dfs[vitals_df].loc[(dfs[vitals_df].loc[:,'label']=='SysBP') &
                    (dfs[vitals_df].loc[:,'value']<40), 'value'])=np.nan
(dfs[vitals_df].loc[(dfs[vitals_df].loc[:,'label']=='TempC') &
                    (dfs[vitals_df].loc[:,'value']<28), 'value'])=np.nan
# drop the rows NaN'ed above
dfs[vitals_df] = dfs[vitals_df].loc[dfs[vitals_df]['value'].notnull(),:]#.count()
### saving spo2 for later use in bloodgas 11/25/19
spo2=dfs[vitals_df][dfs[vitals_df]['label']=='SpO2'].copy()
### separating fio2 out (kept in its own frame, removed from vitals)
fio2_chart_df = dfs[vitals_df].loc[dfs[vitals_df].loc[:,'label']=='fio2_chartevents',:]#unique()
dfs[vitals_df]= dfs[vitals_df].loc[dfs[vitals_df].loc[:,'label']!='fio2_chartevents',:]
save_df(dfs[vitals_df], 'vitals')
del(dfs[vitals_df])
```
# labs -
```
dfs[labs_df].head(10)
# standardize to the common long format: valuenum -> value
dfs[labs_df]= dfs[labs_df].rename(
    columns={'valuenum':'value'}) #changing valuenum to value
dfs[labs_df].groupby('label')['uom'].value_counts() #looks good
```
### most likely erroneous value removal
```
#summary value removal- was explored and coded adhoc, difficult to automate.
# Each rule NaNs out a single manually-identified implausible lab value for a
# specific icustay; the NaN rows are dropped below.
(dfs[labs_df].loc[
    (dfs[labs_df].loc[:,'icustay_id']==261887) &
    (dfs[labs_df].loc[:,'label']=='CHLORIDE')
    & (dfs[labs_df].loc[:,'value']==3.4),'value'])=np.nan
(dfs[labs_df].loc[
    (dfs[labs_df].loc[:,'icustay_id']==236290) &
    (dfs[labs_df].loc[:,'label']=='CHLORIDE')
    & (dfs[labs_df].loc[:,'value']==11.0),'value'])=np.nan
(dfs[labs_df].loc[
    (dfs[labs_df].loc[:,'icustay_id']==292769) &
    (dfs[labs_df].loc[:,'label']=='INR')
    & (dfs[labs_df].loc[:,'value']==28.1),'value'])=np.nan
(dfs[labs_df].loc[
    (dfs[labs_df].loc[:,'icustay_id']==298457) &
    (dfs[labs_df].loc[:,'label']=='INR')
    & (dfs[labs_df].loc[:,'value']==48.8),'value'])=np.nan
(dfs[labs_df].loc[
    (dfs[labs_df].loc[:,'icustay_id']==234174) &
    (dfs[labs_df].loc[:,'label']=='INR')
    & (dfs[labs_df].loc[:,'value']==48.7),'value'])=np.nan
(dfs[labs_df].loc[
    (dfs[labs_df].loc[:,'icustay_id']==290264) &
    (dfs[labs_df].loc[:,'label']=='INR')
    & (dfs[labs_df].loc[:,'value']==42.0),'value'])=np.nan
(dfs[labs_df].loc[
    (dfs[labs_df].loc[:,'icustay_id']==290264) &
    (dfs[labs_df].loc[:,'label']=='INR')
    & (dfs[labs_df].loc[:,'value']==22.8),'value'])=np.nan
# drop the rows NaN'ed above
dfs[labs_df]= dfs[labs_df].loc[dfs[labs_df].loc[:,'value'].notnull(),:] #removing null values
dfs[labs_df].head()
# drop labs excluded from analysis
unwanted_values= ['HEMATOCRIT','ANION GAP','PT','ALBUMIN']
dfs[labs_df]= dfs[labs_df].loc[~dfs[labs_df].loc[:,'label'].isin(unwanted_values),:]
```
# factorizing bands start
* converting bands into a categorical variable since it is very sparse
```
#df in format where each row corresponds to a test, and a patient can have many rows
def yn_convert_band(df,
                    label_fill="absent",
                    threshold=10,
                    pt= final_pt_df2,
                    time_var='t_0',
                    value_fill=9999,
                    delta_fill=pd.to_timedelta('0 days'),
                    uom_fill='y/n'):
    """
    Categorize the (very sparse) bands lab into one of three buckets per
    icustay_id: "<{threshold}", ">{threshold}", or "absent" (no measurement in
    the time window).

    NOTE(review): near-duplicate of yn_convert; on the todo list to remove this
    and fold the functionality into yn_convert.
    NOTE(review): values exactly equal to `threshold` land in the
    "<{threshold}" bucket (the comparison is <=) -- confirm the label text is
    intended.
    """
    # left-join the cohort against the observations; icustays with no
    # measurement come back NaN and get the sentinel value_fill (9999) so they
    # can be labeled "absent" below.
    yn_df = pd.merge(pt[['icustay_id','hadm_id','subject_id', time_var]],
                     df[['icustay_id','value','label','uom','delta']],
                     left_on= 'icustay_id',
                     right_on= 'icustay_id',
                     how='left')
    yn_df['value']= yn_df['value'].fillna(value_fill)
    criteria0=yn_df.loc[:,'value']==value_fill  # sentinel: no measurement
    criteria1=pd.to_numeric(yn_df.loc[:,'value'])<=threshold
    criteria2=pd.to_numeric(yn_df.loc[:,'value'])>threshold
    yn_df.loc[criteria1, 'value']= "<{}".format(threshold)
    yn_df.loc[criteria2, 'value']= ">{}".format(threshold)
    # applied last so sentinel rows (which also satisfy criteria2) end up "absent"
    yn_df.loc[criteria0, 'value']= "absent"
    yn_df['delta']= yn_df['delta'].fillna(delta_fill)
    yn_df['delta']= pd.to_timedelta(yn_df['delta']) # filled rows get time delta 0
    yn_df['uom']= yn_df['uom'].fillna(uom_fill)
    yn_df.loc[yn_df.loc[:,'uom']!=uom_fill, 'uom']= uom_fill
    yn_df['label']= yn_df['label'].fillna(label_fill)
    return(yn_df)
# take each icustay's maximum bands value and categorize it
band_df=dfs[labs_df][dfs[labs_df]['label']=='BANDS']
max_bands=band_df.loc[band_df.groupby('icustay_id', as_index=False)['value'].idxmax(),:]
del(band_df)
band_cat=yn_convert_band(df=max_bands,
                         label_fill="BANDS",
                         threshold=10,
                         pt= final_pt_df2,
                         time_var='t_0',
                         value_fill=9999,
                         delta_fill=pd.to_timedelta('0 days'),
                         uom_fill='y/n')
#drop bands from lab_df (replaced by the categorical version)
dfs[labs_df]=dfs[labs_df].drop(dfs[labs_df][dfs[labs_df]['label']=='BANDS'].index)
##dropping charttime, may be problematic later.
dfs[labs_df]=dfs[labs_df].drop('charttime', axis=1)
#appending categorical bands
dfs[labs_df]=dfs[labs_df].append(band_cat)
dfs[labs_df].loc[dfs[labs_df]['label']=='BANDS','value'].value_counts()
# pd.DataFrame(dfs[labs_df]).to_csv(Path(
#     save_path+'/%s_labs_prepped.csv' %(date)))
save_df(dfs[labs_df], 'labs')
del(dfs[labs_df])
```
## vent category -
```
dfs[vent_df]['icustay_id'].nunique()
#13978 patients with some form of vent data.
# standardize to the common long format (uom / label / delta)
dfs[vent_df]['uom']='mech/O2/none category'
dfs[vent_df].head()
dfs[vent_df]=dfs[vent_df].rename(index=str, columns={'day':'delta'})
# NOTE(review): 'vent_recieved' (sic) is an established label value used
# downstream -- do not "fix" the spelling without updating consumers.
dfs[vent_df]['label']='vent_recieved'
dfs[vent_df]['delta']=pd.to_timedelta(dfs[vent_df]['delta'], unit='d')
#dfs[vent_df]= dfs[vent_df].drop(columns=['day'], axis=1) #removing day column
dfs[vent_df].head()
#collapsing into 1 column for N days
def vent_day_collapser(x):
    """
    Collapse a patient's per-day ventilation values into a single category,
    taking the worst observed: Mech > Oxygen > None.
    """
    observed = set(x.unique())
    if 'Mech' in observed:
        return 'Mech'
    if 'Oxygen' in observed:
        return 'Oxygen'
    return 'None'
#collapsing all days into the worst day (Mech > Oxygen > None) per icustay.
ventcategory_1day_df= dfs[vent_df].copy()
ventcategory_1day_df['value']=ventcategory_1day_df.groupby('icustay_id',as_index=False)['value'].transform(vent_day_collapser)
ventcategory_1day_df= ventcategory_1day_df.drop_duplicates(['icustay_id','value']).sort_values('icustay_id') #
ventcategory_1day_df= ventcategory_1day_df.loc[ventcategory_1day_df.loc[:,'icustay_id'].isin(icustay_id),:] #had icustay ids not in final cohort, fail-safe measure
ventcategory_1day_df.head()
ventcategory_1day_df['value'].value_counts()
save_df(ventcategory_1day_df, 'ventcategory')
# ventcategory_1day_df is kept in memory: it will be deleted after pao2:fio2 calc
```
## weight and height firstday -
i explored weightdurations and it had more missing values than weightfirstday, so i will use that. we can revisit this if we need longitudinal weights
```
# NOTE(review): the weight and height cells below are near-identical --
# candidates for a shared helper.
dfs[weight_df]['uom']='kg'
dfs[weight_df].head()
#weight column is the conglomerate of weight_admin>weight_daily> weight_echoinhosp> weight_echoprehosp
dfs[weight_df]= dfs[weight_df][dfs[weight_df]['weight'].notnull()]
dfs[weight_df]= dfs[weight_df][['icustay_id','weight','uom']]
dfs[weight_df]['label']= 'weight'
dfs[weight_df]=dfs[weight_df].rename(index=str, columns={'weight':'value'})
#adding the assumed first day delta column to standardize all columns
dfs[weight_df]['delta']=pd.to_timedelta(0,'days')
#adding t_0
dfs[weight_df]= pd.merge(dfs[weight_df], final_pt_df2[['icustay_id',time_var]], left_on='icustay_id', right_on='icustay_id')
# pd.DataFrame(dfs[weight_df]).to_csv(Path(
#     save_path+'/%s_weight_prepped.csv' %(date)))
save_df(dfs[weight_df], 'weight')
del(dfs[weight_df])
# same preparation for height (first-day value, cm)
dfs[height_df]['uom']='cm'
dfs[height_df]= dfs[height_df][dfs[height_df]['height'].notnull()]
dfs[height_df]= dfs[height_df][['icustay_id','height','uom']]
dfs[height_df]['label']= 'height'
dfs[height_df]=dfs[height_df].rename(index=str, columns={'height':'value'})
#adding the assumed first day delta column to standardize all columns
dfs[height_df]['delta']=pd.to_timedelta(0,'days')
#adding t_0
dfs[height_df]= pd.merge(dfs[height_df], final_pt_df2[['icustay_id',time_var]], left_on='icustay_id', right_on='icustay_id')
#heightfirstday
save_df(dfs[height_df], 'height')
del(dfs[height_df])
```
# UTI
```
dfs[uti_df]['value'].unique()#seems good #all uti within clinical timewindow
# keep only potentially-positive rows: drop explicit negatives and
# machine-failure results
dfs[uti_df]= dfs[uti_df].loc[(dfs[uti_df].loc[:,'value']!='NEG')&
                             dfs[uti_df].loc[:,'value'].notnull(),:] #filter to only pos rows
dfs[uti_df]= dfs[uti_df].loc[(dfs[uti_df].loc[:,'value']!='COMPUTER NETWORK FAILURE. TEST NOT RESULTED.')&
                             dfs[uti_df].loc[:,'value'].notnull(),:] #filter to only pos rows
dfs[uti_df]= dfs[uti_df].drop_duplicates(subset=['hadm_id','value','charttime'])
# binarize: every remaining row counts as positive.
# NOTE(review): the isna() branch below is vacuous -- nulls were dropped above.
dfs[uti_df].loc[dfs[uti_df].loc[:,'value'].notnull(),'value']= 1
dfs[uti_df].loc[dfs[uti_df].loc[:,'value'].isna(),'value']= 0
dfs[uti_df].head()
def uti_categorizer(uti_df):
    """
    Longitudinal view: one row per (hadm_id, day) with the max (pos/neg) value
    for that day -- if multiple positives occur in a day there is only 1 row
    for that day -- merged with the cohort identifiers and sorted so a
    patient's positive, earliest days come first.
    """
    #gives the max pos or neg value per day
    df_timewindow_perday=uti_df.groupby(['hadm_id','delta'], as_index=False)['value'].agg({'value':'max'})
    df_timewindow_perday= pd.merge(df_timewindow_perday,final_pt_df2[['icustay_id','hadm_id', time_var]], left_on='hadm_id', right_on='hadm_id', how='left')
    df_timewindow_perday=df_timewindow_perday.sort_values(['hadm_id','value','delta'], ascending=[True,False,True])
    return(df_timewindow_perday)
def yn_uti(uti_df, label):
    """
    Collapse longitudinal UTI data into one row per icustay_id: 'pos' if the
    patient has any positive result in their stay, otherwise 'Neg/Not_tested'.

    NOTE(review): drop_duplicates keeps the FIRST (earliest-day) row per
    hadm_id; this is correct only because uti_df was pre-filtered upstream to
    positive rows (all values are 1) -- confirm if reusing on unfiltered data.
    """
    # max value per (hadm_id, day)
    df_timewindow_perday=uti_df.groupby(['hadm_id','delta'], as_index=False)['value'].agg({'value':'max'})
    first_pos= df_timewindow_perday.drop_duplicates(['hadm_id'])
    # left-join the cohort so untested patients get a row too (NaN -> 0 below)
    collapsed= pd.merge(final_pt_df2[['hadm_id','icustay_id','subject_id', time_var]],first_pos, left_on='hadm_id', right_on='hadm_id', how='left')
    collapsed['value']= collapsed['value'].fillna(0)
    collapsed.loc[collapsed.loc[:,'value']==1,'value']= 'pos'
    collapsed.loc[collapsed.loc[:,'value']==0,'value']= 'Neg/Not_tested'
    collapsed['delta']= collapsed['delta'].fillna(pd.Timedelta(1, unit='d'))
    collapsed['label']= label
    collapsed['uom']='pos/neg category'
    return(collapsed)
# collapse nitrite and leukocyte results separately, save, and free memory
uti_nit_pos= dfs[uti_df][dfs[uti_df]['label']=="Nitrite"]
uti_leuk_pos= dfs[uti_df][dfs[uti_df]['label']=="Leukocytes"]
leuk_collapsed= yn_uti(uti_leuk_pos, 'leukocyte')
nit_collapsed= yn_uti(uti_nit_pos, 'nitrite')
save_df(leuk_collapsed, 'leuk')
del(leuk_collapsed)
save_df(nit_collapsed, 'nit')
del(nit_collapsed)
del(dfs[uti_df])
```
# bloodgas
```
# drop null values and the '.' placeholder entries; standardize uom column name
dfs[bg_df]= dfs[bg_df].loc[dfs[bg_df]['value'].notnull(),:]
dfs[bg_df] = dfs[bg_df].rename(index=str, columns={'valueuom':'uom'})
dfs[bg_df]= dfs[bg_df].loc[~(dfs[bg_df].loc[:,'value']=='.'),:]
#may need to remove outliers, haven't done as of 10/22/18
dfs[bg_df].head()
```
### most likely erroneous value removal
The code below manually removes values that have a high likelihood of being erroneous, in a way that doesn't follow great programming practices. It is on my list of things to improve, but it is currently functional.
```
##calcium
# fixing two calcium entries recorded 100x too large (94 -> 0.94, 97 -> 0.97);
# both the numeric (valuenum) and string (value) columns are patched.
(dfs[bg_df].loc[
    (dfs[bg_df].loc[:,'icustay_id']==249571) &
    (dfs[bg_df].loc[:,'label']=='CALCIUM')
    & (dfs[bg_df].loc[:,'valuenum']==94.00),'valuenum'])=0.94#.where('valuenum'==94.00))
(dfs[bg_df].loc[
    (dfs[bg_df].loc[:,'icustay_id']==249571) &
    (dfs[bg_df].loc[:,'label']=='CALCIUM')
    & (dfs[bg_df].loc[:,'value']=='094'),'value'])=0.94#.where('valuenum'==94.00))
(dfs[bg_df].loc[
    (dfs[bg_df].loc[:,'icustay_id']==219600) &
    (dfs[bg_df].loc[:,'label']=='CALCIUM')
    & (dfs[bg_df].loc[:,'valuenum']==97.00),'valuenum'])=0.97#.where('valuenum'==94.00))
(dfs[bg_df].loc[
    (dfs[bg_df].loc[:,'icustay_id']==219600) &
    (dfs[bg_df].loc[:,'label']=='CALCIUM')
    & (dfs[bg_df].loc[:,'value']=='097'),'value'])=0.97#.where('valuenum'==94.00))
##min chloride
# implausible chloride readings -> NaN (both value and valuenum columns)
(dfs[bg_df].loc[(dfs[bg_df].loc[:,'icustay_id']==261887.0) &
                (dfs[bg_df].loc[:,'label']=='CHLORIDE') &
                (dfs[bg_df].loc[:,'valuenum']==3.4),'value'])=np.nan
(dfs[bg_df].loc[(dfs[bg_df].loc[:,'icustay_id']==261887.0) &
                (dfs[bg_df].loc[:,'label']=='CHLORIDE') &
                (dfs[bg_df].loc[:,'valuenum']==3.4),'valuenum'])=np.nan
(dfs[bg_df].loc[(dfs[bg_df].loc[:,'icustay_id']==236290.0) &
                (dfs[bg_df].loc[:,'label']=='CHLORIDE') &
                (dfs[bg_df].loc[:,'valuenum']==11.0),'valuenum'])=np.nan
(dfs[bg_df].loc[(dfs[bg_df].loc[:,'icustay_id']==236290.0) &
                (dfs[bg_df].loc[:,'label']=='CHLORIDE') &
                (dfs[bg_df].loc[:,'valuenum']==11.0),'value'])=np.nan
#peep changes summary:
dfs[bg_df].loc[(dfs[bg_df].loc[:,'label']=='PEEP')&
               (dfs[bg_df]['valuenum']>38),'valuenum']=np.nan #remove this or set to 50?
#temp changes summary: implausibly low temperatures -> NaN
dfs[bg_df].loc[(dfs[bg_df].loc[:,'label']=='TEMPERATURE')&
               (dfs[bg_df]['icustay_id']==253821)&
               (dfs[bg_df]['valuenum']==18.9),
               'value']= np.nan
dfs[bg_df].loc[(dfs[bg_df].loc[:,'label']=='TEMPERATURE')&
               (dfs[bg_df]['icustay_id']==253821)&
               (dfs[bg_df]['valuenum']==18.9),
               'valuenum']= np.nan
dfs[bg_df].loc[(dfs[bg_df].loc[:,'label']=='TEMPERATURE')&
               (dfs[bg_df]['icustay_id']==251788)&
               (dfs[bg_df]['valuenum']==10.0),
               'value']= np.nan
dfs[bg_df].loc[(dfs[bg_df].loc[:,'label']=='TEMPERATURE')&
               (dfs[bg_df]['icustay_id']==251788)&
               (dfs[bg_df]['valuenum']==10.0),
               'valuenum']= np.nan
#fio2 changes summary:
##converting a few placeholder values to null, thus removing them from the dataset
dfs[bg_df].loc[(dfs[bg_df].loc[:,'label']=='FIO2') &
               (dfs[bg_df].loc[:,'value']=='0'),'value']=np.nan
dfs[bg_df].loc[(dfs[bg_df].loc[:,'label']=='FIO2') &
               (dfs[bg_df].loc[:,'value']=='-'),'value']=np.nan
##removing all fio2 values between 1-20.9 (below room-air oxygen fraction)
dfs[bg_df].loc[(dfs[bg_df].loc[:,'label'].isin(['FIO2']))&
               (dfs[bg_df]['valuenum'].between(1.0,20.9)),'valuenum']=np.nan
##values between 0-1 were found to be ratios, not %, so converting these to %
# NOTE(review): the statement below duplicates the 1-20.9 removal above
# (idempotent, but redundant); the actual 0-1 -> % conversion follows it.
dfs[bg_df].loc[(dfs[bg_df].loc[:,'label'].isin(['FIO2']))&
               (dfs[bg_df]['valuenum'].between(1.0,20.9)),'valuenum']=np.nan
fio2_dec= dfs[bg_df].loc[(dfs[bg_df].loc[:,'label'].isin(['FIO2']))&
                         (dfs[bg_df]['valuenum'].between(0.0,1.0)),'valuenum']
dfs[bg_df].loc[(dfs[bg_df].loc[:,'label'].isin(['FIO2']))&
               (dfs[bg_df]['valuenum'].between(0.0,1.0)),'valuenum'] = fio2_dec *100
del(fio2_dec)
```
#### removing null values annotated above
```
dfs[bg_df]= dfs[bg_df].loc[dfs[bg_df]['value'].notnull(),:]
```
### splitting specimen out for vent vs non-vent bg data
```
#adding specimen tag to filter only arterial samples for vent data.
specimen_df= dfs[bg_df].loc[dfs[bg_df].loc[:,'label']=='SPECIMEN',['unique_var','label','value']]#unique()
specimen_df=specimen_df.rename(index=str, columns={'value':'specimen'})
specimen_df=specimen_df.loc[specimen_df.loc[:,"specimen"]=='ART',:]
# tag every blood-gas row with its specimen type (only 'ART' rows get a tag)
dfs[bg_df]= pd.merge(dfs[bg_df],specimen_df[['unique_var','specimen']], left_on='unique_var', right_on='unique_var', how='left')
bg_ART_nosummary=dfs[bg_df].loc[dfs[bg_df].loc[:,'specimen']=='ART',:].copy()
del(specimen_df)
# labels kept for analysis, chosen on %missingness and clinical relevance:
# general blood-gas labels vs. ventilation-specific (arterial-only) labels
bg_labels=['PH','LACTATE','CALCIUM','TEMPERATURE','POTASSIUM',
           'GLUCOSE','HEMOGLOBIN','SODIUM','CHLORIDE','BICARBONATE','FIO2']
bg_vent_labels=['PCO2','PaO2','PO2','PEEP','O2FLOW']
#restricting to tests that were chosen to be analysed based on %missingness and clinical relevance
dfs[bg_df]= dfs[bg_df].loc[dfs[bg_df].loc[:,'label'].isin(bg_labels),:]
bg_ART_nosummary= bg_ART_nosummary.loc[bg_ART_nosummary.loc[:,'label'].isin(bg_vent_labels),:]
dfs[bg_df].head()
# keep a common column subset in both frames
bg_col=['subject_id','hadm_id','icustay_id','charttime','delta',time_var,'label','valuenum','uom']
bg_ART_nosummary= bg_ART_nosummary[bg_col]
dfs[bg_df]= dfs[bg_df][bg_col]
del(bg_col)
bg_ART_nosummary['label'].unique()
##quickly investigating o2flow
o2_pt= list(dfs[vent_df].loc[dfs[vent_df].loc[:,'value']=='Oxygen','icustay_id'].unique())
#o2_pt= list(ventcategory_1day_df.loc[ventcategory_1day_df.loc[:,'value']=='Oxygen','icustay_id'].unique())
bg_ART_nosummary.loc[bg_ART_nosummary.loc[:,'label']=='O2FLOW',:]
#bg_ART_nosummary.loc[bg_ART_nosummary.loc[:,'icustay_id']==217847,:]
bg_ART_nosummary.loc[(bg_ART_nosummary.loc[:,'label']=='O2FLOW') & (bg_ART_nosummary.loc[:,'icustay_id'].isin(o2_pt)),:]
#dfs[vent_df].loc[dfs[vent_df].loc[:,'icustay_id']==217847,:]
#converting O2FLOW to a y/n variable (one row per icustay_id)
o2_flow_df= bg_ART_nosummary.loc[bg_ART_nosummary.loc[:,'label']=='O2FLOW',:]
o2_flow_df=o2_flow_df.rename(index=str, columns={'valuenum':'value'})
o2_flow_df= yn_convert(o2_flow_df, label_fill='o2_flow', time_var=time_var)
o2_flow_df['label']="o2_flow" #fixing label
#removing o2_flow and peep from bg_ART (handled separately / excluded)
bg_ART_nosummary= bg_ART_nosummary.loc[bg_ART_nosummary.loc[:,'label']!='O2FLOW',:]
bg_ART_nosummary= bg_ART_nosummary.loc[bg_ART_nosummary.loc[:,'label']!='PEEP',:]
```
## PaO2:FiO2
* PaO2: measurement of oxygen pressure in arterial blood
* FiO2: % of oxygen in the air a patient is breathing. in normal air this is 21% oxygen
* SpO2: Peripheral capillary oxygen saturation, estimate of the amount of oxygen in the blood. this is % of oxygenated haemoglobin compared to total haemoglobin
***requirements: run vitals, ventilation before this to get spo2 & ventilation category.
I would modify the criteria to be more strict:
* first isolate patients on mech ventilation: these are only patients we calc P:F for
* find PaO2:
* if no PaO2:
* find SpO2 <=97 --> ~PaO2
* SpO2 to estimate and PaO2 equivalent via the equation used by Knox et al to convert to PaO2-equivalent.
* once have all PaO2 and estimated PaO2:
* find last measured fio2 between t- 6hours :t, where t= time of PaO2
* impute P:F= 476 for every other icustay_id.
* Use P:F as a continuous value unless there is a clear need to bucket, and if so use <100,100-200, 200-300, >300
##### first isolate patients who had mech or oxygen ventilation during their N hours
these are only patients we calc P:F for
first, filter pao2 and fio2 to only icustay who have ventilation (pf_ratio_icu)
```
vent_icu=list(ventcategory_1day_df[ventcategory_1day_df['value'].isin(['Mech'])]['icustay_id'].unique())
```
next isolate the pao2 and fio2 from the bg arterial dataframe.
* pao2: all pao2 measurements in cohort -> pao2 measurements for icustay with ventilation
* fio2: all fio2 measurements (chartevents) in cohort -> fio2 measurements for icustay with ventilation
```
#using bloodgas
# fio2_chart_df: charted FiO2 values; normalize 'delta' to timedelta and drop nulls
fio2_chart_df['delta']=pd.to_timedelta(fio2_chart_df['delta'])
fio2_chart_df= fio2_chart_df[fio2_chart_df['value'].notnull()].copy()
# pao2: arterial PaO2 measurements for the whole cohort
pao2= bg_ART_nosummary.loc[bg_ART_nosummary.loc[:,'label']=='PaO2',:].copy()
pao2.rename(index=str, columns={'valuenum':'value'}, inplace=True)
# pao2_vent: restrict PaO2 to icustays that had mechanical ventilation
pao2_vent= pao2[pao2.loc[:,'icustay_id'].isin(vent_icu)].copy()
pao2_vent['delta']=pd.to_timedelta(pao2_vent['delta']).copy()
pao2_vent= pao2_vent[pao2_vent['value'].notnull()] #removing some null rows
```
##### next: find patients with ventilation but no PaO2 and collect their SpO2 values
```
# pao2_icu: icustay_id for pao2 with vent
pao2_icu= list(pao2_vent['icustay_id'].unique())
# find people with ventilation but no PaO2 measured (ie those who need it approximated)
vent_but_no_pao2_icu=set(vent_icu).difference(set(pao2_icu))
## if on vent but no PaO2, then use SpO2 (if 97 or less) -> estimate PaO2 via equation below
# (the Ellis/Severinghaus inversion below is only valid for SpO2 <= 97%)
spo2_filtered= spo2[(spo2['icustay_id'].isin(vent_but_no_pao2_icu)) &
                    (spo2['value']<=97.0)]
##quick qc output: cohort counts at each stage of the PaO2/FiO2 pipeline
print(
    "total icustay:", len(icustay_id), '\n',
    'mech during their 72 hours of clinical data: ', len(vent_icu), '\n',
    ' -pao2, +vent: ', pao2_vent['icustay_id'].nunique(), '\n',
    ' +fio2, +vent: ', fio2_chart_df[fio2_chart_df['icustay_id'].isin(vent_icu)]['icustay_id'].nunique(), '\n',
    '+ vent, - pao2: ', len(vent_but_no_pao2_icu), '\n',
    '+fio2, +vent, -pao2: ', fio2_chart_df[fio2_chart_df['icustay_id'].isin(vent_but_no_pao2_icu)]['icustay_id'].nunique(), '\n',
)
```
##### next: estimate PaO2 using SpO2 for patients who have ventilation but no PaO2 measurement
$$\text{Ellis Severinghaus inversion}: PaO2_{estimate} =\sqrt[3]{\frac{1}{2} (-y_N + \sqrt{y_N^2 -h^2})} + \sqrt[3]{\frac{1}{2} (-y_N - \sqrt{y_N^2 -h^2})}$$
$$ \begin{cases}
h^2 =-500000 \\
y_N = -23400 * \frac{s}{1-s} \\
\end{cases}
$$
* s is the fractional oxygen saturation (0–1) and p is the associated oxygen tension in mm Hg.
* qc: when s=0.5, p=26.856
* equation source: http://www.nickalls.org/dick/papers/anes/severinghaus.pdf
```
def Ellis_SpO2(S):
    """
    Ellis inversion of the Severinghaus equation: approximate PaO2 from SpO2.

    Parameters
    ----------
    S : float
        Oxygen saturation (SpO2), either fractional (0-1) or as a
        percentage (1-100]; percentage inputs are converted to fractions.

    Returns
    -------
    float
        Estimated oxygen tension (PaO2) in mm Hg.

    Notes
    -----
    qc: when s=0.5, p=26.856.
    Only clinically valid for SpO2 <= 97 (see filtering above).
    equation source: http://www.nickalls.org/dick/papers/anes/severinghaus.pdf
    """
    # normalize percentage input (e.g. 95) to a fraction (0.95)
    if S > 1:
        S = S / 100.
    # clamp full saturation just below 1 to avoid division by zero in yn;
    # this also fixes the previously-unhandled S == 100 input, which fell
    # through every branch and produced a negative (1 - S) denominator
    if S >= 1:
        S = 0.9999
    h2 = -500000.
    yn = -23400. * (S / (1. - S))
    term1 = np.sqrt(np.power(yn, 2.) - h2)
    term2 = 0.5 * (-yn + term1)
    term3 = 0.5 * (-yn - term1)
    # real cube roots via sign(x)*|x|**(1/3): x**(1/3) is NaN for negative x
    term4 = np.sign(term2) * np.abs(term2)**(1/3)
    term5 = np.sign(term3) * np.abs(term3)**(1/3)
    p = term4 + term5
    return(p)
# approximating Pao2 from SpO2:
## only usable for spo2 values <=97
# .copy() makes spo2_filtered an owned frame, so the .loc writes below are
# safe (this removes the SettingWithCopyWarning noted as a false positive)
spo2_filtered= spo2[(spo2['icustay_id'].isin(vent_but_no_pao2_icu)) &
                    (spo2['value']<=97.0)].copy()
# run the Ellis_SpO2 to approx PaO2.
spo2_filtered.loc[:,'value']= spo2_filtered.value.apply(Ellis_SpO2)
spo2_filtered.loc[:,'label']='PaO2_estimate'
spo2_filtered.loc[:,'uom']='mm Hg estimate'
#append the pao2 (for those with ventilation) and new approximated pao2 together
# pd.concat replaces DataFrame.append, which was removed in pandas 2.0
pao2_appended=pd.concat([pao2_vent, spo2_filtered], sort=False).copy()
```
* once have all PaO2 and estimated PaO2:
* find fio2 measured between t- 6hours :t, where t= time of PaO2
* use most recent fio2 measure in this time?
```
#quick time conversion to make sure both in timedelta format
pao2_appended['delta']=pd.to_timedelta(pao2_appended['delta'])
pao2_appended['charttime']=pd.to_datetime(pao2_appended['charttime'])
fio2_chart_df['delta']=pd.to_timedelta(fio2_chart_df['delta'])
fio2_chart_df['charttime']=pd.to_datetime(fio2_chart_df['charttime'])
fio2_chart_df['label']='fio2'
#labeling all PaO2 (both measured and approx) with offset column= the delta time offset by the n hours of time we look back for Fio2 values
pao2_appended['offset']=pao2_appended['charttime'] - pd.to_timedelta(6, unit='h') #find most recent in this.
#left cartesian product of pao2 and fio2 measurements.
# every PaO2 row is paired with every FiO2 row of the same icustay (columns
# from the FiO2 side are suffixed '2' to keep them distinct)
pao2_fio2_windowed= pd.merge(pao2_appended,
                             fio2_chart_df[['icustay_id','value','label','charttime','delta']].rename(index=str, columns={'value':'value2','label':'label2','charttime':'charttime2','delta':'delta2'}),
                             left_on='icustay_id',
                             right_on='icustay_id',
                             how='left')
##take this left cartesian product and filter to rows where charttime2 (ie fio2 time) is between t-6:t where t= time at pao2 measurement.
#now this should be all Pao2 (real and estimated) and Fio2 combinations that
pao2_fio2_windowed= pao2_fio2_windowed[pd.to_datetime(pao2_fio2_windowed['charttime2']).between(pd.to_datetime(pao2_fio2_windowed['offset']),pd.to_datetime(pao2_fio2_windowed['charttime']))]
## grabs the most recent FiO2 value for each associated PaO2
# sort FiO2 time descending within each PaO2, then keep the first row
ratio_df= pao2_fio2_windowed.sort_values(['icustay_id','charttime','charttime2'], ascending=[True,True, False]).drop_duplicates(['icustay_id','charttime'],keep='first')
ratio_df.loc[ratio_df.loc[:,'value2'].isnull(),'value2']= 21 #impute 21 for FiO2 values that are missing (very few of them)
ratio_df['ratio']= ratio_df['value']/(ratio_df['value2']/100) #calculate the ratio
# if the FiO2 time is missing, use the PaO2 time so 'deltadelta' below is 0
ratio_df.loc[ratio_df['charttime2'].isnull(),'charttime2']= ratio_df.loc[ratio_df['charttime2'].isnull(),'charttime']
ratio_df['deltadelta']=pd.to_datetime(ratio_df['charttime'])-pd.to_datetime(ratio_df['charttime2'])
#optional qc
# ratio_df.head()
# ratio_df['ratio'].describe()
# ratio_df[['icustay_id','ratio']].groupby('icustay_id')['ratio'].apply(max).describe()
### making a copy of ratio_df, merging in all icustay's that are missing, and imputing datetime and value for them.
ratio_df2= ratio_df.copy() # making sure all patients have a value
ratio_df2= pd.merge(final_pt_df2[['subject_id','hadm_id','icustay_id']], ratio_df2[['icustay_id', 'ratio','delta','uom','label']].rename(index=str, columns={'ratio':'value'}), left_on='icustay_id',right_on='icustay_id', how='outer')
ratio_df2['label']='pao2fio2ratio'
ratio_df2['uom']='mm HG:%'
# impute P:F = 476 for icustays with no measurement (per the rule stated above)
ratio_df2.loc[ratio_df2['value'].isnull(),'value']=476
ratio_df2.loc[ratio_df2['delta'].isnull(),'delta']=pd.to_timedelta('0 days')
#should match the len(icustay_id)
ratio_df2['icustay_id'].nunique()
ratio_df2.head()
```
# factorizing pco2 start
```
# yn_convert_band(max_bands, #df in format where each row corresponds to a test, and a patient can have many rows
def yn_convert_pco2(df,
                    label_fill="absent",
                    threshold=10,
                    pt=None,
                    time_var='t_0',
                    value_fill=9999,
                    delta_fill=pd.to_timedelta('0 days'),
                    uom_fill='y/n'):
    """
    Factorize per-patient PCO2 values into three categories.

    Every icustay in ``pt`` gets exactly one row: '<threshold' or '>threshold'
    when a measurement exists in ``df``, and 'absent' when it does not.

    Parameters
    ----------
    df : DataFrame with columns icustay_id, value, label, uom, delta
        One row per test result (a patient can have many rows).
    label_fill : label assigned to icustays with no measurement.
    threshold : numeric cut point between the two measured categories.
    pt : cohort DataFrame with icustay_id/hadm_id/subject_id/``time_var``;
        defaults to the module-level ``final_pt_df2`` (resolved lazily at
        call time rather than at definition time, so the module can be
        imported before the cohort table exists).
    value_fill : sentinel marking merged rows with no measurement.
    delta_fill, uom_fill : fill values for the delta and uom columns.

    Returns
    -------
    DataFrame with one categorized row per icustay in ``pt``.
    """
    if pt is None:
        pt = final_pt_df2  # module-level cohort table
    # left merge: icustays absent from df get NaN value/label/uom/delta
    yn_df = pd.merge(pt[['icustay_id','hadm_id','subject_id', time_var]],
                     df[['icustay_id','value','label','uom','delta']],
                     left_on= 'icustay_id',
                     right_on= 'icustay_id',
                     how='left')
    # mark missing measurements with the sentinel so they can be labeled 'absent'
    yn_df['value']= yn_df['value'].fillna(value_fill)
    # compute ALL masks before mutating 'value' — the assignments below turn
    # the column into strings, which would break the numeric comparisons
    criteria0=yn_df.loc[:,'value']==value_fill
    criteria1=pd.to_numeric(yn_df.loc[:,'value'])<=threshold
    criteria2=pd.to_numeric(yn_df.loc[:,'value'])>threshold
    yn_df.loc[criteria1, 'value']= "<{}".format(threshold)
    yn_df.loc[criteria2, 'value']= ">{}".format(threshold)
    # applied last: sentinel rows also satisfy criteria2, so this overwrites them
    yn_df.loc[criteria0, 'value']= "absent"
    yn_df['delta']= yn_df['delta'].fillna(delta_fill)
    yn_df['delta']= pd.to_timedelta(yn_df['delta']) #filling in the time delta to time =0 for filled rows
    yn_df['uom']= yn_df['uom'].fillna(uom_fill)
    # force a single uom for the whole categorical column
    yn_df.loc[yn_df.loc[:,'uom']!=uom_fill, 'uom']= uom_fill
    yn_df['label']= yn_df['label'].fillna(label_fill)
    return(yn_df)
# rename valuenum -> value so the PCO2 rows match yn_convert_pco2's schema
bg_ART_nosummary= bg_ART_nosummary.rename(
    columns={'valuenum':'value'})
pco2_df=bg_ART_nosummary[bg_ART_nosummary['label']=='PCO2']
# keep the row holding each icustay's maximum PCO2
# NOTE(review): SeriesGroupBy.idxmax() combined with as_index=False returns a
# DataFrame on recent pandas, which would break this .loc — confirm against
# the pinned pandas version before upgrading.
max_pco2=pco2_df.loc[pco2_df.groupby('icustay_id', as_index=False)['value'].idxmax(),:]
del(pco2_df)
# categorize: '<50' / '>50' / 'absent', one row per icustay
pco2_cat=yn_convert_pco2(df=max_pco2,
                         label_fill="PCO2",
                         threshold=50,
                         pt= final_pt_df2,
                         time_var='t_0',
                         value_fill=9999,
                         delta_fill=pd.to_timedelta('0 days'),
                         uom_fill='y/n')
#drop the raw PCO2 rows now that they are factorized
bg_ART_nosummary=bg_ART_nosummary.drop(bg_ART_nosummary[bg_ART_nosummary['label']=='PCO2'].index)
##dropping charttime, may be problematic later. 06/13/19
bg_ART_nosummary=bg_ART_nosummary.drop('charttime', axis=1)
# pd.concat replaces DataFrame.append, which was removed in pandas 2.0
bg_ART_nosummary=pd.concat([bg_ART_nosummary, pco2_cat], sort=True)
bg_ART_nosummary.loc[bg_ART_nosummary['label']=='PCO2','value'].value_counts()
```
# factorizing bands end
```
# persist the finished tables and free the intermediates
save_df(ratio_df2, 'pfRatio')
del( pao2_fio2_windowed,pao2_appended, fio2_chart_df, pao2, pao2_icu )
del( ventcategory_1day_df, dfs[vent_df] )
save_df(dfs[bg_df],'bg_all') #all bloodgas
del(dfs[bg_df])
save_df(bg_ART_nosummary,'bg_ART') #only ARTERIAL bloodgas
del(bg_ART_nosummary)
save_df(o2_flow_df,'o2_flow') #need to investigate this more
del(o2_flow_df)
```
# RRT
```
#removing null values
dfs[rrt_df] = dfs[rrt_df].loc[dfs[rrt_df]['rrt'].notnull(),:]
dfs[rrt_df]['uom']='category'
# delta = time from the reference point (time_var) to the first RRT charting
dfs[rrt_df]['delta']=pd.to_timedelta(
    pd.to_datetime(dfs[rrt_df]['first_charttime'])-
    pd.to_datetime(dfs[rrt_df][time_var]),
    'days')
dfs[rrt_df]['label']= 'rrt'
# standardize to the common long format: the measurement column is 'value'
dfs[rrt_df]=dfs[rrt_df].rename(index=str, columns={'rrt':'value'})
```
### converting to yes/no
```
dfs[rrt_df].head()
dfs[rrt_df]['icustay_id'].nunique() #
# one yes/no row per icustay: present measurement -> positive, else filled
rrt_yn= yn_convert(dfs[rrt_df],label_fill='rrt', time_var=time_var)
save_df(rrt_yn, 'rrt')
del(rrt_yn, dfs[rrt_df])
```
# GCS_72
```
# GCS: standardize the minimum-GCS table to the common long format and save.
# (the original assigned 'uom' twice — 'GCS_score' then 'gcs_score' — and
# evaluated a dead list(dfs[gcs_df]) expression; only the final uom mattered)
dfs[gcs_df]['uom']='gcs_score'
dfs[gcs_df]['label']= 'mingcs'
dfs[gcs_df]=dfs[gcs_df].rename(index=str, columns={'mingcs':'value'})
# common column layout used by the other saved feature tables
dfs[gcs_df]=dfs[gcs_df][['subject_id','hadm_id','icustay_id','delta','label','value',time_var,'uom']]
save_df(dfs[gcs_df], 'gcs')
del(dfs[gcs_df])
```
# SOFA
i'm going to remove all sofa variables except daily score, as we have other markers for those in our above data
i may later use this as qc check.
also added delta
```
dfs[sofa_df]['uom']='daily_sofa_score'
#adding day delta column
dfs[sofa_df]=dfs[sofa_df].sort_values(['hadm_id','day',time_var]) #good
# rank the days within each icustay so delta counts from day 0
dfs[sofa_df]['day_rank']=dfs[sofa_df].groupby('icustay_id')['day'].rank()
dfs[sofa_df]['delta']=pd.to_timedelta((dfs[sofa_df]['day_rank']-1), 'days')
dfs[sofa_df]['label']= 'daily_sofa'
dfs[sofa_df]=dfs[sofa_df].rename(index=str, columns={'sofa':'value'})
# standardize to the common long format and save
dfs[sofa_df]= dfs[sofa_df][['subject_id','hadm_id','icustay_id','delta','label','value',time_var,'uom']]
dfs[sofa_df].head()
save_df(dfs[sofa_df], 'sofa')
del(dfs[sofa_df])
```
# patient Demographic variables
```
# patient demographic qc and cleanup
dfs[pt_info_df]
### gender distribution qc. should be almost entirely/entirely populated
#icustay_id
#dfs[pt_info_df]
# model cohort: icustays in the two final bins used for modeling
# (the original computed this identical list twice; once is enough)
model_pts=list(final_pt_df2[final_pt_df2['final_bin'].isin(['C_pos/A_full','C_neg/A_partial'])]['icustay_id'].unique())
dfs[pt_info_df][(dfs[pt_info_df]['label']=='gender')& (dfs[pt_info_df]['icustay_id'].isin(model_pts))]['value'].value_counts()
len(model_pts)
#### gender qc
##adjusting ages over 90 (which were set to 300 to deidentify) to 90
admit_index=dfs[pt_info_df].loc[(dfs[pt_info_df]['label']=='yearsold')].index
age_tf=pd.to_numeric(dfs[pt_info_df].loc[admit_index,'value'])>90
dfs[pt_info_df].loc[(dfs[pt_info_df]['label']=='yearsold')&(age_tf),'value']=90.0
dfs[pt_info_df].loc[admit_index,'value'].value_counts()
#dfs[pt_info_df].loc[(dfs[pt_info_df]['label']=='first_admit_age') & (dfs[pt_info_df]['value']>90)]
#date= '22102018'
save_df(dfs[pt_info_df], 'pt_info')
del(dfs[pt_info_df] )
```
| github_jupyter |
# `bsym` – a basic symmetry module
`bsym` is a basic Python symmetry module. It consists of some core classes that describe configuration vector spaces, their symmetry operations, and specific configurations of objects within these spaces. The module also contains an interface for working with [`pymatgen`](http://pymatgen.org) `Structure` objects, to allow simple generation of disordered symmetry-inequivalent structures from a symmetric parent crystal structure.
API documentation is [here](http://bsym.readthedocs.io).
## Configuration Spaces, Symmetry Operations, and Groups
The central object described by `bsym` is the **configuration space**. This defines a vector space that can be occupied by other objects. For example; the three points $a, b, c$ defined by an equilateral triangle,
<img src='figures/triangular_configuration_space.pdf'>
which can be described by a length 3 vector:
\begin{pmatrix}a\\b\\c\end{pmatrix}
If these points can be coloured black or white, then we can define a **configuration** for each different colouring (0 for white, 1 for black), e.g.
<img src='figures/triangular_configuration_example_1.pdf'>
with the corresponding vector
\begin{pmatrix}1\\1\\0\end{pmatrix}
A specific **configuration** therefore defines how objects are distributed within a particular **configuration space**.
The symmetry relationships between the different vectors in a **configuration space** are described by **symmetry operations**. A **symmetry operation** describes a transformation of a **configuration space** that leaves it indistinguishable. Each **symmetry operation** can be described as a matrix that maps the vectors in a **configuration space** onto each other, e.g. in the case of the equilateral triangle the simplest **symmetry operation** is the identity, $E$, which leaves every corner unchanged, and can be represented by the matrix
\begin{equation}
E=\begin{pmatrix}1 & 0 & 0\\0 & 1 & 0 \\ 0 & 0 & 1\end{pmatrix}
\end{equation}
For this triangular example, there are other **symmetry operations**, including reflections, $\sigma$ and rotations, $C_n$:
<img src='figures/triangular_example_symmetry_operations.pdf'>
In this example reflection operation, $b$ is mapped to $c$; $b\to c$, and $c$ is mapped to $b$; $c\to b$.
The matrix representation of this **symmetry operation** is
\begin{equation}
\sigma_\mathrm{a}=\begin{pmatrix}1 & 0 & 0\\0 & 0 & 1 \\ 0 & 1 & 0\end{pmatrix}
\end{equation}
For the example rotation operation, $a\to b$, $b\to c$, and $c\to a$, with matrix representation
\begin{equation}
C_3=\begin{pmatrix}0 & 0 & 1\\ 1 & 0 & 0 \\ 0 & 1 & 0\end{pmatrix}
\end{equation}
Using this matrix and vector notation, the effect of a symmetry operation on a specific **configuration** can be calculated as the [matrix product](https://en.wikipedia.org/wiki/Matrix_multiplication#Square_matrix_and_column_vector) of the **symmetry operation** matrix and the **configuration** vector:
<img src='figures/triangular_rotation_operation.pdf'>
In matrix notation this is represented as
\begin{equation}
\begin{pmatrix}0\\1\\1\end{pmatrix} = \begin{pmatrix}0 & 0 & 1\\ 1 & 0 & 0 \\ 0 & 1 &
0\end{pmatrix}\begin{pmatrix}1\\1\\0\end{pmatrix}
\end{equation}
or more compactly
\begin{equation}
c_\mathrm{f} = C_3 c_\mathrm{i}.
\end{equation}
The set of all symmetry operations for a particular **configuration space** is a **group**.
For an equilateral triangle this group is the $C_{3v}$ [point group](https://en.wikipedia.org/wiki/Point_group), which contains six symmetry operations: the identity, three reflections (each with a mirror plane bisecting the triangle and passing through $a$, $b$, or $c$ respectively) and two rotations (120° clockwise and counterclockwise).
\begin{equation}
C_{3v} = \left\{ E, \sigma_\mathrm{a}, \sigma_\mathrm{b}, \sigma_\mathrm{c}, C_3, C_3^\prime \right\}
\end{equation}
## Modelling this using `bsym`
### The `SymmetryOperation` class
In `bsym`, a **symmetry operation** is represented by an instance of the `SymmetryOperation` class. A `SymmetryOperation` instance can be initialised from the matrix representation of the corresponding **symmetry operation**.
For example, in the trigonal **configuration space** above, a `SymmetryOperation` describing the identity, $E$, can be created with
```
from bsym import SymmetryOperation
SymmetryOperation([[ 1, 0, 0 ],
[ 0, 1, 0 ],
[ 0, 0, 1 ]])
```
Each `SymmetryOperation` has an optional `label` attribute, which records a name for the operation alongside its matrix representation. We can provide the label when creating a `SymmetryOperation`:
```
SymmetryOperation([[ 1, 0, 0 ],
[ 0, 1, 0 ],
[ 0, 0, 1 ]], label='E' )
```
or set it afterwards:
```
e = SymmetryOperation([[ 1, 0, 0 ],
[ 0, 1, 0 ],
[ 0, 0, 1 ]])
e.label = 'E'
e
```
Or for $C_3$:
```
c_3 = SymmetryOperation( [ [ 0, 0, 1 ],
[ 1, 0, 0 ],
[ 0, 1, 0 ] ], label='C3' )
c_3
```
#### Vector representations of symmetry operations
The matrix representation of a **symmetry operation** is a [permutation matrix](https://en.wikipedia.org/wiki/Permutation_matrix). Each row maps one position in the corresponding **configuration space** to one other position. An alternative, condensed, representation for each **symmetry operation** matrix uses vector notation, where each element gives the row containing `1` in the equivalent matrix column. e.g. for $C_3$ the vector mapping is given by $\left[2,3,1\right]$, corresponding to the mapping $1\to2$, $2\to3$, $3\to1$.
```
c_3_from_vector = SymmetryOperation.from_vector( [ 2, 3, 1 ], label='C3' )
c_3_from_vector
```
The vector representation of a `SymmetryOperation` can be accessed using the `as_vector()` method.
```
c_3.as_vector()
```
#### Inverting symmetry operations
For every **symmetry operation**, $A$, there is an **inverse** operation, $A^{-1}$, such that
\begin{equation}
A \cdot A^{-1}=E.
\end{equation}
For example, the inverse of $C_3$ (clockwise rotation by 120°) is $C_3^\prime$ (anticlockwise rotation by 120°):
```
c_3 = SymmetryOperation.from_vector( [ 2, 3, 1 ], label='C3' )
c_3_inv = SymmetryOperation.from_vector( [ 3, 1, 2 ], label='C3_inv' )
print( c_3, '\n' )
print( c_3_inv, '\n' )
```
The product of $C_3$ and $C_3^\prime$ is the identity, $E$.
```
c_3 * c_3_inv
```
<img src="figures/triangular_c3_inversion.pdf" />
`c_3_inv` can also be generated using the `.invert()` method
```
c_3.invert()
```
The resulting `SymmetryOperation` does not have a label defined. This can be set directly, or by chaining the `.set_label()` method, e.g.
```
c_3.invert( label= 'C3_inv')
c_3.invert().set_label( 'C3_inv' )
```
### The `SymmetryGroup` class
A `SymmetryGroup` is a collection of `SymmetryOperation` objects. A `SymmetryGroup` is not required to contain _all_ the symmetry operations of a particular **configuration space**, and therefore is not necessarily a complete mathematical <a href="https://en.wikipedia.org/wiki/Group_(mathematics)#Definition">group</a>.
For convenience `bsym` has `PointGroup` and `SpaceGroup` classes, that are equivalent to the `SymmetryGroup` parent class.
```
from bsym import PointGroup
# construct SymmetryOperations for C_3v group
# (vector notation: entry i gives the position that site i is mapped to)
e = SymmetryOperation.from_vector( [ 1, 2, 3 ], label='e' )              # identity
c_3 = SymmetryOperation.from_vector( [ 2, 3, 1 ], label='C_3' )          # rotation
c_3_inv = SymmetryOperation.from_vector( [ 3, 1, 2 ], label='C_3_inv' )  # inverse rotation
sigma_a = SymmetryOperation.from_vector( [ 1, 3, 2 ], label='S_a' )      # reflection fixing a
sigma_b = SymmetryOperation.from_vector( [ 3, 2, 1 ], label='S_b' )      # reflection fixing b
sigma_c = SymmetryOperation.from_vector( [ 2, 1, 3 ], label='S_c' )      # reflection fixing c
```
<img src="figures/triangular_c3v_symmetry_operations.pdf" />
```
c3v = PointGroup( [ e, c_3, c_3_inv, sigma_a, sigma_b, sigma_c ] )
c3v
```
### The `ConfigurationSpace` class
A `ConfigurationSpace` consists of a set of objects that represent the **configuration space** vectors, and the `SymmetryGroup` containing the relevant **symmetry operations**.
```
from bsym import ConfigurationSpace
c = ConfigurationSpace( objects=['a', 'b', 'c' ], symmetry_group=c3v )
c
```
### The `Configuration` class
A `Configuration` instance describes a particular **configuration**, i.e. how a set of objects are arranged within a **configuration space**. Internally, a `Configuration` is represented as a vector (as a `numpy` array).
Each element in a configuration is represented by a single digit non-negative integer.
```
from bsym import Configuration
conf_1 = Configuration( [ 1, 1, 0 ] )
conf_1
```
The effect of a particular **symmetry operation** acting on a **configuration** can now be calculated using the `SymmetryOperation.operate_on()` method, or by direct multiplication, e.g.
```
c1 = Configuration( [ 1, 1, 0 ] )
c_3 = SymmetryOperation.from_vector( [ 2, 3, 1 ] )
c_3.operate_on( c1 )
c_3 * conf_1
```
<img src="figures/triangular_rotation_operation.pdf" />
## Finding symmetry-inequivalent permutations.
A common question that comes up when considering the symmetry properties of arrangements of objects is: how many ways can these be arranged that are not equivalent by symmetry?
As a simple example of solving this problem using `bsym` consider four equivalent sites arranged in a square.
<img src="figures/square_configuration_space.pdf">
```
c = ConfigurationSpace( [ 'a', 'b', 'c', 'd' ] ) # four vector configuration space
```
This `ConfigurationSpace` has been created without a `symmetry_group` argument. The default behaviour in this case is to create a `SymmetryGroup` containing only the identity, $E$.
```
c
```
We can now calculate all symmetry inequivalent arrangements where two sites are occupied and two are unoccupied, using the `unique_configurations()` method. This takes as an argument a `dict` with the numbers of labels to be arranged in the **configuration space**. Here, we use the labels `1` and `0` to represent occupied and unoccupied sites, respectively, and the distribution of sites is given by `{ 1:2, 0:2 }`.
```
c.unique_configurations( {1:2, 0:2} )
```
Because we have not yet taken into account the symmetry of the **configuration space**, we get
\begin{equation}
\frac{4\times3}{2}
\end{equation}
unique configurations (where the factor of 2 comes from the occupied sites being indistinguishable).
The configurations generated by `unique_configurations` have a `count` attribute that records the number of *symmetry equivalent* configurations of each case:
In this example, each configuration appears once:
```
[ uc.count for uc in c.unique_configurations( {1:2, 0:2} ) ]
```
We can also calculate the result when all symmetry operations of this **configuration space** are included.
```
# construct point group
# (vector notation: entry i gives the position that site i is mapped to)
e = SymmetryOperation.from_vector( [ 1, 2, 3, 4 ], label='E' )           # identity
c4 = SymmetryOperation.from_vector( [ 2, 3, 4, 1 ], label='C4' )         # rotation
c4_inv = SymmetryOperation.from_vector( [ 4, 1, 2, 3 ], label='C4i' )    # inverse rotation
c2 = SymmetryOperation.from_vector( [ 3, 4, 1, 2 ], label='C2' )         # two-fold rotation
sigma_x = SymmetryOperation.from_vector( [ 4, 3, 2, 1 ], label='s_x' )   # reflection
sigma_y = SymmetryOperation.from_vector( [ 2, 1, 4, 3 ], label='s_y' )   # reflection
sigma_ac = SymmetryOperation.from_vector( [ 1, 4, 3, 2 ], label='s_ac' ) # diagonal reflection (fixes a, c)
sigma_bd = SymmetryOperation.from_vector( [ 3, 2, 1, 4 ], label='s_bd' ) # diagonal reflection (fixes b, d)
c4v = PointGroup( [ e, c4, c4_inv, c2, sigma_x, sigma_y, sigma_ac, sigma_bd ] )
# create ConfigurationSpace with the c4v PointGroup.
c = ConfigurationSpace( [ 'a', 'b', 'c', 'd' ], symmetry_group=c4v )
c
# two occupied (1) and two unoccupied (0) sites, now with symmetry applied
c.unique_configurations( {1:2, 0:2} )
[ uc.count for uc in c.unique_configurations( {1:2, 0:2 } ) ]
```
Taking symmetry into account, we now only have two unique configurations: either two adjacent sites are occupied (four possible ways), or two diagonal sites are occupied (two possible ways):
<img src="figures/square_unique_configurations.pdf" >
The `unique_configurations()` method can also handle non-binary site occupations:
```
c.unique_configurations( {2:1, 1:1, 0:2} )
[ uc.count for uc in c.unique_configurations( {2:1, 1:1, 0:2 } ) ]
```
<img src="figures/square_unique_configurations_2.pdf">
## Working with crystal structures using `pymatgen`
One example where it can be useful to identify symmetry-inequivalent arrangements of objects in a vector space is when considering the possible arrangements of disordered atoms on a crystal lattice.
To solve this problem for an arbitrary crystal structure, `bsym` contains an interface to [`pymatgen`](http://pymatgen.org) that will identify symmetry-inequivalent atom substitutions in a given `pymatgen` `Structure`.
As an example, consider a $4\times4$ square-lattice supercell populated by lithium atoms.
```
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
import numpy as np
# construct a pymatgen Structure instance using the site fractional coordinates
coords = np.array( [ [ 0.0, 0.0, 0.0 ] ] )
atom_list = [ 'Li' ]
lattice = Lattice.from_parameters( a=1.0, b=1.0, c=1.0, alpha=90, beta=90, gamma=90 )
# one-atom cubic cell expanded to a 4x4x1 supercell (16 Li sites)
parent_structure = Structure( lattice, atom_list, coords ) * [ 4, 4, 1 ]
parent_structure.cart_coords.round(2)
```
We can use the `bsym.interface.pymatgen.unique_structure_substitutions()` function to identify symmetry-inequivalent structures generated by substituting at different sites.
```
from bsym.interface.pymatgen import unique_structure_substitutions
print( unique_structure_substitutions.__doc__ )
```
As a trivial example, when substituting one Li atom for Na, we get a single unique structure
```
unique_structures = unique_structure_substitutions( parent_structure, 'Li', { 'Na':1, 'Li':15 } )
len( unique_structures )
```
<img src="figures/pymatgen_example_one_site.pdf">
```
na_substituted = unique_structures[0]
```
This Li$\to$Na substitution breaks the symmetry of the $4\times4$ supercell.
If we now replace a second lithium with a magnesium atom, we generate five symmetry inequivalent structures:
```
unique_structures_with_Mg = unique_structure_substitutions( na_substituted, 'Li', { 'Mg':1, 'Li':14 } )
len( unique_structures_with_Mg )
[ s.number_of_equivalent_configurations for s in unique_structures_with_Mg ]
```
`number_of_equivalent_configurations` only lists the number of equivalent configurations found when performing the second substitution, when the list of structures `unique_structures_with_Mg` was created. The full configuration degeneracy relative to the initial empty 4×4 lattice can be queried using `full_configuration_degeneracy`.
```
[ s.full_configuration_degeneracy for s in unique_structures_with_Mg ]
```
<img src="figures/pymatgen_example_two_sites.pdf">
```
# Check the squared distances between the Na and Mg sites in these unique structures are [1, 2, 4, 5, 8]
np.array( sorted( [ s.get_distance( s.indices_from_symbol('Na')[0],
s.indices_from_symbol('Mg')[0] )**2 for s in unique_structures_with_Mg ] ) )
```
This double substitution can also be done in a single step:
```
unique_structures = unique_structure_substitutions( parent_structure, 'Li', { 'Mg':1, 'Na':1, 'Li':14 } )
len(unique_structures)
np.array( sorted( [ s.get_distance( s.indices_from_symbol('Na')[0],
s.indices_from_symbol('Mg')[0] ) for s in unique_structures ] ) )**2
[ s.number_of_equivalent_configurations for s in unique_structures ]
```
Because both substitutions were performed in a single step, `number_of_equivalent_configurations` and `full_configuration_degeneracy` now contain the same data:
```
[ s.full_configuration_degeneracy for s in unique_structures ]
```
## Constructing `SpaceGroup` and `ConfigurationSpace` objects using `pymatgen`
The `bsym.interface.pymatgen` module contains functions for generating `SpaceGroup` and `ConfigurationSpace` objects directly from `pymatgen` `Structure` objects.
```
from bsym.interface.pymatgen import ( space_group_symbol_from_structure,
space_group_from_structure,
configuration_space_from_structure )
```
Documentation:
- [`space_group_symbol_from_structure`](http://bsym.readthedocs.io/en/latest/api/interface/pymatgen.html#bsym.interface.pymatgen.space_group_symbol_from_structure)
- [`space_group_from_structure`](http://bsym.readthedocs.io/en/latest/api/interface/pymatgen.html#bsym.interface.pymatgen.space_group_from_structure)
- [`configuration_space_from_structure`](http://bsym.readthedocs.io/en/latest/api/interface/pymatgen.html#bsym.interface.pymatgen.configuration_space_from_structure)
```
# four Li atoms at the face-centred positions of a cubic cell
coords = np.array( [ [ 0.0, 0.0, 0.0 ],
                     [ 0.5, 0.5, 0.0 ],
                     [ 0.0, 0.5, 0.5 ],
                     [ 0.5, 0.0, 0.5 ] ] )
atom_list = [ 'Li' ] * len( coords )
lattice = Lattice.from_parameters( a=3.0, b=3.0, c=3.0, alpha=90, beta=90, gamma=90 )
structure = Structure( lattice, atom_list, coords )
# derive the symmetry objects directly from the pymatgen Structure
space_group_symbol_from_structure( structure )
space_group_from_structure( structure )
configuration_space_from_structure( structure )
```
## Progress bars
`bsym.ConfigurationSpace.unique_configurations()` and `bsym.interface.pymatgen.unique_structure_substitutions()` both accept optional `show_progress` arguments, which can be used to display progress bars (using [`tqdm`](https://tqdm.github.io)).
Setting `show_progress=True` will give a simple progress bar. If you are running `bsym` in a Jupyter notebook, setting `show_progress="notebook"` will give you a progress bar as a notebook widget.
(note, the widget status is not saved with this notebook, and may not display correctly on GitHub or using nbviewer)
In the example below, we find all unique configurations for the pseudo-ReO<sub>3</sub> structured TiOF<sub>2</sub> in a 2×2×2 supercell.
```
a = 3.798 # lattice parameter
# ReO3-type unit cell: one Ti plus three anion sites ('X' is a placeholder species)
coords = np.array( [ [ 0.0, 0.0, 0.0 ],
                     [ 0.5, 0.0, 0.0 ],
                     [ 0.0, 0.5, 0.0 ],
                     [ 0.0, 0.0, 0.5 ] ] )
atom_list = [ 'Ti', 'X', 'X', 'X' ]
lattice = Lattice.from_parameters( a=a, b=a, c=a, alpha=90, beta=90, gamma=90 )
unit_cell = Structure( lattice, atom_list, coords )
# 2x2x2 supercell -> 24 'X' sites, matching the O:8 + F:16 substitution below
parent_structure = unit_cell * [ 2, 2, 2 ]
unique_structures = unique_structure_substitutions( parent_structure, 'X', { 'O':8, 'F':16 },
                                                    show_progress='notebook' )
%load_ext version_information
%version_information bsym, numpy, jupyter, pymatgen, tqdm
```
| github_jupyter |
Visualisation des différentes statistiques de Dbnary
=============
```
import datetime
# PLotting
import bqplot as bq
# Data analys
import numpy as np
from IPython.display import clear_output
from ipywidgets import widgets
from pandasdatacube import *
# SPARQL endpoint serving the Dbnary statistics datasets
ENDPOINT: str = "http://kaiko.getalp.org/sparql"
# namespace prefixes used in the SPARQL queries (prefix -> IRI)
PREFIXES: dict[str, str] = {'dbnary': 'http://kaiko.getalp.org/dbnary#',
                            'dbnstats': 'http://kaiko.getalp.org/dbnary/statistics/',
                            'lime': 'http://www.w3.org/ns/lemon/lime#'}
# fixed colour cycle for per-language plot lines
HTML_COLORS: list[str] = ["red", "blue", "cyan", "pink", "lime", "purple", "orange", "fuchsia", 'Teal', 'Navy', 'Maroon', 'Olive',
                          'Gray', 'Lime', 'Silver', 'Green', 'Black']
```
### Classe qui retourne un DataFrame des résultats d'une requête SPARQL et autres fonctions utilitaires
```
def transformation_date(date: str) -> datetime.datetime:
    """Convert a Dbnary dump-version string ``YYYYMMDD`` into a datetime.

    Some dump versions carry ``00`` as the day field (the exact day is not
    recorded); ``datetime`` rejects a day of 0, so the first day of the
    month is used in that case.

    Parameters
    ----------
    date : str
        Date string of the form ``YYYYMMDD``.

    Returns
    -------
    datetime.datetime
        The parsed date (day forced to 1 when the day field is ``00``).
    """
    year = int(date[:4])
    month = int(date[4:6])
    day = int(date[6:])
    if day == 0:  # day not recorded in this dump version
        day = 1
    return datetime.datetime(year=year, month=month, day=day)
```
### On commence par chercher tous les différents types de datasets et on va proposer à l'utilisateur de choisir quel dataset télécharger
### Traitement de certains datasets particuliers, le code ci-dessous n'est pas généralisable
#### 1. dbnaryNymRelationsCube
```
# --- Cube 1: dbnaryNymRelationsCube --------------------------------------
# Counts of lexico-semantic ("nym") relations per dump version, relation
# type and observation language, rendered as a bqplot/ipywidgets dashboard.
dataset: str = "dbnstats:dbnaryNymRelationsCube"
dimensions: list[str] = ['dbnary:wiktionaryDumpVersion', 'dbnary:nymRelation', 'dbnary:observationLanguage']
mesures: list[str] = ['dbnary:count']
dtypes: dict[str, type] = {'count': int}
data1: pd.DataFrame = get_datacube(ENDPOINT, dataset, dimensions, mesures, dtypes, PREFIXES).reset_index()
relations1: np.ndarray = data1['nymRelation'].unique() # All type of relation in this cube
labels1: list[str] = [item.split('#')[-1] for item in relations1]  # short labels: URI fragment after '#'
# Pivot: one ('count', relation) column per relation type, one row per
# (dump version, language) pair.
data1 = data1.pivot_table(columns='nymRelation', index=['wiktionaryDumpVersion', 'observationLanguage'],
                          aggfunc=max).reset_index().sort_values(by=['wiktionaryDumpVersion', 'observationLanguage'])
# Convert YYYYMMDD version strings to datetime for the date axis.
data1["wiktionaryDumpVersion"] = data1["wiktionaryDumpVersion"].map(transformation_date)
out1 = widgets.Output()
choice1 = widgets.ToggleButtons(options=[('Statistiques globales', 'glob'), ('Par pays', 'pays')], description='Choix:',
                                disabled=False, tooltips=['Statistiques de tout les pays par années',
                                                          'Statistiques d\' pays au cours du temps'])


def event1(obj):
    # Rebuild the plot area whenever the global/per-country toggle changes.
    with out1:
        clear_output()
        if choice1.value == "pays":
            # Time series of every relation type for one extraction language.
            user_choice = widgets.Dropdown(options=list(data1["observationLanguage"].unique()), description="Choix:")
            choosed_data = data1[data1["observationLanguage"] == user_choice.value]
            y_sc = bq.LinearScale()
            x_ord = bq.scales.DateScale()
            line = bq.Lines(x=choosed_data["wiktionaryDumpVersion"], y=choosed_data["count"][relations1].T,
                            stroke_width=1, display_legend=True, labels=labels1, scales={'x': x_ord, 'y': y_sc})
            ax_x = bq.Axis(scale=x_ord, grid_lines='solid', label='Date', tick_format='%m %Y')
            ax_y = bq.Axis(scale=y_sc, orientation='vertical', grid_lines='solid', label='Valeur', label_offset='-50')
            fig = bq.Figure(marks=[line], axes=[ax_x, ax_y], animation_duration=1000,
                            title=f"Différentes relations lexicales dans l'extraction {user_choice.value}")

            def edit_graph(obj):
                # Update the line marks in place (animated redraw) on language change.
                choosed_data = data1[data1["observationLanguage"] == user_choice.value]
                line.y = choosed_data["count"][relations1].T
                line.x = choosed_data["wiktionaryDumpVersion"]
                fig.title = f"Différentes relations lexicales dans l'extraction {user_choice.value}"
        if choice1.value == "glob":
            # Stacked bars: all languages side by side for one dump date.
            user_choice = widgets.Dropdown(options=[(np.datetime_as_string(item, unit='D'), item) for item in
                                                    data1["wiktionaryDumpVersion"].unique()],
                                           description="Choix:", value=max(data1["wiktionaryDumpVersion"].unique()))
            x_ord = bq.OrdinalScale()
            y_sc = bq.LinearScale()
            choosed_data = data1[data1["wiktionaryDumpVersion"] == user_choice.value]
            x = choosed_data["observationLanguage"].values
            y = choosed_data["count"][relations1].T
            bar = bq.Bars(x=x, y=y, scales={'x': x_ord, 'y': y_sc}, type='stacked', labels=labels1,
                          color_mode='element',
                          display_legend=True, colors=HTML_COLORS)
            ax_x = bq.Axis(scale=x_ord, grid_lines='solid', label='Pays')
            ax_y = bq.Axis(scale=y_sc, orientation='vertical', grid_lines='solid', label='Valeur', label_offset='-50')
            fig = bq.Figure(marks=[bar], axes=[ax_x, ax_y], animation_duration=1000,
                            title=f"Nombre de relations lexicales dans l'extraction du {np.datetime_as_string(user_choice.value, unit='D')}")

            def edit_graph(obj):
                # Update the bars in place when the selected dump date changes.
                choosed_data = data1[data1["wiktionaryDumpVersion"] == user_choice.value]
                bar.x = choosed_data["observationLanguage"].values
                bar.y = choosed_data["count"][relations1].T
                fig.title = f"Nombre de relations lexicales dans l'extraction du {np.datetime_as_string(user_choice.value, unit='D')}"

            def add_pie_chart_in_tooltip(chart, d):
                # On hover, show the raw counts behind the bar as an HTML table.
                idx = d["data"]["index"]
                bar.tooltip = widgets.HTML(pd.DataFrame(
                    data1[data1["wiktionaryDumpVersion"] == user_choice.value].iloc[idx]["count"]).to_html())
            bar.on_hover(add_pie_chart_in_tooltip)
        display(user_choice, fig)
        user_choice.observe(edit_graph, 'value')


choice1.observe(event1, 'value')
display(choice1, out1)
event1(None)  # initial draw
```
#### 2. dbnaryStatisticsCube
```
# --- Cube 2: dbnaryStatisticsCube ----------------------------------------
# Global extraction statistics (entries, senses, pages, translations) per
# dump version and observation language; same dashboard layout as cube 1.
dataset: str = "dbnstats:dbnaryStatisticsCube"
dimensions: list[str] = ['dbnary:observationLanguage', 'dbnary:wiktionaryDumpVersion']
mesures: list[str] = ['dbnary:lexicalEntryCount', 'dbnary:lexicalSenseCount', 'dbnary:pageCount', 'dbnary:translationsCount']
dtypes: dict[str, type] = {"lexicalEntryCount": int, "translationsCount": int, "lexicalSenseCount": int, "pageCount": int}
data2: pd.DataFrame = get_datacube(ENDPOINT, dataset, dimensions, mesures, dtypes, PREFIXES).reset_index().sort_values(by=['wiktionaryDumpVersion', 'observationLanguage'])
categories2: list[str] = ["lexicalEntryCount", "translationsCount", "lexicalSenseCount", "pageCount"]
# Convert YYYYMMDD version strings to datetime for the date axis.
data2["wiktionaryDumpVersion"] = data2["wiktionaryDumpVersion"].map(transformation_date)
out2 = widgets.Output()
choice2 = widgets.ToggleButtons(options=[('Statistiques globales', 'glob'), ('Par pays', 'pays')], description='Choix:',
                                disabled=False, tooltips=['Statistiques de tout les pays par années',
                                                          'Statistiques d\' pays au cours du temps'])


def event2(obj):
    # Rebuild the plot area whenever the global/per-country toggle changes.
    with out2:
        clear_output()
        if choice2.value == "pays":
            # Time series of the four counters for one extraction language.
            user_choice = widgets.Dropdown(options=list(data2["observationLanguage"].unique()), description="Choix:")
            choosed_data = data2[data2["observationLanguage"] == user_choice.value]
            y_sc = bq.LinearScale()
            x_ord = bq.scales.DateScale()
            line = bq.Lines(x=choosed_data["wiktionaryDumpVersion"], y=choosed_data[categories2].T, stroke_width=1,
                            display_legend=True, labels=categories2, scales={'x': x_ord, 'y': y_sc})
            ax_x = bq.Axis(scale=x_ord, grid_lines='solid', label='Date', tick_format='%m %Y')
            ax_y = bq.Axis(scale=y_sc, orientation='vertical', grid_lines='solid', label='Valeur', label_offset='-50')
            fig = bq.Figure(marks=[line], axes=[ax_x, ax_y],
                            title=f"Nombre d'éléments dans l'extraction {user_choice.value}", animation_duration=1000)

            def edit_graph(obj):
                # Update the line marks in place on language change.
                choosed_data = data2[data2["observationLanguage"] == user_choice.value]
                line.y = choosed_data[categories2].T
                line.x = choosed_data["wiktionaryDumpVersion"]
                fig.title = f"Nombre d'éléments dans l'extraction {user_choice.value}"
        if choice2.value == "glob":
            # Stacked bars: all languages side by side for one dump date.
            user_choice = widgets.Dropdown(options=[(np.datetime_as_string(item, unit='D'), item) for item in
                                                    data2["wiktionaryDumpVersion"].unique()], description="Choix:",
                                           value=max(data2["wiktionaryDumpVersion"].unique()))
            x_ord = bq.OrdinalScale()
            y_sc = bq.LinearScale()
            choosed_data = data2[data2["wiktionaryDumpVersion"] == user_choice.value]
            x = choosed_data["observationLanguage"].values
            y = choosed_data[categories2].T
            bar = bq.Bars(x=x, y=y, scales={'x': x_ord, 'y': y_sc}, type='stacked', labels=categories2,
                          color_mode='element', display_legend=True, colors=HTML_COLORS)
            ax_x = bq.Axis(scale=x_ord, grid_lines='solid', label='Pays')
            ax_y = bq.Axis(scale=y_sc, orientation='vertical', grid_lines='solid', label='Valeur', label_offset='-50')
            fig = bq.Figure(marks=[bar], axes=[ax_x, ax_y], animation_duration=1000,
                            title=f"Nombre de relations lexicales dans l'extraction du {np.datetime_as_string(user_choice.value, unit='D')}")

            def edit_graph(obj):
                # Update the bars in place when the selected dump date changes.
                choosed_data = data2[data2["wiktionaryDumpVersion"] == user_choice.value]
                bar.x = choosed_data["observationLanguage"].values
                bar.y = choosed_data[categories2].T
                fig.title = f"Nombre de relations lexicales dans l'extraction du {np.datetime_as_string(user_choice.value, unit='D')}"

            def add_pie_chart_in_tooltip(chart, d):
                # On hover, show the raw row behind the bar as an HTML table.
                idx = d["data"]["index"]
                bar.tooltip = widgets.HTML(
                    pd.DataFrame(data2[data2["wiktionaryDumpVersion"] == user_choice.value].iloc[idx]).to_html())
            bar.on_hover(add_pie_chart_in_tooltip)
        display(user_choice, fig)
        user_choice.observe(edit_graph, 'value')


choice2.observe(event2, 'value')
display(choice2, out2)
event2(None)  # initial draw
```
#### 3. dbnaryTranslationsCube
```
# --- Cube 3: dbnaryTranslationsCube --------------------------------------
# Translation counts per target language, plus the special
# "number_of_languages" series drawn on a secondary (right) y-axis.
dataset: str = "dbnstats:dbnaryTranslationsCube"
dimensions: list[str] = ['lime:language', 'dbnary:wiktionaryDumpVersion', 'dbnary:observationLanguage']
mesures: list[str] = ['dbnary:count']
dtypes: dict[str, type] = {'count': int}
data3: pd.DataFrame = get_datacube(ENDPOINT, dataset, dimensions, mesures, dtypes, PREFIXES).reset_index().sort_values(by=['wiktionaryDumpVersion', 'observationLanguage'])
relations3: np.ndarray = data3['language'].unique()
relations3 = relations3[relations3 != "number_of_languages"]  # plotted separately on its own axis
labels3: list[str] = [item.split('#')[-1] for item in relations3]
# Convert YYYYMMDD version strings to datetime for the date axis.
data3["wiktionaryDumpVersion"] = data3["wiktionaryDumpVersion"].map(transformation_date)
# Pivot: one ('count', language) column per target language.
data3 = data3.pivot_table(columns='language', index=['wiktionaryDumpVersion', 'observationLanguage'],
                          aggfunc=max).reset_index().sort_values(by=['wiktionaryDumpVersion', 'observationLanguage'])
out3 = widgets.Output()
choice3 = widgets.ToggleButtons(options=[('Statistiques globales', 'glob'), ('Par pays', 'pays')], description='Choix:',
                                disabled=False, tooltips=['Statistiques de tout les pays par années',
                                                          'Statistiques d\' pays au cours du temps'])


def event3(obj):
    # Rebuild the plot area whenever the global/per-country toggle changes.
    with out3:
        clear_output()
        if choice3.value == "pays":
            # Time series of translations per target language for one extraction.
            user_choice = widgets.Dropdown(options=list(data3["observationLanguage"].unique()), description="Choix:")
            choosed_data = data3[data3["observationLanguage"] == user_choice.value]
            y_sc = bq.LinearScale()
            y_sc2 = bq.LinearScale()  # secondary scale for the language count
            x_ord = bq.scales.DateScale()
            line = bq.Lines(x=choosed_data["wiktionaryDumpVersion"], y=choosed_data["count"][relations3].T,
                            stroke_width=1, display_legend=True, labels=labels3, scales={'x': x_ord, 'y': y_sc})
            line1 = bq.Lines(x=choosed_data["wiktionaryDumpVersion"],
                             y=choosed_data["count"]["number_of_languages"].values, scales={'x': x_ord, 'y': y_sc2},
                             stroke_width=1, display_legend=True, labels=["Number of languages"], colors=['green'],
                             line_style="dashed")
            ax_x = bq.Axis(scale=x_ord, grid_lines='solid', label='Date', tick_format='%m %Y')
            ax_y = bq.Axis(scale=y_sc, orientation='vertical', grid_lines='solid', label='Valeur', label_offset='-50')
            ax_y2 = bq.Axis(scale=y_sc2, orientation='vertical', grid_lines='solid', label='Nombre de langues',
                            label_offset='+50', side="right", label_color="green")
            fig = bq.Figure(marks=[line, line1], axes=[ax_x, ax_y, ax_y2], animation_duration=1000,
                            title=f"Nombre de traductions dans l'extraction {user_choice.value}")

            def edit_graph(obj):
                # Update both line marks in place on language change.
                choosed_data = data3[data3["observationLanguage"] == user_choice.value]
                line.y = choosed_data["count"][relations3].T
                line.x = choosed_data["wiktionaryDumpVersion"]
                line1.x = choosed_data["wiktionaryDumpVersion"]
                line1.y = choosed_data["count"]["number_of_languages"].values
                fig.title = f"Nombre de traductions dans l'extraction {user_choice.value}"
        if choice3.value == "glob":
            # Stacked bars per language plus a line for the language count.
            user_choice = widgets.Dropdown(options=[(np.datetime_as_string(item, unit='D'), item) for item in
                                                    data3["wiktionaryDumpVersion"].unique()], description="Choix:",
                                           value=max(data3["wiktionaryDumpVersion"].unique()))
            x_ord = bq.OrdinalScale()
            y_sc = bq.LinearScale()
            y_sc2 = bq.LinearScale()
            choosed_data = data3[data3["wiktionaryDumpVersion"] == user_choice.value]
            x = choosed_data["observationLanguage"].values
            y = choosed_data["count"][relations3].T
            bar = bq.Bars(x=x, y=y, scales={'x': x_ord, 'y': y_sc}, type='stacked', labels=labels3,
                          color_mode='element',
                          display_legend=True, colors=HTML_COLORS)
            line = bq.Lines(x=x, y=choosed_data["count"]["number_of_languages"].values, scales={'x': x_ord, 'y': y_sc2},
                            stroke_width=1, display_legend=True, labels=["Number of languages"], colors=["green"])
            ax_x = bq.Axis(scale=x_ord, grid_lines='solid', label='Pays')
            ax_y = bq.Axis(scale=y_sc, orientation='vertical', grid_lines='solid', label='Valeur', label_offset='-50')
            ax_y2 = bq.Axis(scale=y_sc2, orientation='vertical', grid_lines='solid', label='Nombre de langues',
                            label_offset='+50', side="right", label_color="green")
            fig = bq.Figure(marks=[bar, line], axes=[ax_x, ax_y, ax_y2], animation_duration=1000,
                            legend_location="top-left",
                            title=f"Nombre de traductions dans l'extraction du {np.datetime_as_string(user_choice.value, unit='D')}")

            def edit_graph(obj):
                # Update bars and line in place when the dump date changes.
                choosed_data = data3[data3["wiktionaryDumpVersion"] == user_choice.value].sort_values(
                    by="observationLanguage")
                bar.x = choosed_data["observationLanguage"].values
                bar.y = choosed_data["count"][relations3].T
                line.x = bar.x
                line.y = choosed_data["count"]["number_of_languages"].values
                fig.title = f"Nombre de traductions lexicales dans l'extraction du {np.datetime_as_string(user_choice.value, unit='D')}"

            def add_pie_chart_in_tooltip(chart, d):
                # On hover, show the raw counts behind the bar as an HTML table.
                idx = d["data"]["index"]
                bar.tooltip = widgets.HTML(pd.DataFrame(
                    data3[data3["wiktionaryDumpVersion"] == user_choice.value].iloc[idx]["count"]).to_html())
            bar.on_hover(add_pie_chart_in_tooltip)
        display(user_choice, fig)
        user_choice.observe(edit_graph, 'value')


choice3.observe(event3, 'value')
display(choice3, out3)
event3(None)  # initial draw
```
#### 4. enhancementConfidenceDataCube
```
# --- Cube 4: enhancementConfidenceDataCube -------------------------------
# Precision / recall / F1 of the translation enhancement, for two methods
# selectable with a second toggle ('random' baseline vs 'dbnary_tversky').
dataset: str = "dbnstats:enhancementConfidenceDataCube"
dimensions: list[str] = ['dbnary:wiktionaryDumpVersion', 'dbnary:enhancementMethod', 'dbnary:observationLanguage']
mesures: list[str] = ['dbnary:precisionMeasure', 'dbnary:recallMeasure', 'dbnary:f1Measure']
dtypes: dict[str, type] = {"precisionMeasure": float, "recallMeasure": float, "f1Measure": float}
data4t: pd.DataFrame = get_datacube(ENDPOINT, dataset, dimensions, mesures, dtypes, PREFIXES).reset_index().sort_values(
    by=['wiktionaryDumpVersion', 'observationLanguage'])
categories4: list[str] = ["precisionMeasure", "recallMeasure", "f1Measure"]
# Convert YYYYMMDD version strings to datetime for the date axis.
data4t["wiktionaryDumpVersion"] = data4t["wiktionaryDumpVersion"].map(transformation_date)
out4 = widgets.Output()
choice4 = widgets.ToggleButtons(options=[('Statistiques globales', 'glob'), ('Par pays', 'pays')], description='Choix:',
                                disabled=False, tooltips=['Statistiques de tout les pays par années',
                                                          'Statistiques d\' pays au cours du temps'])
choice4bis = widgets.ToggleButtons(options=[('Aléatoire', 'random'), ('Dbnary tversky', 'dbnary_tversky')],
                                   description='Méthode d\'amélioration:',
                                   disabled=False)


def event4(obj):
    # Rebuild the plot area when either toggle (view or method) changes.
    with out4:
        clear_output()
        # Keep only the rows for the currently selected enhancement method.
        data4 = data4t[data4t["enhancementMethod"] == choice4bis.value]
        if choice4.value == "pays":
            # Time series of the three measures for one extraction language.
            user_choice = widgets.Dropdown(options=list(data4["observationLanguage"].unique()), description="Choix:")
            choosed_data = data4[data4["observationLanguage"] == user_choice.value]
            y_sc = bq.LinearScale()
            x_ord = bq.scales.DateScale()
            line = bq.Lines(x=choosed_data["wiktionaryDumpVersion"], y=choosed_data[categories4].T, stroke_width=1,
                            display_legend=True, labels=categories4, scales={'x': x_ord, 'y': y_sc})
            ax_x = bq.Axis(scale=x_ord, grid_lines='solid', label='Date', tick_format='%m %Y')
            ax_y = bq.Axis(scale=y_sc, orientation='vertical', grid_lines='solid', label='Valeur', label_offset='-50')
            fig = bq.Figure(marks=[line], axes=[ax_x, ax_y], animation_duration=1000,
                            title=f"Précision de la prédiction du contexte de traduction dans l'extraction du {user_choice.value}")

            def edit_graph(obj):
                # Update the line marks in place on language change.
                choosed_data = data4[data4["observationLanguage"] == user_choice.value]
                line.y = choosed_data[categories4].T
                line.x = choosed_data["wiktionaryDumpVersion"]
                fig.title = f"Précision de la prédiction du contexte de traduction dans l'extraction du {user_choice.value}"
        if choice4.value == "glob":
            # Stacked bars: all languages side by side for one dump date.
            user_choice = widgets.Dropdown(options=[(np.datetime_as_string(item, unit='D'), item) for item in
                                                    data4["wiktionaryDumpVersion"].unique()], description="Choix:",
                                           value=max(data4["wiktionaryDumpVersion"].unique()))
            x_ord = bq.OrdinalScale()
            y_sc = bq.LinearScale()
            choosed_data = data4[data4["wiktionaryDumpVersion"] == user_choice.value]
            x = choosed_data["observationLanguage"].values
            y = choosed_data[categories4].T
            bar = bq.Bars(x=x, y=y, scales={'x': x_ord, 'y': y_sc}, type='stacked', labels=categories4,
                          color_mode='element', display_legend=True, colors=HTML_COLORS)
            ax_x = bq.Axis(scale=x_ord, grid_lines='solid', label='Pays')
            ax_y = bq.Axis(scale=y_sc, orientation='vertical', grid_lines='solid', label='Valeur', label_offset='-50')
            fig = bq.Figure(marks=[bar], axes=[ax_x, ax_y], animation_duration=1000,
                            title=f"Précision de la prédiction du contexte de traduction dans l'extraction du {np.datetime_as_string(user_choice.value, unit='D')}")

            def edit_graph(obj):
                # Update the bars in place when the selected dump date changes.
                choosed_data = data4[data4["wiktionaryDumpVersion"] == user_choice.value]
                bar.x = choosed_data["observationLanguage"].values
                bar.y = choosed_data[categories4].T
                fig.title = f"Précision de la prédiction du contexte de traduction dans l'extraction du {np.datetime_as_string(user_choice.value, unit='D')}"

            def add_pie_chart_in_tooltip(chart, d):
                # On hover, show the raw row behind the bar as an HTML table.
                idx = d["data"]["index"]
                bar.tooltip = widgets.HTML(
                    pd.DataFrame(data4[data4["wiktionaryDumpVersion"] == user_choice.value].iloc[idx]).to_html())
            bar.on_hover(add_pie_chart_in_tooltip)
        display(user_choice, fig)
        user_choice.observe(edit_graph, 'value')


choice4.observe(event4, 'value')
choice4bis.observe(event4, 'value')
display(choice4, choice4bis, out4)
event4(None)  # initial draw
```
#### 5. translationGlossesCube
```
# --- Cube 5: translationGlossesCube --------------------------------------
# Breakdown of translations by the kind of gloss attached (none, sense
# number, textual gloss, or both), per dump version and language.
dataset: str = "dbnstats:translationGlossesCube"
dimensions: list[str] = ['dbnary:wiktionaryDumpVersion', 'dbnary:observationLanguage']
mesures: list[str] = ['dbnary:translationsWithNoGloss', 'dbnary:translationsWithSenseNumber', 'dbnary:translationsWithSenseNumberAndTextualGloss', 'dbnary:translationsWithTextualGloss']
dtypes: dict[str, type] = {"translationsWithSenseNumber": float, "translationsWithSenseNumberAndTextualGloss": float, "translationsWithTextualGloss": float, "translationsWithNoGloss": float}
data5: pd.DataFrame = get_datacube(ENDPOINT, dataset, dimensions, mesures, dtypes, PREFIXES).reset_index().sort_values(
    by=['wiktionaryDumpVersion', 'observationLanguage'])
categories5: list[str] = ["translationsWithSenseNumber", "translationsWithSenseNumberAndTextualGloss",
                          "translationsWithTextualGloss", "translationsWithNoGloss"]
# Convert YYYYMMDD version strings to datetime for the date axis.
data5["wiktionaryDumpVersion"] = data5["wiktionaryDumpVersion"].map(transformation_date)
out5 = widgets.Output()
choice5 = widgets.ToggleButtons(options=[('Statistiques globales', 'glob'), ('Par pays', 'pays')], description='Choix:',
                                disabled=False, tooltips=['Statistiques de tout les pays par années',
                                                          'Statistiques d\' pays au cours du temps'])


def event5(obj):
    # Rebuild the plot area whenever the global/per-country toggle changes.
    with out5:
        clear_output()
        if choice5.value == "pays":
            # Time series of the four gloss categories for one extraction language.
            user_choice = widgets.Dropdown(options=list(data5["observationLanguage"].unique()), description="Choix:")
            choosed_data = data5[data5["observationLanguage"] == user_choice.value]
            y_sc = bq.LinearScale()
            x_ord = bq.scales.DateScale()
            line = bq.Lines(x=choosed_data["wiktionaryDumpVersion"], y=choosed_data[categories5].T, stroke_width=1,
                            display_legend=True, labels=categories5, scales={'x': x_ord, 'y': y_sc})
            ax_x = bq.Axis(scale=x_ord, grid_lines='solid', label='Date', tick_format='%m %Y')
            ax_y = bq.Axis(scale=y_sc, orientation='vertical', grid_lines='solid', label='Valeur', label_offset='-50')
            fig = bq.Figure(marks=[line], axes=[ax_x, ax_y], title=f"{user_choice.value}", animation_duration=1000)

            def edit_graph(obj):
                # Update the line marks in place on language change.
                choosed_data = data5[data5["observationLanguage"] == user_choice.value]
                line.y = choosed_data[categories5].T
                line.x = choosed_data["wiktionaryDumpVersion"]
                fig.title = f"{user_choice.value}"
        if choice5.value == "glob":
            # Stacked bars: all languages side by side for one dump date.
            user_choice = widgets.Dropdown(options=[(np.datetime_as_string(item, unit='D'), item) for item in
                                                    data5["wiktionaryDumpVersion"].unique()], description="Choix:",
                                           value=max(data5["wiktionaryDumpVersion"].unique()))
            x_ord = bq.OrdinalScale()
            y_sc = bq.LinearScale()
            choosed_data = data5[data5["wiktionaryDumpVersion"] == user_choice.value]
            x = choosed_data["observationLanguage"].values
            y = choosed_data[categories5].T
            bar = bq.Bars(x=x, y=y, scales={'x': x_ord, 'y': y_sc}, type='stacked', labels=categories5,
                          color_mode='element', display_legend=True, colors=HTML_COLORS)
            ax_x = bq.Axis(scale=x_ord, grid_lines='solid', label='Pays')
            ax_y = bq.Axis(scale=y_sc, orientation='vertical', grid_lines='solid', label='Valeur', label_offset='-50')
            fig = bq.Figure(marks=[bar], axes=[ax_x, ax_y],
                            title=f"{np.datetime_as_string(user_choice.value, unit='D')}", animation_duration=1000)

            def edit_graph(obj):
                # Update the bars in place when the selected dump date changes.
                choosed_data = data5[data5["wiktionaryDumpVersion"] == user_choice.value]
                bar.x = choosed_data["observationLanguage"].values
                bar.y = choosed_data[categories5].T
                fig.title = f"{np.datetime_as_string(user_choice.value, unit='D')}"

            def add_pie_chart_in_tooltip(chart, d):
                # On hover, show the raw row behind the bar as an HTML table.
                idx = d["data"]["index"]
                bar.tooltip = widgets.HTML(
                    pd.DataFrame(data5[data5["wiktionaryDumpVersion"] == user_choice.value].iloc[idx]).to_html())
            bar.on_hover(add_pie_chart_in_tooltip)
        display(user_choice, fig)
        user_choice.observe(edit_graph, 'value')


choice5.observe(event5, 'value')
display(choice5, out5)
event5(None)  # initial draw
```
| github_jupyter |
<CENTER>
<img src="img/PyDataLogoBig-Paris2015.png" width="50%">
<header>
<h1>Introduction to Pandas</h1>
<h3>April 3rd, 2015</h3>
<h2>Joris Van den Bossche</h2>
<p></p>
Source: <a href="https://github.com/jorisvandenbossche/2015-PyDataParis">https://github.com/jorisvandenbossche/2015-PyDataParis</a>
</header>
</CENTER>
# About me: Joris Van den Bossche
- PhD student at Ghent University and VITO, Belgium
- bio-science engineer, air quality research
- pandas core dev
->
- https://github.com/jorisvandenbossche
- [@jorisvdbossche](https://twitter.com/jorisvdbossche)
Licensed under [CC BY 4.0 Creative Commons](http://creativecommons.org/licenses/by/4.0/)
# Content of this talk
- Why do you need pandas?
- Basic introduction to the data structures
- Guided tour through some of the pandas features with a **case study about air quality**
If you want to follow along, this is a notebook that you can view or run yourself:
- All materials (notebook, data, link to nbviewer): https://github.com/jorisvandenbossche/2015-PyDataParis
- You need `pandas` > 0.15 (easy solution is using Anaconda)
Some imports:
```
# Render matplotlib figures inline in the notebook.
%matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn
# Truncate DataFrame reprs so slide output stays short.
pd.options.display.max_rows = 8
```
# Let's start with a showcase
## Case study: air quality in Europe
AirBase (The European Air quality dataBase): hourly measurements of all air quality monitoring stations from Europe
Starting from these hourly data for different stations:
```
import airbase  # local helper module shipped with the tutorial repository
# Hourly air-quality measurements; presumably one column per monitoring
# station (e.g. 'BETR801', 'FR04012' are used below) — see the repo data.
data = airbase.load_data()
data
```
to answering questions about this data in a few lines of code:
**Does the air pollution show a decreasing trend over the years?**
```
# Annual means per station from 1999 onwards; 'A' is the year-end resample
# frequency in this pandas-0.15-era API. NOTE(review): modern pandas needs
# an explicit aggregation, e.g. .resample('YE').mean() — version-dependent.
data['1999':].resample('A').plot(ylim=[0,100])
```
**How many exceedances of the limit values?**
```
# Hours above the limit value of 200 (µg/m3, per the NO2 context above),
# counted per calendar year.
exceedances = data > 200
exceedances = exceedances.groupby(exceedances.index.year).sum()
ax = exceedances.loc[2005:].plot(kind='bar')
ax.axhline(18, color='k', linestyle='--')  # reference line at 18 exceedances
```
**What is the difference in diurnal profile between weekdays and weekend?**
```
data['weekday'] = data.index.weekday  # 0 = Monday ... 6 = Sunday
data['weekend'] = data['weekday'].isin([5, 6])
# Mean diurnal profile (per hour of day) for station FR04012,
# split into weekend vs weekday columns.
data_weekend = data.groupby(['weekend', data.index.hour])['FR04012'].mean().unstack(level=0)
data_weekend.plot()
```
We will come back to these example, and build them up step by step.
# Why do you need pandas?
## Why do you need pandas?
When working with *tabular or structured data* (like R dataframe, SQL table, Excel spreadsheet, ...):
- Import data
- Clean up messy data
- Explore data, gain insight into data
- Process and prepare your data for analysis
- Analyse your data (together with scikit-learn, statsmodels, ...)
# Pandas: data analysis in python
For data-intensive work in Python the [Pandas](http://pandas.pydata.org) library has become essential.
What is ``pandas``?
* Pandas can be thought of as NumPy arrays with labels for rows and columns, and better support for heterogeneous data types, but it's also much, much more than that.
* Pandas can also be thought of as `R`'s `data.frame` in Python.
* Powerful for working with missing data, working with time series data, for reading and writing your data, for reshaping, grouping, merging your data, ...
It's documentation: http://pandas.pydata.org/pandas-docs/stable/
## Key features
* Fast, easy and flexible input/output for a lot of different data formats
* Working with missing data (`.dropna()`, `pd.isnull()`)
* Merging and joining (`concat`, `join`)
* Grouping: `groupby` functionality
* Reshaping (`stack`, `pivot`)
* Powerful time series manipulation (resampling, timezones, ..)
* Easy plotting
# Basic data structures
Pandas does this through two fundamental object types, both built upon NumPy arrays: the ``Series`` object, and the ``DataFrame`` object.
## Series
A Series is a basic holder for **one-dimensional labeled data**. It can be created much as a NumPy array is created:
```
s = pd.Series([0.1, 0.2, 0.3, 0.4])
s
```
### Attributes of a Series: `index` and `values`
The series has a built-in concept of an **index**, which by default is the numbers *0* through *N - 1*
```
s.index
```
You can access the underlying numpy array representation with the `.values` attribute:
```
s.values
```
We can access series values via the index, just like for NumPy arrays:
```
s[0]
```
Unlike the NumPy array, though, this index can be something other than integers:
```
s2 = pd.Series(np.arange(4), index=['a', 'b', 'c', 'd'])
s2
s2['c']
```
In this way, a ``Series`` object can be thought of as similar to an ordered dictionary mapping one typed value to another typed value:
```
population = pd.Series({'Germany': 81.3, 'Belgium': 11.3, 'France': 64.3, 'United Kingdom': 64.9, 'Netherlands': 16.9})
population
population['France']
```
but with the power of numpy arrays:
```
population * 1000
```
We can index or slice the populations as expected:
```
population['Belgium']
population['Belgium':'Germany']
```
Many things you can do with numpy arrays, can also be applied on objects.
Fancy indexing, like indexing with a list or boolean indexing:
```
population[['France', 'Netherlands']]
population[population > 20]
```
Element-wise operations:
```
population / 100
```
A range of methods:
```
population.mean()
```
### Alignment!
Only, pay attention to **alignment**: operations between series will align on the index:
```
s1 = population[['Belgium', 'France']]
s2 = population[['France', 'Germany']]
s1
s2
s1 + s2
```
## DataFrames: Multi-dimensional Data
A DataFrame is a **tabular data structure** (multi-dimensional object to hold labeled data) comprised of rows and columns, akin to a spreadsheet, database table, or R's data.frame object. You can think of it as multiple Series objects which share the same index.
<img src="img/dataframe.png" width=110%>
One of the most common ways of creating a dataframe is from a dictionary of arrays or lists.
Note that in the IPython notebook, the dataframe will display in a rich HTML view:
```
# Build a DataFrame from a dict of equal-length column lists.
# population is in millions and area in km² (see the density
# calculation later in the notebook).
data = {'country': ['Belgium', 'France', 'Germany', 'Netherlands', 'United Kingdom'],
        'population': [11.3, 64.3, 81.3, 16.9, 64.9],
        'area': [30510, 671308, 357050, 41526, 244820],
        'capital': ['Brussels', 'Paris', 'Berlin', 'Amsterdam', 'London']}
countries = pd.DataFrame(data)
countries
```
### Attributes of the DataFrame
A DataFrame has besides a `index` attribute, also a `columns` attribute:
```
countries.index
countries.columns
```
To check the data types of the different columns:
```
countries.dtypes
```
An overview of that information can be given with the `info()` method:
```
countries.info()
```
Also a DataFrame has a `values` attribute, but attention: when you have heterogeneous data, all values will be upcasted:
```
countries.values
```
If we don't like what the index looks like, we can reset it and set one of our columns:
```
countries = countries.set_index('country')
countries
```
To access a Series representing a column in the data, use typical indexing syntax:
```
countries['area']
```
As you play around with DataFrames, you'll notice that many operations which work on NumPy arrays will also work on dataframes.
Let's compute density of each country:
```
countries['population']*1000000 / countries['area']
```
Adding a new column to the dataframe is very simple:
```
countries['density'] = countries['population']*1000000 / countries['area']
countries
```
We can use masking to select certain data:
```
countries[countries['density'] > 300]
```
And we can do things like sorting the items in the array, and indexing to take the first two rows:
```
# NOTE(review): `sort_index(by=...)` is the pandas-0.15-era spelling used by
# this 2015 tutorial; in modern pandas this is
# `sort_values(by='density', ascending=False)`.
countries.sort_index(by='density', ascending=False)
```
One useful method to use is the ``describe`` method, which computes summary statistics for each column:
```
countries.describe()
```
The `plot` method can be used to quickly visualize the data in different ways:
```
countries.plot()
```
However, for this dataset, it does not say that much.
```
countries['population'].plot(kind='bar')
countries.plot(kind='scatter', x='population', y='area')
```
The available plotting types: ‘line’ (default), ‘bar’, ‘barh’, ‘hist’, ‘box’ , ‘kde’, ‘area’, ‘pie’, ‘scatter’, ‘hexbin’.
```
countries = countries.drop(['density'], axis=1)
```
## Some notes on selecting data
One of pandas' basic features is the labeling of rows and columns, but this makes indexing also a bit more complex compared to numpy. We now have to distinguish between:
- selection by label
- selection by position.
For a DataFrame, basic indexing selects the columns.
Selecting a single column:
```
countries['area']
```
or multiple columns:
```
countries[['area', 'density']]
```
But, slicing accesses the rows:
```
countries['France':'Netherlands']
```
For more advanced indexing, you have some extra attributes:
* `loc`: selection by label
* `iloc`: selection by position
```
countries.loc['Germany', 'area']
countries.loc['France':'Germany', :]
countries.loc[countries['density']>300, ['capital', 'population']]
```
Selecting by position with `iloc` works similar as indexing numpy arrays:
```
countries.iloc[0:2,1:3]
```
The different indexing methods can also be used to assign data:
```
countries.loc['Belgium':'Germany', 'population'] = 10
countries
```
There are many, many more interesting operations that can be done on Series and DataFrame objects, but rather than continue using this toy data, we'll instead move to a real-world example, and illustrate some of the advanced concepts along the way.
# Case study: air quality data of European monitoring stations (AirBase)
## AirBase (The European Air quality dataBase)
AirBase: hourly measurements of all air quality monitoring stations from Europe.
```
from IPython.display import HTML
HTML('<iframe src=http://www.eea.europa.eu/data-and-maps/data/airbase-the-european-air-quality-database-8#tab-data-by-country width=700 height=350></iframe>')
```
# Importing and cleaning the data
## Importing and exporting data with pandas
A wide range of input/output formats are natively supported by pandas:
* CSV, text
* SQL database
* Excel
* HDF5
* json
* html
* pickle
* ...
```
pd.read
countries.to
```
## Now for our case study
I downloaded some of the raw data files of AirBase and included it in the repo:
> station code: BETR801, pollutant code: 8 (nitrogen dioxide)
```
!head -1 ./data/BETR8010000800100hour.1-1-1990.31-12-2012
```
Just reading the tab-delimited data:
```
data = pd.read_csv("data/BETR8010000800100hour.1-1-1990.31-12-2012", sep='\t')
data.head()
```
Not really what we want.
With using some more options of `read_csv`:
```
# Build the column names for the raw AirBase file: 'date' followed by the 24
# hourly columns ('00'..'23'), each immediately followed by its quality-flag
# column named 'flag', i.e. ['date', '00', 'flag', '01', 'flag', ...].
# zip pairs each zero-padded hour with the literal 'flag'; the nested
# comprehension then flattens those pairs back into a single list.
colnames = ['date'] + [item for pair in zip(["{:02d}".format(i) for i in range(24)], ['flag']*24) for item in pair]
# Re-read the file with explicit names, no header row, and the station's
# missing-value sentinels (-999 / -9999) mapped to NaN.
data = pd.read_csv("data/BETR8010000800100hour.1-1-1990.31-12-2012",
sep='\t', header=None, na_values=[-999, -9999], names=colnames)
data.head()
```
So what did we do:
- specify that the values of -999 and -9999 should be regarded as NaN
- specified our own column names
For now, we disregard the 'flag' columns
```
data = data.drop('flag', axis=1)
data
```
Now, we want to reshape it: our goal is to have the different hours as row indices, merged with the date into a datetime-index.
## Intermezzo: reshaping your data with `stack`, `unstack` and `pivot`
The docs say:
> Pivot a level of the (possibly hierarchical) column labels, returning a
DataFrame (or Series in the case of an object with a single level of
column labels) having a hierarchical index with a new inner-most level
of row labels.
<img src="img/stack.png" width=70%>
```
df = pd.DataFrame({'A':['one', 'one', 'two', 'two'], 'B':['a', 'b', 'a', 'b'], 'C':range(4)})
df
```
To use `stack`/`unstack`, we need the values we want to shift from rows to columns or the other way around as the index:
```
df = df.set_index(['A', 'B'])
df
result = df['C'].unstack()
result
df = result.stack().reset_index(name='C')
df
```
`pivot` is similar to `unstack`, but lets you specify column names:
```
df.pivot(index='A', columns='B', values='C')
```
`pivot_table` is similar to `pivot`, but can work with duplicate indices and lets you specify an aggregation function:
```
# Toy frame with duplicate (A, B) pairs, so a plain pivot() would fail here.
df = pd.DataFrame(
    {
        'A': ['one', 'one', 'two', 'two', 'one', 'two'],
        'B': ['a', 'b', 'a', 'b', 'a', 'b'],
        'C': range(6),
    }
)
df
# pivot_table aggregates the duplicated index/column pairs; swap in 'mean'
# to average instead of counting.
df.pivot_table(index='A', columns='B', values='C', aggfunc='count') #'mean'
```
## Back to our case study
We can now use `stack` to create a timeseries:
```
data = data.set_index('date')
data_stacked = data.stack()
data_stacked
```
Now, lets combine the two levels of the index:
```
# Turn the two index levels (date string + hour string) into a real
# DatetimeIndex. reset_index moves them into columns, naming the values
# column after the station ('BETR801').
data_stacked = data_stacked.reset_index(name='BETR801')
# Concatenate 'YYYY-MM-DD' with the two-digit hour ('00'..'23') and parse
# with the matching format "%Y-%m-%d%H".
data_stacked.index = pd.to_datetime(data_stacked['date'] + data_stacked['level_1'], format="%Y-%m-%d%H")
# The raw date/hour columns are now redundant.
data_stacked = data_stacked.drop(['date', 'level_1'], axis=1)
data_stacked
```
For this talk, I put the above code in a separate function, and repeated this for some different monitoring stations:
```
import airbase
no2 = airbase.load_data()
```
- FR04037 (PARIS 13eme): urban background site at Square de Choisy
- FR04012 (Paris, Place Victor Basch): urban traffic site at Rue d'Alesia
- BETR802: urban traffic site in Antwerp, Belgium
- BETN029: rural background site in Houtem, Belgium
See http://www.eea.europa.eu/themes/air/interactive/no2
# Exploring the data
Some useful methods:
`head` and `tail`
```
no2.head(3)
no2.tail()
```
`info()`
```
no2.info()
```
Getting some basic summary statistics about the data with `describe`:
```
no2.describe()
```
Quickly visualizing the data
```
no2.plot(kind='box', ylim=[0,250])
no2['BETR801'].plot(kind='hist', bins=50)
no2.plot(figsize=(12,6))
```
This does not say too much ..
We can select part of the data (eg the latest 500 data points):
```
no2[-500:].plot(figsize=(12,6))
```
Or we can use some more advanced time series features -> next section!
## Working with time series data
When we ensure the DataFrame has a `DatetimeIndex`, time-series related functionality becomes available:
```
no2.index
```
Indexing a time series works with strings:
```
no2["2010-01-01 09:00": "2010-01-01 12:00"]
```
A nice feature is "partial string" indexing, where we can do implicit slicing by providing a partial datetime string.
E.g. all data of 2012:
```
no2['2012']
```
Or all data of January up to March 2012:
```
data['2012-01':'2012-03']
```
Time and date components can be accessed from the index:
```
no2.index.hour
no2.index.year
```
## The power of pandas: `resample`
A very powerful method is **`resample`: converting the frequency of the time series** (e.g. from hourly to daily data).
The time series has a frequency of 1 hour. I want to change this to daily:
```
no2.resample('D').head()
```
By default, `resample` takes the mean as aggregation function, but other methods can also be specified:
```
no2.resample('D', how='max').head()
```
The string to specify the new time frequency: http://pandas.pydata.org/pandas-docs/dev/timeseries.html#offset-aliases
These strings can also be combined with numbers, eg `'10D'`.
Further exploring the data:
```
# Monthly means over the whole record (use 'A' for yearly means instead).
no2.resample('M').mean().plot()
# no2.loc['2012'].resample('D').mean().plot()
# The removed `how=[...]` keyword becomes an explicit .agg() with a list
# of aggregation names.
no2.loc['2009':, 'FR04037'].resample('M').agg(['mean', 'median']).plot()
```
#### Question: The evolution of the yearly averages with, and the overall mean of all stations
```
# Restrict to 1999 onwards, then plot yearly ('A') averages per station.
no2_1999 = no2['1999':]
# resample() needs an explicit aggregation step in modern pandas.
no2_1999.resample('A').mean().plot()
# Overall mean across all stations, as a thick dashed black line.
no2_1999.mean(axis=1).resample('A').mean().plot(color='k', linestyle='--', linewidth=4)
```
# Analysing the data
## Intermezzo - the groupby operation (split-apply-combine)
By "group by" we are referring to a process involving one or more of the following steps
* **Splitting** the data into groups based on some criteria
* **Applying** a function to each group independently
* **Combining** the results into a data structure
<img src="img/splitApplyCombine.png">
Similar to SQL `GROUP BY`
The example of the image in pandas syntax:
```
# Toy frame matching the split-apply-combine image: three groups A/B/C,
# each appearing three times.
df = pd.DataFrame(
    {
        'key': ['A', 'B', 'C'] * 3,
        'data': [0, 5, 10, 5, 10, 15, 10, 15, 20],
    }
)
df
# The two calls below are equivalent spellings of a per-group sum.
df.groupby('key').aggregate('sum') # np.sum
df.groupby('key').sum()
```
## Back to the air quality data
**Question: how does the *typical monthly profile* look like for the different stations?**
First, we add a column to the dataframe that indicates the month (integer value of 1 to 12):
```
no2['month'] = no2.index.month
```
Now, we can calculate the mean of each month over the different years:
```
no2.groupby('month').mean()
no2.groupby('month').mean().plot()
```
#### Question: The typical diurnal profile for the different stations
```
no2.groupby(no2.index.hour).mean().plot()
```
#### Question: What is the difference in the typical diurnal profile between week and weekend days.
```
no2.index.weekday?
no2['weekday'] = no2.index.weekday
```
Add a column indicating week/weekend
```
no2['weekend'] = no2['weekday'].isin([5, 6])
data_weekend = no2.groupby(['weekend', no2.index.hour]).mean()
data_weekend.head()
data_weekend_FR04012 = data_weekend['FR04012'].unstack(level=0)
data_weekend_FR04012.head()
data_weekend_FR04012.plot()
```
#### Question: What are the number of exceedances of hourly values above the European limit 200 µg/m3 ?
```
# Boolean DataFrame: True wherever an hourly value exceeds the European
# limit of 200 µg/m3 (NaN compares as False, so missing hours don't count).
exceedances = no2 > 200
# group by year and count exceedances (sum of boolean)
exceedances = exceedances.groupby(exceedances.index.year).sum()
# Bar chart of yearly counts from 2005 on; the dashed line marks 18,
# presumably the permitted number of yearly exceedances — confirm against
# the EU directive.
ax = exceedances.loc[2005:].plot(kind='bar')
ax.axhline(18, color='k', linestyle='--')
```
#### Question: Visualize the typical week profile for the different stations as boxplots.
(Tip: the boxplot method of a DataFrame expects the data for the different boxes in different columns.)
```
# add a weekday and week column
no2['weekday'] = no2.index.weekday
# DatetimeIndex.week was removed in pandas 2.0; isocalendar() provides the
# ISO week number instead.
no2['week'] = no2.index.isocalendar().week
no2.head()
# pivot table so that the weekdays are the different columns
# (row selection by partial datetime string needs .loc in modern pandas)
data_pivoted = no2.loc['2012'].pivot_table(columns='weekday', index='week', values='FR04037')
data_pivoted.head()
box = data_pivoted.boxplot()
```
**Exercise**: Calculate the correlation between the different stations
```
# Correlation between stations on the raw hourly values.
no2[['BETR801', 'BETN029', 'FR04037', 'FR04012']].corr()
# Aggregate to daily means first — a Resampler has no .corr() of its own.
no2[['BETR801', 'BETN029', 'FR04037', 'FR04012']].resample('D').mean().corr()
no2 = no2[['BETR801', 'BETN029', 'FR04037', 'FR04012']]
```
# Further reading
- the documentation: http://pandas.pydata.org/pandas-docs/stable/
- Wes McKinney's book "Python for Data Analysis"
- lots of tutorials on the internet, eg http://github.com/jvns/pandas-cookbook
# What's new in pandas
Some recent enhancements of the last year (versions 0.14 to 0.16):
- Better integration for categorical data (`Categorical` and `CategoricalIndex`)
- The same for `Timedelta` and `TimedeltaIndex`
- More flexible SQL interface based on `sqlalchemy`
- MultiIndexing using slicers
- `.dt` accessor for accesing datetime-properties from columns
- Groupby enhancements
- And a lot of enhancements and bug fixes
# How can you help?
**We need you!**
Contributions are very welcome and can be in different domains:
- reporting issues
- improving the documentation
- testing release candidates and provide feedback
- triaging and fixing bugs
- implementing new features
- spreading the word
-> https://github.com/pydata/pandas
## Thanks for listening! Questions?
- https://github.com/jorisvandenbossche
- <mailto:jorisvandenbossche@gmail.com>
- [@jorisvdbossche](https://twitter.com/jorisvdbossche)
Slides and data: Source: https://github.com/jorisvandenbossche/2015-PyDataParis
Slides presented with 'live reveal' https://github.com/damianavila/RISE
| github_jupyter |
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
from sklearn.neighbors import KNeighborsRegressor
from sklearn import metrics
from math import sqrt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
df = pd.read_csv("E:/Data Science/Modules/Module 4(ML)/KNN/Data/bigmartsales.csv")
df.head()
df.isnull().sum()
```
**fill null values**
```
# --- Fill missing values ---
# Item_Weight is numeric (float), so impute with the column mean.
mean = df["Item_Weight"].mean()
print("Item_Weight mean:", mean)
# Assign the result back instead of fillna(..., inplace=True) on a column
# selection: that chained-inplace pattern is deprecated and stops working
# in future pandas versions.
df["Item_Weight"] = df["Item_Weight"].fillna(mean)
# Outlet_Size is categorical (object dtype), so impute with the most
# frequent value. mode() returns a Series (ties are possible); take the
# first entry.
mode = df["Outlet_Size"].mode()
print("Outlet_Size mode:", mode)
df["Outlet_Size"] = df["Outlet_Size"].fillna(mode[0])
df.isnull().sum()
df.shape
df.info()
df.describe()
# describe() summarises only the numeric columns by default.
```
#### Check values count
**It helps to find duplicate categorical names**
```
df["Item_Fat_Content"].value_counts()
# Normalise the inconsistent spellings in Item_Fat_Content. Target the
# column explicitly: the original frame-wide df.replace() could silently
# rewrite matching strings in unrelated columns, and the old
# replace("Low Fat", "Low Fat") call was a no-op.
df["Item_Fat_Content"] = df["Item_Fat_Content"].replace(
    {"LF": "Low Fat", "low fat": "Low Fat", "Regular": "regular", "reg": "regular"}
)
df["Item_Type"].value_counts()
df["Outlet_Identifier"].value_counts()
df["Outlet_Size"].value_counts()
df["Outlet_Location_Type"].value_counts()
df["Outlet_Type"].value_counts()
# Collapse the three supermarket sub-types into a single category.
df["Outlet_Type"] = df["Outlet_Type"].replace(
    {"Supermarket Type1": "Supermarket",
     "Supermarket Type2": "Supermarket",
     "Supermarket Type3": "Supermarket"}
)
df.head()
# Identifier columns are unique labels with no predictive signal for KNN.
df.drop(["Item_Identifier", "Outlet_Identifier"], axis=1, inplace=True)
df
# One-hot encode every remaining object column: each category becomes its
# own 0/1 indicator column (e.g. Item_Fat_Content_Low Fat is 1 when that
# row's fat content is "Low Fat", otherwise 0).
df = pd.get_dummies(df)
df
df.info()
df.head()
df.shape
```
**Train test split**
```
train, test = train_test_split(df, test_size = 0.3) # split the data into two parts: 70% training, 30% testing
# Why split into train & test?
# Because we don't know a good value of K in advance: we use the training
# data to choose K, and the held-out test data to judge the result.
# train = inputs (x_train) & outputs (y_train); the algorithm is fitted on this part.
# test  = inputs (x_test) & outputs (y_test) — the real output values.
# y_pred is the predicted value the trained algorithm produces for x_test.
train.shape
test.shape
```
**Features & targets**
```
#feature
x_train = train.drop("Item_Outlet_Sales", axis=1)
#target
y_train = train["Item_Outlet_Sales"] # x_trian & y_train shape values should be similar.
x_train.shape
y_train.shape # which has got only 1 column.
x_test = test.drop("Item_Outlet_Sales", axis=1)
y_test = test["Item_Outlet_Sales"]
x_test.shape
y_test.shape
print("Training feature set size:", x_train.shape)
print("Test feature set size:",x_test.shape)
print("Training variable set size:", y_train.shape)
print("Test variable set size:", y_test.shape)
```
### Feature Scaling
**Standarisation Method MinMaxScaler**
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0)) X_scaled = X_std * (max - min) + min
where min, max = feature_range.
```
scaler = MinMaxScaler(feature_range=(0,1))
# Fit the scaler on the training data only, then apply the *same* learned
# min/max to the test data. The original called fit_transform on x_test,
# which re-fits the scaler on test statistics — that leaks test-set
# information and puts train and test on inconsistent scales.
x_train_scaled = scaler.fit_transform(x_train) # fit_transform normalises the training data
x_train = pd.DataFrame(x_train_scaled)
x_test_scaled = scaler.transform(x_test)
x_test = pd.DataFrame(x_test_scaled)
x_train
x_test
```
### Model fit and training
```
rmse_val = []  # RMSE for each candidate K, used to pick the elbow below
# Idiomatic range(1, 21) replaces the old `for K in range(20): K = K + 1`.
for K in range(1, 21):
    model = KNeighborsRegressor(n_neighbors = K)
    model.fit(x_train, y_train)
    # Predict on x_test with the trained model, then score against the
    # real outputs y_test.
    y_pred = model.predict(x_test)
    error = np.sqrt(metrics.mean_squared_error(y_test, y_pred))
    rmse_val.append(error)
    print("RMSE value for K:", K, "is", error)

#plotting the rmse values against k values
curve = pd.DataFrame(rmse_val)
ax = curve.plot(kind = 'line', title = "Plotting RMSE for different K values", legend = False )
ax.set_xticks(range(0,20,1))

# Refit with the chosen K (read off the elbow of the curve above).
final_model = KNeighborsRegressor(n_neighbors = 8)
final_model.fit(x_train, y_train)
y_pred = final_model.predict(x_test)
y_pred

# Side-by-side comparison of actual vs predicted sales.
data = {"y_actual": y_test,
        "y_predicted": y_pred}
df_check = pd.DataFrame(data, columns = ["y_actual", "y_predicted"])
df_check.head()
```
### Evaluation
```
print("R-squared value of this fit:", round(metrics.r2_score(y_test,y_pred),3))
print("Mean absolute error", metrics.mean_absolute_error(y_test,y_pred))
print("Mean square error", metrics.mean_squared_error(y_test,y_pred))
print("Root mean square error",np.sqrt(metrics.mean_squared_error(y_test,y_pred)))
```
| github_jupyter |
<a href="https://colab.research.google.com/github/daanishrasheed/DS-Unit-2-Applied-Modeling/blob/master/DS_Sprint_Challenge_7.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
_Lambda School Data Science, Unit 2_
# Applied Modeling Sprint Challenge: Predict Chicago food inspections 🍔
For this Sprint Challenge, you'll use a dataset with information from inspections of restaurants and other food establishments in Chicago from January 2010 to March 2019.
[See this PDF](https://data.cityofchicago.org/api/assets/BAD5301B-681A-4202-9D25-51B2CAE672FF) for descriptions of the data elements included in this dataset.
According to [Chicago Department of Public Health — Food Protection Services](https://www.chicago.gov/city/en/depts/cdph/provdrs/healthy_restaurants/svcs/food-protection-services.html), "Chicago is home to 16,000 food establishments like restaurants, grocery stores, bakeries, wholesalers, lunchrooms, mobile food vendors and more. Our business is food safety and sanitation with one goal, to prevent the spread of food-borne disease. We do this by inspecting food businesses, responding to complaints and food recalls."
#### Your challenge: Predict whether inspections failed
The target is the `Fail` column.
- When the food establishment failed the inspection, the target is `1`.
- When the establishment passed, the target is `0`.
#### Run this cell to install packages in Colab:
```
%%capture
import sys
if 'google.colab' in sys.modules:
# Install packages in Colab
!pip install category_encoders==2.*
!pip install eli5
!pip install pandas-profiling==2.*
!pip install pdpbox
!pip install shap
```
#### Run this cell to load the data:
```
import pandas as pd
train_url = 'https://drive.google.com/uc?export=download&id=13_tP9JpLcZHSPVpWcua4t2rY44K_s4H5'
test_url = 'https://drive.google.com/uc?export=download&id=1GkDHjsiGrzOXoF_xcYjdzBTSjOIi3g5a'
train = pd.read_csv(train_url)
test = pd.read_csv(test_url)
assert train.shape == (51916, 17)
assert test.shape == (17306, 17)
```
### Part 1: Preprocessing
You may choose which features you want to use, and whether/how you will preprocess them. If you use categorical features, you may use any tools and techniques for encoding.
_To earn a score of 3 for this part, find and explain leakage. The dataset has a feature that will give you an ROC AUC score > 0.90 if you process and use the feature. Find the leakage and explain why the feature shouldn't be used in a real-world model to predict the results of future inspections._
### Part 2: Modeling
**Fit a model** with the train set. (You may use scikit-learn, xgboost, or any other library.) Use cross-validation or do a three-way split (train/validate/test) and **estimate your ROC AUC** validation score.
Use your model to **predict probabilities** for the test set. **Get an ROC AUC test score >= 0.60.**
_To earn a score of 3 for this part, get an ROC AUC test score >= 0.70 (without using the feature with leakage)._
### Part 3: Visualization
Make visualizations for model interpretation. (You may use any libraries.) Choose two of these types:
- Confusion Matrix
- Permutation Importances
- Partial Dependence Plot, 1 feature isolation
- Partial Dependence Plot, 2 features interaction
- Shapley Values
_To earn a score of 3 for this part, make four of these visualization types._
## Part 1: Preprocessing
> You may choose which features you want to use, and whether/how you will preprocess them. If you use categorical features, you may use any tools and techniques for encoding.
```
train.head(35)
# Keep only the leading violation code: split once on " - ", which
# presumably separates a numeric code from its description — confirm
# against the raw Violations strings.
n = train["Violations"].str.split(" - ", n = 1, expand = True)
train.drop(columns =["Violations"], inplace = True)
train['Violations'] = n[0]
train['Violations'].value_counts()
# Rows can list several violations separated by '|'; keep only the first.
s = train['Violations'].str.split("|", n = 1, expand = True)
train['Violations'] = s[0]
train.head(1)
# Apply the same two-step reduction to the test set.
n = test["Violations"].str.split(" - ", n = 1, expand = True)
test.drop(columns =["Violations"], inplace = True)
test['Violations'] = n[0]
test['Facility Type'].value_counts()
s = test['Violations'].str.split("|", n = 1, expand = True)
test['Violations'] = s[0]
train.head(1)
# Target column and the feature subset used by the models below.
target = 'Fail'
features = ['Facility Type', 'Risk', 'Inspection Type', 'Violations']
```
## Part 2: Modeling
> **Fit a model** with the train set. (You may use scikit-learn, xgboost, or any other library.) Use cross-validation or do a three-way split (train/validate/test) and **estimate your ROC AUC** validation score.
>
> Use your model to **predict probabilities** for the test set. **Get an ROC AUC test score >= 0.60.**
```
from sklearn.model_selection import train_test_split
train, val = train_test_split(train, train_size=0.80, test_size=0.20,
stratify=train['Fail'], random_state=42)
X_train = train[features]
y_train = train[target]
X_val = val[features]
y_val = val[target]
X_test = test[features]
y_test = test[target]
import category_encoders as ce
from sklearn.ensemble import RandomForestClassifier
from sklearn.impute import SimpleImputer
from sklearn.pipeline import make_pipeline
transformers = make_pipeline(
ce.OrdinalEncoder(),
SimpleImputer()
)
X_train_transformed = transformers.fit_transform(X_train)
X_val_transformed = transformers.transform(X_val)
X_val_transformed = pd.DataFrame(X_val_transformed, columns=X_val.columns)
rf = RandomForestClassifier(n_estimators=100, random_state=42, n_jobs=-1)
rf.fit(X_train_transformed, y_train)
print('Validation Accuracy', rf.score(X_val_transformed, y_val))
import category_encoders as ce
from sklearn.impute import SimpleImputer
from sklearn.pipeline import make_pipeline
from xgboost import XGBClassifier
processor = make_pipeline(
ce.OrdinalEncoder(),
SimpleImputer(strategy='median')
)
X_train_processed = processor.fit_transform(X_train)
X_val_processed = processor.transform(X_val)
eval_set = [(X_train_processed, y_train),
(X_val_processed, y_val)]
model = XGBClassifier(n_estimators=1000, n_jobs=-1)
model.fit(X_train_processed, y_train, eval_set=eval_set, eval_metric='auc',
early_stopping_rounds=10)
from sklearn.metrics import roc_auc_score
X_test_processed = processor.transform(X_test)
class_index = 1
y_pred_proba = model.predict_proba(X_test_processed)[:, class_index]
print(f'Test ROC AUC')
print(roc_auc_score(y_test, y_pred_proba)) # Ranges from 0-1, higher is better
```
## Part 3: Visualization
> Make visualizations for model interpretation. (You may use any libraries.) Choose two of these types:
>
> - Permutation Importances
> - Partial Dependence Plot, 1 feature isolation
> - Partial Dependence Plot, 2 features interaction
> - Shapley Values
```
%matplotlib inline
from pdpbox.pdp import pdp_isolate, pdp_plot
feature='Risk'
encoder = transformers.named_steps['ordinalencoder']
for item in encoder.mapping:
if item['col'] == feature:
feature_mapping = item['mapping']
feature_mapping = feature_mapping[feature_mapping.index.dropna()]
category_names = feature_mapping.index.tolist()
category_codes = feature_mapping.values.tolist()
isolated = pdp_isolate(
model=rf,
dataset=X_val_transformed,
model_features=X_val.columns,
feature=feature,
cust_grid_points=category_codes
)
fig, axes = pdp_plot(isolated, feature_name=feature,
plot_lines=True, frac_to_plot=0.01)
from pdpbox.pdp import pdp_interact, pdp_interact_plot
features = ['Risk', 'Inspection Type']
years_grid = [0, 5, 10, 15, 20, 25, 30]
interaction = pdp_interact(
model=rf,
dataset=X_val_transformed,
model_features=X_val.columns,
features=features,
cust_grid_points=[category_codes, years_grid]
)
pdp_interact_plot(interaction, plot_type='grid', feature_names=features);
from sklearn.metrics import accuracy_score
pipeline = make_pipeline(
ce.OrdinalEncoder(),
SimpleImputer(strategy='mean'),
RandomForestClassifier(n_estimators=100, random_state=42, n_jobs=-1)
)
# Fit on train, score on val
pipeline.fit(X_train, y_train)
y_pred = pipeline.predict(X_val)
print('Validation Accuracy', accuracy_score(y_val, y_pred))
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
import seaborn as sns
def plot_confusion_matrix(y_true, y_pred):
    """Draw a labelled confusion-matrix heatmap and return the seaborn Axes."""
    class_labels = unique_labels(y_true)
    # Wrap the raw matrix in a DataFrame so the heatmap axes carry
    # human-readable "Actual …" / "Predicted …" labels.
    frame = pd.DataFrame(
        confusion_matrix(y_true, y_pred),
        columns=[f'Predicted {lbl}' for lbl in class_labels],
        index=[f'Actual {lbl}' for lbl in class_labels],
    )
    return sns.heatmap(frame, annot=True, fmt='d', cmap='viridis')
plot_confusion_matrix(y_val, y_pred);
```
| github_jupyter |
# Chapter 3
`Original content created by Cam Davidson-Pilon`
`Ported to Python 3 and PyMC3 by Max Margenot (@clean_utensils) and Thomas Wiecki (@twiecki) at Quantopian (@quantopian)`
____
## Opening the black box of MCMC
The previous two chapters hid the inner-mechanics of PyMC3, and more generally Markov Chain Monte Carlo (MCMC), from the reader. The reason for including this chapter is three-fold. The first is that any book on Bayesian inference must discuss MCMC. I cannot fight this. Blame the statisticians. Secondly, knowing the process of MCMC gives you insight into whether your algorithm has converged. (Converged to what? We will get to that) Thirdly, we'll understand *why* we are returned thousands of samples from the posterior as a solution, which at first thought can be odd.
### The Bayesian landscape
When we setup a Bayesian inference problem with $N$ unknowns, we are implicitly creating an $N$ dimensional space for the prior distributions to exist in. Associated with the space is an additional dimension, which we can describe as the *surface*, or *curve*, that sits on top of the space, that reflects the *prior probability* of a particular point. The surface on the space is defined by our prior distributions. For example, if we have two unknowns $p_1$ and $p_2$, and priors for both are $\text{Uniform}(0,5)$, the space created is a square of length 5 and the surface is a flat plane that sits on top of the square (representing that every point is equally likely).
```
%matplotlib inline
import scipy.stats as stats
from IPython.core.pylabtools import figsize
import numpy as np
figsize(12.5, 4)
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
jet = plt.cm.jet
fig = plt.figure()
x = y = np.linspace(0, 5, 100)
X, Y = np.meshgrid(x, y)
plt.subplot(121)
uni_x = stats.uniform.pdf(x, loc=0, scale=5)
uni_y = stats.uniform.pdf(y, loc=0, scale=5)
M = np.dot(uni_x[:, None], uni_y[None, :])
im = plt.imshow(M, interpolation='none', origin='lower',
cmap=jet, vmax=1, vmin=-.15, extent=(0, 5, 0, 5))
plt.xlim(0, 5)
plt.ylim(0, 5)
plt.title("Landscape formed by Uniform priors.")
ax = fig.add_subplot(122, projection='3d')
ax.plot_surface(X, Y, M, cmap=plt.cm.jet, vmax=1, vmin=-.15)
ax.view_init(azim=390)
plt.title("Uniform prior landscape; alternate view");
```
Alternatively, if the two priors are $\text{Exp}(3)$ and $\text{Exp}(10)$, then the space is all positive numbers on the 2-D plane, and the surface induced by the priors looks like a water fall that starts at the point (0,0) and flows over the positive numbers.
The plots below visualize this. The more dark red the color, the more prior probability is assigned to that location. Conversely, areas with darker blue represent that our priors assign very low probability to that location.
```
figsize(12.5, 5)
fig = plt.figure()
plt.subplot(121)
exp_x = stats.expon.pdf(x, scale=3)
exp_y = stats.expon.pdf(x, scale=10)
M = np.dot(exp_x[:, None], exp_y[None, :])
CS = plt.contour(X, Y, M)
im = plt.imshow(M, interpolation='none', origin='lower',
cmap=jet, extent=(0, 5, 0, 5))
#plt.xlabel("prior on $p_1$")
#plt.ylabel("prior on $p_2$")
plt.title("$Exp(3), Exp(10)$ prior landscape")
ax = fig.add_subplot(122, projection='3d')
ax.plot_surface(X, Y, M, cmap=jet)
ax.view_init(azim=390)
plt.title("$Exp(3), Exp(10)$ prior landscape; \nalternate view");
```
These are simple examples in 2D space, where our brains can understand surfaces well. In practice, spaces and surfaces generated by our priors can be much higher dimensional.
If these surfaces describe our *prior distributions* on the unknowns, what happens to our space after we incorporate our observed data $X$? The data $X$ does not change the space, but it changes the surface of the space by *pulling and stretching the fabric of the prior surface* to reflect where the true parameters likely live. More data means more pulling and stretching, and our original shape becomes mangled or insignificant compared to the newly formed shape. Less data, and our original shape is more present. Regardless, the resulting surface describes the *posterior distribution*.
Again I must stress that it is, unfortunately, impossible to visualize this in large dimensions. For two dimensions, the data essentially *pushes up* the original surface to make *tall mountains*. The tendency of the observed data to *push up* the posterior probability in certain areas is checked by the prior probability distribution, so that less prior probability means more resistance. Thus in the double-exponential prior case above, a mountain (or multiple mountains) that might erupt near the (0,0) corner would be much higher than mountains that erupt closer to (5,5), since there is more resistance (low prior probability) near (5,5). The peak reflects the posterior probability of where the true parameters are likely to be found. Importantly, if the prior has assigned a probability of 0, then no posterior probability will be assigned there.
Suppose the priors mentioned above represent different parameters $\lambda$ of two Poisson distributions. We observe a few data points and visualize the new landscape:
```
# create the observed data
# sample size of data we observe, trying varying this (keep it less than 100 ;)
N = 1
# the true parameters, but of course we do not see these values...
lambda_1_true = 1
lambda_2_true = 3
#...we see the data generated, dependent on the above two values.
data = np.concatenate([
stats.poisson.rvs(lambda_1_true, size=(N, 1)),
stats.poisson.rvs(lambda_2_true, size=(N, 1))
], axis=1)
print("observed (2-dimensional,sample size = %d):" % N, data)
# plotting details.
x = y = np.linspace(.01, 5, 100)
likelihood_x = np.array([stats.poisson.pmf(data[:, 0], _x)
for _x in x]).prod(axis=1)
likelihood_y = np.array([stats.poisson.pmf(data[:, 1], _y)
for _y in y]).prod(axis=1)
L = np.dot(likelihood_x[:, None], likelihood_y[None, :])
figsize(12.5, 12)
# matplotlib heavy lifting below, beware!
plt.subplot(221)
uni_x = stats.uniform.pdf(x, loc=0, scale=5)
uni_y = stats.uniform.pdf(x, loc=0, scale=5)
M = np.dot(uni_x[:, None], uni_y[None, :])
im = plt.imshow(M, interpolation='none', origin='lower',
cmap=jet, vmax=1, vmin=-.15, extent=(0, 5, 0, 5))
plt.scatter(lambda_2_true, lambda_1_true, c="k", s=50, edgecolor="none")
plt.xlim(0, 5)
plt.ylim(0, 5)
plt.title("Landscape formed by Uniform priors on $p_1, p_2$.")
plt.subplot(223)
plt.contour(x, y, M * L)
im = plt.imshow(M * L, interpolation='none', origin='lower',
cmap=jet, extent=(0, 5, 0, 5))
plt.title("Landscape warped by %d data observation;\n Uniform priors on $p_1, p_2$." % N)
plt.scatter(lambda_2_true, lambda_1_true, c="k", s=50, edgecolor="none")
plt.xlim(0, 5)
plt.ylim(0, 5)
plt.subplot(222)
exp_x = stats.expon.pdf(x, loc=0, scale=3)
exp_y = stats.expon.pdf(x, loc=0, scale=10)
M = np.dot(exp_x[:, None], exp_y[None, :])
plt.contour(x, y, M)
im = plt.imshow(M, interpolation='none', origin='lower',
cmap=jet, extent=(0, 5, 0, 5))
plt.scatter(lambda_2_true, lambda_1_true, c="k", s=50, edgecolor="none")
plt.xlim(0, 5)
plt.ylim(0, 5)
plt.title("Landscape formed by Exponential priors on $p_1, p_2$.")
plt.subplot(224)
# This is the likelihood times prior, that results in the posterior.
plt.contour(x, y, M * L)
im = plt.imshow(M * L, interpolation='none', origin='lower',
cmap=jet, extent=(0, 5, 0, 5))
plt.scatter(lambda_2_true, lambda_1_true, c="k", s=50, edgecolor="none")
plt.title("Landscape warped by %d data observation;\n Exponential priors on \
$p_1, p_2$." % N)
plt.xlim(0, 5)
plt.ylim(0, 5);
```
The plot on the left is the deformed landscape with the $\text{Uniform}(0,5)$ priors, and the plot on the right is the deformed landscape with the exponential priors. Notice that the posterior landscapes look different from one another, though the data observed is identical in both cases. The reason is as follows. Notice the exponential-prior landscape, bottom right figure, puts very little *posterior* weight on values in the upper right corner of the figure: this is because *the prior does not put much weight there*. On the other hand, the uniform-prior landscape is happy to put posterior weight in the upper-right corner, as the prior puts more weight there.
Notice also the highest point, corresponding to the darkest red, is biased towards (0,0) in the exponential case, which is the result from the exponential prior putting more prior weight in the (0,0) corner.
The black dot represents the true parameters. Even with 1 sample point, the mountain attempts to contain the true parameters. Of course, inference with a sample size of 1 is incredibly naive, and choosing such a small sample size was only illustrative.
It's a great exercise to try changing the sample size to other values (try 2,5,10,100?...) and observing how our "mountain" posterior changes.
### Exploring the landscape using the MCMC
We should explore the deformed posterior space generated by our prior surface and observed data to find the posterior mountain. However, we cannot naively search the space: any computer scientist will tell you that traversing $N$-dimensional space is exponentially difficult in $N$: the size of the space quickly blows-up as we increase $N$ (see [the curse of dimensionality](http://en.wikipedia.org/wiki/Curse_of_dimensionality)). What hope do we have to find these hidden mountains? The idea behind MCMC is to perform an intelligent search of the space. To say "search" implies we are looking for a particular point, which is perhaps not accurate, as we are really looking for a broad mountain.
Recall that MCMC returns *samples* from the posterior distribution, not the distribution itself. Stretching our mountainous analogy to its limit, MCMC performs a task similar to repeatedly asking "How likely is this pebble I found to be from the mountain I am searching for?", and completes its task by returning thousands of accepted pebbles in hopes of reconstructing the original mountain. In MCMC and PyMC3 lingo, the returned sequence of "pebbles" are the samples, cumulatively called the *traces*.
When I say MCMC intelligently searches, I really am saying MCMC will *hopefully* converge towards the areas of high posterior probability. MCMC does this by exploring nearby positions and moving into areas with higher probability. Again, perhaps "converge" is not an accurate term to describe MCMC's progression. Converging usually implies moving towards a point in space, but MCMC moves towards a *broader area* in the space and randomly walks in that area, picking up samples from that area.
#### Why Thousands of Samples?
At first, returning thousands of samples to the user might sound like an inefficient way to describe the posterior distributions. I would argue that this is extremely efficient. Consider the alternative possibilities:
1. Returning a mathematical formula for the "mountain ranges" would involve describing a N-dimensional surface with arbitrary peaks and valleys.
2. Returning the "peak" of the landscape, while mathematically possible and a sensible thing to do as the highest point corresponds to most probable estimate of the unknowns, ignores the shape of the landscape, which we have previously argued is very important in determining posterior confidence in unknowns.
Besides computational reasons, likely the strongest reason for returning samples is that we can easily use *The Law of Large Numbers* to solve otherwise intractable problems. I postpone this discussion for the next chapter. With the thousands of samples, we can reconstruct the posterior surface by organizing them in a histogram.
### Algorithms to perform MCMC
There is a large family of algorithms that perform MCMC. Most of these algorithms can be expressed at a high level as follows: (Mathematical details can be found in the appendix.)
1. Start at current position.
2. Propose moving to a new position (investigate a pebble near you).
3. Accept/Reject the new position based on the position's adherence to the data and prior distributions (ask if the pebble likely came from the mountain).
4. 1. If you accept: Move to the new position. Return to Step 1.
2. Else: Do not move to new position. Return to Step 1.
5. After a large number of iterations, return all accepted positions.
This way we move in the general direction towards the regions where the posterior distributions exist, and collect samples sparingly on the journey. Once we reach the posterior distribution, we can easily collect samples as they likely all belong to the posterior distribution.
If the current position of the MCMC algorithm is in an area of extremely low probability, which is often the case when the algorithm begins (typically at a random location in the space), the algorithm will move in positions *that are likely not from the posterior* but better than everything else nearby. Thus the first moves of the algorithm are not reflective of the posterior.
In the above algorithm's pseudocode, notice that only the current position matters (new positions are investigated only near the current position). We can describe this property as *memorylessness*, i.e. the algorithm does not care *how* it arrived at its current position, only that it is there.
### Other approximation solutions to the posterior
Besides MCMC, there are other procedures available for determining the posterior distributions. A Laplace approximation is an approximation of the posterior using simple functions. A more advanced method is [Variational Bayes](http://en.wikipedia.org/wiki/Variational_Bayesian_methods). All three methods, Laplace Approximations, Variational Bayes, and classical MCMC have their pros and cons. We will only focus on MCMC in this book. That being said, my friend Imri Sofar likes to classify MCMC algorithms as either "they suck", or "they really suck". He classifies the particular flavour of MCMC used by PyMC3 as just *sucks* ;)
##### Example: Unsupervised Clustering using a Mixture Model
Suppose we are given the following dataset:
```
figsize(12.5, 4)
# Load the 1-D dataset to be clustered; `data` is reused by every cell below.
data = np.loadtxt("data/mixture_data.csv", delimiter=",")
plt.hist(data, bins=20, color="k", histtype="stepfilled", alpha=0.8)
plt.title("Histogram of the dataset")
plt.ylim([0, None]);
print(data[:10], "...")
```
What does the data suggest? It appears the data has a bimodal form, that is, it appears to have two peaks, one near 120 and the other near 200. Perhaps there are *two clusters* within this dataset.
This dataset is a good example of the data-generation modeling technique from last chapter. We can propose *how* the data might have been created. I suggest the following data generation algorithm:
1. For each data point, choose cluster 0 with probability $p$, else choose cluster 1.
2. Draw a random variate from a Normal distribution with parameters $\mu_i$ and $\sigma_i$ where $i$ was chosen in step 1.
3. Repeat.
This algorithm would create a similar effect as the observed dataset, so we choose this as our model. Of course, we do not know $p$ or the parameters of the Normal distributions. Hence we must infer, or *learn*, these unknowns.
Denote the Normal distributions $\text{N}_0$ and $\text{N}_1$ (having variables' index start at 0 is just Pythonic). Both currently have unknown mean and standard deviation, denoted $\mu_i$ and $\sigma_i, \; i =0,1$ respectively. A specific data point can be from either $\text{N}_0$ or $\text{N}_1$, and we assume that the data point is assigned to $\text{N}_0$ with probability $p$.
An appropriate way to assign data points to clusters is to use a PyMC3 `Categorical` stochastic variable. Its parameter is a $k$-length array of probabilities that must sum to one and its `value` attribute is a integer between 0 and $k-1$ randomly chosen according to the crafted array of probabilities (In our case $k=2$). *A priori*, we do not know what the probability of assignment to cluster 0 is, so we form a uniform variable on $(0, 1)$. We will call this $p_0$, so the probability of belonging to cluster 1 is therefore $p_1 = 1 - p_0$.
Unfortunately, we can't just give `[p0, p1]` to our `Categorical` variable. PyMC3 uses Theano under the hood to construct the models so we need to use `theano.tensor.stack()` to combine $p_0$ and $p_1$ into a vector that it can understand. We pass this vector into the `Categorical` variable as well as the `testval` parameter to give our variable an idea of where to start from.
```
import pymc3 as pm
import theano.tensor as T

with pm.Model() as model:
    # Prior probability that a point belongs to cluster 0.
    p0 = pm.Uniform('p', 0, 1)
    p1 = 1 - p0
    # Theano needs both probabilities stacked into a single vector.
    p = T.stack([p0, p1])
    # One latent cluster label per data point; testval seeds a random start.
    assignment = pm.Categorical("assignment", p,
                                shape=data.shape[0],
                                testval=np.random.randint(0, 2, data.shape[0]))

print("prior assignment, with p = %.2f:" % p0.tag.test_value)
print(assignment.tag.test_value[:10])
```
Looking at the above dataset, I would guess that the standard deviations of the two Normals are different. To maintain ignorance of what the standard deviations might be, we will initially model them as uniform on 0 to 100. We will include both standard deviations in our model using a single line of PyMC3 code:
sds = pm.Uniform("sds", 0, 100, shape=2)
Notice that we specified `shape=2`: we are modeling both $\sigma$s as a single PyMC3 variable. Note that this does not induce a necessary relationship between the two $\sigma$s, it is simply for succinctness.
We also need to specify priors on the centers of the clusters. The centers are really the $\mu$ parameters in these Normal distributions. Their priors can be modeled by a Normal distribution. Looking at the data, I have an idea where the two centers might be — I would guess somewhere around 120 and 190 respectively, though I am not very confident in these eyeballed estimates. Hence I will set $\mu_0 = 120, \mu_1 = 190$ and $\sigma_0 = \sigma_1 = 10$.
```
with model:
    # Uninformative prior on the two cluster standard deviations.
    sds = pm.Uniform("sds", 0, 100, shape=2)
    # Informative-ish Normal priors on the cluster centers (eyeballed from data).
    centers = pm.Normal("centers",
                        mu=np.array([120, 190]),
                        sd=np.array([10, 10]),
                        shape=2)
    # Per-point parameters selected by each point's latent cluster label.
    center_i = pm.Deterministic('center_i', centers[assignment])
    sd_i = pm.Deterministic('sd_i', sds[assignment])

    # and to combine it with the observations:
    observations = pm.Normal("obs", mu=center_i, sd=sd_i, observed=data)

print("Random assignments: ", assignment.tag.test_value[:4], "...")
print("Assigned center: ", center_i.tag.test_value[:4], "...")
print("Assigned standard deviation: ", sd_i.tag.test_value[:4])
```
Notice how we continue to build the model within the context of `Model()`. This automatically adds the variables that we create to our model. As long as we work within this context we will be working with the same variables that we have already defined.
Similarly, any sampling that we do within the context of `Model()` will be done only on the model whose context in which we are working. We will tell our model to explore the space that we have so far defined by defining the sampling methods, in this case `Metropolis()` for our continuous variables and `ElemwiseCategorical()` for our categorical variable. We will use these sampling methods together to explore the space by using `sample( iterations, step )`, where `iterations` is the number of steps you wish the algorithm to perform and `step` is the way in which you want to handle those steps. We use our combination of `Metropolis()` and `ElemwiseCategorical()` for the `step` and sample 25000 `iterations` below.
```
with model:
    # Metropolis for the continuous unknowns, a categorical sampler for the
    # discrete per-point labels; both steps are combined in one sample() call.
    step1 = pm.Metropolis(vars=[p, sds, centers])
    step2 = pm.ElemwiseCategorical(vars=[assignment])

    trace = pm.sample(25000, step=[step1, step2])
```
We have stored the paths of all our variables, or "traces", in the `trace` variable. These paths are the routes the unknown parameters (centers, precisions, and $p$) have taken thus far. The individual path of each variable is indexed by the PyMC3 variable `name` that we gave that variable when defining it within our model. For example, `trace["sds"]` will return a `numpy array` object that we can then index and slice as we would any other `numpy array` object.
```
figsize(12.5, 9)

plt.subplot(311)
lw = 1
center_trace = trace["centers"]

# for pretty colors later in the book.
colors = ["#348ABD", "#A60628"] if center_trace[-1, 0] > center_trace[-1, 1] \
    else ["#A60628", "#348ABD"]

plt.plot(center_trace[:, 0], label="trace of center 0", c=colors[0], lw=lw)
plt.plot(center_trace[:, 1], label="trace of center 1", c=colors[1], lw=lw)
plt.title("Traces of unknown parameters")
leg = plt.legend(loc="upper right")
leg.get_frame().set_alpha(0.7)

plt.subplot(312)
std_trace = trace["sds"]
plt.plot(std_trace[:, 0], label="trace of standard deviation of cluster 0",
         c=colors[0], lw=lw)
plt.plot(std_trace[:, 1], label="trace of standard deviation of cluster 1",
         c=colors[1], lw=lw)
plt.legend(loc="upper left")

plt.subplot(313)
# Trace of the mixture weight p (probability of assignment to cluster 0).
p_trace = trace["p"]
plt.plot(p_trace, label="$p$: frequency of assignment to cluster 0",
         color=colors[0], lw=lw)
plt.xlabel("Steps")
plt.ylim(0, 1)
plt.legend();
```
Notice the following characteristics:
1. The traces converges, not to a single point, but to a *distribution* of possible points. This is *convergence* in an MCMC algorithm.
2. Inference using the first few thousand points is a bad idea, as they are unrelated to the final distribution we are interested in. Thus it is a good idea to discard those samples before using the samples for inference. We call this period before convergence the *burn-in period*.
3. The traces appear as a random "walk" around the space, that is, the paths exhibit correlation with previous positions. This is both good and bad. We will always have correlation between current positions and the previous positions, but too much of it means we are not exploring the space well. This will be detailed in the Diagnostics section later in this chapter.
To achieve further convergence, we will perform more MCMC steps. In the pseudo-code algorithm of MCMC above, the only position that matters is the current position (new positions are investigated near the current position), implicitly stored as part of the `trace` object. To continue where we left off, we pass the `trace` that we have already stored into the `sample()` function with the same step value. The values that we have already calculated will not be overwritten. This ensures that our sampling continues where it left off in the same way that it left off.
We will sample the MCMC fifty thousand more times and visualize the progress below:
```
with model:
    # Passing the previous trace continues sampling from where it left off.
    trace = pm.sample(50000, step=[step1, step2], trace=trace)

figsize(12.5, 4)
center_trace = trace["centers"][25000:]
prev_center_trace = trace["centers"][:25000]

x = np.arange(25000)
plt.plot(x, prev_center_trace[:, 0], label="previous trace of center 0",
         lw=lw, alpha=0.4, c=colors[1])
plt.plot(x, prev_center_trace[:, 1], label="previous trace of center 1",
         lw=lw, alpha=0.4, c=colors[0])

# NOTE(review): the x-range assumes the combined trace holds 150,000 samples
# in total -- confirm against len(trace["centers"]) for this PyMC3 version.
x = np.arange(25000, 150000)
plt.plot(x, center_trace[:, 0], label="new trace of center 0", lw=lw, c="#348ABD")
plt.plot(x, center_trace[:, 1], label="new trace of center 1", lw=lw, c="#A60628")

plt.title("Traces of unknown center parameters")
leg = plt.legend(loc="upper right")
leg.get_frame().set_alpha(0.8)
plt.xlabel("Steps");
```
#### Cluster Investigation
We have not forgotten our main challenge: identify the clusters. We have determined posterior distributions for our unknowns. We plot the posterior distributions of the center and standard deviation variables below:
```
figsize(11.0, 4)
# Discard the first 25,000 samples as burn-in.
std_trace = trace["sds"][25000:]
prev_std_trace = trace["sds"][:25000]

# Subplot slots: (center, sd) for cluster 0 on row 1, cluster 1 on row 2.
_i = [1, 2, 3, 4]
for i in range(2):
    plt.subplot(2, 2, _i[2 * i])
    plt.title("Posterior of center of cluster %d" % i)
    plt.hist(center_trace[:, i], color=colors[i], bins=30,
             histtype="stepfilled")

    plt.subplot(2, 2, _i[2 * i + 1])
    plt.title("Posterior of standard deviation of cluster %d" % i)
    plt.hist(std_trace[:, i], color=colors[i], bins=30,
             histtype="stepfilled")
    # plt.autoscale(tight=True)

plt.tight_layout()
```
The MCMC algorithm has proposed that the most likely centers of the two clusters are near 120 and 200 respectively. Similar inference can be applied to the standard deviation.
We are also given the posterior distributions for the labels of the data point, which is present in `trace["assignment"]`. Below is a visualization of this. The y-axis represents a subsample of the posterior labels for each data point. The x-axis are the sorted values of the data points. A red square is an assignment to cluster 1, and a blue square is an assignment to cluster 0.
```
import matplotlib as mpl

figsize(12.5, 4.5)
plt.cmap = mpl.colors.ListedColormap(colors)
# Rows: every 400th posterior label sample; columns: data points sorted
# by value, so uncertainty shows up as color mixing per column.
plt.imshow(trace["assignment"][::400, np.argsort(data)],
           cmap=plt.cmap, aspect=.4, alpha=.9)
plt.xticks(np.arange(0, data.shape[0], 40),
           ["%.2f" % s for s in np.sort(data)[::40]])
plt.ylabel("posterior sample")
plt.xlabel("value of $i$th data point")
plt.title("Posterior labels of data points");
```
Looking at the above plot, it appears that the most uncertainty is between 150 and 170. The above plot slightly misrepresents things, as the x-axis is not a true scale (it displays the value of the $i$th sorted data point.) A more clear diagram is below, where we have estimated the *frequency* of each data point belonging to the labels 0 and 1.
```
cmap = mpl.colors.LinearSegmentedColormap.from_list("BMH", colors)
assign_trace = trace["assignment"]
# Mean of the 0/1 labels across samples = posterior frequency of cluster 1;
# 1 minus that is plotted as the probability of belonging to cluster 0.
plt.scatter(data, 1 - assign_trace.mean(axis=0), cmap=cmap,
            c=assign_trace.mean(axis=0), s=50)
plt.ylim(-0.05, 1.05)
plt.xlim(35, 300)
plt.title("Probability of data point belonging to cluster 0")
plt.ylabel("probability")
plt.xlabel("value of data point");
```
Even though we modeled the clusters using Normal distributions, we didn't get just a single Normal distribution that *best* fits the data (whatever our definition of best is), but a distribution of values for the Normal's parameters. How can we choose just a single pair of values for the mean and variance and determine a *sorta-best-fit* gaussian?
One quick and dirty way (which has nice theoretical properties we will see in Chapter 5), is to use the *mean* of the posterior distributions. Below we overlay the Normal density functions, using the mean of the posterior distributions as the chosen parameters, with our observed data:
```
# Overlay the two Normal densities, evaluated at the posterior-mean
# parameters, on a normalized histogram of the observed data.
norm = stats.norm  # NOTE: shadows any module-level `norm` import from scipy.stats
x = np.linspace(20, 300, 500)
posterior_center_means = center_trace.mean(axis=0)
posterior_std_means = std_trace.mean(axis=0)
posterior_p_mean = trace["p"].mean()

# `density=True` replaces the old `normed=True` keyword, which was
# deprecated and removed in matplotlib >= 3.1; both normalize the
# histogram to unit area so it is comparable to the pdf curves.
plt.hist(data, bins=20, histtype="step", density=True, color="k",
         lw=2, label="histogram of data")
# Cluster 0 density, weighted by the posterior mean of p.
y = posterior_p_mean * norm.pdf(x, loc=posterior_center_means[0],
                                scale=posterior_std_means[0])
plt.plot(x, y, label="Cluster 0 (using posterior-mean parameters)", lw=3)
plt.fill_between(x, y, color=colors[1], alpha=0.3)

# Cluster 1 density, weighted by 1 - p.
y = (1 - posterior_p_mean) * norm.pdf(x, loc=posterior_center_means[1],
                                      scale=posterior_std_means[1])
plt.plot(x, y, label="Cluster 1 (using posterior-mean parameters)", lw=3)
plt.fill_between(x, y, color=colors[0], alpha=0.3)

plt.legend(loc="upper left")
plt.title("Visualizing Clusters using posterior-mean parameters");
```
### Important: Don't mix posterior samples
In the above example, a possible (though less likely) scenario is that cluster 0 has a very large standard deviation, and cluster 1 has a small standard deviation. This would still satisfy the evidence, albeit less so than our original inference. Alternatively, it would be incredibly unlikely for *both* distributions to have a small standard deviation, as the data does not support this hypothesis at all. Thus the two standard deviations are *dependent* on each other: if one is small, the other must be large. In fact, *all* the unknowns are related in a similar manner. For example, if a standard deviation is large, the mean has a wider possible space of realizations. Conversely, a small standard deviation restricts the mean to a small area.
During MCMC, we are returned vectors representing samples from the unknown posteriors. Elements of different vectors cannot be used together, as this would break the above logic: perhaps a sample has returned that cluster 1 has a small standard deviation, hence all the other variables in that sample would incorporate that and be adjusted accordingly. It is easy to avoid this problem though, just make sure you are indexing traces correctly.
Another small example to illustrate the point. Suppose two variables, $x$ and $y$, are related by $x+y=10$. We model $x$ as a Normal random variable with mean 4 and draw 10,000 samples.
```
import pymc3 as pm

with pm.Model() as model:
    x = pm.Normal("x", mu=4, tau=10)
    # y is fully determined by x: every sample satisfies x + y = 10.
    y = pm.Deterministic("y", 10 - x)

    trace_2 = pm.sample(10000, pm.Metropolis())

plt.plot(trace_2["x"])
plt.plot(trace_2["y"])
plt.title("Displaying (extreme) case of dependence between unknowns");
```
As you can see, the two variables are not unrelated, and it would be wrong to add the $i$th sample of $x$ to the $j$th sample of $y$, unless $i = j$.
#### Returning to Clustering: Prediction
The above clustering can be generalized to $k$ clusters. Choosing $k=2$ allowed us to visualize the MCMC better, and examine some very interesting plots.
What about prediction? Suppose we observe a new data point, say $x = 175$, and we wish to label it to a cluster. It is foolish to simply assign it to the *closer* cluster center, as this ignores the standard deviation of the clusters, and we have seen from the plots above that this consideration is very important. More formally: we are interested in the *probability* (as we cannot be certain about labels) of assigning $x=175$ to cluster 1. Denote the assignment of $x$ as $L_x$, which is equal to 0 or 1, and we are interested in $P(L_x = 1 \;|\; x = 175 )$.
A naive method to compute this is to re-run the above MCMC with the additional data point appended. The disadvantage with this method is that it will be slow to infer for each novel data point. Alternatively, we can try a *less precise*, but much quicker method.
We will use Bayes Theorem for this. If you recall, Bayes Theorem looks like:
$$ P( A | X ) = \frac{ P( X | A )P(A) }{P(X) }$$
In our case, $A$ represents $L_x = 1$ and $X$ is the evidence we have: we observe that $x = 175$. For a particular sample set of parameters for our posterior distribution, $( \mu_0, \sigma_0, \mu_1, \sigma_1, p)$, we are interested in asking "Is the probability that $x$ is in cluster 1 *greater* than the probability it is in cluster 0?", where the probability is dependent on the chosen parameters.
\begin{align}
& P(L_x = 1| x = 175 ) \gt P(L_x = 0| x = 175 ) \\\\[5pt]
& \frac{ P( x=175 | L_x = 1 )P( L_x = 1 ) }{P(x = 175) } \gt \frac{ P( x=175 | L_x = 0 )P( L_x = 0 )}{P(x = 175) }
\end{align}
As the denominators are equal, they can be ignored (and good riddance, because computing the quantity $P(x = 175)$ can be difficult).
$$ P( x=175 | L_x = 1 )P( L_x = 1 ) \gt P( x=175 | L_x = 0 )P( L_x = 0 ) $$
```
norm_pdf = stats.norm.pdf
# Burned-in trace of the mixture weight p.
p_trace = trace["p"][25000:]
prev_p_trace = trace["p"][:25000]
x = 175

# Elementwise comparison over posterior samples:
# P(x | L_x = 1) P(L_x = 1)  >  P(x | L_x = 0) P(L_x = 0)
# (the shared denominator P(x) cancels, see text above).
v = (1 - p_trace) * norm_pdf(x, loc=center_trace[:, 1], scale=std_trace[:, 1]) > \
    (p_trace) * norm_pdf(x, loc=center_trace[:, 0], scale=std_trace[:, 0])

# Fraction of posterior samples for which cluster 1 wins.
print("Probability of belonging to cluster 1:", v.mean())
```
Giving us a probability instead of a label is a very useful thing. Instead of the naive
L = 1 if prob > 0.5 else 0
we can optimize our guesses using a *loss function*, which the entire fifth chapter is devoted to.
### Using `MAP` to improve convergence
If you ran the above example yourself, you may have noticed that our results were not consistent: perhaps your cluster division was more scattered, or perhaps less scattered. The problem is that our traces are a function of the *starting values* of the MCMC algorithm.
It can be mathematically shown that letting the MCMC run long enough, by performing many steps, the algorithm *should forget its initial position*. In fact, this is what it means to say the MCMC converged (in practice though we can never achieve total convergence). Hence if we observe different posterior analysis, it is likely because our MCMC has not fully converged yet, and we should not use samples from it yet (we should use a larger burn-in period ).
In fact, poor starting values can prevent any convergence, or significantly slow it down. Ideally, we would like to have the chain start at the *peak* of our landscape, as this is exactly where the posterior distributions exist. Hence, if we started at the "peak", we could avoid a lengthy burn-in period and incorrect inference. Generally, we call this "peak" the *maximum a posteriori* or, more simply, the *MAP*.
Of course, we do not know where the MAP is. PyMC3 provides a function that will approximate, if not find, the MAP location. In the PyMC3 main namespace is the `find_MAP` function. If you call this function within the context of `Model()`, it will calculate the MAP which you can then pass to `pm.sample()` as a `start` parameter.
start = pm.find_MAP()
    trace = pm.sample(2000, step=pm.Metropolis(), start=start)
The `find_MAP()` function has the flexibility of allowing the user to choose which optimization algorithm to use (after all, this is an optimization problem: we are looking for the values that maximize our landscape), as not all optimization algorithms are created equal. The default optimization algorithm in function call is the Broyden-Fletcher-Goldfarb-Shanno ([BFGS](https://en.wikipedia.org/wiki/Broyden-Fletcher-Goldfarb-Shanno_algorithm)) algorithm to find the maximum of the log-posterior. As an alternative, you can use other optimization algorithms from the `scipy.optimize` module. For example, you can use Powell's Method, a favourite of PyMC blogger [Abraham Flaxman](http://healthyalgorithms.com/) [1], by calling `find_MAP(fmin=scipy.optimize.fmin_powell)`. The default works well enough, but if convergence is slow or not guaranteed, feel free to experiment with Powell's method or the other algorithms available.
The MAP can also be used as a solution to the inference problem, as mathematically it is the *most likely* value for the unknowns. But as mentioned earlier in this chapter, this location ignores the uncertainty and doesn't return a distribution.
#### Speaking of the burn-in period
It is still a good idea to decide on a burn-in period, even if we are using `find_MAP()` prior to sampling, just to be safe. We can no longer automatically discard samples with a `burn` parameter in the `sample()` function as we could in PyMC2, but it is easy enough to simply discard the beginning section of the trace just through array slicing. As one does not know when the chain has fully converged, a good rule of thumb is to discard the first *half* of your samples, sometimes up to 90% of the samples for longer runs. To continue the clustering example from above, the new code would look something like:
with pm.Model() as model:
start = pm.find_MAP()
step = pm.Metropolis()
trace = pm.sample(100000, step=step, start=start)
burned_trace = trace[50000:]
## Diagnosing Convergence
### Autocorrelation
Autocorrelation is a measure of how related a series of numbers is with itself. A measurement of 1.0 is perfect positive autocorrelation, 0 no autocorrelation, and -1 is perfect negative correlation. If you are familiar with standard *correlation*, then autocorrelation is just how correlated a series, $x_t$, at time $t$ is with the series at time $t-k$:
$$R(k) = Corr( x_t, x_{t-k} ) $$
For example, consider the two series:
$$x_t \sim \text{Normal}(0,1), \;\; x_0 = 0$$
$$y_t \sim \text{Normal}(y_{t-1}, 1 ), \;\; y_0 = 0$$
which have example paths like:
```
figsize(12.5, 4)

import pymc3 as pm

# x_t: i.i.d. standard Normal draws -- no autocorrelation by construction.
x_t = np.random.normal(0, 1, 200)
x_t[0] = 0

# y_t: a random walk -- each value is a Normal step from the previous one,
# so the series is strongly autocorrelated.
y_t = np.zeros(200)
for i in range(1, 200):
    y_t[i] = np.random.normal(y_t[i - 1], 1)

plt.plot(y_t, label="$y_t$", lw=3)
plt.plot(x_t, label="$x_t$", lw=3)
plt.xlabel("time, $t$")
plt.legend();
```
One way to think of autocorrelation is "If I know the position of the series at time $s$, can it help me know where I am at time $t$?" In the series $x_t$, the answer is No. By construction, $x_t$ are random variables. If I told you that $x_2 = 0.5$, could you give me a better guess about $x_3$? No.
On the other hand, $y_t$ is autocorrelated. By construction, if I knew that $y_2 = 10$, I can be very confident that $y_3$ will not be very far from 10. Similarly, I can even make a (less confident guess) about $y_4$: it will probably not be near 0 or 20, but a value of 5 is not too unlikely. I can make a similar argument about $y_5$, but again, I am less confident. Taking this to its logical conclusion, we must concede that as $k$, the lag between time points, increases the autocorrelation decreases. We can visualize this:
```
def autocorr(x):
    """Return the normalized autocorrelation of x for lags 0..len(x)-1.

    Based on http://tinyurl.com/afz57c4: compute the full discrete
    self-correlation, scale it so the peak equals 1, and keep only the
    non-negative-lag half.
    """
    full_corr = np.correlate(x, x, mode='full')
    scaled = full_corr / full_corr.max()
    midpoint = scaled.size // 2
    return scaled[midpoint:]
colors = ["#348ABD", "#A60628", "#7A68A6"]

# Plot autocorrelation against lag k (lag 0 is skipped: it is always 1).
x = np.arange(1, 200)
plt.bar(x, autocorr(y_t)[1:], width=1, label="$y_t$",
        edgecolor=colors[0], color=colors[0])
plt.bar(x, autocorr(x_t)[1:], width=1, label="$x_t$",
        color=colors[1], edgecolor=colors[1])
plt.legend(title="Autocorrelation")
plt.ylabel("measured correlation \nbetween $y_t$ and $y_{t-k}$.")
plt.xlabel("k (lag)")
plt.title("Autocorrelation plot of $y_t$ and $x_t$ for differing $k$ lags.");
```
Notice that as $k$ increases, the autocorrelation of $y_t$ decreases from a very high point. Compare with the autocorrelation of $x_t$ which looks like noise (which it really is), hence we can conclude no autocorrelation exists in this series.
#### How does this relate to MCMC convergence?
By the nature of the MCMC algorithm, we will always be returned samples that exhibit autocorrelation (this is because of the step `from your current position, move to a position near you`).
A chain that is not exploring the space well will exhibit very high autocorrelation. Visually, if the trace seems to meander like a river, and not settle down, the chain will have high autocorrelation.
This does not imply that a converged MCMC has low autocorrelation. Hence low autocorrelation is not necessary for convergence, but it is sufficient. PyMC3 has a built-in autocorrelation plotting function in the `plots` module.
### Thinning
Another issue can arise if there is high-autocorrelation between posterior samples. Many post-processing algorithms require samples to be *independent* of each other. This can be solved, or at least reduced, by only returning to the user every $n$th sample, thus removing some autocorrelation. Below we perform an autocorrelation plot for $y_t$ with differing levels of thinning:
```
# Largest lag that exists for the most-thinned series (every 3rd of 200 points).
max_x = 200 // 3 + 1
x = np.arange(1, max_x)

plt.bar(x, autocorr(y_t)[1:max_x], edgecolor=colors[0],
        label="no thinning", color=colors[0], width=1)
# y_t[::2] / y_t[::3] keep every 2nd / 3rd sample to reduce autocorrelation.
plt.bar(x, autocorr(y_t[::2])[1:max_x], edgecolor=colors[1],
        label="keeping every 2nd sample", color=colors[1], width=1)
plt.bar(x, autocorr(y_t[::3])[1:max_x], width=1, edgecolor=colors[2],
        label="keeping every 3rd sample", color=colors[2])

plt.autoscale(tight=True)
plt.legend(title="Autocorrelation plot for $y_t$", loc="lower left")
plt.ylabel("measured correlation \nbetween $y_t$ and $y_{t-k}$.")
plt.xlabel("k (lag)")
plt.title("Autocorrelation of $y_t$ (no thinning vs. thinning) \
at differing $k$ lags.");
```
With more thinning, the autocorrelation drops quicker. There is a tradeoff though: higher thinning requires more MCMC iterations to achieve the same number of returned samples. For example, 10 000 samples unthinned is 100 000 with a thinning of 10 (though the latter has less autocorrelation).
What is a good amount of thinning? The returned samples will always exhibit some autocorrelation, regardless of how much thinning is done. So long as the autocorrelation tends to zero, you are probably ok. Typically thinning of more than 10 is not necessary.
### `pymc3.plots`
It seems silly to have to manually create histograms, autocorrelation plots and trace plots each time we perform MCMC. The authors of PyMC3 have included a visualization tool for just this purpose.
The `pymc3.plots` module contains a few different plotting functions that you might find useful. For each different plotting function contained therein, you simply pass a `trace` returned from sampling as well as a list, `varnames`, of the variables that you are interested in. This module can provide you with plots of autocorrelation and the posterior distributions of each variable and their traces, among others.
Below we use the tool to plot the centers of the clusters.
```
# Built-in PyMC3 diagnostics for the cluster-center variable:
# density + trace per unknown, individual posterior histograms with the
# 95% HPD interval, and autocorrelation plots.
pm.plots.traceplot(trace=trace, varnames=["centers"])
pm.plots.plot_posterior(trace=trace["centers"][:,0])
pm.plots.plot_posterior(trace=trace["centers"][:,1])
pm.plots.autocorrplot(trace=trace, varnames=["centers"]);
```
The first plotting function gives us the posterior density of each unknown in the `centers` variable as well as the `trace` of each. `trace` plot is useful for inspecting that possible "meandering" property that is a result of non-convergence. The density plot gives us an idea of the shape of the distribution of each unknown, but it is better to look at each of them individually.
The second plotting function(s) provides us with a histogram of the samples with a few added features. The text overlay in the center shows us the posterior mean, which is a good summary of posterior distribution. The interval marked by the horizontal black line overlay represents the *95% credible interval*, sometimes called the *highest posterior density interval* and not to be confused with a *95% confidence interval*. We won't get into the latter, but the former can be interpreted as "there is a 95% chance the parameter of interest lies in this interval". When communicating your results to others, it is incredibly important to state this interval. One of our purposes for studying Bayesian methods is to have a clear understanding of our uncertainty in unknowns. Combined with the posterior mean, the 95% credible interval provides a reliable interval to communicate the likely location of the unknown (provided by the mean) *and* the uncertainty (represented by the width of the interval).
The last plots, titled `center_0` and `center_1` are the generated autocorrelation plots, similar to the ones displayed above.
## Useful tips for MCMC
Bayesian inference would be the *de facto* method if it weren't for MCMC's computational difficulties. In fact, MCMC is what turns most users off practical Bayesian inference. Below I present some good heuristics to help convergence and speed up the MCMC engine:
### Intelligent starting values
It would be great to start the MCMC algorithm off near the posterior distribution, so that it will take little time to start sampling correctly. We can aid the algorithm by telling where we *think* the posterior distribution will be by specifying the `testval` parameter in the `Stochastic` variable creation. In many cases we can produce a reasonable guess for the parameter. For example, if we have data from a Normal distribution, and we wish to estimate the $\mu$ parameter, then a good starting value would be the *mean* of the data.
mu = pm.Uniform( "mu", 0, 100, testval = data.mean() )
For most parameters in models, there is a frequentist estimate of it. These estimates are a good starting value for our MCMC algorithms. Of course, this is not always possible for some variables, but including as many appropriate initial values is always a good idea. Even if your guesses are wrong, the MCMC will still converge to the proper distribution, so there is little to lose.
This is what using `MAP` tries to do, by giving good initial values to the MCMC. So why bother specifying user-defined values? Well, even giving `MAP` good values will help it find the maximum a posteriori estimate.
Also important, *bad initial values* are a source of major bugs in PyMC3 and can hurt convergence.
#### Priors
If the priors are poorly chosen, the MCMC algorithm may not converge, or at least have difficulty converging. Consider what may happen if the prior chosen does not even contain the true parameter: the prior assigns 0 probability to the unknown, hence the posterior will assign 0 probability as well. This can cause pathological results.
For this reason, it is best to carefully choose the priors. Often, lack of convergence or evidence of samples crowding to boundaries implies something is wrong with the chosen priors (see *Folk Theorem of Statistical Computing* below).
#### Covariance matrices and eliminating parameters
### The Folk Theorem of Statistical Computing
> *If you are having computational problems, probably your model is wrong.*
## Conclusion
PyMC3 provides a very strong backend to performing Bayesian inference, mostly because it has abstracted the inner mechanics of MCMC from the user. Despite this, some care must be applied to ensure your inference is not being biased by the iterative nature of MCMC.
### References
1. Flaxman, Abraham. "Powell's Methods for Maximization in PyMC." Healthy Algorithms. N.p., 9 02 2012. Web. 28 Feb 2013. <http://healthyalgorithms.com/2012/02/09/powells-method-for-maximization-in-pymc/>.
```
from IPython.core.display import HTML
def css_styling():
    """Read the notebook's custom stylesheet and return it as renderable HTML."""
    # Use a context manager so the file handle is closed deterministically;
    # the original `open(...).read()` left the handle open until GC.
    with open("../styles/custom.css", "r") as f:
        return HTML(f.read())
css_styling()
```
| github_jupyter |
# How to Win in the Data Science Field
## A. Business Understanding
This project aims to answer the question: "How does one win in the Data Science field?"
To gain insight on this main inquiry, I focused on addressing the following:
- Are there major differences in salary among the different data science roles?
- What are the essential technical skills to do well in data science?
- Does educational background play a huge part?
- How much does continuous learning on online platforms help?
## B. Data Understanding
For this project I have chosen to use the 2019 Kaggle ML & DS Survey raw data. I think this is a good dataset choice for the following reasons:
- The Kaggle Community is the biggest data science and machine learning community, therefore would have a good representation of data scientist professionals.
- It features a lot of relevant variables, from salary, demographics, to characteristics and habits of data science professionals in the community.
### Data Access and Exploration
The first step is to import all the needed libraries.
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import textwrap
%matplotlib inline
```
We then import the dataset to be used for the analysis.
```
# Import data
# Load the 2019 Kaggle ML & DS Survey multiple-choice responses
# (one row per respondent, one column per question/sub-question).
df = pd.read_csv('./multiple_choice_responses.csv')
df.head()
df.shape
```
There was a total of 19.7K data science professionals in the survey, and 246 fields corresponding to their responses to the survey. There are missing values, but we'll deal with them later depending on the analysis that will be implemented.
## C. Preparing the Data
### Cleaning the data
We do some necessary filtering to the data with the following rationale:
- Filtering among professionals / employed only because we are concerned about salary outcomes
- Focusing among US residents only to lessen the variation in pay due to region
- Focusing among professionals with salary >=30K USD only to most likely capture full-time employees
```
# Filter data among professionals only
# (Q5 = current role; drop students, the unemployed, and missing answers)
df = df[~df.Q5.isin(['Student', 'Not employed']) & df.Q5.notnull()]
# Filter data among residents of the US only
df = df[df.Q3.isin(['United States of America'])]
# Filter data among annual salary of >=30K
# (Q10 = yearly compensation bucket; exclude every bucket below $30,000)
df = df[~df.Q10.isin(['$0-999','1,000-1,999','2,000-2,999','3,000-3,999','4,000-4,999','5,000-7,499','7,500-9,999','10,000-19,999','20,000-29,999']) & df.Q10.notnull()]
# Recode some of the salary bins
# Collapse the two sparsest top buckets into a single '>= $300,000' bin.
df.loc[df['Q10'].isin(['300,000-500,000','> $500,000']), 'Q10'] = '>= $300,000'
# Shape of the dataframe
df.shape
```
From this filtering, we get a final sample of 2,013 US data science professionals earning an annual wage of >=30K USD.
### Missing Values
As this analysis is highly descriptive and I will not employ any statistical modelling, I will address the missing values by simply dropping them from the computed percentages.
### Function Creation
I created a few helper functions for charts to be used all throughout the analysis.
Most of my charts are going to be bar plots and heatmaps. I created the functions depending on the question type (single-response and multiple-response variables).
```
def barplots_single_answer(q_number, x_title, y_title, chart_title, order=None):
    '''
    Draw a horizontal bar plot of answer frequencies (in %) for a
    single-answer survey question, using the module-level dataframe `df`.
    INPUT:
    q_number - question number for the variable of interest. It should be a single-answer question.
    x_title - desired title of the x-axis
    y_title - desired title of the y-axis
    chart_title - desired main title
    order - desired ordering of the bars (defaults to descending according to frequency of answers)
    OUTPUT:
    A barplot that shows the frequency in % for the variable of interest
    This function prepares the data for the visualization and draws the bar plot.
    '''
    # Drop missing answers once instead of re-filtering df twice
    # (the original repeated df[pd.notnull(df[q_number])][q_number]).
    answers = df[q_number].dropna()
    cat_values = round((answers.value_counts() / len(answers)) * 100, 1)
    cat_values = cat_values.reset_index().rename(columns={'index': q_number, q_number: 'pct'})
    f, ax = plt.subplots(figsize=(8, 8))
    sns.barplot(x='pct', y=q_number, data=cat_values, color='dodgerblue', order=order)
    ax.set_xlabel(x_title)
    ax.set_ylabel(y_title)
    plt.title(chart_title, fontsize=14, fontweight='bold')
    print(cat_values)
def barplots_heatmap_single_answer(q_number, bar_chart_title, heatmap_title, order_rows = False):
    '''
    Plot, side by side, the answer-frequency bar chart for a single-answer
    question and a heatmap of salary (Q10) normalized within each answer.
    Uses the module-level dataframe `df`.
    INPUT:
    q_number - question number for the variable of interest. It should be a single-answer question.
    bar_chart_title - desired title of the frequency bar chart
    heatmap_title - desired title of the heatmap chart
    order_rows - desired sorting of the rows (will default to descending according to frequency of answers)
    OUTPUT:
    Two charts: A barplot that shows the frequency in % for the variable of interest, and a heatmap
    that visually correlates the variable of interest with salary range.
    Table reference for the percentages
    This function prepares the data for the visualization and provides the two visualizations specified.
    '''
    # Value count for the variable of interest
    cat_values = df[pd.notnull(df[q_number])][q_number].value_counts()
    # Set a threshold of 20 records for category to be included in plotting, otherwise it will distort the normalized heatmap
    cat_values = cat_values[cat_values>=20]
    # Convert counts to percentages of non-null answers, one decimal place.
    cat_values = round((cat_values/len(df[pd.notnull(df[q_number])][q_number])) * 100,1)
    if(order_rows == False):
        cat_values = cat_values  # keep the default descending-frequency order
    else:
        cat_values = cat_values.reindex(index = order_rows)
    cat_values = cat_values.reset_index().rename(columns = {'index':q_number, q_number:'pct'})
    # Sort order for the salary bins
    order_col = ['30,000-39,999','40,000-49,999','50,000-59,999','60,000-69,999','70,000-79,999',
                 '80,000-89,999','90,000-99,999','100,000-124,999','125,000-149,999',
                 '150,000-199,999','200,000-249,999', '250,000-299,999','>= $300,000']
    y_labels = cat_values[q_number]
    # Crosstabs for the salary and variable of interest
    # (normalize='index' makes each answer row sum to 1 across salary bins)
    crosstab = pd.crosstab(df[q_number],df['Q10'], normalize='index')
    crosstab = crosstab.reindex(order_col, axis="columns")
    # Keep heatmap rows aligned with the bar-chart order.
    if(order_rows == False):
        crosstab = crosstab.reindex(y_labels, axis="rows")
    else:
        crosstab = crosstab.reindex(order_rows, axis="rows")
    # Set-up subplots
    fig = plt.figure(figsize=(14,6))
    grid = plt.GridSpec(1, 10, wspace=10, hspace=1)
    plt.subplot(grid[0, :3])
    # Left plot (barplot)
    ax1 = sns.barplot(x = 'pct', y = q_number, data=cat_values, color='dodgerblue', order=None)
    plt.title(bar_chart_title, fontsize = 14, fontweight ='bold')
    ax1.set_xlabel('Percentage %')
    ax1.set_ylabel('')
    # Text-wrapping of y-labels
    f = lambda x: textwrap.fill(x.get_text(), 27)
    ax1.set_yticklabels(map(f, ax1.get_yticklabels()))
    # Right plot (heatmap)
    plt.subplot(grid[0, 4:])
    ax2 = sns.heatmap(crosstab, cmap="Blues", cbar=False)
    plt.title(heatmap_title, fontsize = 14, fontweight ='bold')
    ax2.set_xlabel('Yearly Salary')
    ax2.set_ylabel('')
    ax2.set_yticklabels(map(f, ax2.get_yticklabels()))
    print(cat_values)
def barplots_heatmap_multi_answer(multi_question_list, bar_chart_title, heatmap_title, order_rows = False):
    '''
    Same as barplots_heatmap_single_answer, but for multiple-answer questions
    where each possible answer lives in its own column (Qxx_Part_n).
    Uses the module-level dataframe `df`.
    INPUT:
    multi_question_list - a list of fields containing the response for a multiple answer-type question
    bar_chart_title - desired title of the frequency bar chart
    heatmap_title - desired title of the heatmap chart
    order_rows - desired sorting of the rows (will default to descending according to frequency of answers)
    OUTPUT:
    Two charts: A barplot that shows the frequency in % for the variable of interest, and a heatmap
    that visually correlates the variable of interest with salary range.
    Table reference for the percentages
    This function prepares the data for the visualization and provides the two visualizations specified.
    '''
    multi_question = multi_question_list
    # Stack the value counts of every answer column into one Series
    # (each Qxx_Part_n column holds a single answer label or NaN).
    df_store = []
    for question in (multi_question):
        df_temp = df[question].value_counts()
        df_store.append(df_temp)
    df_multi = pd.concat(df_store)
    df_multi = pd.DataFrame(df_multi).reset_index().rename(columns = {'index':multi_question[0], 0:'pct'})
    # Drop answers with fewer than 20 records (they would distort the normalized heatmap).
    df_multi = df_multi[df_multi['pct']>=20]
    # NOTE: percentages are of total selections, not of respondents — a
    # respondent picking several answers is counted once per answer.
    df_multi['pct'] = round(df_multi['pct']/sum(df_multi['pct']) * 100,1)
    if(order_rows == False):
        df_multi = df_multi.sort_values('pct', ascending=False)
    else:
        df_multi = df_multi.reindex(index = order_rows)
    # Sort order for the salary bins
    order_col = ['30,000-39,999','40,000-49,999','50,000-59,999','60,000-69,999','70,000-79,999',
                 '80,000-89,999','90,000-99,999','100,000-124,999','125,000-149,999',
                 '150,000-199,999','200,000-249,999', '250,000-299,999','>= $300,000']
    y_labels = df_multi[multi_question[0]]
    # Crosstabs for the salary and variable of interest
    df_store_xtab = []
    for question in (multi_question):
        df_temp_xtab = pd.crosstab(df[question],df['Q10'], normalize='index')
        df_store_xtab.append(df_temp_xtab)
    df_multi_xtab = pd.concat(df_store_xtab)
    df_multi_xtab = df_multi_xtab.reindex(order_col, axis="columns")
    # Keep heatmap rows aligned with the bar-chart order.
    if(order_rows == False):
        df_multi_xtab = df_multi_xtab.reindex(y_labels, axis="rows")
    else:
        df_multi_xtab = df_multi_xtab.reindex(order_rows, axis="rows")
    # Set-up subplots
    #fig = plt.figure(figsize=(14,6))
    fig = plt.figure(figsize=(14,8))
    grid = plt.GridSpec(1, 10, wspace=10, hspace=1)
    plt.subplot(grid[0, :3])
    # Left plot (barplot)
    ax1 = sns.barplot(x = 'pct', y = multi_question[0], data=df_multi, color='dodgerblue', order=None)
    plt.title(bar_chart_title, fontsize = 14, fontweight ='bold')
    ax1.set_xlabel('Percentage %')
    ax1.set_ylabel('')
    # Text-wrapping of y-labels
    f = lambda x: textwrap.fill(x.get_text(), 27)
    ax1.set_yticklabels(map(f, ax1.get_yticklabels()))
    # Right plot (heatmap)
    plt.subplot(grid[0, 4:])
    ax2 = sns.heatmap(df_multi_xtab, cmap="Blues", cbar=False)
    plt.title(heatmap_title, fontsize = 14, fontweight ='bold')
    ax2.set_xlabel('Yearly Salary')
    ax2.set_ylabel('')
    ax2.set_yticklabels(map(f, ax2.get_yticklabels()))
    print(df_multi)
```
## D. Analysis
### Question 1: Are there major differences in salary among the different data science roles?
We first look at the salary distribution of the sample. Most of the data science professionals have salaries that fall within the $100K-200K range.
#### Chart 1: Salary Distribution (Q10) - Bar Chart
```
# Chart 1: distribution of annual salary (Q10), bars ordered from lowest to highest bin.
barplots_single_answer('Q10', 'Percentage %', 'Salary Range', 'Annual Salary Distribution',
                       ['30,000-39,999','40,000-49,999','50,000-59,999','60,000-69,999','70,000-79,999',
                        '80,000-89,999','90,000-99,999','100,000-124,999','125,000-149,999',
                        '150,000-199,999','200,000-249,999', '250,000-299,999','>= $300,000'])
```
#### Chart 2: Data Practitioners Distribution (Q5) - Bar Chart
```
# Chart 2: role distribution (Q5) with a per-role salary heatmap.
barplots_heatmap_single_answer('Q5', 'Current Data Role (%)', 'Annual Salary by Current Data Role')
```
Interpretation:
- Data Scientists are heavy on the 100K-200K USD range which reflects our entire Kaggler sample. This makes sense because Data Scientist is the top profession at 34%.
- There is an obvious discrepancy between a data scientist and a data analyst salary, with the former showing a heavier concentration on the 100K-200K USD range, and the latter somewhere within 60K-125K. It seems that data scientists are paid much more than analysts.
- Other professions such as Statisticians and Database Engineers tend to have more variation in pay, while Data Engineers are more concentrated in the 120K-125K range.
### Question 2: What are the essential technical skills to do well in data science?
While the questionnaire is very detailed in terms of the technical skills asked among the Kagglers, I decided to focus on a few main items (so as not to bore the readers):
- "What programming languages do you use on a regular basis?"
- From the above question, I derive how many programming languages they regularly use
- Primary data analysis tools used
#### Chart 3: Programming Languages Used
```
# Chart 3: programming languages used regularly (Q18, multiple-answer) vs. salary.
barplots_heatmap_multi_answer(['Q18_Part_1', 'Q18_Part_2', 'Q18_Part_3', 'Q18_Part_4', 'Q18_Part_5', 'Q18_Part_6', 'Q18_Part_7', 'Q18_Part_8',
                               'Q18_Part_9', 'Q18_Part_10', 'Q18_Part_11', 'Q18_Part_12'],
                              'Programming Languages Used (%)',
                              'Annual Salary by Programming Languages Used',
                              order_rows = False)
```
Interpretation:
- Python is the most popular language; SQL and R are also popular
- Software engineering-oriented languages such as Java, C++, and C have more dense representation in the 150K-200K range.
- Other noteworthy languages that relate to higher pay are Matlab, Typescript, and Bash.
I also looked at the percentage of the sample who do not code at all:
```
# How many do not code at all?
# Q18_Part_11 appears to be the "None" option of the languages question —
# TODO confirm against the survey schema (it is excluded from lang_list below).
df['Q18_Part_11'].value_counts()/len(df)*100
```
Only a small subset of the population does not code, which is not surprising given that these are Kagglers.
I also ran an analysis to check how many programming languages do these data science professionals use:
```
# Language answer columns, excluding Q18_Part_11 (presumably the "None" option — verify).
lang_list = ['Q18_Part_1', 'Q18_Part_2', 'Q18_Part_3', 'Q18_Part_4', 'Q18_Part_5', 'Q18_Part_6', 'Q18_Part_7', 'Q18_Part_8',
             'Q18_Part_9', 'Q18_Part_10','Q18_Part_12']
order_col = ['30,000-39,999','40,000-49,999','50,000-59,999','60,000-69,999','70,000-79,999',
             '80,000-89,999','90,000-99,999','100,000-124,999','125,000-149,999',
             '150,000-199,999','200,000-249,999', '250,000-299,999','>= $300,000']
# Number of non-null language answers per respondent = languages used.
df['Count_Languages'] = df[lang_list].apply(lambda x: x.count(), axis=1)
# Group by salary range, get the average count of programming language used
table_lang_salary = df[['Count_Languages','Q10']].groupby(['Q10']).mean()
table_lang_salary = table_lang_salary.reindex(order_col, axis="rows").reset_index()
# Average number of programming languages used
table_lang_salary['Count_Languages'].mean()
```
On the average, they use 2-3 languages.
But how does this correlate with salary? To answer this question, I created this bar chart:
#### Chart 4: Number of Programming Languages Used
```
# Chart 4: average number of languages used within each salary bin.
f, ax = plt.subplots(figsize=(5,8))
ax = sns.barplot(x='Count_Languages', y="Q10",
                 data=table_lang_salary, color='dodgerblue')
plt.title('Salary Range vs. \n How Many Programming Languages Used', fontsize = 14, fontweight ='bold')
ax.set_xlabel('Avg Languages Used')
ax.set_ylabel('Annual Salary Range')
```
Interpretation:
Plotting the number of languages used according to salary range, we see that the number of languages used tend to increase as pay increases — up to the 125K-150K point. So yes, it may be worth learning more than 1.
Apart from coding, I also looked at other tools that data science professionals use based on this question:
"What is the primary tool that you use at work or school to analyze data?"
#### Chart 5: Primary Data Analysis Tools
```
# Chart 5: primary data-analysis tool (Q14) vs. salary.
barplots_heatmap_single_answer('Q14', 'Primary Data Analysis Tool (%)', 'Annual Salary by Data Analysis Tool')
```
Interpretation:
- Local development environments are the most popular tools with half of the sample using it.
- Cloud-based software users have a large salary leverage though - those who use it appear to have a higher earning potential, most likely at 150K-200K, and even a high concentration of professionals earning more than 300K USD.
- There is a large variation in pay among basic and advanced statistical software users.
### Question 3: Does educational background play a huge part?
#### Chart 6. Highest level of educational attainment (Q4) - Bar chart and salary heatmap side by side
```
# Chart 6: highest education (Q4) vs. salary; rows ordered from most to least formal education.
barplots_heatmap_single_answer('Q4', 'Highest Educational Attainment (%)', 'Annual Salary by Educational Attainment',
                               order_rows=['Doctoral degree', 'Master’s degree', 'Professional degree', 'Bachelor’s degree',
                                           'Some college/university study without earning a bachelor’s degree'])
```
Interpretation:
- Data science professionals tend to be a highly educated group, with 72% having either a Master’s Degree or a PhD.
- The salary heatmaps do not really show anything remarkable, except that Professional Degrees have a high concentration in the 150K-250K USD bracket. This group only constitutes 1.3% of the sample, hence I would say this is inconclusive.
### Question 4: How much does continuous learning on online platforms help?
To answer this question, I referred to these items in the survey:
- "On which platforms have you begun or completed data science courses?"
- "Who/what are your favorite media sources that report on data science topics?"
First, I looked at the online platforms and computed for the percentage of those who learned through this medium (excluding formal university education):
```
# Compute for Percentage of Kagglers who learned through online platforms
platform_list = ['Q13_Part_1', 'Q13_Part_2', 'Q13_Part_3', 'Q13_Part_4', 'Q13_Part_5', 'Q13_Part_6',
'Q13_Part_7', 'Q13_Part_8', 'Q13_Part_9', 'Q13_Part_12']
df['Count_Platform'] = df[platform_list].apply(lambda x: x.count(), axis=1)
len(df[df['Count_Platform'] > 0]) / len(df['Count_Platform'])
```
Interpretation: A stunning majority or 82% learn data science from these platforms.
On the specific online platforms:
#### Chart 7. Platforms where learn data science (Q13) - Bar chart and salary heatmap side by side
```
# Chart 7: online learning platforms (Q13, multiple-answer) vs. salary.
barplots_heatmap_multi_answer(['Q13_Part_1', 'Q13_Part_2', 'Q13_Part_3', 'Q13_Part_4', 'Q13_Part_5', 'Q13_Part_6',
                               'Q13_Part_7', 'Q13_Part_8', 'Q13_Part_9', 'Q13_Part_11', 'Q13_Part_12'],
                              'Platforms Used to Learn Data Science(%)',
                              'Annual Salary by Platforms Used')
```
Interpretation:
- Coursera is by far the most popular, followed by Datacamp, Udemy, and Kaggle Courses.
- Interestingly, Fast.ai skewed heavily on the higher income levels 125K-150K.
- DataQuest on the other hand are much more spread over the lower and middle income levels, which suggests that beginners tend to use this site more.
Apart from online courses, I also looked at other online media sources based on this question:
"Who/what are your favorite media sources that report on data science topics?"
#### Chart 8. Favorite Media Sources (Q12) - Bar chart and salary heatmap side by side
```
# Chart 8: favorite data science media sources (Q12, multiple-answer) vs. salary.
barplots_heatmap_multi_answer(['Q12_Part_1', 'Q12_Part_2', 'Q12_Part_3', 'Q12_Part_4', 'Q12_Part_5', 'Q12_Part_6',
                               'Q12_Part_7', 'Q12_Part_8', 'Q12_Part_9', 'Q12_Part_10', 'Q12_Part_11', 'Q12_Part_12'],
                              'Favorite Data Science Media Sources (%)',
                              'Annual Salary by Media Sources',
                              order_rows = False)
```
Interpretation:
- Blogs are most popular with 21% choosing this as their favorite data science topic source.
- I did not see much pattern from the salary heatmap — most are just bunched within the 100K-200K USD range.
- Curiously, Hacker News appears to have more followers on the higher end with 150K-200K salaries.
## Conclusion
### To win in the data science field (AND if you define winning as having a high pay):
- Code! Learning more languages will probably help. Apart from Python and R consider adding other non-data science languages such as C++, Java, and Typescript into your toolkit.
- Cloud-based technologies are worth learning. Get ready to explore those AWS, GCP, and Azure platforms for big data.
- Continuously upskill and update through MOOCs and online courses, and through media such as blogs and technology news.
| github_jupyter |
# Session 1: Introduction to Python (Basics)
(A tutorial prepared by K. Indireshkumar of SEAS Computing)
## What is Python?
A general purpose programming language conceived first in late 1980s by Guido van Rossum.
Why python:
* Easy to learn
* Free software
* Active and large development community
* Large number of modules in every conceivable field
* Fast development cycle
* Program in any style: procedural, object oriented, functional
* Dynamic typing; No pointers
### Ipython Notebook:
This presentation is an ipython notebook. It interweaves the presentation (markdown cell) with python code (code cell) and code output. Code can be executed in situ. Presentation can include mathematical symbols via Latex!
### Suggestions for this tutorial:
If you want to modify any code and run it, open a new code cell, copy the existing code into that cell and modify that. Another option would be to make a copy of this notebook and work with the copy.
## Some essential python "family" of tools
* Numpy -- greatly expands numerics; optimized routines for performance
* Matplotlib --- plotting
* Pandas --- high performance data structures (for data intensive computing)
* Sympy --- symbolic mathematics and computer algebra
* Ipython -- Excellent interactive interface (Ipython Notebook is part of this)
* Nose --- A testing framework for python
* Scipy -- A vast array of scientific computing tools going beyond numpy. Includes all of the above!
* SciKits -- Add-on packages to Scipy (some still under development) covering differenct application areas.
* IDEs: IDLE, Spyder, Eclipse, Pycharm
## Some Preliminaries:
* Python is an interpreted language
* Every line is interpreted as code unless it is a designated comment
* Comments are preceded by #
* Indentation matters
* Lists, arrays, etc. start with index 0 (Matlab starts at 1)
* Python is significantly different from the "c family" of languages (c, c++, and java)
## Python Language fundamentals:
* Variables and types -- start with a letter and can contain a-z, A-Z, 0-9.
* Variables are not statically typed.
* operators: `+`, `-`, `*`, `/`, `%` (modulo), `**` (exponentiation)
* logical operators or, and, not
* fundamental types: int, float, bool, complex (j used)
* Strings: Eg: 'This is a string' or "This is a string"
```
# The canonical first program.
print('Hello World!')
#Python as calculator
3650*23
```
### Strings ###
One of python's main strengths is dealing with strings.
```
x="Hello World" # or 'Hello World'
#help("string") # help on string objects
#dir(x) # help on string objects
#x.swapcase()
#x.find('ld')
#x.replace("ello","ELLO")
#x.split("o")
x.split("o",1)  # split on the first "o" only
x="Hello World"
#len(x)
y="Hello World"
print(len(x),len(y))
# "+" concatenates strings
y=x+", I hope you are having a good time"
y
"z" in x # Boolean (is z in x?)
"H" in x # Boolean (is H in x?)
```
## Collections:
### List -- ordered collection of objects; can be changed; enclosed between [ ]
```
#help(list) # Documentation
#list.__dict__ # Documentation
# Build a list by appending one element at a time.
ll=[]
ll.append(1)
ll.append(0)
ll.append(0)
#direct list creation
lm=[0,5,0]
ln=[7,0,0]
#print(ll)
print(ll,lm,ln)
#collection of multiple lists
pp=ll,lm,ln #tuple
pq=ll+lm+ln  # "+" concatenates lists
print(pp,pq)
type(pp)
ll[:0]=[1]  # slice assignment: insert 1 at the front
#ll[0]=1
#print(ll)
#del ll[0]
#print(ll)
#ll[-1]
#what methods in list?
#list.__dict__
#dir([ll])
#help(list) #Lots of details
#help(dir)
#len(ll)
#ll.sort()
print(ll)
ll.sort()  # in-place ascending sort
print(ll)
#information about the list type's special (dunder) methods
#Ref: https://docs.python.org/3/reference/datamodel.html
list.__dict__
```
### More on lists
```
#List can be created in two ways
# First way:
names1=["John","Jill","Braginskii","Krauthenhammer","Gabriella"]
#Second way: Create an empty list and populate
names2=[] # Empty list
names2.append("John")
names2.append("Jill")
print(names1,names2)
len(names1),len(names1[0])
names1.index("Braginskii")  # position of the first match
names1[0]="Luke"  # replace an element in place
print(names1)
names1.insert(0,'John')  # insert at the front; everything shifts right
print(names1)
names1[0],names1[1]=names1[1],names1[0]  # swap via tuple unpacking
print(names1)
'Donald' in names1, 'Jill' in names1
names1[2:]  # slice from index 2 to the end
```
## Control of Flow (quick intro): ##
### The "if" statement ###
```
#Simple if statement
#Note the indentation and colon
name_of_interest="Braginskii"
if name_of_interest in names1:
    print(names1.index(name_of_interest))

#Nested if statements; Note the indentations
name_of_interest="Braginskii"
if name_of_interest in names1:
    # Bug fix: the original `if "b" or "B" in name_of_interest:` parsed as
    # `("b") or ("B" in name_of_interest)`, which is always true because the
    # non-empty string "b" is truthy. Test each letter's membership explicitly.
    if "b" in name_of_interest or "B" in name_of_interest:
        print("This name has the consonant B")

#Nested if statements (including else)
#name_of_interest="Bieber"
name_of_interest="Braginskii"
if name_of_interest in names1:
    if "b" in name_of_interest.lower():
        print("This name has the consonant B")
    else:
        print("This name does not have the consonant B")
else:
    print("This name is not in the list")
```
### The "for" statement ###
```
names1=["John","Jill","Braginskii","Krauthenhammer","Gabriella"]
# Again note indentation and colon
for name in names1:
    print(name)

# Count the number of items in a list; Recall the len() function does this
names1=["John","Jill","Braginskii","Krauthenhammer","Gabriella"]
count=0
for name in names1:
    count+=1 # means count=count+1
print(count,",",len(names1))

# A "complex example": Count the number of vowels in each name in a list
names1=["John","Jill","Braginskii","Krauthenhammer","Gabriella"]
# For each name in the list, count the number of vowels and form a list
vowels=["a","e","i","o","u"]
vowel_count=[]
for name in names1:
    i=0
    for letter in name:
        if letter.lower() in vowels:
            i+=1
    vowel_count.append(i)
print(vowel_count)

# Same nested-loop pattern, but counting letters that are NOT vowels.
consonant_count=[]
for name in names1:
    i=0
    for letter in name:
        if letter.lower() not in vowels:
            i+=1
    consonant_count.append(i)
print(consonant_count)

#my way
# Shortcut: consonants = total letters minus the vowel counts computed above.
consonant_count = []
i = 0
for name in names1:
    consonant_count.append(len(name) - vowel_count[i])
    i +=1
print(consonant_count)
```
## Functions (quick intro): ##
In the above example, what if we want it to work on any list of names?
```
def count_vowels(names1):
    """Return a list with the number of vowels in each name of *names1*.

    Counting is case-insensitive; y is not treated as a vowel.
    """
    vowel_set = {"a", "e", "i", "o", "u"}
    # One generator-sum per name replaces the original nested counting loops.
    return [sum(1 for ch in name if ch.lower() in vowel_set) for name in names1]
count_vowels(names1)  # apply to the sample name list defined earlier
#Function to convert temperature from Fahrenheit to Celsius
def tempFtoC(tempinF=212.0):
    """Return *tempinF* (degrees Fahrenheit) converted to degrees Celsius."""
    # Same arithmetic as the original, returned directly instead of via a local.
    return (tempinF - 32.0) * (5.0 / 9.0)

tempcelsius = tempFtoC(14)
tempcelsius
```
### Lambda Expressions
```
# Fahrenheit-to-Celsius conversion expressed as a lambda (anonymous function).
ftoc = lambda x: (x - 32.0) * (5.0 / 9.0)
ftoc(14)
```
### List Comprehension
List creation using for loop
```
#Squaring a few numbers
x=[8,10,15,9,2]
y=[]
for i in x:
    if i>2 and i<15:
        y.append(i**2)
print(y)

#Squaring again, via list comprehension (if not careful, code can become hard to read)
# Equivalent one-liner: filter, then square, in a single expression.
x=[8,10,15,9,2]
y=[i**2 for i in x if i>2 and i<15]
print(y)
```
## Modules ##
Much of the functionality provided via modules. Let's check what modules are loaded. To do this, we will start by importing the 'sys' module.
```
import sys # one way of importing a module
#What modules are available?
# Intersect the loaded-modules table with the global namespace to list
# the modules actually imported into this session.
modulenames = set(sys.modules)&set(globals())
modulenames
```
One of the most useful modules is numpy. This allows us to perform sophisticated mathematical operations efficiently.
```
from numpy import *
# Over the reals, log10 of a negative number is undefined:
# numpy returns nan (and emits a RuntimeWarning).
log10(-1)
#?log10
#If the above does not work, you have to install numpy (also scipy as it needed next)
#!pip install numpy scipy
```
The above is convenient. However, there is a disadvantage. If several modules provide variables with same names (either same functionality or different), it is impossible to distinguish between them with the above way of importing. We need a way that keeps variables separate among modules (i.e. limit the scope of variables). The following allows that.
```
# Namespaced import: scipy's names stay behind the spy. prefix,
# avoiding the name collisions of `from numpy import *` above.
# NOTE(review): scipy.log10 was deprecated/removed in newer SciPy releases —
# confirm the installed version before relying on this.
import scipy as spy
x=spy.log10(-1)
x
```
Why is Scipy result not 'NaN'? Work in the complex plane (see wikipedia article on "Complex logarithm"). Also note:
```
pi/log(10) # Which pi and log are being used: from numpy or scipy?
```
### Tuple -- Just like lists; immutable; enclosed between ( )
```
#Tuple of days of a week
days=('Mon','Tue','Wed','Thu','Fri','Sat','Sun')
#convert to list
days_list=list(days)
#print(days, days_list)
del days_list[0]  # lists are mutable, so deletion works
print(days_list)
#del days[0]  # would raise TypeError: tuples are immutable
#days_list.sort()
#days_list
#print(dir(days))
```
### Dictionary -- container with key, value pairs; enclosed between { }
```
# Dictionary of key/value pairs, built with a literal instead of
# key-by-key assignment (same final mapping, same insertion order).
number2alphabet = {'1': 'A', '2': 'B', '3': 'C'}
#print(dir(number2alphabet))
number2alphabet.values()
```
### Exercises
1. Starting with a list [1,3,5], produce a list [1,2,3,4].
2. Write a function to count the consonants (all letters except for vowels) in the list:
names=["John","Jill","Braginskii","Krauthenhammer","Gabriella"]
```
from IPython.core.display import HTML
def css_styling():
    """Read the notebook's custom stylesheet and return it as renderable HTML."""
    # Use a context manager so the file handle is closed deterministically;
    # the original `open(...).read()` left the handle open until GC.
    with open("./custom.css", "r") as f:
        return HTML(f.read())
css_styling()
# Exercise 1: starting from [1, 3, 5], produce [1, 2, 3, 4].
vector = [1, 3, 5]
vector[1:1] = [2]   # splice 2 in at index 1 -> [1, 2, 3, 5]
vector[-1] = 4      # replace the trailing 5  -> [1, 2, 3, 4]
print(vector)
```
## Resources:
https://www.python.org/
https://docs.python.org/3/tutorial/index.html
Numerics:
http://www.numpy.org/
https://jrjohansson.github.io/
https://scipy.org/
https://docs.scipy.org/doc/numpy-dev/user/numpy-for-matlab-users.html
http://www.scipy.org/getting-started.html
Data analysis/mining:
https://pandas.pydata.org/
http://scikit-learn.org/stable/
Plotting/Visualization:
https://matplotlib.org/
Ipython notebook:
http://ipython.org/ipython-doc/dev/notebook/
http://nbviewer.ipython.org/
| github_jupyter |
# classification models using tensorflow
```
# How to avoid overfitting using callback techniques and Dropout layers.
# A Dropout layer randomly disables a fraction of units each step -> avoids overfitting.
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# NOTE(review): hard-coded absolute Windows path -- replace with a relative
# path (or a config value) for portability.
df=pd.read_csv("C:\\Users\\st\\Documents\\Tensorflow_Keras\\Tensorflow_Keras\\FINAL-TF2-FILES\\TF_2_Notebooks_and_Data\\DATA\\cancer_classification.csv")
df.describe().transpose()
# --- EDA ---
sns.countplot(x='benign_0__mal_1',data=df) #check whether the classes are balanced
df.corr()['benign_0__mal_1'].sort_values()
df.corr()['benign_0__mal_1'][:-1].sort_values().plot(kind='bar')
sns.heatmap(df.corr())
# Features / target as plain numpy arrays
X=df.drop('benign_0__mal_1',axis=1).values
y=df['benign_0__mal_1'].values
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=101)
# Scale features to [0, 1]; fit on the training split only to prevent data leakage
from sklearn.preprocessing import MinMaxScaler
scaler=MinMaxScaler()
X_train=scaler.fit_transform(X_train)
X_test=scaler.transform(X_test)
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense,Dropout
X_train.shape
# --- Model 1: no regularisation; 600 epochs will overfit ---
model =Sequential()
model.add(Dense(30,activation='relu'))
model.add(Dense(15,activation='relu'))
# binary classification problem -> single sigmoid output unit
model.add(Dense(1,activation='sigmoid'))
model.compile(loss='binary_crossentropy',optimizer='adam')
model.fit(x=X_train,y=y_train,epochs=600,validation_data=(X_test,y_test))
losses=pd.DataFrame(model.history.history)
losses.plot()  # validation loss diverging from training loss = overfitting
# --- Model 2: early stopping keyed on validation loss ---
model =Sequential()
model.add(Dense(30,activation='relu'))
model.add(Dense(15,activation='relu'))
model.add(Dense(1,activation='sigmoid'))
model.compile(loss='binary_crossentropy',optimizer='adam')
from tensorflow.keras.callbacks import EarlyStopping
help(EarlyStopping)
# Stop once val_loss has not improved (mode='min') for 25 consecutive epochs.
early_stop = EarlyStopping(monitor="val_loss",mode='min',verbose=1,patience=25)
model.fit(x=X_train,y=y_train,epochs=600,validation_data=(X_test,y_test),callbacks=[early_stop])
model_loss=pd.DataFrame(model.history.history)
model_loss.plot()
# --- Model 3: early stopping + Dropout layers to stop overfitting ---
# (Dropout is already imported above together with Dense.)
model =Sequential()
model.add(Dense(30,activation='relu'))
model.add(Dropout(0.5))  # drop 50% of units during training
model.add(Dense(15,activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1,activation='sigmoid'))
model.compile(loss='binary_crossentropy',optimizer='adam')
model.fit(x=X_train,y=y_train,epochs=600,validation_data=(X_test,y_test),callbacks=[early_stop])
model_loss=pd.DataFrame(model.history.history)
model_loss.plot()
# --- Evaluation on predicted classes ---
# Sequential.predict_classes() was removed in TensorFlow 2.6; threshold the
# sigmoid probabilities at 0.5 instead.
predictions=(model.predict(X_test) > 0.5).astype("int32")
from sklearn.metrics import classification_report, confusion_matrix
print(classification_report(y_test,predictions))
print(confusion_matrix(y_test,predictions))
```
| github_jupyter |
<a href="https://colab.research.google.com/github/GalinaZh/Appl_alg2021/blob/main/Applied_Alg_sem_9_intergal_task.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Занятие 9
# Прикладная алгебра и численные методы
## Численное интегрирование
```
import numpy as np
import sympy
import scipy.integrate
import pandas as pd
from google.colab import files
#!python -m pip install scipy --upgrade
# Show the library versions in use (helps reproduce the notebook's results).
sympy.__version__, scipy.__version__, np.__version__
```
## Численное интегрирование
Определенный интеграл функции $f(x)$ есть предел интегральных сумм
при стремлении к нулю диаметра разбиения (если этот предел
существует и конечен):
\begin{align}
&\int_a^b f(x)\,dx=\lim_{d\to0}\sum_{k=1}^n f(\xi_k)\Delta_k,
\quad a=x_0<x_1<\ldots<x_n=b,\\
&\xi_k\in[x_{k-1};x_k],\Delta_k=x_k-x_{k-1},\ d=\max_{1\le k\le n}
\Delta_k.
\end{align}
## Метод трапеций
Приближенным значением интеграла $\int_a^b f(x)\,dx$ в методе
трапеций считается сумма площадей прямоугольных трапеций высоты
$h_k=x_k-x_{k-1}$ с основаниями длины $f(x_{k-1})$ и $f(x_k)$.
Площадь $S_k$ одной элементарной трапеции
\begin{align*}
S_k=\frac{f(x_{k-1})+f(x_k)}{2}h_k,
\end{align*}
сложив площади всех элементарных трапеций, получим
\begin{align*}
\int_a^b f(x)\,dx\approx\sum_{k=1}^n \frac{f(x_{k-1})+f(x_k)}{2}h_k.
\end{align*}
Пусть задана равномерная сетка $x_k=a+hk$, $k=0,\ldots,n$,
$h=(b-a)/n$, тогда
\begin{align*}
&\int_a^b f(x)\,dx\approx \frac{h }{2}\sum_{k=1}^n
(f(x_{k-1})+f(x_k))=h\Biggl(\frac{f(x_0)+ f(x_n)}{2}+\sum_{k=1}^{n-1}
f(x_k)\Biggr),
\end{align*}
погрешность этого приближения
\begin{align*}
R=-h^2\frac{b-a}{12}f''(\xi),\quad\xi\in[a;b].
\end{align*}
Если функция $f(x)$ имеет ограниченную на $[a;b]$ вторую
производную, то
\begin{align*}
|R|\le h^2M\frac{b-a}{12},\quad M=\max_{[a;b]}f''(x).
\end{align*}
## Метод Симпсона
Приближенным значением интеграла $\int_a^b f(x)\,dx$ в методе
Симпсона считается сумма площадей криволинейных трапеций ширины
$h_k=x_k-x_{k-1}$, ограниченных сверху параболой, проходящей через
точки $A_k(x_{k-1};f(x_{k-1}))$, $B_k(x_{k};f(x_k))$ и
$C_k(x_{k+1};f(x_{k+1}))$.
Обозначим $f_{-1}=f(-h)$,
$f_{0}=f(0)$,$f_{1}=f(h)$.
Вычислим вначале площадь одной элементарной криволинейной трапеции
при $x_{k-1}=-h$, $x_{k}=0$, $x_{k+1}=h$
\begin{align*}
S=\frac{h}{3}(f_{-1}+4f_0+f_1).
\end{align*}
Пусть задана равномерная сетка $x_k=a+hk$, $k=0,\ldots,2n$,
$h=(b-a)/2n$, тогда
\begin{align*}
&\int_a^b f(x)\,dx\approx \frac{h }{3}\sum_{k=1}^n
(f_{k-1}+4f_k+f_{k+1})= \frac{h }{3}\Biggl(f_0+f_{2n}+4\sum_{k=1}^n
f_{2k-1}+2\sum_{k=1}^{n-1} f_{2k}\Biggr),
\end{align*}
погрешность этого приближения
\begin{align*}
R=-h^4\frac{b-a}{180}f^{(4)}(\xi),\quad\xi\in[a;b].
\end{align*}
Если функция $f(x)$ имеет ограниченную на $[a;b]$ производную
четвертого порядка, то
\begin{align*}
|R|\le h^4M\frac{b-a}{180},\quad M=\max_{[a;b]}f^{(4)}(x).
\end{align*}
## Уточнение результата численного интегрирования.
В случае, если ничего не известно о производных функции $f(x)$,
можно уточнять значение интеграла $\int_a^b f(x)\,dx$, сравнивая
результаты численного интегрирования $I_h$ и $I_{h/2}$ (шаг
разбиения соответственно $h$ и $h/2$).
Пусть требуется вычислить $\int_a^b f(x)\,dx$ с точностью $\varepsilon$,
тогда
\begin{align*}
\int_a^b f(x)\,dx\approx I_{h/2},\quad\mbox{ если }
|I_h-I_{h/2}|<\varepsilon,
\end{align*}
если же условие не выполняется, уменьшаем шаг разбиения и вычисляем
$I_{h/4}$. Процесс продолжаем до тех пор, пока не выполнится условие
\begin{align*}
|I_{h_k}-I_{h_{k+1}}|<\varepsilon,
\end{align*}
тогда считаем
\begin{align*}
\int_a^b f(x)\,dx\approx I_{h_{k+1}}.
\end{align*}
## Задание 1
Найти методом трапеций интеграл таблично заданной функции из файла f.xlsx
```
# Open the Colab file picker; the result maps filename -> raw file bytes.
uploaded = files.upload()
for fn in uploaded.keys():
  # Report each uploaded file and its size in bytes.
  print('User uploaded file "{name}" with length {length} bytes'.format(name=fn, length=len(uploaded[fn])))
```
## Задание 2
Найти интеграл задания 1 методом Симпсона.
```
```
## Задание 3
Вычислить приближенно интеграл
$$
\int_2^5\sin^3(2x - 1)\,dx
$$
используя scipy.integrate
```
```
## Задание 4
Вычислить аналитически интеграл
$$
\int_0^{\pi/3}\sin^5(2x)\,dx
$$
с помощью sympy, затем вычислить этот интеграл приближенно,
используя scipy.integrate (scipy.integrate.quad и метод Ромберга).
```
```
## Задание 5
Вычислить интеграл задания 4 с помощью метода Симпсона из scipy, использовать равномерную сетку.
```
```
## Задание 6
Написать функцию, которая возвращает определенный интеграл, вычисленный методом трапеций с заданной точностью. Аргументы функции - имя интегрируемой функции, пределы интегрирования и точность. Для достижения заданной точности измельчать сетку X.
```
```
| github_jupyter |
## Section 7.0: Introduction to Plotly's Streaming API
Welcome to Plotly's Python API User Guide.
> Links to the other sections can be found on the User Guide's [homepage](https://plotly.com/python/userguide)
Section 7 is divided, into separate notebooks, as follows:
* [7.0 Streaming API introduction](https://plotly.com/python/intro_streaming)
* [7.1 A first Plotly streaming plot](https://plotly.com/python/streaming_part1)
Quickstart (initialize Plotly figure object and send 1 data point through a stream):
>>> import plotly.plotly as py
>>> from plotly.graph_objs import *
>>> # auto sign-in with credentials or use py.sign_in()
>>> trace1 = Scatter(
x=[],
y=[],
stream=dict(token='my_stream_id')
)
>>> data = Data([trace1])
>>> py.plot(data)
>>> s = py.Stream('my_stream_id')
>>> s.open()
>>> s.write(dict(x=1, y=2))
>>> s.close()
<hr>
Check which version is installed on your machine and please upgrade if needed.
```
# (*) Import plotly package
import plotly
# Check plotly version (if not latest, please upgrade)
plotly.__version__
```
<hr>
Plotly's Streaming API enables your Plotly plots to update in real-time, without refreshing your browser. In other words, users can *continuously* send data to Plotly's servers and visualize this data in *real-time*.
For example, imagine that you have a thermometer (hooked to an Arduino for example) in your attic and you would like to monitor the temperature readings from your laptop. Plotly together with its streaming API makes this project easy to achieve.
With Plotly's Streaming API:
> Everyone looking at a Plotly streaming plot sees the same data updating at the same time. Like all Plotly plots, Plotly streaming plots are immediately shareable by shortlink, embedded in website, or in an IPython notebook. Owners of the Plotly plot can edit the plot with the Plotly web GUI and all viewers will see these changes live.
And for the skeptical among us, *it's fast*: plots update up to 20 times per second.
In this section, we present examples of how to make Plotly streaming plots.
Readers looking for info about the nuts and bolts of Plotly's streaming API should refer to <a href="https://plotly.com/streaming/" target="_blank">this link</a>.
So, we first import a few modules and sign in to Plotly using our credentials file:
```
# (*) To communicate with Plotly's server, sign in with credentials file
import plotly.plotly as py
# (*) Useful Python/Plotly tools
import plotly.tools as tls
# (*) Graph objects
from plotly.graph_objs import *
import numpy as np # (*) numpy for math functions and arrays
```
##### What do Plotly streaming plots look like?
```
# Embed an existing Plotly streaming plot (account 'streaming-demos',
# figure id '6') directly into this notebook's output cell.
tls.embed('streaming-demos','6')
# Note that the time points correspond to the internal clock of Plotly's
# servers, that is UTC time.
```
Data is sent in real-time. <br>
Plotly draws the data in real-time. <br>
Plotly's interactivity happens in real-time.
##### Get your stream tokens
Making Plotly streaming plots requires no modifications to the sign in process; however, users must generate stream *tokens* or *ids*. To do so, first sign-in to <a href="https://plotly.com" target="_blank">plot.ly</a>. Once that is done, click on the *Settings* button in the upper-right corner of the page:
<img src="http://i.imgur.com/RNExysW.png" style="margin-top:1em; margin-bottom:1em" />
<p style="margin-top:1.5em;,margin-bottom:1.5em">Under the <b>Stream Tokens</b> tab, click on the <b>Generate Token</b> button:</p>
<img src="http://i.imgur.com/o5Uguye.png" />
And there you go, you have generated a stream token. Please note that:
> You must generate one stream token per **trace** in your Plotly streaming plot.
If you are looking to run this notebook with your own account, please generate 4 unique stream tokens and add them to your credentials file by entering:
>>> tls.set_credentials_file(stream_ids=[
"ab4kf5nfdn",
"kdf5bn4dbn",
"o72o2p08y5",
"81dygs4lcy"
])
where the `stream_ids` keyword argument is filled in with your own stream ids.
Note that, in the above, `tls.set_credentials()` overwrites the existing stream tokens (if any) but does not clear the other keys in your credentials file such as `username` and `api_key`.
Once your credentials file is updated with your stream tokens (or stream ids, a synonym), retrieve them as a list:
```
stream_ids = tls.get_credentials_file()['stream_ids']
```
We are now ready to start making Plotly streaming plots!
The content of this section has been divided into separate IPython notebooks as loading multiple streaming plots at once can cause performance slow downs on some internet connections.
Here are the links to the subsections' notebooks:
* [7.0 Streaming API introduction](https://plotly.com/python/intro_streaming)
* [7.1 A first Plotly streaming plot](https://plotly.com/python/streaming_part1)
In addition, here is a notebook of another Plotly streaming plot:
* <a href="http://nbviewer.ipython.org/gist/empet/a03885a54c256a21c514" target="_blank">Streaming the Poisson Pareto Burst Process</a> by
<a href="https://github.com/empet" target="_blank">Emilia Petrisor</a>
<div style="float:right; \">
<img src="http://i.imgur.com/4vwuxdJ.png"
align=right style="float:right; margin-left: 5px; margin-top: -10px" />
</div>
<h4>Got Questions or Feedback? </h4>
Reach us here at: <a href="https://community.plot.ly" target="_blank">Plotly Community</a>
<h4> What's going on at Plotly? </h4>
Check out our twitter:
<a href="https://twitter.com/plotlygraphs" target="_blank">@plotlygraphs</a>
```
from IPython.display import display, HTML
# Inject the notebook's custom web fonts and CSS theme into the page.
display(HTML('<link href="//fonts.googleapis.com/css?family=Open+Sans:600,400,300,200|Inconsolata|Ubuntu+Mono:400,700" rel="stylesheet" type="text/css" />'))
display(HTML('<link rel="stylesheet" type="text/css" href="http://help.plot.ly/documentation/all_static/css/ipython-notebook-custom.css">'))
! pip install publisher --upgrade
import publisher
# Publish this notebook to the Plotly documentation site under the given URL path.
publisher.publish(
    's7_streaming.ipynb', 'python/intro_streaming//', 'Getting Started with Plotly Streaming',
    'Getting Started with Plotly Streaming',
    title = 'Getting Started with Plotly Streaming',
    thumbnail='', language='python',
    layout='user-guide', has_thumbnail='false')
```
| github_jupyter |
# Data Visualization: Rules and Guidelines
> **Co-author**
- [Paul Schrimpf *UBC*](https://economics.ubc.ca/faculty-and-staff/paul-schrimpf/)
**Prerequisites**
- [Introduction to Plotting](../scientific/plotting.ipynb)
**Outcomes**
- Understand steps of creating a visualization
- Know when to use each of the core plots
- Introductory ability to make effective visualizations
## Outline
- [Data Visualization: Rules and Guidelines](#Data-Visualization:-Rules-and-Guidelines)
- [Introduction](#Introduction)
- [Steps to Creating Effective Charts](#Steps-to-Creating-Effective-Charts)
- [Visualization Types](#Visualization-Types)
- [Color in Plots](#Color-in-Plots)
- [Visualization Rules](#Visualization-Rules)
- [References](#References)
- [Exercises](#Exercises)
```
# Uncomment following line to install on colab
#! pip install qeds fiona geopandas xgboost gensim folium pyLDAvis descartes
import matplotlib.colors as mplc
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import statsmodels.formula.api as sm
from pandas_datareader import DataReader
%matplotlib inline
# activate plot theme
import qeds
qeds.themes.mpl_style();
```
## Introduction
An economist’s (or data scientist’s) job goes beyond simply learning new things using data and
theory.
Learning these new things is meant to help economists or data scientists communicate these ideas to others,
whether the medium is an academic paper or a business meeting.
One of the most effective mediums of communication is visualization.
Well-done visualizations can help your audience remember your message.
They accomplish this through at least two main channels:
1. Psychology researchers have observed [*picture superiority*](https://en.wikipedia.org/wiki/Picture_superiority_effect): the fact that images are more likely to be remembered than
words. While the reasons and extent of the effect are debated, the consensus view is that the effect exists.
How large might this effect be? In a paper by Defeyter, Russo, and McPartlin (2009), the authors found that participants were able to identify pictures they had previously studied for approximately 500 ms each with 75-85% accuracy but words with only a 55-65% accuracy.
1. Data visualizations help people walk through the logic you used to build the chart,
allowing them to reason through the argument and thus makes your claim more convincing.
In this lecture, we will discuss the process of creating a data visualization, ways to best ensure
successfully communication, and some general design guidelines.
Along the way, you will be introduced to many features of `matplotlib` that were not discussed in the
introductory lecture.
We won’t discuss most of these features, but we encourage you to read more in the
[online documentation](https://matplotlib.org/contents.html) if you have questions.
## Steps to Creating Effective Charts
Before we begin discussions of specific recommendations, it is helpful to agree on
the goal of a data visualization and a process that can be used to accomplish the goal.
As mentioned in the introduction, the purpose of a visualization is to facilitate the communication
of a message or idea.
We have found the following steps to be useful for achieving this goal:
1. Identify the message.
1. Describe your visualization.
1. Create a draft of the visualization (and verify your data!).
1. Fine tune the visualization details.
After discussing the role research plays in data visualization, we will use an example to provide
deeper context for the remaining steps.
**Step 0: Research**
Before we proceed, note that prior to reaching this process, you will have spent
a significant amount of time exploring the data and thinking about your overall message.
In fact, during the research step, you will almost certainly produce visualizations to help yourself
understand the data.
These visualizations can highlight outliers or unlikely combinations of variables.
For example, NYC publishes data on taxi pickups and dropoffs.
One might expect tips to be somewhat independent of whether someone pays cash or credit, but
the 75th percentile tip for cash payers is 0!
Because it’s unlikely that cash payers choose not to tip, one likely explanation
is a reporting issue in the data collection.
The steps and guidelines that follow may be helpful in the research process, but refining each of your exploratory visualizations to a publishable version is not practical.
Some of these recommendations will be specific to creating a visualization after you understand the story you’re telling.
### Example
The output of this example will be a reproduction of a visualization from
[this NYT article](https://www.nytimes.com/2019/01/11/upshot/big-cities-low-skilled-workers-wages.html).
This NYT article is based on research done by David Autor <sup>[1](#ely)</sup> , an
economist at MIT, and his co-authors.
Autor’s research investigates the observable changes over time between work opportunities
in rural and urban locations in the United States.
This particular graph explores how both college-educated workers and non-college-educated workers were able
to find higher-paying jobs by working in urban areas in the 20th century.
More recently, this *city wage premium* is still apparent for those
with college educations. However, for those without a college education, it has
disappeared.
#### Identify the Message
The first step to creating a visualization might feel a little obvious, but is
the most important step.
If you fail to choose a concise message, then you won’t be able to clearly communicate the idea.
In this example, we want people to be able to answer the question, “What happened to the
rural/urban wage gap for non-college-educated workers since the 1950s?”.
Part of what makes the answer interesting is that the wage gap changes are unique to non-college-educated workers; we will want to display changes in the wage gap for college-educated workers as well.
#### Visualize
Choosing the type of visualization that best illustrates your point is an important
skill to develop.
Using the wrong type of visualization can inhibit the flow of information from the
graph to your audience’s brain.
In our case, we need to display the relationship between population density (our measure of
rural/urban) and wages for different years and education levels.
Since the wage gap will be the main focus, we want to choose a visualization that highlights
this aspect of the data.
Scatter plots are one of the most effective ways to demonstrate the relationship of two
variables.
We will place the log of population density on the x-axis and the log of wages on the y-axis.
We will then need to find a way to demonstrate this for different years and education levels.
One natural solution is to demonstrate one of these variables using color and the other using
different subplots.
In the original article, they chose to highlight differences in college
education using color and time using subplots.
#### Visualization Draft
Drafting an early version of your visualization without concerning yourself about its aesthetics allows you to think about whether it is able to answer the proposed question.
Sometimes you’ll get to this step and realize that you need to go back to one of the previous
steps…
It’s ok to scrap what you have and restart at square one.
In fact, you will frequently do this, no matter how much experience you’ve developed.
In our own work, we’ve found that it’s not uncommon to discard ten or more versions of a graph before
settling on a draft we are happy with.
Below, we create a draft of our plot.
```
# Read in data (fetched over the network from the QuantEcon data mirror)
df = pd.read_csv("https://datascience.quantecon.org/assets/data/density_wage_data.csv")
df["year"] = df.year.astype(int) # Convert year to int
def single_scatter_plot(df, year, educ, ax, color):
    """Scatter log population density against log wages for one
    (year, education-group) slice of *df*, drawn on the given axis."""
    # Boolean-mask the rows for the requested year and education group.
    mask = (df["year"] == year) & (df["group"] == educ)
    subset = df[mask]
    subset.plot(kind="scatter", x="density_log", y="wages_logs",
                ax=ax, color=color)
    return ax
# Draft figure: one subplot per census year, college vs non-college points,
# sharing the y-axis so wage levels are comparable across years.
fig, ax = plt.subplots(1, 4, figsize=(16, 6), sharey=True)
for (i, year) in enumerate(df.year.unique()):
    single_scatter_plot(df, year, "college", ax[i], "b")
    single_scatter_plot(df, year, "noncollege", ax[i], "r")
    ax[i].set_title(str(year))
```
<a id='exercise-0'></a>
> See exercise 1 in the [*exercise list*](#exerciselist-0)
```
# Your code here
```
#### Fine-tune
Great! We have now confirmed that our decisions up until this point have made sense and that a
version of this graphic can successfully convey our message.
The last step is to clean the graph. We want to ensure that no features
detract or distract from our message.
Much of the remaining lecture will be dedicated to this fine-tuning, so we will postpone
presenting the details. However, the code we use to create the best version of this graphic is included below.
```
# Read in data (fetched over the network from the QuantEcon data mirror)
df = pd.read_csv("https://datascience.quantecon.org/assets/data/density_wage_data.csv")
df["year"] = df.year.astype(int) # Convert year to int
def single_scatter_plot(df, year, educ, ax, color):
    """Scatter log population density against log wages for one
    (year, education-group) slice of *df*, drawn on the given axis."""
    # Boolean-mask the rows for the requested year and education group.
    mask = (df["year"] == year) & (df["group"] == educ)
    subset = df[mask]
    subset.plot(kind="scatter", x="density_log", y="wages_logs",
                ax=ax, color=color)
    return ax
# Fine-tuned figure: one subplot per year, fixed colors per education group.
fig, ax = plt.subplots(1, 4, figsize=(16, 6))
colors = {"college": "#1385ff", "noncollege": "#ff6d13"}
for (i, year) in enumerate(df.year.unique()):
    single_scatter_plot(df, year, "college", ax[i], colors["college"])
    single_scatter_plot(df, year, "noncollege", ax[i], colors["noncollege"])
    ax[i].set_title(str(year))
bgcolor = (250/255, 250/255, 250/255)
fig.set_facecolor(bgcolor)
for (i, _ax) in enumerate(ax):
    # Label with words (only the leftmost panel carries the axis labels)
    if i == 0:
        _ax.set_xlabel("Population Density")
    else:
        _ax.set_xlabel("")
    # Turn off right and top axis lines
    _ax.spines['right'].set_visible(False)
    _ax.spines['top'].set_visible(False)
    # Don't use such a white background color
    _ax.set_facecolor(bgcolor)
    # Change bounds (data are in logs, so bounds are logs of round numbers)
    _ax.set_ylim((np.log(4), np.log(30)))
    _ax.set_xlim((0, 10))
    # Place ticks at log(round number) but label them with the raw number
    xticks = [10, 100, 1000, 10000]
    _ax.set_xticks([np.log(xi) for xi in xticks])
    _ax.set_xticklabels([str(xi) for xi in xticks])
    yticks = list(range(5, 32, 5))
    _ax.set_yticks([np.log(yi) for yi in yticks])
    if i == 0:
        _ax.set_yticklabels([str(yi) for yi in yticks])
        _ax.set_ylabel("Average Wage")
    else:
        _ax.set_yticklabels([])
        _ax.set_ylabel("")
# In-plot annotations replace a legend (coordinates are in log space)
ax[0].annotate("College Educated Workers", (np.log(75), np.log(14.0)), color=colors["college"])
ax[0].annotate("Non-College Educated Workers", (np.log(10), np.log(5.25)), color=colors["noncollege"]);
```
## Visualization Types
You have seen many kinds of visualizations throughout your life.
We discuss a few of the most frequently used visualization types and how they describe data below.
For a more complete list of visualization types, see the Duke library’s
[data visualization guide](https://guides.library.duke.edu/datavis/vis_types).
### Scatter Plots
Scatter plots can be used in various ways.
They are frequently used to show how two variables are related to one another or compare
various observations based on two variables.
[This article](https://qz.com/1235712/the-origins-of-the-scatter-plot-data-visualizations-greatest-invention/)
about the scatter plot is a good read.
One strength of a scatter plot is that its simplicity allows the data to speak for itself.
A plot of two variables allows viewers to immediately see whether the variables are
linearly related, quadratically related, or maybe not related at all.
We have already seen an example of a scatter plot which shows the relationship between two
variables.
Below, we demonstrate how it can be used to compare.
```
# Unemployment rate (%) and average hourly wage ($) for a selection of US cities.
cities = [
    "San Francisco", "Austin", "Las Vegas", "New York", "Seattle", "Pittsburgh",
    "Detroit", "Fresno", "Phoenix", "Orlando", "Atlanta", "Madison"
]
unemp_wage = np.array([
    [2.6, 39.89], [2.9, 29.97], [4.6, 24.38], [3.9, 33.09], [3.9, 40.11], [4.2, 27.98],
    [4.1, 28.41], [7.1, 22.96], [4.5, 27.42], [3.0, 21.47], [3.6, 25.19], [2.2, 29.48]
])
df = pd.DataFrame(unemp_wage, index=cities, columns=["Unemployment", "Wage"])
fig, ax = plt.subplots()
df.plot(kind="scatter", x="Unemployment", y="Wage", ax=ax, s=25, color="#c90000")
# Add annotations; offsets are hand-tuned (in points) so labels don't overlap.
# NOTE(review): "Madison" also appears in the second list below, but the first
# branch already catches it, so that entry is dead code -- confirm which offset
# was intended.
for (i, row) in df.iterrows():
    city = row.name
    if city in ["San Francisco", "Madison"]:
        offset = (-35, -10.5)
    elif city in ["Atlanta", "Phoenix", "Madison"]:
        offset = (-25, -12.5)
    elif city in ["Detroit"]:
        offset = (-38, 0)
    elif city in ["Pittsburgh"]:
        offset = (5, 0)
    else:
        offset = (5, 2.5)
    ax.annotate(
        city, xy=(row["Unemployment"], row["Wage"]),
        xytext=offset, textcoords="offset points"
    )
bgcolor = (250/255, 250/255, 250/255)
fig.set_facecolor(bgcolor)
ax.set_facecolor(bgcolor)
ax.set_xlim(0, 10)
ax.set_ylim(20, 45)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
```
### Line Plots
Line plots are best used to either show how a variable evolves over time or to demonstrate the
relationship between variables.
Note: it differs from scatter plots in the way it displays relationships between variables.
A line plot is restricted to displaying a line, so you cannot just draw a line between all of your
datapoints.
Instead, before drawing the line, you must fit some kind of statistical model that can
show how one variable changes as the other changes.
Below, we add regression lines which estimate the relationship between population density and wages to
our college/non-college urban wage premium plot.
In fact, Dr. Autor’s original slides contain regression lines, but the New York Times
chose to remove them.
```
from sklearn.linear_model import LinearRegression
# Read in data (fetched over the network from the QuantEcon data mirror)
df = pd.read_csv("https://datascience.quantecon.org/assets/data/density_wage_data.csv")
df["year"] = df.year.astype(int) # Convert year to int
def single_scatter_plot(df, year, educ, ax, color):
    """Scatter log density against log wages for one (year, education)
    slice of *df* and overlay the fitted OLS regression line."""
    # Keep only the rows for the requested year and education group.
    subset = df[(df["year"] == year) & (df["group"] == educ)]
    subset.plot(kind="scatter", x="density_log", y="wages_logs",
                ax=ax, color=color)
    # Fit wages_logs = a + b * density_log by ordinary least squares.
    features = subset[["density_log"]].values
    target = subset[["wages_logs"]].values
    model = LinearRegression()
    model.fit(features, target)
    # Draw the fitted line across the plotted density range.
    grid = np.linspace(2.0, 9.0).reshape(-1, 1)
    ax.plot(grid, model.predict(grid), color=color)
    return ax
# Same fine-tuned figure as before, now with regression lines overlaid.
fig, ax = plt.subplots(1, 4, figsize=(16, 6))
colors = {"college": "#1385ff", "noncollege": "#ff6d13"}
for (i, year) in enumerate(df.year.unique()):
    single_scatter_plot(df, year, "college", ax[i], colors["college"])
    single_scatter_plot(df, year, "noncollege", ax[i], colors["noncollege"])
    ax[i].set_title(str(year))
bgcolor = (250/255, 250/255, 250/255)
fig.set_facecolor(bgcolor)
for (i, _ax) in enumerate(ax):
    # Label with words (only the leftmost panel carries the axis labels)
    if i == 0:
        _ax.set_xlabel("Population Density")
    else:
        _ax.set_xlabel("")
    # Turn off right and top axis lines
    _ax.spines['right'].set_visible(False)
    _ax.spines['top'].set_visible(False)
    # Don't use such a white background color
    _ax.set_facecolor(bgcolor)
    # Change bounds (data are in logs, so bounds are logs of round numbers)
    _ax.set_ylim((np.log(4), np.log(30)))
    _ax.set_xlim((0, 10))
    # Place ticks at log(round number) but label them with the raw number
    xticks = [10, 100, 1000, 10000]
    _ax.set_xticks([np.log(xi) for xi in xticks])
    _ax.set_xticklabels([str(xi) for xi in xticks])
    yticks = list(range(5, 32, 5))
    _ax.set_yticks([np.log(yi) for yi in yticks])
    if i == 0:
        _ax.set_yticklabels([str(yi) for yi in yticks])
        _ax.set_ylabel("Average Wage")
    else:
        _ax.set_yticklabels([])
        _ax.set_ylabel("")
# In-plot annotations replace a legend (coordinates are in log space)
ax[0].annotate("College Educated Workers", (np.log(75), np.log(14.0)), color=colors["college"])
ax[0].annotate("Non-College Educated Workers", (np.log(10), np.log(5.25)), color=colors["noncollege"])
```
### Bar Charts
Bar charts are mostly used to display differences for a variable between groups though they can also
be used to show how a variable changes over time (which in some ways, is just showing a difference as grouped by time…).
Bar charts show the differences between these groups using the length of each bar, so that comparing the different groups is straightforward.
In the example below, we show a bar chart of how the unemployment rate differs across several cities
in the United States.
```
# Unemployment rate (%) and average hourly wage ($) for a selection of US cities.
cities = [
    "San Francisco", "Austin", "Las Vegas", "New York", "Seattle", "Pittsburgh",
    "Detroit", "Fresno", "Phoenix", "Orlando", "Atlanta", "Madison"
]
unemp_wage = np.array([
    [2.6, 39.89], [2.9, 29.97], [4.6, 24.38], [3.9, 33.09], [3.9, 40.11], [4.2, 27.98],
    [4.1, 28.41], [7.1, 22.96], [4.5, 27.42], [3.0, 21.47], [3.6, 25.19], [2.2, 29.48]
])
# Sort descending so the longest bar ends up at the bottom of the barh chart.
df = (
    pd.DataFrame(unemp_wage, index=cities, columns=["Unemployment", "Wage"])
    .sort_values("Unemployment", ascending=False)
)
fig, ax = plt.subplots()
df["Unemployment"].plot(kind="barh", ax=ax, color="#1b48fc")
# Hide the top/right frame lines for a cleaner look.
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.set_title("Unemployment Rate in US Cities")
```
### Histograms
Histograms display the approximate distribution of a single variable.
They can be particularly important when your variables are not distributed normally
since we typically think of means and variances in terms of the normal distribution.
In the example below, we show a histogram of GDP growth rates over the period 1948 - 2019.
Our histogram indicates this variable is approximately normally distributed.
```
# GDP quarterly growth, percent change, 1948-2019 (fetched from FRED over the network)
gdp = DataReader("GDP", "fred", 1948, 2019).pct_change().dropna()
gdp = gdp * 100
fig, ax = plt.subplots()
# density=True normalises the histogram so the bar areas integrate to one
gdp.plot(
    kind="hist", y="GDP", color=(244/255, 77/255, 24/255),
    bins=23, legend=False, density=True, ax=ax
)
ax.set_facecolor((0.96, 0.96, 0.96))
fig.set_facecolor((0.96, 0.96, 0.96))
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_title("US GDP Growth from 1948-2019")
```
## Color in Plots
Choosing colors for your plots is not always a straightforward task.
Visualization expert [Edward Tufte](https://www.edwardtufte.com/tufte/) wrote,
> … Avoiding catastrophe becomes the first principle in bringing color to information: Above
all, do no harm ([*Envisioning Information*](https://www.edwardtufte.com/tufte/books_ei) by Edward Tufte)
So how do we “do no harm”?
### Hue Saturation Lightness
We will use the [Hue Saturation Value](https://en.wikipedia.org/wiki/HSL_and_HSV) (HSV) paradigm as a way to formalize our discussion of colors.
- **Hue**: This represents the share of each of the primary colors (red, green, blue)
as angles around a circle. The hue begins with red at 0 degrees, green at 120
degrees, and blue at 240 degrees (Note: matplotlib converts these back into numbers between 0 and
1 by dividing by 360). Angles between these colors are mixes of the primary colors.
- **Saturation**: Denotes how rich the color is using numbers between 0 and 1. At full saturation
(saturation is 1), the color is as rich as possible. At saturation 0, the color has no
color and is approximately a projection of the color into grayscale (Note that this is not
exactly true).
- **Value**: Denotes how dark the color is using numbers between 0 and 1. We view this as how much black
has been added to a color. If a color has value 0, then it is as dark as possible (the
color black). If the color has value 1 then it has no black and is just the original color.
The way in which HSV covers the color space is demonstrated in the following figure.
<img src="https://datascience.quantecon.org/assets/_static/visualization_files/HSV_color_solid_cylinder_saturation_gray.png" alt="HSL_cylinder.png" style="">
Image attribution: By [SharkD](https://commons.wikimedia.org/w/index.php?curid=9801673).
Below, we demonstrate how colors change as we move hue/saturation/value one at a time.
```
def color_swatches(colors):
    """Return a figure drawing each entry of *colors* as a unit-square swatch.

    Swatches are laid out left to right in the order given; all axis ticks
    and labels are removed so only the colors are visible.
    """
    fig, ax = plt.subplots(figsize=(len(colors)*2, 2))
    for offset, swatch_color in enumerate(colors):
        # One filled 1x1 rectangle per color, placed at x = offset.
        ax.add_patch(patches.Rectangle((offset, 0), 1, 1, color=swatch_color))
    ax.set_xlim(0, len(colors))
    ax.set_ylim(0, 1)
    # Strip both axes down to bare color patches.
    ax.set_yticks([])
    ax.set_yticklabels([])
    ax.set_xticks([])
    ax.set_xticklabels([])
    return fig
# Vary hue: six hues evenly spaced around the color wheel.
# hsv_to_rgb expects hue in [0, 1], hence the division by 360.
colors = [mplc.hsv_to_rgb((i/360, 1, 1)) for i in np.linspace(0, 360, 6)]
fig = color_swatches(colors)
fig.suptitle("Varying Hue")
# Vary saturation: fixed red hue, saturation from 0 (gray) to 1 (full red).
colors = [mplc.hsv_to_rgb((0, i, 1)) for i in np.linspace(0, 1, 5)]
fig = color_swatches(colors)
fig.suptitle("Varying Saturation")
# Vary value: fixed red hue, value from 0 (black) to 1 (full red).
colors = [mplc.hsv_to_rgb((0, 1, i)) for i in np.linspace(0, 1, 5)]
fig = color_swatches(colors)
fig.suptitle("Varying Value")
```
### Color Palettes
A good color palette will exploit aspects of hue, saturation, and value to emphasize the information
in the data visualization.
For example, for qualitatively different groups (where we just want to identify separate groups
which have no quantitative relationships between them), one could fix
the saturation and value then draw $ N $ evenly spaced values from hue space.
However, creating a good color palette sometimes requires more nuance than can be attributed to
rules of thumb.
Luckily, matplotlib and other Python packages can help us choose good color
palettes. Often, relying on these pre-built color palettes and
themes is better than creating your own.
We can get a list of all of the color palettes (referred to as colormaps by matplotlib) included
with matplotlib by doing:
```
print(plt.colormaps())
```
The [matplotlib documentation](https://matplotlib.org/tutorials/colors/colormaps.html)
differentiates between colormaps used for varying purposes.
Colormaps are often split into several categories based on their function (see, e.g., [Moreland]):
- Sequential: incrementally change lightness and often saturation of color,
generally using a single hue; should be used for representing information that has ordering.
- Diverging: change lightness and possibly saturation of two different
colors that meet in the middle at an unsaturated color; should be used when the
information being plotted has a critical middle value, such as topography or
when the data deviates around zero.
- Cyclic: change lightness of two different colors that meet in
the middle and beginning/end at an unsaturated color; should be used for
values that wrap around at the endpoints, such as phase angle, wind direction, or time of day.
- Qualitative: often are miscellaneous colors; should be used to represent
information which does not have ordering or relationships.
Most of the examples we have used so far can use qualitative colormaps because they are simply
meant to distinguish between different variables/observations and not say something about how they
differ.
Additionally, three other sources of information on colors and color palettes are:
- The [seaborn documentation](https://seaborn.pydata.org/tutorial/color_palettes.html).
- A [talk](https://www.youtube.com/watch?v=xAoljeRJ3lU) given at the Scipy conference in 2015 by
Nathaniel Smith.
- A [website](https://colorusage.arc.nasa.gov/graphics_page_design.php) literally put together by
“rocket scientists” at NASA.
### Do No Harm
Now that we have a little background that we can use as a common language, we can proceed with
discussing how we can use color effectively.
#### Sometimes Value is More Effective than Hue
Sometimes, in a graph with many lines, using the same color with different values is a more effective way to highlight differences than using different colors.
Compare the following example, which is a modification of an example by Larry Arend, Alex Logan, and
Galina Havin’s [graphics website](https://colorusage.arc.nasa.gov/graphics_page_design.php) (the NASA one we linked above).
```
def confusing_plot(colors):
    """Draw a deliberately busy demo plot with three tiers of emphasis.

    Parameters
    ----------
    colors : sequence of 3 colors
        ``(c1, c2, c3)`` ordered from most important (``c1``, thickest line)
        to least important (``c3``, thinnest lines and small text).

    Returns
    -------
    matplotlib Figure containing the plot.
    """
    c1, c2, c3 = colors
    fig, ax = plt.subplots()
    x1 = np.linspace(0.2, 0.9, 5)
    x2 = np.linspace(0.3, 0.8, 5)
    # Least-important tier: small text and thin lines in c3.
    ax.text(0.4, 0.10, "Not Important", color=c3, fontsize=15)
    ax.text(0.25, 0.25, "Not Important", color=c3, fontsize=15)
    ax.text(0.5, 0.70, "Not Important", color=c3, fontsize=15)
    ax.plot(x1, 1.25*x1 - 0.2, color=c3, linewidth=2)
    ax.plot(x1, 1.25*x1 + 0.1, color=c3, linewidth=2)
    ax.plot(x1, 0*x1 + 0.3, color=c3, linewidth=2)
    # Second-order tier: medium lines and text in c2.
    # Fixed: this line's y-values were computed from x1 but plotted against
    # x2, pairing each point with the wrong abscissa. Use x2 consistently.
    ax.plot(x2, 0.15*x2 + 0.4, color=c2, linewidth=3)
    ax.plot(x1, -x1 + 1.2, color=c2, linewidth=3)
    ax.plot(x1, -x1 + 1.25, color=c2, linewidth=3)
    ax.text(0.10, 0.5, "Second order", color=c2, fontsize=22)
    ax.text(0.5, 0.35, "Second order", color=c2, fontsize=22)
    ax.text(0.40, 0.65, "Second order", color=c2, fontsize=22)
    # Most-important tier: one thick line and large text in c1.
    # Fixed: same x1/x2 mismatch as above.
    ax.plot(x2, 0.25*x2 + 0.1, color=c1, linewidth=5)
    ax.text(0.05, 0.4, "Important", color=c1, fontsize=34)
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    return fig
# Render the same cluttered plot three ways to compare emphasis strategies.
# All black: every element has value 0 (pure black), so emphasis comes only
# from line width and font size.
colors = [mplc.hsv_to_rgb((0, 1, x)) for x in [0.0, 0.0, 0.0]]
fig = confusing_plot(colors)
# Vary the hues: red/green/blue at full saturation and value.
colors = [mplc.hsv_to_rgb((x, 1, 1)) for x in [0.0, 0.33, 0.66]]
fig = confusing_plot(colors)
# Vary the values: three grays (saturation 0), darkest for the important tier.
colors = [mplc.hsv_to_rgb((0, 0, x)) for x in [0.00, 0.35, 0.7]]
fig = confusing_plot(colors)
```
In our opinion, the last one with no color is actually the most readable.
The point of this exercise is **not** to not use color in your plots, but rather to
encourage you to think about whether hue or value will be more effective in
communicating your message.
#### Carelessness with Value Can Make Grayscale Impossible to Read
Recall that driving the saturation to 0 is approximately equivalent to projecting the colors
onto grayscale.
Well, if you aren’t careful in choosing your colors, then they may have the same projected
values and become unidentifiable once converted to grayscale.
This code is based on an [example](https://matplotlib.org/gallery/statistics/barchart_demo.html#barchart-demo)
from the matplotlib documentation.
```
# Grouped bar chart of scores by gender. The two hues chosen here have
# similar lightness, so they become hard to distinguish once the figure is
# converted to grayscale (see the image below this cell).
n_groups = 5
means_men = (20, 35, 30, 35, 27)
means_women = (25, 32, 34, 20, 25)
fig, ax = plt.subplots()
index = np.arange(n_groups)
bar_width = 0.35
# Desaturated blue for the men's bars.
color_men = mplc.hsv_to_rgb((0.66, 0.35, 0.9))
rects1 = ax.bar(
    index, means_men, bar_width, color=color_men, label='men'
)
# Warm orange for the women's bars, offset by one bar width.
color_women = mplc.hsv_to_rgb((0.10, 0.65, 0.85))
rects2 = ax.bar(
    index + bar_width, means_women, bar_width, color=color_women, label='women'
)
ax.set_xlabel('group')
ax.set_ylabel('scores')
ax.set_title('scores by group and gender')
ax.set_xticks(index + bar_width / 2)  # center each tick between the paired bars
ax.set_xticklabels(('a', 'b', 'c', 'd', 'e'))
ax.legend()
fig.tight_layout()
```
And here is the same image converted to grayscale.
<img src="https://datascience.quantecon.org/assets/_static/visualization_files/bar_grayscale.png" alt="bar_grayscale.png" style="">
The image below, from [this flowingdata blog entry](https://flowingdata.com/2012/11/09/incredibly-divided-nation-in-a-map),
shows what happens when you don’t check your colors… Don’t do this.
<img src="https://datascience.quantecon.org/assets/_static/visualization_files/Divided-nation.jpg" alt="Divided-nation.jpg" style="">
Warm colors (colors like red, yellow, and orange) often appear lighter than cool colors (colors
like blue, green and purple) when converted to grayscale even when they have similar values.
Sometimes to know whether colors are different enough, you just have to test it out.
#### Use Color to Draw Attention
If you are displaying information about various groups but are really only interested in how one
group differs from the others, then you should choose several close-together hues to represent the less
important groups and a distinct color to display the group of interest.
```
# Scatter plot with one emphasized group: three muted, close-together hues
# for the background groups and a saturated red for the group of interest.
fig, ax = plt.subplots()
npts = 50
x = np.linspace(0, 1, npts)
np.random.seed(42)  # Set seed for reproducibility
y1 = 1.20 + 0.75*x + 0.25*np.random.randn(npts)
y2 = 1.35 + 0.50*x + 0.25*np.random.randn(npts)
y3 = 1.40 + 0.65*x + 0.25*np.random.randn(npts)
y4 = 0.15 + 3.0*x + 0.15*np.random.randn(npts)  # Group of interest
# Background groups: nearby hues (0.40-0.60), low saturation.
colors = [mplc.hsv_to_rgb((x, 0.4, 0.85)) for x in [0.40, 0.50, 0.60]]
# Group of interest: a fully distinct, highly saturated red.
colors.append(mplc.hsv_to_rgb((0.0, 0.85, 1.0)))
for (y, c) in zip([y1, y2, y3, y4], colors):
    ax.scatter(x=x, y=y, color=c, s=36)
ax.text(0.25, 0.5, "Group of Interest", color=colors[-1])
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
```
<a id='exercise-1'></a>
> See exercise 2 in the [*exercise list*](#exerciselist-0)
#### Don’t Use Color to Differentiate Small Objects
Color is a great differentiator when there is enough of the colored object to see… However, when the objects
become too small, differentiating between colors, no matter how distinct, becomes quite difficult.
Below is the same plot as we had above, but we have made the scatter plot’s points smaller.
```
# Same scatter plot as above but with tiny markers (s=1 instead of s=36)
# to show how hard color discrimination becomes for small objects.
fig, ax = plt.subplots()
npts = 50
x = np.linspace(0, 1, npts)
np.random.seed(42)  # Set seed for reproducibility
y1 = 1.20 + 0.75*x + 0.25*np.random.randn(npts)
y2 = 1.35 + 0.50*x + 0.25*np.random.randn(npts)
y3 = 1.40 + 0.65*x + 0.25*np.random.randn(npts)
y4 = 0.15 + 3.0*x + 0.15*np.random.randn(npts)  # Group of interest
colors = [mplc.hsv_to_rgb((x, 0.4, 0.85)) for x in [0.40, 0.50, 0.60]]
colors.append(mplc.hsv_to_rgb((0.0, 0.85, 1.0)))
for (y, c) in zip([y1, y2, y3, y4], colors):
    ax.scatter(x=x, y=y, color=c, s=1)  # s=1: points too small to read color
ax.text(0.25, 0.5, "Group of Interest", color=colors[-1])
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
```
It becomes harder to read, but because the red is so much darker than some of the other colors,
finding the group of interest is still possible (a lesson to be learned here!).
#### Colors’ Connotations
Some colors have connotations.
Using colors to mean the opposite of what they’re usually used for can be confusing.
For example, using red to denote positive profits and black to denote negative profits would be
a poor color choice because red is often associated with losses and black is associated with profits.
```
# Bar chart of yearly profits using *inverted* color conventions:
# black for losses and red for gains (the opposite of the usual meaning).
df = pd.DataFrame(
    {"profits": [1.5, 2.5, 3.5, -6.7, -2.0, 1.0]},
    index=[2005, 2006, 2007, 2008, 2009, 2010]
)
fig, ax = plt.subplots()
colors = ["k" if x < 0 else "r" for x in df["profits"].values]
bars = ax.bar(np.arange(len(colors)), df["profits"].values, color=colors, alpha=0.8)
ax.hlines(0, -1.0, 6.0)  # zero line across the full bar range
# Fixed: pin the tick positions to the bar locations before assigning the
# year labels; calling set_xticklabels alone can misalign labels with
# whatever default ticks matplotlib chose (and warns on recent versions).
ax.set_xticks(np.arange(len(df)))
ax.set_xticklabels(df.index)
ax.set_xlim(-0.5, 5.5)
ax.set_title("Profits for Company X")
ax.xaxis.set_ticks_position('none')
ax.yaxis.set_ticks_position('none')
for _spine in ["right", "top", "left", "bottom"]:
    ax.spines[_spine].set_visible(False)
```
This plot becomes much more intuitive by using red for negative values.
```
# Same profits chart with the conventional colors: red for losses, black for
# gains. Reuses the `df` defined in the previous cell.
fig, ax = plt.subplots()
colors = ["r" if x < 0 else "k" for x in df["profits"].values]
bars = ax.bar(np.arange(len(colors)), df["profits"].values, color=colors, alpha=0.8)
ax.hlines(0, -1.0, 6.0)  # zero line across the full bar range
# Fixed: pin tick positions to the bar locations before labeling them;
# set_xticklabels alone can misalign labels with the default ticks.
ax.set_xticks(np.arange(len(df)))
ax.set_xticklabels(df.index)
ax.set_xlim(-0.5, 5.5)
ax.set_title("Profits for Company X")
ax.xaxis.set_ticks_position('none')
ax.yaxis.set_ticks_position('none')
for _spine in ["right", "top", "left", "bottom"]:
    ax.spines[_spine].set_visible(False)
```
#### Accounting for Color Blindness
Nearly 1 in 10 men have some form of color blindness.
The most prevalent form makes differentiating between red
and green difficult.
So, besides making your plots feel “Christmas-themed”, using both red and green to illustrate differences in a plot can often make your visualization difficult for some to follow.
Some Python libraries allow you to simulate different forms of color blindness or choose
sensible defaults for colors.
We recommend viewing the documentation for
[colorspacious](https://colorspacious.readthedocs.io/en/latest/tutorial.html#simulating-colorblindness)
and [viscm](https://github.com/matplotlib/viscm).
## Visualization Rules
We have already discussed some guidelines for color.
We will now discuss some guidelines for which elements to include and how to structure your graphs.
Violating each of these may make sense in particular situations, but please have a
good reason (and one you can explain when someone points out what you’ve done).
The main theme for these guidelines will be to keep the plot as simple as possible so that your
readers can get the clearest understanding of your story.
Many people try too hard to make their plot eye-catching, and in the process, they destroy the
message in the graph.
Graphs should be as simple as possible, but not simpler.
We will discuss some guidelines that we feel are most abused, but many very good books have
been written on this subject.
Some books that we have found extremely instructive are:
1. *Visual Display of Quantitative Information* by Edward Tufte.
1. *The Wall Street Journal Guide to Information Graphics: The Dos and Don’ts of Presenting Data,
Facts, and Figures* by Dona M Wong.
1. *The Functional Art: An introduction to information graphics and visualization* by Alberto Cairo.
Some blogs that we think are useful for seeing well-done visualizations are:
1. Flowing Data: [https://flowingdata.com/](https://flowingdata.com/)
1. Story Telling with Data: [http://www.storytellingwithdata.com/](http://www.storytellingwithdata.com/)
1. Visualizing Data: [http://www.visualisingdata.com/](http://www.visualisingdata.com/)
1. Junk Charts: [https://junkcharts.typepad.com/](https://junkcharts.typepad.com/)
As you begin to create more visualizations in your work, we recommend reading these books and blogs.
Seeing how others display their information will ensure that when you run into interesting
problems in the future, you’ll have a well of knowledge that you can call upon.
In fact, one friend of ours takes this very seriously.
He keeps an organized binder of graphics that he has seen and likes.
He reads this binder, sometimes for hours, when he is thinking about how to communicate messages
for his presentations.
A couple last links to specific articles we enjoyed:
- [This Financial Times article](https://ig.ft.com/science-of-charts) is a great exercise to
demonstrate how the choice of graph type can affect a visualization's interpretability.
- [This article](https://towardsdatascience.com/data-visualization-best-practices-less-is-more-and-people-dont-read-ba41b8f29e7b)
does an exceptional job at redesigning graphics that were originally poorly done.
- [Duke library data visualization guide](https://guides.library.duke.edu/datavis/topten) has a
few concise rules worth reviewing.
### Bar Plot Recommendations
In Dona Wong’s book, she advises against using *zebra patterns*.
```
# Cross-country statistics (rows: countries, columns: statistics) used by
# the bar-plot examples below.
df = pd.DataFrame(
    {
        "Unemployment Rate": [5.20, 5.67, 9.20, 4.03, 3.80],
        "Pension Expenditure (% of GDP)": [4.18, 4.70, 13.90, 6.24, 7.06],
        "Social Welfare Expenditure (% of GDP)": [7.42, 9.84, 19.72, 12.98, 14.50],
        "Highest Tax Rate": [47, 33, 59.6, 50, 39.6]
    },
    index = ["Australia", "Canada", "France", "UK", "USA"]
)
def create_barplot(df, colors):
    """Plot each statistic in *df* as a group of per-country bars.

    One bar group per column (statistic) and one bar per row (country),
    colored by *colors* in row order. The legend sits above the axes in
    the same order as the bars. Returns the figure.
    """
    figure, axis = plt.subplots(figsize=(14, 6))
    transposed = df.T  # statistics on the x-axis, countries as bars
    transposed.plot(kind="bar", color=colors, ax=axis, edgecolor="k", rot=0)
    axis.legend(bbox_to_anchor=(0, 1.02, 1.0, 1.02), loc=3, mode="expand", ncol=5)
    axis.set_xticklabels(df.columns, fontsize=6)
    return figure
```
Instead, she proposes using different shades of the same color (ordered from lightest to darkest!).
```
# Five shades of the same blue hue, ordered lightest to darkest so the bar
# order matches the legend order.
colors = [
    (0.902, 0.902, 0.997), (0.695, 0.695, 0.993), (0.488, 0.488, 0.989),
    (0.282, 0.282, 0.985), (0.078, 0.078, 0.980)
]
create_barplot(df, colors);
```
Notice that we put a legend at the top and maintain the same order as kept in the bars.
Additionally, the general consensus is that starting bar plots at any number besides 0 is a
misrepresentation of the data.
Always start your bar plots at 0!
An example of how starting at a non-zero number is misleading can be seen below and was originally from the
[flowingdata blog](https://flowingdata.com/2012/08/06/fox-news-continues-charting-excellence).
First, we look at a reproduction of the originally displayed image.
```
# Reproduction of the misleading original chart: the y-axis starts at 34,
# visually exaggerating the difference between the two bars.
fig, ax = plt.subplots()
ax.bar([0, 1], [35, 39.6], color="orange")
ax.set_xticks([0, 1])
ax.set_xticklabels(["Now", "Jan 1, 2013"])
ax.set_ylim(34, 42)  # truncated axis: this is what misleads the eye
ax.xaxis.set_ticks_position('none')
ax.yaxis.set_ticks_position('none')
for _spine in ["right", "top", "left", "bottom"]:
    ax.spines[_spine].set_visible(False)
ax.set_title("IF BUSH TAX CUTS EXPIRE\nTop Tax Rate")
```
This looks like a big difference!
In fact, your eyes are telling you that taxes will increase by a factor of 5 if the tax cuts expire.
If we start this same bar plot at 0, the chart becomes much less striking and tells you that the percentage
increase in the top tax rate is only 5-10 percent.
```
# Same chart with the y-axis starting at 0: the bars now honestly convey
# the roughly 13% relative increase in the top tax rate.
fig, ax = plt.subplots()
ax.bar([0, 1], [35, 39.6], color="orange")
ax.set_xticks([0, 1])
ax.set_xticklabels(["Now", "Jan 1, 2013"])
ax.set_ylim(0, 42)  # bars start at zero, as bar charts should
ax.xaxis.set_ticks_position('none')
ax.yaxis.set_ticks_position('none')
for _spine in ["right", "top", "left", "bottom"]:
    ax.spines[_spine].set_visible(False)
ax.set_title("IF BUSH TAX CUTS EXPIRE\nTop Tax Rate")
```
We also have opinions about what type of person uses all caps, but we’ll keep that to ourselves for
now.
### Pie Plots
As a general rule, you should avoid pie plots.
When comparing groups, your reader can more easily measure the heights on a
bar graph than determine the size of the angles in a pie chart.
Let’s look at an example of this below.
```
# Side-by-side comparison of the same data as a horizontal bar chart and a
# pie chart. Charlie's entry is highlighted in orange; the rest share a blue.
df = pd.DataFrame(
    {"values": [5.5, 4.5, 8.4, 4.75, 2.5]},
    index=["Bob", "Alice", "Charlie", "Susan", "Jessie"]
)
colors = [mplc.hsv_to_rgb((0.66, 0.8, 0.9))]*2  # Bob, Alice: blue
colors += [mplc.hsv_to_rgb((0.05, 0.6, 0.9))]   # Charlie: orange highlight
colors += [mplc.hsv_to_rgb((0.66, 0.8, 0.9))]*2 # Susan, Jessie: blue
fig, ax = plt.subplots(1, 2)
df.plot(kind="barh", y="values", ax=ax[0], legend=False, color=colors)
df.plot(kind="pie", y="values", ax=ax[1], legend=False, colors=colors, startangle=0)
ax[0].spines['right'].set_visible(False)
ax[0].spines['top'].set_visible(False)
ax[1].set_ylabel("")  # drop the automatic "values" label from the pie axes
fig.suptitle("How many pieces of pie eaten")
```
Using the pie chart, can you tell who ate more pie, Alice or Susan? How about with the bar chart?
The pie chart can sometimes be used to illustrate whether one or two groups is much larger than the
others.
If you were making a case that Charlie ate too much of the pie and should pay more than an equal split,
then a pie chart works (though a bar plot also works…).
If you wanted to make a more precise point, then you might consider going with a bar plot instead.
### Simplify Line Plots
We’ve tried to emphasize repeatedly that simplifying your visualizations is essential to being able
to communicate your message.
We do it again here and will do it a few more times after this…
Don’t try and fit too much information into a single line plot.
We see people do this very frequently – remember that a visualization should have ONE main message.
Do not pollute your message with extra information.
In our example using World Bank data below, we will show that Japan’s population is aging faster
than that of many other economically successful countries.
We show this using the age dependency ratio, which is the number of individuals aged 65+ divided by the number of individuals who are 15-64, for each country over time.
A high age dependency ratio means that the government will have a smaller tax base to collect from
but have relatively higher health and pension expenditures to pay to the old.
```
# Load the World Bank age dependency ratio data: keep one column per decade
# (plus 2017), put countries in columns and years in the (integer) index.
download_url = "https://datascience.quantecon.org/assets/data/WorldBank_AgeDependencyRatio.csv"
df = (
    pd.read_csv(download_url, na_values="..")
      .loc[:, ["Country Name", "1960", "1970", "1980", "1990", "2000", "2010", "2017"]]
      .set_index("Country Name")
      .T
)
df.index = df.index.values.astype(int)
```
Let’s visualize these variables for a collection of many developed countries.
```
# "Spaghetti" plot: every country drawn in a different color on one axes.
# Only Japan is labeled, and the message about it gets lost in the noise.
fig, ax = plt.subplots()
df.plot(ax=ax, legend=False)
ax.text(2007, 38, "Japan")
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.set_title("Japan's Aging Population")
ax.set_ylabel("Age Dependency Ratio")
```
Notice that with so many lines, the message about Japan is hidden or polluted by noise.
If we did want to demonstrate that Japan is significantly different than many other developed countries,
we might try a plot like this:
```
# Emphasize Japan: draw every other country as a thin light-gray line and
# Japan as a thick red line, labeling each group directly on the plot.
fig, ax = plt.subplots()
not_japan = list(df.columns)
not_japan.remove("Japan")
df[not_japan].plot(ax=ax, color=[(0.8, 0.8, 0.8)], lw=0.4, legend=False)
ax.text(1970, 29, "Other Developed Countries")
df["Japan"].plot(ax=ax, color=(0.95, 0.05, 0.05), lw=2.5, legend=False)
ax.text(2006.5, 38, "Japan")
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.set_title("Japan's Aging Population")
ax.set_ylabel("Age Dependency Ratio")
```
However, placing this many lines on a single plot is definitely an exception, and we encourage you
to do so sparingly.
Generally, you should only have a few informative lines for each plot.
We now will focus our graph on a few countries of interest.
To do so, the plot below uses many different line styles.
```
# Distinguish five countries by line *style* (solid/dashed/dotted) with
# direct labels — shown as an example of what generally does NOT work well.
fig, ax = plt.subplots()
df["Japan"].plot(ax=ax, legend=False, linestyle="solid")
ax.text(2002, 35, "Japan")
df["United Kingdom"].plot(ax=ax, legend=False, linestyle="dashed")
ax.text(1975, 24, "UK")
df["United States"].plot(ax=ax, legend=False, linestyle="dashed")
ax.text(1980, 19, "US")
df["China"].plot(ax=ax, legend=False, linestyle="dotted")
ax.text(1990, 10, "China")
df["India"].plot(ax=ax, legend=False, linestyle="dotted")
ax.text(2005, 5, "India")
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.set_title("Japan's Aging Population")
ax.set_ylabel("Age Dependency Ratio")
```
There are some good-use cases for using line styles to distinguish between different pieces
of data, but not many.
In particular, having this many different styles and colors makes it difficult to figure out what is going on.
Instead, we recommend using color and line width instead of line styles to highlight certain pieces of
information, as seen below.
```
# Preferred version: distinguish the lines by color and width instead of
# line style — Japan in thick red, the rest in thin, faded blue variants.
fig, ax = plt.subplots()
emph_color = (0.95, 0.05, 0.05)  # emphasis red for Japan
sec_color = [(0.05, 0.05+0.075*x, 0.95) for x in range(4)]  # four similar blues
df["Japan"].plot(ax=ax, legend=False, color=emph_color, linewidth=2.5)
ax.text(2002, 35, "Japan")
df["United Kingdom"].plot(ax=ax, legend=False, color=sec_color[0], alpha=0.4, linewidth=0.75)
ax.text(1975, 24, "UK")
df["United States"].plot(ax=ax, legend=False, color=sec_color[1], alpha=0.4, linewidth=0.75)
ax.text(1980, 19, "US")
df["China"].plot(ax=ax, legend=False, color=sec_color[2], alpha=0.4, linewidth=0.75)
ax.text(1990, 10, "China")
df["India"].plot(ax=ax, legend=False, color=sec_color[3], alpha=0.4, linewidth=0.75)
ax.text(2005, 5, "India")
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.set_title("Japan's Aging Population")
ax.set_ylabel("Age Dependency Ratio")
```
### Tick Steps
Use easy to interpret increments such as multiples of 2, 5, 10, 25 etc…
Using increments like `0, 3, 6, 9, 12, ...` makes it more difficult for your reader to mentally
determine what the values between the lines are:
```
# Same curve twice: left axes uses awkward tick steps of 3, right axes uses
# easy-to-interpret steps of 5.
fig, ax = plt.subplots(1, 2, figsize=(14, 6))
x = np.linspace(0, 26, 50)
ax[0].plot(x, np.sqrt(x))
ax[1].plot(x, np.sqrt(x))
ax[0].set_xticks(np.arange(0, 27, 3))
ax[0].set_xticklabels(np.arange(0, 27, 3))  # labels mirror the tick positions
ax[1].set_xticks(np.arange(0, 27, 5))
ax[1].set_xticklabels(np.arange(0, 27, 5))
```
### No Background Colors
There are no reasons to use background colors in your visualizations.
Research has shown that white or very light grays provide the best contrast as a background.
Compare the following graphs and think about which feels better.
```
# Counter-example: the tax-rate bar chart on a solid blue background,
# applied to both the figure and the axes.
fig, ax = plt.subplots()
ax.bar([0, 1], [35, 39.6], color="orange")
ax.set_xticks([0, 1])
ax.set_xticklabels(["Now", "Jan 1, 2013"])
ax.set_ylim(0, 42)
bgcolor = "blue"  # deliberately bad background choice
fig.set_facecolor(bgcolor)
ax.set_facecolor(bgcolor)
ax.xaxis.set_ticks_position('none')
ax.yaxis.set_ticks_position('none')
for _spine in ["right", "top", "left", "bottom"]:
    ax.spines[_spine].set_visible(False)
ax.set_title("IF BUSH TAX CUTS EXPIRE\nTop Tax Rate")
```
versus
```
# Same chart on a near-white background — the recommended choice.
fig, ax = plt.subplots()
ax.bar([0, 1], [35, 39.6], color="orange")
ax.set_xticks([0, 1])
ax.set_xticklabels(["Now", "Jan 1, 2013"])
ax.set_ylim(0, 42)
bgcolor = (0.99, 0.99, 0.99)  # very light gray, near white
fig.set_facecolor(bgcolor)
ax.set_facecolor(bgcolor)
ax.xaxis.set_ticks_position('none')
ax.yaxis.set_ticks_position('none')
for _spine in ["right", "top", "left", "bottom"]:
    ax.spines[_spine].set_visible(False)
ax.set_title("IF BUSH TAX CUTS EXPIRE\nTop Tax Rate")
```
### Legends
Legends are quite common in charts, but many visualization experts advise against using them.
Legends have several weaknesses:
1. Relying solely on line color often makes a black and white version of your plot effectively
useless, since you don’t know whether the colors will be distinguishable in grayscale.
1. Legends require people to distinguish between small samples of colors. For
someone with weak eyesight or color blindness, this can make interpreting graphs nearly
impossible.
1. They add distance between the data and its description. This requires peoples’ eyes to go back
and forth between the lines and the legend when trying to understand the story being told. This
distracts from the ability to digest the story quickly and succinctly.
To demonstrate this, we revisit our age dependency ratio example from earlier.
```
# Re-load the World Bank age dependency ratio data (same steps as earlier):
# keep decade columns, countries in columns, integer years in the index.
download_url = "https://datascience.quantecon.org/assets/data/WorldBank_AgeDependencyRatio.csv"
df = pd.read_csv(download_url, na_values="..")
df = df[["Country Name", "1960", "1970", "1980", "1990", "2000", "2010", "2017"]]
df = df.set_index("Country Name").T
df.index = df.index.values.astype(int)
```
With a legend:
```
# Version using a legend: readers must bounce between the legend box and
# the lines to identify each country.
fig, ax = plt.subplots()
emph_color = (0.95, 0.05, 0.05)  # emphasis red for Japan
sec_color = [(0.05, 0.05+0.075*x, 0.95) for x in range(4)]  # four similar blues
df["Japan"].plot(ax=ax, legend=True, color=emph_color, linewidth=2.5)
df["United Kingdom"].plot(ax=ax, legend=True, color=sec_color[0], alpha=0.4, linewidth=0.75)
df["United States"].plot(ax=ax, legend=True, color=sec_color[1], alpha=0.4, linewidth=0.75)
df["China"].plot(ax=ax, legend=True, color=sec_color[2], alpha=0.4, linewidth=0.75)
df["India"].plot(ax=ax, legend=True, color=sec_color[3], alpha=0.4, linewidth=0.75)
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.set_title("Japan's Aging Population")
ax.set_ylabel("Age Dependency Ratio")
```
With labels:
```
# Version using direct labels: each country's name is placed next to its
# line, so no legend lookup is needed.
fig, ax = plt.subplots()
emph_color = (0.95, 0.05, 0.05)  # emphasis red for Japan
sec_color = [(0.05, 0.05+0.075*x, 0.95) for x in range(4)]  # four similar blues
df["Japan"].plot(ax=ax, legend=False, color=emph_color, linewidth=2.5)
ax.text(2002, 35, "Japan")
df["United Kingdom"].plot(ax=ax, legend=False, color=sec_color[0], alpha=0.4, linewidth=0.75)
ax.text(1975, 24, "UK")
df["United States"].plot(ax=ax, legend=False, color=sec_color[1], alpha=0.4, linewidth=0.75)
ax.text(1980, 19, "US")
df["China"].plot(ax=ax, legend=False, color=sec_color[2], alpha=0.4, linewidth=0.75)
ax.text(1990, 10, "China")
df["India"].plot(ax=ax, legend=False, color=sec_color[3], alpha=0.4, linewidth=0.75)
ax.text(2005, 5, "India")
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.set_title("Japan's Aging Population")
ax.set_ylabel("Age Dependency Ratio")
```
Most people find the example with labels to be a more readable graph.
### Limit the Information in a Single Plot
Don’t try to put too much information in a single plot!
We have tried to emphasize this point throughout this lecture, but it is so important that
we are emphasizing it again!
Don’t information overload your audience!
### Talk to Other People
Our last guideline: talk with others about your visualization.
The best way to determine whether other people understand your message is to show it to them.
## References
<a id='ely'></a>
**[1]** In particular, it is based on [this lecture](https://www.aeaweb.org/webcasts/2019/aea-ely-lecture-work-of-the-past-work-of-the-future)
by Autor presented at the annual AEA meeting in January, 2019. This
is a prestigious invited lecture with a large audience, so it is a more
“polished” than the typical academic lecture. It is worth
watching. Notice how almost every slide includes data
visualizations, and very few consist solely of text. Also, notice
the ways that the NYT modified Autor’s figures and think about
whether these changes improved the figures.
## Exercises
<a id='exerciselist-0'></a>
**Exercise 1**
Create a draft of the alternative way to organize time and education -- that is, have two subplots (one for each education level) and four groups of points (one for each year).
Why do you think they chose to organize the information as they did rather than this way?
([*back to text*](#exercise-0))
**Exercise 2**
Using the data on Canadian GDP growth below, create a bar chart which uses one color for the
bars for the years 2000 to 2008, a red for 2009, and the same color as before for 2010 to 2018.
```
# Canadian GDP growth series for exercise 2.
# NOTE(review): the series holds 18 values indexed 2000-2017, while the
# exercise text mentions years through 2018 — confirm whether a 2018 value
# is missing from the data or the prose overstates the range.
ca_gdp = pd.Series(
    [5.2, 1.8, 3.0, 1.9, 3.1, 3.2, 2.8, 2.2, 1.0, -2.8, 3.2, 3.1, 1.7, 2.5, 2.9, 1.0, 1.4, 3.0],
    index=list(range(2000, 2018))
)
fig, ax = plt.subplots()
# Hide all four spines for a cleaner frame before the bars are added.
for side in ["right", "top", "left", "bottom"]:
    ax.spines[side].set_visible(False)
```
([*back to text*](#exercise-1))
| github_jupyter |
<a href="https://colab.research.google.com/github/Yunxiang-Li/CodePen_GreenScreenImage/blob/master/term_statistics.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Working with Terms and Documents
This first homework assignment starts off with term statistics computations and graphing. In the final section (for CS6200 students), you collect new documents to experiment with.
Read through this Jupyter notebook and fill in the parts marked with `TODO`.
## Sample Data
Start by looking at some sample data. We download the counts of terms in documents for the first one million tokens of a newswire collection.
```
# Fetch the gzipped term-count sample from GitHub and decompress it in place,
# leaving ap201001.json in the working directory.
!wget -O ap201001.json.gz https://github.com/dasmiq/cs6200-hw1/blob/main/ap201001.json.gz?raw=true
!gunzip ap201001.json.gz
```
We convert this file with one JSON record on each line to a list of dictionaries.
```
import json

# Load the newline-delimited JSON (one record per line) into a list of dicts.
# A context manager closes the handle deterministically; the original left
# `rawfile` open for the rest of the session.
with open('ap201001.json') as rawfile:
    terms = [json.loads(line) for line in rawfile]
```
Here are the first ten records, showing the count of each term for each document and field. In this dataset, field only takes the values `body` or `title`.
```
# First ten records. Slicing is 0-based: the original `terms[1:10]` skipped
# the first record and showed only nine.
terms[:10]
```
Each record has four fields:
* `id`, with the identifier for the document;
* `field`, with the region of the document containing a given term;
* `term`, with the lower-cased term; and
* `count`, with the number of times each term occurred in that field and document.
## Computing Term Statistics
If we look at the most frequent terms for a given document, we mostly see common function words, such as `the`, `and`, and `of`. Start exploring the dataset by computing some of these basic term statistics. You can make your life easier using data frame libraries such as `pandas`, core python libraries such as `collections`, or just simple list comprehensions.
Feel free to define helper functions in your code before computing the statistics we're looking for.
```
# TODO: Print the 6 terms from document APW_ENG_20100101.0001 with the highest count.
# TODO: Print the 10 terms from all fields of document APW_ENG_20100102.0077 with the highest count.
# TODO: Print the 10 terms with the highest total count in the corpus.
```
Raw counts may not be the most informative statistic. One common improvement is to use *inverse document frequency*, the inverse of the proportion of documents that contain a given term.
```
# TODO: Compute the number of distinct documents in the collection.
N = 0
# TODO: Compute the number of distinct documents each term appears in
# and store in a dictionary.
df = dict()
# TODO: Print the relative document frequency of 'the',
# i.e., the number of documents that contain 'the' divided by N.
```
Empirically, we usually see better retrieval results if we rescale term frequency (within documents) and inverse document frequency (across documents) with the log function. Let the `tfidf` of term _t_ in document _d_ be:
```
tfidf(t, d) = log(count(t, d) + 1) * log(N / df(t))
```
Later in the course, we will show a probabilistic derivation of this quantity based on smoothing language models.
```
# TODO: Compute the tf-idf value for each term in each document.
# Take the raw term data and add a tfidf field to each record.
tfidf_terms = None
# TODO: Print the 20 term-document pairs with the highest tf-idf values.
```
## Plotting Term Distributions
Besides frequencies and tf-idf values within documents, it is often helpful to look at the distributions of word frequencies in the whole collection. In class, we talk about the Zipf distribution of word rank versus frequency and Heaps' Law relating the number of distinct words to the number of tokens.
We might examine these distributions to see, for instance, if an unexpectedly large number of very rare terms occurs, which might indicate noise added to our data.
```
# TODO: Compute a list of the distinct words in this collection and sort it in descending order of frequency.
# Thus frequency[0] should contain the word "the" and the count 62216.
frequency = []
# TODO: Plot a graph of the log of the rank (starting at 1) on the x-axis,
# against the log of the frequency on the y-axis. You may use the matplotlib
# or other library.
# TODO: Compute the number of tokens in the corpus.
# Remember to count each occurrence of each word. For instance, the 62,216
# instances of "the" will all count here.
ntokens = 0
# TODO: Compute the proportion of tokens made up by the top 10 most
# frequent words.
# TODO: Compute the proportion of tokens made up by the words that occur
# exactly once in this collection.
```
## Acquiring New Documents (for CS6200)
For this assignment so far, you've worked with data that's already been extracted, tokenized, and counted. In this final section, you'll briefly explore acquiring new data.
Find a collection of documents that you're interested in. For the statistics to be meaningful, this collection should have at least 1,000 words.
The format could be anything you can extract text from: HTML, PDF, MS PowerPoint, chat logs, etc.
The collection should be in a natural language, not mostly code or numerical data. It could be in English or in any other language.
The final project for this course will involve designing an information retrieval task on some dataset. You could use this exercise to think about what kind of data you might be interested in, although that is not required.
**TODO**: Write code to download and extract the text from the collection. Describe choices you make about what contents to keep.
```
# TODO: Data acquisition code here.
```
**TODO**: Write code to tokenize the text and count the resulting terms in each document. Describe your tokenization approach here.
Each term may also be associated with a field, such as `body` and `title` in the newswire collection above. Describe the different fields in your data.
```
# TODO: Tokenization code here.
```
**TODO**: Plot a graph of the log rank against log frequency for your collection, as you did for the sample collection above. What do you observe about the differences between the distributions in these two collections?
| github_jupyter |
# Pre_processing: players_model_data
In this notebook I created a dataset with the NBA players from 1999 to 2021 with the necessary variables to solve the optimization problem:
- Season
- Team
- PER
- DRtg
- Salary
- Position
- Unit (feature created according to the minutes played during the season)
## Merging salaries with general stats have some challenges:
- Players can change teams in the middle of a season while receiving the same salary. As a result, the entries in the player statistics dataset `stats` and the salary dataset `salaries` have different shapes.
- Players that do not play, or no longer play, can still receive a salary. For example, Monta Ellis was waived in 2017 but kept receiving salaries because the Indiana Pacers used the stretch provision on him.
- Players with accents and 'III' or 'Jr' suffix are different in the two datasets.
## Merging solutions:
- Players keep the same salary for the whole season, regardless of the team they are playing for.
- Players without stats will be dropped, as players that do not play are not relevant for the project. I am not interested in players that receive salaries after retiring or being waived.
- Fuzzy merging
# Virtual environment and packages
```
# Imports grouped at the top; the original called os.getcwd()/os.chdir()
# without ever importing os, which raises NameError.
import sys
import os

import numpy as np
import pandas as pd

from preprocessing_functions import remove_accents, create_unit_indicator
from thefuzz import fuzz, process

# Show which interpreter runs this notebook (confirms the virtual environment).
sys.executable
# Move to the project root so relative paths like 'raw_data/...' resolve.
print(os.getcwd())
path = 'c:/Users/pipeg/Documents/GitHub/nba-team-creator/'
os.chdir(path)
os.getcwd()
```
# Load raw data
```
# Raw season-level exports: advanced stats, per-100-possession stats,
# and salaries (one row per player per season).
adv_stats = pd.read_csv('raw_data/advanced.csv')
poss_stats = pd.read_csv('raw_data/100_possessions.csv')
salaries = pd.read_csv('raw_data/salaries.csv')
```
# Basic cleaning
```
# Remove combination of stats of different teams
# ("TOT" rows aggregate a traded player's whole season; keeping them would
# double-count players who appear once per team as well).
# NOTE(review): the filter uses column `Tm` while the selection uses `Team`
# -- presumably both exist in advanced.csv; confirm.
stats = adv_stats[adv_stats.Tm != "TOT"][['Player', 'Pos', 'Team', 'Season', 'MP', 'PER']]
# Create a unit indicator
# (project helper: classifies players into units by minutes played).
stats = create_unit_indicator(stats)
stats.drop(['MP', 'MP_Rank'], axis = 1, inplace = True)
# Add DRtg
# (default inner join: rows without a per-100-possessions match are dropped).
stats = stats.merge(poss_stats[['Player', 'Team', 'Season', 'DRtg']], on = ['Player', 'Team', 'Season'])
# Remove accents of the names and dots
stats['Player'] = remove_accents(stats['Player'])
salaries['Player'] = remove_accents(salaries['Player'])
# Visual inspection of both frames.
stats
salaries
# Sanity check: full-row duplicates (expected to be empty).
stats[stats.duplicated()]
salaries[salaries.duplicated()]
```
# Merging the two datasets with different players naming
## Must change by hand from salaries dataset
- Aleksandar Pavlovic to Sasha Pavlovic
- BJ Mullens to Byron Mullens
- Cam Reynolds to Cameron Reynolds
- Didier Ilunga-Mbenga to DJ Mbenga
- Hidayet Turkoglu to Hedo Turkoglu
- Iakovos Tsakalidis to Jake Tsakalidis
- Ishmael Smith to Ish Smith
- Jeffery Taylor to Jeff Taylor
- Jose Juan Barea to JJ Barea
- Joseph Young to Joe Young
- Kiwane Garris to Kiwane Lemorris Garris
- Moe Harkless to Maurice Harkless
- Maurice Williams to Mo Williams
- Patrick Mills to Patty Mills
- Predrag Stojakovic to Peja Stojakovic
- Radoslav Nesterovic to Rasho Nesterovic
- Raymond Spalding to Ray Spalding
- Sergey Monya to Sergei Monia
- Timothe Luwawu to Timothe Luwawu-Cabarrot
- Walter Tavares to Edy Tavares
```
# E.g. Timothe Luwawu-Cabarrot do not appear in salaries dataset
salaries[salaries.Player == 'Timothe Luwawu-Cabarrot']
# But he does in stats dataset
stats[stats.Player == 'Timothe Luwawu-Cabarrot']
# Changing some names by hand
# Maps salary-dataset spellings -> stats-dataset spellings so the exact
# merge on 'Player' can succeed.
dict_players = {
    'Aleksandar Pavlovic': 'Sasha Pavlovic',
    'BJ Mullens': 'Byron Mullens',
    'Cam Reynolds': 'Cameron Reynolds',
    'Didier Ilunga-Mbenga': 'DJ Mbenga',
    'Hidayet Turkoglu': 'Hedo Turkoglu',
    'Iakovos Tsakalidis': 'Jake Tsakalidis',
    'Ishmael Smith': 'Ish Smith',
    'Jeffery Taylor': 'Jeff Taylor',
    'Jose Juan Barea': 'JJ Barea',
    'Joseph Young': 'Joe Young',
    'Kiwane Garris': 'Kiwane Lemorris Garris',
    'Moe Harkless': 'Maurice Harkless',
    'Patrick Mills': 'Patty Mills',
    'Predrag Stojakovic': 'Peja Stojakovic',
    'Radoslav Nesterovic': 'Rasho Nesterovic',
    'Raymond Spalding': 'Ray Spalding',
    'Sergey Monya': 'Sergei Monia',
    'Timothe Luwawu': 'Timothe Luwawu-Cabarrot',
    'Walter Tavares': 'Edy Tavares',
    'KJ Martin': 'Kenyon Martin Jr',
    'Maurice Williams': 'Mo Williams',}
salaries['Player'].replace(dict_players, inplace= True)
# Jaren Jackson is wrongly named in salaries dataset
# NOTE(review): hard-coded row positions break if salaries.csv changes
# order -- confirm these indices against the current file.
salaries.at[[163,740, 1283], 'Player'] = 'Jaren Jackson Jr'
salaries.iloc[[163,740, 1283], :]
```
# Fuzzy merging using `thefuzz`
```
# Create a list of players of the 2 datasets
player_list_salaries = salaries['Player'].unique()
player_list_stats = stats['Player'].unique()
# Create a similarity dictionary between the 2 datasets
# keys: stats-name -> [(best-matching salary-name, similarity score 0-100)]
keys = {}
# Greedy algorithm, takes 2 min to complete the dictionary
for player in player_list_stats:
    keys[player] = ((process.extract(player, player_list_salaries, limit = 1)))
# NOTE(review): the next two expressions just peek at the last entry and
# discard the result -- presumably leftover debugging.
keys[player][0]
keys[player][0][1]
# How many names are a perfect match?
clean_names = []
for player in keys:
    if keys[player][0][1] == 100:
        clean_names += [keys[player][0][0]]
print("Matched names :", len(clean_names))
print("No-matched names :", len(player_list_stats)- len(clean_names))
print(str(round((len(clean_names)/len(player_list_stats)*100),2)), '% match between the players names')
# Print the mostly similar names (90 to 100)
# From the 90 benchmark, it matched well 29 out of the 32.
for player in keys:
    if 90 < keys[player][0][1] <100:
        print(player, keys[player][0])
# Creating a dictionary with the good matches
# (maps salary-name -> stats-name, i.e. the replacement direction).
players_matched = {}
for player in keys:
    if 90 < keys[player][0][1] <100:
        players_matched[keys[player][0][0]] = player
# Drop some wrongly matched names (e.g. 'Ervin Johnson' to 'Kevin Johnson')
wrong_matched = ['Brandon Knight', 'Ervin Johnson', 'Kenyon Martin', 'Jaren Jackson']
for i in wrong_matched:
    players_matched.pop(i, None)
players_matched
salaries[salaries.Player == 'Troy Brown Jr']
salaries['Player'].replace(players_matched, inplace=True)
```
# Merge
```
# Outer join keeps every Player x Season from both sides so we can audit
# what an inner join would drop.
outer_merge = stats.merge(salaries, on = ['Player', 'Season'], how = 'outer')
# Players without salary
outer_merge[outer_merge['Salary adjusted by inflation'].isnull()].sort_values(by = 'Unit', ascending = True)
# I will lose 272 Player by Season observations (normally residual players or players with no salary data in HypeHoops.com)
inner_merge = stats.merge(salaries, on = ['Player', 'Season'], how = 'inner')
inner_merge
```
# Save created data
```
# Persist the final Player x Season dataset consumed by the optimization model.
inner_merge.to_csv('out_data/players_model_data.csv', index = False)
```
# Examples of the challenges
```
# A player appears once per team per season, so duplicated names are expected.
stats[stats.Player.duplicated()]
# Matt Thomas: traded mid-season -> multiple stats rows for one salary.
stats[stats.Player == 'Matt Thomas']
salaries[salaries.Player == 'Matt Thomas']
# Roshown McLeod: salary rows exist for a season without stats.
stats[stats.Player == 'Roshown McLeod']
salaries[salaries.Player == 'Roshown McLeod']
# Monta Ellis: still paid after being waived (stretch provision).
stats[stats.Player == 'Monta Ellis'] .head()
salaries[salaries.Player == 'Monta Ellis'] .head()
```
- Players with accents and 'III' or 'Jr' suffix are different in the two datasets.
```
# Accented names: remove_accents (applied above) normalised 'Nikola Vučević'
# to 'Nikola Vucevic', so the accented query comes back empty.
stats[stats.Player == 'Nikola Vučević'] .head()
salaries[salaries.Player == 'Nikola Vučević'] .head()
salaries[salaries.Player == 'Nikola Vucevic'] .head()
# Suffixes: one source keeps the dot ('Gary Trent Jr.'), the other drops it.
stats[stats.Player == 'Gary Trent Jr.'] .head()
salaries[salaries.Player == 'Gary Trent Jr.'] .head()
salaries[salaries.Player == 'Gary Trent Jr'] .head()
# NOTE(review): `total_stats` is not defined in this notebook as shown --
# presumably an earlier name for the merged frame (inner_merge); confirm.
# Matt Thomas keeps the same salary for the Jazz and the Raptors, as he played for them within same season.
total_stats[total_stats.Player == 'Matt Thomas']
# Roshown McLeod in 2001/02 didn't play, so he has NaN values
total_stats[total_stats.Player == 'Roshown McLeod']
# Players with accent names like Nikola Vučević were merged correctly
total_stats[total_stats.Player == 'Nikola Vucevic'] .head()
# What about the "Junior names"?
total_stats[total_stats.Player == 'Gary Trent Jr'] .head()
total_stats.shape
total_stats[total_stats.Salary.isna()]
salaries[salaries.Player == 'Popeye Jones']
```
| github_jupyter |
#### New to Plotly?
Plotly's Python library is free and open source! [Get started](https://plot.ly/python/getting-started/) by downloading the client and [reading the primer](https://plot.ly/python/getting-started/).
<br>You can set up Plotly to work in [online](https://plot.ly/python/getting-started/#initialization-for-online-plotting) or [offline](https://plot.ly/python/getting-started/#initialization-for-offline-plotting) mode, or in [jupyter notebooks](https://plot.ly/python/getting-started/#start-plotting-online).
<br>We also have a quick-reference [cheatsheet](https://images.plot.ly/plotly-documentation/images/python_cheat_sheet.pdf) (new!) to help you get started!
#### Version Check
Note: `Facet Grids and Trellis Plots` are available in version <b>2.0.12+</b><br>
Run `pip install plotly --upgrade` to update your Plotly version
```
import plotly
# Check the installed version (facet grids need plotly >= 2.0.12).
plotly.__version__
```
#### Facet by Column
A `facet grid` is a generalization of a scatterplot matrix where we can "facet" a row and/or column by another variable. Given some tabular data, stored in a `pandas.DataFrame`, we can plot one variable against another to form a regular scatter plot, _and_ we can pick a third faceting variable to form panels along the rows and/or columns to segment the data even further, forming a bunch of panels. We can also assign a coloring rule or a heatmap based on a color variable to color the plot.
```
# NOTE(review): `plotly.plotly` (online mode) was moved to the separate
# `chart_studio` package in plotly 4 -- this cell requires plotly < 4.
import plotly.plotly as py
import plotly.figure_factory as ff
import pandas as pd
mpg = pd.read_table('https://raw.githubusercontent.com/plotly/datasets/master/mpg_2017.txt')
# One scatter panel per distinct value of `cyl`, laid out as columns.
fig = ff.create_facet_grid(
    mpg,
    x='displ',
    y='cty',
    facet_col='cyl',
)
py.iplot(fig, filename='facet by col')
```
#### Facet by Row
```
# NOTE(review): `plotly.plotly` requires plotly < 4 (now `chart_studio.plotly`).
import plotly.plotly as py
import plotly.figure_factory as ff
import pandas as pd
mpg = pd.read_table('https://raw.githubusercontent.com/plotly/datasets/master/mpg_2017.txt')
# Panels stacked as rows, one per value of `cyl`; `marker` styles all points.
fig = ff.create_facet_grid(
    mpg,
    x='displ',
    y='cty',
    facet_row='cyl',
    marker={'color': 'rgb(86, 7, 100)'},
)
py.iplot(fig, filename='facet by row')
```
#### Facet by Row and Column
```
# NOTE(review): `plotly.plotly` requires plotly < 4 (now `chart_studio.plotly`).
import plotly.plotly as py
import plotly.figure_factory as ff
import pandas as pd
mpg = pd.read_table('https://raw.githubusercontent.com/plotly/datasets/master/mpg_2017.txt')
# Two-way grid: rows faceted by `cyl`, columns by `drv`.
fig = ff.create_facet_grid(
    mpg,
    x='displ',
    y='cty',
    facet_row='cyl',
    facet_col='drv',
    marker={'color': 'rgb(234, 239, 155)'},
)
py.iplot(fig, filename='facet by row and col')
```
#### Color by Categorical Variable
```
# NOTE(review): `plotly.plotly` requires plotly < 4 (now `chart_studio.plotly`).
import plotly.plotly as py
import plotly.figure_factory as ff
import pandas as pd
mtcars = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/mtcars.csv')
# Color points by `cyl`; color_is_cat=True treats the numeric column as a
# categorical variable (discrete colors) rather than a continuous scale.
fig = ff.create_facet_grid(
    mtcars,
    x='mpg',
    y='wt',
    facet_col='cyl',
    color_name='cyl',
    color_is_cat=True,
)
py.iplot(fig, filename='facet - color by categorical variable')
```
#### Custom Colormap
```
# NOTE(review): `plotly.plotly` requires plotly < 4 (now `chart_studio.plotly`).
import plotly.plotly as py
import plotly.figure_factory as ff
import pandas as pd
tips = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/tips.csv')
# Explicit category -> RGB mapping for the `sex` color variable.
fig = ff.create_facet_grid(
    tips,
    x='total_bill',
    y='tip',
    color_name='sex',
    show_boxes=False,
    marker={'size': 10, 'opacity': 1.0},
    colormap={'Male': 'rgb(165, 242, 242)', 'Female': 'rgb(253, 174, 216)'}
)
py.iplot(fig, filename='facet - custom colormap')
```
#### Label Variable Name:Value
```
# NOTE(review): `plotly.plotly` requires plotly < 4 (now `chart_studio.plotly`).
import plotly.plotly as py
import plotly.figure_factory as ff
import pandas as pd
mtcars = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/mtcars.csv')
# 'name' label style renders panel titles as "variable: value".
fig = ff.create_facet_grid(
    mtcars,
    x='mpg',
    y='wt',
    facet_col='cyl',
    facet_col_labels='name',
    facet_row_labels='name',
)
py.iplot(fig, filename='facet - label variable name')
```
#### Custom Labels
```
# NOTE(review): `plotly.plotly` requires plotly < 4 (now `chart_studio.plotly`).
import plotly.plotly as py
import plotly.figure_factory as ff
import pandas as pd
mtcars = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/mtcars.csv')
fig = ff.create_facet_grid(
    mtcars,
    x='wt',
    y='mpg',
    facet_col='cyl',
    # '\\cdot' instead of the original '\cdot': '\c' is an invalid escape
    # sequence and raises a SyntaxWarning/DeprecationWarning on modern
    # Python. The resulting runtime string ('$2\cdot4 = 8$') is identical.
    facet_col_labels={4: '$2^2 = 4$', 6: '$\\frac{18}{3} = 6$', 8: '$2\\cdot4 = 8$'},
    marker={'color': 'rgb(240, 100, 2)'},
)
py.iplot(fig, filename='facet - custom labels')
```
#### Plot in 'ggplot2' style
To learn more about ggplot2, check out http://ggplot2.tidyverse.org/reference/facet_grid.html
```
# NOTE(review): `plotly.plotly` requires plotly < 4 (now `chart_studio.plotly`).
import plotly.plotly as py
import plotly.figure_factory as ff
import pandas as pd
tips = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/tips.csv')
# ggplot2=True applies the grey-panel ggplot2 visual theme.
fig = ff.create_facet_grid(
    tips,
    x='total_bill',
    y='tip',
    facet_row='sex',
    facet_col='smoker',
    marker={'symbol': 'circle-open', 'size': 10},
    ggplot2=True
)
py.iplot(fig, filename='facet - ggplot2 style')
```
#### Plot with 'scattergl' traces
```
# NOTE(review): `plotly.plotly` requires plotly < 4 (now `chart_studio.plotly`).
import plotly.plotly as py
import plotly.figure_factory as ff
import pandas as pd
mpg = pd.read_table('https://raw.githubusercontent.com/plotly/datasets/master/mpg_2017.txt')
# scattergl renders with WebGL -- useful for large point counts.
grid = ff.create_facet_grid(
    mpg,
    x='class',
    y='displ',
    trace_type='scattergl',
)
py.iplot(grid, filename='facet - scattergl')
```
#### Plot with Histogram Traces
```
# NOTE(review): `plotly.plotly` requires plotly < 4 (now `chart_studio.plotly`).
import plotly.plotly as py
import plotly.figure_factory as ff
import pandas as pd
tips = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/tips.csv')
# Histogram traces in a 2x2 grid faceted by sex and smoker.
fig = ff.create_facet_grid(
    tips,
    x='total_bill',
    y='tip',
    facet_row='sex',
    facet_col='smoker',
    trace_type='histogram',
)
py.iplot(fig, filename='facet - histogram traces')
```
#### Other Trace Types
Facet Grids support `scatter`, `scattergl`, `histogram`, `bar` and `box` trace types. More trace types coming in the future.
```
# NOTE(review): `plotly.plotly` requires plotly < 4 (now `chart_studio.plotly`).
import plotly.plotly as py
import plotly.figure_factory as ff
import pandas as pd
tips = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/tips.csv')
# Box traces only need one value axis, so just `y` is supplied.
fig = ff.create_facet_grid(
    tips,
    y='tip',
    facet_row='sex',
    facet_col='smoker',
    trace_type='box',
)
py.iplot(fig, filename='facet - box traces')
```
#### Reference
```
# Built-in documentation for the facet-grid factory.
help(ff.create_facet_grid)
# Publishing machinery for the plotly documentation site (not needed to
# reproduce the plots above).
! pip install git+https://github.com/plotly/publisher.git --upgrade
import publisher
publisher.publish(
    'facet-and-trellis-plots.ipynb', 'python/facet-plots/', 'Facet and Trellis Plots',
    'How to make Facet and Trellis Plots in Python with Plotly.',
    title = 'Python Facet and Trellis Plots | plotly',
    redirect_from ='python/trellis-plots/',
    has_thumbnail='true', thumbnail='thumbnail/facet-trellis-thumbnail.jpg',
    language='python',
    display_as='statistical', order=10.2)
```
| github_jupyter |
```
import pandas as pd
import numpy as np
import seaborn as sb
import matplotlib.pyplot as plt
%matplotlib inline
#load/create df
# Parse the raw State of the Union dump: addresses are separated by '***';
# within each split, element 2 = type, 3 = name, 4 = date, remainder = text.
# NOTE(review): Python 2-era notebook throughout (xrange, list-returning
# zip, chained pandas assignment); the file handle is never closed.
f = open('State+of+the+Union+Addresses+1970-2016.txt')
lines = f.readlines()
bigline = " ".join(lines)
stars = bigline.split('***')
splits = [s.split('\n') for s in stars[1:]]
tups = [(s[2], s[3], s[4], "".join(s[5:])) for s in splits]
speech_df = pd.DataFrame(tups)
#dirty reformatting
speech_df.columns = ['Type', 'Name', 'Date', 'Speech']
speech_df = speech_df.drop('Type', 1)
speech_df = speech_df[:][178:]
speech_df = speech_df.reset_index()
speech_df = speech_df.drop('index',1)
# Rows 36+ were parsed off-by-one: the name landed in 'Date' and the date at
# the head of 'Speech'; the hand-written shifts below move them back
# (date strings are 17 or 18 characters long depending on the row).
speech_df['Name'][36:] = speech_df['Date'][36:]
speech_df['Date'][36] = speech_df['Speech'][36][0:17]
speech_df['Date'][37] = speech_df['Speech'][37][0:17]
speech_df['Date'][38] = speech_df['Speech'][38][0:18]
speech_df['Date'][39] = speech_df['Speech'][39][0:17]
speech_df['Date'][40] = speech_df['Speech'][40][0:17]
speech_df['Date'][41] = speech_df['Speech'][41][0:17]
speech_df['Date'][42] = speech_df['Speech'][42][0:18]
speech_df['Date'][43] = speech_df['Speech'][43][0:17]
speech_df['Date'][44] = speech_df['Speech'][44][0:17]
speech_df['Date'][45] = speech_df['Speech'][45][0:17]
speech_df['Speech'][36] = speech_df['Speech'][36][18:]
speech_df['Speech'][37] = speech_df['Speech'][37][18:]
speech_df['Speech'][38] = speech_df['Speech'][38][19:]
speech_df['Speech'][39] = speech_df['Speech'][39][18:]
speech_df['Speech'][40] = speech_df['Speech'][40][18:]
speech_df['Speech'][41] = speech_df['Speech'][41][18:]
speech_df['Speech'][42] = speech_df['Speech'][42][19:]
speech_df['Speech'][43] = speech_df['Speech'][43][18:]
speech_df['Speech'][44] = speech_df['Speech'][44][18:]
speech_df['Speech'][45] = speech_df['Speech'][45][18:]
from sklearn.feature_extraction.text import CountVectorizer

# Bag-of-words counts over every speech, dropping English stop words.
count_vect = CountVectorizer(stop_words='english')
count_vect.fit(speech_df['Speech'])
X = count_vect.transform(speech_df['Speech'])
# (term, corpus-wide count) pairs.
# NOTE(review): get_feature_names() was removed in scikit-learn 1.2 in
# favour of get_feature_names_out(); kept here for this notebook's old stack.
freq = zip(count_vect.get_feature_names(), np.asarray(X.sum(axis=0)).ravel())
df = pd.DataFrame(freq)
df.columns = ['word', 'count']
#top words in entire corpus
# DataFrame.sort() was deprecated in pandas 0.17 and removed in 0.20;
# sort_values() is the long-supported equivalent with identical ordering.
df.sort_values('count', ascending = False)
#http://pages.stern.nyu.edu/~adamodar/New_Home_Page/datafile/histretSP.html
#import stock returns
returns = pd.read_csv('returns.csv')
#reformat to only include years: 69-87, 89-91, 93-00, 00-15
# NOTE(review): returns[24:32] and returns[31:] overlap at row 31 --
# confirm whether test3 should start at index 32.
test = pd.DataFrame(returns[0:19][:])
test1 = pd.DataFrame(returns[20:23][:])
test2 = pd.DataFrame(returns[24:32][:])
test3 = pd.DataFrame(returns[31:][:])
new_ret = pd.concat([test, test1, test2, test3])
new_ret = new_ret.reset_index()
new_ret = new_ret.drop('index', axis = 1)
# Strip the trailing '%' in one vectorized pass. The original per-element
# loop wrote through chained indexing (new_ret['S&P 500'][i] = ...), which
# modern pandas does not guarantee to persist.
new_ret['S&P 500'] = new_ret['S&P 500'].str.strip('%')
#append to speech df
speech_df['S&P_ret'] = new_ret['S&P 500'].astype(float)
#create wordcounts
# Per-speech (term, count) pairs aligned with the vectorizer vocabulary.
# (Python 2 zip returns a list, so each cell stores a materialised list.)
speech_df['word_counts'] = ""
for i in xrange(0,len(speech_df)):
    speech_df['word_counts'].iloc[i] = zip(count_vect.get_feature_names(), np.asarray(X[i].sum(axis=0)).ravel())
#list of words pertaining to economy
#pull out a list of indexes for words/similar words
#search each
# idx[j] collects the vocabulary positions of every term containing
# word_list[j] as a substring ('econom' matches 'economy', 'economic', ...).
word_list = ['econom', 'tax', 'spend', 'budget', 'business',
             'job', 'wealth', 'poor', 'recession', 'depression',
             'income', 'deficit', 'expand']
idx = []
for i in word_list:
    temp = []
    for each in xrange(0,len(df)):
        if df['word'].iloc[each].find(i) > -1:
            temp.append(each)
    idx.append(temp)
#create empty columns
for each in word_list:
    speech_df[each] = ""
#count words in column
# For every speech, sum the counts of all vocabulary entries collected for
# each word stem.
# BUG FIX: the original inner loop iterated over an undefined name
# `counts` (NameError); the per-stem index lists built above live in `idx`.
for word in xrange(0,len(word_list)):
    i = 0
    for speech in speech_df['word_counts']:
        total = 0.0
        for each in idx[word]:
            total = total + speech[each][1]
        speech_df[word_list[word]].iloc[i] = total
        i+=1
speech_df
# Correlate economy-term frequencies with same-year S&P 500 returns.
corr_df = speech_df[word_list].astype(float)
corr_df['S&P_returns'] = speech_df['S&P_ret']
corr_df.corr()
# Pairwise scatter matrix, saved to disk.
g = sb.PairGrid(corr_df)
g.map(plt.scatter)
plt.savefig('corr.png')
```
| github_jupyter |
# Введение
Данные интерактивные тетради основаны на языке Python.
Для выполнения кода выберите ячейку с кодом и нажмите `Ctrl + Enter`.
```
from platform import python_version
# Show the interpreter version so readers can verify compatibility.
print("Используемая версия Python:", python_version())
```
Ячейки подразумевают последовательное исполнение.
```
# Small list demonstrating sequential cell evaluation: indexing, then
# introspection with type() and help().
l = [1, 2, 3]
l[0]
type(l)
help(l)
```
## Математический аппарат
В этих интерактивных тетрадях используется математический аппарат, основанный на парах вектор-кватернион.
Вектор (`Vector`) представлен тремя числами, кватернион (`Quaternion`) - четырьмя.
Пара вектор-кватернион (`Transform`) состоит из вектора и кватерниона и описывает последовательные перемещение и поворот.
$$ T =
\begin{bmatrix}
[v_x, v_y, v_z] \\
[q_w, q_x, q_y, q_z]
\end{bmatrix}
$$
Математический аппарат расположен в файле [kinematics.py](../edit/kinematics.py)
### Vector
Вектор - тройка чисел, описывает перемещение:
$$ v = [v_x, v_y, v_z] $$
```
from kinematics import Vector
```
Создание вектора требует трех чисел:
```
v1 = Vector(1, 2, 3)
v2 = Vector(-2, 4, -3)
```
Вектора можно складывать поэлементно:
```
v1 + v2
```
А также умножать на скаляр:
```
2.5 * v1
```
Нулевой вектор создается через `Vector.zero()`:
```
Vector.zero()
```
### Quaternion
Кватернион - четверка чисел, описывает поворот:
$$ q = [q_w, q_x, q_y, q_z] $$
```
from kinematics import Quaternion
from numpy import pi
```
Кватернион создается из угла и оси поворота:
```
q1 = Quaternion.from_angle_axis(0.5 * pi, Vector(0, 0, 1))
q2 = Quaternion.from_angle_axis(0.5 * pi, Vector(1, 0, 0))
print(q1)
print(q2)
```
Перемножение кватернионов соответствует последовательному приложению поворотов, в данном случае - повороту вокруг оси, проходящей через точку `(1, 1, 1)` на угол 120 градусов:
```
q1 * q2
Quaternion.from_angle_axis(2 / 3 * pi, Vector(1, 1, 1).normalized())
```
Поворот вектора сокращен до оператора `*`:
```
q = Quaternion.from_angle_axis(pi / 2, Vector(0, 0, 1))
q * Vector(1, 2, 3)
```
Кватернион нулевого поворота создается `Quaternion.identity()`:
```
Quaternion.identity() * Vector(1, 2, 3)
```
### Transform
```
from kinematics import Transform
```
Пара вектор-кватернион собирается из вектора и кватерниона:
```
t1 = Transform(v1, q1)
t2 = Transform(v2, q2)
```
Пара состоит из смещения и поворота:
```
t1.translation
t1.rotation
```
Пара с нулевыми смещением и поворотом создается через `Transform.identity()`:
```
Transform.identity()
```
Суммирование двух пар описывет последовательное применение смещения - поворота - смещения - поворота:
```
t1 + t2
```
Суммирование пары и ветора описывает применение преобразования, записанного в паре к вектору:
```
t1 + Vector(1, 0, 0)
```
## Графика
Подключим магию для работы с графикой:
```
from matplotlib import pyplot as plt
from matplotlib import animation
import numpy as np
from IPython.display import HTML
import graphics
%matplotlib notebook
```
Отрисовка систем координат производится через `graphics.axis`.
Преобразование цепочки в отдельные массивы точек `X, Y, Z` производится через `graphics.chain_to_points`.
```
fig = plt.figure()
ax = fig.add_subplot(projection="3d")
ax.set_xlim([-3, 3]); ax.set_ylim([-3, 3]); ax.set_zlim([-3, 3]);
# World frame drawn at triple length, then the two chained transforms.
graphics.axis(ax, Transform.identity(), 3)
graphics.axis(ax, t1)
graphics.axis(ax, t1 + t2)
# Polyline through the origins of the chain of frames.
x, y, z = graphics.chain_to_points([Transform.identity(), t1, t1 + t2])
ax.plot(x, y, z)
fig.show()
```
## Анимация
Анимация будет сохраняться в переменную, например в `ani`, которую потом можно будет отобразить в виде видеоролика через `HTML(ani.to_jshtml())`.
Перед сохранением в виде ролика можно заранее повернуть сцену мышкой.
Обратите внимание что перерисовка каждого кадра требует работы ядра.
Для остановки нажмите кнопку выключения в правом верхнем углу трехмерной сцены.
```
fig = plt.figure()
ax = fig.add_subplot(projection="3d")
ax.set_xlim([-1, 1]); ax.set_ylim([-1, 1]); ax.set_zlim([0, 2 * pi])
# Empty 3-D line whose data is replaced on every animation frame.
l, = ax.plot([], [], [])
t = np.arange(1, 2 * pi, 0.1)
frames = 100
def animate(i):
    # Phase offset advances linearly with the frame index -> rotating helix.
    offs = i / frames * 2 * pi
    z = t
    # NOTE(review): assumes Quaternion.from_angle_axis accepts an array of
    # angles and that `q * Vector` broadcasts elementwise -- confirm against
    # kinematics.py.
    q = Quaternion.from_angle_axis(t + offs, Vector(0, 0, 1))
    v = q * Vector(1, 0, 0)
    x = v.x
    y = v.y
    l.set_data_3d(x, y, z)
ani = animation.FuncAnimation(
    fig,
    animate,
    frames=frames,
    interval=100
)
```
Не забудьте выключить пересчет модели кнопкой в правом верхнем углу трехмерной сцены.
```
HTML(ani.to_jshtml())
```
Полученный таким образом ролик можно сохранить в составе всей тетради и выкачать локальную копию через `File -> Download as -> Notebook (.ipynb)`.
## Символьные вычисления
Для работы с символьными вычислениями используется пакет `sympy`.
```
import sympy as sp
x = sp.symbols("x")
x
```
`sympy` позволяет описывать деревья вычислений:
```
v = sp.sin(x) ** 2 + sp.cos(x) ** 2
v
```
И упрощать их:
```
sp.simplify(v)
u = sp.cos(x) ** 2 - sp.sin(x) ** 2
u
sp.simplify(u)
```
Можно легко дифференцировать выражения:
```
t = sp.symbols("t")
f = sp.sin(t + 2 * x ** 2)
f
```
Производная по $t$:
```
sp.diff(f, t)
```
Производная по $x$:
```
sp.diff(f, x)
```
Для того, чтобы описать кватернион в системе `sympy`, нужно передать `sympy` (`sp`) как последний аргумент в `Quaternion.from_angle_axis`:
```
a, b, c = sp.symbols("a, b, c")
angle = sp.symbols("alpha")
# Passing sympy (sp) as the extra argument makes the quaternion symbolic.
q = Quaternion.from_angle_axis(angle, Vector(0, 0, 1), sp)
v = Vector(a, b, c)
rotated = q * v
# Rotation about z should leave the z component untouched and mix x/y
# through cos(alpha)/sin(alpha).
sp.simplify(rotated.x)
sp.simplify(rotated.y)
sp.simplify(rotated.z)
```
А еще можно решать уравнения:
```
alpha, beta = sp.symbols("alpha, beta")
# Planar 2-DOF chain: rotate by alpha about z, then translate beta along
# the rotated x axis.
t0 = Transform(
    Vector.zero(),
    Quaternion.from_angle_axis(alpha, Vector(0, 0, 1), sp)
)
t1 = t0 + Transform(
    Vector(beta, 0, 0),
    Quaternion.identity()
)
target_x = t1.translation.x
target_x
target_y = t1.translation.y
target_y
x, y = sp.symbols("x, y")
# Inverse kinematics: solve end-effector position == (x, y) for (alpha, beta).
solution = sp.solve(
    [
        sp.simplify(target_x) - x,
        sp.simplify(target_y) - y
    ],
    [
        alpha,
        beta
    ]
)
```
Первое решение для $\alpha$:
```
solution[0][0]
```
Первое решение для $\beta$:
```
solution[0][1]
```
Действительно, если подставить решение, например, в $y$, получим следующее:
```
sp.simplify(
t1.translation.y.replace(alpha, solution[0][0]).replace(beta, solution[0][1])
)
```
Для $x$ такой красоты (пока) не произойдет, придется упрощать вручную:
```
sp.simplify(
t1.translation.x.replace(alpha, solution[0][0]).replace(beta, solution[0][1])
)
```
Возможно стоит использовать свое собственное решение, например:
$$ \alpha = \tan^{-1}(y, x) $$
$$ \beta = \sqrt{x^2 + y^2} $$
```
# Closed-form IK candidate: angle from atan2, length from the Euclidean norm.
own_alpha = sp.atan2(y, x)
own_beta = sp.sqrt(x ** 2 + y ** 2)
# Substituting back should reproduce x and y exactly.
sp.simplify(t1.translation.x.replace(alpha, own_alpha).replace(beta, own_beta))
sp.simplify(t1.translation.y.replace(alpha, own_alpha).replace(beta, own_beta))
```
| github_jupyter |
## Quantal Response Equilibrium
#### Example: Entry/Exit Game
Consider the following Entry/Exit game:
| | | Firm 2| |
|------| ----- |:-----:| :-----:|
| | | Entry (E) | Exit (N) |
|Firm 1| Entry (E) | -3, -3| 5, 0 |
| | Exit (N) | 0, 5 | 0, 0 |
#### Quantal Response Function
Now, we consider quantal response of Firm i to Firm -i's strategy: $QR_{i}\left(\sigma_{-i}\right)\left(s_{i}\right)=\frac{\exp\left(u_{i}\left(s_{i},\,\sigma_{-i}\right)/\mu\right)}{\sum_{s_{i}^{\prime}}\exp\left(u_{i}\left(s_{i}^{\prime},\,\sigma_{-i}\right)/\mu\right)}$.
By letting $\sigma_i=p_iE+(1-p_i)N$, it is rewritten as
$p_{i}(p_{-i})=\frac{\exp\left(u_{i}\left(E,\,p_{-i}\right)/\mu\right)}{\exp\left(u_{i}\left(E,\,p_{-i}\right)/\mu\right)+\exp\left(u_{i}\left(N,\,p_{-i}\right)/\mu\right)}$, which is calculated by the following code.
```
import matplotlib.pyplot as plt
import numpy as np
%matplotlib inline
# Payoff matrix of the symmetric entry game: u_XY is own payoff when playing
# X against an opponent playing Y (E = Entry, N = Exit).
u_EE = -3
u_EN = 5
u_NE = 0
u_NN = 0

def u_Esigmaj(pj):
    """Expected payoff of Entry when the opponent enters with probability pj."""
    return pj * u_EE + (1 - pj) * u_EN

def u_Nsigmaj(pj):
    """Expected payoff of Exit when the opponent enters with probability pj."""
    return pj * u_NE + (1 - pj) * u_NN

def QR(pj, mu):
    """Logit quantal-response probability of Entry, given opponent entry
    probability pj and noise parameter mu (mu -> 0: best response;
    mu -> inf: uniform 1/2)."""
    weight_entry = np.exp(u_Esigmaj(pj) / mu)
    weight_exit = np.exp(u_Nsigmaj(pj) / mu)
    return weight_entry / (weight_entry + weight_exit)
```
You can see how this function varies depending on the level of $\mu$. (You can see that $p_i \rightarrow \frac{1}{2}$ as $\mu \rightarrow \infty$, and $p_i \rightarrow BR_i(p_{-i})$ as $\mu \rightarrow 0$)
```
pjs = np.arange(0, 1, 0.001)
fig = plt.figure(figsize = (4,4))
# One quantal-response curve per noise level mu: mu -> infinity flattens the
# response toward 1/2; mu -> 0 approaches the best-response step function.
# Labels use '\\mu' because '\m' is an invalid escape sequence and raises a
# SyntaxWarning on modern Python; the rendered text is unchanged.
for mu, col in zip([100, 10, 1, 0.01], ['C0', 'C1', 'C2', 'C3']):
    plt.plot(pjs, QR(pjs, mu), label = '$QR_2(p_1), \\mu $= {}'.format(mu), linestyle = 'dashed', color = col)
plt.xlabel('$p_1$')
plt.ylabel('$p_2$')
plt.legend(loc=(1,0))
plt.grid(True)
ax = plt.gca()
# Drop the top/right frame for a cleaner look.
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
```
#### Quantal Response Equilibrium
Finally, we can depict QRE as intersections of quantal responses in the following graphs.
In general, QRE does not coincide with NE of the original game. Furthermore, it varies depending on the level of $\mu$.
```
pjs = np.arange(0, 1, 0.01)
fig, axes = plt.subplots(1,4, figsize = (16,4))
# QRE at each noise level is the intersection of the two quantal responses;
# QR_1 is obtained by swapping the axes of the same curve.
# Labels use '\\mu' because '\m' is an invalid escape sequence and raises a
# SyntaxWarning on modern Python; the rendered text is unchanged.
for ax, mu, col in zip(axes, [100, 10, 1, 0.01], ['C0', 'C1', 'C2', 'C3']):
    ax.plot(pjs, QR(pjs, mu), label = '$QR_2(p_1), \\mu $= {}'.format(mu), linestyle = 'dashed', color = col)
    ax.plot(QR(pjs, mu), pjs, label = '$QR_1(p_2), \\mu $= {}'.format(mu), linestyle = 'dotted', color = col)
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.legend(loc='lower left')
    ax.grid(True)
    ax.set_xlabel('$p_1$')
    ax.set_ylabel('$p_2$')
```
| github_jupyter |
<a href="https://colab.research.google.com/github/SahityaRoy/AKpythoncodes/blob/main/Copy_of_Untitled22.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
%matplotlib inline
# Load the raw Quikr used-car listings and inspect each column before cleaning.
mpl.style.use('ggplot')
car=pd.read_csv('/content/quikr_car.csv')
car.head()
car.shape
car.info()
# Keep an untouched copy so the destructive cleaning steps can be re-run from scratch.
backup=car.copy()
car.year.unique()
car.kms_driven.unique()
car.Price.unique()
car.fuel_type.unique()
car.company.unique()
# Findings from the inspection above:
# names are pretty inconsistent
# names have company names attached to it
# some names are spam like 'Maruti Ertiga showroom condition with' and 'Well mentained Tata Sumo'
# company: many of the names are not of any company like 'Used', 'URJENT', and so on.
# year has many non-year values
# year is in object. Change to integer
# Price has Ask for Price
# Price has commas in its prices and is in object
# kms_driven has object values with kms at last.
# It has nan values and two rows have 'Petrol' in them
# fuel_type has nan values
# Cleaning Data: turn the free-text listing columns into typed columns
# (int year/price/kms, non-null fuel_type, trimmed names) and persist the result.
# Keep only rows whose 'year' is purely numeric, then cast to int.
car=car[car['year'].str.isnumeric()]
car['year']=car['year'].astype('int')
car.info()
# Drop rows without an actual price ("Ask For Price" placeholder).
car=car[car['Price']!='Ask For Price']
# Price has thousands separators and is stored as object -> strip commas, cast to int.
car['Price']=car['Price'].str.replace(',','').astype('int')
# kms_driven looks like "45,000 kms" -> keep the leading number, strip commas.
car['kms_driven']=car['kms_driven'].str.split().str.get(0).str.replace(',','')
# Drop NaNs and the two rows that contain 'Petrol' in kms_driven, then cast.
car=car[car['kms_driven'].str.isnumeric()]
car['kms_driven']=car['kms_driven'].astype(int)
# fuel_type has NaN values -> drop those rows.
car=car[~car['fuel_type'].isna()]
car.shape
# name and company had spammed data, but the previous filters removed those rows.
# Keep only the first three words of each car name.
car['name']=car['name'].str.split().str.slice(start=0,stop=3).str.join(' ')
# Reset the index of the final cleaned data.
car=car.reset_index(drop=True)
car.head()
# index=False: without it the row index is written as an extra unnamed column,
# which would show up as 'Unnamed: 0' when the cleaned CSV is read back.
car.to_csv('/content/Cleaned_Car_data.csv', index=False)
car.info()
car.describe(include='all')
# Remove one extreme price outlier (> 6,000,000) before modeling.
car=car[car['Price']<6000000]
car.company.unique()
import seaborn as sns
# Relationship between company and Price.
plt.subplots(figsize=(15,7))
ax=sns.boxplot(x='company',y='Price',data=car)
ax.set_xticklabels(ax.get_xticklabels(),rotation=40,ha='right')
plt.show()
# Checking relationship of Year with Price.
plt.subplots(figsize=(20,10))
ax=sns.swarmplot(x='year',y='Price',data=car)
ax.set_xticklabels(ax.get_xticklabels(),rotation=40,ha='right')
plt.show()
# Checking relationship of kms_driven with Price.
sns.relplot(x='kms_driven',y='Price',data=car,height=7,aspect=1.5)
# Checking relationship of Fuel Type with Price.
plt.subplots(figsize=(14,7))
sns.boxplot(x='fuel_type',y='Price',data=car)
# Relationship of Price with FuelType, Year and Company mixed.
ax=sns.relplot(x='company',y='Price',data=car,hue='fuel_type',size='year',height=7,aspect=2)
ax.set_xticklabels(rotation=40,ha='right')
# Extracting training data: features X and target y (Price).
X=car[['name','company','year','kms_driven','fuel_type']]
y=car['Price']
X.shape
y.shape
# Applying train test split.
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2)
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import OneHotEncoder
from sklearn.compose import make_column_transformer
from sklearn.pipeline import make_pipeline
from sklearn.metrics import r2_score
# Fit an OneHotEncoder on the FULL data so the pipeline knows every possible category.
ohe=OneHotEncoder()
ohe.fit(X[['name','company','fuel_type']])
# Column transformer: one-hot the categorical columns, pass numeric columns through.
column_trans=make_column_transformer((OneHotEncoder(categories=ohe.categories_),['name','company','fuel_type']),
                                     remainder='passthrough')
lr=LinearRegression()
pipe=make_pipeline(column_trans,lr)
pipe.fit(X_train,y_train)
y_pred=pipe.predict(X_test)
y_pred
# Checking r2_score.
r2_score(y_test,y_pred)
# Search over train_test_split random states for the split with the best r2
# (~0.92). NOTE(review): selecting a random_state by test-set r2 is optimistic
# model selection, not a generalization estimate.
scores=[]
for i in range(1000):
    X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.1,random_state=i)
    lr=LinearRegression()
    pipe=make_pipeline(column_trans,lr)
    pipe.fit(X_train,y_train)
    y_pred=pipe.predict(X_test)
    scores.append(r2_score(y_test,y_pred))
np.argmax(scores)
scores[np.argmax(scores)]
# NOTE(review): at this point `pipe` is the model from the LAST loop iteration
# (random_state=999), not yet the best one — it is refit below.
pipe.predict(pd.DataFrame(columns=X_test.columns,data=np.array(['Maruti Suzuki Swift','Maruti',2019,100,'Petrol']).reshape(1,5)))
# Refit on the split with the best score.
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.1,random_state=np.argmax(scores))
lr=LinearRegression()
pipe=make_pipeline(column_trans,lr)
pipe.fit(X_train,y_train)
y_pred=pipe.predict(X_test)
r2_score(y_test,y_pred)
# Persist the fitted pipeline for serving.
import pickle
pickle.dump(pipe,open('LinearRegressionModel.pkl','wb'))
pipe.predict(pd.DataFrame(columns=['name','company','year','kms_driven','fuel_type'],data=np.array(['Maruti Suzuki Swift','Maruti',2019,100,'Petrol']).reshape(1,5)))
# Inspect the categories the encoder inside the pipeline was configured with.
pipe.steps[0][1].transformers[0][1].categories[0]
```
| github_jupyter |
```
# Notebook bootstrap: route INFO logging to stdout, add the project source
# trees to sys.path, and configure matplotlib/seaborn for inline figures.
import logging
import importlib
importlib.reload(logging) # see https://stackoverflow.com/a/21475297/1469195
log = logging.getLogger()
log.setLevel('INFO')
import sys
logging.basicConfig(format='%(asctime)s %(levelname)s : %(message)s',
                    level=logging.INFO, stream=sys.stdout)
%%capture
import os
import site
# Machine-specific source checkouts (reversible / braindecode projects).
os.sys.path.insert(0, '/home/schirrmr/code/reversible/')
os.sys.path.insert(0, '/home/schirrmr/braindecode/code/braindecode/')
os.sys.path.insert(0, '/home/schirrmr/code/explaining/reversible//')
%load_ext autoreload
%autoreload 2
import numpy as np
import logging
log = logging.getLogger()
log.setLevel('INFO')
import sys
logging.basicConfig(format='%(asctime)s %(levelname)s : %(message)s',
                    level=logging.INFO, stream=sys.stdout)
import matplotlib
from matplotlib import pyplot as plt
from matplotlib import cm
%matplotlib inline
%config InlineBackend.figure_format = 'png'
matplotlib.rcParams['figure.figsize'] = (12.0, 1.0)
matplotlib.rcParams['font.size'] = 14
import seaborn
seaborn.set_style('darkgrid')
from reversible2.sliced import sliced_from_samples
from numpy.random import RandomState
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import copy
import math
import itertools
import torch as th
from braindecode.torch_ext.util import np_to_var, var_to_np
from reversible2.splitter import SubsampleSplitter
from reversible2.view_as import ViewAs
from reversible2.affine import AdditiveBlock
from reversible2.plot import display_text, display_close
from reversible2.high_gamma import load_file, create_inputs
from reversible2.high_gamma import load_train_test
th.backends.cudnn.benchmark = True
from reversible2.models import deep_invertible
# EEG channel names (10-20 system labels) used by the loading code below.
sensor_names = ['Fz',
                'FC3','FC1','FCz','FC2','FC4',
                'C5','C3','C1','Cz','C2','C4','C6',
                'CP3','CP1','CPz','CP2','CP4',
                'P1','Pz','P2',
                'POz']
# Load high-gamma EEG train/test sets for subject 4, and a second subject (5)
# as an out-of-distribution ("Other") set.
train_inputs, test_inputs = load_train_test(
    subject_id=4,
    car=True,
    n_sensors=22,
    final_hz=256,
    start_ms=500,
    stop_ms=1500,
    half_before=True,
    only_load_given_sensors=False,
)
test_dist_inputs, test_dist_inputs_2 = load_train_test(
    subject_id=5,
    car=True,
    n_sensors=22,
    final_hz=256,
    start_ms=500,
    stop_ms=1500,
    half_before=True,
    only_load_given_sensors=False,
)
# Reduce to at most 180 trials and channels 7:9 (two channels) per class.
train_less = [t[:180,7:9].clone().contiguous() for t in train_inputs]
test_less = [t[:180,7:9].clone().contiguous() for t in test_inputs]
test_dist_less = [t[:180,7:9].clone().contiguous() for t in test_dist_inputs]
# Zero out the second of the two kept channels in every set.
for t in train_less + test_less + test_dist_less:
    t.data[:,1] = 0
from reversible2.models import larger_model
from reversible2.distribution import TwoClassIndependentDist
import ot
from reversible2.ot_exact import get_matched_samples
from reversible2.model_and_dist import ModelAndDist, set_dist_to_empirical
from reversible2.util import flatten_2d
# Build the invertible model + per-class Gaussian latent distribution and
# the optimizer (higher lr for the distribution than for the network).
n_chans = train_less[0].shape[1]
n_time = train_less[0].shape[2]
n_chan_pad = 0
filter_length_time = 11
model = larger_model(n_chans, n_time, final_fft=True, kernel_length=11, constant_memory=False)
model.cuda()
from reversible2.models import add_dropout_before_convs
#add_dropout_before_convs(model,p_conv=0.3, p_full=0.3)
dist = TwoClassIndependentDist(np.prod(train_less[0].size()[1:]))
dist.cuda()
model_and_dist = ModelAndDist(model, dist)
# Initialize the latent distribution from the encoded training data.
model_and_dist.set_dist_to_empirical(train_less)
optim = th.optim.Adam([{'params': dist.parameters(), 'lr':1e-2},
                       {'params': list(model_and_dist.model.parameters()),
                        'lr': 1e-4}])
from reversible2.model_and_dist import create_empirical_dist
import pandas as pd
# Accumulates one row of metrics per evaluation epoch.
df = pd.DataFrame()
# NOTE(review): this cell appears to be a stray/broken scratch fragment of the
# inverse-Lipschitz penalty used inside the training loop below:
#  - `i_class` and `lip_threshold` are not defined until later cells,
#  - `th.rand()` is called without a size argument (raises TypeError) and
#    `perturbations` is never used afterwards.
# Kept verbatim; confirm whether it can be deleted.
samples = model_and_dist.dist.get_samples(i_class, 30).detach()
in_samples = model_and_dist.model.invert(samples)
perturbations = th.rand()
# Pairwise distances in latent space vs input space.
out_diffs = th.norm(samples.unsqueeze(0) - samples.unsqueeze(1), p=2, dim=2)
in_diffs = th.norm(flatten_2d(in_samples).unsqueeze(0) - flatten_2d(in_samples).unsqueeze(1), p=2, dim=2)
out_diffs = out_diffs.flatten()
in_diffs = in_diffs.flatten()
assert len(out_diffs.shape) == 1
assert len(in_diffs.shape) == 1
ratio = in_diffs / th.clamp(out_diffs, min=1e-9)
assert len(ratio.shape) == 1
lip_loss = th.mean(F.relu(ratio - lip_threshold) ** 2)
from reversible2.timer import Timer
from reversible2.distribution import TwoClassIndependentDist
# Training hyperparameters; class-1 tensors are cached for the periodic printouts.
i_class = 1
n_epochs = 201
class_ins = train_less[i_class].cuda()
test_ins = test_less[i_class].cuda()
test_dist_ins = test_dist_less[i_class].cuda()
# Std of the additive Gaussian input noise during training.
noise_factor = 1
# Inverse-Lipschitz penalty settings (the penalty is only logged below,
# lip_perturb_factor / lip_loss_factor are not used in this loop).
lip_threshold = 1.3
lip_perturb_factor = 0.1
lip_loss_factor = 1000
# Main training loop: maximum-likelihood training of the invertible model with
# fixed additive input noise; every 10 epochs log metrics (Lipschitz proxy,
# OT distance, NLL, accuracies) into `df`; every n_epochs//20 epochs print them.
for i_epoch in range(n_epochs):
    with Timer(verbose=False) as timer:
        if i_epoch > 0:
            model.train()
            optim.zero_grad()
            for i_class in range(2):
                class_ins = train_less[i_class].cuda()
                # NLL of noisy training inputs under the class-conditional flow.
                log_probs = model_and_dist.get_total_log_prob(
                    i_class, class_ins + (th.randn_like(class_ins)) * noise_factor)
                loss = -th.mean(log_probs)
                loss.backward()
            optim.step()
            model.eval()
    if i_epoch % 10 == 0:
        model.eval()
        with th.no_grad():
            # Inverse-Lipschitz proxy: input-space vs latent-space pairwise
            # distance ratios on sampled latents.
            # NOTE(review): uses i_class left over from the loop above (== 1).
            samples = model_and_dist.dist.get_samples(i_class, 30).detach()
            in_samples = model_and_dist.model.invert(samples)
            out_diffs = th.norm(samples.unsqueeze(0) - samples.unsqueeze(1), p=2, dim=2)
            in_diffs = th.norm(flatten_2d(in_samples).unsqueeze(0) - flatten_2d(in_samples).unsqueeze(1), p=2, dim=2)
            out_diffs = out_diffs.flatten()
            in_diffs = in_diffs.flatten()
            assert len(out_diffs.shape) == 1
            assert len(in_diffs.shape) == 1
            ratio = in_diffs / th.clamp(out_diffs, min=1e-9)
            assert len(ratio.shape) == 1
            lip_loss = th.mean(F.relu(ratio - lip_threshold) ** 2)
        epoch_row = {'lip_loss': lip_loss.item()}
        with th.no_grad():
            # Per-set OT distance (matched-sample transport cost) and NLL.
            for setname, inputs in (("Train", train_less), ("Test", test_less), ("Other", test_dist_less)):
                OTs = []
                nlls = []
                inputs = [i.cuda() for i in inputs]
                for i_class in range(2):
                    examples = model_and_dist.get_examples(i_class,len(inputs[i_class]) * 20)
                    matched_examples = get_matched_samples(flatten_2d(inputs[i_class]), flatten_2d(examples))
                    OT = th.mean(th.norm(flatten_2d(inputs[i_class]).unsqueeze(1) - matched_examples, p=2, dim=2))#
                    nll = -th.mean(model_and_dist.get_total_log_prob(i_class, inputs[i_class]))
                    OTs.append(OT.item())
                    nlls.append(nll.item())
                epoch_row[setname + '_OT'] = np.mean(OTs)
                epoch_row[setname + '_NLL'] = np.mean(nlls)
            # Classification accuracy of the learned (model) distribution.
            for setname, inputs in (("Train", train_less), ("Test", test_less)):
                corrects = []
                inputs =[i.cuda() for i in inputs]
                for i_class in range(2):
                    outs = model_and_dist.log_softmax(inputs[i_class].cuda())
                    pred_label = np.argmax(var_to_np(outs), axis=1)
                    correct = pred_label == i_class
                    corrects.append(correct)
                acc = np.mean(np.concatenate(corrects))
                epoch_row['model_' + setname + '_acc'] = acc
            # Same accuracies with empirical latent dists fit on Train/Combined/Test.
            for name, inputs in (("Train", train_less),
                                 ("Combined", [th.cat((train_less[i_class].cuda(),
                                                       test_less[i_class].cuda()), dim=0)
                                               for i_class in range(2)]),
                                 ("Test", test_less)):
                emp_dist = create_empirical_dist(model_and_dist.model, inputs)
                emp_model_dist = ModelAndDist(model_and_dist.model, emp_dist)
                with th.no_grad():
                    for setname, inner_inputs in (("Train", train_less), ("Test", test_less)):
                        inner_inputs =[i.cuda() for i in inner_inputs]
                        corrects = []
                        for i_class in range(2):
                            outs = emp_model_dist.log_softmax(inner_inputs[i_class].cuda())
                            pred_label = np.argmax(var_to_np(outs), axis=1)
                            correct = pred_label == i_class
                            corrects.append(correct)
                        acc = np.mean(np.concatenate(corrects))
                        epoch_row[name + '_' + setname + '_acc'] = acc
        # NOTE(review): DataFrame.append is deprecated (removed in pandas 2.0);
        # prefer collecting rows in a list and one pd.concat after the loop.
        df = df.append(epoch_row, ignore_index=True)
    if i_epoch % (n_epochs // 20) == 0:
        with th.no_grad():
            print("Epoch {:d} of {:d}".format(i_epoch, n_epochs))
            print("Runtime {:.1E} ms".format(timer.elapsed))
            text_strs = []
            # NOTE(review): these use the cached class-1 tensors from the setup cell.
            for setname, inputs in (("Train", class_ins), ("Test", test_ins), ("Other", test_dist_ins)):
                inputs = inputs.cuda()
                examples = model_and_dist.get_examples(1,len(inputs) * 20)
                matched_examples = get_matched_samples(flatten_2d(inputs), flatten_2d(examples))
                OT = th.mean(th.norm(flatten_2d(inputs).unsqueeze(1) - matched_examples, p=2, dim=2))#
                nll = -th.mean(model_and_dist.get_total_log_prob(i_class, inputs))
                text_strs.append("{:7s} NLL {:.1E}".format(setname, nll.item()))
                text_strs.append("{:7s} OT {:.1E}".format(setname, OT.item()))
            display_text("\n".join(text_strs))
            print("Actual Model")
            for setname, inputs in (("Train", train_less), ("Test", test_less)):
                inputs =[i.cuda() for i in inputs]
                corrects = []
                for i_class in range(2):
                    outs = model_and_dist.log_softmax(inputs[i_class].cuda())
                    pred_label = np.argmax(var_to_np(outs), axis=1)
                    correct = pred_label == i_class
                    corrects.append(correct)
                acc = np.mean(np.concatenate(corrects))
                print("{:6s} Accuracy {:.1f} ({:.1f}/{:.1f})".format(setname, acc * 100,
                                                                     np.mean(corrects[0]) * 100,
                                                                     np.mean(corrects[1]) * 100))
            for name, inputs in (("Train", train_less),
                                 ("Combined", [th.cat((train_less[i_class].cuda(),
                                                       test_less[i_class].cuda()), dim=0)
                                               for i_class in range(2)]),
                                 ("Test", test_less)):
                inputs =[i.cuda() for i in inputs]
                emp_dist = create_empirical_dist(model_and_dist.model, inputs)
                emp_model_dist = ModelAndDist(model_and_dist.model, emp_dist)
                print(name)
                with th.no_grad():
                    for setname, inner_inputs in (("Train", train_less), ("Test", test_less)):
                        corrects = []
                        inner_inputs =[i.cuda() for i in inner_inputs]
                        for i_class in range(2):
                            outs = emp_model_dist.log_softmax(inner_inputs[i_class].cuda())
                            pred_label = np.argmax(var_to_np(outs), axis=1)
                            correct = pred_label == i_class
                            corrects.append(correct)
                        acc = np.mean(np.concatenate(corrects))
                        print("{:6s} Accuracy {:.1f} ({:.1f}/{:.1f})".format(setname, acc * 100,
                                                                             np.mean(corrects[0]) * 100,
                                                                             np.mean(corrects[1]) * 100))
# Qualitative inspection: overlay matched generated samples on real class-1
# trials, plot latent stds, and compare real vs generated amplitude spectra.
examples = model_and_dist.get_examples(1,len(class_ins) * 20)
matched_examples = get_matched_samples(flatten_2d(class_ins), flatten_2d(examples))
fig, axes = plt.subplots(5,2, figsize=(16,12), sharex=True, sharey=True)
for ax, signal, matched in zip(axes.flatten(), class_ins, matched_examples):
    ax.plot(var_to_np(signal).squeeze().T)
    for ex in var_to_np(matched.view(len(matched), class_ins.shape[1], class_ins.shape[2])):
        ax.plot(ex[0], color=seaborn.color_palette()[0], lw=0.5, alpha=0.7)
        ax.plot(ex[1], color=seaborn.color_palette()[1], lw=0.5, alpha=0.7)
display_close(fig)
fig = plt.figure()
# Learned per-dimension stds of the class-1 latent distribution.
plt.plot(var_to_np(th.exp(model_and_dist.dist.class_log_stds)[1]))
display_close(fig)
examples = model_and_dist.get_examples(1,len(class_ins) * 20)
# Mean amplitude spectra (rfft magnitude) of generated vs real channel-0 signals.
fake_bps = np.abs(np.fft.rfft(var_to_np(examples[:,0]).squeeze()))
real_bps = np.abs(np.fft.rfft(var_to_np(class_ins[:,0]).squeeze()))
fig = plt.figure(figsize=(8,3))
plt.plot(np.fft.rfftfreq(256, 1/256.0), np.mean(real_bps, axis=0))
plt.plot(np.fft.rfftfreq(256, 1/256.0), np.mean(fake_bps, axis=0))
display_close(fig)
### split off 40 validation trials
```
# Split off the last 40 trials per class as a validation set, rebuild the
# model/distribution from scratch, and add a learnable log-noise factor with
# its own optimizer.
valid_less = [t[-40:] for t in train_less]
train_less = [t[:-40] for t in train_less]
n_chans = train_less[0].shape[1]
n_time = train_less[0].shape[2]
n_chan_pad = 0
filter_length_time = 11
model = larger_model(n_chans, n_time, final_fft=True, kernel_length=11, constant_memory=False)
model.cuda()
from reversible2.models import add_dropout_before_convs
#add_dropout_before_convs(model,p_conv=0.3, p_full=0.3)
dist = TwoClassIndependentDist(np.prod(train_less[0].size()[1:]))
dist.cuda()
model_and_dist = ModelAndDist(model, dist)
model_and_dist.set_dist_to_empirical(train_less)
optim = th.optim.Adam([{'params': dist.parameters(), 'lr':1e-2},
                       {'params': list(model_and_dist.model.parameters()),
                        'lr': 1e-4}])
# Log of the input-noise std, optimized on validation NLL in the loop below.
rand_log_noise_factor = th.zeros(1, requires_grad=True, device='cuda')
optim_noise = th.optim.Adam([
    {'params': [rand_log_noise_factor], 'lr':1e-2},])
from reversible2.timer import Timer
from reversible2.distribution import TwoClassIndependentDist
from reversible2.mixture import GaussianMixture
# Hyperparameters for the second run (learned noise level).
# NOTE(review): relies on `i_class` left over from earlier cells (== 1) — confirm.
n_epochs = 201
class_ins = train_less[i_class].cuda()
test_ins = test_less[i_class].cuda()
test_dist_ins = test_dist_less[i_class].cuda()
noise_factor = 1
lip_threshold = 1.3
lip_perturb_factor = 0.1
lip_loss_factor = 1000
# Second training loop: same ML training as before, but the input-noise std is
# exp(rand_log_noise_factor), itself optimized each epoch by minimizing the
# validation NLL under a Gaussian mixture centered at the training encodings.
for i_epoch in range(n_epochs):
    with Timer(verbose=False) as timer:
        if i_epoch > 0:
            model.train()
            optim.zero_grad()
            for i_class in range(2):
                class_ins = train_less[i_class].cuda()
                log_probs = model_and_dist.get_total_log_prob(
                    i_class, class_ins + (th.randn_like(class_ins)) * th.exp(rand_log_noise_factor))
                loss = -th.mean(log_probs)
                loss.backward()
            optim.step()
            # Update the noise level: mixture of Gaussians at the (frozen)
            # training encodings, std exp(rand_log_noise_factor), scored on
            # the (frozen) validation encodings.
            optim_noise.zero_grad()
            for i_class in range(2):
                train_ins = train_less[i_class].cuda()
                valid_ins = valid_less[i_class].cuda()
                with th.no_grad():
                    tr_outs = model_and_dist.model(train_ins).detach()
                log_stds = rand_log_noise_factor.repeat(tr_outs.shape)
                mixture = GaussianMixture(tr_outs, log_stds)
                with th.no_grad():
                    val_outs = model_and_dist.model(valid_ins).detach()
                nll = -th.mean(mixture.log_probs(val_outs))
                nll.backward()
            optim_noise.step()
            model.eval()
    if i_epoch % 10 == 0:
        model.eval()
        with th.no_grad():
            # Inverse-Lipschitz proxy (same as first loop); i_class is the
            # leftover loop variable (== 1).
            samples = model_and_dist.dist.get_samples(i_class, 30).detach()
            in_samples = model_and_dist.model.invert(samples)
            out_diffs = th.norm(samples.unsqueeze(0) - samples.unsqueeze(1), p=2, dim=2)
            in_diffs = th.norm(flatten_2d(in_samples).unsqueeze(0) - flatten_2d(in_samples).unsqueeze(1), p=2, dim=2)
            out_diffs = out_diffs.flatten()
            in_diffs = in_diffs.flatten()
            assert len(out_diffs.shape) == 1
            assert len(in_diffs.shape) == 1
            ratio = in_diffs / th.clamp(out_diffs, min=1e-9)
            assert len(ratio.shape) == 1
            lip_loss = th.mean(F.relu(ratio - lip_threshold) ** 2)
        epoch_row = {'lip_loss': lip_loss.item()}
        with th.no_grad():
            for setname, inputs in (("Train", train_less), ("Test", test_less), ("Other", test_dist_less)):
                OTs = []
                nlls = []
                inputs = [i.cuda() for i in inputs]
                for i_class in range(2):
                    examples = model_and_dist.get_examples(i_class,len(inputs[i_class]) * 20)
                    matched_examples = get_matched_samples(flatten_2d(inputs[i_class]), flatten_2d(examples))
                    OT = th.mean(th.norm(flatten_2d(inputs[i_class]).unsqueeze(1) - matched_examples, p=2, dim=2))#
                    nll = -th.mean(model_and_dist.get_total_log_prob(i_class, inputs[i_class]))
                    OTs.append(OT.item())
                    nlls.append(nll.item())
                epoch_row[setname + '_OT'] = np.mean(OTs)
                epoch_row[setname + '_NLL'] = np.mean(nlls)
            for setname, inputs in (("Train", train_less), ("Test", test_less)):
                corrects = []
                inputs =[i.cuda() for i in inputs]
                for i_class in range(2):
                    outs = model_and_dist.log_softmax(inputs[i_class].cuda())
                    pred_label = np.argmax(var_to_np(outs), axis=1)
                    correct = pred_label == i_class
                    corrects.append(correct)
                acc = np.mean(np.concatenate(corrects))
                epoch_row['model_' + setname + '_acc'] = acc
            for name, inputs in (("Train", train_less),
                                 ("Combined", [th.cat((train_less[i_class].cuda(),
                                                       test_less[i_class].cuda()), dim=0)
                                               for i_class in range(2)]),
                                 ("Test", test_less)):
                emp_dist = create_empirical_dist(model_and_dist.model, inputs)
                emp_model_dist = ModelAndDist(model_and_dist.model, emp_dist)
                with th.no_grad():
                    for setname, inner_inputs in (("Train", train_less), ("Test", test_less)):
                        inner_inputs =[i.cuda() for i in inner_inputs]
                        corrects = []
                        for i_class in range(2):
                            outs = emp_model_dist.log_softmax(inner_inputs[i_class].cuda())
                            pred_label = np.argmax(var_to_np(outs), axis=1)
                            correct = pred_label == i_class
                            corrects.append(correct)
                        acc = np.mean(np.concatenate(corrects))
                        epoch_row[name + '_' + setname + '_acc'] = acc
        # NOTE(review): DataFrame.append is deprecated (removed in pandas 2.0).
        df = df.append(epoch_row, ignore_index=True)
    if i_epoch % (n_epochs // 20) == 0:
        with th.no_grad():
            print("Epoch {:d} of {:d}".format(i_epoch, n_epochs))
            print("Runtime {:.1E} ms".format(timer.elapsed))
            text_strs = []
            for setname, inputs in (("Train", class_ins), ("Test", test_ins), ("Other", test_dist_ins)):
                inputs = inputs.cuda()
                examples = model_and_dist.get_examples(1,len(inputs) * 20)
                matched_examples = get_matched_samples(flatten_2d(inputs), flatten_2d(examples))
                OT = th.mean(th.norm(flatten_2d(inputs).unsqueeze(1) - matched_examples, p=2, dim=2))#
                nll = -th.mean(model_and_dist.get_total_log_prob(i_class, inputs))
                text_strs.append("{:7s} NLL {:.1E}".format(setname, nll.item()))
                text_strs.append("{:7s} OT {:.1E}".format(setname, OT.item()))
            display_text("\n".join(text_strs))
            print("Actual Model")
            for setname, inputs in (("Train", train_less), ("Test", test_less)):
                inputs =[i.cuda() for i in inputs]
                corrects = []
                for i_class in range(2):
                    outs = model_and_dist.log_softmax(inputs[i_class].cuda())
                    pred_label = np.argmax(var_to_np(outs), axis=1)
                    correct = pred_label == i_class
                    corrects.append(correct)
                acc = np.mean(np.concatenate(corrects))
                print("{:6s} Accuracy {:.1f} ({:.1f}/{:.1f})".format(setname, acc * 100,
                                                                     np.mean(corrects[0]) * 100,
                                                                     np.mean(corrects[1]) * 100))
            for name, inputs in (("Train", train_less),
                                 ("Combined", [th.cat((train_less[i_class].cuda(),
                                                       test_less[i_class].cuda()), dim=0)
                                               for i_class in range(2)]),
                                 ("Test", test_less)):
                inputs =[i.cuda() for i in inputs]
                emp_dist = create_empirical_dist(model_and_dist.model, inputs)
                emp_model_dist = ModelAndDist(model_and_dist.model, emp_dist)
                print(name)
                with th.no_grad():
                    for setname, inner_inputs in (("Train", train_less), ("Test", test_less)):
                        corrects = []
                        inner_inputs =[i.cuda() for i in inner_inputs]
                        for i_class in range(2):
                            outs = emp_model_dist.log_softmax(inner_inputs[i_class].cuda())
                            pred_label = np.argmax(var_to_np(outs), axis=1)
                            correct = pred_label == i_class
                            corrects.append(correct)
                        acc = np.mean(np.concatenate(corrects))
                        print("{:6s} Accuracy {:.1f} ({:.1f}/{:.1f})".format(setname, acc * 100,
                                                                             np.mean(corrects[0]) * 100,
                                                                             np.mean(corrects[1]) * 100))
# Same qualitative inspection as after the first run: matched samples overlaid
# on real trials, latent stds, and real vs generated amplitude spectra.
examples = model_and_dist.get_examples(1,len(class_ins) * 20)
matched_examples = get_matched_samples(flatten_2d(class_ins), flatten_2d(examples))
fig, axes = plt.subplots(5,2, figsize=(16,12), sharex=True, sharey=True)
for ax, signal, matched in zip(axes.flatten(), class_ins, matched_examples):
    ax.plot(var_to_np(signal).squeeze().T)
    for ex in var_to_np(matched.view(len(matched), class_ins.shape[1], class_ins.shape[2])):
        ax.plot(ex[0], color=seaborn.color_palette()[0], lw=0.5, alpha=0.7)
        ax.plot(ex[1], color=seaborn.color_palette()[1], lw=0.5, alpha=0.7)
display_close(fig)
fig = plt.figure()
plt.plot(var_to_np(th.exp(model_and_dist.dist.class_log_stds)[1]))
display_close(fig)
examples = model_and_dist.get_examples(1,len(class_ins) * 20)
fake_bps = np.abs(np.fft.rfft(var_to_np(examples[:,0]).squeeze()))
real_bps = np.abs(np.fft.rfft(var_to_np(class_ins[:,0]).squeeze()))
fig = plt.figure(figsize=(8,3))
plt.plot(np.fft.rfftfreq(256, 1/256.0), np.mean(real_bps, axis=0))
plt.plot(np.fft.rfftfreq(256, 1/256.0), np.mean(fake_bps, axis=0))
display_close(fig)
# Scratch/exploration cells: compare against a previously saved model, check
# train/swapped-class NLLs, classify test trials with the training-encoding
# Gaussian mixtures, and inspect latent means/stds.
other_model_dist = th.load('/data/schirrmr/schirrmr/reversible/experiments/dropout-weight-decay/243/model_and_dist.pkl')
out = model_and_dist.model(train_less[1][:1].cuda())
# NOTE(review): `outs` is only defined inside earlier evaluation loops —
# this `del` depends on notebook execution order.
del outs
class_ins = train_inputs[1].cuda()
fig = plt.figure()
plt.plot(var_to_np(th.exp(other_model_dist.dist.class_log_stds)[1]))
display_close(fig)
examples = other_model_dist.get_examples(1,len(class_ins) * 1)
fake_bps = np.abs(np.fft.rfft(var_to_np(examples[:,0]).squeeze()))
fig = plt.figure(figsize=(8,3))
# NOTE(review): real_bps here still comes from the previous cell's class_ins.
plt.plot(np.fft.rfftfreq(256, 1/256.0), np.mean(real_bps, axis=0))
plt.plot(np.fft.rfftfreq(256, 1/256.0), np.mean(fake_bps, axis=0))
display_close(fig)
# Training NLL per class under the learned noise level.
for i_class in range(2):
    class_ins = train_less[i_class].cuda()
    log_probs = model_and_dist.get_total_log_prob(
        i_class, class_ins + (th.randn_like(class_ins)) * th.exp(rand_log_noise_factor))
    loss = -th.mean(log_probs)
    print(loss)
# Gaussian mixtures centered at the training encodings, one per class.
mixtures = []
for i_class in range(2):
    train_ins = train_less[i_class].cuda()
    with th.no_grad():
        tr_outs = model_and_dist.model(train_ins).detach()
    log_stds = rand_log_noise_factor.repeat(tr_outs.shape)
    mixture = GaussianMixture(tr_outs, log_stds)
    mixtures.append(mixture)
# Classify test trials by the higher mixture log-probability.
for i_class in range(2):
    test_ins = test_less[i_class].cuda()
    with th.no_grad():
        te_outs = model_and_dist.model(test_ins).detach()
    log_probs = [m.log_probs(te_outs) for m in mixtures]
    print(np.mean(np.argmax(var_to_np(th.stack(log_probs, dim=1)), axis=1) == i_class))
th.exp(rand_log_noise_factor)
# Cross-class NLL: score each class's data under the OTHER class's model.
for i_class in range(2):
    class_ins = train_less[i_class].cuda()
    log_probs = model_and_dist.get_total_log_prob(
        1-i_class, class_ins + (th.randn_like(class_ins)) * th.exp(rand_log_noise_factor))
    loss = -th.mean(log_probs)
    print(loss)
# Latent means (learned dist) vs empirical means of encoded train/valid data.
plt.plot(var_to_np(dist.get_mean_std(0)[0]))
plt.plot(var_to_np(dist.get_mean_std(1)[0]))
tr_out = model_and_dist.model(train_less[0].cuda())
plt.plot(np.mean(var_to_np(tr_out), axis=0))
tr_out = model_and_dist.model(train_less[1].cuda())
plt.plot(np.mean(var_to_np(tr_out), axis=0))
with th.no_grad():
    for i_class in range(2):
        tr_out = model_and_dist.model(train_less[i_class].cuda())
        plt.plot(np.mean(var_to_np(tr_out), axis=0))
    for i_class in range(2):
        tr_out = model_and_dist.model(valid_less[i_class].cuda())
        plt.plot(np.mean(var_to_np(tr_out), axis=0))
plt.plot(var_to_np(dist.get_mean_std(0)[1]))
plt.plot(var_to_np(dist.get_mean_std(1)[1]))
dist.get_mean_std(1)
inputs =[i.cuda() for i in train_less]
emp_dist = create_empirical_dist(model_and_dist.model, inputs)
emp_model_dist = ModelAndDist(model_and_dist.model, emp_dist)
plt.plot(var_to_np(emp_model_dist.dist.get_mean_std(0)[0]))
plt.plot(var_to_np(emp_model_dist.dist.get_mean_std(1)[0]))
emp_model_dist.dist.get_mean_std(0)[0]
# NOTE(review): the three lines below are hyperparameter-sweep notes, not
# Python — this cell will raise a SyntaxError if executed as-is.
p 0,0.1,0.3,0.5
w decay 0 1e-4, 1e-3, 1e-2,
noise factor 1e-2, 5e-2, 1e-1
```
| github_jupyter |
```
import tensorflow as tf
import pandas as pd
import numpy as np
import pickle
from time import time
from utils.df_loader import load_compas_df
from utils.preprocessing import min_max_scale_numerical, remove_missing_values, inverse_dummy
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from alibi.explainers import CounterFactualProto, CounterFactual
from alibi_cf.utils import get_cat_vars_dict
# TensorFlow / pandas configuration and global seeding for reproducibility.
tf.get_logger().setLevel(40) # suppress deprecation messages
tf.compat.v1.disable_v2_behavior() # disable TF2 behaviour as alibi code still relies on TF1 constructs
tf.keras.backend.clear_session()
# Silence pandas SettingWithCopyWarning for the notebook-style slicing below.
pd.options.mode.chained_assignment = None
print('TF version: ', tf.__version__)
print('Eager execution enabled: ', tf.executing_eagerly()) # False
seed = 123
tf.random.set_seed(seed)
np.random.seed(seed)
# Load the COMPAS dataset, min-max scale numerics, one-hot encode categoricals,
# label-encode the target, and split into train/test arrays.
df, feature_names, numerical_cols, categorical_cols, columns_type, target_name, possible_outcomes = load_compas_df()
scaled_df, scaler = min_max_scale_numerical(df, numerical_cols)
scaled_df.head(5)
dummy_df = pd.get_dummies(scaled_df, columns= [ col for col in categorical_cols if col != target_name])
### We should have this amount of input features.
sum([len(scaled_df[col].unique()) for col in categorical_cols if col != target_name]) + len(numerical_cols)
# enconded_df, encoder_dict = label_encode(scaled_df, categorical_cols)
# Map each original categorical column to its one-hot column names.
cat_to_ohe_cat = {}
for c_col in categorical_cols:
    if c_col != target_name:
        cat_to_ohe_cat[c_col] = [ ohe_col for ohe_col in dummy_df.columns if ohe_col.startswith(c_col) and ohe_col != target_name]
ohe_feature_names = [ col for col in dummy_df.columns if col != target_name]
dummy_df.head(5)
# Sanity check: the one-hot frame round-trips back to categorical form.
inverse_dummy(dummy_df, cat_to_ohe_cat).head(5)
from sklearn.preprocessing import LabelEncoder
target_label_encoder = LabelEncoder()
dummy_df[target_name] = target_label_encoder.fit_transform(dummy_df[target_name])
# Reorder so the target is the last column.
dummy_df= dummy_df[ohe_feature_names + [target_name]]
train_df, test_df = train_test_split(dummy_df, train_size=.8, random_state=seed, shuffle=True)
X_train = np.array(train_df[ohe_feature_names])
y_train = np.array(train_df[target_name])
X_test = np.array(test_df[ohe_feature_names])
y_test = np.array(test_df[target_name])
### Train
# Binary classifier: small dense network with sigmoid output, plus tree models.
nn = model= tf.keras.models.Sequential(
    [
        tf.keras.layers.Dense(24,activation='relu'),
        tf.keras.layers.Dense(12,activation='relu'),
        tf.keras.layers.Dense(12,activation='relu'),
        tf.keras.layers.Dense(12,activation='relu'),
        tf.keras.layers.Dense(12,activation='relu'),
        tf.keras.layers.Dense(1),
        tf.keras.layers.Activation(tf.nn.sigmoid),
    ]
)
nn.compile(optimizer="Adam", loss='binary_crossentropy', metrics=['accuracy'])
nn.fit(X_train, y_train, batch_size=64, epochs=20, shuffle=True)
# Three models to explain: decision tree, random forest, and the NN above.
models = {
    "dt": DecisionTreeClassifier().fit(X_train,y_train),
    "rfc": RandomForestClassifier().fit(X_train,y_train),
    "nn": nn,
}
# Persist: sklearn models via pickle, Keras model via HDF5.
pickle.dump(models['dt'], open('./saved_models/dt.p', 'wb'))
pickle.dump(models['rfc'], open('./saved_models/rfc.p', 'wb'))
models['nn'].save('./saved_models/nn.h5',overwrite=True)
### Load
# Reload the saved models and run one prediction per model as a sanity check.
models = {}
models['dt'] = pickle.load(open('./saved_models/dt.p', 'rb'))
models['rfc'] = pickle.load(open('./saved_models/rfc.p', 'rb'))
models['nn'] = tf.keras.models.load_model('./saved_models/nn.h5')
## Initialise NN output shape as (None, 1) for tensorflow.v1
models['nn'].predict(np.zeros((2, X_train.shape[-1])))
example_data = X_test[0, :].reshape(1,-1)
dt_pred = models['dt'].predict(example_data)[0]
rfc_pred = models['rfc'].predict(example_data)[0]
# Keras returns probabilities of shape (1, 1); take the scalar.
nn_pred = models['nn'].predict(example_data)[0][0]
print(f"DT [{dt_pred}], RFC [{rfc_pred}], NN [{nn_pred}]")
```
# Alibi
## 1. Counterfactual Prototype
```
# Build the categorical-variable metadata alibi needs: for each categorical
# feature, the number of one-hot levels and the index of its first one-hot
# column in the training frame.
cat_vars_dict = get_cat_vars_dict(scaled_df, categorical_cols, feature_names, target_name)
cat_vars_dict
cat_feature_names = [ col for col in categorical_cols if col != target_name ]
cat_vars_idx_info = []
for cat_col in cat_feature_names:
    num_unique_v = len([ col for col in train_df.columns if col.startswith(f"{cat_col}_")])
    first_index = min([ list(train_df.columns).index(col) for col in train_df.columns if col.startswith(f"{cat_col}_")])
    cat_vars_idx_info.append({
        "col": cat_col,
        "num_unique_v": num_unique_v,
        "first_index": first_index
    })
cat_vars_idx_info
# alibi's ohe format: {first one-hot column index: number of levels}.
cat_vars_ohe = {}
for idx_info in cat_vars_idx_info:
    cat_vars_ohe[idx_info['first_index']] = idx_info['num_unique_v']
from alibi_cf import AlibiBinaryPredictWrapper
# Wrap each trained model so alibi sees a uniform binary-prediction interface.
alibi_wrapped = {
    'dt': AlibiBinaryPredictWrapper(models['dt']),
    'rfc': AlibiBinaryPredictWrapper(models['rfc']),
    'nn': AlibiBinaryPredictWrapper(models['nn']),
}
# NOTE(review): alibi's CounterFactualProto documents feature_range as (min, max);
# here np.ones is passed first and np.zeros second, i.e. (1, 0) — looks reversed.
# Confirm against the alibi docs / the feature scaling convention used upstream.
feature_range = (np.ones((1, len(feature_names))), np.zeros((1, len(feature_names))))
# Build and fit one counterfactual-prototype explainer per model.
cf_p_dict = {}
for k in alibi_wrapped.keys():
    cf_p_dict[k] = CounterFactualProto(
        alibi_wrapped[k].predict,   # black-box prediction function
        example_data.shape,         # shape of a single (1, n_features) instance
        cat_vars=cat_vars_ohe,      # {first one-hot column index: n_categories}
        feature_range=feature_range,
        max_iterations=500,
        ohe=True,                   # categorical vars are one-hot encoded
    )
    cf_p_dict[k].fit(X_train)       # fit prototypes on the training data
""
# Search counterfactuals for the first `num_instances` test rows,
# `num_cf_per_instance` attempts each, for every explainer.
num_instances = 5
num_cf_per_instance = 1
results = {}
for k in cf_p_dict.keys():
    results[k] = []
    print(f"Finding counterfactual for {k}")
    for idx, instance in enumerate(X_test[0:num_instances]):
        print(f"instance {idx}")
        example = instance.reshape(1, -1)
        for num_cf in range(num_cf_per_instance):
            print(f"CF {num_cf}")
            start_t = time()
            exp = cf_p_dict[k].explain(example)
            running_time = time() - start_t
            # The Keras model outputs a sigmoid probability: threshold at 0.5.
            # The sklearn models already return integer class labels.
            if k == 'nn':
                prediction = target_label_encoder.inverse_transform(
                    (models[k].predict(example)[0] > 0.5).astype(int))[0]
            else:
                prediction = target_label_encoder.inverse_transform(models[k].predict(example))[0]
            if exp.cf is not None and len(exp.cf) > 0:
                print("Found CF")
                # Decode the one-hot counterfactual back to original columns and
                # attach the predicted class. (The original nn/else branches here
                # were byte-identical, so they are merged into one path.)
                cf = inverse_dummy(pd.DataFrame(exp.cf['X'], columns=ohe_feature_names), cat_to_ohe_cat)
                cf.loc[0, target_name] = target_label_encoder.inverse_transform([exp.cf['class']])[0]
            else:
                print("CF not found")
                cf = None
            # Decode the query instance the same way, for side-by-side reporting.
            input_df = inverse_dummy(pd.DataFrame(example, columns=ohe_feature_names), cat_to_ohe_cat)
            input_df.loc[0, target_name] = prediction
            results[k].append({
                "input": input_df,
                "cf": cf,
                "exp": exp,
                "running_time": running_time,
                "ground_truth": target_label_encoder.inverse_transform([y_test[idx]])[0],
                "prediction": prediction,
            })
# Flatten each model's counterfactual results into one wide row per instance
# (scaled and original-unit views of both input and counterfactual), then save CSVs.
all_df = {}
for k in results.keys():
    all_data = []
    for i in range(len(results[k])):
        # Start from an empty single-row frame and join the pieces onto it.
        final_df = pd.DataFrame([{}])
        scaled_input_df = results[k][i]['input'].copy(deep=True)
        origin_columns = [f"origin_input_{col}" for col in scaled_input_df.columns]
        origin_input_df = scaled_input_df.copy(deep=True)
        scaled_input_df.columns = [f"scaled_input_{col}" for col in scaled_input_df.columns]
        # Undo the numeric scaling to recover original-unit values.
        origin_input_df[numerical_cols] = scaler.inverse_transform(origin_input_df[numerical_cols])
        origin_input_df.columns = origin_columns
        final_df = final_df.join([scaled_input_df, origin_input_df])
        if not results[k][i]['cf'] is None:
            # Same scaled/original-unit treatment for the counterfactual, when found.
            scaled_cf_df = results[k][i]['cf'].copy(deep=True)
            # scaled_cf_df.loc[0, target_name] = target_label_encoder.inverse_transform([scaled_cf_df.loc[0, target_name]])[0]
            origin_cf_columns = [f"origin_cf_{col}" for col in scaled_cf_df.columns]
            origin_cf_df = scaled_cf_df.copy(deep=True)
            scaled_cf_df.columns = [f"scaled_cf_{col}" for col in scaled_cf_df.columns]
            origin_cf_df[numerical_cols] = scaler.inverse_transform(origin_cf_df[numerical_cols])
            origin_cf_df.columns = origin_cf_columns
            final_df = final_df.join([scaled_cf_df, origin_cf_df])
        # final_df = final_df.join([scaled_input_df, origin_input_df, scaled_cf_df, origin_cf_df])
        final_df['running_time'] = results[k][i]['running_time']
        final_df['Found'] = "Y" if not results[k][i]['cf'] is None else "N"
        final_df['ground_truth'] = results[k][i]['ground_truth']
        final_df['prediction'] = results[k][i]['prediction']
        all_data.append(final_df)
    all_df[k] = pd.concat(all_data)
# NOTE(review): the file name says "compas" but this notebook appears to model
# Titanic/credit data — confirm the output path is intended.
for df_k in all_df.keys():
    all_df[df_k].to_csv(f"./results/proto_compas_{df_k}_result.csv")
```
| github_jupyter |
# Lesson 2: `if / else` and Functions
---
Sarah Middleton (http://sarahmid.github.io/)
This tutorial series is intended as a basic introduction to Python for complete beginners, with a special focus on genomics applications. The series was originally designed for use in GCB535 at Penn, and thus the material has been highly condensed to fit into just four class periods. The full set of notebooks and exercises can be found at http://github.com/sarahmid/python-tutorials
For a slightly more in-depth (but non-interactive) introduction to Python, see my Programming Bootcamp materials here: http://github.com/sarahmid/programming-bootcamp
Note that if you are viewing this notebook online from the github/nbviewer links, you will not be able to use the interactive features of the notebook. You must download the notebook files and run them locally with Jupyter/IPython (http://jupyter.org/).
---
## Table of Contents
1. Conditionals I: The "`if / else`" statement
2. Built-in functions
3. Modules
4. Test your understanding: practice set 2
# 1. Conditionals I: The "`if / else`" statement
---
Programming is a lot like giving someone instructions or directions. For example, if I wanted to give you directions to my house, I might say...
> Turn right onto Main Street
> Turn left onto Maple Ave
> **If** there is construction, continue straight on Maple Ave, turn right on Cat Lane, and left on Fake Street; **else**, cut through the empty lot to Fake Street
> Go straight on Fake Street until house 123
The same directions, but in code:
```
construction = False
print "Turn right onto Main Street"
print "Turn left onto Maple Ave"
if construction:
print "Continue straight on Maple Ave"
print "Turn right onto Cat Lane"
print "Turn left onto Fake Street"
else:
print "Cut through the empty lot to Fake Street"
print "Go straight on Fake Street until house 123"
```
This is called an "`if / else`" statement. It basically allows you to create a "fork" in the flow of your program based on a condition that you define. If the condition is `True`, the "`if`"-block of code is executed. If the condition is `False`, the `else`-block is executed.
Here, our condition is simply the value of the variable `construction`. Since we defined this variable to quite literally hold the value `False` (this is a special data type called a Boolean, more on that in a minute), this means that we skip over the `if`-block and only execute the `else`-block. If instead we had set `construction` to `True`, we would have executed only the `if`-block.
Let's define Booleans and `if / else` statements more formally now.
---
### [ Definition ] Booleans
- A Boolean ("bool") is a type of variable, like a string, int, or float.
- However, a Boolean is much more restricted than these other data types because it is only allowed to take two values: `True` or `False`.
- In Python, `True` and `False` are always capitalized and never in quotes.
- Don't think of `True` and `False` as words! You can't treat them like you would strings. To Python, they're actually interpreted as the numbers 1 and 0, respectively.
- Booleans are most often used to create the "conditional statements" used in if / else statements and loops.
---
### [ Definition ] The `if / else` statement
**Purpose:** creates a fork in the flow of the program based on whether a conditional statement is `True` or `False`.
**Syntax:**
if (conditional statement):
this code is executed
else:
this code is executed
**Notes:**
- Based on the Boolean (`True` / `False`) value of a conditional statement, either executes the `if`-block or the `else`-block
- The "blocks" are indicated by indentation.
- The `else`-block is optional.
- Colons are required after the `if` condition and after the `else`.
- All code that is part of the `if` or `else` blocks must be indented.
**Example:**
```
x = 5
if (x > 0):
print "x is positive"
else:
print "x is negative"
```
---
So what types of conditionals are we allowed to use in an `if / else` statement? Anything that can be evaluated as `True` or `False`! For example, in natural language we might ask the following true/false questions:
> is `a` True?
> is `a` less than `b`?
> is `a` equal to `b`?
> is `a` equal to "ATGCTG"?
> is (`a` greater than `b`) and (`b` greater than `c`)?
To ask these questions in our code, we need to use a special set of symbols/words. These are called the **logical operators**, because they allow us to form logical (true/false) statements. Below is a chart that lists the most common logical operators:

Most of these are pretty intuitive. The big one people tend to mess up on in the beginning is `==`. Just remember: a single equals sign means *assignment*, and a double equals means *is the same as/is equal to*. You will NEVER use a single equals sign in a conditional statement because assignment is not allowed in a conditional! Only `True` / `False` questions are allowed!
### `if / else` statements in action
Below are several examples of code using `if / else` statements. For each code block, first try to guess what the output will be, and then run the block to see the answer.
```
a = True
if a:
print "Hooray, a was true!"
a = True
if a:
print "Hooray, a was true!"
print "Goodbye now!"
a = False
if a:
print "Hooray, a was true!"
print "Goodbye now!"
```
> Since the line `print "Goodbye now!"` is not indented, it is NOT considered part of the `if`-statement.
Therefore, it is always printed regardless of whether the `if`-statement was `True` or `False`.
```
a = True
b = False
if a and b:
print "Apple"
else:
print "Banana"
```
> Since `a` and `b` are not both `True`, the conditional statement "`a and b`" as a whole is `False`. Therefore, we execute the `else`-block.
```
a = True
b = False
if a and not b:
print "Apple"
else:
print "Banana"
```
> By using "`not`" before `b`, we negate its current value (`False`), making `b` `True`. Thus the entire conditional as a whole becomes `True`, and we execute the `if`-block.
```
a = True
b = False
if not a and b:
print "Apple"
else:
print "Banana"
```
>"`not`" only applies to the variable directly in front of it (in this case, `a`). So here, `a` becomes `False`, so the conditional as a whole becomes `False`.
```
a = True
b = False
if not (a and b):
print "Apple"
else:
print "Banana"
```
> When we use parentheses in a conditional, whatever is within the parentheses is evaluated first. So here, the evaluation proceeds like this:
> First Python decides how to evaluate `(a and b)`. As we saw above, this must be `False` because `a` and `b` are not both `True`.
> Then Python applies the "`not`", which flips that `False` into a `True`. So then the final answer is `True`!
```
a = True
b = False
if a or b:
print "Apple"
else:
print "Banana"
```
> As you would probably expect, when we use "`or`", we only need `a` *or* `b` to be `True` in order for the whole conditional to be `True`.
```
cat = "Mittens"
if cat == "Mittens":
print "Awwww"
else:
print "Get lost, cat"
a = 5
b = 10
if (a == 5) and (b > 0):
print "Apple"
else:
print "Banana"
a = 5
b = 10
if ((a == 1) and (b > 0)) or (b == (2 * a)):
print "Apple"
else:
print "Banana"
```
>Ok, this one is a little bit much! Try to avoid complex conditionals like this if possible, since it can be difficult to tell if they're actually testing what you think they're testing. If you do need to use a complex conditional, use parentheses to make it more obvious which terms will be evaluated first!
### Note on indentation
- Indentation is very important in Python; it’s how Python tells what code belongs to which control statements
- Consecutive lines of code with the same indenting are sometimes called "blocks"
- Indenting should only be done in specific circumstances (if statements are one example, and we'll see a few more soon). Indent anywhere else and you'll get an error.
- You can indent by however much you want, but you must be consistent. Pick one indentation scheme (e.g. 1 tab per indent level, or 4 spaces) and stick to it.
### [ Check yourself! ] `if/else` practice
Think you got it? In the code block below, write an `if/else` statement to print a different message depending on whether `x` is positive or negative.
```
x = 6 * -5 - 4 * 2 + -7 * -8 + 3
# ******add your code here!*********
```
# 2. Built-in functions
---
Python provides some useful built-in functions that perform specific tasks. What makes them "built-in"? Simply that you don’t have to "import" anything in order to use them -- they're always available. This is in contrast to the *non*-built-in functions, which are packaged into modules of similar functions (e.g. "math") that you must import before using. More on this in a minute!
We've already seen some examples of built-in functions, such as `print`, `int()`, `float()`, and `str()`. Now we'll look at a few more that are particularly useful: `raw_input()`, `len()`, `abs()`, and `round()`.
---
### [ Definition ] `raw_input()`
**Description:** A built-in function that allows user input to be read from the terminal.
**Syntax:**
raw_input("Optional prompt: ")
**Notes**:
- The execution of the code will pause when it reaches the `raw_input()` function and wait for the user to input something.
- The input ends when the user hits "enter".
- The user input that is read by `raw_input()` can then be stored in a variable and used in the code.
- **Important: This function always returns a string, even if the user entered a number!** You must convert the input with int() or float() if you expect a number input.
**Examples:**
```
name = raw_input("Your name: ")
print "Hi there", name, "!"
age = int(raw_input("Your age: ")) #convert input to an int
print "Wow, I can't believe you're only", age
```
---
### [ Definition ] `len()`
**Description:** Returns the length of a string (also works on certain data structures). Doesn’t work on numerical types.
**Syntax:**
len(string)
**Examples:**
```
print len("cat")
print len("hi there")
seqLength = len("ATGGTCGCAT")
print seqLength
```
---
### [ Definition ] `abs()`
**Description:** Returns the absolute value of a numerical value. Doesn't accept strings.
**Syntax:**
abs(number)
**Examples:**
```
print abs(-10)
print abs(int("-10"))
positiveNum = abs(-23423)
print positiveNum
```
---
### [ Definition ] `round()`
**Description:** Rounds a float to the indicated number of decimal places. If no number of decimal places is indicated, rounds to zero decimal places.
**Syntax:**
round(someNumber, numDecimalPlaces)
**Examples:**
```
print round(10.12345)
print round(10.12345, 2)
print round(10.9999, 2)
```
---
If you want to learn more built in functions, go here: https://docs.python.org/2/library/functions.html
# 3. Modules
---
Modules are groups of additional functions that come with Python, but unlike the built-in functions we just saw, these functions aren't accessible until you **import** them. Why aren’t all functions just built-in? Basically, it improves speed and memory usage to only import what is needed (there are some other considerations, too, but we won't get into it here).
The functions in a module are usually all related to a certain kind of task or subject area. For example, there are modules for doing advanced math, generating random numbers, running code in parallel, accessing your computer's file system, and so on. We’ll go over just two modules today: `math` and `random`. See the full list here: https://docs.python.org/2.7/py-modindex.html
### How to use a module
Using a module is very simple. First you import the module. Add this to the top of your script:
import <moduleName>
Then, to use a function of the module, you prefix the function name with the name of the module (using a period between them):
<moduleName>.<functionName>
(Replace `<moduleName>` with the name of the module you want, and `<functionName>` with the name of a function in the module.)
The `<moduleName>.<functionName>` syntax is needed so that Python knows where the function comes from. Sometimes, especially when using user-created modules, there can be a function with the same name as a function that's already part of Python. Using this syntax prevents functions from overwriting each other or causing ambiguity.
---
### [ Definition ] The `math` module
**Description:** Contains many advanced math-related functions.
See full list of functions here: https://docs.python.org/2/library/math.html
**Examples:**
```
import math
print math.sqrt(4)
print math.log10(1000)
print math.sin(1)
print math.cos(0)
```
---
### [ Definition ] The `random` module
**Description:** contains functions for generating random numbers.
See full list of functions here: https://docs.python.org/2/library/random.html
**Examples:**
```
import random
print random.random() # Return a random floating point number in the range [0.0, 1.0)
print random.randint(0, 10) # Return a random integer between the specified range (inclusive)
print random.gauss(5, 2) # Draw from the normal distribution given a mean and standard deviation
# this code will output something different every time you run it!
```
# 4. Test your understanding: practice set 2
---
For the following blocks of code, **first try to guess what the output will be**, and then run the code yourself. These examples may introduce some ideas and common pitfalls that were not explicitly covered in the text above, ***so be sure to complete this section***.
The first block below holds the variables that will be used in the problems. Since variables are shared across blocks in Jupyter notebooks, you just need to run this block once and then those variables can be used in any other code block.
```
# RUN THIS BLOCK FIRST TO SET UP VARIABLES!
a = True
b = False
x = 2
y = -2
cat = "Mittens"
print a
print (not a)
print (a == b)
print (a != b)
print (x == y)
print (x > y)
print (x = 2)
print (a and b)
print (a and not b)
print (a or b)
print (not b or a)
print not (b or a)
print (not b) or a
print (not b and a)
print not (b and a)
print (not b) and a
print (x == abs(y))
print len(cat)
print cat + x
print cat + str(x)
print float(x)
print ("i" in cat)
print ("g" in cat)
print ("Mit" in cat)
if (x % 2) == 0:
print "x is even"
else:
print "x is odd"
if (x - 4*y) < 0:
print "Invalid!"
else:
print "Banana"
if "Mit" in cat:
print "Hey Mits!"
else:
print "Where's Mits?"
x = "C"
if x == "A" or "B":
print "yes"
else:
print "no"
x = "C"
if (x == "A") or (x == "B"):
print "yes"
else:
print "no"
```
> Surprised by the last two? It's important to note that when you want to compare a variable against multiple things, you only compare it to one thing at a time. Although it makes sense in English to say "is x equal to A or B?", in Python you must write: ((x == "A") or (x == "B")) to accomplish this. The same goes for e.g. ((x > 5) and (x < 10)) and anything along those lines.
> So why does the first version give the answer "yes"? Basically, anything that isn't `False` or the literal number 0 is considered to be `True` in Python. So when you say '`x == "A" or "B"`', this evaluates to '`False or True`', which is `True`!
| github_jupyter |
```
given = """
E N T E R L A S E R L A S E R R E S A L
L A S E R O B S I D I A N L A S E R G W
E R E S A L L A S E R L M R R E S A L A
L A S E R R E S A L A O E R L A S E R L
L L E M I T T E R S O S E L R E S A L L
A A M R E S A L E N A S L A L A S E R R
S S R E S A L R S L A R A S R E S A L E
E E L A S E R T R L L E S E L A S E R S
R R W A L L O E R R A S E R R E S A L A
L A S E R N S E L E S A R E B E R Y L L
L R L M E A S L A S E L N L L A S E R J
A E A O L A R A S A R O L R E S A L A E
S S S O L L E S E L T A L A S E R D S M
E A E N R A S E R S W R E S A L E L R I
R L R S E S A R D R L A S E R O W A E T
L R R T S E L O E T R E S A L N A S S T
A E E O A R O S O N L A S E R Y L E A E
S S S N L L A B A R R E S A L X L R L R
E A A E B L R E S A L L A S E R E X I T
R L L L A S E R E S L A S E R L A S E R
""".strip().replace(' ', '\t')
print(given)
print("""
Artisan
Bass speaker
Count (on)
Deprived (of)
Enjoyed together
False teeth
French cap
Gram or pound
Inform (of)
Like a birthday
Moon feature
More confident
Not capable
Place a value on
Put in danger
Race in stages
Recycle alternative
Regal
Renders undrinkable
Rip up
Suitor
Trash
Undo a wedding
Wave rider
""".replace('\t', ' '))
import forge
from puzzle.puzzlepedia import puzzlepedia
puzzle = puzzlepedia.parse("""
@ 6 9 7 11 3 1 8 4 10 5 12 2
* APPRAISE
* DENATURE
* IMPERIAL
* ?
* RELAY
* ?
* BEREFT
* CRAFTER
* REFUSE
* SURFER
* UNFIT
* WOOFER
""".lower())
import Numberjack
given = """
1 0 1 0 0
0 0 0 1 1
0 0 1 0 1
0 1 1 1 0
1 0 1 0 1
0 1 0 1 1
0 0 1 0 0
1 0 1 1 1
1 0 0 0 0
1 1 0 0 1
""".strip()
G = Numberjack.Matrix(5, 5, 'x')
model = Numberjack.Model()
options = G.row + G.col
def constrain(bits):
    # Require that `bits` matches at least one row or column of G exactly:
    # a disjunction over all candidate lines, each a conjunction of
    # per-cell equality constraints.
    model.add(Numberjack.Disjunction([
        Numberjack.Conjunction([bits_bit == option_bit for bits_bit, option_bit in zip(bits, option)])
        for option in options
    ]))
# NOTE(review): the `given` block above displays as space-separated, but this
# splits on tabs — confirm the source string really contains tabs, otherwise
# each line collapses into a single unmatched token.
for soln in given.split('\n'):
    bits = [bit == '1' for bit in soln.split('\t')]
    constrain(bits)
solver = model.load('Mistral')
solver.solve()
print(str(model))
print("is_sat", solver.is_sat())
print("is_unsat", solver.is_unsat())
for r in range(5):
row = []
for c in range(5):
row.append('%2d' % G[r][c].get_value())
print('\t'.join(row))
''.join("""
QLSRUXWGGSFCSUSLFWLSDAFHWFIXTEYYCMVT
OLTGUGHVHMVIXOUBWEZFTHYPTXCGPWGIKOLW
BFIIRBYXTOGQYFPYOZZXGMQJNUUGHNWNWATH
MYZFRGMSYUUFVYEDPTPHLJOOQLFTXARTWKOG
YNJSTTHPXNCRHOSHLKJYWVBTDCYNRGLVKMWN
HIURUSGNCIFNJWXWCPCTROYRQGELVRFWEAGT
RHTFGEWNRJGYGYKFVYEYMSPQDWKFEXUDPJRZ
EMDDUGUCGTGRGMFGRPVYIMVQUWVBGOAPKRGN
GHJTUTNYGWYWWKVTKRTRUNULGYFUYEXQRUCF
CNXJKUFKHYJVCITWIBGMGIIFRIARRIXAWJKI
SUNERQRUTFGRGFDPMJUWNBWDJWKCZJJYFVKN
WJSHPGNPLPLTULJHRSMKMAXNVZYRMYGHWNDU
ENUIWQDDLSATXFJQSZSTNPZIVERLCTLXKGTD
WZIQXWCYTGERRLFWMQMJSXILJAVGSTCRVQHW
VFJPUQNUFHEYMHNYNNLRJGJRQKXGFPRENLWR
GHDDFLLNUZHTXQRFTCXQMAJNYGILDPIZXUGK
""".strip().split('\n'))
example = {"ver":[[1,2,3],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0]],"hor":[[12,3],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0],[0]]}
ver_str = """
5 1 1 5
4 3 3 1 3 4 3 4
4 4 1 1 1 1 1 1 4 3
3 4 3 1 1 1 1 2 4 3
2 4 1 1 1 1 1 1 1 5 2
1 2 2 3 4 3 4 2 2 1
1 2 2 2 2 1
2 2 2 2
2 2 4 4 4 4 4 1 1 2 2
2 2 1 1 1 1 1 1 1 1 2 2 2 2
2 2 3 4 4 1 1 1 1 1 1 1 2 2
2 2 1 1 1 1 1 1 1 1 1 1 1 2 2
2 4 4 4 1 2 4 4 1 1 2 2
2 4 2 2
2 6 2 2
2 6 1 1 4 4 4 2 4
2 7 1 1 1 1 1 1 1 2 4
2 4 2 4 1 1 1 1 4 2 5
2 8 1 1 1 1 1 1 4 5
2 5 3 1 4 4 4 5 5
2 6 3 6 6
2 5 3 6 5
2 6 4 4 8 6
2 6 24 3 3 2
9 23 3 2 2
2 1 5 4 3 3 2 2
1 3 5 4 4 4 2 2 1
1 3 4 15 4 2 2 1
2 3 4 9 5 2 2 2
3 3 5 5 2 2 3
3 3 6 7 2 2 3
4 3 18 4 4
5 4 14 4 5
6 4 8 4 6
8 4 5 8
10 5 6 9
12 19 11
14 14 13
""".strip()
hor_str = """
00 00 0 0 00 00 00 00 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 1 1
00 00 0 0 00 00 00 00 0 0 0 0 0 0 0 1 1 0 0 0 0 0 1 0 0 0 0 1 1 0 0 0 0 1 1
00 00 0 0 00 00 00 00 0 0 0 0 0 0 0 1 1 1 0 0 0 1 1 1 0 0 0 1 1 0 0 0 5 1 1
00 00 0 0 00 00 00 00 0 2 1 1 0 0 0 1 1 1 0 0 0 5 1 1 0 0 5 1 1 3 0 0 1 1 5 1
00 00 0 0 00 00 00 00 0 1 1 1 0 0 5 1 1 1 0 0 5 1 1 3 3 0 5 1 1 5 0 0 1 1 1 3
00 00 0 3 01 00 00 00 6 1 1 1 5 0 5 1 1 3 0 0 5 1 1 5 1 0 5 1 1 5 0 5 3 1 1 1 0 1 1 0 2 06 15 3 0 1 2
00 00 4 3 02 00 03 16 5 4 3 3 3 4 3 1 1 5 2 2 2 2 2 3 3 3 3 2 2 2 2 2 1 1 1 3 5 3 4 1 5 10 03 8 3 3 3 04
07 05 5 7 13 03 19 08 7 5 4 4 4 3 4 5 5 5 2 1 2 2 2 2 2 2 2 2 2 2 2 1 4 3 4 3 3 4 4 9 9 03 04 5 5 7 8 05 05 7
09 15 5 3 03 18 04 03 3 2 2 2 2 2 3 2 3 2 3 3 2 3 3 3 3 3 3 3 3 3 3 3 3 4 3 4 4 2 3 4 4 02 02 2 3 3 3 11 15 9
12 10 9 7 06 05 04 04 3 3 2 2 1 1 2 2 2 2 3 2 2 2 2 2 2 2 2 2 2 2 2 3 2 2 3 2 3 1 1 2 2 03 04 4 5 6 7 09 10 12
""".strip()
# Parse each clue line into a list of ints (whitespace-separated).
ver = [[int(s) for s in line.split()] for line in ver_str.split('\n')]
hor_rows = [[int(s) for s in line.split()] for line in hor_str.split('\n')]
# Transpose the horizontal header rows into per-column clue lists, skipping
# zero entries (padding) and positions missing from shorter rows.
hor = []
for col_idx in range(max(map(len, hor_rows))):
    col = []
    hor.append(col)
    for row in hor_rows:
        if col_idx < len(row) and row[col_idx]:
            col.append(row[col_idx])
print(ver)
print(hor)
import json
json.dumps({'ver': ver, 'hor': hor})
import forge
from puzzle.puzzlepedia import puzzlepedia
puzzle = puzzlepedia.parse("""
@ 1 2 3 4 5 6 7 8 9 10
* BigOldBell
* BootyJuker
* CorkChoker
* FacePinner
* FakeTurtle
* LemurPoker
* PixieProng
* SheepStick
* SqueezeToy
* TinyStools
""".lower())
import Numberjack
model = Numberjack.Model()
# Decision variables: each may take one of the listed domain values.
x = {}
for name in 'abcd':
    x[name] = Numberjack.Variable([4, 5, 6, 8], name)
model.add(Numberjack.AllDiff(x.values()))
model.add(Numberjack.Sum([x[n] for n in 'abcd']) == 23)
base = 1
# NOTE(review): x only has keys 'a'..'d', yet this loop (and the print loop
# over 'abcdefgh' below) indexes x['e']..x['h'] — as written this raises
# KeyError. Presumably variables were meant to be created for 'abcdefgh'.
for n in 'efgh':
    base = x[n] * base
model.add(base == 42)
solver = model.load('Mistral')
solver.solve()
print("is_sat", solver.is_sat())
print("is_unsat", solver.is_unsat())
print(model)
for name in 'abcdefgh':
    print(x[name], x[name].get_value())
```
| github_jupyter |
```
! pip install h2o
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, roc_curve, auc
from sklearn import tree
import h2o
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
from h2o.estimators.random_forest import H2ORandomForestEstimator
from h2o.estimators.gbm import H2OGradientBoostingEstimator
from h2o.grid.grid_search import H2OGridSearch
from h2o.estimators.stackedensemble import H2OStackedEnsembleEstimator
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
from google.colab import drive
drive.mount('/content/drive')
#Initialize H2o
h2o.init()
# Reading dataset from Google drive
df_creditcarddata = h2o.import_file("/content/drive/My Drive/Colab Notebooks/UCI_Credit_Card.csv")
type(df_creditcarddata)
df_creditcarddata.head()
#check dimensions of the data
df_creditcarddata.shape
df_creditcarddata.columns
df_creditcarddata.types
#Count for the response var
df_creditcarddata['default.payment.next.month'].table()
df_creditcarddata = df_creditcarddata.drop(["ID"], axis = 1)
df_creditcarddata.head()
import pylab as pl
df_creditcarddata[['AGE','BILL_AMT1','BILL_AMT2','BILL_AMT3','BILL_AMT4','BILL_AMT5','BILL_AMT6', 'LIMIT_BAL']].as_data_frame().hist(figsize=(20,20))
pl.show()
# Cross-tabulate the response with each demographic attribute.
def _defaulters_by(col):
    # Count rows per (response, attribute) pair; na="all" keeps missing values.
    return df_creditcarddata.group_by(by=["default.payment.next.month", col]).count(na="all")

# Defaulters by Gender
default_by_gender = _defaulters_by("SEX")
print(default_by_gender.get_frame())
# Defaulters by education
default_by_education = _defaulters_by("EDUCATION")
print(default_by_education.get_frame())
# Defaulters by MARRIAGE
default_by_marriage = _defaulters_by("MARRIAGE")
print(default_by_marriage.get_frame())
# Convert the categorical predictors into H2O factors (enum columns) so the
# estimators treat them as categories rather than numerics.
categorical_predictors = ['SEX', 'EDUCATION', 'MARRIAGE',
                          'PAY_0', 'PAY_2', 'PAY_3', 'PAY_4', 'PAY_5', 'PAY_6']
for col in categorical_predictors:
    df_creditcarddata[col] = df_creditcarddata[col].asfactor()
df_creditcarddata.types
# Also, encode the binary response variable as a factor
df_creditcarddata['default.payment.next.month'] = df_creditcarddata['default.payment.next.month'].asfactor()
df_creditcarddata['default.payment.next.month'].levels()
# Predictor columns fed to every model; the response column is excluded.
predictors = (
    ['LIMIT_BAL', 'SEX', 'EDUCATION', 'MARRIAGE', 'AGE']
    + ['PAY_0'] + [f'PAY_{i}' for i in range(2, 7)]
    + [f'BILL_AMT{i}' for i in range(1, 7)]
    + [f'PAY_AMT{i}' for i in range(1, 7)]
)
target = 'default.payment.next.month'
# 70/30 train/test split of the H2O frame, seeded for reproducibility.
splits = df_creditcarddata.split_frame(ratios=[0.7], seed=1)
train, test = splits
```
**GENERALIZED LINEAR MODEL (Defaut Settings)**
STANDARDIZATION is enabled by default
GLM with default setting
GLM using lmbda search
GLM using Grid search
GLM WITH DEFAULT SETTINGS
Logistic Regression (Binomial Family)
H2O's GLM has the "family" argument, where the family is 'binomial' if the data is categorical 2 levels/classes or binary (Enum or Int).
```
# Baseline GLM: family='binomial' makes this a logistic regression on the
# binary response. 10-fold modulo CV with kept fold predictions — presumably
# so these base learners can feed a stacked ensemble later (the
# H2OStackedEnsembleEstimator import suggests so); confirm.
GLM_default_settings = H2OGeneralizedLinearEstimator(family='binomial', \
model_id='GLM_default',nfolds = 10, \
fold_assignment = "Modulo", \
keep_cross_validation_predictions = True)
GLM_default_settings.train(x = predictors, y = target, training_frame = train)
```
### **GLM WITH LAMBDA SEARCH**
The model parameter, lambda, controls the amount of regularization in a GLM model
Setting lambda_search = True gives us optimal lambda value for the regularization strength.
```
# GLM with lambda_search=True: H2O scans a path of lambda (regularization
# strength) values and retains the best-validating one.
GLM_regularized = H2OGeneralizedLinearEstimator(family='binomial', model_id='GLM', \
lambda_search=True, nfolds = 10, \
fold_assignment = "Modulo", \
keep_cross_validation_predictions = True)
GLM_regularized.train(x = predictors, y = target,training_frame = train)
```
### **GLM WITH GRID SEARCH**
GLM needs to find the optimal values of the regularization parameters α and λ
lambda: controls the amount of regularization, when set to 0 it gets disabled
alpha : controls the distribution between lasso & ridge regression penalties.
random grid search: H2o supports 2 types of grid search, cartesian and random. We make use of the random as the search criteria for faster computation
Stopping metric: we specify the metric used for early stopping. AUTO takes log loss as default
source: http://docs.h2o.ai/h2o/latest-stable/h2o-docs/data-science/algo-params/lambda.html
```
# Grid over the elastic-net mix (alpha) and regularization strength (lambda).
hyper_parameters = { 'alpha': [0.0001, 0.001, 0.01, 0.1],
'lambda': [0.001, 0.01, 0.1] }
# RandomDiscrete = random (not cartesian) search; stop early after 5 rounds
# without improvement on the default metric (AUTO -> logloss here).
search_criteria = { 'strategy': "RandomDiscrete",
'stopping_metric': "AUTO",
'stopping_rounds': 5}
GLM_grid_search = H2OGridSearch(H2OGeneralizedLinearEstimator(family='binomial', \
nfolds = 10, fold_assignment = "Modulo", \
keep_cross_validation_predictions = True),\
hyper_parameters, grid_id="GLM_grid", search_criteria=search_criteria)
GLM_grid_search.train(x= predictors,y= target, training_frame=train)
```
### Get the grid results, sorted by validation AUC
```
# Get the grid results, sorted by validation AUC
GLM_grid_sorted = GLM_grid_search.get_grid(sort_by='auc', decreasing=True)
GLM_grid_sorted
# Extract the best model from random grid search
Best_GLM_model_from_Grid = GLM_grid_sorted.model_ids[0]   # best model's id (a string)
#model performance
Best_GLM_model_from_Grid = h2o.get_model(Best_GLM_model_from_Grid)   # resolve id -> model object
print(Best_GLM_model_from_Grid)
```
### RF WITH DEFAULT SETTINGS
```
# Random forest with library defaults; CV settings mirror the GLM base learners.
# Build a RF model with default settings
RF_default_settings = H2ORandomForestEstimator(model_id = 'RF_D',\
nfolds = 10, fold_assignment = "Modulo", \
keep_cross_validation_predictions = True)
# Use train() to build the model
RF_default_settings.train(x = predictors, y = target, training_frame = train)
#Let's see the default parameters that RF model utilizes:
RF_default_settings.summary()
```
### RF with GRID SEARCH to extract the best model
```
# Random-forest hyperparameter grid: row/column subsampling, tree depth,
# and number of trees.
hyper_params = {'sample_rate':[0.7,0.9],
'col_sample_rate_per_tree': [0.8, 0.9],
'max_depth': [3, 5, 9],
'ntrees': [200, 300, 400]
}
# Grid search over the RF grid; early stopping on AUC after 5 stagnant rounds.
RF_grid_search = H2OGridSearch(H2ORandomForestEstimator(nfolds = 10, \
fold_assignment = "Modulo", \
keep_cross_validation_predictions = True, \
stopping_metric = 'AUC',stopping_rounds = 5), \
hyper_params = hyper_params, \
grid_id= 'RF_gridsearch')
# Use train() to start the grid search
RF_grid_search.train(x = predictors, y = target, training_frame = train)
# Sort the grid models by cross-validated AUC (best first)
RF_grid_sorted = RF_grid_search.get_grid(sort_by='auc', decreasing=True)
print(RF_grid_sorted)
# Extract the best model id from the grid, then fetch the model itself
Best_RF_model_from_Grid = RF_grid_sorted.model_ids[0]
# Model performance
Best_RF_model_from_Grid = h2o.get_model(Best_RF_model_from_Grid)
print(Best_RF_model_from_Grid)
# GBM with default settings (10-fold CV, Modulo folds, CV predictions kept
# so the model can serve as a stacked-ensemble base learner).
GBM_default_settings = H2OGradientBoostingEstimator(model_id = 'GBM_default', \
nfolds = 10, \
fold_assignment = "Modulo", \
keep_cross_validation_predictions = True)
# Use train() to build the model
GBM_default_settings.train(x = predictors, y = target, training_frame = train)
# GBM hyperparameter grid: learning rate, row/column subsampling, depth, trees.
hyper_params = {'learn_rate': [0.001,0.01, 0.1],
'sample_rate': [0.8, 0.9],
'col_sample_rate': [0.2, 0.5, 1],
'max_depth': [3, 5, 9],
'ntrees' : [100, 200, 300]
}
GBM_grid_search = H2OGridSearch(H2OGradientBoostingEstimator(nfolds = 10, \
fold_assignment = "Modulo", \
keep_cross_validation_predictions = True,\
stopping_metric = 'AUC', stopping_rounds = 5),
hyper_params = hyper_params, grid_id= 'GBM_Grid')
# Use train() to start the grid search
GBM_grid_search.train(x = predictors, y = target, training_frame = train)
# Sort and show the grid search results
GBM_grid_sorted = GBM_grid_search.get_grid(sort_by='auc', decreasing=True)
print(GBM_grid_sorted)
# Extract the best model id from the grid, then fetch the model itself
Best_GBM_model_from_Grid = GBM_grid_sorted.model_ids[0]
Best_GBM_model_from_Grid = h2o.get_model(Best_GBM_model_from_Grid)
print(Best_GBM_model_from_Grid)
```
### STACKED ENSEMBLE
```
# list the best models from each grid
all_models = [Best_GLM_model_from_Grid, Best_RF_model_from_Grid, Best_GBM_model_from_Grid]
# Set up Stacked Ensemble
ensemble = H2OStackedEnsembleEstimator(model_id = "ensemble", base_models = all_models, metalearner_algorithm = "deeplearning")
# NOTE: H2O's default metalearner is a GLM; metalearner_algorithm="deeplearning"
# above deliberately overrides it with a deep-learning metalearner.
ensemble.train(y = target, training_frame = train)
```
### Checking model performance of all base learners
```
# Evaluate every base learner on the held-out test frame so their AUCs can be
# compared against the stacked ensemble below.
# Checking the model performance for all GLM models built
model_perf_GLM_default = GLM_default_settings.model_performance(test)
model_perf_GLM_regularized = GLM_regularized.model_performance(test)
model_perf_Best_GLM_model_from_Grid = Best_GLM_model_from_Grid.model_performance(test)
# Checking the model performance for all RF models built
model_perf_RF_default_settings = RF_default_settings.model_performance(test)
model_perf_Best_RF_model_from_Grid = Best_RF_model_from_Grid.model_performance(test)
# Checking the model performance for all GBM models built
model_perf_GBM_default_settings = GBM_default_settings.model_performance(test)
model_perf_Best_GBM_model_from_Grid = Best_GBM_model_from_Grid.model_performance(test)
```
### Best AUC from the base learners
```
# Best AUC across all base-learner models, as evaluated on the test frame.
base_learner_performances = [
    model_perf_GLM_default,
    model_perf_GLM_regularized,
    model_perf_Best_GLM_model_from_Grid,
    model_perf_RF_default_settings,
    model_perf_Best_RF_model_from_Grid,
    model_perf_GBM_default_settings,
    model_perf_Best_GBM_model_from_Grid,
]
best_auc = max(perf.auc() for perf in base_learner_performances)
# Fixed: the original `format(best_auc)` call was redundant (format() with no
# spec is just str()) and, combined with the trailing space in the message,
# printed a double space before the value.
print("Best AUC out of all the models performed:", best_auc)
```
### AUC from the Ensemble Learner
```
# Eval ensemble performance on the test data
Ensemble_model = ensemble.model_performance(test)
# NOTE(review): the name is reused — after the next line Ensemble_model holds
# the scalar AUC, not the performance object.
Ensemble_model = Ensemble_model.auc()
print(Ensemble_model)
```
| github_jupyter |
```
import pandas as pd
import numpy as np
import math
import matplotlib.pyplot as plt
%matplotlib inline
import matplotlib as mpl
# Global plot styling for this notebook: large base font, with titles and
# axis/tick labels rendered at the relative 'small' size.
mpl.rcParams.update({
'font.size': 20.0,
'axes.titlesize': 'small',
'axes.labelsize': 'small',
'xtick.labelsize': 'small',
'ytick.labelsize': 'small'
})
import folium
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import LeaveOneOut
from sklearn.linear_model import Ridge
# County-level consumption choropleth, centred roughly on Iowa.
county_consumption = pd.read_csv('./data/county_consumption.csv')
# FIPS codes as strings so they match the 'feature.id' keys in the GeoJSON.
county_consumption['fips'] = county_consumption['fips'].astype(str)
folium_map = folium.Map(location=[41.95, -93.5], zoom_start=7)
# NOTE(review): Map.choropleth is a legacy API; newer folium versions use
# folium.Choropleth(...).add_to(folium_map) — confirm the installed version.
folium_map.choropleth(
geo_data=r'./data/county_geography.json',
data=county_consumption,
columns=['fips', 'county_consumption'],
key_on='feature.id',
fill_color='PuRd',
fill_opacity=0.9,
line_opacity=0.2,
legend_name='Sales (Millions of Liters)',
)
folium_map.save('./img/county_consumption_choropleth_map.html')
# Monthly aggregated price/demand series for one product.
df = pd.read_csv('./data/aggregated_data.csv')
df['Month'] = pd.to_datetime(df['Month'])
# Derived demand series: % change, log level, and log first-difference
# (the log diff approximates the % change for small changes).
df['DemandPctChange'] = df['Demand'].pct_change()
df['LogDemand'] = np.log(df['Demand'])
df['LogDemandDiff'] = df['LogDemand'].diff()
# Same three derived series for price.
df['PricePctChange'] = df['Price'].pct_change()
df['LogPrice'] = np.log(df['Price'])
df['LogPriceDiff'] = df['LogPrice'].diff()
# Plain numpy arrays for the slicing/plotting cells below.
Month = pd.to_datetime(df['Month']).values
Demand = pd.Series(df['Demand']).values
Price = pd.Series(df['Price']).values
DemandPctChange = pd.Series(df['DemandPctChange']).values
LogDemandDiff = pd.Series(df['LogDemandDiff']).values
PricePctChange = pd.Series(df['PricePctChange']).values
LogPriceDiff = pd.Series(df['LogPriceDiff']).values
LogDemand = pd.Series(df['LogDemand']).values
LogPrice = pd.Series(df['LogPrice']).values
# pct_change()/diff() leave a NaN in the first slot; zero it for plotting.
DemandPctChange[0] = 0
LogDemandDiff[0] = 0
PricePctChange[0] = 0
LogPriceDiff[0] = 0
# Price vs. demand scatter, one series (colour) per calendar year; each year
# contributes 12 monthly rows (2016 is a partial year).
fig, ax = plt.subplots(1, 1, sharex=True, figsize=(10, 6))
ax.plot(Price[0:12],Demand[0:12],'o',
Price[12:24],Demand[12:24],'o',
Price[24:36],Demand[24:36],'o',
Price[36:48],Demand[36:48],'o',
Price[48:],Demand[48:],'o');
ax.set_title('Price-Response - Jim Beam 1750ml')
ax.set_xlabel('Price ($)')
ax.set_ylabel('Demand (Bottles)')
ax.legend(['2012', '2013', '2014', '2015', '2016'], loc=1)
fig.tight_layout()
fig.savefig('./img/price_response.png')
# Same scatter on log-log axes (linear here implies constant elasticity).
fig, ax = plt.subplots(1, 1, sharex=True, figsize=(10, 6))
ax.plot(LogPrice[0:12],LogDemand[0:12],'o',
LogPrice[12:24],LogDemand[12:24],'o',
LogPrice[24:36],LogDemand[24:36],'o',
LogPrice[36:48],LogDemand[36:48],'o',
LogPrice[48:],LogDemand[48:],'o');
ax.set_title('Log-Log Price-Response - Jim Beam 1750ml')
ax.set_xlabel('Log Price')
ax.set_ylabel('Log Demand')
ax.legend(['2012', '2013', '2014', '2015', '2016'], loc=1)
fig.tight_layout()
fig.savefig('./img/log_log_price_response.png')
# Demand time series.
fig, ax = plt.subplots(1, 1, sharex=True, figsize=(10, 6))
ax.plot(Month, Demand);
ax.set_title('Historical Demand - Jim Beam 1750ml')
ax.set_ylabel('Demand (Bottles)')
ax.set_xlabel('Year')
fig.tight_layout()
fig.savefig('./img/historical_demand.png')
# % change and log-diff of demand overlaid (they should nearly coincide).
fig, ax = plt.subplots(1, 1, sharex=True, figsize=(10, 6))
ax.plot(Month, DemandPctChange*100, '-',
Month, LogDemandDiff*100, '-');
ax.set_title('Historical Change in Demand - Jim Beam 1750ml')
ax.set_ylabel('Change in Demand')
ax.set_xlabel('Year')
ax.legend(['% Change in Demand', 'Log Diff in Demand ~ %'], loc=1)
fig.tight_layout()
fig.savefig('./img/historical_change_in_demand.png')
# Distributions of demand, its % change, and its log diff.
fig, ax = plt.subplots(1, 3, sharey=True, figsize=(12, 4))
ax[0].hist(Demand)
ax[0].set_title('Demand Distribution')
ax[0].set_ylabel('Frequency')
ax[0].set_xlabel('Demand')
ax[1].hist(DemandPctChange)
ax[1].set_title('% Change Distribution')
ax[1].set_xlabel('Demand % Change')
ax[2].hist(LogDemandDiff)
ax[2].set_title('Log Diff Distribution')
ax[2].set_xlabel('Demand Log Diff')
fig.tight_layout()
fig.savefig('./img/demand_hist.png')
# Monthly demand, one line per year (seasonality view; 2016 has 8 months).
fig, ax = plt.subplots(1, 1, sharex=True, figsize=(10, 6))
ax.plot([i for i in range(1,13)],Demand[0:12],'-',
[i for i in range(1,13)],Demand[12:24],'-',
[i for i in range(1,13)],Demand[24:36],'-',
[i for i in range(1,13)],Demand[36:48],'-',
[i for i in range(1,9)],Demand[48:],'-');
ax.set_title('Monthly Demand - Jim Beam 1750ml')
ax.set_ylabel('Demand (Bottles)')
ax.set_xlabel('Month')
ax.legend(['2012', '2013', '2014', '2015', '2016'], loc=2)
fig.tight_layout()
fig.savefig('./img/monthly_demand.png')
# Monthly change in demand (log diff), one line per year.
fig, ax = plt.subplots(1, 1, sharex=True, figsize=(10, 6))
ax.plot([i for i in range(1,13)],LogDemandDiff[0:12]*100,'-',
[i for i in range(1,13)],LogDemandDiff[12:24]*100,'-',
[i for i in range(1,13)],LogDemandDiff[24:36]*100,'-',
[i for i in range(1,13)],LogDemandDiff[36:48]*100,'-',
[i for i in range(1,9)],LogDemandDiff[48:]*100,'-');
ax.set_title('Monthly Change in Demand - Jim Beam 1750ml')
ax.set_ylabel('Change in Demand (Log Diff ~ %)')
ax.set_xlabel('Month')
ax.legend(['2012', '2013', '2014', '2015', '2016'], loc=3)
fig.tight_layout()
fig.savefig('./img/monthly_change_in_demand.png')
# Price time series.
fig, ax = plt.subplots(1, 1, sharex=True, figsize=(10, 6))
ax.plot(Month, Price);
ax.set_title('Historical Price - Jim Beam 1750ml')
ax.set_ylabel('Price ($)')
ax.set_xlabel('Year')
fig.tight_layout()
fig.savefig('./img/historical_price.png')
# % change and log-diff of price overlaid.
fig, ax = plt.subplots(1, 1, sharex=True, figsize=(10, 6))
ax.plot(Month, PricePctChange*100, '-',
Month, LogPriceDiff*100, '-');
ax.set_title('Historical Change in Price - Jim Beam 1750ml')
ax.set_ylabel('Change in Price')
ax.set_xlabel('Year')
ax.legend(['% Change in Price', 'Log Diff in Price ~ %'], loc=1)
fig.tight_layout()
fig.savefig('./img/historical_change_in_price.png')
# Distributions of price, its % change, and its log diff.
fig, ax = plt.subplots(1, 3, sharey=True, figsize=(12, 4))
ax[0].hist(Price)
ax[0].set_title('Price Distribution')
ax[0].set_ylabel('Frequency')
ax[0].set_xlabel('Price')
ax[1].hist(PricePctChange)
ax[1].set_title('% Change Distribution')
ax[1].set_xlabel('Price % Change')
ax[2].hist(LogPriceDiff)
ax[2].set_title('Log Diff Distribution')
ax[2].set_xlabel('Price Log Diff')
fig.tight_layout()
fig.savefig('./img/price_hist.png')
# Monthly price, one line per year.
fig, ax = plt.subplots(1, 1, sharex=True, figsize=(10, 6))
ax.plot([i for i in range(1,13)],Price[0:12],'-',
[i for i in range(1,13)],Price[12:24],'-',
[i for i in range(1,13)],Price[24:36],'-',
[i for i in range(1,13)],Price[36:48],'-',
[i for i in range(1,9)],Price[48:56],'-');
ax.set_title('Monthly Price - Jim Beam 1750ml')
ax.set_ylabel('Price ($)')
ax.set_xlabel('Month')
ax.legend(['2012', '2013', '2014', '2015', '2016'], loc=4)
fig.tight_layout()
fig.savefig('./img/monthly_price.png')
# Monthly change in price (log diff), one line per year.
fig, ax = plt.subplots(1, 1, sharex=True, figsize=(10, 6))
ax.plot([i for i in range(1,13)],LogPriceDiff[0:12]*100,'-',
[i for i in range(1,13)],LogPriceDiff[12:24]*100,'-',
[i for i in range(1,13)],LogPriceDiff[24:36]*100,'-',
[i for i in range(1,13)],LogPriceDiff[36:48]*100,'-',
[i for i in range(1,9)],LogPriceDiff[48:56]*100,'-');
ax.set_title('Monthly Change in Price - Jim Beam 1750ml')
ax.set_ylabel('Change in Price (Log Diff ~ %)')
ax.set_xlabel('Month')
ax.legend(['2012', '2013', '2014', '2015', '2016'], loc=2)
fig.tight_layout()
fig.savefig('./img/monthly_change_in_price.png')
def lin_reg_errors(y_true, y_pred):
    """Return [MSE, RMSE, R2] for the given true/predicted values."""
    mse = mean_squared_error(y_true, y_pred)
    # RMSE is just the square root of MSE; R2 is the coefficient of
    # determination.
    return [mse, math.sqrt(mse), r2_score(y_true, y_pred)]
def acc_res_plots(y_true, y_pred):
    """Draw accuracy (predicted vs. actual) and residual panels.

    Returns the (figure, axes) pair so the caller can save or extend it.
    """
    fig, axes = plt.subplots(2, 1, sharex=True, figsize=(10, 12))
    acc_ax, res_ax = axes
    # Accuracy panel: predicted demand on x, actual demand on y.
    acc_ax.scatter(y_pred, y_true);
    acc_ax.set_title('Accuracy')
    acc_ax.set_ylabel('Actual Demand (Bottles)')
    # Residual panel: prediction errors against predicted demand.
    res_ax.scatter(y_pred, y_true - y_pred);
    res_ax.set_title('Residuals')
    res_ax.set_ylabel('Residual (Bottles)')
    res_ax.set_xlabel('Predicted Demand (Bottles)')
    # Reference lines drawn last so they sit on top of the scatters:
    # perfect-prediction diagonal and the zero-residual line.
    lo = np.amin(y_pred)
    hi = np.amax(y_pred)
    acc_ax.plot([lo, hi], [lo, hi], 'k--')
    res_ax.plot([lo, hi], [0, 0], 'k--')
    fig.tight_layout()
    return (fig, axes)
def loocv(X, y, model=None):
    """Leave-one-out cross-validation of a linear model.

    For each fold, fits `model` (default: a fresh LinearRegression) on the
    standardized training rows and predicts the single held-out row.

    Returns a tuple of numpy arrays: (betas, errors, predictions), where
    `errors` rows are [MSE, RMSE, R2] computed on the exponentiated
    (original bottle) scale, since y is log demand.
    """
    # Fixed: the original signature used `model=LinearRegression()`, a mutable
    # default evaluated once at def time — every call omitting `model` shared
    # (and refit) the same estimator instance.  Use the None-sentinel idiom.
    if model is None:
        model = LinearRegression()
    leave_one_out = LeaveOneOut()
    predictions = []
    betas = []
    errors = []
    for train_index, test_index in leave_one_out.split(X):
        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]
        # Scale with statistics from the training rows only (no leakage).
        standard_scaler = StandardScaler()
        standard_scaler.fit(X_train)
        X_train_std = standard_scaler.transform(X_train)
        X_test_std = standard_scaler.transform(X_test)
        # Fit the linear model on this fold.
        model.fit(X_train_std, y_train)
        # Record the fitted coefficients.
        betas.append(list(np.squeeze(np.asarray(model.coef_))))
        # Predict the single held-out observation.
        y_pred = model.predict(X_test_std)
        predictions.append(np.asarray(y_pred)[0])
        # Error measures on the original (exp) scale.
        errors.append(lin_reg_errors(np.exp(y_test), np.exp(y_pred)))
    return (np.array(betas), np.array(errors), np.array(predictions))
# One-hot encode the calendar month (12 dummy columns), then append log price
# as the 13th feature; target is log demand.
# NOTE(review): `sparse=False` was renamed `sparse_output` in sklearn >= 1.2,
# and np.matrix is deprecated in favour of 2-D ndarrays — confirm versions.
one_hot_encoder = OneHotEncoder(sparse=False)
one_hot_encoder.fit(np.matrix(df['Month'].dt.month).transpose())
X = one_hot_encoder.transform(np.matrix(df['Month'].dt.month).transpose())
X = np.append(X, np.matrix(df['LogPrice']).transpose(), axis=1)
y = np.matrix(df['LogDemand']).transpose()
# Sweep the ridge regularization strength over [10.0, 14.0] in 0.1 steps and
# record the mean LOOCV RMSE for each alpha.
alphas = []
rmse_means = []
for i in range(0,41):
    alpha = 10 + i * 0.1
    alphas.append(alpha)
    ridge_regression = Ridge(alpha=alpha)
    betas, errors, predictions = loocv(X, y, model=ridge_regression)
    RMSEs = errors[:,1]
    rmse_means.append(RMSEs.mean())
# Plot mean RMSE vs. alpha to pick the regularization strength.
fig, ax = plt.subplots(1, 1, sharex=True, figsize=(10, 6))
ax.plot(np.array(alphas), np.array(rmse_means));
ax.set_title('Hyperparameter Tuning')
ax.set_ylabel('RMSE (Bottles)')
ax.set_xlabel('Regularization Strength')
fig.tight_layout()
fig.savefig('./img/hyperparameter_tuning.png')
# Refit LOOCV at the chosen alpha for the final analysis below.
ridge_regression = Ridge(alpha=10.5)
betas, errors, predictions = loocv(X, y, model=ridge_regression)
# Accuracy and residual plots (back on the original bottle scale via exp).
fig, ax = acc_res_plots(np.squeeze(np.asarray(np.exp(y))), np.squeeze(np.asarray(np.exp(predictions))))
fig.savefig('./img/accuracy_residuals.png')
# Histogram of the residuals on the bottle scale.
fig, ax = plt.subplots(1, 1, sharey=True, figsize=(10, 6))
ax.hist(np.squeeze(np.asarray(np.exp(y))) - np.squeeze(np.asarray(np.exp(predictions))))
ax.set_title('Residuals Distribution')
ax.set_ylabel('Frequency')
ax.set_xlabel('Residual (Bottles)')
fig.tight_layout()
fig.savefig('./img/residuals_hist.png')
# Per-fold RMSE summary statistics (errors column 1 holds the RMSE).
RMSEs = errors[:,1]
RMSEs.mean()
RMSEs.std()
np.median(RMSEs)
fig, ax = plt.subplots(1, 1, sharey=True, figsize=(10, 6))
ax.hist(RMSEs)
ax.set_title('RMSE Distribution')
ax.set_ylabel('Frequency')
ax.set_xlabel('RMSE (Bottles)')
fig.tight_layout()
fig.savefig('./img/rmse_distribution.png')
# One histogram per month coefficient (betas columns 0-11 = one-hot months).
beta_month = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']
beta_month_means = []
fig, ax = plt.subplots(3, 4, sharey=True, figsize=(12, 10))
for i in range(0,3):
    for j in range(0,4):
        beta_index = 4 * i + j
        beta_no = beta_index + 1
        ax[i][j].hist(betas[:,beta_index])
        ax[i][j].set_title(beta_month[beta_index] + ' Beta Dist' + '\n' + 'mu = ' + str(betas[:,beta_index].mean().round(2)) + '\n' + 'std = ' + str(betas[:,beta_index].std().round(4)))
        ax[i][j].set_ylabel('Freq')
        ax[i][j].set_xlabel('Beta ' + str(beta_no))
        beta_month_means.append(betas[:,beta_index].mean())
fig.tight_layout()
fig.savefig('./img/seasonality_coef_hists.png')
# Mean month coefficients, scaled ~% effect on demand in the log-linear model.
fig, ax = plt.subplots(1, 1, sharex=True, figsize=(10, 6))
ax.plot([i for i in range(1,13)],np.array(beta_month_means)*100,'-')
ax.set_title('Seasonality - Jim Beam 1750ml')
ax.set_ylabel('% Effect on Demand')
ax.set_xlabel('Month')
fig.tight_layout()
fig.savefig('./img/seasonality.png')
# Column 12 is the log-price coefficient, i.e. the price-elasticity estimate.
fig, ax = plt.subplots(1, 1, figsize=(10, 6))
ax.hist(betas[:,12])
ax.set_title('Price Beta Dist' + '\n' + 'mu = ' + str(betas[:,12].mean().round(2)) + '\n' + 'std = ' + str(betas[:,12].std().round(4)))
ax.set_ylabel('Frequency')
ax.set_xlabel('Beta 13')
fig.tight_layout()
fig.savefig('./img/price_elasticity_hist.png')
# Transaction-level sales for the item.
transactions = pd.read_csv('./tmp/item_sales.csv')
# print(transactions['state_bottle_retail']*transactions['sale_bottles'])
# print(pd.DatetimeIndex(transactions['date']).strftime(date_format='%Y-%m'))
# Bucket each transaction into its 'YYYY-MM' month, then drop columns not
# needed for the aggregation.
transactions['month']=pd.DatetimeIndex(transactions['date']).strftime(date_format='%Y-%m')
transactions.drop(columns=['invoice_line_no','date','itemno','pack','vendor_no'], inplace=True)
# Monthly aggregates: mean retail price and cost, total bottles sold, the
# implied profit, and the running (cumulative) profit over the full period.
gb = transactions.groupby('month')
agg = pd.DataFrame()
agg['Price'] = gb['state_bottle_retail'].mean()
agg['Cost'] = gb['state_bottle_cost'].mean()
agg['Demand'] = gb['sale_bottles'].sum()
agg['Profit'] = (agg['Price'] - agg['Cost']) * agg['Demand']
agg['Cum_Profit'] = agg['Profit'].cumsum()
# Cumulative profit in millions of dollars for plotting.
Cum_Profit = pd.Series(agg['Cum_Profit']).values / 1000000
agg.head()
# Cumulative profit by month, one line per year (2016 is partial).
fig, ax = plt.subplots(1, 1, sharex=True, figsize=(10, 6))
ax.plot([i for i in range(1,13)],Cum_Profit[0:12],'-',
[i for i in range(1,13)],Cum_Profit[12:24],'-',
[i for i in range(1,13)],Cum_Profit[24:36],'-',
[i for i in range(1,13)],Cum_Profit[36:48],'-',
[i for i in range(1,9)],Cum_Profit[48:56],'-');
ax.set_title('Profit - Jim Beam 1750ml')
ax.set_ylabel('Cumulative Profit (Millions of $)')
ax.set_xlabel('Month')
ax.legend(['2012', '2013', '2014', '2015', '2016 (optimized)'], loc=4)
fig.tight_layout()
fig.savefig('./img/monthly_profit.png')
```
2012 to 2013 increase in cumulative profit through August (absolute difference)
```
((agg['Cum_Profit'].loc['2013-08'] - agg['Cum_Profit'].loc['2012-08']))
```
2013 to 2014 increase in cumulative profit through August (absolute difference)
```
((agg['Cum_Profit'].loc['2014-08'] - agg['Cum_Profit'].loc['2013-08']))
```
2014 to 2015 increase in cumulative profit through August (absolute difference)
```
((agg['Cum_Profit'].loc['2015-08'] - agg['Cum_Profit'].loc['2014-08']))
```
2015 to 2016 increase in cumulative profit through August (absolute difference)
```
((agg['Cum_Profit'].loc['2016-08'] - agg['Cum_Profit'].loc['2015-08']))
```
| github_jupyter |
# Loss Functions
## 1. L1 and L2 loss
*L1* and *L2* are two common loss functions in machine learning which are mainly used to minimize the error.
The **L1 loss function** is also known as **Least Absolute Deviations**, or **LAD** for short.
The **L2 loss function** is also known as **Least Squares Error**, or **LS** for short.
Let's get brief of these two
### L1 Loss function
It is used to minimize the error which is the sum of all the absolute differences in between the true value and the predicted value.
<img src=".\Images\img13.png">
### L2 Loss Function
It is also used to minimize the error, which is the sum of all the squared differences between the true value and the predicted value.
<img src=".\Images\img15.png">
**The disadvantage** of the **L2 norm** is that when there are outliers, these points account for the main component of the loss. For example, suppose the true value is 1 and the model makes 10 predictions: one prediction is 1000 and the other nine are about 1. Because the errors are squared, the loss value is then dominated by that single outlying prediction.
```
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
# Compare L1 (|error|) and L2 (error^2) loss for guesses in [-1, 1] against a
# true value of 0, using the TF1 graph + Session API.
x_guess = tf.lin_space(-1., 1., 100)
x_actual = tf.constant(0,dtype=tf.float32)
l1_loss = tf.abs((x_guess-x_actual))
l2_loss = tf.square((x_guess-x_actual))
# Evaluate the tensors, then plot both loss curves on the same axes.
with tf.Session() as sess:
    x_,l1_,l2_ = sess.run([x_guess, l1_loss, l2_loss])
plt.plot(x_,l1_,label='l1_loss')
plt.plot(x_,l2_,label='l2_loss')
plt.legend()
plt.show()
```
## 2. Huber Loss
Huber Loss is often used in regression problems. Compared with L2 loss, Huber Loss is less sensitive to outliers(because if the residual is too large, it is a piecewise function, loss is a linear function of the residual).
<img src=".\Images\img1.png">
Among them, $\delta$ is a set parameter, $y$ represents the real value, and $f(x)$ represents the predicted value.
The advantage of this is that when the residual is small, the loss function is L2 norm, and when the residual is large, it is a linear function of L1 norm
### Pseudo-Huber loss function
A smooth approximation of Huber loss to ensure that each order is differentiable.
<img src=".\Images\img2.png">
Where $\delta$ is the set parameter, the larger the value, the steeper the linear part on both sides.
<img src=".\Images\img3.png">
## 3.Hinge Loss
Hinge loss is often used for binary classification problems, such as ground true: t = 1 or -1, predicted value y = wx + b
In the svm classifier, the definition of hinge loss is
<img src=".\Images\img4.png">
In other words, the closer the y is to t, the smaller the loss will be.
```
# Hinge loss max(0, 1 - t*y) over predictions y in [-3, 5] with target t = +1:
# the loss is zero once the prediction clears the margin.
x_guess2 = tf.linspace(-3.,5.,500)
x_actual2 = tf.convert_to_tensor([1.]*500)
#Hinge loss
#hinge_loss = tf.losses.hinge_loss(labels=x_actual2, logits=x_guess2)
hinge_loss = tf.maximum(0.,1.-(x_guess2*x_actual2))
# Fixed: the original line began with a stray '0' ("0with tf.Session() ..."),
# which was a syntax error that prevented the cell from running at all.
with tf.Session() as sess:
    x_,hin_ = sess.run([x_guess2, hinge_loss])
plt.plot(x_,hin_,'--', label='hin_')
plt.legend()
plt.show()
```
## 4.Cross-entropy loss
<img src=".\Images\img7.png">
The above is mainly to say that cross-entropy loss is mainly applied to binary classification problems. The predicted value is a probability value and the loss is defined according to the cross entropy. Note the value range of the above value: the predicted value of y should be a probability and the value range is [0,1]
<img src=".\Images\img8.png">
## 5.Sigmoid-Cross-entropy loss
The above cross-entropy loss requires that the predicted value is a probability. Generally, we calculate $scores = x*w + b$. Entering this value into the sigmoid function can compress the value range to (0,1).
<img src=".\Images\img9.png">
It can be seen that the sigmoid function smoothes the predicted value(such as directly inputting 0.1 and 0.01 and inputting 0.1, 0.01 sigmoid and then entering, the latter will obviously have a much smaller change value), which makes the predicted value of sigmoid-ce far from the label loss growth is not so steep.
## 6.Softmax cross-entropy loss
First, the softmax function can convert a set of fraction vectors into corresponding probability vectors. Here is the definition of softmax function
<img src=".\Images\img10.png">
As above, softmax also implements a vector of 'squashes' k-dimensional real value to the [0,1] range of k-dimensional, while ensuring that the cumulative sum is 1.
According to the definition of cross entropy, probability is required as input.Sigmoid-cross-entropy-loss uses sigmoid to convert the score vector into a probability vector, and softmax-cross-entropy-loss uses a softmax function to convert the score vector into a probability vector.
According to the definition of cross entropy loss.
<img src=".\Images\img11.png">
where $p(x)$ represents the probability that classification $x$ is a correct classification, and the value of $p$ can only be 0 or 1. This is the prior value
$q(x)$ is the prediction probability that the $x$ category is a correct classification, and the value range is (0,1)
So specific to a classification problem with a total of C types, then $p(x_j)$, $(0 <_{=} j <_{=} C)$ must be only 1 and C-1 is 0(because there can be only one correct classification, correct the probability of classification as correct classification is 1, and the probability of the remaining classification as correct classification is 0)
Then the definition of softmax-cross-entropy-loss can be derived naturally.
Here is the definition of softmax-cross-entropy-loss.
<img src=".\Images\img12.png">
Where $f_j$ is the score of all possible categories, and $f_{y_i}$ is the score of ground true class
| github_jupyter |
<h1>Table of Contents<span class="tocSkip"></span></h1>
<div class="toc"><ul class="toc-item"><li><ul class="toc-item"><li><span><a href="#3.1-Python-Input-and-Output" data-toc-modified-id="3.1-Python-Input-and-Output-0.1">3.1 Python Input and Output</a></span><ul class="toc-item"><li><span><a href="#3.1.1-Python-Output" data-toc-modified-id="3.1.1-Python-Output-0.1.1">3.1.1 Python Output</a></span></li><li><span><a href="#3.1.2-Output-Formatting" data-toc-modified-id="3.1.2-Output-Formatting-0.1.2">3.1.2 Output Formatting</a></span></li><li><span><a href="#3.1.3-Python-Input" data-toc-modified-id="3.1.3-Python-Input-0.1.3">3.1.3 Python Input</a></span></li></ul></li><li><span><a href="#3.2-Operators" data-toc-modified-id="3.2-Operators-0.2">3.2 Operators</a></span><ul class="toc-item"><li><span><a href="#3.2.1-Arithmetic-Operators" data-toc-modified-id="3.2.1-Arithmetic-Operators-0.2.1">3.2.1 Arithmetic Operators</a></span></li><li><span><a href="#3.2.2-Comparision-Operators" data-toc-modified-id="3.2.2-Comparision-Operators-0.2.2">3.2.2 Comparision Operators</a></span></li><li><span><a href="#3.2.3-Logical-Operators" data-toc-modified-id="3.2.3-Logical-Operators-0.2.3">3.2.3 Logical Operators</a></span></li><li><span><a href="#3.2.4-Bitwise-operators" data-toc-modified-id="3.2.4-Bitwise-operators-0.2.4">3.2.4 Bitwise operators</a></span></li><li><span><a href="#3.2.5-Assignment-operators" data-toc-modified-id="3.2.5-Assignment-operators-0.2.5">3.2.5 Assignment operators</a></span></li></ul></li><li><span><a href="#3.3-Special-Operators" data-toc-modified-id="3.3-Special-Operators-0.3">3.3 Special Operators</a></span><ul class="toc-item"><li><span><a href="#3.3.1-Identity-Operators" data-toc-modified-id="3.3.1-Identity-Operators-0.3.1">3.3.1 Identity Operators</a></span></li></ul></li><li><span><a href="#3.4-MemberShip-Operators" data-toc-modified-id="3.4-MemberShip-Operators-0.4">3.4 MemberShip Operators</a></span></li><li><span><a href="#3.5-Python-break-and-continue-Statements" 
data-toc-modified-id="3.5-Python-break-and-continue-Statements-0.5">3.5 Python break and continue Statements</a></span><ul class="toc-item"><li><span><a href="#3.5.1-Python-break-Statement" data-toc-modified-id="3.5.1-Python-break-Statement-0.5.1">3.5.1 Python break Statement</a></span></li><li><span><a href="#3.5.1-Python-Continue-Statement" data-toc-modified-id="3.5.1-Python-Continue-Statement-0.5.2">3.5.1 Python Continue Statement</a></span></li></ul></li></ul></li><li><span><a href="#EX1:-Pyhon-Program-to-check-given-number-is-Prime-number-or-not-(using-break)" data-toc-modified-id="EX1:-Pyhon-Program-to-check-given-number-is-Prime-number-or-not-(using-break)-1">EX1: Pyhon Program to check given number is Prime number or not (using break)</a></span><ul class="toc-item"><li><span><a href="#3.6-Python-for-Loop" data-toc-modified-id="3.6-Python-for-Loop-1.1">3.6 Python for Loop</a></span><ul class="toc-item"><li><span><a href="#3.6.1-range()-function" data-toc-modified-id="3.6.1-range()-function-1.1.1">3.6.1 range() function</a></span></li></ul></li><li><span><a href="#3.7-Python-if-...-else-Statement" data-toc-modified-id="3.7-Python-if-...-else-Statement-1.2">3.7 Python if ... 
else Statement</a></span><ul class="toc-item"><li><span><a href="#3.7.1-if-statement-syntax" data-toc-modified-id="3.7.1-if-statement-syntax-1.2.1">3.7.1 if statement syntax</a></span></li><li><span><a href="#3.7.2-if...elif...else-Statement" data-toc-modified-id="3.7.2-if...elif...else-Statement-1.2.2">3.7.2 if...elif...else Statement</a></span></li></ul></li></ul></li><li><span><a href="#EX-2-:Python-program-to-find-the-largest-element-among-three-Numbers" data-toc-modified-id="EX-2-:Python-program-to-find-the-largest-element-among-three-Numbers-2">EX 2 :Python program to find the largest element among three Numbers</a></span><ul class="toc-item"><li><span><a href="#3.8-Python-while-Loop" data-toc-modified-id="3.8-Python-while-Loop-2.1">3.8 Python while Loop</a></span></li></ul></li><li><span><a href="#EX-3-:Python-Program-to-check-given-number-is-Prime-number-or-not" data-toc-modified-id="EX-3-:Python-Program-to-check-given-number-is-Prime-number-or-not-3">EX 3 :Python Program to check given number is Prime number or not</a></span></li></ul></div>
## 3.1 Python Input and Output
### 3.1.1 Python Output
We use the print() function to output data to the standard output device
```
print("Hello World")
a = 10
# print() accepts multiple arguments and separates them with a single space.
print("The value of a is", a) #python 3
#print "The value of a is " + str(a) # python2
```
### 3.1.2 Output Formatting
```
a = 10; b = 20 #multiple statements in single line.
# Empty {} placeholders are filled by positional arguments in order.
print("The value of a is {} and b is {}".format(a, b)) #default
print("A values is:{} B value is:{}".format(b,a))
print("A ={} B={}".format("Hello",20.8+5j))
a = 10; b = 20 #multiple statements in single line
# Numbered {1}/{0} placeholders select arguments by position.
print("The value of b is {1} and a is {0}".format(a, b)) #specify position of arguments
#we can use keyword arguments to format the string
print("Hello {greeting}, {name}".format(name="Venky", greeting="Good Morning"))
#we can combine positional arguments with keyword arguments
print('The story of {0}, {other}, and {1}'.format('Samantha', 'Thamanna',
other='Sunny'))
```
### 3.1.3 Python Input
```
# input() always returns a string in Python 3.
num = input("Enter a number: ")
print(num)
# Fixed: the comprehension referenced `x` while its loop variable was `i`,
# which raised NameError.  Parses two ':'-separated integers from one line.
a,b=[int(x) for x in input("Enter two values").split(':')]
# Fixed: raw_input() exists only in Python 2; on Python 3 use input().
a,b,c=input("Please enter 3 values in one line using commas\n").split(',')
```
## 3.2 Operators
Operators are special symbols in Python that carry out arithmetic or logical computation. The value that the operator operates on is called the operand.
Operator Types
1. Arithmetic operators
2. Comparison (Relational) operators
3. Logical (Boolean) operators
4. Bitwise operators
5. Assignment operators
6. Special operators
### 3.2.1 Arithmetic Operators
Arithmetic operators are used to perform mathematical operations like addition, subtraction, multiplication etc.
+ , -, *, /, %, //, ** are arithmetic operators
Example:
```
x, y = 10, 20
#addition
print(x + y)
#subtraction(-)
print(x - y)
#multiplication(*)
print(x * y)
#division(/) — true division always yields a float in Python 3
print(x / y)
#modulo division (%) — remainder of x divided by y
print(x % y)
#Floor Division (//) — division rounded down to the nearest integer
print(x // y)
#Exponent (**) — x raised to the power y
print(x ** y)
```
### 3.2.2 Comparision Operators
Comparison operators are used to compare values. It either returns True or False according to the condition.
>, <, ==, !=, >=, <= are comparision operators
```
a, b = 10, 20
# Each comparison evaluates to the boolean True or False.
print(a < b) #check a is less than b
# The remaining comparisons are left as exercises:
#check a is greater than b
#check a is equal to b
#check a is not equal to b (!=)
#check a greater than or equal to b
#check a less than or equal to b
```
### 3.2.3 Logical Operators
Logical operators are **and, or, not** operators.
```
a, b = True, False
# 'and' is True only when both operands are True.
#print a and b
print(a and b)
# The remaining operators are left as exercises:
#print a or b
#print not b
```
### 3.2.4 Bitwise operators
Bitwise operators act on operands as if they were string of binary digits. It operates bit by bit
&, |, ~, ^, >>, << are Bitwise operators
```
a, b = 10, 4
#Bitwise AND: 0b1010 & 0b0100 == 0
print(a & b)
# The remaining operators are left as exercises:
#Bitwise OR
#Bitwise NOT
#Bitwise XOR
#Bitwise rightshift
#Bitwise Leftshift
```
### 3.2.5 Assignment operators
Assignment operators are used in Python to assign values to variables.
a = 5 is a simple assignment operator that assigns the value 5 on the right to the variable a on the left.
=, +=, -=, *=, /=, %=, //=, **=, &=, |=, ^=, >>=, <<= are Assignment operators
```
a = 10
# a += 10 is shorthand for a = a + 10.
a += 10 #add AND
print(a)
# The remaining augmented assignments are left as exercises:
#subtract AND (-=)
#Multiply AND (*=)
#Divide AND (/=)
#Modulus AND (%=)
#Floor Division (//=)
#Exponent AND (**=)
```
## 3.3 Special Operators
### 3.3.1 Identity Operators
**is and is not** are the identity operators in Python.
They are used to check if two values (or variables) are located on the same part of the memory.
```
a = 5
b = 5
# CPython caches small integers, so both names can point at the same object
# here; this is an implementation detail — never use `is` for value equality.
print(a is b) #5 is object created once both a and b points to same object
#check is not
l1 = [1, 2, 3]
l2 = [1, 2, 3]
# Two separately constructed lists are equal but not the same object.
print(l1 is l2)
s1 = "vishnu"
s2 = "vishnu"
# NOTE(review): string interning is implementation-dependent; the result of
# this identity test may vary between interpreters.
print(s1 is not s2)
```
## 3.4 MemberShip Operators
**in and not in** are the membership operators in Python.
They are used to test whether a value or variable is found in a sequence (string, list, tuple, set and dictionary).
```
lst = [1, 2, 3, 4]
print(1 in lst) #check 1 is present in a given list or not
#check 5 is present in a given list
d = {1: "a", 2: "b"}
# Membership on a dict tests its keys, not its values.
print(1 in d)
```
## 3.5 Python break and continue Statements
In Python, break and continue statements can alter the flow of a normal loop.
Loops iterate over a block of code until the test expression is false, but sometimes we wish to terminate the current iteration or even the whole loop without checking the test expression.
The break and continue statements are used in these cases.
### 3.5.1 Python break Statement
Syntax:
break
<img src="t.webp" alt="Smiley face" height="500000" width="342">
```
numbers = [1, 2, 3, 4]
for num in numbers: #iterating over list
    # Stop the loop entirely once 4 is reached; 1, 2, 3 are printed.
    if num == 4:
        break
    print(num)
print("Outside of for loop")
```
### 3.5.1 Python Continue Statement
syntax:
continue
```
#print odd numbers present in a list
numbers = [1, 2, 3, 4, 5]
for num in numbers:
    if num % 2 == 0:
        continue  # skip even numbers, move on to the next element
    print(num)
else:
    # the for-else branch runs because the loop finished without hitting `break`
    print("else-block")
```
# EX1: Python Program to check whether a given number is a Prime number or not (using break)
```
# Trial division primality check; `break` stops at the first divisor found.
# (Trailing semicolons are legal but unnecessary in Python.)
num = int(input("Enter a number: ")) #convert string to int
isDivisible = False;
i=2;
while i < num:
    if num % i == 0:
        isDivisible = True;
        print ("{} is divisible by {}".format(num,i) )
        break; # this line is the only addition.
    i += 1;
if isDivisible:
    print("{} is NOT a Prime number".format(num))
else:
    print("{} is a Prime number".format(num))
```
## 3.6 Python for Loop
The for loop in Python is used to iterate over a sequence (list, tuple, string) or other iterable objects.
Iterating over a sequence is called traversal.
Syntax:
for element in sequence :
Body of for
Here, element is the variable that takes the value of the item inside the sequence on each iteration.
Loop continues until we reach the last item in the sequence.
<img src="for.png" alt="Smiley face" height="5000" width="342">
```
# Multiply together every element of the list with a running accumulator.
lst = [10, 20, 30, 40, 50]
product = 1
for value in lst:
    product = product * value
print("Product is: {}".format(product))
```
### 3.6.1 range() function
We can generate a sequence of numbers using range() function. range(10) will generate numbers from 0 to 9 (10 numbers).
We can also define the start, stop and step size as range(start,stop,step size). step size defaults to 1 if not provided.
This function does not store all the values in memory, it would be inefficient. So it remembers the start, stop, step size and generates the next number on the go.
```
#print range of 10
for i in range(10):
    print(i)
#print numbers from 0 up to 20 (exclusive) with a step size of 5: 0, 5, 10, 15
for i in range(0, 20, 5):
    print(i)
lst = ["apple", "mango", "banana", "coconut", "orange"]
#iterate over the list using index
#for index in range(len(lst)):
#    print(lst[index])
# direct iteration is the idiomatic alternative to index-based access:
for ele in lst:
    print(ele)
```
## 3.7 Python if ... else Statement
The **if…elif…else** statement is used in Python for decision making.
### 3.7.1 if statement syntax
if test expression:
statement(s)
The program evaluates the test expression and will execute statement(s) only if the test expression is True.
If the test expression is False, the statement(s) is not executed.
Python interprets non-zero values as True. None and 0 are interpreted as False.
<img src="if.jpg" alt="Smiley face" height="5000" width="342">
```
# Simple two-way branch.
# NOTE(review): zero also falls into the else branch here; the elif version below handles it.
num = 10
if num > 0:
    print("Positive number")
else:
    print("Negative Number")
```
### 3.7.2 if...elif...else Statement
**syntax**
if test expression:
Body of if
elif test expression:
Body of elif
else:
Body of else
```
# Three-way branch: positive / zero / negative.
num = 10.5
if num > 0:
    print("Positive number")
elif num == 0:
    print("ZERO")
else:
    print("Negative Number")
```
# EX 2 :Python program to find the largest element among three Numbers
```
# Determine the largest of three numbers using chained comparisons.
num1 = 10
num2 = 50
num3 = 15
if num2 <= num1 >= num3:
    largest = num1
elif num1 <= num2 >= num3:
    largest = num2
else:
    largest = num3
print("Largest element among three numbers is: {}".format(largest))
```
## 3.8 Python while Loop
The while loop in Python is used to iterate over a block of code as long as the test expression (condition) is true.
**Syntax**
while test_expression:
Body of while
The body of the loop is entered only if the test_expression evaluates to True.
After one iteration, the test expression is checked again.
This process continues until the test_expression evaluates to False.
<img src="while.jpg" alt="Smiley face" height="5000" width="342">
```
# Multiply together every element of the list, driving the loop with an explicit index.
lst = [10, 20, 30, 40, 60]
product, index = 1, 0
while index < len(lst):
    product, index = product * lst[index], index + 1
print("Product is: {}".format(product))
```
# EX 3 :Python Program to check given number is Prime number or not
```
# Trial division primality check. Unlike EX1 there is no `break`,
# so every divisor of num is printed before the verdict.
num = int(input("Enter a number: ")) #convert string to int
isDivisible = False;
i=2;
while i < num:
    if num % i == 0:
        isDivisible = True;
        print ("{} is divisible by {}".format(num,i) )
    i += 1;
if isDivisible:
    print("{} is NOT a Prime number".format(num))
else:
    print("{} is a Prime number".format(num))
```
| github_jupyter |
```
import numpy as np
import cv2
import matplotlib.pyplot as plt
# The 3x3 pixel matrix from figure 6-1 (each inner triple is one BGR/RGB pixel)
img = np.array([
    [[255, 0, 0], [0, 255, 0], [0, 0, 255]],
    [[255, 255, 0], [255, 0, 255], [0, 255, 255]],
    [[255, 255, 255], [128, 128, 128], [0, 0, 0]],
], dtype=np.uint8)
# Save with matplotlib (interprets the array as RGB)
plt.imsave('img_pyplot.jpg', img)
# Save with OpenCV (interprets the array as BGR)
cv2.imwrite('img_cv2.jpg', img)
# 1. the example above shows how to use OpenCV to build an image from an array
# 2. and save the image to disk
import cv2
# Intended for a 400x600 image (not tried yet); here a 3x3 image is used instead
color_img = cv2.imread('img_cv2.jpg')
print(color_img.shape) # shape is reported as (height, width, channels)
# Read directly as a single channel (grayscale)
gray_img = cv2.imread('img_cv2.jpg', cv2.IMREAD_GRAYSCALE)
print(gray_img.shape)
# After saving a single-channel image and reloading it, it comes back with 3 channels:
# the single channel is duplicated into all three
cv2.imwrite('test_grayscale.jpg', gray_img)
reload_grayscale = cv2.imread('test_grayscale.jpg')
print(reload_grayscale.shape)
# cv2.IMWRITE_JPEG_QUALITY sets jpg quality, range 0-100, default 95;
# higher = better quality, bigger file
cv2.imwrite('test_imwrite.jpg', color_img, (cv2.IMWRITE_JPEG_QUALITY, 80))
# cv2.IMWRITE_PNG_COMPRESSION sets png compression level, range 0-9, default 3;
# higher = smaller file (PNG is lossless, so this mainly costs encode time)
cv2.imwrite('test_imwrite.png', color_img, (cv2.IMWRITE_PNG_COMPRESSION, 5))
#compare the different read strategies
import cv2
# Intended for a 400x600 image (not tried yet); here a 3x3 image is used instead
color_img = cv2.imread('img_cv2.jpg')
print(color_img.shape)
# Use OpenCV for resizing, cropping and padding
import cv2
# Read a photo (a Tibetan village in Sichuan)
img = cv2.imread('Tibet.jpg')
# Resize to a 200x200 square
img_200x200 = cv2.resize(img, (200, 200))
# Instead of giving the target size directly, fx and fy specify scale factors;
# 0.5 halves both width and height.
# Equivalent to img_200x300 = cv2.resize(img, (300, 200)); note the size tuple is (width, height).
# Interpolation defaults to cv2.INTER_LINEAR; nearest-neighbour is requested here.
#if you want to set the resize by dsize then fx,fy as 0, 0
#if you want to set fx,fy then pass the size as (0,0)
img_200x300 = cv2.resize(img, (0, 0), fx=0.5, fy=0.5,
                         interpolation=cv2.INTER_NEAREST)
# Pad the previous image with 50 black pixels on top and bottom to get a 300x300 image
img_300x300 = cv2.copyMakeBorder(img, 50, 50, 0, 0,
                                 cv2.BORDER_CONSTANT,
                                 value=(0, 0, 0))
# Crop the tree region of the photo (negative indices count from the end, plain numpy slicing)
patch_tree = img[20:150, -180:-50]
cv2.imwrite('cropped_tree.jpg', patch_tree)
cv2.imwrite('resized_200x200.jpg', img_200x200)
cv2.imwrite('resized_200x300.jpg', img_200x300)
cv2.imwrite('bordered_300x300.jpg', img_300x300)
# Image coordinates in OpenCV start at the top-left corner (0, 0);
# y grows downward and x grows to the right.
# Convert the image from BGR to HSV with cv2.cvtColor
img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# In H space green sits slightly above yellow, so adding 15 to every hue turns yellow leaves green
turn_green_hsv = img_hsv.copy()
turn_green_hsv[:, :, 0] = (turn_green_hsv[:, :, 0]+15) % 180
turn_green_img = cv2.cvtColor(turn_green_hsv, cv2.COLOR_HSV2BGR)
cv2.imwrite('turn_green.jpg', turn_green_img)
# Lowering saturation makes the image less vivid, more gray
colorless_hsv = img_hsv.copy()
colorless_hsv[:, :, 1] = 0.5 * colorless_hsv[:, :, 1]
colorless_img = cv2.cvtColor(colorless_hsv, cv2.COLOR_HSV2BGR)
cv2.imwrite('colorless.jpg', colorless_img)
# Halve the value (brightness) channel
darker_hsv = img_hsv.copy()
darker_hsv[:, :, 2] = 0.5 * darker_hsv[:, :, 2]
darker_img = cv2.cvtColor(darker_hsv, cv2.COLOR_HSV2BGR)
cv2.imwrite('darker.jpg', darker_img)
```
| github_jupyter |
# Now You Code 3: Sentiment v2.0
Let's write a better version of the basic sentiment analyzer in Python. Instead of using a string of words, this example will read a list of words positive and negative words from a file.
In fact, we've included two files for you so you don't have to come up with the positive and negative words! Just load the files and go! Of course, if you want more positive and negative words you can always edit the files.
- Positive words are in `NYC3-pos.txt`
- Negative words are in `NYC3-neg.txt`
You will have to write a function called `LoadWords(filename)` to read the words from the file and load them into a string.
## Step 1: Problem Analysis for function
Input (function arguments): `filename` to read.
Output (function returns): a `text` string of words as loaded from the file.
Algorithm:
```
open the filename for reading
read the entire file all at once, into text
return the text
```
```
## Step 2: Write the LoadWords(filename) function
def LoadWords(filename):
    """Read the word file *filename* and return its entire contents as one string.

    Fixes two defects in the original: the signature omitted the `filename`
    parameter (the calls below pass one, which raised a TypeError/NameError),
    and the line-by-line loop overwrote `text` on every iteration so only the
    last line was ever returned instead of the whole file.
    """
    with open(filename, 'r') as f:
        return f.read()
## Quick test of your LoadWords() function
# Requires NYC3-pos.txt / NYC3-neg.txt to exist in the working directory.
pos = LoadWords("NYC3-pos.txt")
neg = LoadWords("NYC3-neg.txt")
print("POSITIVE WORD LIST:",pos)
print("NEGATIVE WORD LIST", neg)
```
## Step 3: The Final Program
Now write a program which allows you to enter text and then analyzes the sentiment of the text by printing a score. The program should keep analyzing text input until you enter "quit".
Sample Run
```
Sentiment Analyzer 1.0
Type 'quit' to exit.
Enter Text: i love a good book from amazon
2 positive.
Enter Text: i hate amazon their service makes me angry
-2 negative.
Enter Text: i love to hate amazon
0 neutral.
Enter Text: quit
```
### 3.a Problem Analysis
Input: text
Output: how positive or negative the text is
Algorithm: ask the user for text, then use the LoadWords function to load the positive and negative word lists. For each word in the text, add 1 to the score if it appears in the positive file and subtract 1 if it appears in the negative file; the final sum shows how positive or negative the text is.
```
## 3.b Solution: use LoadWords to read in the pos and neg words, then score input until "quit".
# The original had a syntax error (`text input(...)` is missing `=`), passed the user's
# text to LoadWords, and printed an undefined `sentiment` variable.
pos_words = LoadWords("NYC3-pos.txt").split()
neg_words = LoadWords("NYC3-neg.txt").split()
print("Sentiment Analyzer 1.0")
print("Type 'quit' to exit.")
while True:
    text = input("Enter Text: ")
    if text == 'quit':
        break
    # +1 for each positive word, -1 for each negative word in the input
    score = 0
    for word in text.split():
        if word in pos_words:
            score += 1
        elif word in neg_words:
            score -= 1
    if score > 0:
        print(score, "positive.")
    elif score < 0:
        print(score, "negative.")
    else:
        print(score, "neutral.")
```
## Step 4: Questions
1. This is a better solution than sentiment 1.0. Why?
There are more words that the program can check with in a list than a string of words
2. Execute the program and enter the following input: `i love! a GOOD book` What is the score and why? how can this issue be fixed?
The score is 1, because "love!" and "GOOD" do not match the lowercase words in the files. This can be fixed by lowercasing the input and stripping punctuation before scoring (or by adding those variants to the files).
3. Re-write your final solution to address the issues discovered in step 2.
## Reminder of Evaluation Criteria
1. What the problem attempted (analysis, code, and answered questions) ?
2. What the problem analysis thought out? (does the program match the plan?)
3. Does the code execute without syntax error?
4. Does the code solve the intended problem?
5. Is the code well written? (easy to understand, modular, and self-documenting, handles errors)
| github_jupyter |
# Dragon Real Estate -Price Prediction
```
#load the house dataset
import pandas as pd
housing=pd.read_csv("data.csv")
#sample of first 5 data
housing.head()
#housing information
housing.info()
#or find missing value
housing.isnull().sum()
print(housing["CHAS"].value_counts())
housing.describe()
%matplotlib inline
# data visualization
import matplotlib.pyplot as plt
housing.hist(bins=50,figsize=(20,15))
plt.show()
##train test spliting
import numpy as np
def split_train_test(data, test_ratio):
    """Randomly split *data* into (train, test) DataFrames.

    Seeding with 42 makes the permutation reproducible across runs, so the
    same rows always land in the test set. The shuffled index order is
    printed for inspection, mirroring the original behaviour.
    """
    np.random.seed(42)
    order = np.random.permutation(len(data))
    print(order)
    n_test = int(len(data) * test_ratio)
    test_idx, train_idx = order[:n_test], order[n_test:]
    return data.iloc[train_idx], data.iloc[test_idx]
# First: the hand-rolled splitter defined above
train_set,test_set=split_train_test(housing,0.2)
print(f"Rows in train set:{len(train_set)}\nRoows in test set:{len(test_set)}\n")
#train the data: same split via scikit-learn's helper
from sklearn.model_selection import train_test_split
train_set,test_set=train_test_split(housing,test_size=0.2,random_state=42)
print(f"Rows in train set:{len(train_set)}\nRoows in test set:{len(test_set)}\n")
# Stratified split: preserve the CHAS class proportions in both train and test sets
from sklearn.model_selection import StratifiedShuffleSplit
split=StratifiedShuffleSplit(n_splits=1,test_size=0.2,random_state=42)
for train_index,test_index in split.split(housing,housing["CHAS"]):
    strat_train_set=housing.loc[train_index]
    strat_test_set=housing.loc[test_index]
strat_test_set
strat_test_set.describe()
strat_test_set.info()
strat_test_set["CHAS"].value_counts()
strat_train_set["CHAS"].value_counts()
# Ratios below confirm the CHAS proportion is (almost) identical in both sets
95/7
376/28
# From here on, work on a copy of the training set only
housing=strat_train_set.copy()
```
# looking for corelation
```
# Pearson correlation of every numeric feature against the target MEDV
corr_matrix=housing.corr()
corr_matrix["MEDV"].sort_values(ascending=False)
from pandas.plotting import scatter_matrix
# Scatter matrix of the features most correlated with MEDV
attributes=["MEDV","RM","ZN","LSTAT"]
scatter_matrix(housing[attributes],figsize=(12,8))
# RM (rooms per dwelling) is the strongest positive predictor — inspect it directly
housing.plot(kind="scatter",x="RM",y="MEDV",alpha=0.8)
```
# TRYING OUT ATTRIBUTE COMBINATIONS
```
# Engineered feature: tax per room
housing["TAXRM"]=housing["TAX"]/housing["RM"]
housing.head()
corr_matrix=housing.corr()
corr_matrix["MEDV"].sort_values(ascending=False)
housing.plot(kind="scatter",x="TAXRM",y="MEDV",alpha=0.8)
# Separate predictors from the label (use the untouched stratified training set)
housing=strat_train_set.drop("MEDV",axis=1)
housing_labels=strat_train_set["MEDV"].copy()
#if some missing attributes are present, what do we do?
#1. get rid of the missing data points
#2. get rid of the whole attribute
#3. set the value to some value (0, mean or median)
#1....
#a=housing.dropna(subset=["RM"])
#2....
#housing.drop("RM",axis=1)
#3.....
#median=housing["RM"].median()
#housing["RM"].fillna(median)
# Option 3 via scikit-learn, so the same medians can be reused on test data
from sklearn.impute import SimpleImputer
imputer=SimpleImputer(strategy="median")
imputer.fit(housing)
imputer.statistics_
X=imputer.transform(housing)
# transform() returns a bare ndarray; wrap it back into a DataFrame
housing_tr=pd.DataFrame(X,columns=housing.columns)
housing_tr.describe()
```
# feature scalling
```
#min max scalling
#Standarzitaion
```
# creating pipeline
```
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
# Preprocessing pipeline: median imputation followed by standardization
my_pipeline=Pipeline([
    ("imputer",SimpleImputer(strategy="median")),
    #.....add as many steps as you want in your pipeline
    ("std_scaler",StandardScaler()),
])
housing_num_tr=my_pipeline.fit_transform(housing_tr)
housing_num_tr
housing_num_tr.shape
```
# selecting a desired model for dragon real estate
```
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
# Candidate models — uncomment one at a time to compare
#model=LinearRegression()
#model=DecisionTreeRegressor()
model=RandomForestRegressor()
model.fit(housing_num_tr,housing_labels)
# Sanity check: predict on the first 5 training rows and compare with the true labels
some_data=housing.iloc[:5]
some_labels=housing_labels.iloc[:5]
prepared_data=my_pipeline.transform(some_data)
model.predict(prepared_data)
list(some_labels)
```
# evaluating the model
```
from sklearn.metrics import mean_squared_error
# Training-set RMSE — optimistic, since the model has already seen this data
housing_predictions=model.predict(housing_num_tr)
mse=mean_squared_error(housing_labels,housing_predictions)
rmse=np.sqrt(mse)
rmse
```
# using better evaluation technique- cross validation
```
from sklearn.model_selection import cross_val_score
# 10-fold CV; sklearn scoring is "higher is better", hence the negated MSE
scores=cross_val_score(model,housing_num_tr,housing_labels,scoring="neg_mean_squared_error",cv=10)
rmse_scores=np.sqrt(-scores)
rmse_scores
def print_scores(scores):
    """Display cross-validation scores along with their mean and standard deviation."""
    for label, value in (("Scores:", scores),
                         ("Mean:", scores.mean()),
                         ("Standard deviation:", scores.std())):
        print(label, value)
print_scores(rmse_scores)
```
quiz:convert this notebook into a python file and run the pipeline using visual studio code
# saving the model
```
from joblib import dump, load
# Persist the trained model to disk for later reuse
dump(model, 'Dragon.joblib')
```
##testing the model on test data
```
# Final evaluation on the held-out test set
X_test=strat_test_set.drop("MEDV",axis=1)
Y_test=strat_test_set["MEDV"].copy()
# Reuse the already-fitted pipeline: transform only, never fit on test data
X_test_prepared=my_pipeline.transform(X_test)
final_predictions=model.predict(X_test_prepared)
final_mse=mean_squared_error(Y_test,final_predictions)
final_rmse=np.sqrt(final_mse)
print(final_predictions,list(Y_test))
final_rmse
prepared_data[0]
```
##using the model
```
from joblib import dump, load
import numpy as np
# Reload the persisted model and predict on one already-scaled feature vector
model=load('Dragon.joblib')
features=np.array([[-0.43942006, 3.12628155, -1.12165014, -0.27288841, -1.42262747,
                    -0.24141041, -1.31238772, 2.61111401, -1.0016859 , -0.5778192 ,
                    -0.97491834, 0.41164221, -0.86091034]])
model.predict(features)
```
| github_jupyter |
### This script relies on a active environment with Basemap
If that is not possible, you will probably have to comment out a thing or two.
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
import time
import geopandas as gpd
from mpl_toolkits.basemap import Basemap
import ezodf
# Local project root; adjust for your machine
basePath = 'C:/Users/Krist/University College London/Digital Visualisation/Final Project/'
# World Port Index shapefile and Natural Earth country boundaries
ports_1 = gpd.read_file(basePath+'Data Sources/Global Ports Shapefile (1)/WPI.shp')
countries = gpd.read_file(basePath+'Data Sources/countries (1)/ne_50m_admin_0_countries.shp').fillna(value='None')
```
# Let's see the ports.
```
# Let's visualise the ports on a world coastline map
# NOTE(review): `map` shadows the builtin of the same name
plt.figure(figsize=(15,15))
map = Basemap()
map.drawcoastlines()
for port in ports_1.index:
    # each port geometry is a point; coords yields a single (lon, lat) pair
    map.plot(list(ports_1.loc[port].geometry.coords)[0][0],
             list(ports_1.loc[port].geometry.coords)[0][1],color='darkblue',marker='o',markersize=3)
plt.show()
```
### Brief descriptive statistics
```
# Quick look at the ports table
ports_1.columns
ports_1.head()
ports_1.shape
ports_1.RAILWAY.unique()
ports_1.HARBORSIZE.unique()
# Count ports per harbor-size category via a dict comprehension
print('Distribution of ports in each size category:\n\n',
      {size:sum([True if obs == size else False for obs in ports_1.HARBORSIZE]) for size in ports_1.HARBORSIZE.unique()})
print('Distribution of ports with Railway access:\n\n',
      {size:sum([True if obs == size else False for obs in ports_1.RAILWAY]) for size in ports_1.RAILWAY.unique()})
```
## Joining the countries shapefile with the ports
But first ensuring country-classification attached on individual ports.
#### We want the ports to have a country classification attach, i.e. a unique ISO_A3 code. This can be done via a spatial join of the ports and the geometries in the country shapefile. However, some of the countries (more like states) in the country-shapefile doesn't not have a ISO_A3 code, which is corrected below.
Alternating the iso codes is needed, because some have a iso_code of -99 (Because it isn't a sovereign state)
http://www.naturalearthdata.com/downloads/10m-cultural-vectors/10m-admin-0-details/
https://unstats.un.org/unsd/tradekb/Knowledgebase/Country-Code
```
# Ensuring correct ISO_A3 codes: some territories carry -99 instead of a real code,
# so fall back to the sovereign's SOV_A3 (France is handled explicitly).
iso_codes = []
for iso,name,sov in zip(countries.ISO_A3,countries.NAME,countries.SOV_A3):
    if (iso == '-99') and (name != 'France'):
        print(name,sov)
        iso_codes.append(sov)
    elif (iso == '-99') and (name == 'France'):
        iso_codes.append('FRA')
    else:
        iso_codes.append(iso)
countries['ISO3'] = np.array(iso_codes).astype('object')
# Lookup table mapping 2-letter codes to 3-letter codes and country names
IsoCodes = countries[['ISO_A2','ISO3','SOV_A3','NAME']].sort_values(by=['ISO_A2'])
IsoCodes = IsoCodes.reset_index(drop=True)
# Manually repair missing ISO_A2 based on the first two letters of ISO3
# Reference: https://www.iban.com/country-codes
# Not bullet proof, but it works for the purpose
for i in IsoCodes.index:
    if IsoCodes['ISO_A2'].loc[i]=='-99':
        #print(IsoCodes['ISO3'].loc[i][0:2])
        IsoCodes['ISO_A2'].loc[i] = IsoCodes['ISO3'].loc[i][0:2]
IsoCodes = IsoCodes.sort_values(by=['NAME'])
#IsoCodes = IsoCodes.drop_duplicates(['ISO_A2'])
IsoCodes = IsoCodes.set_index(['ISO_A2'])
# Translate each port's 2-letter country code into ISO3 + country name;
# AU/SO/CY are special-cased because the lookup is ambiguous for them.
iso3CodePorts = []
nameCountryPorts = []
for country in ports_1.COUNTRY:
    if country in list(IsoCodes.index):
        if country == 'AU':
            iso3CodePorts.append('AUS')
            nameCountryPorts.append('Australia')
        elif country == 'SO':
            iso3CodePorts.append('SOM')
            nameCountryPorts.append('Somalia')
        elif country == 'CY':
            iso3CodePorts.append('CYP')
            nameCountryPorts.append('Cyprus')
        else:
            iso3CodePorts.append(IsoCodes['ISO3'].loc[country])
            nameCountryPorts.append(IsoCodes['NAME'].loc[country])
    else:
        # Ports whose code is unknown get the sentinel 'None'
        iso3CodePorts.append('None')
        nameCountryPorts.append('None')
ports_1['ISO3'] = np.array(iso3CodePorts)
ports_1['CountryName'] = np.array(nameCountryPorts)
```
### Performing the join
```
# Joining together the ports with the country file (spatial left join on geometry)
ports = gpd.sjoin(ports_1,countries,how='left')
# Selecting the columns of interest
ports = ports[['PORT_NAME','ISO3_left','CountryName','LATITUDE','LONGITUDE','HARBORSIZE','HARBORTYPE','RAILWAY','geometry']]
# Renaming the chosen columns to snake_case
ports.columns=['port_name','iso3','country_name','latitude','longitude','harborsize','harbortype','railway','geometry']
ports.head()
```
### Investigating the ports without any country-classification
```
# How many ports do not have a ISO3 code?
sum([1 if iso == 'None' else 0 for iso in ports.iso3])
# Only continuing with the ports without country classification
portsNoCountry = ports[[True if iso == 'None' else False for iso in ports.iso3]]
# Size distribution of all ports, then of the country-less subset
print('The number of ports in each possible harbor size are:\n\n',
      {size:sum([True if harborsize == size else False for harborsize in ports.harborsize]) for\
       size in ports.harborsize.unique()})
print('The number of ports without a country is:\n\n',
      {size:sum([1 if iso == 'None' else 0 for iso in portsNoCountry[[True if portsize == size else False for \
       portsize in portsNoCountry.harborsize]].iso3])\
       for size in portsNoCountry.harborsize.unique()})
```
For now, nothing is done about the ports with missing country classification, as their number is too small compared to the total number of ports.
### Writing the files
```
# Write the ports out twice: GeoJSON (with geometry) and CSV (attributes only)
gpdPorts = gpd.GeoDataFrame(ports,geometry=ports.geometry,crs = countries.crs)
# Based on this answer: https://gis.stackexchange.com/questions/159681/geopandas-cant-save-geojson
with open(basePath+'Final Data/ports.geojson', 'w') as f:
    f.write(gpdPorts.to_json())
portsToSave = ports.drop(['geometry'],axis=1)
portsToSave.to_csv(basePath+'Final Data/ports.csv')
```
### Potential Attributes
```
# The purpose below is to give an idea of the content of the potential attributes, which could be included for visualisation.
print('The excluded columns are:\n\n',
      list(ports_1.columns)[0:16]+list(ports_1.columns)[-4:],
      '\n\nThey are excluded because the already are included or because there is too many unique values.',
      '\n\nThe potentially interesting attributes are:\n\n',
      np.array(ports_1.columns)[16:-4])
# Show the unique values of each candidate attribute
for potentialAtt in ports_1.columns:
    if potentialAtt in list(list(ports_1.columns)[16:-4]):
        print('\nThe name of the potential attribute is: %s\nThe unique values are:\n\n'%(potentialAtt),
              ports_1[potentialAtt].unique())
```
### Below are some exploratory statistics on potentially interesting categories. They are not as important unless new attributes/dimensions are of interest
We are interested in the largest ports around the world, namely 'M' and 'L'.
```
# Keep only medium ('M') and large ('L') harbors
#sizeMLIndcies = [i for i,s in enumerate(ports_1.HARBORSIZE) if s in ('M','L')]
sizeMLPorts = ports_1[(ports_1.HARBORSIZE == 'M') | (ports_1.HARBORSIZE == 'L')]
sizeMLPorts = sizeMLPorts.reset_index(drop=True)
# Plot the M/L sized ports on a world coastline map
plt.figure(figsize=(15,15))
map = Basemap()
map.drawcoastlines()
for port in sizeMLPorts.index:
    map.plot(list(sizeMLPorts.loc[port].geometry.coords)[0][0],
             list(sizeMLPorts.loc[port].geometry.coords)[0][1],color='darkblue',marker='o',markersize=3)
plt.show()
sizeMLPorts.head()
```
#### Let's take a look at some cargo-related variables.
```
# Collect every column whose name mentions cargo, then show each one's value counts
cargoColumns = [column for column in ports_1.columns if 'cargo' in column.lower()]
cargoColumns
for column in cargoColumns:
    print('%s\n' % column)
    print({uniVal:sum([True if obs == uniVal else False for obs in ports_1[column]]) \
           for uniVal in ports_1[column].unique()},'\n')
```
And cargo variables for our sized-subset
```
# Same cargo-column value counts, restricted to the M/L-sized subset
for column in cargoColumns:
    print('%s\n' % column)
    print({uniVal:sum([True if obs == uniVal else False for obs in sizeMLPorts[column]]) \
           for uniVal in sizeMLPorts[column].unique()},'\n')
```
### Country distribution
```
# Count M/L ports per country code
print({C:sum([True if obs == C else False for obs in sizeMLPorts.COUNTRY]) for C in sizeMLPorts.COUNTRY.unique()},'\n')
print('\nThere are %i countries in the subset' % len(sizeMLPorts.COUNTRY.unique()))
```
### End of exploratory stats
## Let's take a look at the UK Data
```
# Opening the Open Document Sheet with UK port freight statistics
doc = ezodf.opendoc(basePath+'Data Sources/UK Port Freight (2)/port-freight-statistics-2017/port0400.ods')
# Data sheet
sheet = doc.sheets[2]
# Extract the sheet row by row: first row is the header, the rest is data.
# The last cell of every row is dropped ([:-1]).
start = time.time()
totalContent = []
for i,row in enumerate(sheet.rows()):
    if i == 0:
        columns = [cell.value for cell in row]
        columns = columns[:-1]
        # individualUKPortData = pd.DataFrame(columns = columns)
    if 0 < i:
        content = [cell.value for cell in row]
        totalContent.append(content[:-1])
        #individualUKPortData.loc[i] = content
individualUKPortData = pd.DataFrame(totalContent,columns = columns).fillna('None')
end = time.time()
print('The processing took %.3f seconds' % (end-start))
individualUKPortData.shape
#individualUKPortData['Year'] = np.array(individualUKPortData['Year']).astype('datetime64')
individualUKPortData.loc[0:10]
```
### Let's see some unique values, to get an idea of the opportunities that the data presents.
```
# Inspect every column's unique values to see what the data offers
for column in individualUKPortData.columns:
    print('This is "%s"'% column,'\n\n','The unique values are:\n\n',individualUKPortData[column].unique(),'\n\n')
```
From the above, it seems natural to consider the direction and the region for each port, as we already are restricted on the year.
```
# Restrict to 2017 and aggregate tonnage per port, region and direction
individualUKPortData2017 = individualUKPortData[individualUKPortData.Year == 2017]
print(individualUKPortData2017.shape)
portsUK = pd.DataFrame(index=individualUKPortData2017['Reporting Port'].unique())
dataholder = [[] for i in np.arange(6)] # <- because there are 3 unique values in 'Regions' and 2 unique values in 'Directions'
# plus a total column
# Excluding the 'None'
regions = [region for region in individualUKPortData2017.Region.unique() if region != 'None']
# j indexes the pair of (inwards, outwards) lists for the current region
j = 0
for region in regions:
    temp = individualUKPortData2017[individualUKPortData2017.Region==region]
    for port in individualUKPortData2017['Reporting Port'].unique():
        # Inwards (direction == 1); values are thousands of tonnes, /1000 -> millions
        dataholder[j].append(round(sum(temp[(temp['Reporting Port'] == port) & (temp.direction == 1)\
            & (temp.Type == 'Tonnage')]['Value (thousands)'])/1000,3))
        # Outwards (direction == 2)
        dataholder[j+1].append(round(sum(temp[(temp['Reporting Port'] == port) & (temp.direction == 2)\
            & (temp.Type == 'Tonnage')]['Value (thousands)'])/1000,3))
        #dataholder[-1].append(dataholder[j][-1]+dataholder[j+1][-1])
    j += 2
for column in np.arange(len(dataholder)):
    portsUK[str(column)] = dataholder[column]
# print the regions, to set correct columns
print(regions)
portsUK.columns = ['domestic_traffic_inwards','domestic_traffic_outwards',
                   'european_union_traffic_inwards','european_union_traffic_outwards',
                   'non-eu_foreign_traffic_inwards','non-eu_foreign_traffic_outwards']
portsUK
# Adding the total column
portsUK['Total'] = portsUK.sum(axis=1)
```
### It is now time to attach geometries to the constructed data
```
# Substring-match UK port names against the global ports file to see the overlap
print('The number of UK ports present in the global ports file:', len([ukport for ukport in portsUK.index if \
      any([True if ukport.lower() in globalports.lower() else False for globalports in ports.port_name])]),'\n')
print('The ports present are:\n')
[print(ukport) for ukport in portsUK.index if \
 any([True if ukport.lower() in globalports.lower() else False for globalports in ports.port_name])]
print()
```
### Time to subset the UK-ports data
```
ports.head()
# Keep only the UK ports whose name matches a GBR port in the global file
portsUKinGlobal = portsUK[[True if any([True if (ukport.lower() in globalports.lower()) & (iso == 'GBR')\
    else False for globalports,iso in zip(ports.port_name,ports.iso3)]) else False for ukport in portsUK.index]]
portsUKinGlobal = portsUKinGlobal.sort_index()
portsUKinGlobal
```
### Time to subset the global-ports data, in order to merge the two subsetted DataFrames
```
# The reverse match: global GBR ports whose name contains a UK port name
portsGlobalInUK = ports[([True if any([True if ukport.lower() in globalport.lower()\
    else False for ukport in portsUK.index]) \
    else False for globalport in ports.port_name]) & (ports.iso3 == 'GBR')]
portsGlobalInUK = portsGlobalInUK.sort_values(by=['port_name'])
portsGlobalInUK = portsGlobalInUK.reset_index(drop=True)
# Manual correction: normalise this entry's name to 'HULL'
# NOTE(review): .loc on a selected column is chained assignment — pandas may warn here
portsGlobalInUK['port_name'].loc[17] = 'HULL'
portsGlobalInUK = portsGlobalInUK.sort_values(by=['port_name'])
portsGlobalInUK = portsGlobalInUK.reset_index(drop=True)
portsGlobalInUK
```
Because one more matching UK port was found in the global file than in the reverse direction, an inspection of the ports found above is needed. It shows that two ports for Liverpool are detected, a minor and a large one. Comparing with the previous table, the Liverpool port of interest is the third largest in the UK, so the minor Liverpool port found above is dropped manually.
```
# Drop the duplicate (minor) Liverpool entry found above, then align indices
portsGlobalInUK = portsGlobalInUK.drop([20])
portsGlobalInUK = portsGlobalInUK.reset_index(drop=True)
portsGlobalInUK = portsGlobalInUK.set_index(portsUKinGlobal.index)
# Joining together the subsetted dataframes (index-aligned)
UKports = portsUKinGlobal.join(portsGlobalInUK)
UKports
```
### Writing the files
```
# Write the joined UK ports out as GeoJSON (with geometry) and CSV (attributes only)
gpdUKPorts = gpd.GeoDataFrame(UKports,geometry=UKports.geometry,crs = countries.crs)
# Based on this answer: https://gis.stackexchange.com/questions/159681/geopandas-cant-save-geojson
with open(basePath+'Final Data/UKports.geojson', 'w') as f:
    f.write(gpdUKPorts.to_json())
UKportsToSave = UKports.drop(['geometry'],axis=1)
UKportsToSave.to_csv(basePath+'Final Data/UKports.csv')
```
| github_jupyter |
<img style = "float: right; margin: 45px 22px 22px 22px;" src = "https://pngimage.net/wp-content/uploads/2018/06/logo-iteso-png-5.png" width = "600" height = "960" />
# <font color='navy'> Quantitative Finance
**Students:**
Espinosa García, Lyha
Flores Ortiz, Moisés
Lares Barragán, Andrés
Rosas Medellín, Miriam Eunice
Vázquez Vargas, Ana Cristina
**Date:**
December 4th, 2021
**Professor:**
José Mario Zárate Carbajal
# Griegas de Opciones Financieras
## Primer Orden
### Delta
La delta de una opción se define como la tasa de cambio del precio de la opción con respecto al precio del activo subyacente. La pendiente de la curva relaciona el precio de la opción con el precio del activo subyacente. Suponga que la delta de una opción
de compra sobre una acción es de 0.6. Esto significa que cuando el precio de la acción cambia en
un monto pequeño, el precio de la opción cambia alrededor de 60% de ese monto.
### Vega
Para las griegas, generalmente, se asume la volatilidad como un valor constante.
Relativa al cambio del valor en la cartera con respecto a la volatilidad del activo; si la vega es alta quiere decir que la cartera es muy sensible a los cambios en la volatilidad.
### Theta
Representa la tasa de cambio del valor de la cartera con respecto al paso del tiempo, suponiendo que todos los otros factores sean constantes. También se conoce como decaimiento del tiempo (time decay).
En términos de opciones, la theta tiende a ser negativa esto debido a que el valor de la opción tiende a disminuir a medida que el tiempo de vencimiento disminuye.
### Rho
Es la tasa de cambio del valor de una cartera con respecto a la tasa de interés. Por lo tanto mide la sensibilidad de las opciones con respecto a la fluctuación en la tasa de interés.
### Epsilon
### Lambda
Es la letra griega que muestra el cambio en el precio de una opción con respecto al cambio en la volatilidad implicita.
## Segundo Orden
### Gamma
Muestra la tasa de cambio de la delta de una opción con respecto al precio del subyacente.
El tamaño de gamma describe el cambio en delta, es decir, si gamma es pequeña la delta cambia lentamente. No obstante, si es grande en valores absolutos, significa que la delta es muy sensible al cambio en el precio del subyacente.
### Vanna
Cambio de Delta con respecto a la volatilidad.
### Charm
Cambio de Vega con respecto a la volatilidad.
### Vomma
Mide la sensibilidad de segundo orden a la volatilidad. Vomma es la segunda derivada del valor de la opción con respecto a la volatilidad o, dicho de otra manera, vomma mide la tasa de cambio a vega a medida que cambia la volatilidad.
Con vomma positiva, una posición se convertirá en vega larga a medida que aumenta la volatilidad implícita y vega corta a medida que disminuye. Y una posición inicialmente vega-neutral, de vomma larga se puede construir a partir de proporciones de opciones en diferentes strikes.
$$Vomma = \frac{\partial \nu}{\partial\sigma} = \frac{\partial^{2} V_{t}}{\partial\sigma^{2}}$$
### Veta
### Vera
## *Third-order*
### Speed
### Zomma
### Color
### Ultima
Mide la sensibilidad de la opción vomma con respecto al cambio en la volatilidad. Ultima es una derivada de tercer orden del valor de la opción respecto a la volatilidad.
$$Ultima = \frac{\partial vomma}{\partial\sigma} = \frac{\partial^{3} V_{t}}{\partial\sigma^{3}}$$
### Veta
[resumen]
### Vera
[resumen]
## Tercer Orden
### Speed
### Zomma
### Color
### Ultima
# Derivaciones
#### Propiedad 1
Se tiene un portafolio de una opción de compra europea
$$V_{t}=S_{t}\mathcal{N}(d_{1})-Ke^{-r(T-t)}\mathcal{N}(d_{2})$$
Para poder derivar de una manera más práctica se realiza un cambio de variable para tener todo en términos de $\mathcal{N}(d_{1})$ ya que se conoce la siguiente propiedad:
$$d_{2}=d_{1}-\sigma \sqrt{T-t}$$
Por lo que se tiene:
$$V_{t}=S_{t}\mathcal{N}(d_{1})-Ke^{-r(T-t)}\mathcal{N}(d_{1}-\sigma \sqrt{T-t})$$
Se procede a derivar con respecto de $d_{1}$ la variable común:
$$\frac{\partial V_{t}}{\partial d_{1}}[V_{t}=S_{t}\mathcal{N}(d_{1})-Ke^{-r(T-t)}\mathcal{N}(d_{1}-\sigma \sqrt{T-t})]$$
$$\frac{\partial V_{t}}{\partial d_{1}}=0=S_{t}\mathcal{N}'(d_{1})-Ke^{-r(T-t)}\mathcal{N}'(d_{1}-\sigma \sqrt{T-t})$$
Esto resulta en la propiedad 1 que sostiene que:
$$S_{t}\mathcal{N}'(d_{1})-Ke^{-r(T-t)}\mathcal{N}'(d_{1}-\sigma \sqrt{T-t})=0$$
regresando a las variables originales se tiene lo siguiente:
$$S_{t}\mathcal{N}'(d_{1})-Ke^{-r(T-t)}\mathcal{N}'(d_{2})=0$$
o bien que:
$$S_{t}\mathcal{N}'(d_{1})=Ke^{-r(T-t)}\mathcal{N}'(d_{2})$$
**Delta $\Delta$**
## *delta $\Delta$*
Buscamos las derivadas parciales de $d_{1}$ y $d_{2}$ con respecto a $S_{t}$
$$d_{1} = \frac{ln\big(\frac{S_{t}}{K}\big) + \big(r + \frac{\sigma^{2}}{2}\big)(T-t)}{\sigma \sqrt{T-t}}$$
$$\frac{\partial d_{1}}{\partial S_{t}} = \frac{1}{S_{t}(\sigma \sqrt{T-t})}$$
$$d_{2} = \frac{ln\big(\frac{S_{t}}{K}\big) + \big(r - \frac{\sigma^{2}}{2}\big)(T-t)}{\sigma \sqrt{T-t}} $$
$$\frac{\partial d_{2}}{\partial S_{t}} = \frac{1}{S_{t}(\sigma \sqrt{T-t})}$$
Por lo tanto se obtiene la siguiente **propiedad (2)**:
$$\frac{\partial d_{1}}{\partial S_{t}} = \frac{\partial d_{2}}{\partial S_{t}}$$
**Call**
Tomando en cuenta que por definición el valor justo de una opción call es:
$$V_t=S_tN(d_1)-Ke^{-r(T-t)}N(d_2)$$
$$\frac{\partial V_t}{\partial S_t }= \frac{\partial S_t }{\partial S_t}.N(d1)+\frac{\partial N(d_1) }{\partial d_1}\frac{\partial d_1}{\partial S_t } St - Ke^{-r(T-t)}\frac{\partial N(d_2) }{\partial d_2}\frac{\partial d_2}{\partial S_t }$$
$$N(d_1)+N'(d_1)d_1'S_t-Ke^{-r(T-t)}N'(d_2)d_2'$$
Aplicamos propiedad 2 para dejar todo en términos de $d1$:
$$\frac{\partial V_t}{\partial S_t }=N(d1) + N'(d_1)S_td1'-Ke^{-r(T-t)}N'(d2)d1'$$
Agrupamos términos
$$\frac{\partial V_t}{\partial S_t }=N(d_1)+d1'[St N'(d_1) - Ke^{-r(T-t)}N'(d_2)]$$
Tomando en cuenta la propiedad 1 la expresión dentro de los corchetes es igual a cero por lo tanto:
$$\Delta=\frac{\partial V_t}{\partial S_t }=N(d_1)$$
**Put**
Tomando en cuenta que por definición el valor justo de una opción put es:
$$ V_{t} = K e^{-r(T-t)} \mathcal{N}(-d_{2})-S_{t} \mathcal{N}(-d_{1}) $$
$$ \frac{\partial V_{t}}{\partial S_{t}} = K e^{-r(T-t)} \mathcal{N}(-d_{2}) \frac{\partial -d_{2}}{\partial S_{t}} - \mathcal{N}(-d_{1}) + S_{t} \mathcal{N}´(-d_{1}) \frac{\partial -d_{1}}{\partial S_{t}} $$
Regresamos a encontrar delta, aplicando la propiedad 2:
$$ \frac{\partial V_{t}}{\partial S_{t}} =\frac{\partial d_{1}}{\partial S_{t}}[K e^{-r(T-t)} \mathcal{N}'(-d_{2})-S_{t} \mathcal{N}´(-d_{1}) ] -\mathcal{N}(-d_{1}) $$
Aplicando la propiedad 1 eliminamos el término que se encuetra entre corchetes por lo que:
$$ \Delta=\frac{\partial V_{t}}{\partial S_{t}} = -\mathcal{N}(-d_{1}) $$
**Vega $\mathcal{V}$**
**Call**
Derivamos $d_{1}$ y $d_{2}$ con respecto a $\sigma$
$$d_{1}=\frac{ln\big(\frac{S_{t}}{K}\big)+\big(r+\frac{\sigma^{2} }{2}\big)(T-t)}{\sigma \sqrt{T-t}}$$
Reescribimos $d_{1}$ término a término:
$$d_{1}=ln\big(\frac{S_{t}}{K}\big)\sigma ^{-1}(T-t)^{-\frac{1}{2}}+r\,\sigma ^{-1}(T-t)^{\frac{1}{2}}+\frac{\sigma}{2}(T-t)^{\frac{1}{2}}$$
y derivamos con respecto a $\sigma$:
$$\frac{\partial d_{1}}{\partial \sigma }=-\frac{ln\big(\frac{S_{t}}{K}\big)}{\sigma ^{2}\sqrt{T-t}}-\frac{r(T-t)}{\sigma ^{2}\sqrt{T-t}}+\frac{1}{2}\sqrt{T-t}$$
$$d_{2}=\frac{ln\big(\frac{S_{t}}{K}\big)+\big(r-\frac{\sigma^{2} }{2}\big)(T-t)}{\sigma \sqrt{T-t}} = d_{1}-\sigma\sqrt{T-t}$$
Tomando la ultima expresión, derivamos $d_{2}$ con respecto a $\sigma$
$$\frac{\partial d_{2}}{\partial \sigma }=\frac{\partial d_{1}}{\partial \sigma }-\sqrt{T-t}$$
Con lo anterior obtenemos la siguiente propiedad (3):
$$\frac{\partial d_{1}}{\partial \sigma }=\frac{\partial d_{2}}{\partial \sigma }+ \sqrt{T-t}$$
Por definición el valor justo de una opción europea call sin dividendo:
$$V_{t}=S_{t}\mathcal{N}(d_{1})-Ke^{-r(T-t)}\mathcal{N}(d_{2})$$
$$\frac{\partial V_{t}}{\partial \sigma }=S_{t} \frac{\partial \mathcal{N}(d_{1}) }{\partial d_{1}}\frac{\partial d_{1}}{\partial \sigma } - Ke^{-r(T-t)}\frac{\partial \mathcal{N}(d_{2}) }{\partial d_{2}}\frac{\partial d_{2}}{\partial \sigma }$$
Aplicamos la propiedad antes encontrada (propiedad 3)
$$\frac{\partial V_{t}}{\partial \sigma }=S_{t} \mathcal{N}'(d_{1})\bigg(\frac{\partial d_{2}}{\partial \sigma }+\sqrt{T-t}\bigg) - Ke^{-r(T-t)}\mathcal{N}'(d_{2})\frac{\partial d_{2}}{\partial \sigma }$$
Agrupamos términos
$$\frac{\partial V_{t}}{\partial \sigma }=S_{t} \mathcal{N}'(d_{1})\sqrt{T-t}+\frac{\partial d_{2}}{\partial \sigma }[S_{t} \mathcal{N}'(d_{1}) - Ke^{-r(T-t)}\mathcal{N}'(d_{2})]$$
Tomando en cuenta la **propiedad 1** la expresión dentro de los corchetes es $= 0$ por lo tanto:
$$\nu=\frac{\partial V_{t}}{\partial \sigma }=S_{t} \mathcal{N}'(d_{1})\sqrt{T-t}$$
**Put**
Por definición el valor justo de una opción europea put sin dividendo:
$$V_{t} = K e^{-r(T-t)} \mathcal{N}(-d_{2})-S_{t} \mathcal{N}(-d_{1}) $$
$$\frac{\partial V_{t}}{\partial \sigma }=Ke^{-r(T-t)}\frac{\partial \mathcal{N}(-d_{2}) }{\partial -d_{2}}\frac{\partial -d_{2}}{\partial \sigma }-S_{t} \frac{\partial \mathcal{N}(-d_{1}) }{\partial -d_{1}}\frac{\partial -d_{1}}{\partial \sigma } $$
Aplicamos la propiedad antes encontrada (propiedad 3)
$$\frac{\partial V_{t}}{\partial \sigma }=Ke^{-r(T-t)}\frac{\partial \mathcal{N}(-d_{2}) }{\partial -d_{2}}\frac{\partial -d_{2}}{\partial \sigma }-S_{t} \frac{\partial \mathcal{N}(-d_{1})}{\partial -d_{1}}\bigg(\frac{\partial- d_{2}}{\partial \sigma }-\sqrt{T-t}\bigg)$$
Agrupamos términos
$$\frac{\partial V_{t}}{\partial \sigma }= S_{t} \mathcal{N}'(d_{1})\sqrt{T-t} +\frac{\partial- d_{2}}{\partial \sigma }[ - S_{t} \mathcal{N}'(-d_{1})+Ke^{-r(T-t)}\mathcal{N}(-d_{2})]$$
Tomando en cuenta la **propiedad 1** la expresión dentro de los corchetes es $= 0$ por lo tanto:
$$\nu=\frac{\partial V_{t}}{\partial \sigma }=S_{t} \mathcal{N}'(d_{1})\sqrt{T-t}$$
**Theta $\Theta$**
**Call**
Considerando que $(T-t) = \tau $
Derivamos $d_{1}$ y $d_{2}$ con respecto a $\tau $
$$d_{1}=\frac{ln\big(\frac{S_{t}}{K}\big)+\big(r+\frac{\sigma^{2} }{2}\big)\tau}{\sigma \sqrt{\tau}}$$
$$d_{1}=ln\bigg(\frac{S_{t}}{K}\bigg)\sigma^{-1} \tau ^{-\frac{1}{2}}+ \bigg(r+\frac{\sigma ^{2}}{2}\bigg)\sigma^{-1} \tau ^{\frac{1}{2}}$$
$$\frac{\partial d_{1}}{\partial \tau }=-\frac{1}{2}ln\bigg(\frac{S_{t}}{K}\bigg)\sigma^{-1} \tau ^{-\frac{3}{2}}+ \frac{1}{2}\bigg(r+\frac{\sigma ^{2}}{2}\bigg)\sigma^{-1} \tau ^{\frac{-1}{2}}$$
$$\frac{\partial d_{1}}{\partial \tau }= -\frac{1}{2}\frac{ln\big(\frac{S_{t}}{K}\big)}{\sigma \tau \sqrt{\tau }}+ \frac{1}{2}\frac{\big(r+\frac{\sigma ^{2}}{2}\big)}{\sigma \sqrt{\tau }}=-\frac{1}{2\sigma \sqrt{\tau }}\bigg[\frac{ln\big(\frac{S_{t}}{K}\big)}{\tau}-\bigg(r+\frac{\sigma ^{2}}{2}\bigg)\bigg]$$
$$d_{2}=\frac{ln\big(\frac{S_{t}}{K}\big)+\big(r-\frac{\sigma^{2} }{2}\big)\tau}{\sigma \sqrt{\tau}} = d_{1}-\sigma\sqrt{\tau}$$
Tomando la ultima expresión, derivamos $d_{2}$ con respecto a $\tau$
$$\frac{\partial d_{2}}{\partial \tau }=\frac{\partial d_{1}}{\partial \tau }-\frac{1}{2}\frac{\sigma}{\sqrt{\tau}}$$
Con lo anterior obtenemos la siguiente propiedad (4):
$$\frac{\partial d_{1}}{\partial \tau }=\frac{\partial d_{2}}{\partial \tau }+\frac{1}{2}\frac{\sigma}{\sqrt{\tau}}$$
Proseguimos a encontrar Theta, el valor justo de una opción call es:
$$V_{t}=S_{t}\mathcal{N}(d_{1})-Ke^{-r\tau}\mathcal{N}(d_{2})$$
$$\frac{\partial V_{t}}{\partial \tau }=S_{t}\frac{\partial \mathcal{N}(d_{1})}{\partial d_{1}}\frac{\partial d_{1}}{\partial \tau}-\bigg[-rKe^{-r\tau}\mathcal{N}(d_{2})+\frac{\partial \mathcal{N}(d_{2})}{\partial d_{2}}\frac{\partial d_{2}}{\partial \tau}Ke^{-r\tau}\bigg]$$
$$\frac{\partial V_{t}}{\partial \tau }=S_{t}\mathcal{N}'(d_{1})\frac{\partial d_{1}}{\partial \tau}-\bigg[-rKe^{-r\tau}\mathcal{N}(d_{2})+Ke^{-r\tau}\mathcal{N}'(d_{2})\frac{\partial d_{2}}{\partial \tau}\bigg]$$
Aplicamos la propiedad 4:
$$\frac{\partial V_{t}}{\partial \tau }=S_{t}\mathcal{N}'(d_{1})(\frac{\partial d_{2}}{\partial \tau }+\frac{1}{2}\frac{\sigma}{\sqrt{\tau}})- \bigg[-rKe^{-r\tau}\mathcal{N}(d_{2})+Ke^{-r\tau}\mathcal{N}'(d_{2})\frac{\partial d_{2}}{\partial \tau}\bigg]$$
Reagrupamos terminos:
$$\frac{\partial V_{t}}{\partial \tau }=\frac{1}{2}\frac{\sigma}{\sqrt{\tau}}S_{t}\mathcal{N}'(d_{1})+rKe^{-r\tau}\mathcal{N}(d_{2})+\frac{\partial d_{2}}{\partial \tau}\bigg[S_{t}\mathcal{N}'(d_{1})-Ke^{-r\tau}\mathcal{N}'(d_{2})\bigg]$$
Tomando en cuenta la **propiedad 1** el término que se encuentra entre corchetes $=0$
$$\frac{\partial V_{t}}{\partial \tau }=\frac{1}{2}\frac{\sigma}{\sqrt{\tau}}S_{t}\mathcal{N}'(d_{1})+rKe^{-r\tau}\mathcal{N}(d_{2})$$
**Put**
Por definición el valor justo de una opción put es:
$$V_{t} = K e^{-r\tau} \mathcal{N}(-d_{2})-S_{t} \mathcal{N}(-d_{1}) $$
$$\frac{\partial V_{t}}{\partial \tau }= [-rKe^{-r\tau}\mathcal{N}(-d_{2})+Ke^{-r\tau}\mathcal{N}'(-d_{2})-d'_{2}]-S_{t}\mathcal{N}'(-d_{1})-d'_{1}$$
Aplicamos la propiedad 4 y sustituimos
$$\frac{\partial V_{t}}{\partial \tau }= [-rKe^{-r\tau}\mathcal{N}(-d_{2})+Ke^{-r\tau}\mathcal{N}'(-d_{2})-d'_{2}]-S_{t}\mathcal{N}'(-d_{1})\bigg(-d'_{2}-\frac{1}{2}\frac{\sigma}{\sqrt{\tau}}\bigg)$$
Reagrupamos términos
$$\frac{\partial V_{t}}{\partial \tau }=-d'_{2}[Ke^{-r\tau}\mathcal{N}'(-d_{2})-S_{t}\mathcal{N}'(-d_{1})]-rKe^{-r\tau}\mathcal{N}(-d_{2})-\frac{1}{2}\frac{\sigma}{\sqrt{\tau}}S_{t}\mathcal{N}'(-d_{1})$$
Finalmente, aplicamos propiedad 1, el termino entre corchetes = 0:
$$\frac{\partial V_{t}}{\partial \tau }=-rKe^{-r\tau}\mathcal{N}(-d_{2})-\frac{1}{2}\frac{\sigma}{\sqrt{\tau}}S_{t}\mathcal{N}'(-d_{1})$$
$$\theta=-\frac{\partial V_{t}}{\partial \tau }=-\bigg[-rKe^{-r\tau}\mathcal{N}(-d_{2})-\frac{1}{2}\frac{\sigma}{\sqrt{\tau}}S_{t}\mathcal{N}'(-d_{1})\bigg]$$
**Second-order Greeks**
gamma $\Gamma$
**Third-order Greeks**
speed
color
| github_jupyter |
```
import tweepy
import json
import pandas as pd
import csv
import mysql.connector
from mysql.connector import Error
#imports for catching the errors
from ssl import SSLError
from requests.exceptions import Timeout, ConnectionError
from urllib3.exceptions import ReadTimeoutError
#Twitter API credentials
consumer_key = "NDhGN1poxOV4el21shhNpFbbf"
consumer_secret = "xk8ZtyEh6Hpq2zucqvcrqSDXm7gTredBC0T8S6T9mSCPZJhEmx"
access_token = "825394860190470144-VmRgBQeYWF0MtoBYCrT4IA5ANzmDMwG"
access_token_secret = "gkHs9Beab9lPsJA9bMCwCUITRqsTLkGIwdYl6xlav0jIu"
#boundingbox ciudad de Madrid obtenido de https://boundingbox.klokantech.com/
madrid = [-3.7475842804,40.3721683069,-3.6409114868,40.4886258195]
def connect(user_id, user_name, user_loc, user_follow_count,user_friends_count, user_fav_count,user_status_count,
tweet_id,text,created_at,source,
reply_id, reply_user_id,
retweet_id,retweet_user_id,
quote_id,quote_user_id,
reply_count,retweet_count,favorite_count,quote_count,
hashtags, mention_ids,
place_id, place_name, coord):
con = mysql.connector.connect(host = 'localhost',
database='twitterdb', user='david', password = 'password', charset = 'utf8mb4',auth_plugin='mysql_native_password')
cursor = con.cursor()
try:
if con.is_connected():
query = "INSERT INTO UsersMad (user_id, tweet_id,user_name, user_loc, user_follow_count,user_friends_count, user_fav_count,user_status_count) VALUES (%s,%s, %s, %s, %s, %s, %s, %s)"
cursor.execute(query, (user_id, tweet_id, user_name, user_loc, user_follow_count,user_friends_count, user_fav_count,user_status_count))
query2 = "INSERT INTO PostsMad (tweet_id,user_id,text,created_at,source,reply_id, reply_user_id,retweet_id, retweet_user_id,quote_id,quote_user_id,reply_count,retweet_count,favorite_count,quote_count,place_id, place_name, coord,hashtags, mention_ids) VALUES (%s,%s, %s, %s, %s, %s, %s, %s,%s, %s, %s, %s, %s, %s, %s, %s,%s, %s, %s, %s)"
cursor.execute(query2, (tweet_id,user_id,text,created_at,source,
reply_id, reply_user_id,
retweet_id, retweet_user_id,
quote_id,quote_user_id,
reply_count,retweet_count,favorite_count,quote_count,
place_id, place_name, coord,
hashtags, mention_ids))
con.commit()
except Error as e:
print(e)
print(text)
#Carlota: He dejado este print, porque no era capaz de almacenar emojis por la codificacion.
#Estoy casi segura de que se ha arreglado, pero por si acaso
cursor.close()
con.close()
return
class MyStreamListener(tweepy.StreamListener):
def on_data(self,data):
# Twitter returns data in JSON format - we need to decode it first
try:
decoded = json.loads(data)
except Exception as e:
print ("Error on_data: %s" % str(e)) #we don't want the listener to stop
return True
#LOCATION METADATA
#En caso de estar geolocalizado guardar la geolocalizacion
#Si esta geolocalizado dentro de un bounding box (no exacta)
if decoded.get('place') is not None:
place_id = decoded.get('place').get('id')
place_name =decoded.get('place').get('name')
else:
place_id = 'None'
place_name = 'None'
#Si es localizacion exacta
#Geo is deprecated, they suggest to use simply coordinates
if decoded.get('coordinates') is not None:
m_coord = decoded.get('coordinates')['coordinates']
#print(m_coord)
coord=str(m_coord[0])+';'+str(m_coord[1])
#print(coord)
#time.sleep(100)
else:
coord = 'None'
#print(place_id)
#USER METADATA
user_name = '@' + decoded.get('user').get('screen_name') #nombre cuenta @itdUPM
user_id=decoded.get('user').get('id') #id de la cuenta (int)
user_loc=decoded.get('user').get('location')
user_follow_count=decoded.get('user').get('followers_count')
user_friends_count=decoded.get('user').get('friends_count')
user_fav_count=decoded.get('user').get('favourites_count')
user_status_count=decoded.get('user').get('statuses_count')
#POST METADATA
created_at = decoded.get('created_at') #Fecha
tweet_id = decoded['id'] #tweet id (int64)
source = decoded['source'] #string source (web client, android, iphone) interesante???
if decoded.get('truncated'):
text = decoded['extended_tweet']['full_text'].replace('\n',' ')
else:
text = decoded['text'].replace('\n',' ') #Contenido tweet
#REPLY METADATA
reply_id=decoded['in_reply_to_status_id']
reply_user_id=decoded['in_reply_to_user_id']
#RETWEET
if decoded.get('retweeted_status') is not None:
retweet_id = decoded['retweeted_status'] ['id']
retweet_user_id = decoded['retweeted_status']['user']['id']
if decoded['retweeted_status']['truncated']:
text = decoded['retweeted_status']['extended_tweet']['full_text'].replace('\n',' ')
else:
text = decoded['retweeted_status']['text'].replace('\n',' ') #Contenido tweet
#Carlota: Si es un retweet los campos de nº de retweets favs etc vienen dentro de retweeted status
#David: ok bien visto, he añadido el id de usuario retweeteado
reply_count = decoded['retweeted_status']['reply_count'] #Number of times this Tweet has been replied to
retweet_count = decoded['retweeted_status']['retweet_count'] #Number of times this Tweet has been retweeted
favorite_count = decoded['retweeted_status']['favorite_count'] #how many times this Tweet has been liked by Twitter users.
quote_count = decoded['retweeted_status']['quote_count']
#hashtags_list=decoded.get('retweeted_status').get('entities').get('hashtags')
#mentions=decoded.get('retweeted_status').get('entities').get('user_mentions')
#David: para esto hay que crear una cadena de texto recorriendo la lista, el
#código estaba en la versión anterior...
hashtags_list=decoded['retweeted_status']['entities']['hashtags']
mentions=decoded['retweeted_status']['entities']['user_mentions']
hashtags=''
c=0
if len(hashtags_list)>0:
for i in range(0, len(hashtags_list)-1):
mh=hashtags_list[i].get('text')
hashtags=hashtags+mh+';'
c=c+1
mh=hashtags_list[c].get('text')
hashtags=hashtags+str(mh)
else:
hashtags='None'
mention_ids=''
c=0
if len(mentions)>0:
for i in range(0, len(mentions)-1):
mid=mentions[i].get('id_str')
mention_ids=mention_ids+mid+';'#use a different separator!
c=c+1
mid=mentions[c].get('id_str')
mention_ids=mention_ids+str(mid)
else:
mention_ids='None'
#David: esto no sé si haría falta... este justo es un retweet de un post que a su ves
#es un quote de una noticia, osea que hay dos pasos de conexión, pero el retweet
#con el quote ya existe... lo guardamos pero hay que tenerlo en cuenta que es redundante
#Carlota: Lo quito, porque tienes razon y no habia caido...
#David. lo podemos dejar porque no son campos adicionales
if decoded['retweeted_status']['is_quote_status']:
if 'quoted_status' not in decoded['retweeted_status']:
quote_id='None'
quote_user_id='None'
else:
quote_id=decoded['retweeted_status']['quoted_status']['id']
quote_user_id=decoded['retweeted_status']['quoted_status']['user']['id']
else:
quote_id='None'
quote_user_id='None'
else:
reply_count = decoded['reply_count'] #Number of times this Tweet has been replied to
retweet_count = decoded['retweet_count'] #Number of times this Tweet has been retweeted
favorite_count = decoded['favorite_count'] #how many times this Tweet has been liked by Twitter users.
quote_count = decoded['quote_count']
retweet_id = 'None'
retweet_user_id = 'None'
if decoded['is_quote_status']:
if 'quoted_status' not in decoded:
quote_id='None'
quote_user_id='None'
else:
quote_id=decoded['quoted_status']['id']
quote_user_id=decoded['quoted_status']['user']['id']
else:
quote_id='None'
quote_user_id='None'
hashtags_list=decoded.get('entities').get('hashtags')
mentions=decoded.get('entities').get('user_mentions')
hashtags=''
c=0
if len(hashtags_list)>0:
for i in range(0, len(hashtags_list)-1):
mh=hashtags_list[i].get('text')
hashtags=hashtags+mh+';'
c=c+1
mh=hashtags_list[c].get('text')
hashtags=hashtags+str(mh)
else:
hashtags='None'
mention_ids=''
c=0
if len(mentions)>0:
for i in range(0, len(mentions)-1):
mid=mentions[i].get('id_str')
mention_ids=mention_ids+mid+';'#use a different separator!
c=c+1
mid=mentions[c].get('id_str')
mention_ids=mention_ids+str(mid)
else:
mention_ids='None'
#insert data just collected into MySQL database
connect(user_id, user_name, user_loc, user_follow_count,user_friends_count, user_fav_count,user_status_count,
tweet_id,text,created_at,source,
reply_id, reply_user_id,
retweet_id,retweet_user_id,
quote_id,quote_user_id,
reply_count,retweet_count,favorite_count,quote_count,
hashtags, mention_ids,
place_id, place_name, coord)
#print("Tweet colleted at: {} ".format(str(created_at)))
def on_error(self, status_code):
if status_code == 420:
#returning False in on_error disconnects the stream
return False
# returning non-False reconnects the stream, with backoff.
if __name__ == '__main__':
print ('Starting')
#authorize twitter, initialize tweepy
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth, wait_on_rate_limit=True)
#create the api and the stream object
myStreamListener = MyStreamListener()
myStream = tweepy.Stream(auth = api.auth, listener=myStreamListener)
#Filter the stream by keywords
myStream.filter(locations = madrid)
```
| github_jupyter |
## Example of extracting the lever arm and the charging energy from bias triangles and addition lines
Authors: Anne-Marije Zwerver and Pieter Eendebak
More details on non-equilibrium charge stability measurements can be found in https://doi.org/10.1103/RevModPhys.75.1 (section B) and https://doi.org/10.1063/1.3640236
The core functions used in the example are `perpLineIntersect`, `lever_arm` and `E_charging`. Input needed for the code are the non-equilibrium charge stability diagrams of a 1,0-0,1 interdot transition for the lever arm, and a charge stability diagram with the 0-1 and 1-2 charge transitions.
```
%matplotlib inline
import os, sys
import qcodes
import qtt
import matplotlib.pyplot as plt
import numpy as np
from qcodes.plots.qcmatplotlib import MatPlot
from qcodes.data.data_set import DataSet
from qtt.data import diffDataset
from qtt.algorithms.bias_triangles import perpLineIntersect, lever_arm, E_charging
```
### Load datasets
```
exampledatadir=os.path.join(qtt.__path__[0],'exampledata','misc')
DataSet.default_io = qcodes.data.io.DiskIO(exampledatadir)
dataset_anticrossing = qcodes.data.data_set.load_data(os.path.join(exampledatadir, r'Lever_arm_Charging_energy', '14-28-52_qtt_scan2Dfast'))
dataset_la = qcodes.data.data_set.load_data(os.path.join(exampledatadir, r'Lever_arm_Charging_energy', '14-33-26_qtt_scan2Dfast'))
dataset_Ec = qcodes.data.data_set.load_data(os.path.join(exampledatadir, r'Lever_arm_Charging_energy', '10-06-59_qtt_scan2Dfast'))
```
First, make a double dot and find the (1,0) -- (0,1) anticrossing:
```
plt.figure(1); plt.clf()
MatPlot([dataset_anticrossing.measured], num = 1)
_=plt.suptitle('Anti crossing (1,0)--(0,1)')
```
Then, apply a bias across the Fermi reservoirs (in the example -800 uV) and scan the anti crossing again. This non-equilibrium regime shows "bias triangles", which can be used to extract the gate-to-dot lever arms. More information on these measurements can be found in the references cited in the introduction of this example.
```
plt.figure(1); plt.clf()
MatPlot([dataset_la.measured], num = 1)
_=plt.suptitle('Bias triangle')
```
### Lever arm
The function perpLineIntersect guides you through the process of extracting the lever arm from the bias triangles. To do this, you must include description = 'lever_arm' as input to the function.
The function instructs you to click on 3 points in the figure. Point 1 and 2 along the addition line for the dot of which you want to determine the lever arm, the third point on the triple point where both dot levels and reservoir are aligned. The perpLineIntersect function will return a dictionary containing the coordinates of these three points, the intersection point of a horizontal/vertical line of the third point with the (extended) line through point 1 and 2 and the line length from the third point to the intersection.
It is important to set the vertical input based on the dot for which the lever arm is being measured. vertical = True (False) to measure the lever arm of the gate in vertical (horizontal) axis.
NB: perpLineIntersect makes use of clickable interactive plots. However, the inline plots in this notebook are not interactive, therefore, in this example we provide the function the clicked points as an input. If you want to try and click, restart the notebook and please use '%pylab tk' instead of '%matplotlib inline' and remove the points input from the function call.
```
dot = 'P5'
if dot == 'P4':
vertical = False
elif dot == 'P5':
vertical = True
else:
print("Please choose either dot 4 or dot 5")
clicked_pts = np.array([[ 24.87913077, 38.63388728, 40.44875099],
[ 135.28934654, 128.50469446, 111.75508464]])
lev_arm_fit = perpLineIntersect(dataset_la, description = 'lever_arm', vertical = vertical, points = clicked_pts)
```
Determine the lever arm ($\mu$eV/mV) by dividing the applied bias (in $\mu$V) for the bias triangles by the voltage span (in mV) determined by perpLineIntersect
```
bias = dataset_la.snapshot()['allgatevalues']['O5'] # bias voltage extracted from the dataset
print(bias)
lev_arm = lever_arm(bias, lev_arm_fit, fig = True)
print('''The lever arm of gate %s to dot %s is %.2f ueV/mV'''%(dot, dot[1], lev_arm))
```
### Extract addition energy
Once the lever arm is known, the addition energy can be extracted from a charge stability diagram showing 2 addition lines. Again, use the function perpLineIntersect, this time using description = 'E_charging'. The function instructs you to click on the 3 relevant points from which the distance between the 2 addition lines can be measured, and converted to meV using the lever arm.
```
plt.figure(3); plt.clf()
MatPlot([dataset_Ec.measured], num = 3)
ax = plt.gca()
_=plt.suptitle('Addition lines')
clicked_pts = np.array([[ -11.96239499, 24.89272409, 2.56702695],
[ 202.62140281, 181.56972616, 142.246783 ]])
Ec_fit = perpLineIntersect(dataset_Ec, description = 'E_charging', vertical = vertical, points = clicked_pts)
E_c = E_charging(lev_arm, results = Ec_fit, fig = True)
print('The charging energy of dot %s is %.2f meV' % (dot[1], E_c/1000))
```
| github_jupyter |
```
import os
import sys
import subprocess
import numpy as np
import pandas as pd
from io import StringIO
os.getcwd()
from skempi_consts import *
import matplotlib.pyplot as plt
%matplotlib inline
%load_ext autoreload
%autoreload 2
import pylab
pylab.rcParams['figure.figsize'] = (10.0, 8.0)
df = skempi_df
ddg1 = df[df.Protein.isin(G1)].DDG.values
ddg2 = df[df.Protein.isin(G2)].DDG.values
ddg3 = df[df.Protein.isin(G3)].DDG.values
ddg4 = df[df.Protein.isin(G4)].DDG.values
ddg5 = df[df.Protein.isin(G5)].DDG.values
ddg1235 = df[df.Protein.isin(G1 + G2 + G3 + G5)].DDG.values
# plt.hist(ddg1, bins=100, alpha=0.5, label="G1", normed=1, cumulative=False, histtype='bar')
# plt.hist(ddg2, bins=100, alpha=0.5, label="G2", normed=1, cumulative=False, histtype='bar')
# plt.hist(ddg3, bins=100, alpha=0.5, label="G3", normed=1, cumulative=False, histtype='bar')
plt.hist(ddg4, bins=100, alpha=0.5, label="G4", normed=1, cumulative=False, histtype='bar')
# plt.hist(ddg5, bins=100, alpha=0.5, label="G5", normed=1, cumulative=False, histtype='bar')
plt.hist(ddg1235, bins=100, alpha=0.5, label="G1235", normed=1, cumulative=False, histtype='bar')
plt.legend(loc='upper right')
plt.title("DDG Distribution")
plt.ylabel("Frequency")
plt.grid(True)
plt.show()
skempi_df.head()
from skempi_utils import *
import skempi_consts as consts
num_mut = 0
pbar = tqdm(range(len(skempi_df)), desc="row processed")
for i, row in skempi_df.iterrows():
num_mut += len(row["Mutation(s)_cleaned"].split(','))
pbar.update(1)
pbar.close()
num_mut
from scipy.stats import pearsonr
all_features = {}
def get_temperature_array(records, agg=np.min):
arr = []
pbar = tqdm(range(len(skempi_df)), desc="row processed")
for i, row in skempi_df.iterrows():
arr_obs_mut = []
for mutation in row["Mutation(s)_cleaned"].split(','):
mut = Mutation(mutation)
res_i, chain_id = mut.i, mut.chain_id
t = tuple(row.Protein.split('_'))
skempi_record = records[t]
res = skempi_record[chain_id][res_i]
temps = [a.temp for a in res.atoms]
arr_obs_mut.append(np.mean(temps))
arr.append(agg(arr_obs_mut))
pbar.update(1)
pbar.close()
return arr
skempi_records = load_skempi_structs(pdb_path="../data/pdbs_n", compute_dist_mat=True)
all_features["B-factor"] = temp_arr = get_temperature_array(skempi_records, agg=np.min)
pearsonr(temp_arr, skempi_df.DDG)
from aaindex import *
B = BLOSUM62
C = SKOJ970101
skempi_records = load_skempi_structs(pdb_path="../data/pdbs", compute_dist_mat=True)
def comp_ei(mut, skempi_record, B, radius):
P = skempi_record.get_profile(mut.chain_id)
return EI(mut.m, mut.w, P, mut.i, B)
def comp_cp(mut, skempi_record, C, radius):
return CP(mut, skempi_record, C, radius)
def get_ddg_ei_cp_arrays(M, func, radius=None):
arr_ddg = []
arr_obs = []
pbar = tqdm(range(len(skempi_df)), desc="row processed")
for i, row in skempi_df.iterrows():
ddg = row.DDG
arr_ddg.append(ddg)
arr_obs_mut = []
for mutation in row["Mutation(s)_cleaned"].split(','):
mut = Mutation(mutation)
t = tuple(row.Protein.split('_'))
skempi_record = skempi_records[t]
obs = func(mut, skempi_record, M, radius)
arr_obs_mut.append(obs)
arr_obs.append(np.sum(arr_obs_mut))
pbar.update(1)
pbar.close()
return arr_ddg, arr_obs
from itertools import product
def grid_search_cp(matrices=[SKOJ970101, BASU010101], radiuses=[4, 5, 6, 7, 8, 9, 10]):
res_dict = {}
for C, angs in product(matrices, radiuses):
key = (str(C), angs)
arr_ddg, arr_cp = get_ddg_ei_cp_arrays(C, comp_cp, angs)
res_dict[key] = (arr_ddg, arr_cp)
cor_cp = pearsonr(arr_ddg, arr_cp)
print("%s: CP: %s" % (key, cor_cp,))
return res_dict
def grid_search_ei(matrices=[BLOSUM62, SKOJ970101, BASU010101]):
res_dict = {}
for B in matrices:
key = str(B)
arr_ddg, arr_ei = get_ddg_ei_cp_arrays(B, comp_ei)
res_dict[key] = (arr_ddg, arr_ei)
cor_ei = pearsonr(arr_ddg, arr_ei)
print("%s: EI: %s" % (key, cor_ei,))
return res_dict
# cps = grid_search_cp()
def comp_cp_a_b(mut, skempi_record, C, radius):
return CP_A_B(mut, skempi_record, C, radius)
def get_ddg_cp_a_b_arrays(M, func, radius=None):
arr_ddg = []
arr_obs_a = []
arr_obs_b = []
pbar = tqdm(range(len(skempi_df)), desc="row processed")
for i, row in skempi_df.iterrows():
ddg = row.DDG
arr_ddg.append(ddg)
arr_obs_mut_a = []
arr_obs_mut_b = []
for mutation in row["Mutation(s)_cleaned"].split(','):
mut = Mutation(mutation)
t = tuple(row.Protein.split('_'))
skempi_record = skempi_records[t]
obs_a, obs_b = func(mut, skempi_record, M, radius)
arr_obs_mut_a.append(obs_a)
arr_obs_mut_b.append(obs_b)
arr_obs_a.append(np.sum(arr_obs_mut_a))
arr_obs_b.append(np.sum(arr_obs_mut_b))
pbar.update(1)
pbar.close()
return arr_ddg, arr_obs_a, arr_obs_b
def grid_search_cp_a_b(matrices=[SKOJ970101, BASU010101], radiuses=[4, 5, 6, 7, 8, 9, 10]):
res_dict = {}
for C, angs in product(matrices, radiuses):
key = (str(C), angs)
arr_ddg, arr_cp_a, arr_cp_b = get_ddg_cp_a_b_arrays(C, comp_cp_a_b, angs)
arr_cp = np.asarray(arr_cp_a) + np.asarray(arr_cp_b)
res_dict[key] = (arr_ddg, arr_cp_a, arr_cp_b)
cor_cp_a = pearsonr(arr_ddg, arr_cp_a)
cor_cp_b = pearsonr(arr_ddg, arr_cp_b)
cor_cp = pearsonr(arr_ddg, arr_cp)
print("%s: CP_A: %s, CP_B: %s, CP %s" % (key, cor_cp_a, cor_cp_b, cor_cp))
return res_dict
def CP_A_B(mut, skempi, C, radius=6):
i, chain_a = mut.i, mut.chain_id
m, w = mut.m, mut.w
def helper(P, j):
return sum([P[(j, a)] * (C[(a, m)] - C[(a, w)]) for a in amino_acids])
retA, retB = 0, 0
for chain_b, j in skempi.get_sphere_indices(chain_a, i,radius):
a = skempi[chain_b][j].name
if j == i and chain_b == chain_a:
assert a == w
continue
P = skempi.get_profile(chain_b)
if chain_b == chain_a:
retA += helper(P, j)
else:
retB += helper(P, j)
return retA, retB
# cp_a_b_s_orig = grid_search_cp_a_b(matrices=[SKOJ970101, BASU010101], radiuses=[4, 5, 6, 7, 8, 9, 10])
def CP_A_B(mut, skempi, C, radius=6):
i, chain_a = mut.i, mut.chain_id
m, w = mut.m, mut.w
def helper(a, j):
return C[(a, m)] - C[(a, w)]
retA, retB = 0, 0
for chain_b, j in skempi.get_sphere_indices(chain_a, i, radius):
a = skempi[chain_b][j].name
if j == i and chain_b == chain_a:
assert a == w
continue
P = skempi.get_profile(chain_b)
if chain_b == chain_a:
retA += helper(a, j)
else:
retB += helper(a, j)
return retA, retB
# cp_a_b_s_no_profile = grid_search_cp_a_b(matrices=[BASU010101], radiuses=[2.5, 3.75, 5.0, 6.25, 7.5, 8.75, 10.0])
def CP_A_B(mut, skempi, C, radius=6):
i, chain_a = mut.i, mut.chain_id
m, w = mut.m, mut.w
def helper(P, j):
return sum([0.05 * (C[(a, m)] - C[(a, w)]) for a in amino_acids])
retA, retB = 0, 0
for chain_b, j in skempi.get_sphere_indices(chain_a, i,radius):
a = skempi[chain_b][j].name
if j == i and chain_b == chain_a:
assert a == w
continue
P = skempi.get_profile(chain_b)
if chain_b == chain_a:
retA += helper(P, j)
else:
retB += helper(P, j)
return retA, retB
# cp_a_b_s_uniform = grid_search_cp_a_b(matrices=[SKOJ970101, BASU010101], radiuses=[6, 7])
eis = grid_search_ei(matrices=[BLOSUM62])
def register_cp_a_b(cp_a_b, prefix):
for key, val in cp_a_b.iteritems():
_, cp_a, cp_b = val
mat, rad = key
all_features[(prefix, "CP_A", mat, rad)] = cp_a
all_features[(prefix, "CP_B", mat, rad)] = cp_b
def register_cp_a_b_shells(cp_a_b, prefix):
for key, val in cp_a_b.iteritems():
_, cp_a, cp_b = val
mat, inner, outer = key
all_features[(prefix, "CP_A", mat, inner, outer)] = cp_a
all_features[(prefix, "CP_B", mat, inner, outer)] = cp_b
all_features[(prefix, "CP", mat, inner, outer)] = np.sum([cp_a, cp_b], axis=0)
def register_eis(eis):
for key, val in eis.iteritems():
_, ei = val
all_features[("EI", key)] = ei
def CP_A_B(mut, skempi, C, inner, outer):
i, chain_a = mut.i, mut.chain_id
m, w = mut.m, mut.w
retA, retB = 0, 0
for chain_id, j in skempi.get_shell_indices(chain_a, i, inner, outer):
a = skempi[chain_id][j].name
if j == i and chain_id == chain_a:
assert a == w
continue
P = skempi.get_profile(chain_id)
if chain_id == chain_a:
retA += C[(a, m)] - C[(a, w)]
else:
retB += C[(a, m)] - C[(a, w)]
return retA, retB
def get_cp_a_b_array(M, inner, outer):
    """Per-row (CP_A, CP_B) feature arrays for the whole of `skempi_df`.

    For every SKEMPI row, sums the shell contact-potential scores of all its
    mutations.  Returns two parallel lists (chain A sums, chain B sums).
    Relies on notebook globals: skempi_df, skempi_records, Mutation, tqdm.
    """
    arr_obs_a = []
    arr_obs_b = []
    pbar = tqdm(range(len(skempi_df)), desc="row processed")
    for i, row in skempi_df.iterrows():
        # The structure lookup depends only on the row, so it is hoisted out
        # of the per-mutation loop (the original repeated it per mutation).
        t = tuple(row.Protein.split('_'))
        skempi_record = skempi_records[t]
        arr_obs_mut_a = []
        arr_obs_mut_b = []
        for mutation in row["Mutation(s)_cleaned"].split(','):
            mut = Mutation(mutation)
            obs_a, obs_b = CP_A_B(mut, skempi_record, M, inner, outer)
            arr_obs_mut_a.append(obs_a)
            arr_obs_mut_b.append(obs_b)
        arr_obs_a.append(np.sum(arr_obs_mut_a))
        arr_obs_b.append(np.sum(arr_obs_mut_b))
        pbar.update(1)
    pbar.close()
    return arr_obs_a, arr_obs_b
# Grid to sweep in grid_search_cp below: contact-potential matrices and
# distance shells given as (inner, outer) radii.
matrices = [BASU010101]
shells = [(0.0, 2.0), (2.0, 4.0), (4.0, 6.0), (6.0, 8.0)]
def grid_search_cp(matrices=matrices, shells=shells):
    """Sweep every (matrix, shell) pair, printing the Pearson correlation of
    the CP_A, CP_B and combined CP features against DDG.

    Returns a dict keyed by (matrix_name, inner, outer) holding the DDG
    series and the two raw feature arrays for later registration.
    """
    results = {}
    for mat in matrices:
        for inner, outer in shells:
            cp_a, cp_b = get_cp_a_b_array(mat, inner, outer)
            cp_total = np.asarray(cp_a) + np.asarray(cp_b)
            ddg = skempi_df.DDG
            key = (str(mat), inner, outer)
            results[key] = (ddg, cp_a, cp_b)
            print("%s: CP_A: %s, CP_B: %s, CP %s" % (
                key, pearsonr(ddg, cp_a), pearsonr(ddg, cp_b),
                pearsonr(ddg, cp_total)))
    return results
# Run the shell grid search and register the resulting feature arrays.
cp_a_b_s_shells = grid_search_cp(matrices, shells)
# register_cp_a_b(cp_a_b_s_uniform, "uniform")
# register_cp_a_b(cp_a_b_s_orig, "original")
# register_cp_a_b(cp_a_b_s_no_profile, "no_profile")
register_cp_a_b_shells(cp_a_b_s_shells, "shells")
register_eis(eis)
# Number of point mutations per record; the log transform is what is kept
# as a feature (both correlations are printed for comparison).
num_muts = np.asarray([len(mut.split(",")) for mut in skempi_df["Mutation(s)_cleaned"]])
pearsonr(skempi_df.DDG, np.log(num_muts)), pearsonr(skempi_df.DDG, num_muts)
all_features["#mutations"] = np.log(num_muts)
def get_stride_array(func, agg=np.sum):
    """Apply `func` to the stride (SS/ASA) record of every mutated residue
    and aggregate per SKEMPI row with `agg`.

    Returns a list of (agg(per-mutation values), stride total) tuples, one
    per row of skempi_df.  Relies on notebook globals: skempi_df,
    skempi_records, Mutation, tqdm.
    """
    arr_stride = []
    pbar = tqdm(range(len(skempi_df)), desc="row processed")
    for i, row in skempi_df.iterrows():
        # The structure lookup depends only on the row, so it is hoisted out
        # of the per-mutation loop (the original repeated it per mutation).
        t = tuple(row.Protein.split('_'))
        skempi_record = skempi_records[t]
        arr_obs_mut = []
        for mutation in row["Mutation(s)_cleaned"].split(','):
            mut = Mutation(mutation)
            res_i, chain_id = mut.i, mut.chain_id
            d_asa = skempi_record.stride[(chain_id, res_i)]
            arr_obs_mut.append(func(d_asa))
        total = skempi_record.stride._total
        arr_stride.append((agg(arr_obs_mut), total))
        pbar.update(1)
    pbar.close()
    return arr_stride
# Buried-surface-area features: ASA_Chain - ASA measures how much each
# mutated residue is buried at the complex interface.
asa_arr_mutated, asa_arr_total = zip(*get_stride_array(lambda stride: stride["ASA_Chain"]-stride["ASA"]))
all_features["sum(ASA_Chain-ASA):mutated"] = asa_arr_mutated
# Correlation of each feature with DDG (displayed by the notebook).
pearsonr(skempi_df.DDG, asa_arr_mutated)
all_features["sum(ASA_Chain-ASA):total"] = asa_arr_total
pearsonr(skempi_df.DDG, asa_arr_total)
def get_desc_array(mat, agg=np.mean):
    """Per-row aggregate of the descriptor change mat[mutant] - mat[wild]
    (e.g. molecular-weight or hydrophobicity delta) over each row's mutations.

    Relies on notebook globals: skempi_df, Mutation, tqdm.
    """
    arr = []
    pbar = tqdm(range(len(skempi_df)), desc="row processed")
    for i, row in skempi_df.iterrows():
        arr_obs_mut = []
        for mutation in row["Mutation(s)_cleaned"].split(','):
            mut = Mutation(mutation)
            # The descriptor depends only on the wild-type/mutant residue
            # identities, so the original's unused structure lookups
            # (skempi_records[t] and residue indexing) were removed as dead
            # work; they only mattered as an implicit validity check.
            arr_obs_mut.append(mat[mut.m] - mat[mut.w])
        arr.append(agg(arr_obs_mut))
        pbar.update(1)
    pbar.close()
    return arr
# FASG760101: amino-acid molecular-weight scale.
M = FASG760101
mol_arr = get_desc_array(M, np.mean)
all_features["MolWeight"] = mol_arr
pearsonr(mol_arr, skempi_df.DDG)
# ARGP820101: amino-acid hydrophobicity scale.
H = ARGP820101
hyd_arr = get_desc_array(H, np.mean)
all_features["Hydrophobic"] = hyd_arr
pearsonr(hyd_arr, skempi_df.DDG)
# Stride/DSSP secondary-structure classes, one-hot encoded.
DSSP = ["G", "H", "I", "T", "E", "B", "S", "C"]
from sklearn import preprocessing
lb = preprocessing.LabelBinarizer()
lb.fit(DSSP)
def get_bin_ss(stride):
    # One-hot vector for the residue's secondary-structure class.
    return lb.transform([stride["SS"]])[0]
from sklearn.decomposition import PCA
# Sum one-hot SS vectors per row, then project onto 3 principal components.
ss_arr, _ = zip(*get_stride_array(get_bin_ss, agg=lambda a: np.sum(a, axis=0)))
n_components = 3
ss_arr = PCA(n_components=n_components).fit_transform(ss_arr)
[pearsonr(skempi_df.DDG, np.asarray(ss_arr)[:, j]) for j in range(n_components)]
class XCor(object):
    """Cached pairwise Pearson cross-correlations between named feature arrays.

    Built once from a {name: 1-D array-like} mapping; individual coefficients
    are then looked up with xcor[(name1, name2)].
    """

    def __init__(self, all_features):
        # Materialize keys and values as lists so the two stay index-aligned
        # and so np.corrcoef works on Python 3 as well (dict views are not
        # array-like; np.asarray(dict.values()) yields a 0-d object array).
        names = list(all_features.keys())
        self.feat_name_to_indx = {key: i for i, key in enumerate(names)}
        self.xcor_mat = np.corrcoef(np.asarray([all_features[key] for key in names]))

    def __getitem__(self, t):
        """Return the correlation coefficient for a (feature1, feature2) pair."""
        feat1, feat2 = t
        i = self.feat_name_to_indx[feat1]
        j = self.feat_name_to_indx[feat2]
        return self.xcor_mat[(i, j)]
# Pairwise correlation table over all registered features.
xcor = XCor(all_features)
import itertools
def search_min_xcor(all_features, th=0.05):
    """Return the set of feature names appearing in at least one pair whose
    absolute Pearson correlation is below `th` (i.e. nearly independent
    features, good candidates for a complementary feature set).

    The correlations are computed from the `all_features` argument itself;
    the original ignored the argument's values and read the module-level
    `xcor` table, silently going stale if a different dict was passed.
    """
    names = list(all_features.keys())
    mat = np.corrcoef(np.asarray([all_features[k] for k in names]))
    indx = {k: i for i, k in enumerate(names)}
    acc = set()
    for feat1, feat2 in itertools.combinations(names, 2):
        rho = mat[indx[feat1], indx[feat2]]
        if abs(rho) < th:
            acc.add(feat1)
            acc.add(feat2)
    return acc
acc_feats = search_min_xcor(all_features)
len(acc_feats), acc_feats
# Hand-curated final feature selection (a set literal, not a dict); shell
# features that did not help were left commented out.
acc_feats = {
    '#mutations',
    'B-factor',
    'Hydrophobic',
    'MolWeight',
    'sum(ASA_Chain-ASA):mutated',
    ('EI', 'BLOSUM62'),
    # ('shells', 'CP_A', 'BASU010101', 0.0, 2.0),
    ('shells', 'CP_A', 'BASU010101', 2.0, 4.0),
    ('shells', 'CP_A', 'BASU010101', 4.0, 6.0),
    # ('shells', 'CP_B', 'BASU010101', 6.0, 8.0),
    # ('shells', 'CP_B', 'BASU010101', 0.0, 2.0),
    ('shells', 'CP_B', 'BASU010101', 2.0, 4.0),
    ('shells', 'CP_B', 'BASU010101', 4.0, 6.0),
    # ('shells', 'CP_B', 'BASU010101', 6.0, 8.0),
}
# Stack the selected feature columns into an (n_samples, n_features) matrix.
X = np.transpose([all_features[feat] for feat in acc_feats])
# X = np.concatenate([X, np.asarray(ss_arr)], axis=1)
X.shape
def records_to_xy(skempi_records, load_neg=False):
    """Convert SKEMPI records to (X features, y DDG targets, ix group info).

    NOTE(review): the `continue` below is the last statement of the loop
    body, so `load_neg` currently has no effect — this looks like truncated
    logic for loading negative (reversed) mutations; confirm intent.
    """
    data = []
    for record in tqdm(skempi_records, desc="records processed"):
        r = record
        # Records without a parsed structure cannot produce features.
        assert r.struct is not None
        data.append([r.features(True), [r.ddg], [r.group, r.is_minus]])
        if not load_neg:
            continue
    X, y, ix = [np.asarray(d) for d in zip(*data)]
    return X, y, ix
# Reload the structures/records and build the record-based design matrix.
skempi_structs = load_skempi_structs("../data/pdbs", compute_dist_mat=False)
skempi_records = load_skempi_records(skempi_structs)
X_, y_, ix_ = records_to_xy(skempi_records, load_neg=True)
X = X_[:, :]
# Append the temperature column as one extra feature.
X = np.concatenate([X.T, [temp_arr]], axis=0).T
y = y_[:, 0]
ix = ix_
X.shape, y.shape, ix.shape
df = skempi_df
from sklearn.preprocessing import StandardScaler
from itertools import combinations as comb
def run_cv_test(X, get_regressor, normalize=0):
    """Leave-two-groups-out cross-validation over the five protein groups.

    For every pair of groups, trains a fresh regressor (from the
    `get_regressor` factory) on the remaining proteins and scores Pearson
    correlation on the held-out pair.  Returns (ground truth, predictions,
    per-split correlations).  Relies on notebook globals G1..G5 and df.
    """
    gt, preds, cors = [], [], []
    groups = [G1, G2, G3, G4, G5]
    prots = G1 + G2 + G3 + G4 + G5
    for g1, g2 in comb(range(len(groups)), 2):
        held_out = groups[g1] + groups[g2]
        kept = list(set(prots) - set(held_out))
        # indx_trn = np.logical_not(indx_tst)
        test_mask = df.Protein.isin(held_out)
        train_mask = df.Protein.isin(kept)
        y_trn, y_true = df.DDG[train_mask], df.DDG[test_mask]
        X_trn, X_tst = X[train_mask], X[test_mask]
        regressor = get_regressor()
        if normalize == 1:
            # Fit the scaler on the training fold only (no test leakage).
            scaler = StandardScaler().fit(X_trn)
            X_trn = scaler.transform(X_trn)
            X_tst = scaler.transform(X_tst)
        regressor.fit(X_trn, y_trn)
        y_pred = regressor.predict(X_tst)
        cor, _ = pearsonr(y_true, y_pred)
        print("G%d" % (g1+1), "G%d" % (g2+1), "%.3f" % cor)
        cors.append(cor)
        preds.extend(y_pred)
        gt.extend(y_true)
    return gt, preds, cors
from sklearn.ensemble import RandomForestRegressor
# Factory functions so each CV split gets a fresh, identically-configured
# model; the second definition (SVR) shadows the first for the second run.
def get_regressor(): return RandomForestRegressor(n_estimators=50, random_state=0)
gt, preds, cors = run_cv_test(X, get_regressor, normalize=1)
print("%.3f" % np.mean(cors))
from sklearn.svm import SVR
def get_regressor(): return SVR(kernel='rbf')
gt, preds, cors = run_cv_test(X, get_regressor, normalize=1)
print("%.3f" % np.mean(cors))
def run_cv_test(X, alpha=0.5, normalize=1):
    """Leave-two-groups-out CV of an SVR / random-forest blend.

    `alpha` weighs the SVR prediction against the random forest's
    (y = alpha*svr + (1-alpha)*rf).  Redefines the earlier single-model
    run_cv_test.  Returns (ground truth, blended predictions, per-split
    Pearson correlations).  Relies on notebook globals G1..G5 and df.
    """
    gt, preds, cors = [], [], []
    groups = [G1, G2, G3, G4, G5]
    prots = G1 + G2 + G3 + G4 + G5
    # len(groups) instead of the external NUM_GROUPS global, matching the
    # earlier variant above and keeping the function self-contained.
    for pair in comb(range(len(groups)), 2):
        group = groups[pair[0]] + groups[pair[1]]
        rest = list(set(prots) - set(group))
        indx_tst = df.Protein.isin(group)
        indx_trn = df.Protein.isin(rest)
        y_trn = df.DDG[indx_trn]
        y_true = df.DDG[indx_tst]
        X_trn = X[indx_trn]
        X_tst = X[indx_tst]
        rf = RandomForestRegressor(n_estimators=50, random_state=0)
        svr = SVR(kernel='rbf')
        if normalize == 1:
            # Fit the scaler on the training fold only (no test leakage).
            scaler = StandardScaler()
            scaler.fit(X_trn)
            X_trn, X_tst = scaler.transform(X_trn), scaler.transform(X_tst)
        svr.fit(X_trn, y_trn)
        rf.fit(X_trn, y_trn)
        y_pred_svr = svr.predict(X_tst)
        y_pred_rf = rf.predict(X_tst)
        # Convex blend of the two model outputs.
        y_pred = alpha * y_pred_svr + (1-alpha) * y_pred_rf
        cor, _ = pearsonr(y_true, y_pred)
        print("G%d" % (pair[0]+1), "G%d" % (pair[1]+1), "%.3f" % cor)
        cors.append(cor)
        preds.extend(y_pred)
        gt.extend(y_true)
    return gt, preds, cors
# Run the blended-model CV and report the mean correlation across splits.
gt, preds, cors = run_cv_test(X, normalize=1)
print("%.3f" % np.mean(cors))
len(gt)
def run_holdout_test(X, alpha=0.5, normalize=1):
    """Train the SVR/random-forest blend on all grouped proteins and score
    Pearson correlation on everything outside the five groups (the holdout).

    Returns (true DDG values, blended predictions, correlation).
    Relies on notebook globals G1..G5 and df.
    """
    groups = [G1, G2, G3, G4, G5]
    prots = G1 + G2 + G3 + G4 + G5
    train_mask = df.Protein.isin(prots)
    test_mask = np.logical_not(train_mask)
    y_trn, y_true = df.DDG[train_mask], df.DDG[test_mask]
    X_trn, X_tst = X[train_mask], X[test_mask]
    if normalize == 1:
        # Fit the scaler on the training portion only.
        scaler = StandardScaler().fit(X_trn)
        X_trn = scaler.transform(X_trn)
        X_tst = scaler.transform(X_tst)
    svr = SVR(kernel='rbf')
    rf = RandomForestRegressor(n_estimators=50, random_state=0)
    svr.fit(X_trn, y_trn)
    rf.fit(X_trn, y_trn)
    # Convex blend of the two model outputs.
    y_pred = alpha * svr.predict(X_tst) + (1 - alpha) * rf.predict(X_tst)
    cor, _ = pearsonr(y_true, y_pred)
    print("holdout", "%.3f" % cor)
    return y_true, y_pred, cor
# Final holdout evaluation of the blended model.
gt, preds, cor = run_holdout_test(X, normalize=1)
print("%.3f" % cor)
len(gt)
```
| github_jupyter |
## Practical Data Science - Classroom to Careers
### Session 3
#### Topics to be covered
- Eigenvalues and Eigenvectors ( pick up from where we left / questions)
- Basic Calculus - Differentiation and Integration
- Maximum Likelihood estimation
- Parametric vs. Non-parametric estimation
### Eigenvalues and Eigenvectors
**How do we determine the Eigenvalues and Eigenvectors corresponding to a Matrix?**
An eigenvector is a vector whose direction remains unchanged when a linear transformation is applied to it.
Consider the image below in which three vectors are shown. The green square is only drawn to illustrate the linear transformation that is applied to each of these three vectors.
<img src = "https://www.visiondummy.com/wp-content/uploads/2014/03/eigenvectors.png">
When a given **nxn** Matrix is given we will have to identify a vector **v** such that
**A**.**v**=λ.**v**
where -
**v** is the Eigen Vector of Matrix A
**λ** is the Eigen value corresponding to the eigen vector **v**
***Understanding with an Example***
Compute the Eigen-value(s) and Eigen-vector(s) corresponding to the Matrix A
$$A = \begin{bmatrix} 2 & 3 \\ 2 & 1 \end{bmatrix}$$
Compute the Eigenvalues of matrix A
$$\text{Find a vector }\vec{v}^{\,} \text{ such that }A\vec{v}^{\,} = λ\vec{v}^{\,}$$
$$A\vec{v}^{\,} - λ\vec{v}^{\,} = 0$$
$$\left(\begin{bmatrix} 2 & 3 \\ 2 & 1 \end{bmatrix} - λ\begin{bmatrix} 1 & 0 \\ 0 & 1 \end{bmatrix}\right)\vec{v}^{\,} = 0$$
$$\begin{bmatrix} 2-λ & 3 \\ 2 & 1-λ \end{bmatrix}\vec{v}^{\,} = 0$$
image and content courtesy: www.visiondummy.com
### Differentiation
A differential of any function is the rate of change.
Or in other words, it is the slope of the tangent to a curve (a limit, defined only for continuous functions)
$$\frac{d}{dx}f(x) = f'(x) = \lim_{h\to0} \frac{f(x+h) - f(x)}{h}$$
There are other ways of defining this -
- Differential Coefficient
- First Derivative
**Applications of Derivatives**
1) Simple application for estimation
Let us understand the application of derivatives with a simple example
Given the first value below, estimate the second
$$\log_{10} 2 = 0.3010, \\ \log_{2} 24 = ? $$
2) Estimate a change in a function and find whether the function is increasing or decreasing
$$ f(x) = x^{2} + 5x + 6 $$
$$\text{find } f'(x) \text{ and } f''(x)$$
### Integration
This is the reverse process of differentiation; it is also called piecewise summation
### Parametric vs. Non - parametric estimation
**Parametric estimation**
Algorithms that simplify the function to a known form are called parametric machine learning algorithms.
The algorithms involve two steps:
- Select a form for the function.
- Learn the coefficients for the function from the training data.
- An easy to understand functional form for the mapping function is a line, as is used in linear regression:
**y = a0 + a1*x1 + a2*x2 + .... + an*xn**
Where a0, a1, a2...an are the coefficients of the line that control the intercept and slope, and x1,x2...xn are input variables.
**Non-parametric estimation**
Nonparametric methods seek to best fit the training data in constructing the mapping function, whilst maintaining some ability to generalize to unseen data. As such, they are able to fit a large number of functional forms.
Benefits of Nonparametric Machine Learning Algorithms:
**Flexibility**: Capable of fitting a large number of functional forms.
**Power**: No assumptions (or weak assumptions) about the underlying function.
**Performance**: Can result in higher performance models for prediction.
**Limitations of Nonparametric Machine Learning Algorithms**
**More data**: Require a lot more training data to estimate the mapping function.
**Slower**: A lot slower to train as they often have far more parameters to train.
**Overfitting**: More of a risk to overfit the training data and it is harder to explain why specific predictions are made.
References - https://machinelearningmastery.com/parametric-and-nonparametric-machine-learning-algorithms/
| github_jupyter |
# Mixture of Softmaxes (RNN LM)
Trying out Gauss-Logit parametrization from here https://arxiv.org/pdf/1605.06197.pdf
```
import os, sys
sys.path.append("./mos/")
import time
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
# from torch.autograd import Variable
import gc
import mos_data as data
import modelsbtransform as m
from utils import batchify, get_batch, repackage_hidden, create_exp_dir, save_checkpoint
def logging(s, print_=True, log_=True):
    """Print `s` and/or append it to <args.save>/log.txt.

    NOTE(review): this shadows the stdlib `logging` module name, and the
    log_ branch reads a global `args` that is never defined in this
    notebook — every call here passes log_=False (via is_logfile), so the
    branch never runs; confirm before enabling file logging.
    """
    if print_:
        print(s)
    if log_:
        with open(os.path.join(args.save, 'log.txt'), 'a+') as f_log:
            f_log.write(s + '\n')
# Set the random seed manually for reproducibility.
seed = 42
np.random.seed(seed)
torch.manual_seed(seed)
is_cuda = torch.cuda.is_available()
if is_cuda:
    torch.cuda.manual_seed_all(seed)
# Load data
datafile = "./data/penn/"
train_batch_size = 12
eval_batch_size = 10
test_batch_size = 1
corpus = data.Corpus(datafile)
ntokens = len(corpus.dictionary)
# batchify reshapes the flat token stream into batch-size columns.
train_data = batchify(corpus.train, train_batch_size, is_cuda)
val_data = batchify(corpus.valid, eval_batch_size, is_cuda)
test_data = batchify(corpus.test, test_batch_size, is_cuda)
# Build model
ntokens = len(corpus.dictionary)
# Resume from a saved checkpoint instead of a fresh initialization.
is_keep_training = False
path2saved_model = ""
# Use parameters from first example in original repository
# python main.py --data data/penn --dropouti 0.4 --dropoutl 0.29 --dropouth 0.225 --seed 28 --batch_size 12
# --lr 20.0 --epoch 1000 --nhid 960 --nhidlast 620 --emsize 280 --n_experts 15 --save PTB --single_gpu
# Type of recurrent net (RNN_TANH, RNN_RELU, LSTM, GRU, SRU)
model_type = "LSTM"
# Size of embedding dimension
emsize = 280
# Number of hidden units per every RNN layer except the last one
nhid = 960
# Number of hidden units for the last RNN layer
nhidlast = 620
# Number of RNN layers
nlayers = 3
# Dropout after the last RNN layer
dropout = 0.3 # default
# Dropout for RNN layers
dropouth = 0.225
# Dropout for input embedding layers
dropouti = 0.4
# Dropout to remove words from embedding layer
dropoute = 0.1 # default
# Dropout for latent representation, before decoding
dropoutl = 0.29
# Amount of weight dropout to apply to the RNN hidden-to-hidden matrix
# (DropConnect on the recurrent weights).
wdrop = 0.5 # default
# Tie the word embedding and softmax weights
tied = False
# Number of softmaxes to mix
n_experts = 15
if is_keep_training:
    model = torch.load(os.path.join(path2saved_model, 'model.pt'))
else:
    model = m.RNNModel(model_type, ntokens, emsize, nhid, nhidlast, nlayers,
                       dropout, dropouth, dropouti, dropoute, wdrop,
                       tied, dropoutl, n_experts)
if torch.cuda.is_available():
    model.cuda()
total_params = sum(x.data.nelement() for x in model.parameters())
# logging('Args: {}'.format(args))
logging('Model total parameters: {}'.format(total_params), log_=False)
# Evaluate model
def evaluate(data_source, model, ntokens, batch_size, seq_lenght):
    """Average per-token negative log-likelihood of `model` on `data_source`.

    NOTE(review): `ntokens` is unused, and the misspelled parameter name
    `seq_lenght` is kept for call compatibility.  No torch.no_grad() here —
    this follows the pre-0.4 PyTorch style used elsewhere in the notebook
    (see the volatile=True Variable in the generation cell); confirm on a
    modern PyTorch this should be wrapped in torch.no_grad().
    """
    # Turn on evaluation mode which disables dropout.
    model.eval()
    total_loss = 0
    hidden = model.init_hidden(batch_size)
    for i in range(0, data_source.size(0) - 1, seq_lenght):
        data, targets = get_batch(data_source, i, seq_lenght, evaluation=True)
        targets = targets.view(-1)
        log_prob, hidden = model(data, hidden)
        # Length-weighted accumulation: each chunk contributes len(data) tokens-rows.
        loss = nn.functional.nll_loss(log_prob.view(-1, log_prob.size(2)), targets).data
        total_loss += loss * len(data)
        # Detach the hidden state so the graph does not grow across chunks.
        hidden = repackage_hidden(hidden)
    return total_loss.item() / len(data_source)
# Set parameters of training
batch_size = 12
# The batch size for computation. batch_size should be divisible by small_batch_size
# In our implementation, we compute gradients with small_batch_size multiple times, and accumulate the gradients
# until batch_size is reached. An update step is then performed.
small_batch_size = batch_size
# Gradient clipping
clip = 0.25 # default
# Regularization weight on RNN activations
alpha = 2 # default
# Sequence length (BPTT window)
bptt = 70 # default
# Max sequence length delta
max_seq_len_delta = 40 # default
# Interval to print loss
log_interval = 200 # default
# Use logfile
is_logfile = False
# Train model for single epoch
def train(model, train_data, optimizer, ntokens, batch_size, small_batch_size, bptt0):
    """Run one training epoch with randomized BPTT length and gradient
    accumulation over small_batch_size chunks.

    Relies on notebook globals: alpha, beta (regularization weights), clip,
    max_seq_len_delta, log_interval, epoch, is_logfile.  `ntokens` is unused.
    """
    assert batch_size % small_batch_size == 0, 'batch_size must be divisible by small_batch_size'
    # Turn on training mode which enables dropout.
    total_loss = 0
    start_time = time.time()
    # One hidden state per gradient-accumulation chunk.
    hidden = [model.init_hidden(small_batch_size) for _ in range(batch_size // small_batch_size)]
    batch, i = 0, 0
    while i < train_data.size(0) - 1 - 1:
        # Occasionally halve the base BPTT length, then jitter it with noise.
        bptt = bptt0 if np.random.random() < 0.95 else bptt0 / 2.
        # Prevent excessively small or negative sequence lengths
        seq_len = max(5, int(np.random.normal(bptt, 5))) # loc 70, scale 5
        # There's a very small chance that it could select a very long sequence length resulting in OOM
        seq_len = min(seq_len, bptt + max_seq_len_delta)
        # Scale the learning rate with the sampled sequence length, restoring
        # it after the optimizer step below.
        lr2 = optimizer.param_groups[0]['lr']
        optimizer.param_groups[0]['lr'] = lr2 * seq_len / bptt
        model.train()
        data, targets = get_batch(train_data, i, seq_len=seq_len)
        optimizer.zero_grad()
        start, end, s_id = 0, small_batch_size, 0
        while start < batch_size:
            cur_data, cur_targets = data[:, start: end], targets[:, start: end].contiguous().view(-1)
            # Starting each batch, we detach the hidden state from how it was previously produced.
            # If we didn't, the model would try backpropagating all the way to start of the dataset.
            hidden[s_id] = repackage_hidden(hidden[s_id])
            log_prob, hidden[s_id], rnn_hs, dropped_rnn_hs = model(cur_data.cuda(), hidden[s_id], return_h=True)
            raw_loss = nn.functional.nll_loss(log_prob.view(-1, log_prob.size(2)), cur_targets)
            loss = raw_loss
            # Activation Regularization
            loss = loss + sum(alpha * dropped_rnn_h.pow(2).mean() for dropped_rnn_h in dropped_rnn_hs[-1:])
            # Temporal activation Regularization (slowness)
            loss = loss + sum(beta * (rnn_h[1:] - rnn_h[:-1]).pow(2).mean() for rnn_h in rnn_hs[-1:])
            # Rescale so the accumulated gradients match one full-batch update.
            loss *= small_batch_size / batch_size
            total_loss += raw_loss.data * small_batch_size / batch_size
            loss.backward()
            s_id += 1
            start = end
            end = start + small_batch_size
            gc.collect()
        # `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
        torch.nn.utils.clip_grad_norm_(model.parameters(), clip)
        optimizer.step()
        # total_loss += raw_loss.data
        optimizer.param_groups[0]['lr'] = lr2
        if batch % log_interval == 0 and batch > 0:
            cur_loss = total_loss.item() / log_interval
            elapsed = time.time() - start_time
            logging('| epoch {:3d} | {}/{} batches | lr {:02.4f} | ms/batch {:5.2f} | '
                    'loss {:5.2f} | ppl {:8.2f}'.format(
                epoch, batch, len(train_data) // bptt0, optimizer.param_groups[0]['lr'],
                elapsed * 1000 / log_interval, cur_loss, math.exp(cur_loss)), log_=is_logfile)
            total_loss = 0
            start_time = time.time()
        ###
        batch += 1
        i += seq_len
# Optimizer parameters
# Learning rate
lr = 20
# Weight decay applied to all weights
wdecay = 1.2e-6
# Number of epochs
num_epoch = 100
# Beta slowness regularization applied on RNN activation (beta = 0 means no regularization)
beta = 1
epoch = 1
best_val_loss = []
# Best validation loss seen so far (sentinel starts very large).
stored_loss = 100000000
# Timestamped experiment directory for checkpoints.
exp_dir = '{}-{}'.format("PTB", time.strftime("%Y%m%d-%H%M%S"))
create_exp_dir(exp_dir)
# Main training loop: SGD with checkpointing on best validation loss;
# Ctrl-C skips straight to the final test evaluation.
# (The original had a stray bare `E` expression after the try/except that
# would raise NameError at run time; it has been removed.)
try:
    optimizer = torch.optim.SGD(model.parameters(), lr=lr, weight_decay=wdecay)
    while epoch < num_epoch:
        epoch_start_time = time.time()
        train(model, train_data, optimizer, ntokens, batch_size, small_batch_size, bptt)
        val_loss = evaluate(val_data, model, ntokens, eval_batch_size, bptt)
        logging('-' * 89, log_=is_logfile)
        logging('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | '
                'valid ppl {:8.2f}'.format(epoch, (time.time() - epoch_start_time),
                                           val_loss, math.exp(val_loss)), log_=is_logfile)
        logging('-' * 89, log_=is_logfile)
        if val_loss < stored_loss:
            # New best validation loss: checkpoint the model.
            save_checkpoint(model, optimizer, exp_dir)
            logging('Saving Normal!', log_=is_logfile)
            stored_loss = val_loss
        best_val_loss.append(val_loss)
        epoch += 1
except KeyboardInterrupt:
    logging('-' * 89, log_=is_logfile)
    logging('Exiting from training early', log_=is_logfile)
test_loss = evaluate(test_data, model, ntokens, test_batch_size, bptt)
logging('=' * 89, log_=is_logfile)
logging('| End of training | test loss {:5.2f} | test ppl {:8.2f}'.format(
    test_loss, math.exp(test_loss)), log_=is_logfile)
logging('=' * 89, log_=is_logfile)
from torch.autograd import Variable
num_words = 150
generated = []
# Higher temperature flattens the distribution and increases diversity.
temperature = 1.0
# Progress-report interval (in generated words).
log_interval = 50
model.eval()
hidden = model.init_hidden(1)
# Random starting token.  NOTE(review): volatile=True is the pre-0.4
# PyTorch way of disabling autograd; on modern PyTorch use torch.no_grad().
input = Variable(torch.rand(1, 1).mul(ntokens).long().cuda(), volatile=True)
sent = []
for i in range(num_words):
    output, hidden = model(input, hidden, return_prob=True)
    # Sample the next word from the temperature-scaled distribution.
    word_weights = output.squeeze().data.div(temperature).exp().cpu()
    word_idx = torch.multinomial(word_weights, 1)[0]
    # Feed the sampled word back in as the next input.
    input.data.fill_(word_idx)
    word = corpus.dictionary.idx2word[word_idx]
    sent.append(word)
    # Break the output into lines of 20 words.
    if i % 20 == 19:
        generated.append(sent)
        sent = []
    if i % log_interval == 0:
        print('| Generated {}/{} words'.format(i, num_words))
```
| github_jupyter |
<a href="https://colab.research.google.com/github/BaiganKing/DS-Unit-2-Kaggle-Challenge/blob/master/module2/assignment_kaggle_challenge_2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
Lambda School Data Science, Unit 2: Predictive Modeling
# Kaggle Challenge, Module 2
## Assignment
- [ ] Read [“Adopting a Hypothesis-Driven Workflow”](https://outline.com/5S5tsB), a blog post by a Lambda DS student about the Tanzania Waterpumps challenge.
- [ ] Continue to participate in our Kaggle challenge.
- [ ] Try Ordinal Encoding.
- [ ] Try a Random Forest Classifier.
- [ ] Submit your predictions to our Kaggle competition. (Go to our Kaggle InClass competition webpage. Use the blue **Submit Predictions** button to upload your CSV file. Or you can use the Kaggle API to submit your predictions.)
- [ ] Commit your notebook to your fork of the GitHub repo.
## Stretch Goals
### Doing
- [ ] Add your own stretch goal(s) !
- [ ] Do more exploratory data analysis, data cleaning, feature engineering, and feature selection.
- [ ] Try other [categorical encodings](https://contrib.scikit-learn.org/categorical-encoding/).
- [ ] Get and plot your feature importances.
- [ ] Make visualizations and share on Slack.
### Reading
Top recommendations in _**bold italic:**_
#### Decision Trees
- A Visual Introduction to Machine Learning, [Part 1: A Decision Tree](http://www.r2d3.us/visual-intro-to-machine-learning-part-1/), and _**[Part 2: Bias and Variance](http://www.r2d3.us/visual-intro-to-machine-learning-part-2/)**_
- [Decision Trees: Advantages & Disadvantages](https://christophm.github.io/interpretable-ml-book/tree.html#advantages-2)
- [How a Russian mathematician constructed a decision tree — by hand — to solve a medical problem](http://fastml.com/how-a-russian-mathematician-constructed-a-decision-tree-by-hand-to-solve-a-medical-problem/)
- [How decision trees work](https://brohrer.github.io/how_decision_trees_work.html)
- [Let’s Write a Decision Tree Classifier from Scratch](https://www.youtube.com/watch?v=LDRbO9a6XPU)
#### Random Forests
- [_An Introduction to Statistical Learning_](http://www-bcf.usc.edu/~gareth/ISL/), Chapter 8: Tree-Based Methods
- [Coloring with Random Forests](http://structuringtheunstructured.blogspot.com/2017/11/coloring-with-random-forests.html)
- _**[Random Forests for Complete Beginners: The definitive guide to Random Forests and Decision Trees](https://victorzhou.com/blog/intro-to-random-forests/)**_
#### Categorical encoding for trees
- [Are categorical variables getting lost in your random forests?](https://roamanalytics.com/2016/10/28/are-categorical-variables-getting-lost-in-your-random-forests/)
- [Beyond One-Hot: An Exploration of Categorical Variables](http://www.willmcginnis.com/2015/11/29/beyond-one-hot-an-exploration-of-categorical-variables/)
- _**[Categorical Features and Encoding in Decision Trees](https://medium.com/data-design/visiting-categorical-features-and-encoding-in-decision-trees-53400fa65931)**_
- _**[Coursera — How to Win a Data Science Competition: Learn from Top Kagglers — Concept of mean encoding](https://www.coursera.org/lecture/competitive-data-science/concept-of-mean-encoding-b5Gxv)**_
- [Mean (likelihood) encodings: a comprehensive study](https://www.kaggle.com/vprokopev/mean-likelihood-encodings-a-comprehensive-study)
- [The Mechanics of Machine Learning, Chapter 6: Categorically Speaking](https://mlbook.explained.ai/catvars.html)
#### Imposter Syndrome
- [Effort Shock and Reward Shock (How The Karate Kid Ruined The Modern World)](http://www.tempobook.com/2014/07/09/effort-shock-and-reward-shock/)
- [How to manage impostor syndrome in data science](https://towardsdatascience.com/how-to-manage-impostor-syndrome-in-data-science-ad814809f068)
- ["I am not a real data scientist"](https://brohrer.github.io/imposter_syndrome.html)
- _**[Imposter Syndrome in Data Science](https://caitlinhudon.com/2018/01/19/imposter-syndrome-in-data-science/)**_
```
# If you're in Colab...
import os, sys
# Detect Google Colab by the presence of its bootstrap module.
in_colab = 'google.colab' in sys.modules
if in_colab:
    # Install required python packages:
    # category_encoders, version >= 2.0
    # pandas-profiling, version >= 2.0
    # plotly, version >= 4.0
    !pip install --upgrade category_encoders pandas-profiling plotly
    # Pull files from Github repo
    os.chdir('/content')
    !git init .
    !git remote add origin https://github.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge.git
    !git pull origin master
    # Change into directory for module
    os.chdir('module2')
import pandas as pd
from sklearn.model_selection import train_test_split
# Merge train_features.csv & train_labels.csv
train = pd.merge(pd.read_csv('../data/tanzania/train_features.csv'),
                 pd.read_csv('../data/tanzania/train_labels.csv'))
# Read test_features.csv & sample_submission.csv
test = pd.read_csv('../data/tanzania/test_features.csv')
sample_submission = pd.read_csv('../data/tanzania/sample_submission.csv')
import category_encoders as ce
from sklearn.impute import SimpleImputer
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier
import numpy as np
from sklearn.model_selection import train_test_split
# NOTE(review): the frames are re-read here, duplicating the cell above
# (these were separate notebook cells).
train = pd.merge(pd.read_csv('../data/tanzania/train_features.csv'),
                 pd.read_csv('../data/tanzania/train_labels.csv'))
test = pd.read_csv('../data/tanzania/test_features.csv')
sample_submission = pd.read_csv('../data/tanzania/sample_submission.csv')
# 80/20 split, stratified on the target so class balance is preserved.
train, val = train_test_split(train, train_size=0.80, test_size=0.20,
                              stratify=train['status_group'], random_state=42)
def wrangle(df):
    """Return a cleaned copy of a waterpumps frame.

    - Maps the sentinel latitude -2e-08 to 0, then converts sentinel
      zeros (numeric 0 and string '0') in the location/identity columns
      to NaN so imputers can handle them.
    - Drops columns that duplicate other features.
    The input frame is not modified.
    """
    out = df.copy()
    # -2e-08 is a near-zero placeholder latitude; fold it into the 0 sentinel.
    out['latitude'] = out['latitude'].replace(-2e-08, 0)
    sentinel_cols = ['longitude', 'latitude', 'date_recorded', 'subvillage',
                     'installer', 'region', 'basin']
    for name in sentinel_cols:
        out[name] = out[name].replace([0, '0'], np.nan)
    # Redundant near-duplicates of other columns.
    return out.drop(columns=['quantity_group', 'scheme_management',
                             'extraction_type_group', 'payment_type'])
train = wrangle(train)
val = wrangle(val)
test = wrangle(test)
target = 'status_group'
train_features = train.drop(columns=[target])
numeric_features = train_features.select_dtypes(include='number').columns.tolist()
# Keep only categorical columns with at most 50 distinct values
# (high-cardinality columns would blow up the one-hot encoding).
cardinality = train_features.select_dtypes(exclude='number').nunique()
categorical_features = cardinality[cardinality <= 50].index.tolist()
features = numeric_features + categorical_features
X_train = train[features]
y_train = train[target]
X_val = val[features]
y_val = val[target]
X_test = test[features]
%%time
# Baseline pipeline: one-hot encoding + median imputation + random forest.
# NOTE(review): use_cat_names='True' passes the STRING 'True', not the bool —
# it works because any non-empty string is truthy, but should be True.
pipeline = make_pipeline(
    ce.OneHotEncoder(use_cat_names='True'),
    SimpleImputer(strategy='median'),
    RandomForestClassifier(n_estimators=100, random_state=42, n_jobs=-1)
)
pipeline.fit(X_train, y_train)
print('Validation Accuracy', pipeline.score(X_val, y_val))
%%time
# Second pipeline: ordinal encoding over ALL columns (no cardinality pruning).
X_train = train.drop(columns=target)
y_train = train[target]
X_val = val.drop(columns=target)
y_val = val[target]
X_test = test
pipeline = make_pipeline(
    ce.OrdinalEncoder(),
    SimpleImputer(strategy='median'),
    RandomForestClassifier(n_estimators=100, random_state=42, n_jobs=-1)
)
pipeline.fit(X_train, y_train)
print('Validation Accuracy', pipeline.score(X_val, y_val))
# Predict on the Kaggle test set and write the submission file.
y_pred = pipeline.predict(X_test)
submission = sample_submission.copy()
submission['status_group'] = y_pred
submission.to_csv('submission-02.csv', index=False)
from google.colab import files
files.download('submission-02.csv')
```
| github_jupyter |
# Notebook 3b - Soil Type Engineering
In this notebook, we use soil type features to engineer new features using interactions.
```
# Global variables for testing changes to this notebook quickly
# Seed shared by every model and CV split for reproducibility.
RANDOM_SEED = 0
# StratifiedKFold fold count used by train_original.
NUM_FOLDS = 12
import numpy as np
import pandas as pd
import time
import pyarrow
import gc
# Model evaluation
from functools import partial
from sklearn.base import clone
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import StratifiedKFold, train_test_split
from sklearn.metrics import accuracy_score, recall_score
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostClassifier, BaggingClassifier, ExtraTreesClassifier, RandomForestClassifier
# Plotting
import matplotlib
import seaborn as sns
from matplotlib import pyplot as plt
# Hide warnings
import warnings
warnings.filterwarnings('ignore')
```
# Prepare Data
```
# Encode soil type
def categorical_encoding(input_df):
    """Collapse the 40 one-hot Soil_TypeN columns into one ordinal column.

    Adds a 'Soil_Type' column equal to sum(i * Soil_Type_i) — i.e. the index
    of the active one-hot flag — and returns a copy with the 40 one-hot
    columns dropped.  All other columns pass through unchanged.
    """
    frame = input_df.copy()
    onehot_cols = [f'Soil_Type{k}' for k in range(1, 41)]
    frame['Soil_Type'] = 0
    for k, col in enumerate(onehot_cols, start=1):
        frame['Soil_Type'] += k * frame[col]
    keep = [name for name in frame.columns if name not in onehot_cols]
    return frame[keep]
%%time
# Load original data
original = categorical_encoding(pd.read_feather('../data/original.feather'))
# Label Encode
old_encoder = LabelEncoder()
original["Cover_Type"] = old_encoder.fit_transform(original["Cover_Type"])
# First 15119 rows are the labelled training split; the rest are the holdout.
y_train = original['Cover_Type'].iloc[:15119]
y_test = original['Cover_Type'].iloc[15119:]
# Get feature columns
features = [x for x in original.columns if x not in ['Id','Cover_Type']]
# Data structures for summary scores
bagging_scores = list()
extratrees_scores = list()
adaboost_scores = list()
random_scores = list()
```
# Scoring Function
```
def train_original(sklearn_model, processing = None):
    """Cross-validate a classifier on the training split and score the holdout.

    Parameters
    ----------
    sklearn_model : estimator
        Unfitted scikit-learn classifier; a fresh clone is fitted per fold.
    processing : callable, optional
        Feature-engineering function applied to both the training and the
        holdout feature frames before fitting.

    Returns
    -------
    tuple
        (mean CV accuracy, out-of-fold predictions, holdout accuracy).

    Relies on the module-level globals `original`, `features`, `NUM_FOLDS`
    and `RANDOM_SEED`.
    """
    # Original Training/Test Split (first 15119 rows are the labelled training set)
    X_temp = original[features].iloc[:15119]
    X_test = original[features].iloc[15119:]
    y_temp = original['Cover_Type'].iloc[:15119]
    y_test = original['Cover_Type'].iloc[15119:]
    # Feature Engineering
    if processing:
        X_temp = processing(X_temp)
        X_test = processing(X_test)
    # Accumulators for summed test-set class probabilities and out-of-fold
    # predictions. The number of probability columns is derived from the data
    # instead of being hard-coded to 7, so the helper generalizes to targets
    # with a different number of classes.
    n_classes = y_temp.nunique()
    test_preds = np.zeros((X_test.shape[0], n_classes))
    oof_preds = np.zeros((X_temp.shape[0],))
    scores, times = np.zeros(NUM_FOLDS), np.zeros(NUM_FOLDS)
    # Stratified k-fold cross-validation
    skf = StratifiedKFold(n_splits = NUM_FOLDS, shuffle = True, random_state = RANDOM_SEED)
    for fold, (train_idx, valid_idx) in enumerate(skf.split(X_temp, y_temp)):
        # Training and Validation Sets
        X_train, X_valid = X_temp.iloc[train_idx], X_temp.iloc[valid_idx]
        y_train, y_valid = y_temp.iloc[train_idx], y_temp.iloc[valid_idx]
        # Fit a fresh clone so folds never share fitted state
        start = time.time()
        model = clone(sklearn_model)
        model.fit(X_train, y_train)
        # Validation predictions fill the out-of-fold vector; test-set class
        # probabilities are accumulated across folds (soft voting).
        valid_preds = np.ravel(model.predict(X_valid))
        oof_preds[valid_idx] = valid_preds
        test_preds += model.predict_proba(X_test)
        # Save scores and times
        scores[fold] = accuracy_score(y_valid, valid_preds)
        end = time.time()
        times[fold] = end - start
        time.sleep(0.5)  # brief pause between folds -- presumably to ease notebook/CPU load; TODO confirm it is still needed
    # Final prediction = argmax of summed probabilities; predict_proba columns
    # align with the label-encoded classes 0..n_classes-1.
    test_preds = np.argmax(test_preds, axis = 1)
    test_score = accuracy_score(y_test, test_preds)
    print('\n'+model.__class__.__name__)
    print("Train Accuracy:", round(scores.mean(), 5))
    print('Test Accuracy:', round(test_score, 5))
    print(f'Training Time: {round(times.sum(), 2)}s')
    return scores.mean(), oof_preds, test_score
```
# Models
We use the following 4 models from the scikit-learn library:
1. AdaBoost
2. ExtraTrees
3. Bagging
4. Random Forest
```
# AdaBoost Classifier
# NOTE(review): `base_estimator` was renamed `estimator` in scikit-learn 1.2
# and removed in 1.4 -- update the keyword if running on a newer version.
adaboost = AdaBoostClassifier(
    # weak learner: a decision tree with randomized split selection
    base_estimator = DecisionTreeClassifier(
        splitter = 'random',
        random_state = RANDOM_SEED,
    ),
    random_state = RANDOM_SEED,
)
# ExtraTrees Classifier
extratrees = ExtraTreesClassifier(
    n_jobs = -1,                  # train trees on all CPU cores
    random_state = RANDOM_SEED,
    max_features = None,          # evaluate every feature at each split
)
# Bagging Classifier
bagging = BaggingClassifier(
    # weak learner: a decision tree with randomized split selection
    base_estimator = DecisionTreeClassifier(
        splitter = 'random',
        random_state = RANDOM_SEED,
    ),
    n_jobs = -1,
    random_state = RANDOM_SEED
)
# Random Forest Classifier (library defaults apart from seeding/parallelism)
randomforest = RandomForestClassifier(
    n_jobs = -1,
    random_state = RANDOM_SEED,
)
```
# Baselines
```
# Score all four ensembles on the raw features; each tuple records the label,
# CV accuracy, holdout accuracy and the per-class recall (recall_0..recall_6)
# computed from the out-of-fold predictions.
# AdaBoost
cv_score, oof_preds, test_score = train_original(adaboost)
adaboost_scores.append((
    'Baseline', cv_score, test_score,
    *recall_score(y_train, oof_preds, average = None)
))
# ExtraTrees
cv_score, oof_preds, test_score = train_original(extratrees)
extratrees_scores.append((
    'Baseline', cv_score, test_score,
    *recall_score(y_train, oof_preds, average = None)
))
# Bagging
cv_score, oof_preds, test_score = train_original(bagging)
bagging_scores.append((
    'Baseline', cv_score, test_score,
    *recall_score(y_train, oof_preds, average = None)
))
# RandomForest
cv_score, oof_preds, test_score = train_original(randomforest)
random_scores.append((
    'Baseline', cv_score, test_score,
    *recall_score(y_train, oof_preds, average = None)
))
```
# Categorical Feature Interactions
We test out the following interactions:
1. Climatic Zone and Wilderness Area
2. Geologic Zone and Wilderness Area
3. Surface Cover and Wilderness Area
4. Rock Size and Wilderness Area
## 1. Climatic Zone and Wilderness Area
```
def climatic_zone_original(input_df):
    """Add a 'Climatic_Zone' column derived from the ordinal Soil_Type.

    Each soil type id (1-40) maps to a four-digit USFS ELU code; the climatic
    zone is the leading (thousands) digit of that code.
    """
    elu_code = {
        1: 2702,  2: 2703,  3: 2704,  4: 2705,  5: 2706,  6: 2717,  7: 3501,
        8: 3502,  9: 4201, 10: 4703, 11: 4704, 12: 4744, 13: 4758, 14: 5101,
        15: 5151, 16: 6101, 17: 6102, 18: 6731, 19: 7101, 20: 7102, 21: 7103,
        22: 7201, 23: 7202, 24: 7700, 25: 7701, 26: 7702, 27: 7709, 28: 7710,
        29: 7745, 30: 7746, 31: 7755, 32: 7756, 33: 7757, 34: 7790, 35: 8703,
        36: 8707, 37: 8708, 38: 8771, 39: 8772, 40: 8776,
    }
    out = input_df.copy()
    # Thousands digit of the 4-digit ELU code == first character of str(code).
    out['Climatic_Zone'] = input_df['Soil_Type'].map(lambda soil: elu_code[soil] // 1000)
    return out
def wilderness_climatic(input_df, drop = False):
    """Add Climate_Area{k} interactions: wilderness indicator x climatic zone.

    Note: `drop` is accepted for API symmetry with the other interaction
    helpers but is currently unused.
    """
    zone = climatic_zone_original(input_df)['Climatic_Zone']
    out = input_df.copy()
    for k in range(1, 5):
        out[f'Climate_Area{k}'] = out[f'Wilderness_Area{k}'] * zone
    return out
# Score all four ensembles with the wilderness x climatic-zone interactions.
# AdaBoost
cv_score, oof_preds, test_score = train_original(adaboost, wilderness_climatic)
adaboost_scores.append((
    'Wild_Clim', cv_score, test_score,
    *recall_score(y_train, oof_preds, average = None)
))
# ExtraTrees
cv_score, oof_preds, test_score = train_original(extratrees, wilderness_climatic)
extratrees_scores.append((
    'Wild_Clim', cv_score, test_score,
    *recall_score(y_train, oof_preds, average = None)
))
# Bagging
cv_score, oof_preds, test_score = train_original(bagging, wilderness_climatic)
bagging_scores.append((
    'Wild_Clim', cv_score, test_score,
    *recall_score(y_train, oof_preds, average = None)
))
# RandomForest
cv_score, oof_preds, test_score = train_original(randomforest, wilderness_climatic)
random_scores.append((
    'Wild_Clim', cv_score, test_score,
    *recall_score(y_train, oof_preds, average = None)
))
```
## 2. Geologic Zone and Wilderness Area
```
def geologic_zone_original(input_df):
    """Add a 'Geologic_Zone' column derived from the ordinal Soil_Type.

    Each soil type id (1-40) maps to a four-digit USFS ELU code; the geologic
    zone is the second (hundreds) digit of that code.
    """
    elu_code = {
        1: 2702,  2: 2703,  3: 2704,  4: 2705,  5: 2706,  6: 2717,  7: 3501,
        8: 3502,  9: 4201, 10: 4703, 11: 4704, 12: 4744, 13: 4758, 14: 5101,
        15: 5151, 16: 6101, 17: 6102, 18: 6731, 19: 7101, 20: 7102, 21: 7103,
        22: 7201, 23: 7202, 24: 7700, 25: 7701, 26: 7702, 27: 7709, 28: 7710,
        29: 7745, 30: 7746, 31: 7755, 32: 7756, 33: 7757, 34: 7790, 35: 8703,
        36: 8707, 37: 8708, 38: 8771, 39: 8772, 40: 8776,
    }
    out = input_df.copy()
    # Hundreds digit of the 4-digit ELU code == second character of str(code).
    out['Geologic_Zone'] = input_df['Soil_Type'].map(lambda soil: (elu_code[soil] // 100) % 10)
    return out
def wilderness_geologic(input_df, drop = False):
    """Add Geologic_Area{k} interactions: wilderness indicator x geologic zone.

    Note: `drop` is accepted for API symmetry with the other interaction
    helpers but is currently unused.
    """
    zone = geologic_zone_original(input_df)['Geologic_Zone']
    out = input_df.copy()
    for k in range(1, 5):
        out[f'Geologic_Area{k}'] = out[f'Wilderness_Area{k}'] * zone
    return out
# Score all four ensembles with the wilderness x geologic-zone interactions.
# AdaBoost
cv_score, oof_preds, test_score = train_original(adaboost, wilderness_geologic)
adaboost_scores.append((
    'Wild_Geo', cv_score, test_score,
    *recall_score(y_train, oof_preds, average = None)
))
# ExtraTrees
cv_score, oof_preds, test_score = train_original(extratrees, wilderness_geologic)
extratrees_scores.append((
    'Wild_Geo', cv_score, test_score,
    *recall_score(y_train, oof_preds, average = None)
))
# Bagging
cv_score, oof_preds, test_score = train_original(bagging, wilderness_geologic)
bagging_scores.append((
    'Wild_Geo', cv_score, test_score,
    *recall_score(y_train, oof_preds, average = None)
))
# RandomForest
cv_score, oof_preds, test_score = train_original(randomforest, wilderness_geologic)
random_scores.append((
    'Wild_Geo', cv_score, test_score,
    *recall_score(y_train, oof_preds, average = None)
))
```
## 3. Surface Cover and Wilderness Area
```
def surface_cover_original(input_df):
    """Add a 'Surface_Cover' stoniness class (0-4) derived from Soil_Type.

    Classes: 0 = no description, 1 = stony, 2 = very stony,
    3 = extremely stony, 4 = rubbly.
    """
    class_members = {
        0: [7, 8, 14, 15, 16, 17, 19, 20, 21, 23, 35],                       # no description
        1: [6, 12],                                                          # stony
        2: [2, 9, 18, 26],                                                   # very stony
        3: [1, 22, 24, 25, 27, 28, 29, 30, 31, 32, 33, 34, 36, 37, 38, 39, 40],  # extremely stony
        4: [3, 4, 5, 10, 11, 13],                                            # rubbly
    }
    # Invert class -> soil-ids into a direct soil-id -> class lookup.
    lookup = {soil: cls for cls, soils in class_members.items() for soil in soils}
    out = input_df.copy()
    out['Surface_Cover'] = input_df['Soil_Type'].map(lambda soil: lookup[soil])
    return out
def wilderness_surface(input_df, drop = False):
    """Add Surface_Area{k} interactions: wilderness indicator x surface cover.

    Note: `drop` is accepted for API symmetry with the other interaction
    helpers but is currently unused.
    """
    cover = surface_cover_original(input_df)['Surface_Cover']
    out = input_df.copy()
    for k in range(1, 5):
        out[f'Surface_Area{k}'] = out[f'Wilderness_Area{k}'] * cover
    return out
# Score all four ensembles with the wilderness x surface-cover interactions.
# AdaBoost
cv_score, oof_preds, test_score = train_original(adaboost, wilderness_surface)
adaboost_scores.append((
    'Wild_Surf', cv_score, test_score,
    *recall_score(y_train, oof_preds, average = None)
))
# ExtraTrees
cv_score, oof_preds, test_score = train_original(extratrees, wilderness_surface)
extratrees_scores.append((
    'Wild_Surf', cv_score, test_score,
    *recall_score(y_train, oof_preds, average = None)
))
# Bagging
cv_score, oof_preds, test_score = train_original(bagging, wilderness_surface)
bagging_scores.append((
    'Wild_Surf', cv_score, test_score,
    *recall_score(y_train, oof_preds, average = None)
))
# RandomForest
cv_score, oof_preds, test_score = train_original(randomforest, wilderness_surface)
random_scores.append((
    'Wild_Surf', cv_score, test_score,
    *recall_score(y_train, oof_preds, average = None)
))
```
## 4. Rock Size and Wilderness Area
```
def rock_size_original(input_df):
    """Add a 'Rock_Size' class (0-3) derived from Soil_Type.

    Classes: 0 = no description, 1 = stones, 2 = boulders, 3 = rubble.
    """
    class_members = {
        0: [7, 8, 14, 15, 16, 17, 19, 20, 21, 23, 35],                       # no description
        1: [1, 2, 6, 9, 12, 18, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
            36, 37, 38, 39, 40],                                             # stones
        2: [22],                                                             # boulders
        3: [3, 4, 5, 10, 11, 13],                                            # rubble
    }
    # Invert class -> soil-ids into a direct soil-id -> class lookup.
    lookup = {soil: cls for cls, soils in class_members.items() for soil in soils}
    out = input_df.copy()
    out['Rock_Size'] = input_df['Soil_Type'].map(lambda soil: lookup[soil])
    return out
def wilderness_rocksize(input_df, drop = False):
    """Add Rock_Area{k} interactions: wilderness indicator x rock size.

    Note: `drop` is accepted for API symmetry with the other interaction
    helpers but is currently unused.
    """
    size = rock_size_original(input_df)['Rock_Size']
    out = input_df.copy()
    for k in range(1, 5):
        out[f'Rock_Area{k}'] = out[f'Wilderness_Area{k}'] * size
    return out
# Score all four ensembles with the wilderness x rock-size interactions.
# AdaBoost
cv_score, oof_preds, test_score = train_original(adaboost, wilderness_rocksize)
adaboost_scores.append((
    'Wild_Rock', cv_score, test_score,
    *recall_score(y_train, oof_preds, average = None)
))
# ExtraTrees
cv_score, oof_preds, test_score = train_original(extratrees, wilderness_rocksize)
extratrees_scores.append((
    'Wild_Rock', cv_score, test_score,
    *recall_score(y_train, oof_preds, average = None)
))
# Bagging
cv_score, oof_preds, test_score = train_original(bagging, wilderness_rocksize)
bagging_scores.append((
    'Wild_Rock', cv_score, test_score,
    *recall_score(y_train, oof_preds, average = None)
))
# RandomForest
cv_score, oof_preds, test_score = train_original(randomforest, wilderness_rocksize)
random_scores.append((
    'Wild_Rock', cv_score, test_score,
    *recall_score(y_train, oof_preds, average = None)
))
```
# Summary
These probably require more testing (permutation importance, etc).
```
# AdaBoost summary: one row per feature set, sorted by CV accuracy (ascending)
pd.DataFrame.from_records(
    data = adaboost_scores,
    columns = ['features','cv_score','holdout','recall_0', 'recall_1','recall_2','recall_3','recall_4','recall_5','recall_6']
).sort_values('cv_score')
# Extra Trees Classifier
pd.DataFrame.from_records(
    data = extratrees_scores,
    columns = ['features','cv_score','holdout','recall_0', 'recall_1','recall_2','recall_3','recall_4','recall_5','recall_6']
).sort_values('cv_score')
# Bagging Classifier
pd.DataFrame.from_records(
    data = bagging_scores,
    columns = ['features','cv_score','holdout','recall_0', 'recall_1','recall_2','recall_3','recall_4','recall_5','recall_6']
).sort_values('cv_score')
# Random Forest
pd.DataFrame.from_records(
    data = random_scores,
    columns = ['features','cv_score','holdout','recall_0', 'recall_1','recall_2','recall_3','recall_4','recall_5','recall_6']
).sort_values('cv_score')
```
| github_jupyter |
# Hierarchical Clustering
**Hierarchical clustering** refers to a class of clustering methods that seek to build a **hierarchy** of clusters, in which some clusters contain others. In this assignment, we will explore a top-down approach, recursively bipartitioning the data using k-means.
**Note to Amazon EC2 users**: To conserve memory, make sure to stop all the other notebooks before running this notebook.
## Import packages
```
import turicreate as tc
import matplotlib.pyplot as plt
import numpy as np
import sys
import os
import time
from scipy.sparse import csr_matrix
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances
%matplotlib inline
```
## Load the Wikipedia dataset
```
wiki = tc.SFrame('people_wiki.sframe/')
```
As we did in previous assignments, let's extract the TF-IDF features:
```
wiki['tf_idf'] = tc.text_analytics.tf_idf(wiki['text'])
```
To run k-means on this dataset, we should convert the data matrix into a sparse matrix.
```
from em_utilities import sframe_to_scipy # converter (SFrame -> scipy sparse matrix)
# This will take about a minute or two.
# Add a row-number column so SFrame rows can be matched to matrix rows -- TODO confirm column name
wiki = wiki.add_row_number()
tf_idf, map_word_to_index = sframe_to_scipy(wiki, 'tf_idf')
```
To be consistent with the k-means assignment, let's normalize all vectors to have unit norm.
```
from sklearn.preprocessing import normalize
# Scale every row to unit L2 norm so Euclidean k-means behaves like cosine clustering
tf_idf = normalize(tf_idf)
```
## Bipartition the Wikipedia dataset using k-means
Recall our workflow for clustering text data with k-means:
1. Load the dataframe containing a dataset, such as the Wikipedia text dataset.
2. Extract the data matrix from the dataframe.
3. Run k-means on the data matrix with some value of k.
4. Visualize the clustering results using the centroids, cluster assignments, and the original dataframe. We keep the original dataframe around because the data matrix does not keep auxiliary information (in the case of the text dataset, the title of each article).
Let us modify the workflow to perform bipartitioning:
1. Load the dataframe containing a dataset, such as the Wikipedia text dataset.
2. Extract the data matrix from the dataframe.
3. Run k-means on the data matrix with k=2.
4. Divide the data matrix into two parts using the cluster assignments.
5. Divide the dataframe into two parts, again using the cluster assignments. This step is necessary to allow for visualization.
6. Visualize the bipartition of data.
We'd like to be able to repeat Steps 3-6 multiple times to produce a **hierarchy** of clusters such as the following:
```
(root)
|
+------------+-------------+
| |
Cluster Cluster
+------+-----+ +------+-----+
| | | |
Cluster Cluster Cluster Cluster
```
Each **parent cluster** is bipartitioned to produce two **child clusters**. At the very top is the **root cluster**, which consists of the entire dataset.
Now we write a wrapper function to bipartition a given cluster using k-means. There are three variables that together comprise the cluster:
* `dataframe`: a subset of the original dataframe that correspond to member rows of the cluster
* `matrix`: same set of rows, stored in sparse matrix format
* `centroid`: the centroid of the cluster (not applicable for the root cluster)
Rather than passing around the three variables separately, we package them into a Python dictionary. The wrapper function takes a single dictionary (representing a parent cluster) and returns two dictionaries (representing the child clusters).
```
def bipartition(cluster, maxiter=400, num_runs=4, seed=None):
    '''Split one cluster into two child clusters using 2-means.

    cluster: should be a dictionary containing the following keys
        * dataframe: original dataframe
        * matrix: same data, in matrix format
        * centroid: centroid for this particular cluster (absent for the root)

    Returns a (left_child, right_child) pair of dictionaries with the same
    keys, where "left" is the k-means cluster labelled 0.
    '''

    data_matrix = cluster['matrix']
    dataframe   = cluster['dataframe']

    # Run k-means on the data matrix with k=2. We use scikit-learn here to simplify workflow.
    # BUGFIX: the `n_jobs` argument was deprecated in scikit-learn 0.23 and
    # removed in 1.0, so it is no longer passed.
    kmeans_model = KMeans(n_clusters=2, max_iter=maxiter, n_init=num_runs, random_state=seed)
    kmeans_model.fit(data_matrix)
    centroids, cluster_assignment = kmeans_model.cluster_centers_, kmeans_model.labels_

    # Divide the data matrix into two parts using the cluster assignments.
    data_matrix_left_child, data_matrix_right_child = data_matrix[cluster_assignment==0], \
                                                      data_matrix[cluster_assignment==1]

    # Divide the dataframe into two parts, again using the cluster assignments.
    cluster_assignment_sa = tc.SArray(cluster_assignment) # minor format conversion
    dataframe_left_child, dataframe_right_child     = dataframe[cluster_assignment_sa==0], \
                                                      dataframe[cluster_assignment_sa==1]

    # Package relevant variables for the child clusters
    cluster_left_child  = {'matrix': data_matrix_left_child,
                           'dataframe': dataframe_left_child,
                           'centroid': centroids[0]}
    cluster_right_child = {'matrix': data_matrix_right_child,
                           'dataframe': dataframe_right_child,
                           'centroid': centroids[1]}

    return (cluster_left_child, cluster_right_child)
```
The following cell performs bipartitioning of the Wikipedia dataset. Allow 2+ minutes to finish.
Note. For the purpose of the assignment, we set an explicit seed (`seed=0`) to produce identical outputs for every run. In practical applications, you might want to use different random seeds for all runs.
```
%%time
wiki_data = {'matrix': tf_idf, 'dataframe': wiki} # no 'centroid' for the root cluster
# First split of the full dataset; the seed is fixed for reproducible output
left_child, right_child = bipartition(wiki_data, maxiter=100, num_runs=1, seed=0)
```
Let's examine the contents of one of the two clusters, which we call the `left_child`, referring to the tree visualization above.
```
left_child
```
And here is the content of the other cluster we named `right_child`.
```
right_child
```
## Visualize the bipartition
We provide you with a modified version of the visualization function from the k-means assignment. For each cluster, we print the top 5 words with highest TF-IDF weights in the centroid and display excerpts for the 8 nearest neighbors of the centroid.
```
def display_single_tf_idf_cluster(cluster, map_index_to_word):
    '''Print the cluster's top-5 centroid words and excerpts of the 8
    documents nearest to the centroid.

    map_index_to_word: SFrame specifying the mapping between words and column indices'''

    wiki_subset   = cluster['dataframe']
    tf_idf_subset = cluster['matrix']
    centroid      = cluster['centroid']

    # Print top 5 words with largest TF-IDF weights in the cluster
    idx = centroid.argsort()[::-1]
    for i in range(5):
        # BUGFIX: look up the i-th top word by its column index; the original
        # printed the whole 'category' column instead of a single word, and
        # carried a Python-2 trailing comma after print(...).
        print('{0}:{1:.3f}'.format(map_index_to_word['category'][idx[i]], centroid[idx[i]]))
    print('')

    # Compute distances from the centroid to all data points in the cluster.
    distances = pairwise_distances(tf_idf_subset, [centroid], metric='euclidean').flatten()
    # compute nearest neighbors of the centroid within the cluster.
    nearest_neighbors = distances.argsort()
    # For 8 nearest neighbors, print the title as well as first 180 characters of text.
    # Wrap the text at 80-character mark.
    for i in range(8):
        text = ' '.join(wiki_subset[nearest_neighbors[i]]['text'].split(None, 25)[0:25])
        print('* {0:50s} {1:.5f}\n {2:s}\n {3:s}'.format(wiki_subset[nearest_neighbors[i]]['name'],
              distances[nearest_neighbors[i]], text[:90], text[90:180] if len(text) > 90 else ''))
    print('')
```
Let's visualize the two child clusters:
```
display_single_tf_idf_cluster(left_child, map_word_to_index)
display_single_tf_idf_cluster(right_child, map_word_to_index)
```
The right cluster consists of athletes and artists (singers and actors/actresses), whereas the left cluster consists of non-athletes and non-artists. So far, we have a single-level hierarchy consisting of two clusters, as follows:
```
Wikipedia
+
|
+--------------------------+--------------------+
| |
+ +
Non-athletes/artists Athletes/artists
```
Is this hierarchy good enough? **When building a hierarchy of clusters, we must keep our particular application in mind.** For instance, we might want to build a **directory** for Wikipedia articles. A good directory would let you quickly narrow down your search to a small set of related articles. The categories of athletes and non-athletes are too general to facilitate efficient search. For this reason, we decide to build another level into our hierarchy of clusters with the goal of getting more specific cluster structure at the lower level. To that end, we subdivide both the `athletes/artists` and `non-athletes/artists` clusters.
## Perform recursive bipartitioning
### Cluster of athletes and artists
To help identify the clusters we've built so far, let's give them easy-to-read aliases:
```
non_athletes_artists = left_child
athletes_artists = right_child
```
Using the bipartition function, we produce two child clusters of the athlete cluster:
```
# Bipartition the cluster of athletes and artists
left_child_athletes_artists, right_child_athletes_artists = bipartition(athletes_artists,
maxiter=100, num_runs=6, seed=1)
```
The left child cluster mainly consists of athletes:
```
display_single_tf_idf_cluster(left_child_athletes_artists, map_word_to_index)
```
On the other hand, the right child cluster consists mainly of artists (singers and actors/actresses):
```
display_single_tf_idf_cluster(right_child_athletes_artists, map_word_to_index)
```
Our hierarchy of clusters now looks like this:
```
Wikipedia
+
|
+--------------------------+--------------------+
| |
+ +
Non-athletes/artists Athletes/artists
+
|
+----------+----------+
| |
| |
+ |
athletes artists
```
Should we keep subdividing the clusters? If so, which cluster should we subdivide? To answer this question, we again think about our application. Since we organize our directory by topics, it would be nice to have topics that are about as coarse as each other. For instance, if one cluster is about baseball, we expect some other clusters about football, basketball, volleyball, and so forth. That is, **we would like to achieve similar level of granularity for all clusters.**
Both the athletes and artists node can be subdivided more, as each one can be divided into more descriptive professions (singer/actress/painter/director, or baseball/football/basketball, etc.). Let's explore subdividing the athletes cluster further to produce finer child clusters.
Let's give the clusters aliases as well:
```
athletes = left_child_athletes_artists
artists = right_child_athletes_artists
```
### Cluster of athletes
In answering the following quiz question, take a look at the topics represented in the top documents (those closest to the centroid), as well as the list of words with highest TF-IDF weights.
Let us bipartition the cluster of athletes.
```
left_child_athletes, right_child_athletes = bipartition(athletes, maxiter=100, num_runs=6, seed=1)
display_single_tf_idf_cluster(left_child_athletes, map_word_to_index)
display_single_tf_idf_cluster(right_child_athletes, map_word_to_index)
```
**Quiz Question**. Which diagram best describes the hierarchy right after splitting the `athletes` cluster? Refer to the quiz form for the diagrams.
**Caution**. The granularity criteria is an imperfect heuristic and must be taken with a grain of salt. It takes a lot of manual intervention to obtain a good hierarchy of clusters.
* **If a cluster is highly mixed, the top articles and words may not convey the full picture of the cluster.** Thus, we may be misled if we judge the purity of clusters solely by their top documents and words.
* **Many interesting topics are hidden somewhere inside the clusters but do not appear in the visualization.** We may need to subdivide further to discover new topics. For instance, subdividing the `ice_hockey_football` cluster led to the appearance of runners and golfers.
### Cluster of non-athletes
Now let us subdivide the cluster of non-athletes.
```
%%time
# Bipartition the cluster of non-athletes
left_child_non_athletes_artists, right_child_non_athletes_artists = bipartition(non_athletes_artists, maxiter=100, num_runs=3, seed=1)
display_single_tf_idf_cluster(left_child_non_athletes_artists, map_word_to_index)
display_single_tf_idf_cluster(right_child_non_athletes_artists, map_word_to_index)
```
The clusters are not as clear, but the left cluster has a tendency to show important female figures, and the right one to show politicians and government officials.
Let's divide them further.
```
female_figures = left_child_non_athletes_artists
politicians_etc = right_child_non_athletes_artists
```
**Quiz Question**. Let us bipartition the clusters `female_figures` and `politicians`. Which diagram best describes the resulting hierarchy of clusters for the non-athletes? Refer to the quiz for the diagrams.
**Note**. Use `maxiter=100, num_runs=6, seed=1` for consistency of output.
```
# Bipartition the cluster of female figures
left_female_figures, right_female_figures = bipartition(female_figures, maxiter=100, num_runs=6, seed=1)
display_single_tf_idf_cluster(left_female_figures, map_word_to_index)
display_single_tf_idf_cluster(right_female_figures, map_word_to_index)
# Bipartition the cluster of politicians_etc
left_politicians_etc, right_politicians_etc = bipartition(politicians_etc, maxiter=100, num_runs=6, seed=1)
display_single_tf_idf_cluster(left_politicians_etc, map_word_to_index)
display_single_tf_idf_cluster(right_politicians_etc, map_word_to_index)
```
| github_jupyter |
# Self-Driving Car Engineer Nanodegree
## Deep Learning
## Project: Build a Traffic Sign Recognition Classifier
In this notebook, a template is provided for you to implement your functionality in stages, which is required to successfully complete this project. If additional code is required that cannot be included in the notebook, be sure that the Python code is successfully imported and included in your submission if necessary.
> **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the iPython Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.
In addition to implementing code, there is a writeup to complete. The writeup should be completed in a separate file, which can be either a markdown file or a pdf document. There is a [write up template](https://github.com/udacity/CarND-Traffic-Sign-Classifier-Project/blob/master/writeup_template.md) that can be used to guide the writing process. Completing the code template and writeup template will cover all of the [rubric points](https://review.udacity.com/#!/rubrics/481/view) for this project.
The [rubric](https://review.udacity.com/#!/rubrics/481/view) contains "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. The stand out suggestions are optional. If you decide to pursue the "stand out suggestions", you can include the code in this Ipython notebook and also discuss the results in the writeup file.
>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. In addition, Markdown cells can be edited by double-clicking the cell to enter edit mode.
---
## Step 0: Load The Data
```
# Load pickled data
import pickle
import os
import numpy as np
# TODO: Fill this in based on where you saved the training and testing data
training_file = 'train.p'
validation_file = 'valid.p'
testing_file = 'test.p'
with open(training_file, mode='rb') as f:
    train = pickle.load(f)
with open(validation_file, mode='rb') as f:
    valid = pickle.load(f)
with open(testing_file, mode='rb') as f:
    test = pickle.load(f)
# Each pickle is a dict: 'features' is a 4-D image array, 'labels' the class ids
X_train, y_train = train['features'], train['labels']
X_valid, y_valid = valid['features'], valid['labels']
X_test, y_test = test['features'], test['labels']
# Cast to float32 so the channel averaging below yields float images
X_train = X_train.astype(np.float32)
X_valid = X_valid.astype(np.float32)
X_test = X_test.astype(np.float32)
# Convert RGB to grayscale by averaging over the channel axis
X_train = np.average(X_train, axis=3)
X_valid = np.average(X_valid, axis=3)
X_test = np.average(X_test, axis=3)
# Restore an explicit single-channel axis: (N, H, W) -> (N, H, W, 1)
X_train = X_train.reshape(X_train.shape[0],X_train.shape[1],X_train.shape[2],1)
X_valid = X_valid.reshape(X_valid.shape[0], X_valid.shape[1], X_valid.shape[2],1)
X_test = X_test.reshape(X_test.shape[0], X_test.shape[1], X_test.shape[2],1)
print(X_train.shape)
print(type(y_train))
```
---
## Step 1: Dataset Summary & Exploration
The pickled data is a dictionary with 4 key/value pairs:
- `'features'` is a 4D array containing raw pixel data of the traffic sign images, (num examples, width, height, channels).
- `'labels'` is a 1D array containing the label/class id of the traffic sign. The file `signnames.csv` contains id -> name mappings for each id.
- `'sizes'` is a list containing tuples, (width, height) representing the original width and height the image.
- `'coords'` is a list containing tuples, (x1, y1, x2, y2) representing coordinates of a bounding box around the sign in the image. **THESE COORDINATES ASSUME THE ORIGINAL IMAGE. THE PICKLED DATA CONTAINS RESIZED VERSIONS (32 by 32) OF THESE IMAGES**
Complete the basic data summary below. Use python, numpy and/or pandas methods to calculate the data summary rather than hard coding the results. For example, the [pandas shape method](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.shape.html) might be useful for calculating some of the summary results.
### Provide a Basic Summary of the Data Set Using Python, Numpy and/or Pandas
```
### Replace each question mark with the appropriate value.
### Use python, pandas or numpy methods rather than hard coding the results
# TODO: Number of training examples
n_train = len(train['features'])
# TODO: Number of validation examples
n_validation = len(valid['features'])
# TODO: Number of testing examples.
n_test = len(test['features'])
# TODO: What's the shape of an traffic sign image?
# BUGFIX: report the shape of a single image (height, width, channels) rather
# than the shape of the whole 4-D data array.
image_shape = train['features'][0].shape
# TODO: How many unique classes/labels there are in the dataset.
n_classes = len(np.unique(np.array(train['labels'])))
print("Number of training examples =", n_train)
print("Number of testing examples =", n_test)
print("Image data shape =", image_shape)
print("Number of classes =", n_classes)
```
### Include an exploratory visualization of the dataset
Visualize the German Traffic Signs Dataset using the pickled file(s). This is open ended, suggestions include: plotting traffic sign images, plotting the count of each sign, etc.
The [Matplotlib](http://matplotlib.org/) [examples](http://matplotlib.org/examples/index.html) and [gallery](http://matplotlib.org/gallery.html) pages are a great resource for doing visualizations in Python.
**NOTE:** It's recommended you start with something simple first. If you wish to do more, come back to it after you've completed the rest of the sections. It can be interesting to look at the distribution of classes in the training, validation and test set. Is the distribution the same? Are there more examples of some classes than others?
```
### Data exploration visualization code goes here.
### Feel free to use as many code cells as needed.
import matplotlib.pyplot as plt
# Visualizations will be shown in the notebook.
%matplotlib inline
# Class-frequency histogram of the training labels (one bin per class)
fig,ax = plt.subplots(1, 1)
ax.hist(y_train, bins=n_classes)
plt.show()
```
----
## Step 2: Design and Test a Model Architecture
Design and implement a deep learning model that learns to recognize traffic signs. Train and test your model on the [German Traffic Sign Dataset](http://benchmark.ini.rub.de/?section=gtsrb&subsection=dataset).
The LeNet-5 implementation shown in the [classroom](https://classroom.udacity.com/nanodegrees/nd013/parts/fbf77062-5703-404e-b60c-95b78b2f3f9e/modules/6df7ae49-c61c-4bb2-a23e-6527e69209ec/lessons/601ae704-1035-4287-8b11-e2c2716217ad/concepts/d4aca031-508f-4e0b-b493-e7b706120f81) at the end of the CNN lesson is a solid starting point. You'll have to change the number of classes and possibly the preprocessing, but aside from that it's plug and play!
With the LeNet-5 solution from the lecture, you should expect a validation set accuracy of about 0.89. To meet specifications, the validation set accuracy will need to be at least 0.93. It is possible to get an even higher accuracy, but 0.93 is the minimum for a successful project submission.
There are various aspects to consider when thinking about this problem:
- Neural network architecture (is the network over or underfitting?)
- Play around preprocessing techniques (normalization, rgb to grayscale, etc)
- Number of examples per label (some have more than others).
- Generate fake data.
Here is an example of a [published baseline model on this problem](http://yann.lecun.com/exdb/publis/pdf/sermanet-ijcnn-11.pdf). It's not required to be familiar with the approach used in the paper but, it's good practice to try to read papers like these.
### Pre-process the Data Set (normalization, grayscale, etc.)
Minimally, the image data should be normalized so that the data has mean zero and equal variance. For image data, `(pixel - 128)/ 128` is a quick way to approximately normalize the data and can be used in this project.
Other pre-processing steps are optional. You can try different techniques to see if it improves performance.
Use the code cell (or multiple code cells, if necessary) to implement the first step of your project.
```
### Preprocess the data here. It is required to normalize the data. Other preprocessing steps could include
### converting to grayscale, etc.
### Feel free to use as many code cells as needed.
# Module-level statistics consumed by normalize(); reassigned before each call.
stddev = 0
mean = 0
def normalize(pixel):
    # Standard-score normalization using the current module-level mean/stddev.
    return (pixel - mean)/stddev
stddev = np.std(X_train)
mean = np.average(X_train)
X_train = normalize(X_train)
# NOTE(review): validation and test sets are normalized with their own
# statistics; conventional practice is to reuse the training mean/stddev --
# confirm this is intentional.
stddev = np.std(X_valid)
mean = np.average(X_valid)
X_valid = normalize(X_valid)
stddev = np.std(X_test)
mean = np.average(X_test)
X_test = normalize(X_test)
#X_train = X_train.astype(np.float32)
#X_valid = X_valid.astype(np.float32)
#X_test = X_test.astype(np.float32)
print('Pishu',X_valid)
```
### Model Architecture
```
### Define your architecture here.
### Feel free to use as many code cells as needed.
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()  # run the TF1-style static graph below under TF2
# Hyperparameters.
rate = 0.001        # Adam learning rate
epochs = 30         # nominal epoch budget (NOTE: the training cell actually loops on accuracy, not epochs)
batch_size = 100
mu = 0              # mean for truncated-normal weight initialization
sigma = 0.1         # stddev for truncated-normal weight initialization
# Graph inputs: batches of 32x32 single-channel images and integer class ids.
features = tf.placeholder(tf.float32, shape=(None, 32, 32, 1))
labels = tf.placeholder(tf.int32, (None))
# n_classes is defined in an earlier cell — presumably 43 for the German
# traffic-sign set, matching the 43-unit output layer below; verify.
onehot_labels = tf.one_hot(labels, n_classes)
keep_prob = tf.placeholder(tf.float32)  # dropout keep probability (feed 1.0 at evaluation time)
def Lenet(features):
    """Build a LeNet-style convnet and return the unscaled class logits.

    Architecture (input 32x32x1):
      conv 5x5, 1->6  (VALID) -> ReLU -> dropout -> 2x2 max-pool
      conv 3x3, 6->6  (VALID) -> ReLU -> dropout -> 2x2 max-pool
      conv 2x2, 6->16 (VALID) -> ReLU -> dropout
      flatten -> fully connected -> 43 logits

    Uses the module-level ``mu``/``sigma`` for weight initialization and the
    ``keep_prob`` placeholder for dropout.
    """
    # Layer 1: 5x5 conv, 1 -> 6 feature maps; 32x32 -> 28x28, pooled to 14x14.
    weight_layer1 = tf.Variable(tf.truncated_normal([5,5,1,6], mean=mu, stddev=sigma))
    bias_layer1 = tf.Variable(tf.zeros(6))
    conv_layer1 = tf.nn.conv2d(features, weight_layer1, strides=[1,1,1,1], padding='VALID')
    conv_layer1 = tf.nn.bias_add(conv_layer1, bias_layer1)
    conv_layer1 = tf.nn.relu(conv_layer1)
    conv_layer1 = tf.nn.dropout(conv_layer1, keep_prob)
    conv_layer1 = tf.nn.max_pool(conv_layer1, ksize=[1,2,2,1], strides=[1,2,2,1], padding='VALID')
    # Extra layer "6": 3x3 conv, 6 -> 6 maps; 14x14 -> 12x12, pooled to 6x6.
    weight_layer6 = tf.Variable(tf.truncated_normal([3,3,6,6], mean=mu, stddev=sigma))
    bias_layer6 = tf.Variable(tf.zeros(6))
    conv_layer6 = tf.nn.conv2d(conv_layer1, weight_layer6, strides=[1,1,1,1], padding='VALID')
    conv_layer6 = tf.nn.bias_add(conv_layer6, bias_layer6)
    conv_layer6 = tf.nn.relu(conv_layer6)
    conv_layer6 = tf.nn.dropout(conv_layer6, keep_prob)
    conv_layer6 = tf.nn.max_pool(conv_layer6, ksize=[1,2,2,1], strides=[1,2,2,1], padding='VALID')
    # Layer 2: 2x2 conv, 6 -> 16 maps; 6x6 -> 5x5. No pooling here.
    # NOTE(review): mean=0 literal instead of `mu` (currently equal since mu == 0).
    weight_layer2 = tf.Variable(tf.truncated_normal([2,2,6,16], mean=0, stddev=sigma))
    bias_layer2 = tf.Variable(tf.zeros(16))
    conv_layer2 = tf.nn.conv2d(conv_layer6, weight_layer2, strides=[1,1,1,1], padding='VALID')
    conv_layer2 = tf.nn.bias_add(conv_layer2, bias_layer2)
    conv_layer2 = tf.nn.relu(conv_layer2)
    conv_layer2 = tf.nn.dropout(conv_layer2, keep_prob)
    #conv_layer2 = tf.nn.max_pool(conv_layer2, ksize=[1,2,2,1], strides=[1,2,2,1], padding='VALID')
    #r = tf.shape(conv_layer2)
    # Flatten the conv output to (batch, H*W*C) for the dense layer.
    conv_layer2 = tf.reshape(conv_layer2, shape=[-1, conv_layer2.shape[1] * conv_layer2.shape[2] * conv_layer2.shape[3]])
    #conv_layer2 = tf.layers.flatten(conv_layer2)
    print("Aditya", conv_layer2.shape)  # debug: flattened feature size
    # Single fully connected layer straight to the 43 class logits.
    weight_layer3 = tf.Variable(tf.truncated_normal([conv_layer2.shape[1], 43], mean=0, stddev=sigma))
    bias_layer3 = tf.Variable(tf.zeros(43))
    full_layer3 = tf.add(tf.matmul(conv_layer2, weight_layer3), bias_layer3)
    #full_layer3 = tf.nn.relu(full_layer3)
    # Disabled draft of the classic LeNet 120 -> 84 -> 43 dense stack, kept
    # for reference (this bare string is evaluated and discarded at runtime).
    """
    weight_layer4 = tf.Variable(tf.truncated_normal([120,84]))
    bias_layer4 = tf.Variable(tf.zeros(84))
    full_layer4 = tf.add(tf.matmul(full_layer3, weight_layer4), bias_layer4)
    full_layer4 = tf.nn.relu(full_layer4)
    weight_layer5 = tf.Variable(tf.truncated_normal([84,43]))
    bias_layer5 = tf.Variable(tf.zeros(43))
    full_layer5 = tf.add(tf.matmul(full_layer4, weight_layer5), bias_layer5)
    """
    return full_layer3
"""
def optimize_weights(x, y):
logit = Lenet(features)
#entropy = tf.multiply(tf.log(layer) * (-1), y)
#entropy = tf.reduce_sum(entropy, axis=1)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=logit)
loss_operation = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate = rate)
#optimizer = tf.train.GradientDescentOptimizer(learning_rate = rate)
training_operation = optimizer.minimize(loss_operation)
return training_operation
"""
#def optimize_weights(x, y):
logit = Lenet(features)
#entropy = tf.multiply(tf.log(layer) * (-1), y)
#entropy = tf.reduce_sum(entropy, axis=1)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(labels=onehot_labels, logits=logit)
loss_operation = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate = rate)
#optimizer = tf.train.GradientDescentOptimizer(learning_rate = rate)
training_operation = optimizer.minimize(loss_operation)
#return training_operation
"""
def calc_accuracy(x,y):
logit = Lenet(x)
temp = tf.equal(tf.argmax(logit,1), tf.argmax(y,1))
accuracy_operation = tf.reduce_mean(tf.cast(temp, tf.float32))
return accuracy_operation
"""
#def calc_accuracy(x,y):
#logits = Lenet(x)
temp = tf.equal(tf.argmax(logit,1), tf.argmax(onehot_labels,1))
accuracy_operation = tf.reduce_mean(tf.cast(temp, tf.float32))
#return accuracy_operation
```
### Train, Validate and Test the Model
A validation set can be used to assess how well the model is performing. Low accuracy on both the training and validation
sets implies underfitting. High accuracy on the training set but low accuracy on the validation set implies overfitting.
```
### Train your model here.
### Calculate and report the accuracy on the training and validation set.
### Once a final model architecture is selected,
### the accuracy on the test set should be calculated and reported as well.
### Feel free to use as many code cells as needed.
from sklearn.utils import shuffle
import math
import os.path

# Checkpoint path used by all later inference cells.
save_file = './mode.ckpt'
saver = tf.train.Saver()
# Disabled draft: the graph ops (`training_operation`, `accuracy_operation`)
# are built at module level in the previous cell, not via helper functions.
# training_operation = optimize_weights(features, onehot_labels)
# accuracy_operation = calc_accuracy(features, onehot_labels)

with tf.Session() as sess:
    # To resume from a previous run instead of re-initializing, restore here:
    # if os.path.isfile('mode.ckpt.index'):
    #     saver.restore(sess, save_file)
    sess.run(tf.global_variables_initializer())
    num_examples = X_train.shape[0]
    num_batches = math.ceil(num_examples / batch_size)
    print('Roka', num_examples, num_batches)  # debug: dataset/batch counts
    # Train until the validation accuracy reaches the 0.93 project target.
    # NOTE(review): this loop never terminates if the target is unreachable —
    # consider also bounding it by the `epochs` hyperparameter.
    validation_accuracy = 0
    i = 0
    while (validation_accuracy < 0.93):
        X_train, y_train = shuffle(X_train, y_train)
        for j in range(num_batches):
            x_batch = X_train[j*batch_size:(j+1)*batch_size]
            y_batch = y_train[j*batch_size:(j+1)*batch_size]
            sess.run(training_operation, feed_dict={features:x_batch, labels:y_batch, keep_prob:0.75})
        # BUG FIX: evaluate with dropout disabled (keep_prob=1). Dropout is a
        # training-time regularizer; leaving it at 0.75 here made the reported
        # training accuracy noisy and systematically pessimistic.
        train_accuracy = sess.run(accuracy_operation, feed_dict={features:X_train, labels:y_train, keep_prob:1})
        validation_accuracy = sess.run(accuracy_operation, feed_dict={features:X_valid, labels:y_valid, keep_prob:1})
        print("EPOCH {}...".format(i+1))
        i = i + 1
        print("Train accuracy = {:.3f}".format(train_accuracy))
        print("Validation accuracy = {:.3f}".format(validation_accuracy))
        print()
    saver.save(sess, save_file)

# Evaluate the saved model once on the held-out test set.
with tf.Session() as sess:
    saver.restore(sess, save_file)
    test_accuracy = sess.run(accuracy_operation, feed_dict={features:X_test, labels:y_test, keep_prob:1})
    print("Test accuracy = {:.3f}".format(test_accuracy))
```
### ---
## Step 3: Test a Model on New Images
To give yourself more insight into how your model is working, download at least five pictures of German traffic signs from the web and use your model to predict the traffic sign type.
You may find `signnames.csv` useful as it contains mappings from the class id (integer) to the actual sign name.
### Load and Output the Images
```
### Load the images and plot them here.
### Feel free to use as many code cells as needed
import cv2
import matplotlib.image as mpimg

# Web images of German traffic signs paired with their ground-truth class ids
# (see signnames.csv for the id -> name mapping).
images = [('speed60.png', 3), ('roundabout.png', 40), ('yield.png', 13),
          ('children_crossing.png', 28), ('double_curve.png', 21),
          ('pedestrians.png', 27), ('traffic_signals.png', 26),
          ('wild_animals.png', 31)]

# Accumulate per-image (1, 32, 32, 1) arrays and concatenate once at the end
# instead of growing an array with repeated np.concatenate (quadratic copies).
resized_list = []
label_list = []
for filename, class_id in images:
    img = cv2.imread(filename)
    print(img.shape)
    # Resize to the 32x32 input size the network was trained on.
    resized = cv2.resize(img, (32, 32), interpolation=cv2.INTER_LINEAR)
    print(resized.shape)
    # Add a batch axis, average the color channels down to one (matching the
    # grayscale training input), then restore the trailing channel axis.
    resized = resized[np.newaxis, ...]
    print(resized.shape)
    resized = np.average(resized, axis=3)
    resized = resized[..., np.newaxis]
    resized_list.append(resized)
    label_list.append(class_id)

x_new = np.concatenate(resized_list)
# Integer dtype to match the tf.int32 `labels` placeholder (the original code
# built a float array via np.append).
y_new = np.array(label_list, dtype=np.int32)
print("SHAP", x_new.shape)
x_new = x_new.astype(np.float32)
# Normalize with the *training-set* statistics — same pipeline as training.
stddev = np.std(X_train)
mean = np.average(X_train)
x_new = normalize(x_new)
```
### Predict the Sign Type for Each Image
```
### Run the predictions here and use the model to output the prediction for each image.
### Make sure to pre-process the images with the same pre-processing pipeline used earlier.
### Feel free to use as many code cells as needed.
# Softmax over the logits of the graph built earlier: one probability per
# class for each downloaded web image.
probabilites = tf.nn.softmax(logit)
with tf.Session() as sess:
    # Re-load the weights saved by the training cell.
    saver.restore(sess, save_file)
    # keep_prob=1: dropout disabled at inference time.
    probabilites_new = sess.run(probabilites, feed_dict={features:x_new, keep_prob:1})
    print(probabilites_new)
```
### Analyze Performance
```
### Calculate the accuracy for these 5 new images.
### For example, if the model predicted 1 out of 5 signs correctly, it's 20% accurate on these new images.
with tf.Session() as sess:
    saver.restore(sess, save_file)
    print(sess)  # debug: session object
    # Fraction of the web images whose predicted class matches y_new.
    new_accuracy = sess.run(accuracy_operation, feed_dict={features:x_new, labels:y_new, keep_prob:1})
    print("New accuracy = {:.3f}".format(new_accuracy))
```
### Output Top 5 Softmax Probabilities For Each Image Found on the Web
For each of the new images, print out the model's softmax probabilities to show the **certainty** of the model's predictions (limit the output to the top 5 probabilities for each image). [`tf.nn.top_k`](https://www.tensorflow.org/versions/r0.12/api_docs/python/nn.html#top_k) could prove helpful here.
The example below demonstrates how tf.nn.top_k can be used to find the top k predictions for each image.
`tf.nn.top_k` will return the values and indices (class ids) of the top k predictions. So if k=3, for each sign, it'll return the 3 largest probabilities (out of a possible 43) and the corresponding class ids.
Take this numpy array as an example. The values in the array represent predictions. The array contains softmax probabilities for five candidate images with six possible classes. `tf.nn.top_k` is used to choose the three classes with the highest probability:
```
# (5, 6) array
a = np.array([[ 0.24879643, 0.07032244, 0.12641572, 0.34763842, 0.07893497,
0.12789202],
[ 0.28086119, 0.27569815, 0.08594638, 0.0178669 , 0.18063401,
0.15899337],
[ 0.26076848, 0.23664738, 0.08020603, 0.07001922, 0.1134371 ,
0.23892179],
[ 0.11943333, 0.29198961, 0.02605103, 0.26234032, 0.1351348 ,
0.16505091],
[ 0.09561176, 0.34396535, 0.0643941 , 0.16240774, 0.24206137,
0.09155967]])
```
Running it through `sess.run(tf.nn.top_k(tf.constant(a), k=3))` produces:
```
TopKV2(values=array([[ 0.34763842, 0.24879643, 0.12789202],
[ 0.28086119, 0.27569815, 0.18063401],
[ 0.26076848, 0.23892179, 0.23664738],
[ 0.29198961, 0.26234032, 0.16505091],
[ 0.34396535, 0.24206137, 0.16240774]]), indices=array([[3, 0, 5],
[0, 1, 4],
[0, 5, 1],
[1, 3, 5],
[1, 4, 3]], dtype=int32))
```
Looking just at the first row we get `[ 0.34763842, 0.24879643, 0.12789202]`, you can confirm these are the 3 largest probabilities in `a`. You'll also notice `[3, 0, 5]` are the corresponding indices.
```
### Print out the top five softmax probabilities for the predictions on the German traffic sign images found on the web.
### Feel free to use as many code cells as needed.
# Top-5 class probabilities (values) and their class ids (indices) per image.
topk_probabilites = tf.nn.top_k(probabilites, k=5)
with tf.Session() as sess:
    saver.restore(sess, save_file)
    topk_probabilites_new = sess.run(topk_probabilites, feed_dict={features:x_new, keep_prob:1})
    print(topk_probabilites_new)
```
### Project Writeup
Once you have completed the code implementation, document your results in a project writeup using this [template](https://github.com/udacity/CarND-Traffic-Sign-Classifier-Project/blob/master/writeup_template.md) as a guide. The writeup can be in a markdown or pdf file.
> **Note**: Once you have completed all of the code implementations and successfully answered each question above, you may finalize your work by exporting the iPython Notebook as an HTML document. You can do this by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.
---
## Step 4 (Optional): Visualize the Neural Network's State with Test Images
This Section is not required to complete but acts as an additional exercise for understanding the output of a neural network's weights. While neural networks can be a great learning device they are often referred to as a black box. We can understand what the weights of a neural network look like better by plotting their feature maps. After successfully training your neural network you can see what its feature maps look like by plotting the output of the network's weight layers in response to a test stimuli image. From these plotted feature maps, it's possible to see what characteristics of an image the network finds interesting. For a sign, maybe the inner network feature maps react with high activation to the sign's boundary outline or to the contrast in the sign's painted symbol.
Provided for you below is the function code that allows you to get the visualization output of any tensorflow weight layer you want. The inputs to the function should be a stimuli image, one used during training or a new one you provided, and then the tensorflow variable name that represents the layer's state during the training process, for instance if you wanted to see what the [LeNet lab's](https://classroom.udacity.com/nanodegrees/nd013/parts/fbf77062-5703-404e-b60c-95b78b2f3f9e/modules/6df7ae49-c61c-4bb2-a23e-6527e69209ec/lessons/601ae704-1035-4287-8b11-e2c2716217ad/concepts/d4aca031-508f-4e0b-b493-e7b706120f81) feature maps looked like for it's second convolutional layer you could enter conv2 as the tf_activation variable.
For an example of what feature map outputs look like, check out NVIDIA's results in their paper [End-to-End Deep Learning for Self-Driving Cars](https://devblogs.nvidia.com/parallelforall/deep-learning-self-driving-cars/) in the section Visualization of internal CNN State. NVIDIA was able to show that their network's inner weights had high activations to road boundary lines by comparing feature maps from an image with a clear path to one without. Try experimenting with a similar test to show that your trained network's weights are looking for interesting features, whether it's looking at differences in feature maps from images with or without a sign, or even what feature maps look like in a trained network vs a completely untrained one on the same sign image.
<figure>
<img src="visualize_cnn.png" width="380" alt="Combined Image" />
<figcaption>
<p></p>
<p style="text-align: center;"> Your output should look something like this (above)</p>
</figcaption>
</figure>
<p></p>
```
### Visualize your network's feature maps here.
### Feel free to use as many code cells as needed.
# image_input: the test image being fed into the network to produce the feature maps
# tf_activation: should be a tf variable name used during your training procedure that represents the calculated state of a specific weight layer
# activation_min/max: can be used to view the activation contrast in more detail, by default matplot sets min and max to the actual min and max values of the output
# plt_num: used to plot out multiple different weight feature map sets on the same block, just extend the plt number for each new feature map entry
def outputFeatureMap(image_input, tf_activation, activation_min=-1, activation_max=-1 ,plt_num=1):
    """Plot every feature map of ``tf_activation`` for one stimulus image.

    Parameters
    ----------
    image_input : array
        Preprocessed input batch fed to the network's ``x`` placeholder; it
        must already match the size/normalization the network expects.
    tf_activation : tf.Tensor
        Layer activation to visualize; axis 3 indexes the feature maps.
    activation_min, activation_max : float
        Optional display range for contrast; -1 means "let matplotlib pick
        the range from the data".
    plt_num : int
        Figure number, so successive calls plot to separate figures.

    Relies on the module-level session ``sess`` and input placeholder ``x``
    (if ``tf_activation`` is not visible here, it may be defined inside a
    function elsewhere).
    """
    activation = tf_activation.eval(session=sess,feed_dict={x : image_input})
    featuremaps = activation.shape[3]
    plt.figure(plt_num, figsize=(15,15))
    for featuremap in range(featuremaps):
        plt.subplot(6,8, featuremap+1) # sets the number of feature maps to show on each row and column
        plt.title('FeatureMap ' + str(featuremap)) # displays the feature map number
        # BUG FIX: the original used bitwise `&`, which binds tighter than `!=`
        # and so tested `activation_min != (-1 & activation_max) != -1` instead
        # of "both bounds were supplied"; logical `and` expresses the intent.
        if activation_min != -1 and activation_max != -1:
            plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", vmin =activation_min, vmax=activation_max, cmap="gray")
        elif activation_max != -1:
            plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", vmax=activation_max, cmap="gray")
        elif activation_min !=-1:
            plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", vmin=activation_min, cmap="gray")
        else:
            plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", cmap="gray")
```
| github_jupyter |
```
%matplotlib inline
```
# Probability Calibration for 3-class classification
This example illustrates how sigmoid calibration changes predicted
probabilities for a 3-class classification problem. Illustrated is the
standard 2-simplex, where the three corners correspond to the three classes.
Arrows point from the probability vectors predicted by an uncalibrated
classifier to the probability vectors predicted by the same classifier after
sigmoid calibration on a hold-out validation set. Colors indicate the true
class of an instance (red: class 1, green: class 2, blue: class 3).
The base classifier is a random forest classifier with 25 base estimators
(trees). If this classifier is trained on all 800 training datapoints, it is
overly confident in its predictions and thus incurs a large log-loss.
Calibrating an identical classifier, which was trained on 600 datapoints, with
method='sigmoid' on the remaining 200 datapoints reduces the confidence of the
predictions, i.e., moves the probability vectors from the edges of the simplex
towards the center. This calibration results in a lower log-loss. Note that an
alternative would have been to increase the number of base estimators which
would have resulted in a similar decrease in log-loss.
```
print(__doc__)
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD Style.
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.ensemble import RandomForestClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import log_loss
# Fixed seed so the blobs and forests are reproducible across runs.
np.random.seed(0)
# Generate data: 1000 points, 3 heavily-overlapping blob classes (cluster_std=5).
X, y = make_blobs(n_samples=1000, n_features=2, random_state=42,
cluster_std=5.0)
# Splits: 600 train / 200 calibration hold-out / 200 test.
X_train, y_train = X[:600], y[:600]
X_valid, y_valid = X[600:800], y[600:800]
X_train_valid, y_train_valid = X[:800], y[:800]
X_test, y_test = X[800:], y[800:]
# Train uncalibrated random forest classifier on whole train and validation
# data and evaluate on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train_valid, y_train_valid)
clf_probs = clf.predict_proba(X_test)
score = log_loss(y_test, clf_probs)  # log-loss of the overconfident forest
# Train random forest classifier, calibrate on validation data and evaluate
# on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)  # re-used below as the arrow tails
# cv="prefit": clf is already fitted, only the sigmoid calibrator is fit here.
sig_clf = CalibratedClassifierCV(clf, method="sigmoid", cv="prefit")
sig_clf.fit(X_valid, y_valid)
sig_clf_probs = sig_clf.predict_proba(X_test)
sig_score = log_loss(y_test, sig_clf_probs)
# Plot changes in predicted probabilities via arrows: each arrow runs from the
# uncalibrated probability vector to the calibrated one, colored by true class.
plt.figure(0)
colors = ["r", "g", "b"]
for i in range(clf_probs.shape[0]):
    plt.arrow(clf_probs[i, 0], clf_probs[i, 1],
              sig_clf_probs[i, 0] - clf_probs[i, 0],
              sig_clf_probs[i, 1] - clf_probs[i, 1],
              color=colors[y_test[i]], head_width=1e-2)
# Plot perfect predictions (the three corners of the simplex)
plt.plot([1.0], [0.0], 'ro', ms=20, label="Class 1")
plt.plot([0.0], [1.0], 'go', ms=20, label="Class 2")
plt.plot([0.0], [0.0], 'bo', ms=20, label="Class 3")
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex")
# Annotate landmark points on the simplex
plt.annotate(r'($\frac{1}{3}$, $\frac{1}{3}$, $\frac{1}{3}$)',
             xy=(1.0/3, 1.0/3), xytext=(1.0/3, .23), xycoords='data',
             arrowprops=dict(facecolor='black', shrink=0.05),
             horizontalalignment='center', verticalalignment='center')
plt.plot([1.0/3], [1.0/3], 'ko', ms=5)
plt.annotate(r'($\frac{1}{2}$, $0$, $\frac{1}{2}$)',
             xy=(.5, .0), xytext=(.5, .1), xycoords='data',
             arrowprops=dict(facecolor='black', shrink=0.05),
             horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $\frac{1}{2}$, $\frac{1}{2}$)',
             xy=(.0, .5), xytext=(.1, .5), xycoords='data',
             arrowprops=dict(facecolor='black', shrink=0.05),
             horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($\frac{1}{2}$, $\frac{1}{2}$, $0$)',
             xy=(.5, .5), xytext=(.6, .6), xycoords='data',
             arrowprops=dict(facecolor='black', shrink=0.05),
             horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $0$, $1$)',
             xy=(0, 0), xytext=(.1, .1), xycoords='data',
             arrowprops=dict(facecolor='black', shrink=0.05),
             horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($1$, $0$, $0$)',
             xy=(1, 0), xytext=(1, .1), xycoords='data',
             arrowprops=dict(facecolor='black', shrink=0.05),
             horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $1$, $0$)',
             xy=(0, 1), xytext=(.1, 1), xycoords='data',
             arrowprops=dict(facecolor='black', shrink=0.05),
             horizontalalignment='center', verticalalignment='center')
# Disable the default axes grid before drawing the custom simplex grid.
# BUG FIX: plt.grid("off") passes a truthy string, which matplotlib does not
# interpret as "off"; pass the boolean False instead.
plt.grid(False)
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
    plt.plot([0, x], [x, 0], 'k', alpha=0.2)
    plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2)
    plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2)
plt.title("Change of predicted probabilities after sigmoid calibration")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.legend(loc="best")
print("Log-loss of")
print(" * uncalibrated classifier trained on 800 datapoints: %.3f "
      % score)
# FIX: "datapoint" -> "datapoints" in the user-facing message.
print(" * classifier trained on 600 datapoints and calibrated on "
      "200 datapoints: %.3f" % sig_score)
# Illustrate calibrator: show where each point of a regular probability grid
# is moved by the sigmoid calibration.
plt.figure(1)
# Generate grid over the 2-simplex of probability vectors.
p1d = np.linspace(0, 1, 20)
p0, p1 = np.meshgrid(p1d, p1d)
p2 = 1 - p0 - p1
p = np.c_[p0.ravel(), p1.ravel(), p2.ravel()]
p = p[p[:, 2] >= 0]  # keep only points inside the simplex
# Apply the three per-class sigmoid calibrators, then renormalize to sum to 1.
calibrated_classifier = sig_clf.calibrated_classifiers_[0]
prediction = np.vstack([calibrator.predict(this_p)
                        for calibrator, this_p in
                        zip(calibrated_classifier.calibrators_, p.T)]).T
prediction /= prediction.sum(axis=1)[:, None]
# Plot modifications of calibrator
for i in range(prediction.shape[0]):
    plt.arrow(p[i, 0], p[i, 1],
              prediction[i, 0] - p[i, 0], prediction[i, 1] - p[i, 1],
              head_width=1e-2, color=colors[np.argmax(p[i])])
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex")
# BUG FIX: plt.grid("off") passes a truthy string, which matplotlib does not
# interpret as "off"; pass the boolean False instead.
plt.grid(False)
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
    plt.plot([0, x], [x, 0], 'k', alpha=0.2)
    plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2)
    plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2)
plt.title("Illustration of sigmoid calibrator")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.show()
```
| github_jupyter |
# Self-Driving Car Engineer Nanodegree
## Project: **Finding Lane Lines on the Road**
***
In this project, you will use the tools you learned about in the lesson to identify lane lines on the road. You can develop your pipeline on a series of individual images, and later apply the result to a video stream (really just a series of images). Check out the video clip "raw-lines-example.mp4" (also contained in this repository) to see what the output should look like after using the helper functions below.
Once you have a result that looks roughly like "raw-lines-example.mp4", you'll need to get creative and try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video "P1_example.mp4". Ultimately, you would like to draw just one line for the left side of the lane, and one for the right.
In addition to implementing code, there is a brief writeup to complete. The writeup should be completed in a separate file, which can be either a markdown file or a pdf document. There is a [write up template](https://github.com/udacity/CarND-LaneLines-P1/blob/master/writeup_template.md) that can be used to guide the writing process. Completing both the code in the Ipython notebook and the writeup template will cover all of the [rubric points](https://review.udacity.com/#!/rubrics/322/view) for this project.
---
Let's have a look at our first image called 'test_images/solidWhiteRight.jpg'. Run the 2 cells below (hit Shift-Enter or the "play" button above) to display the image.
**Note: If, at any point, you encounter frozen display windows or other confounding issues, you can always start again with a clean slate by going to the "Kernel" menu above and selecting "Restart & Clear Output".**
---
**The tools you have are color selection, region of interest selection, grayscaling, Gaussian smoothing, Canny Edge Detection and Hough Transform line detection. You are also free to explore and try other techniques that were not presented in the lesson. Your goal is to piece together a pipeline to detect the line segments in the image, then average/extrapolate them and draw them onto the image for display (as below). Once you have a working pipeline, try it out on the video stream below.**
---
<figure>
<img src="examples/line-segments-example.jpg" width="380" alt="Combined Image" />
<figcaption>
<p></p>
<p style="text-align: center;"> Your output should look something like this (above) after detecting line segments using the helper functions below </p>
</figcaption>
</figure>
<p></p>
<figure>
<img src="examples/laneLines_thirdPass.jpg" width="380" alt="Combined Image" />
<figcaption>
<p></p>
<p style="text-align: center;"> Your goal is to connect/average/extrapolate line segments to get output like this</p>
</figcaption>
</figure>
**Run the cell below to import some packages. If you get an `import error` for a package you've already installed, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, consult the forums for more troubleshooting tips.**
## Import Packages
```
#importing some useful packages
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
%matplotlib inline
```
## Read in an Image
```
# Read in one sample frame to develop the pipeline against.
image = mpimg.imread('test_images/solidWhiteRight.jpg')
# Print some stats and display it: expect an HxWx3 uint8 RGB array.
print('This image is:', type(image), 'with dimensions:', image.shape)
plt.imshow(image)  # if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray')
```
## Ideas for Lane Detection Pipeline
**Some OpenCV functions (beyond those introduced in the lesson) that might be useful for this project are:**
`cv2.inRange()` for color selection
`cv2.fillPoly()` for regions selection
`cv2.line()` to draw lines on an image given endpoints
`cv2.addWeighted()` to coadd / overlay two images
`cv2.cvtColor()` to grayscale or change color
`cv2.imwrite()` to output images to file
`cv2.bitwise_and()` to apply a mask to an image
**Check out the OpenCV documentation to learn about these and discover even more awesome functionality!**
## Helper Functions
Below are some helper functions to help get you started. They should look familiar from the lesson!
```
import math
def grayscale(img):
    """Convert an RGB image to a single-channel grayscale image.

    The result has only one color channel, so display it with
    plt.imshow(gray, cmap='gray').
    """
    # Use cv2.COLOR_BGR2GRAY instead if the image was loaded with cv2.imread().
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    return gray
def canny(img, low_threshold, high_threshold):
    """Detect edges with the Canny transform using the given hysteresis thresholds."""
    edges = cv2.Canny(img, low_threshold, high_threshold)
    return edges
def gaussian_blur(img, kernel_size):
    """Smooth the image with a square Gaussian kernel of side ``kernel_size``."""
    blurred = cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
    return blurred
def region_of_interest(img, vertices):
    """Black out everything outside the polygon formed from `vertices`.

    Only the region of the image inside the polygon is kept; the rest is
    set to black. `vertices` should be a numpy array of integer points.
    """
    mask = np.zeros_like(img)
    # The fill value must match the image depth: one 255 per channel for a
    # color image, a single 255 for grayscale.
    if len(img.shape) > 2:
        fill_color = (255,) * img.shape[2]
    else:
        fill_color = 255
    # Paint the polygon white on the mask, then keep only masked pixels.
    cv2.fillPoly(mask, vertices, fill_color)
    return cv2.bitwise_and(img, mask)
def draw_lines(img, lines, color=[255, 0, 0], thickness=2):
    """Draw the detected Hough segments onto `img` (mutates the image in place).

    Segments are also bucketed into left/right lane candidates by slope sign
    ((y2-y1)/(x2-x1) < 0 is the left lane in image coordinates) so their
    average slope/center can later be used to extrapolate one line per lane;
    the extrapolated drawing itself is still disabled (see TODO below).
    To make the lines semi-transparent, combine this function with
    weighted_img().
    """
    right_slopes, right_centers = [], []
    left_slopes, left_centers = [], []
    for line in lines:
        for x1, y1, x2, y2 in line:
            cv2.line(img, (x1, y1), (x2, y2), color, thickness)
            # BUG FIX: skip vertical segments — their slope is undefined and
            # (with numpy ints) previously produced inf, corrupting averages.
            if x2 == x1:
                continue
            slope = (y2 - y1) / (x2 - x1)
            center = [(x2 - x1) / 2, (y2 - y1) / 2]
            if slope < 0:
                left_slopes.append(slope)
                left_centers.append(center)
            else:
                right_slopes.append(slope)
                right_centers.append(center)
    # BUG FIX: average each side only when segments were found; the original
    # divided by len(...) unconditionally and raised ZeroDivisionError on
    # frames with no left or no right candidates. The unused newx1r/newx2r
    # extrapolation intermediates (never drawn, and able to raise
    # OverflowError via int(inf)) were removed.
    if right_slopes:
        r_slope = np.sum(right_slopes) / len(right_slopes)
        r_center = np.divide(np.sum(right_centers, axis=0), len(right_centers))
    if left_slopes:
        l_slope = np.sum(left_slopes) / len(left_slopes)
        l_center = np.divide(np.sum(left_centers, axis=0), len(left_centers))
    # TODO: extrapolate one line per lane from the averaged slope/center
    # between the image bottom and ~60% of its height, then draw it, e.g.:
    # cv2.line(img, (x_bottom, img.shape[0]), (x_top, int(img.shape[0]*0.6)), color, 10)
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
    """Run a probabilistic Hough transform on a Canny edge image.

    `img` should be the output of a Canny transform. Returns a new black
    color image with the detected line segments drawn on it.
    """
    segments = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]),
                               minLineLength=min_line_len,
                               maxLineGap=max_line_gap)
    canvas = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
    draw_lines(canvas, segments)
    return canvas
# Python 3 has support for cool math symbols.
def weighted_img(img, initial_img, α=0.8, β=1., γ=0.):
    """Blend the lane-line overlay with the original frame.

    `img` is the output of hough_lines(): an all-black image with lines
    drawn on it.  `initial_img` is the unprocessed source frame.

    The result is computed as: initial_img * α + img * β + γ
    NOTE: both images must have the same shape!
    """
    blended = cv2.addWeighted(initial_img, α, img, β, γ)
    return blended
```
## Build a Lane Finding Pipeline
## Test Images
Build your pipeline to work on the images in the directory "test_images"
**You should make sure your pipeline works well on these images before you try the videos.**
```
import os

# Run the lane-finding pipeline on every raw image in test_images/,
# skipping output files generated by a previous run.
for fname in os.listdir("test_images/"):
    if fname.startswith("output"):
        continue
    img = mpimg.imread("test_images/" + fname)
    blurred = gaussian_blur(grayscale(img), 3)
    edges = canny(blurred, 30, 150)
    height, width = img.shape[0], img.shape[1]
    # Trapezoidal region of interest covering the lane ahead of the car.
    vertices = np.array([[(.51 * width, height * .58),
                          (.49 * width, height * .58),
                          (0, height),
                          (width, height)]], dtype=np.int32)
    masked = region_of_interest(edges, vertices)
    lane_lines = hough_lines(masked, 1, np.pi / 180, 35, 5, 2)
    result = weighted_img(lane_lines, img, α=0.8, β=1.0)
    plt.imshow(result, cmap="gray")
    # matplotlib produced RGB; OpenCV writes BGR — swap channels before saving.
    r, g, b = cv2.split(result)
    cv2.imwrite("test_images/output_" + fname, cv2.merge((b, g, r)))
```
Build the pipeline and run your solution on all test_images. Make copies into the `test_images_output` directory, and you can use the images in your writeup report.
Try tuning the various parameters, especially the low and high Canny thresholds as well as the Hough lines parameters.
```
# TODO: Build your pipeline that will draw lane lines on the test_images
# then save them to the test_images_output directory.
```
## Test on Videos
You know what's cooler than drawing lanes over images? Drawing lanes over video!
We can test our solution on two provided videos:
`solidWhiteRight.mp4`
`solidYellowLeft.mp4`
**Note: if you get an import error when you run the next cell, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, consult the forums for more troubleshooting tips.**
**If you get an error that looks like this:**
```
NeedDownloadError: Need ffmpeg exe.
You can download it by calling:
imageio.plugins.ffmpeg.download()
```
**Follow the instructions in the error message and check out [this forum post](https://discussions.udacity.com/t/project-error-of-test-on-videos/274082) for more troubleshooting tips across operating systems.**
```
# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
from IPython.display import HTML
def process_image(image):
    """Lane-finding pipeline for a single video frame.

    Returns a colour (3-channel) image with the detected lane lines
    blended on top of the input frame, as required by fl_image below.
    """
    blurred = gaussian_blur(grayscale(image), 5)
    edges = canny(blurred, 30, 150)
    height, width = image.shape[0], image.shape[1]
    # Trapezoidal region of interest around the lane ahead.
    roi = np.array([[(.51 * width, height * .58),
                     (.49 * width, height * .58),
                     (0, height),
                     (width, height)]], dtype=np.int32)
    masked = region_of_interest(edges, roi)
    lane_lines = hough_lines(masked, 1, np.pi / 180, 35, 5, 2)
    return weighted_img(lane_lines, image, α=0.8, β=1.0)
```
Let's try the one with the solid white lane on the right first ...
```
# Render the solid-white-lane test video with lane annotations.
white_output = 'test_videos_output/Output_solidWhiteRight.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(0,5)
clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4")
# process_image is applied to every frame of the clip.
white_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!
%time white_clip.write_videofile(white_output, audio=False)
```
Play the video inline, or if you prefer find the video in your filesystem (should be in the same directory) and play it in your video player of choice.
```
# Embed the rendered output video inline in the notebook.
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(white_output))
```
## Improve the draw_lines() function
**At this point, if you were successful with making the pipeline and tuning parameters, you probably have the Hough line segments drawn onto the road, but what about identifying the full extent of the lane and marking it clearly as in the example video (P1_example.mp4)? Think about defining a line to run the full length of the visible lane based on the line segments you identified with the Hough Transform. As mentioned previously, try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video "P1_example.mp4".**
**Go back and modify your draw_lines function accordingly and try re-running your pipeline. The new output should draw a single, solid line over the left lane line and a single, solid line over the right lane line. The lines should start from the bottom of the image and extend out to the top of the region of interest.**
Now for the one with the solid yellow lane on the left. This one's more tricky!
```
# Render the solid-yellow-lane test video and display it inline.
yellow_output = 'test_videos_output/Output_solidYellowLeft.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4').subclip(0,5)
clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4')
# Apply the frame-level pipeline to every frame of the clip.
yellow_clip = clip2.fl_image(process_image)
%time yellow_clip.write_videofile(yellow_output, audio=False)
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(yellow_output))
```
## Writeup and Submission
If you're satisfied with your video outputs, it's time to make the report writeup in a pdf or markdown file. Once you have this Ipython notebook ready along with the writeup, it's time to submit for review! Here is a [link](https://github.com/udacity/CarND-LaneLines-P1/blob/master/writeup_template.md) to the writeup template file.
## Optional Challenge
Try your lane finding pipeline on the video below. Does it still work? Can you figure out a way to make it more robust? If you're up for the challenge, modify your pipeline so it works with this video and submit it along with the rest of your project!
```
# Optional challenge video: render with the same pipeline and display inline.
challenge_output = 'test_videos_output/challenge.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip3 = VideoFileClip('test_videos/challenge.mp4').subclip(0,5)
clip3 = VideoFileClip('test_videos/challenge.mp4')
challenge_clip = clip3.fl_image(process_image)
%time challenge_clip.write_videofile(challenge_output, audio=False)
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(challenge_output))
```
| github_jupyter |
# Mask R-CNN Tabletop Demo
A quick intro to using the pre-trained model to detect and segment objects.
```
import os
import sys
import random
import math
import numpy as np
import skimage.io
import matplotlib
import matplotlib.pyplot as plt
# Root directory of the project
ROOT_DIR = os.path.abspath("../")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn import utils
import mrcnn.model as modellib
from mrcnn import visualize
# Import the tabletop dataset config
import humanoids_pouring.tabletop_bottles as tabletop
%matplotlib inline
# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
# Local path to trained weights file
TRAINED_MODEL_PATH = os.path.join(MODEL_DIR, "ycb_video_training20190617T2000", "mask_rcnn_ycb_video_training_0010.h5")
# Fail fast if the trained weight file is not on disk.
assert os.path.exists(TRAINED_MODEL_PATH)
# Directory of images to run detection on
IMAGE_DIR = os.path.join(ROOT_DIR, "images")
```
## Configurations
```
# Use the YCB-Video inference config (Tabletop variant kept commented for reference).
#config = tabletop.TabletopConfigInference()
config = tabletop.YCBVideoConfigInference()
# Keep only detections scoring at least 0.5.
config.DETECTION_MIN_CONFIDENCE = 0.5
config.display()
# Pin the visible GPU(s) declared in the config before the model is created.
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = config.GPU_ID
```
## Create Model and Load Trained Weights
```
# Create model object in inference mode.
model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)
# Load the YCB-Video/tabletop weights referenced by TRAINED_MODEL_PATH above
# (not MS-COCO weights, as an earlier version of this comment suggested).
model.load_weights(TRAINED_MODEL_PATH, by_name=True)
```
## Class Names
The model classifies objects and returns class IDs, which are integer values that identify each class. Some datasets assign integer values to their classes and some don't. For example, in the MS-COCO dataset, the 'person' class is 1 and 'teddy bear' is 88. The IDs are often sequential, but not always. The COCO dataset, for example, has classes associated with class IDs 70 and 72, but not 71.
To improve consistency, and to support training on data from multiple sources at the same time, our ```Dataset``` class assigns its own sequential integer IDs to each class. For example, if you load the COCO dataset using our ```Dataset``` class, the 'person' class would get class ID = 1 (just like COCO) and the 'teddy bear' class is 78 (different from COCO). Keep that in mind when mapping class IDs to class names.
To get the list of class names, you'd load the dataset and then use the ```class_names``` property like this.
```
# NOTE(review): the first two dataset_root_dir/dataset pairs below are
# immediately overwritten — only the final "bottles" pair takes effect.
dataset_root_dir = os.path.join(ROOT_DIR, "datasets", "tabletop_20K_YCBVideoClasses")
dataset = tabletop.TabletopDataset()
dataset_root_dir = os.path.join(ROOT_DIR, "datasets", "YCB_Video_Dataset")
dataset = tabletop.YCBVideoDataset()
dataset_root_dir = os.path.join(ROOT_DIR, "datasets", "bottles")
dataset = tabletop.YCBVideoDataset()
# Populate dataset.class_names from the dataset on disk.
dataset.load_class_names(dataset_root_dir)
```
### Run Object Detection
```
# Load the validation split (the original cell constructed it twice; once is enough).
dataset_val = tabletop.YCBVideoDataset()
dataset_val.load_dataset(dataset_root_dir, 'val')

# Pick a random validation image.
# random.randrange(n) yields 0..n-1; the previous random.randint(0, n) is
# inclusive on BOTH ends and could raise IndexError when it drew n itself.
random_val_img_info = dataset_val.image_info[random.randrange(len(dataset_val.image_info))]
image = skimage.io.imread(random_val_img_info['path'])

# Alternative inputs kept for convenience: a random or fixed file from IMAGE_DIR.
file_names = next(os.walk(IMAGE_DIR))[2]
#image = skimage.io.imread(os.path.join(IMAGE_DIR, random.choice(file_names)))
#image = skimage.io.imread(os.path.join(IMAGE_DIR, '000001-color.png'))

# Run detection
results = model.detect([image], verbose=1)

# Visualize results
r = results[0]
visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],
                            dataset.class_names, r['scores'])
```
| github_jupyter |
<a href="https://colab.research.google.com/github/AI4Finance-LLC/FinRL-Library/blob/master/FinRL_portfolio_allocation_NeurIPS_2020.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Deep Reinforcement Learning for Stock Trading from Scratch: Portfolio Allocation
Tutorials to use OpenAI DRL to perform portfolio allocation in one Jupyter Notebook | Presented at NeurIPS 2020: Deep RL Workshop
* This blog is based on our paper: FinRL: A Deep Reinforcement Learning Library for Automated Stock Trading in Quantitative Finance, presented at NeurIPS 2020: Deep RL Workshop.
* Check out medium blog for detailed explanations:
* Please report any issues to our Github: https://github.com/AI4Finance-LLC/FinRL-Library/issues
* **Pytorch Version**
# Content
* [1. Problem Definition](#0)
* [2. Getting Started - Load Python packages](#1)
* [2.1. Install Packages](#1.1)
* [2.2. Check Additional Packages](#1.2)
* [2.3. Import Packages](#1.3)
* [2.4. Create Folders](#1.4)
* [3. Download Data](#2)
* [4. Preprocess Data](#3)
* [4.1. Technical Indicators](#3.1)
* [4.2. Perform Feature Engineering](#3.2)
* [5.Build Environment](#4)
* [5.1. Training & Trade Data Split](#4.1)
* [5.2. User-defined Environment](#4.2)
* [5.3. Initialize Environment](#4.3)
* [6.Implement DRL Algorithms](#5)
* [7.Backtesting Performance](#6)
* [7.1. BackTestStats](#6.1)
* [7.2. BackTestPlot](#6.2)
* [7.3. Baseline Stats](#6.3)
* [7.3. Compare to Stock Market Index](#6.4)
<a id='0'></a>
# Part 1. Problem Definition
This problem is to design an automated trading solution for single stock trading. We model the stock trading process as a Markov Decision Process (MDP). We then formulate our trading goal as a maximization problem.
The algorithm is trained using Deep Reinforcement Learning (DRL) algorithms and the components of the reinforcement learning environment are:
* Action: The action space describes the allowed actions that the agent interacts with the
environment. Normally, a ∈ A includes three actions: a ∈ {−1, 0, 1}, where −1, 0, 1 represent
selling, holding, and buying one stock. Also, an action can be carried upon multiple shares. We use
an action space {−k, ..., −1, 0, 1, ..., k}, where k denotes the number of shares. For example, "Buy
10 shares of AAPL" or "Sell 10 shares of AAPL" are 10 or −10, respectively
* Reward function: r(s, a, s′) is the incentive mechanism for an agent to learn a better action. The change of the portfolio value when action a is taken at state s and arriving at new state s', i.e., r(s, a, s′) = v′ − v, where v′ and v represent the portfolio
values at state s′ and s, respectively
* State: The state space describes the observations that the agent receives from the environment. Just as a human trader needs to analyze various information before executing a trade, so
our trading agent observes many different features to better learn in an interactive environment.
* Environment: Dow 30 consituents
The data of the single stock that we will be using for this case study is obtained from Yahoo Finance API. The data contains Open-High-Low-Close price and volume.
<a id='1'></a>
# Part 2. Getting Started- Load Python Packages
<a id='1.1'></a>
## 2.1. Install all the packages through FinRL library
```
## install finrl library
!pip install git+https://github.com/AI4Finance-LLC/FinRL-Library.git
```
<a id='1.2'></a>
## 2.2. Check if the additional packages needed are present, if not install them.
* Yahoo Finance API
* pandas
* numpy
* matplotlib
* stockstats
* OpenAI gym
* stable-baselines
* tensorflow
* pyfolio
<a id='1.3'></a>
## 2.3. Import Packages
```
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# NOTE(review): selecting the 'Agg' backend AFTER pyplot has already been
# imported normally has no effect; backend selection belongs before the
# pyplot import — confirm whether this line is intentional or leftover.
matplotlib.use('Agg')
import datetime
from finrl.config import config
from finrl.marketdata.yahoodownloader import YahooDownloader
from finrl.preprocessing.preprocessors import FeatureEngineer
from finrl.preprocessing.data import data_split
from finrl.env.env_portfolio import StockPortfolioEnv
from finrl.model.models import DRLAgent
from finrl.trade.backtest import backtest_stats, backtest_plot, get_daily_return, get_baseline,convert_daily_return_to_pyfolio_ts
import sys
sys.path.append("../FinRL-Library")
```
<a id='1.4'></a>
## 2.4. Create Folders
```
import os

# Create the working directories used by FinRL.
# os.makedirs(..., exist_ok=True) replaces the racy check-then-create
# pattern (os.path.exists + makedirs), which could fail if a directory
# appeared between the check and the creation.
for _dir in (config.DATA_SAVE_DIR,
             config.TRAINED_MODEL_DIR,
             config.TENSORBOARD_LOG_DIR,
             config.RESULTS_DIR):
    os.makedirs("./" + _dir, exist_ok=True)
```
<a id='2'></a>
# Part 3. Download Data
Yahoo Finance is a website that provides stock data, financial news, financial reports, etc. All the data provided by Yahoo Finance is free.
* FinRL uses a class **YahooDownloader** to fetch data from Yahoo Finance API
* Call Limit: Using the Public API (without authentication), you are limited to 2,000 requests per hour per IP (or up to a total of 48,000 requests a day).
```
# Ticker universe: JII (Jakarta Islamic Index) constituents from the FinRL config.
selected_ticker = config.JII_TICKER
print(selected_ticker)
# Load cached price data; the commented YahooDownloader call below is the
# one originally used to produce JII_data.csv.
df = pd.read_csv('JII_data.csv')
# df = YahooDownloader(start_date = '2008-01-01',
# end_date = '2021-01-01',
# ticker_list = selected_ticker).fetch_data()
df.tail(10)
df.shape
# df.to_csv('JII_data.csv', index=False)
```
# Part 4: Preprocess Data
Data preprocessing is a crucial step for training a high quality machine learning model. We need to check for missing data and do feature engineering in order to convert the data into a model-ready state.
* Add technical indicators. In practical trading, various information needs to be taken into account, for example the historical stock prices, current holding shares, technical indicators, etc. In this article, we demonstrate two trend-following technical indicators: MACD and RSI.
* Add turbulence index. Risk-aversion reflects whether an investor will choose to preserve the capital. It also influences one's trading strategy when facing different market volatility level. To control the risk in a worst-case scenario, such as financial crisis of 2007–2008, FinRL employs the financial turbulence index that measures extreme asset price fluctuation.
```
# Add technical indicators to the raw OHLCV data; the turbulence index and
# user-defined features are disabled for this run.
fe = FeatureEngineer(
use_technical_indicator=True,
use_turbulence=False,
user_defined_feature = False)
df = fe.preprocess_data(df)
# (Optional) re-index onto the full date x ticker grid, filling gaps with 0:
# import itertools
# list_ticker = df["tic"].unique().tolist()
# list_date = list(pd.date_range(df['date'].min(),df['date'].max()).astype(str))
# combination = list(itertools.product(list_date,list_ticker))
# processed_full = pd.DataFrame(combination,columns=["date","tic"]).merge(df,on=["date","tic"],how="left")
# processed_full = processed_full[processed_full['date'].isin(df['date'])]
# processed_full = processed_full.sort_values(['date','tic'])
# processed_full = processed_full.fillna(0)
# df = processed_full
df.shape
df.head()
```
## Add covariance matrix as states
```
# add covariance matrix as states
df=df.sort_values(['date','tic'],ignore_index=True)
# Shared integer day index: one value per trading date across all tickers.
df.index = df.date.factorize()[0]
cov_list = []
# look back is one year
lookback=246
for i in range(lookback,len(df.index.unique())):
# NOTE(review): .loc slicing is label-inclusive, so each window spans
# lookback+1 trading days ending at day i — confirm this is intended.
data_lookback = df.loc[i-lookback:i,:]
price_lookback=data_lookback.pivot_table(index = 'date',columns = 'tic', values = 'close')
return_lookback = price_lookback.pct_change().dropna()
covs = return_lookback.cov().values
cov_list.append(covs)
# One return-covariance matrix per date from `lookback` onward.
df_cov = pd.DataFrame({'date':df.date.unique()[lookback:],'cov_list':cov_list})
df = df.merge(df_cov, on='date')
df = df.sort_values(['date','tic']).reset_index(drop=True)
df.shape
df.head()
```
<a id='4'></a>
# Part 5. Design Environment
Considering the stochastic and interactive nature of the automated stock trading tasks, a financial task is modeled as a **Markov Decision Process (MDP)** problem. The training process involves observing stock price change, taking an action and reward's calculation to have the agent adjusting its strategy accordingly. By interacting with the environment, the trading agent will derive a trading strategy with the maximized rewards as time proceeds.
Our trading environments, based on OpenAI Gym framework, simulate live stock markets with real market data according to the principle of time-driven simulation.
The action space describes the allowed actions that the agent interacts with the environment. Normally, action a includes three actions: {-1, 0, 1}, where -1, 0, 1 represent selling, holding, and buying one share. Also, an action can be carried upon multiple shares. We use an action space {-k,…,-1, 0, 1, …, k}, where k denotes the number of shares to buy and -k denotes the number of shares to sell. For example, "Buy 10 shares of AAPL" or "Sell 10 shares of AAPL" are 10 or -10, respectively. The continuous action space needs to be normalized to [-1, 1], since the policy is defined on a Gaussian distribution, which needs to be normalized and symmetric.
## Training data split: 2009-01-01 to 2018-12-31
```
# In-sample split: 2009-01-01 (inclusive) to 2019-01-01 (exclusive).
train = data_split(df, '2009-01-01','2019-01-01')
#trade = data_split(df, '2020-01-01', config.END_DATE)
train.head()
```
## Environment for Portfolio Allocation
```
# DataFrame.shape is a property, not a method — the original df.shape()
# raised "TypeError: 'tuple' object is not callable".
df.shape

stock_dimension = len(train.tic.unique())
state_space = stock_dimension
print(f"Stock Dimension: {stock_dimension}, State Space: {state_space}")

# Parameters for the portfolio-allocation gym environment.
env_kwargs = {
    "hmax": 100,                      # max shares traded per action
    "initial_amount": 150000000,      # starting capital
    "transaction_cost_pct": 0.0018,   # proportional transaction cost
    "state_space": state_space,
    "stock_dim": stock_dimension,
    "tech_indicator_list": config.TECHNICAL_INDICATORS_LIST,
    "action_space": stock_dimension,
    "reward_scaling": 1e-4
}

e_train_gym = StockPortfolioEnv(df = train, **env_kwargs)
# Wrap as a stable-baselines vectorised environment.
env_train, _ = e_train_gym.get_sb_env()
print(type(env_train))
```
<a id='5'></a>
# Part 6: Implement DRL Algorithms
* The implementation of the DRL algorithms are based on **OpenAI Baselines** and **Stable Baselines**. Stable Baselines is a fork of OpenAI Baselines, with a major structural refactoring, and code cleanups.
* FinRL library includes fine-tuned standard DRL algorithms, such as DQN, DDPG,
Multi-Agent DDPG, PPO, SAC, A2C and TD3. We also allow users to
design their own DRL algorithms by adapting these DRL algorithms.
```
# initialize the DRL agent wrapper around the training environment
agent = DRLAgent(env = env_train)
```
### Model 1: **A2C**
```
agent = DRLAgent(env = env_train)
# A2C hyperparameters: 5-step rollouts, small entropy bonus, lr 2e-4.
A2C_PARAMS = {"n_steps": 5, "ent_coef": 0.005, "learning_rate": 0.0002}
model_a2c = agent.get_model(model_name="a2c",model_kwargs = A2C_PARAMS)
trained_a2c = agent.train_model(model=model_a2c,
tb_log_name='a2c',
total_timesteps=60000)
```
### Model 2: **PPO**
```
agent = DRLAgent(env = env_train)
# PPO hyperparameters: 2048-step rollouts, minibatch 128, lr 1e-4.
PPO_PARAMS = {
"n_steps": 2048,
"ent_coef": 0.005,
"learning_rate": 0.0001,
"batch_size": 128,
}
model_ppo = agent.get_model("ppo",model_kwargs = PPO_PARAMS)
trained_ppo = agent.train_model(model=model_ppo,
tb_log_name='ppo',
total_timesteps=80000)
```
### Model 3: **DDPG**
```
agent = DRLAgent(env = env_train)
# DDPG hyperparameters: replay buffer of 50k transitions, lr 1e-3.
DDPG_PARAMS = {"batch_size": 128, "buffer_size": 50000, "learning_rate": 0.001}
model_ddpg = agent.get_model("ddpg",model_kwargs = DDPG_PARAMS)
trained_ddpg = agent.train_model(model=model_ddpg,
tb_log_name='ddpg',
total_timesteps=50000)
```
### Model 4: **SAC**
```
agent = DRLAgent(env = env_train)
# SAC hyperparameters: auto-tuned entropy coefficient initialised at 0.1.
SAC_PARAMS = {
"batch_size": 128,
"buffer_size": 100000,
"learning_rate": 0.0003,
"learning_starts": 100,
"ent_coef": "auto_0.1",
}
model_sac = agent.get_model("sac",model_kwargs = SAC_PARAMS)
trained_sac = agent.train_model(model=model_sac,
tb_log_name='sac',
total_timesteps=50000)
```
## Trading
Assume that we have 150,000,000 initial capital at 2019-01-01. We use the trained A2C model to trade the selected stocks.
```
# Out-of-sample trading window: 2019-01-01 to 2021-01-01 (exclusive).
trade = data_split(df,'2019-01-01', '2021-01-01')
e_trade_gym = StockPortfolioEnv(df = trade, **env_kwargs)
trade.shape
# Generate daily portfolio returns and weights using the trained A2C model.
df_daily_return, df_actions = DRLAgent.DRL_prediction(model=trained_a2c,
environment = e_trade_gym)
df_daily_return.head()
df_actions.head()
df_actions.to_csv('df_actions.csv')
```
<a id='6'></a>
# Part 7: Backtest Our Strategy
Backtesting plays a key role in evaluating the performance of a trading strategy. Automated backtesting tool is preferred because it reduces the human error. We usually use the Quantopian pyfolio package to backtest our trading strategies. It is easy to use and consists of various individual plots that provide a comprehensive image of the performance of a trading strategy.
<a id='6.1'></a>
## 7.1 BackTestStats
pass in df_account_value, this information is stored in env class
```
from pyfolio import timeseries
# Convert the DRL daily-return series into a pyfolio-compatible time series.
DRL_strat = convert_daily_return_to_pyfolio_ts(df_daily_return)
perf_func = timeseries.perf_stats
# NOTE(review): factor_returns is set to the strategy itself, so any
# benchmark-relative statistics are computed against the strategy's own
# returns — confirm this is intended rather than a market index.
perf_stats_all = perf_func( returns=DRL_strat,
factor_returns=DRL_strat,
positions=None, transactions=None, turnover_denom="AGB")
print("==============DRL Strategy Stats===========")
perf_stats_all
```
<a id='6.2'></a>
## 7.2 BackTestPlot
```
import pyfolio
%matplotlib inline
# Benchmark: Dow Jones Industrial Average over the trading window.
# NOTE(review): the traded universe above is JII constituents — consider a
# matching Indonesian index as the baseline instead of ^DJI.
baseline_df = get_baseline(
ticker='^DJI', start='2019-01-01', end='2021-01-01'
)
baseline_returns = get_daily_return(baseline_df, value_col_name="close")
# Full pyfolio tear sheet comparing the DRL strategy against the baseline.
with pyfolio.plotting.plotting_context(font_scale=1.1):
pyfolio.create_full_tear_sheet(returns = DRL_strat,
benchmark_rets=baseline_returns, set_context=False)
```
| github_jupyter |
# VIPERS SHAM Project
This notebook is part of the VIPERS-SHAM project:
http://arxiv.org/abs/xxxxxxx
Copyright 2019 by Ben Granett, granett@gmail.com
All rights reserved.
This file is released under the "MIT License Agreement". Please see the LICENSE
file that should have been included as part of this package.
```
%matplotlib inline
import os
from matplotlib import pyplot as plt
plt.style.use("small.style")
from matplotlib.ticker import FormatStrFormatter,ScalarFormatter, MultipleLocator
from matplotlib import colors,cm
import logging
logging.basicConfig(level=logging.INFO)
from scipy import interpolate, integrate
import numpy as np
import growthcalc
import load
import emulator
# Galaxy samples: SDSS plus the four VIPERS luminosity bins.
samples = ['sdss','L1','L2','L3','L4']
# Effective redshift of each sample.
redshifts = {'sdss':.06, 'L1':0.6, 'L2':0.7, 'L3':0.8, 'L4':0.9}
# Minimum separation kept in the clustering fits (presumably Mpc/h — confirm).
rmin = 1
# Number of principal components used by the wp(r) emulator.
n_components = 2
# Relative singular-value cut used by chi2_svd below.
thresh = 0.1
def chi2_svd(d, cmat, thresh=0.1):
    """Chi-squared of residual vector `d` under covariance matrix `cmat`.

    The covariance is inverted through its singular-value decomposition;
    singular values smaller than `thresh` times the largest one are
    discarded to regularise the inversion.
    """
    _, sing, vh = np.linalg.svd(cmat)
    keep = np.abs(sing) > np.abs(sing).max() * thresh
    projected = np.dot(vh[keep], d)
    return np.sum(projected ** 2 / sing[keep])
def limits(x, y, t=1):
    """Locate the minimum of `y` and the bracketing indices where `y`
    first rises `t` above that minimum on each side.

    Returns (best, lower, upper); a side is None when the threshold is
    never crossed within the sampled range.
    """
    best = y.argmin()
    level = y[best] + t
    upper = best + y[best:].searchsorted(level)
    lower = best - y[:best][::-1].searchsorted(level)
    if lower < 0:
        lower = None
    if upper >= len(x):
        upper = None
    return best, lower, upper
# Load the SDSS projected correlation function and its covariance,
# keeping only separations above rmin.
r_sdss,wp_sdss,cov_sdss = load.load_sdss()
sel = r_sdss > rmin
r_sdss = r_sdss[sel]
wp_sdss = wp_sdss[sel]
cov_sdss = cov_sdss[sel,:][:,sel]
data = [(r_sdss, wp_sdss, cov_sdss)]
# Same for the four VIPERS samples read from disk.
for sample in samples[1:]:
r,wp = np.loadtxt('../data/vipers/wp_sM{sample}.txt'.format(sample=sample[1]), unpack=True)
cmat = np.loadtxt('../data/vipers/cov_{sample}.txt'.format(sample=sample))
sel = r > rmin
r = r[sel]
wp = wp[sel]
cmat = cmat[sel,:][:,sel]
data.append((r,wp,cmat))
# SHAM mock measurements at each simulation snapshot, keyed by scale factor.
# NOTE(review): `sham.keys()` followed by `.sort()` only works in Python 2
# (Python 3 returns a non-sortable view) — this notebook appears to be
# Python 2 throughout (see the print statements below).
shamdata = {}
for sample in ['sdss','L1','L2','L3','L4']:
sham = load.load_sham(sample=sample, template="../data/sham400/nz_{sample}/wp_snap{snapshot:7.5f}.txt")
snapshots = sham.keys()
snapshots.sort()
for key in snapshots:
r, wp = sham[key]
sel = r > rmin
r = r[sel]
wp = wp[sel]
if not sample in shamdata:
shamdata[sample] = []
shamdata[sample].append((key, r, wp))
# Build a wp(r) emulator per sample, interpolating over snapshot scale factor.
a_samples = []
interpolators = []
for key in samples:
y = []
x = []
for a,r,w in shamdata[key]:
sel = r > rmin
r = r[sel]
y.append(w[sel])
x.append(a)
y = np.array(y)
x = np.array(x)
f = emulator.WpInterpolator(x, r, y, n_components)
interpolators.append(f)
a_samples.append(1./(1+redshifts[key]))
a_samples = np.array(a_samples)
# Growth-of-structure calculator used to map sigma_8(z) <-> redshift.
G = growthcalc.Growth(amax=10)
# Figure 8: chi^2 versus snapshot redshift (left panel) and the inferred
# snapshot redshift / sigma_8(z) per sample (right panel).
plt.figure(figsize=(9,3))
markers = ('.','*','*','*','*')
left = plt.subplot(121)
right = plt.subplot(122)
left.set_xlabel("Snapshot redshift")
left.set_ylabel("$\chi^2$")
left.grid(True)
left.set_yscale('log')
left.yaxis.set_major_formatter(FormatStrFormatter('%g'))
left.xaxis.set_major_locator(MultipleLocator(0.2))
left.xaxis.set_minor_locator(MultipleLocator(0.1))
right.yaxis.set_minor_locator(MultipleLocator(0.1))
right.xaxis.set_minor_locator(MultipleLocator(0.1))
right.set_ylabel("Snapshot redshift")
right.set_xlabel("Sample redshift")
right.grid(True)
right.set_xlim(0,1.1)
right.set_ylim(0,1.1)
# Secondary axis showing sigma_8(z) on the right-hand panel.
right2 = right.twinx()
right2.set_ylabel("$\sigma_8(z)$")
lab_sig8 = np.arange(0.3,1.01,0.05)
lab_z = G.fid_inv(lab_sig8)
zz = np.linspace(-0.3,1.5,100)
# Reference curves for several growth-index gamma values.
for gamma in [0.4, 0.55, 0.7, 0.85]:
z_w = G.fid_inv(G(zz, gamma=gamma))
l, = right.plot(zz, z_w, c='grey', lw=1, zorder=5)
right.text(1.1, 1.15, "$\gamma=%3.2f$"%0.4, color='k', ha='right',va='center', rotation=25,zorder=5,fontsize=12)
right.text(1.1, 1.1, "$%3.2f$"%0.55, color='k', ha='right',va='center', rotation=24,zorder=5,fontsize=12)
right.text(1.1, 0.99, "$%3.2f$"%0.7, color='k', ha='right',va='center', rotation=22,zorder=5,fontsize=12)
right.text(1.1, 0.81,"$%3.2f$"%0.85, color='k', ha='right',va='center', rotation=20,zorder=5,fontsize=12)
# NOTE(review): Python 2 print statement (no parentheses).
print zip(lab_z,lab_sig8)
right2.set_yticks(lab_z)
right2.set_yticklabels("%3.2f"%x for x in lab_sig8)
right2.set_ylim(0, 1.2)
right2.set_xlim(-0.3, 1.5)
right.set_xlim(0,1.1)
right.set_ylim(-0.3,1.5)
right.set_xticks([0.2,0.4,0.6,0.8,1.])
# Per-sample chi^2 curve (emulated wp) plus the raw per-snapshot points.
for i,sample in enumerate(samples):
f = interpolators[i]
chi2 = []
r,wp,cmat = data[i]
for z in zz:
wpsham = f(1./(1+z))
d = wp - wpsham
c = chi2_svd(d, cmat, thresh=thresh)
chi2.append(c)
chi2 = np.array(chi2)
like = np.exp(-0.5*(chi2-chi2.min()))
print "min chi2",sample,chi2.min()
lines = left.plot(zz,chi2)
chi2_ = []
zcent = []
for asham,rsham,wpsham in shamdata[sample]:
d = wp - wpsham
c = chi2_svd(d, cmat, thresh=thresh)
chi2_.append(c)
zcent.append(1./asham - 1)
chi2_ = np.array(chi2_)
print "min chi2",sample,chi2_.min()
left.scatter(zcent,chi2_, marker=markers[i], color=lines[0].get_color(),zorder=10)
j = chi2.argmin()
# Panel labels positioned by hand for each sample.
if sample=='sdss':
left.text(-0.05,1.5,"SDSS",color=lines[0].get_color(),va='bottom',ha='center',fontsize=12)
right.text(.08, -0.08, "SDSS", color=lines[0].get_color(),va='center',ha='left',fontsize=12)
elif sample=='L1':
left.text(zz[-1],chi2[-1]*1.1,'M1',color=lines[0].get_color(),va='bottom',ha='right',fontsize=12)
right.text(0.6,0.25,"M1", color=lines[0].get_color(),va='bottom',ha='center',fontsize=12)
elif sample=='L2':
left.text(zz[j]+0.08,chi2[j],'M2',color=lines[0].get_color(),va='bottom',ha='left',fontsize=12)
right.text(0.7,0.35,"M2", color=lines[0].get_color(),va='bottom',ha='center',fontsize=12)
elif sample=='L3':
left.text(zz[j], chi2[j]*0.9,'M3',color=lines[0].get_color(),va='top',ha='center',fontsize=12)
right.text(0.8,0.35,"M3", color=lines[0].get_color(),va='bottom',ha='center',fontsize=12)
elif sample=='L4':
left.text(zz[50],chi2[50]*1.1,'M4',color=lines[0].get_color(),va='bottom',ha='left',fontsize=12)
right.text(0.9,0.6,"M4", color=lines[0].get_color(),va='bottom',ha='center',fontsize=12)
# limits() returns (best index, lower index, upper index); None marks an
# uncrossed side, rendered as an arrow (one-sided limit) instead of a bar.
a,b,c = limits(zz, chi2)
zobs = redshifts[sample]
if b is None: # upper limit
logging.warning("upper limit! %s %s %s",a,b,c)
pass
elif c is None: # lower limit
logging.warning("lower limit! %s %s %s",a,b,c)
plt.arrow(zobs, zz[b], 0, 1.2-zz[b], lw=2.5, head_width=.015, head_length=0.03, color=lines[0].get_color(), zorder=10)
else:
right.plot([zobs, zobs], [zz[b], zz[c]], lw=3,color=lines[0].get_color(), zorder=10)
right.scatter(zobs, zz[a], marker=markers[i], color=lines[0].get_color(),zorder=10)
right.set_yticks([-0.2,0,0.2,0.4,0.6,0.8,1.0,1.2,1.4])
left.set_ylim(0.04, 50)
right.set_ylim(-0.3,1.5)
right2.set_ylim(-0.3,1.5)
plt.subplots_adjust(left=0.07,right=.92, bottom=0.18)
plt.savefig("../figs/fig8.pdf")
```
| github_jupyter |
# Keras tutorial - the Happy House
Welcome to the first assignment of week 2. In this assignment, you will:
1. Learn to use Keras, a high-level neural networks API (programming framework), written in Python and capable of running on top of several lower-level frameworks including TensorFlow and CNTK.
2. See how you can in a couple of hours build a deep learning algorithm.
Why are we using Keras? Keras was developed to enable deep learning engineers to build and experiment with different models very quickly. Just as TensorFlow is a higher-level framework than Python, Keras is an even higher-level framework and provides additional abstractions. Being able to go from idea to result with the least possible delay is key to finding good models. However, Keras is more restrictive than the lower-level frameworks, so there are some very complex models that you can implement in TensorFlow but not (without more difficulty) in Keras. That being said, Keras will work fine for many common models.
In this exercise, you'll work on the "Happy House" problem, which we'll explain below. Let's load the required packages and solve the problem of the Happy House!
```
import numpy as np
from keras import layers
from keras.layers import Input, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D
from keras.layers import AveragePooling2D, MaxPooling2D, Dropout, GlobalMaxPooling2D, GlobalAveragePooling2D
from keras.models import Model
from keras.preprocessing import image
from keras.utils import layer_utils
from keras.utils.data_utils import get_file
from keras.applications.imagenet_utils import preprocess_input
import pydot
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
from keras.utils import plot_model
from kt_utils import *
import keras.backend as K
K.set_image_data_format('channels_last')
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
%matplotlib inline
```
**Note**: As you can see, we've imported a lot of functions from Keras. You can use them easily just by calling them directly in the notebook. Ex: `X = Input(...)` or `X = ZeroPadding2D(...)`.
## 1 - The Happy House
For your next vacation, you decided to spend a week with five of your friends from school. It is a very convenient house with many things to do nearby. But the most important benefit is that everybody has committed to be happy when they are in the house. So anyone wanting to enter the house must prove their current state of happiness.
<img src="images/happy-house.jpg" style="width:350px;height:270px;">
<caption><center> <u> <font color='purple'> **Figure 1** </u><font color='purple'> : **the Happy House**</center></caption>
As a deep learning expert, to make sure the "Happy" rule is strictly applied, you are going to build an algorithm that uses pictures from the front door camera to check if the person is happy or not. The door should open only if the person is happy.
You have gathered pictures of your friends and yourself, taken by the front-door camera. The dataset is labeled.
<img src="images/house-members.png" style="width:550px;height:250px;">
Run the following code to normalize the dataset and learn about its shapes.
```
# Load the Happy House dataset via kt_utils: train/test examples plus class labels.
X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()
# Normalize image vectors
# Scale pixel values from [0, 255] down to [0, 1].
X_train = X_train_orig/255.
X_test = X_test_orig/255.
# Reshape
# Transpose labels so examples index the first axis (see shape printout below).
Y_train = Y_train_orig.T
Y_test = Y_test_orig.T
print ("number of training examples = " + str(X_train.shape[0]))
print ("number of test examples = " + str(X_test.shape[0]))
print ("X_train shape: " + str(X_train.shape))
print ("Y_train shape: " + str(Y_train.shape))
print ("X_test shape: " + str(X_test.shape))
print ("Y_test shape: " + str(Y_test.shape))
```
**Details of the "Happy" dataset**:
- Images are of shape (64,64,3)
- Training: 600 pictures
- Test: 150 pictures
It is now time to solve the "Happy" Challenge.
## 2 - Building a model in Keras
Keras is very good for rapid prototyping. In just a short time you will be able to build a model that achieves outstanding results.
Here is an example of a model in Keras:
```python
def model(input_shape):
# Define the input placeholder as a tensor with shape input_shape. Think of this as your input image!
X_input = Input(input_shape)
# Zero-Padding: pads the border of X_input with zeroes
X = ZeroPadding2D((3, 3))(X_input)
# CONV -> BN -> RELU Block applied to X
X = Conv2D(32, (7, 7), strides = (1, 1), name = 'conv0')(X)
X = BatchNormalization(axis = 3, name = 'bn0')(X)
X = Activation('relu')(X)
# MAXPOOL
X = MaxPooling2D((2, 2), name='max_pool')(X)
# FLATTEN X (means convert it to a vector) + FULLYCONNECTED
X = Flatten()(X)
X = Dense(1, activation='sigmoid', name='fc')(X)
# Create model. This creates your Keras model instance, you'll use this instance to train/test the model.
model = Model(inputs = X_input, outputs = X, name='HappyModel')
return model
```
Note that Keras uses a different convention with variable names than we've previously used with numpy and TensorFlow. In particular, rather than creating and assigning a new variable on each step of forward propagation such as `X`, `Z1`, `A1`, `Z2`, `A2`, etc. for the computations for the different layers, in Keras code each line above just reassigns `X` to a new value using `X = ...`. In other words, during each step of forward propagation, we are just writing the latest value in the computation into the same variable `X`. The only exception was `X_input`, which we kept separate and did not overwrite, since we needed it at the end to create the Keras model instance (`model = Model(inputs = X_input, ...)` above).
**Exercise**: Implement a `HappyModel()`. This assignment is more open-ended than most. We suggest that you start by implementing a model using the architecture we suggest, and run through the rest of this assignment using that as your initial model. But after that, come back and take initiative to try out other model architectures. For example, you might take inspiration from the model above, but then vary the network architecture and hyperparameters however you wish. You can also use other functions such as `AveragePooling2D()`, `GlobalMaxPooling2D()`, `Dropout()`.
**Note**: You have to be careful with your data's shapes. Use what you've learned in the videos to make sure your convolutional, pooling and fully-connected layers are adapted to the volumes you're applying it to.
```
# GRADED FUNCTION: HappyModel
def HappyModel(input_shape):
    """
    Implementation of the HappyModel.

    Arguments:
    input_shape -- shape of the images of the dataset

    Returns:
    model -- a Model() instance in Keras
    """
    ### START CODE HERE ###
    # Architecture: pad -> CONV -> BatchNorm -> ReLU -> MaxPool -> Flatten
    # -> single sigmoid unit for the binary happy / not-happy decision.
    inputs = Input(input_shape)

    net = ZeroPadding2D((3, 3))(inputs)
    net = Conv2D(32, (7, 7), strides=(1, 1), name='conv0')(net)
    net = BatchNormalization(axis=3, name='bn0')(net)
    net = Activation('relu')(net)
    net = MaxPooling2D((2, 2), name='max_pool')(net)
    net = Flatten()(net)
    net = Dense(1, activation='sigmoid', name='fc')(net)

    # Bind the input tensor and the final activation into a Keras Model.
    model = Model(inputs=inputs, outputs=net, name='HappyModel')
    ### END CODE HERE ###

    return model
```
You have now built a function to describe your model. To train and test this model, there are four steps in Keras:
1. Create the model by calling the function above
2. Compile the model by calling `model.compile(optimizer = "...", loss = "...", metrics = ["accuracy"])`
3. Train the model on train data by calling `model.fit(x = ..., y = ..., epochs = ..., batch_size = ...)`
4. Test the model on test data by calling `model.evaluate(x = ..., y = ...)`
If you want to know more about `model.compile()`, `model.fit()`, `model.evaluate()` and their arguments, refer to the official [Keras documentation](https://keras.io/models/model/).
**Exercise**: Implement step 1, i.e. create the model.
```
### START CODE HERE ### (1 line)
# Step 1: instantiate the model; input shape is a single image's (64, 64, 3).
happyModel = HappyModel(X_train.shape[1:])
### END CODE HERE ###
```
**Exercise**: Implement step 2, i.e. compile the model to configure the learning process. Choose the 3 arguments of `compile()` wisely. Hint: the Happy Challenge is a binary classification problem.
```
### START CODE HERE ### (1 line)
# Step 2: binary classification, so binary cross-entropy loss with the Adam optimizer.
happyModel.compile(optimizer = "Adam", loss = "binary_crossentropy", metrics = ["accuracy"])
### END CODE HERE ###
```
**Exercise**: Implement step 3, i.e. train the model. Choose the number of epochs and the batch size.
```
### START CODE HERE ### (1 line)
# Step 3: train for 15 epochs with mini-batches of 16 images.
happyModel.fit(x = X_train, y = Y_train, epochs = 15, batch_size = 16)
### END CODE HERE ###
```
Note that if you run `fit()` again, the `model` will continue to train with the parameters it has already learnt instead of reinitializing them.
**Exercise**: Implement step 4, i.e. test/evaluate the model.
```
### START CODE HERE ### (1 line)
# Step 4: evaluate on the held-out test set; returns [loss, accuracy]
# (accuracy is the metric requested in compile() above).
preds = happyModel.evaluate(x = X_test, y = Y_test)
### END CODE HERE ###
print()
print ("Loss = " + str(preds[0]))
print ("Test Accuracy = " + str(preds[1]))
```
If your `happyModel()` function worked, you should have observed much better than random-guessing (50%) accuracy on the train and test sets.
To give you a point of comparison, our model gets around **95% test accuracy in 40 epochs** (and 99% train accuracy) with a mini batch size of 16 and "adam" optimizer. But our model gets decent accuracy after just 2-5 epochs, so if you're comparing different models you can also train a variety of models on just a few epochs and see how they compare.
If you have not yet achieved a very good accuracy (let's say more than 80%), here are some things you can play around with to try to achieve it:
- Try using blocks of CONV->BATCHNORM->RELU such as:
```python
X = Conv2D(32, (3, 3), strides = (1, 1), name = 'conv0')(X)
X = BatchNormalization(axis = 3, name = 'bn0')(X)
X = Activation('relu')(X)
```
until your height and width dimensions are quite low and your number of channels quite large (≈32 for example). You are encoding useful information in a volume with a lot of channels. You can then flatten the volume and use a fully-connected layer.
- You can use MAXPOOL after such blocks. It will help you lower the dimension in height and width.
- Change your optimizer. We find Adam works well.
- If the model is struggling to run and you get memory issues, lower your batch_size (12 is usually a good compromise)
- Run on more epochs, until you see the train accuracy plateauing.
Even if you have achieved a good accuracy, please feel free to keep playing with your model to try to get even better results.
**Note**: If you perform hyperparameter tuning on your model, the test set actually becomes a dev set, and your model might end up overfitting to the test (dev) set. But just for the purpose of this assignment, we won't worry about that here.
## 3 - Conclusion
Congratulations, you have solved the Happy House challenge!
Now, you just need to link this model to the front-door camera of your house. We unfortunately won't go into the details of how to do that here.
<font color='blue'>
**What we would like you to remember from this assignment:**
- Keras is a tool we recommend for rapid prototyping. It allows you to quickly try out different model architectures. Are there any applications of deep learning to your daily life that you'd like to implement using Keras?
- Remember how to code a model in Keras and the four steps leading to the evaluation of your model on the test set. Create->Compile->Fit/Train->Evaluate/Test.
## 4 - Test with your own image (Optional)
Congratulations on finishing this assignment. You can now take a picture of your face and see if you could enter the Happy House. To do that:
1. Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub.
2. Add your image to this Jupyter Notebook's directory, in the "images" folder
3. Write your image's name in the following code
4. Run the code and check if the algorithm is right (0 is unhappy, 1 is happy)!
The training/test sets were quite similar; for example, all the pictures were taken against the same background (since a front door camera is always mounted in the same position). This makes the problem easier, but a model trained on this data may or may not work on your own data. But feel free to give it a try!
```
### START CODE HERE ###
img_path = 'images/my_image.jpg'   # path to your own test image
### END CODE HERE ###
# Load and resize the image to the 64x64 input size the model expects.
img = image.load_img(img_path, target_size=(64, 64))
imshow(img)
# Convert to an array, add a leading batch dimension, and apply Keras preprocessing.
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
# Per the assignment text: output 0 means unhappy, 1 means happy.
print(happyModel.predict(x))
```
## 5 - Other useful functions in Keras (Optional)
Two other basic features of Keras that you'll find useful are:
- `model.summary()`: prints the details of your layers in a table with the sizes of its inputs/outputs
- `plot_model()`: plots your graph in a nice layout. You can even save it as ".png" using SVG() if you'd like to share it on social media ;). It is saved in "File" then "Open..." in the upper bar of the notebook.
Run the following code.
```
# Print a layer-by-layer table of the architecture and parameter counts.
happyModel.summary()
# Save the architecture diagram to disk, then render it inline as SVG.
plot_model(happyModel, to_file='HappyModel.png')
SVG(model_to_dot(happyModel).create(prog='dot', format='svg'))
```
| github_jupyter |
# Introducing Pandas
Pandas is a Python library that makes handling tabular data easier. Since we're doing data science - this is something we'll use from time to time!
It's one of three libraries you'll encounter repeatedly in the field of data science:
## Pandas
Introduces "Data Frames" and "Series" that allow you to slice and dice rows and columns of information.
## NumPy
Usually you'll encounter "NumPy arrays", which are multi-dimensional array objects. It is easy to create a Pandas DataFrame from a NumPy array, and Pandas DataFrames can be cast as NumPy arrays. NumPy arrays are mainly important because of...
## Scikit_Learn
The machine learning library we'll use throughout this course is scikit_learn, or sklearn, and it generally takes NumPy arrays as its input.
So, a typical thing to do is to load, clean, and manipulate your input data using Pandas. Then convert your Pandas DataFrame into a NumPy array as it's being passed into some Scikit_Learn function. That conversion can often happen automatically.
Let's start by loading some comma-separated value data using Pandas into a DataFrame:
```
%matplotlib inline
import numpy as np
import pandas as pd
# Load the CSV of past hiring decisions into a DataFrame and preview the first rows.
df = pd.read_csv("PastHires.csv")
df.head()
```
head() is a handy way to visualize what you've loaded. You can pass it an integer to see some specific number of rows at the beginning of your DataFrame:
```
df.head(10)
```
You can also view the end of your data with tail():
```
df.tail(4)
```
We often talk about the "shape" of your DataFrame. This is just its dimensions. This particular CSV file has 13 rows with 7 columns per row:
```
df.shape
```
The total size of the data frame is the rows * columns:
```
df.size
```
The len() function gives you the number of rows in a DataFrame:
```
len(df)
```
If your DataFrame has named columns (in our case, extracted automatically from the first row of a .csv file,) you can get an array of them back:
```
df.columns
```
Extracting a single column from your DataFrame looks like this - this gives you back a "Series" in Pandas:
```
df['Hired']
```
You can also extract a given range of rows from a named column, like so:
```
df['Hired'][:5]
```
Or even extract a single value from a specified column / row combination:
```
df['Hired'][5]
```
To extract more than one column, you pass in an array of column names instead of a single one:
```
df[['Years Experience', 'Hired']]
```
You can also extract specific ranges of rows from more than one column, in the way you'd expect:
```
df[['Years Experience', 'Hired']][:5]
```
Sorting your DataFrame by a specific column looks like this:
```
df.sort_values(['Years Experience'])
```
You can break down the number of unique values in a given column into a Series using value_counts() - this is a good way to understand the distribution of your data:
```
degree_counts = df['Level of Education'].value_counts()
degree_counts
```
Pandas even makes it easy to plot a Series or DataFrame - just call plot():
```
degree_counts.plot(kind='bar')
```
## Exercise
Try extracting rows 5-10 of our DataFrame, preserving only the "Previous Employers" and "Hired" columns. Assign that to a new DataFrame, and create a histogram plotting the distribution of the previous employers in this subset of the data.
```
import matplotlib.pyplot as plt
# Keep only the two columns of interest for rows 5-9.
# NOTE(review): the prompt says "rows 5-10", but the slice [5:10] excludes
# row 10 (Python half-open ranges) -- use [5:11] if row 10 should be included.
df2 = df[['Previous employers', 'Hired']][5:10]
# Frequency of each previous-employer count within the subset.
employer_counts = df2['Previous employers'].value_counts()
employer_counts
# Bar chart of that distribution.
employer_counts.plot(kind='bar')
```
| github_jupyter |
## UTAH FORGE PROJECT'S MISSION
Enable cutting-edge research and drilling and technology testing, as well as to allow scientists to identify a replicable, commercial pathway to EGS. In addition to the site itself, the FORGE effort will include a robust instrumentation, data collection, and data dissemination component to capture and share data and activities occurring at FORGE in real time. The innovative research, coupled with an equally-innovative collaboration and management platform, is truly a first of its kind endeavor. More details here https://utahforge.com/
#### The data used in this repository comes from the public data provided by Utah FORGE https://gdr.openei.org/submissions/1111
##### Some functions adapted from https://sainosmichelle.github.io/elements.html
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import lasio
import seaborn as sns
%matplotlib inline
import warnings
warnings.filterwarnings('ignore')
```
#### Read main logs in ft
```
# Read the main wireline logs (depths in ft) from the local LAS file.
main = lasio.read('./localUTAHFORGEDATA/58-32_main.las')
dfmain = main.df()   # convert the LAS object into a depth-indexed DataFrame
print(dfmain.index)
dfmain.head(5)
```
#### Read sonic logs in ft
```
# Read the sonic logs (depths in ft) from the local LAS file.
sonic = lasio.read('./localUTAHFORGEDATA/58-32_sonic.las')
dfsonicall = sonic.df()
# Vp/Vs ratio from the slowness curves; (1/DTCO)/(1/DTSM) is algebraically DTSM/DTCO.
dfsonicall['VpVs']= ((1/dfsonicall['DTCO'])/(1/dfsonicall['DTSM']))
# Keep only the sonic curves not repeated in the main log set (see markdown above).
dfsonic = dfsonicall[['DT1R','DT2','DT2R','DT4P','DT4S','DTCO','DTRP','DTRS','DTSM','ITT','PR','SPHI','VpVs']]
print(dfsonic.index)
```
#### Merge main and sonic logs (not repeated curves) using pandas
```
# Merge main and sonic logs side by side on the shared depth index.
all_logs = pd.concat([dfmain, dfsonic], axis=1, sort=False)
#all_logs.info()
# Visualize data coverage: the heatmap highlights null (missing) depth intervals.
fig, ax = plt.subplots(figsize=(25,8))
sns.heatmap(all_logs.isnull(), ax=ax, cmap="magma")
plt.grid()
plt.show()
```
#### Calculations based on publication "Well-log based prediction of thermal conductivity of sedimentary successions: a case study from the North German Basin. Fuchs, S., and Foster, A. Geophysical Journal International. 2014. 196, pg 291-311. doi: 10.1093/gji/ggt382
```
# --- Log-derived inputs for the thermal-conductivity (TC) regressions ---

# Shale volume from GR, linear formula: Vsh = (GR - GRmin) / (GRmax - GRmin).
# BUG FIX: the original line computed GR - (GRmin / (GRmax - GRmin)) because the
# numerator was not parenthesised -- division binds tighter than subtraction.
# Series.min()/.max() also skip NaN samples (the builtins do not), which matters
# for wireline logs with missing intervals.
gr_min = all_logs['GR'].min()
gr_max = all_logs['GR'].max()
all_logs['Vsh'] = (all_logs['GR'] - gr_min) / (gr_max - gr_min)

# Matrix neutron porosity: neutron porosity minus density porosity.
all_logs['NPOR_m'] = all_logs['NPOR'] - all_logs['DPHZ']

# Eq. 10: matrix-TC regression for clastic rock types (cited Fuchs & Foster 2014).
# NOTE(review): the /-272.15 divisor looks like a temperature-scale constant
# (0 deg C = 273.15 K) -- confirm its sign and value against the publication.
all_logs['eq10'] = (5.281 - (2.961 * all_logs['NPOR_m']) - (2.797 * all_logs['Vsh'])) / -272.15

# Eq. 11: bulk-TC regression derived from subsurface data.
all_logs['eq11'] = (4.75 - (4.19 * all_logs['NPOR']) - (1.81 * all_logs['Vsh'])) / -272.15
#all_logs.info()

# --- Discrete core data; depths converted m -> ft, set to the lower interval depth ---
tops = pd.read_csv('s3://geotermaldata/S3UTAHFORGEDATA/58-32_tops.csv')

# Thermal-conductivity lab measurements, indexed by depth (ft).
TC_coredata = pd.read_csv ('s3://geotermaldata/S3UTAHFORGEDATA/58-32_thermal_conductivity_data.csv')
TC_coredata['Depth'] = (3.28084*TC_coredata['Lower Depth Interval (m)'])
TC_coredata['Matrix_TC']=TC_coredata['matrix thermal conductivity (W/m deg C)']
TC_coredata.set_index('Depth', inplace=True)

# X-ray diffraction lab data; 'tr' (trace amount) entries are replaced by 0.
XRD_coredata = pd.read_csv ('s3://geotermaldata/S3UTAHFORGEDATA/58-32_xray_diffraction_data.csv')
XRD_coredata = XRD_coredata.replace('tr',0)
XRD_coredata['Depth'] = (3.28084*XRD_coredata['Lower Depth Range (m)'])
XRD_coredata.set_index('Depth', inplace=True)
#TC_coredata.tail(15)
XRD_coredata.head()
#basic plot to inspect data
def make_layout_tc (log_df, XRD, TC):
    """Plot a six-track well-log layout for Utah FORGE well 58-32.

    Tracks: (1) GR/SP/caliper, (2) density/neutron/PEF, (3) array
    resistivities on a log scale, (4) sonic slowness and Vp/Vs,
    (5) XRD mineralogy points, (6) measured matrix TC points and
    borehole temperature.

    Arguments:
    log_df -- depth-indexed DataFrame of merged wireline curves
    XRD    -- depth-indexed DataFrame of XRD mineralogy percentages
    TC     -- depth-indexed DataFrame with a Matrix_TC column
    """
    import matplotlib.pyplot as plt

    def _add_curve(track, x, y, label, color, spine_pos,
                   style='-', lw=None, xlim=None, logx=False, grid=False):
        # One curve per twinned top x-axis; `spine_pos` stacks the axis labels.
        ax = track.twiny()
        if lw is None:
            ax.plot(x, y, style, color=color)
        else:
            ax.plot(x, y, style, color=color, linewidth=lw)
        if xlim is not None:
            ax.set_xlim(*xlim)
        ax.set_xlabel(label, color=color)
        if logx:
            ax.set_xscale('log')
        ax.minorticks_on()
        ax.spines['top'].set_position(('axes', spine_pos))
        if grid:
            ax.grid(True)
        return ax

    fig, axs = plt.subplots(nrows=1, ncols=6, sharey=True, squeeze=True,
                            figsize=(15, 15), gridspec_kw={'wspace': 0.25})
    fig.subplots_adjust(left=0.05, bottom=0.05, right=0.975, top=0.7,
                        wspace=0.2, hspace=0.2)
    axs[0].set_ylabel('Depth (ft)')
    axs[0].invert_yaxis()
    depth = log_df.index

    # Track 1: GR / SP / differential caliper
    _add_curve(axs[0], log_df.GR, depth, 'GR (API)', '#2ea869', 1.15, lw=0.5, xlim=(0, 450))
    _add_curve(axs[0], log_df.SP, depth, 'SP(mV)', '#0a0a0a', 1.075, lw=0.7, xlim=(-200, 200))
    _add_curve(axs[0], log_df.DCAL, depth, 'DCAL (in)', '#9da4a1', 1.0, style='--', lw=0.5, xlim=(-5, 15), grid=True)
    axs[0].get_xaxis().set_visible(False)

    # Track 2: bulk density / neutron porosity / photoelectric factor
    _add_curve(axs[1], log_df.RHOZ, depth, 'RHOB (g/cm3)', '#ea0606', 1.15, lw=0.5, xlim=(1.5, 3.0))
    _add_curve(axs[1], log_df.NPHI, depth, 'NPHI (v/v)', '#1577e0', 1.075, lw=0.5, xlim=(1, 0))
    _add_curve(axs[1], log_df.PEFZ, depth, 'PEFZ (b/e)', '#1acb20', 1.0, lw=0.5, xlim=(0, 15), grid=True)
    axs[1].get_xaxis().set_visible(False)

    # Track 3: array resistivities, shared logarithmic scale
    _add_curve(axs[2], log_df.AT10, depth, 'AT10 (ohm.m)', '#d6dbd7', 1.15, lw=0.5, xlim=(0.2, 20000), logx=True)
    _add_curve(axs[2], log_df.AT30, depth, 'AT30 (ohm.m)', '#0a0a0a', 1.075, lw=0.5, xlim=(0.2, 20000), logx=True)
    _add_curve(axs[2], log_df.AT90, depth, 'AT90 (ohm.m)', '#ea0606', 1.0, lw=0.5, xlim=(0.2, 20000), logx=True, grid=True)
    axs[2].get_xaxis().set_visible(False)

    # Track 4: sonic slownesses and Vp/Vs ratio
    _add_curve(axs[3], log_df.DTSM, depth, 'DTS (us/ft)', '#9da4a1', 1.15, lw=0.5, xlim=(200, 40))
    _add_curve(axs[3], log_df.DTCO, depth, 'DTC (us/ft)', '#0a0a0a', 1.075, lw=0.5, xlim=(200, 40))
    _add_curve(axs[3], log_df.VpVs, depth, 'VpVs (unitless)', '#e1093f', 1.0, lw=0.5, xlim=(1, 3), grid=True)
    axs[3].get_xaxis().set_visible(False)

    # Track 5: XRD mineralogy (discrete core points)
    _add_curve(axs[4], XRD.Quartz, XRD.index, 'Quartz %', '#eac406', 1.15, style='o', xlim=(0, 100))
    _add_curve(axs[4], XRD['K-feldspar'], XRD.index, 'K-feldspar %', '#05a9f0', 1.075, style='o', xlim=(0, 100))
    _add_curve(axs[4], XRD['Illite'], XRD.index, 'Illite %', '#94898c', 1.0, style='o', xlim=(0, 100), grid=True)
    axs[4].get_xaxis().set_visible(False)

    # Track 6: measured matrix TC points and borehole temperature
    _add_curve(axs[5], TC.Matrix_TC, TC.index, 'Matrix TC Measured W/mC', '#6e787c', 1.075, style='o', xlim=(0, 5))
    _add_curve(axs[5], log_df.CTEM, depth, 'Temp degF', '#ed8712', 1.0, xlim=(0, 300), grid=True)
    axs[5].get_xaxis().set_visible(False)

    fig.suptitle('Well Data for UTAH FORGE 58-32',weight='bold', fontsize=20, y=0.9);
    plt.show()
# Render the six-track layout, then persist the merged log set to CSV.
make_layout_tc (all_logs, XRD_coredata, TC_coredata)
all_logs.to_csv('./localUTAHFORGEDATA/all_logs.csv')
```
| github_jupyter |
##### Copyright 2018 The TF-Agents Authors.
### Get Started
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/agents/blob/master/tf_agents/colabs/1_dqn_tutorial.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/agents/blob/master/tf_agents/colabs/1_dqn_tutorial.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
</table>
```
# Note: If you haven't installed the following dependencies, run:
!apt-get install xvfb
!pip install 'gym==0.10.11'
!pip install imageio
!pip install PILLOW
!pip install pyglet
!pip install pyvirtualdisplay
!pip install tf-agents-nightly
!pip install tf-nightly
```
## Introduction
This example shows how to train a [DQN (Deep Q Networks)](https://storage.googleapis.com/deepmind-media/dqn/DQNNaturePaper.pdf) agent on the Cartpole environment using the TF-Agents library.

We will walk you through all the components in a Reinforcement Learning (RL) pipeline for training, evaluation and data collection.
## Setup
```
import base64
import imageio
import IPython
import matplotlib
import matplotlib.pyplot as plt
import PIL.Image
import pyvirtualdisplay
import tensorflow as tf
from tf_agents.agents.dqn import dqn_agent
from tf_agents.agents.dqn import q_network
from tf_agents.drivers import dynamic_step_driver
from tf_agents.environments import suite_gym
from tf_agents.environments import tf_py_environment
from tf_agents.environments import trajectory
from tf_agents.metrics import metric_utils
from tf_agents.metrics import tf_metrics
from tf_agents.policies import random_tf_policy
from tf_agents.replay_buffers import tf_uniform_replay_buffer
from tf_agents.utils import common
tf.compat.v1.enable_v2_behavior()
# Set up a virtual display for rendering OpenAI gym environments.
display = pyvirtualdisplay.Display(visible=0, size=(1400, 900)).start()
```
## Hyperparameters
```
# Training / evaluation hyperparameters for the DQN Cartpole example.
env_name = 'CartPole-v0' # @param
num_iterations = 20000 # @param
# Random steps used to seed the replay buffer before training begins.
initial_collect_steps = 1000 # @param
collect_steps_per_iteration = 1 # @param
replay_buffer_capacity = 100000 # @param
# Hidden fully-connected layer sizes for the Q-network.
fc_layer_params = (100,)
batch_size = 64 # @param
learning_rate = 1e-3 # @param
# Iterations between loss logs and between policy evaluations, respectively.
log_interval = 200 # @param
num_eval_episodes = 10 # @param
eval_interval = 1000 # @param
```
## Environment
Environments in RL represent the task or problem that we are trying to solve. Standard environments can be easily created in TF-Agents using `suites`. We have different `suites` for loading environments from sources such as the OpenAI Gym, Atari, DM Control, etc., given a string environment name.
Now let us load the CartPole environment from the OpenAI Gym suite.
```
env = suite_gym.load(env_name)
```
We can render this environment to see how it looks. A free-swinging pole is attached to a cart. The goal is to move the cart right or left in order to keep the pole pointing up.
```
#@test {"skip": true}
env.reset()
PIL.Image.fromarray(env.render())
```
The `time_step = environment.step(action)` statement takes `action` in the environment. The `TimeStep` tuple returned contains the environment's next observation and reward for that action. The `time_step_spec()` and `action_spec()` methods in the environment return the specifications (types, shapes, bounds) of the `time_step` and `action` respectively.
```
# Inspect the environment's observation and action specifications.
# FIX: the original used Python 2 print statements, which are a SyntaxError
# under Python 3 (the interpreter this notebook otherwise targets).
print('Observation Spec:')
print(env.time_step_spec().observation)
print('Action Spec:')
print(env.action_spec())
```
So, we see that observation is an array of 4 floats: the position and velocity of the cart, and the angular position and velocity of the pole. Since only two actions are possible (move left or move right), the `action_spec` is a scalar where 0 means "move left" and 1 means "move right."
```
# Reset the environment, take one action, and show both transitions.
# FIX: Python 2 print statements converted to Python 3 print() calls.
time_step = env.reset()
print('Time step:')
print(time_step)

action = 1
next_time_step = env.step(action)
print('Next time step:')
print(next_time_step)
```
Usually we create two environments: one for training and one for evaluation. Most environments are written in pure python, but they can be easily converted to TensorFlow using the `TFPyEnvironment` wrapper. The original environment's API uses numpy arrays, the `TFPyEnvironment` converts these to/from `Tensors` for you to more easily interact with TensorFlow policies and agents.
```
train_py_env = suite_gym.load(env_name)
eval_py_env = suite_gym.load(env_name)
train_env = tf_py_environment.TFPyEnvironment(train_py_env)
eval_env = tf_py_environment.TFPyEnvironment(eval_py_env)
```
## Agent
The algorithm that we use to solve an RL problem is represented as an `Agent`. In addition to the DQN agent, TF-Agents provides standard implementations of a variety of `Agents` such as [REINFORCE](http://www-anw.cs.umass.edu/~barto/courses/cs687/williams92simple.pdf), [DDPG](https://arxiv.org/pdf/1509.02971.pdf), [TD3](https://arxiv.org/pdf/1802.09477.pdf), [PPO](https://arxiv.org/abs/1707.06347) and [SAC](https://arxiv.org/abs/1801.01290).
The DQN agent can be used in any environment which has a discrete action space. To create a DQN Agent, we first need a `Q-Network` that can learn to predict `Q-Values` (expected return) for all actions given an observation from the environment.
We can easily create a `Q-Network` using the specs of the observations and actions. We can specify the layers in the network which, in this example, is the `fc_layer_params` argument set to a tuple of `ints` representing the sizes of each hidden layer (see the Hyperparameters section above).
```
q_net = q_network.QNetwork(
train_env.observation_spec(),
train_env.action_spec(),
fc_layer_params=fc_layer_params)
```
We also need an `optimizer` to train the network we just created, and a `train_step_counter` variable to keep track of how many times the network was updated.
```
# Adam optimizer updates the Q-network; the variable counts training steps.
optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate)
train_step_counter = tf.compat.v2.Variable(0)
# DQN agent wires together the environment specs, the Q-network, the
# optimizer, and an element-wise squared TD-error loss.
tf_agent = dqn_agent.DqnAgent(
train_env.time_step_spec(),
train_env.action_spec(),
q_network=q_net,
optimizer=optimizer,
td_errors_loss_fn=dqn_agent.element_wise_squared_loss,
train_step_counter=train_step_counter)
tf_agent.initialize()
```
## Policies
In TF-Agents, policies represent the standard notion of policies in RL: given a `time_step` produce an action or a distribution over actions. The main method is `policy_step = policy.step(time_step)` where `policy_step` is a named tuple `PolicyStep(action, state, info)`. The `policy_step.action` is the `action` to be applied to the environment, `state` represents the state for stateful (RNN) policies and `info` may contain auxiliary information such as log probabilities of the actions.
Agents contain two policies: the main policy that is used for evaluation/deployment (agent.policy) and another policy that is used for data collection (agent.collect_policy).
```
eval_policy = tf_agent.policy
collect_policy = tf_agent.collect_policy
```
We can also independently create policies that are not part of an agent. For example, a random policy:
```
random_policy = random_tf_policy.RandomTFPolicy(train_env.time_step_spec(),
train_env.action_spec())
```
## Metrics and Evaluation
The most common metric used to evaluate a policy is the average return. The return is the sum of rewards obtained while running a policy in an environment for an episode, and we usually average this over a few episodes. We can compute the average return metric as follows.
```
#@test {"skip": true}
def compute_avg_return(environment, policy, num_episodes=10):
    """Roll out `policy` in `environment` and return the mean episode return.

    Runs `num_episodes` complete episodes, summing the per-step rewards of
    each, then averages the episode totals.  The averaged return is a tensor,
    so it is converted to a plain scalar via `.numpy()[0]` before returning.
    """
    total_return = 0.0
    episode = 0
    while episode < num_episodes:
        time_step = environment.reset()
        episode_return = 0.0
        # Step the environment until it signals the terminal time step.
        while not time_step.is_last():
            action_step = policy.action(time_step)
            time_step = environment.step(action_step.action)
            episode_return = episode_return + time_step.reward
        total_return = total_return + episode_return
        episode += 1
    avg_return = total_return / num_episodes
    return avg_return.numpy()[0]
compute_avg_return(eval_env, random_policy, num_eval_episodes)
# Please also see the metrics module for standard implementations of different
# metrics.
```
## Replay Buffer
In order to keep track of the data collected from the environment, we will use the TFUniformReplayBuffer. This replay buffer is constructed using specs describing the tensors that are to be stored, which can be obtained from the agent using `tf_agent.collect_data_spec`.
```
replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(
data_spec=tf_agent.collect_data_spec,
batch_size=train_env.batch_size,
max_length=replay_buffer_capacity)
```
For most agents, the `collect_data_spec` is a `Trajectory` named tuple containing the observation, action, reward etc.
## Data Collection
Now let us execute the random policy in the environment for a few steps and record the data (observations, actions, rewards etc) in the replay buffer.
```
#@test {"skip": true}
def collect_step(environment, policy):
    """Take one environment step under `policy` and store the transition.

    Reads the environment's current time step, queries the policy for an
    action, applies it, and records the resulting transition in the global
    `replay_buffer` as a Trajectory.
    """
    current = environment.current_time_step()
    action_step = policy.action(current)
    following = environment.step(action_step.action)
    # Package (state, action, next state) and add it to the replay buffer
    # for later sampling during training.
    transition = trajectory.from_transition(current, action_step, following)
    replay_buffer.add_batch(transition)
for _ in range(initial_collect_steps):
collect_step(train_env, random_policy)
# This loop is so common in RL, that we provide standard implementations of
# these. For more details see the drivers module.
```
In order to sample data from the replay buffer, we will create a `tf.data` pipeline which we can feed to the agent for training later. We can specify the `sample_batch_size` to configure the number of items sampled from the replay buffer. We can also optimize the data pipeline using parallel calls and prefetching.
In order to save space, we only store the current observation in each row of the replay buffer. But since the DQN Agent needs both the current and next observation to compute the loss, we always sample two adjacent rows for each item in the batch by setting `num_steps=2`.
```
# Dataset generates trajectories with shape [Bx2x...]
dataset = replay_buffer.as_dataset(
num_parallel_calls=3, sample_batch_size=batch_size, num_steps=2).prefetch(3)
iterator = iter(dataset)
```
## Training the agent
The training loop involves both collecting data from the environment and optimizing the agent's networks. Along the way, we will occasionally evaluate the agent's policy to see how we are doing.
The following will take ~5 minutes to run.
```
#@test {"skip": true}
%%time
# (Optional) Optimize by wrapping some of the code in a graph using TF function.
tf_agent.train = common.function(tf_agent.train)
# Reset the train step
tf_agent.train_step_counter.assign(0)
# Evaluate the agent's policy once before training.
avg_return = compute_avg_return(eval_env, tf_agent.policy, num_eval_episodes)
returns = [avg_return]
# Main training loop: alternate environment data collection with gradient
# updates, logging loss every `log_interval` steps and the evaluation-policy
# average return every `eval_interval` steps.
for _ in range(num_iterations):
    # Collect a few steps using collect_policy and save to the replay buffer.
    for _ in range(collect_steps_per_iteration):
        collect_step(train_env, tf_agent.collect_policy)
    # Sample a batch of data from the buffer and update the agent's network.
    experience, unused_info = next(iterator)
    train_loss = tf_agent.train(experience)
    step = tf_agent.train_step_counter.numpy()
    if step % log_interval == 0:
        print('step = {0}: loss = {1}'.format(step, train_loss.loss))
    if step % eval_interval == 0:
        # Evaluate with the greedy deployment policy, not the exploratory
        # collect policy.
        avg_return = compute_avg_return(eval_env, tf_agent.policy, num_eval_episodes)
        print('step = {0}: Average Return = {1}'.format(step, avg_return))
        returns.append(avg_return)
```
## Visualization
### Plots
We can plot return vs global steps to see the performance of our agent. In `Cartpole-v0`, the environment gives a reward of +1 for every time step the pole stays up, and since the maximum number of steps is 200, the maximum possible return is also 200.
```
#@test {"skip": true}
steps = range(0, num_iterations + 1, eval_interval)
plt.plot(steps, returns)
plt.ylabel('Average Return')
plt.xlabel('Step')
plt.ylim(top=250)
```
### Videos
It is helpful to visualize the performance of an agent by rendering the environment at each step. Before we do that, let us first create a function to embed videos in this colab.
```
def embed_mp4(filename):
    """Embeds an mp4 file in the notebook.

    Reads the video file, base64-encodes it, and wraps it in an HTML5
    <video> tag so it renders inline in Colab/Jupyter.

    Args:
        filename: path to the .mp4 file to embed.

    Returns:
        An IPython.display.HTML object containing the inline video.
    """
    # Use a context manager so the file handle is closed promptly; the
    # original `open(filename, 'rb').read()` leaked the handle until GC.
    with open(filename, 'rb') as f:
        video = f.read()
    b64 = base64.b64encode(video)
    tag = '''
  <video width="640" height="480" controls>
    <source src="data:video/mp4;base64,{0}" type="video/mp4">
    Your browser does not support the video tag.
  </video>'''.format(b64.decode())
    return IPython.display.HTML(tag)
```
The following code visualizes the agent's policy for a few episodes:
```
num_episodes = 3
video_filename = 'imageio.mp4'
with imageio.get_writer(video_filename, fps=60) as video:
for _ in range(num_episodes):
time_step = eval_env.reset()
video.append_data(eval_py_env.render())
while not time_step.is_last():
action_step = tf_agent.policy.action(time_step)
time_step = eval_env.step(action_step.action)
video.append_data(eval_py_env.render())
embed_mp4(video_filename)
```
| github_jupyter |
# LAIsim demonstration notebook
This Jupyter Notebook demonstrates using pharmacokinetic data in a simple flip-flop model to simulate plasma levels using [LAIsim](https://github.com/ClairePower/LAIsim).
```
import LAIsim
import numpy as np
import matplotlib.pyplot as plt
from hair import Hair
from analyser import Analyser
from scipy.signal import find_peaks
pali = LAIsim.LAIsim('Paliperidone 12 weekly')
# Pharmacokinetic parameters from https://accp1.onlinelibrary.wiley.com/doi/abs/10.1002/jcph.597
# Ravenstijn, P., Remmerie, B., Savitz, A., Samtani, M.N., Nuamah, I., Chang, C.-T., De Meulder, M., Hough, D. and Gopal, S. (2016), Pharmacokinetics, safety, and tolerability of paliperidone palmitate 3-month formulation in patients with schizophrenia: A phase-1, single-dose, randomized, open-label study. The Journal of Clinical Pharmacology, 56: 330-339. https://doi.org/10.1002/jcph.597
pali.pk_tune(23, 56.3, 68.5)
paliperidone_plasma_level = pali.simulate_n(30, 90)
plt.plot(pali.curve[0:720])
plt.show()
pali.popt
plt.plot(paliperidone_plasma_level[:365*10])
plt.show()
pali.save()
clopixol = LAIsim.LAIsim('Zuclopenthixol')
# Pharmacokinetic data from Produce Monograph https://www.lundbeck.com/content/dam/lundbeck-com/americas/canada/products/files/clopixol_product_monograph_english.pdf
clopixol.pk_tune(5, 6, 19)
plt.plot(clopixol.curve)
plt.show()
clopixol.popt
clopixol_plasma_level = clopixol.simulate_n(26*10, 14)
clopixol.save()
plt.plot(clopixol_plasma_level[:720])
plt.show()
aripiprazole = LAIsim.LAIsim('Aripiprazole')
# Pharmacokinetic data from https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5409034/
# Raoufinia, Arash et al. “Aripiprazole Once-Monthly 400 mg: Comparison of Pharmacokinetics, Tolerability, and Safety of Deltoid Versus Gluteal Administration.” The international journal of neuropsychopharmacology vol. 20,4 (2017): 295-304. doi:10.1093/ijnp/pyw116
aripiprazole.pk_tune(24.1, 136, 24)
plt.plot(aripiprazole.curve)
plt.show()
aripiprazole_plasma_level = aripiprazole.simulate_n(12*10, 28)
aripiprazole.save()
plt.plot(aripiprazole_plasma_level[:365*2])
plt.show()
haloperidol = LAIsim.LAIsim('Haloperidol')
# Data extrapolated from https://www.accessdata.fda.gov/drugsatfda_docs/label/2011/018701s054lbl.pdf Manufacturer data submitted to FDA
haloperidol.pk_tune(6, 3, 21)
plt.plot(haloperidol.curve)
plt.show()
haloperidol_plasma_level = haloperidol.simulate_n(120, 28)
haloperidol.save()
plt.plot(haloperidol_plasma_level[:365*2])
plt.show()
# Simulate the 4-weekly (once-monthly) paliperidone formulation.
pali_monthly = LAIsim.LAIsim('Paliperidone 4 weekly')
# Data generated from https://accp1.onlinelibrary.wiley.com/doi/full/10.1002/cpdd.737
# Shimizu, H., Neyens, M., De Meulder, M., Gopal, S., Tsukamoto, Y., Samtani, M.N. and Remmerie, B. (2020), Population Pharmacokinetics of Paliperidone Palmitate (Once-Monthly Formulation) in Japanese, Korean, and Taiwanese Patients With Schizophrenia. Clinical Pharmacology in Drug Development, 9: 224-234. https://doi.org/10.1002/cpdd.737
# pali_monthly.pk_tune(18, 17.2, 45) produced an unrealistic curve; either provide p0 estimates or manually create tdata/cdata as below
tdata = np.array([9, 18., 18.+45., 18.+2*45, 18.+3*45])
cdata = np.array([10, 17.2, 8., 4., 2.])
pali_monthly.tune(tdata, cdata, 720)
# BUG FIX: simulate from the *monthly* model. The original called
# pali.simulate_n(120, 28), which re-simulated the 12-weekly formulation
# and stored it under the monthly label.
paliperidone_monthly_plasma_level = pali_monthly.simulate_n(120, 28)
plt.plot(pali_monthly.curve[0:320])
plt.show()
np.savez("plasma_levels", days = np.arange(1, 10*365), haloperidol = haloperidol_plasma_level, paliperidone = paliperidone_plasma_level, aripiprazole = aripiprazole_plasma_level, zuclopenthixol = clopixol_plasma_level, paliperidone_monthly = paliperidone_monthly_plasma_level, paliperidone_monthly_single = pali_monthly.curve, zuclopenthixol_single = clopixol.curve, haloperidol_single = haloperidol.curve, aripiprazole_single = aripiprazole.curve, paliperidone_single = pali.curve)
hair = Hair(haloperidol_plasma_level)
depot_days, _ = find_peaks(-hair.strand[100:300])
plt.plot(depot_days, hair.strand[100:300][depot_days], 'o', color='r')
plt.plot(hair.strand[100:300])
plt.show()
np.savez("strand_conc", days = np.arange(100, 300, 1), conc = hair.strand[100:300])
np.savez("strand_depot", days = depot_days, depots = hair.strand[100:300][depot_days])
segments = hair.segment_into_n(100)[2:22]
num_segments = len(segments)
analysis = Analyser(Analyser.homogenize(segments))
depot_days, _ = analysis.find_troughs()
plt.plot(depot_days, analysis.segments[depot_days], 'o', color='r')
plt.bar(range(num_segments), Analyser.homogenize(segments))
plt.show()
np.savez("segment_bars", days = range(num_segments), avg = Analyser.homogenize(segments))
np.savez("depot_days", days = depot_days, depots = analysis.segments[depot_days])
pali_monthly.popt
```
| github_jupyter |
# Keras tutorial - the Happy House
Welcome to the first assignment of week 2. In this assignment, you will:
1. Learn to use Keras, a high-level neural networks API (programming framework), written in Python and capable of running on top of several lower-level frameworks including TensorFlow and CNTK.
2. See how you can in a couple of hours build a deep learning algorithm.
Why are we using Keras? Keras was developed to enable deep learning engineers to build and experiment with different models very quickly. Just as TensorFlow is a higher-level framework than Python, Keras is an even higher-level framework and provides additional abstractions. Being able to go from idea to result with the least possible delay is key to finding good models. However, Keras is more restrictive than the lower-level frameworks, so there are some very complex models that you can implement in TensorFlow but not (without more difficulty) in Keras. That being said, Keras will work fine for many common models.
In this exercise, you'll work on the "Happy House" problem, which we'll explain below. Let's load the required packages and solve the problem of the Happy House!
```
import numpy as np
from keras import layers
from keras.layers import Input, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D
from keras.layers import AveragePooling2D, MaxPooling2D, Dropout, GlobalMaxPooling2D, GlobalAveragePooling2D
from keras.regularizers import l2
from keras.models import Model
from keras.preprocessing import image
from keras.utils import layer_utils
from keras.utils.data_utils import get_file
from keras.applications.imagenet_utils import preprocess_input
from keras.callbacks import ModelCheckpoint, CSVLogger
import pydot
from IPython.display import SVG
from PIL import Image
import urllib
from keras.utils.vis_utils import model_to_dot
from keras.utils import plot_model
from kt_utils import *
import keras.backend as K
K.set_image_data_format('channels_last')
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
import pickle
import json
#from tensorflow.python.client import device_lib
#print(device_lib.list_local_devices())
from keras import backend as K
K.tensorflow_backend._get_available_gpus()
%matplotlib inline
```
**Note**: As you can see, we've imported a lot of functions from Keras. You can use them easily just by calling them directly in the notebook. Ex: `X = Input(...)` or `X = ZeroPadding2D(...)`.
## 1 - The Happy House
For your next vacation, you decided to spend a week with five of your friends from school. It is a very convenient house with many things to do nearby. But the most important benefit is that everybody has committed to be happy when they are in the house. So anyone wanting to enter the house must prove their current state of happiness.
<img src="images/happy-house.jpg" style="width:350px;height:270px;">
<caption><center> <u> <font color='purple'> **Figure 1** </u><font color='purple'> : **the Happy House**</center></caption>
As a deep learning expert, to make sure the "Happy" rule is strictly applied, you are going to build an algorithm that uses pictures from the front door camera to check if the person is happy or not. The door should open only if the person is happy.
You have gathered pictures of your friends and yourself, taken by the front-door camera. The dataset is labeled.
<img src="images/house-members.png" style="width:550px;height:250px;">
Run the following code to normalize the dataset and learn about its shapes.
```
X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()
# Normalize image vectors
X_train = X_train_orig/255.
#X_val = X_train_orig[450:600]/255.
X_test = X_test_orig/255.
# Reshape
Y_train = Y_train_orig.T
#Y_val = Y_train_orig[450:600].T
Y_test = Y_test_orig.T
print ("number of training examples = " + str(X_train.shape[0]))
#print ("number of validation examples = " + str(X_val.shape[0]))
print ("number of test examples = " + str(X_test.shape[0]))
print ("X_train shape: " + str(X_train.shape))
print ("Y_train shape: " + str(Y_train.shape))
print ("X_test shape: " + str(X_test.shape))
print ("Y_test shape: " + str(Y_test.shape))
```
**Details of the "Happy" dataset**:
- Images are of shape (64,64,3)
- Training: 600 pictures
- Test: 150 pictures
It is now time to solve the "Happy" Challenge.
## 2 - Building a model in Keras
Keras is very good for rapid prototyping. In just a short time you will be able to build a model that achieves outstanding results.
Here is an example of a model in Keras:
```python
def model(input_shape):
# Define the input placeholder as a tensor with shape input_shape. Think of this as your input image!
X_input = Input(input_shape)
# Zero-Padding: pads the border of X_input with zeroes
X = ZeroPadding2D((3, 3))(X_input)
# CONV -> BN -> RELU Block applied to X
X = Conv2D(32, (7, 7), strides = (1, 1), name = 'conv0')(X)
X = BatchNormalization(axis = 3, name = 'bn0')(X)
X = Activation('relu')(X)
# MAXPOOL
X = MaxPooling2D((2, 2), name='max_pool')(X)
# FLATTEN X (means convert it to a vector) + FULLYCONNECTED
X = Flatten()(X)
X = Dense(1, activation='sigmoid', name='fc')(X)
# Create model. This creates your Keras model instance, you'll use this instance to train/test the model.
model = Model(inputs = X_input, outputs = X, name='HappyModel')
return model
```
Note that Keras uses a different convention with variable names than we've previously used with numpy and TensorFlow. In particular, rather than creating and assigning a new variable on each step of forward propagation such as `X`, `Z1`, `A1`, `Z2`, `A2`, etc. for the computations for the different layers, in Keras code each line above just reassigns `X` to a new value using `X = ...`. In other words, during each step of forward propagation, we are just writing the latest value in the computation into the same variable `X`. The only exception was `X_input`, which we kept separate and did not overwrite, since we needed it at the end to create the Keras model instance (`model = Model(inputs = X_input, ...)` above).
**Exercise**: Implement a `HappyModel()`. This assignment is more open-ended than most. We suggest that you start by implementing a model using the architecture we suggest, and run through the rest of this assignment using that as your initial model. But after that, come back and take initiative to try out other model architectures. For example, you might take inspiration from the model above, but then vary the network architecture and hyperparameters however you wish. You can also use other functions such as `AveragePooling2D()`, `GlobalMaxPooling2D()`, `Dropout()`.
**Note**: You have to be careful with your data's shapes. Use what you've learned in the videos to make sure your convolutional, pooling and fully-connected layers are adapted to the volumes you're applying it to.
```
# GRADED FUNCTION: HappyModel
def HappyModel(input_shape):
    """
    Implementation of the HappyModel.

    Architecture: two CONV -> BATCHNORM -> RELU -> MAXPOOL blocks followed by
    a single sigmoid unit for binary (happy / not happy) classification.

    Arguments:
    input_shape -- shape of the images of the dataset (height, width, channels),
                   excluding the batch dimension, e.g. (64, 64, 3)

    Returns:
    model -- a Model() instance in Keras (uncompiled; compile before fitting)
    """

    ### START CODE HERE ###
    # Feel free to use the suggested outline in the text above to get started,
    # and run through the whole exercise (including the later portions of this
    # notebook) once. Then come back and try out other network architectures.
    X_input = Input(input_shape)

    # Zero-Padding: pads the border of X_input with zeroes
    X = ZeroPadding2D((3, 3))(X_input)

    # CONV -> BN -> RELU Block applied to X
    X = Conv2D(64, (7, 7), strides = (1, 1), name = 'conv0')(X)
    X = BatchNormalization(axis = 3, name = 'bn0')(X) #axis 3 because "channel_last" is specified
    X = Activation('relu')(X)

    # MAXPOOL
    X = MaxPooling2D((2, 2), name='max_pool')(X)

    # Second CONV -> BN -> RELU -> MAXPOOL block, added to address high bias.
    X = ZeroPadding2D((3, 3))(X)
    X = Conv2D(32, (7, 7), strides = (1, 1), name = 'conv1')(X)
    X = BatchNormalization(axis = 3, name = 'bn1')(X)
    X = Activation('relu')(X)
    X = MaxPooling2D((2, 2), name='max_pool1')(X)

    # FLATTEN X (means convert it to a vector) + FULLYCONNECTED
    X = Flatten()(X)
    # Single sigmoid unit -> probability that the person is happy.
    X = Dense(1, activation='sigmoid', name='fc')(X)

    # Create model. This creates your Keras model instance, you'll use this instance to train/test the model.
    model = Model(inputs = X_input, outputs = X, name='HappyModel')

    ### END CODE HERE ###

    return model
```
You have now built a function to describe your model. To train and test this model, there are four steps in Keras:
1. Create the model by calling the function above
2. Compile the model by calling `model.compile(optimizer = "...", loss = "...", metrics = ["accuracy"])`
3. Train the model on train data by calling `model.fit(x = ..., y = ..., epochs = ..., batch_size = ...)`
4. Test the model on test data by calling `model.evaluate(x = ..., y = ...)`
If you want to know more about `model.compile()`, `model.fit()`, `model.evaluate()` and their arguments, refer to the official [Keras documentation](https://keras.io/models/model/).
### **Exercise**: Implement step 1, i.e. create the model.
```
### START CODE HERE ### (1 line)
model = HappyModel((X_train.shape[1], X_train.shape[2], X_train.shape[3])) #64, 64, 3
### END CODE HERE ###
```
### **Added some callbacks:** CSVlogger saves the training info to a .log file. ModelCheckpoint specifies a metric to monitor and saves only the best model (epoch) to a .h5 file.
```
trainingLog = CSVLogger('training.log', separator=',', append=False)
checkpoint = ModelCheckpoint("HappyModel.h5", monitor='val_acc', verbose=1, save_best_only=True, save_weights_only=False, mode='auto', period=1)
```
### **Exercise**: Implement step 2, i.e. compile the model to configure the learning process. Choose the 3 arguments of `compile()` wisely. Hint: the Happy Challenge is a binary classification problem.
```
### START CODE HERE ### (1 line)
model.compile(optimizer = "adam", loss = "binary_crossentropy", metrics = ["accuracy"])
### END CODE HERE ###
```
### **Exercise**: Implement step 3, i.e. train the model. Choose the number of epochs and the batch size.
```
### START CODE HERE ### (1 line)
fit = model.fit(X_train, Y_train, epochs=50, batch_size=16, validation_split=0.25, callbacks=[checkpoint, trainingLog])
with open("history.txt", "w", encoding="utf8") as f:
json.dump(fit.history, f)
history = fit.history
### END CODE HERE ###
```
Note that if you run `fit()` again, the `model` will continue to train with the parameters it has already learnt instead of reinitializing them.
<font color=blue> **Save Model in a file**
```
with open('model', 'wb') as f:
pickle.dump([model, fit.history] , f)
```
<font color=blue> **Load file with the model** (Only if you didn't execute the training! This will override your current model data!)
```
with open('model', 'rb') as f:
model, history = pickle.load(f)
#model = HappyModel((X_train.shape[1], X_train.shape[2], X_train.shape[3]))
model.load_weights('HappyModel.h5')
```
### Plot graphics with the results of the training
```
def plot_metrics_finetuning(hist, stop=50):
    """Plot training/validation accuracy and loss curves and report the best epoch.

    Arguments:
    hist -- Keras history dict with keys 'acc', 'val_acc', 'loss', 'val_loss'
    stop -- number of epochs to plot (default 50)
    """
    fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(15, 6))
    axes = axes.flatten()

    axes[0].plot(range(stop), hist['acc'], label='Training', color='black')
    axes[0].plot(range(stop), hist['val_acc'], label='Validation', color='red')
    axes[0].set_title('Accuracy')
    axes[0].set_ylabel('Accuracy')
    axes[0].set_xlabel('Epoch')
    axes[0].legend(loc='lower right')

    axes[1].plot(range(stop), hist['loss'], label='Training', color='black')
    axes[1].plot(range(stop), hist['val_loss'], label='Validation', color='red')
    axes[1].set_title('Loss')
    axes[1].set_ylabel('Loss')
    axes[1].set_xlabel('Epoch')
    axes[1].legend(loc='upper right')

    plt.tight_layout()

    # BUG FIX: report the best epoch from the *argument* `hist`, not the
    # notebook-global `history`, so the function works for any history dict
    # passed in.
    print("Best Model:")
    best_epoch = np.argmax(hist["val_acc"])
    print("Best epoch= " + str(best_epoch+1) + ", Validation Accuracy= " + str(hist["val_acc"][best_epoch]) + " Validation loss= " + str(hist["val_loss"][best_epoch]))
plot_metrics_finetuning(history, stop=50)
```
### Evaluate the model (keras.evaluate)
```
model.evaluate(X_test, Y_test) #Returns the loss value & metrics values for the model in test mode.
print(model.metrics_names)
```
### **Exercise**: Implement step 4, i.e. test/evaluate the model.
```
### START CODE HERE ### (1 line)
# BUG FIX: use evaluate(), not predict(). predict() returns per-sample
# predictions, so preds[0] / preds[1] below were two individual predictions
# rather than the test loss and accuracy they are printed as.
preds = model.evaluate(x=X_test, y=Y_test, batch_size=16, verbose=1)
### END CODE HERE ###
print()
print ("Loss = " + str(preds[0]))
print ("Test Accuracy = " + str(preds[1]))
import imageio
from h5py import File
i = 81 #Image to test (150 total)
f = File('datasets/test_happy.h5', 'r')
print([key for key in f.keys()])
dset = f['test_set_x']
data = np.array(dset[i,:,:,:])
file = 'test.jpg' # or .jpg
imageio.imwrite(file, data)
img = image.load_img('test.jpg', target_size=(64, 64))
imshow(img)
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
pred = model.predict(x, verbose=1)
if pred < 0.9:
print(str(pred) + " You are sad! You are not allowed in the Happy House!")
elif pred >= 0.9:
print(str(pred) + " You are happy! Welcome to the happy house!")
```
If your `happyModel()` function worked, you should have observed much better than random-guessing (50%) accuracy on the train and test sets.
To give you a point of comparison, our model gets around **95% test accuracy in 40 epochs** (and 99% train accuracy) with a mini batch size of 16 and "adam" optimizer. But our model gets decent accuracy after just 2-5 epochs, so if you're comparing different models you can also train a variety of models on just a few epochs and see how they compare.
If you have not yet achieved a very good accuracy (let's say more than 80%), here're some things you can play around with to try to achieve it:
- Try using blocks of CONV->BATCHNORM->RELU such as:
```python
X = Conv2D(32, (3, 3), strides = (1, 1), name = 'conv0')(X)
X = BatchNormalization(axis = 3, name = 'bn0')(X)
X = Activation('relu')(X)
```
until your height and width dimensions are quite low and your number of channels quite large (≈32 for example). You are encoding useful information in a volume with a lot of channels. You can then flatten the volume and use a fully-connected layer.
- You can use MAXPOOL after such blocks. It will help you lower the dimension in height and width.
- Change your optimizer. We find Adam works well.
- If the model is struggling to run and you get memory issues, lower your batch_size (12 is usually a good compromise)
- Run on more epochs, until you see the train accuracy plateauing.
Even if you have achieved a good accuracy, please feel free to keep playing with your model to try to get even better results.
**Note**: If you perform hyperparameter tuning on your model, the test set actually becomes a dev set, and your model might end up overfitting to the test (dev) set. But just for the purpose of this assignment, we won't worry about that here.
## 3 - Conclusion
Congratulations, you have solved the Happy House challenge!
Now, you just need to link this model to the front-door camera of your house. We unfortunately won't go into the details of how to do that here.
<font color='blue'>
**What we would like you to remember from this assignment:**
- Keras is a tool we recommend for rapid prototyping. It allows you to quickly try out different model architectures. Are there any applications of deep learning to your daily life that you'd like to implement using Keras?
- Remember how to code a model in Keras and the four steps leading to the evaluation of your model on the test set. Create->Compile->Fit/Train->Evaluate/Test.
## 4 - Test with your own image (Optional)
Congratulations on finishing this assignment. You can now take a picture of your face and see if you could enter the Happy House. To do that:
1. Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub.
2. Add your image to this Jupyter Notebook's directory, in the "images" folder
3. Write your image's name in the following code
4. Run the code and check if the algorithm is right (0 is unhappy, 1 is happy)!
The training/test sets were quite similar; for example, all the pictures were taken against the same background (since a front door camera is always mounted in the same position). This makes the problem easier, but a model trained on this data may or may not work on your own data. But feel free to give it a try!
```
### START CODE HERE ###
img_path = 'images/my_image.jpg'
### END CODE HERE ###
img = image.load_img(img_path, target_size=(64, 64))
imshow(img)
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
pred = model.predict(x, verbose=1)
if pred < 0.9:
print(str(pred) + " You are sad! You are not allowed in the Happy House!")
elif pred >= 0.9:
print(str(pred) + " You are happy! Welcome to the happy house!")
def test_image(img_path):
    """Download an image from `img_path`, classify it, and print the verdict.

    Fetches the remote image, resizes it to the network's 64x64 input,
    runs the happiness classifier, and prints whether entry to the Happy
    House is allowed (sigmoid output thresholded at 0.9).
    """
    # Retrieve the remote image to a local file, then load it at the
    # model's expected resolution.
    urllib.request.urlretrieve(img_path, 'images/save.jpg')
    img = image.load_img('images/save.jpg', target_size=(64, 64))
    imshow(img)

    batch = np.expand_dims(image.img_to_array(img), axis=0)
    batch = preprocess_input(batch)
    pred = model.predict(batch, verbose=1)

    if pred < 0.9:
        print(str(pred) + " You are sad! You are not allowed in the Happy House!")
    elif pred >= 0.9:
        print(str(pred) + " You are happy! Welcome to the happy house!")
img_path = 'https://cmjanosbarcelona.com/wp-content/uploads/2015/07/maria-anderson.jpg'
test_image(img_path)
img_path = 'https://i.pinimg.com/564x/2f/5c/5e/2f5c5e2fd4d01740a5c07867baa725fa.jpg'
test_image(img_path)
img_path = 'http://clipart-library.com/images/8Txrjakoc.jpg'
test_image(img_path)
img_path = 'https://www.dysportusa.com/Content/img/new-beforeafter/1-before-Dina.jpg'
test_image(img_path)
img_path = 'https://www.telegraph.co.uk/content/dam/women/2016/08/24/GettyImages-503297802-xlarge_trans_NvBQzQNjv4BqLf6XVudQ0d-fF3pHUPFzENzToNs4KPGiwgfzbiiWmHA.jpg'
test_image(img_path)
img_path = 'https://i0.wp.com/metro.co.uk/wp-content/uploads/2019/08/PRC_80532341.jpg?quality=90&strip=all&zoom=1&resize=644%2C338&ssl=1'
test_image(img_path)
```
## 5 - Other useful functions in Keras (Optional)
Two other basic features of Keras that you'll find useful are:
- `model.summary()`: prints the details of your layers in a table with the sizes of its inputs/outputs
- `plot_model()`: plots your graph in a nice layout. You can even save it as ".png" using SVG() if you'd like to share it on social media ;). It is saved in "File" then "Open..." in the upper bar of the notebook.
Run the following code.
```
model.summary()
plot_model(model, to_file='HappyModel.png')
SVG(model_to_dot(model).create(prog='dot', format='svg'))
```
| github_jupyter |
```
# default_exp timestep
#hide
import sys
[sys.path.append(i) for i in ['.', '..']]
#hide
from nbdev.showdoc import *
%load_ext autoreload
%autoreload 2
#export
from aquacrop.solution import *
from aquacrop.initialize import calculate_HI_linear, calculate_HIGC
from aquacrop.classes import *
import numpy as np
import pandas as pd
```
# timestep
> run one timestep of model
```
#export
# - CC UPDATE: CHANGED
def solution(InitCond,ParamStruct,ClockStruct,weather_step,Outputs, canopy_step=np.nan):
"""
Function to perform AquaCrop-OS solution for a single time step
*Arguments:*\n
`InitCond` : `InitCondClass` : containing current model paramaters
`ClockStruct` : `ClockStructClass` : model time paramaters
`weather_step`: `np.array` : containing P,ET,Tmax,Tmin for current day
`Outputs` : `OutputClass` : object to store outputs
*Returns:*
`NewCond` : `InitCondClass` : containing updated model paramaters
`Outputs` : `OutputClass` : object to store outputs
"""
# Unpack structures
Soil = ParamStruct.Soil
CO2 = ParamStruct.CO2
if ParamStruct.WaterTable == 1:
Groundwater = ParamStruct.zGW[ClockStruct.TimeStepCounter]
else:
Groundwater = 0
P = weather_step[2]
Tmax = weather_step[1]
Tmin = weather_step[0]
Et0 = weather_step[3]
# Store initial conditions in structure for updating %%
NewCond = InitCond
# Check if growing season is active on current time step %%
if ClockStruct.SeasonCounter >= 0:
# Check if in growing season
CurrentDate = ClockStruct.StepStartTime
PlantingDate = ClockStruct.PlantingDates[ClockStruct.SeasonCounter]
HarvestDate = ClockStruct.HarvestDates[ClockStruct.SeasonCounter]
if (PlantingDate <= CurrentDate) and \
(HarvestDate >= CurrentDate) and \
(NewCond.CropMature == False) and \
(NewCond.CropDead == False):
GrowingSeason = True
else:
GrowingSeason = False
# Assign crop, irrigation management, and field management structures
Crop = ParamStruct.Seasonal_Crop_List[ClockStruct.SeasonCounter]
Crop_Name = ParamStruct.CropChoices[ClockStruct.SeasonCounter]
IrrMngt = ParamStruct.IrrMngt
if GrowingSeason == True:
FieldMngt = ParamStruct.FieldMngt
else:
FieldMngt = ParamStruct.FallowFieldMngt
else:
# Not yet reached start of first growing season
GrowingSeason = False
# Assign crop, irrigation management, and field management structures
# Assign first crop as filler crop
Crop = ParamStruct.Fallow_Crop
Crop_Name = "fallow"
Crop.Aer = 5; Crop.Zmin = 0.3
IrrMngt = ParamStruct.FallowIrrMngt
FieldMngt = ParamStruct.FallowFieldMngt
# Increment time counters %%
if GrowingSeason == True:
# Calendar days after planting
NewCond.DAP = NewCond.DAP+1
# Growing degree days after planting
GDD = growing_degree_day(Crop.GDDmethod,Crop.Tupp,Crop.Tbase,Tmax,Tmin)
## Update cumulative GDD counter ##
NewCond.GDD = GDD
NewCond.GDDcum = NewCond.GDDcum+GDD
NewCond.GrowingSeason = True
else:
NewCond.GrowingSeason = False
# Calendar days after planting
NewCond.DAP = 0
# Growing degree days after planting
GDD = 0.3
NewCond.GDDcum = 0
# save current timestep counter
NewCond.TimeStepCounter = ClockStruct.TimeStepCounter
NewCond.P = weather_step[2]
NewCond.Tmax = weather_step[1]
NewCond.Tmin = weather_step[0]
NewCond.Et0 = weather_step[3]
# Run simulations %%
# 1. Check for groundwater table
NewCond,Soil.Profile = check_groundwater_table(ClockStruct.TimeStepCounter,Soil.Profile,
NewCond,ParamStruct.WaterTable,Groundwater)
# 2. Root development
NewCond = root_development(Crop,Soil.Profile,NewCond,GDD,GrowingSeason,ParamStruct.WaterTable)
# 3. Pre-irrigation
NewCond, PreIrr = pre_irrigation(Soil.Profile,Crop,NewCond,GrowingSeason,IrrMngt)
# 4. Drainage
NewCond.th,DeepPerc,FluxOut = drainage(Soil.Profile,NewCond.th,NewCond.th_fc_Adj)
# 5. Surface runoff
Runoff,Infl,NewCond = rainfall_partition(P,NewCond,FieldMngt,
Soil.CN, Soil.AdjCN, Soil.zCN, Soil.nComp,Soil.Profile)
# 6. Irrigation
NewCond, Irr = irrigation(NewCond,IrrMngt,Crop,Soil.Profile,Soil.zTop,GrowingSeason,P,Runoff)
# 7. Infiltration
NewCond,DeepPerc,RunoffTot,Infl,FluxOut = infiltration(Soil.Profile,NewCond,Infl,Irr,IrrMngt.AppEff,FieldMngt,
FluxOut,DeepPerc,Runoff,GrowingSeason)
# 8. Capillary Rise
NewCond,CR = capillary_rise(Soil.Profile,Soil.nLayer,Soil.fshape_cr,NewCond,FluxOut,ParamStruct.WaterTable)
# 9. Check germination
NewCond = germination(NewCond,Soil.zGerm,Soil.Profile,Crop.GermThr,Crop.PlantMethod,GDD,GrowingSeason)
# 10. Update growth stage
NewCond = growth_stage(Crop,NewCond,GrowingSeason)
# - CC UPDATE: CHANGED
# 11. Canopy cover development
NewCond = canopy_cover(Crop,Soil.Profile,Soil.zTop,NewCond,GDD,Et0,GrowingSeason, canopy_step)
# 12. Soil evaporation
NewCond,Es,EsPot = soil_evaporation(ClockStruct.EvapTimeSteps,ClockStruct.SimOffSeason,ClockStruct.TimeStepCounter,
Soil.EvapZmin,Soil.EvapZmax,Soil.Profile,Soil.REW,Soil.Kex,Soil.fwcc,Soil.fWrelExp,Soil.fevap,
Crop.CalendarType,Crop.Senescence,
IrrMngt.IrrMethod,IrrMngt.WetSurf,
FieldMngt,
NewCond,Et0,Infl,P,Irr,GrowingSeason)
# 13. Crop transpiration
Tr,TrPot_NS,TrPot,NewCond,IrrNet = transpiration(Soil.Profile,Soil.nComp,Soil.zTop,
Crop,
IrrMngt.IrrMethod,IrrMngt.NetIrrSMT,
NewCond,Et0,CO2,GrowingSeason,GDD)
# 14. Groundwater inflow
NewCond,GwIn = groundwater_inflow(Soil.Profile,NewCond)
# 15. Reference harvest index
NewCond = HIref_current_day(NewCond,Crop,GrowingSeason)
# 16. Biomass accumulation
NewCond = biomass_accumulation(Crop,NewCond,Tr,TrPot_NS,Et0,GrowingSeason)
# 17. Harvest index
NewCond = harvest_index(Soil.Profile,Soil.zTop,
Crop,
NewCond,Et0,Tmax,Tmin,GrowingSeason)
# 18. Crop yield
if GrowingSeason == True:
# Calculate crop yield (tonne/ha)
NewCond.Y = (NewCond.B/100)*NewCond.HIadj
#print( ClockStruct.TimeStepCounter,(NewCond.B/100),NewCond.HIadj)
# Check if crop has reached maturity
if ((Crop.CalendarType == 1) and (NewCond.DAP >= Crop.Maturity)) \
or ((Crop.CalendarType == 2) and (NewCond.GDDcum >= Crop.Maturity)):
# Crop has reached maturity
NewCond.CropMature = True
elif GrowingSeason == False:
# Crop yield is zero outside of growing season
NewCond.Y = 0
# 19. Root zone water
Wr,_Dr,_TAW,_thRZ = root_zone_water(Soil.Profile,NewCond.Zroot,NewCond.th,Soil.zTop,float(Crop.Zmin),Crop.Aer)
# 20. Update net irrigation to add any pre irrigation
IrrNet = IrrNet+PreIrr
NewCond.IrrNetCum = NewCond.IrrNetCum+PreIrr
# Update model outputs %%
row_day = ClockStruct.TimeStepCounter
row_gs = ClockStruct.SeasonCounter
# Irrigation
if GrowingSeason == True:
if IrrMngt.IrrMethod == 4:
# Net irrigation
IrrDay = IrrNet
IrrTot = NewCond.IrrNetCum
else:
# Irrigation
IrrDay = Irr
IrrTot = NewCond.IrrCum
else:
IrrDay = 0
IrrTot = 0
NewCond.Depletion = _Dr.Rz
NewCond.TAW = _TAW.Rz
# Water contents
Outputs.Water[row_day,:3] = np.array([ClockStruct.TimeStepCounter,GrowingSeason,NewCond.DAP])
Outputs.Water[row_day,3:] = NewCond.th
# Water fluxes
Outputs.Flux[row_day,:] = [ClockStruct.TimeStepCounter,\
ClockStruct.SeasonCounter,NewCond.DAP,Wr,NewCond.zGW,\
NewCond.SurfaceStorage,IrrDay,\
Infl,Runoff,DeepPerc,CR,GwIn,Es,EsPot,Tr,P]
# Crop growth
Outputs.Growth[row_day,:] = [ClockStruct.TimeStepCounter,ClockStruct.SeasonCounter,NewCond.DAP,GDD,\
NewCond.GDDcum,NewCond.Zroot,\
NewCond.CC,NewCond.CC_NS,NewCond.B,\
NewCond.B_NS,NewCond.HI,NewCond.HIadj,\
NewCond.Y]
# Final output (if at end of growing season)
if ClockStruct.SeasonCounter > -1:
if ((NewCond.CropMature == True) \
or (NewCond.CropDead == True) \
or (ClockStruct.HarvestDates[ClockStruct.SeasonCounter] == ClockStruct.StepEndTime )) \
and (NewCond.HarvestFlag == False):
# Store final outputs
Outputs.Final.loc[ClockStruct.SeasonCounter] = [ClockStruct.SeasonCounter,Crop_Name,\
ClockStruct.StepEndTime,ClockStruct.TimeStepCounter,\
NewCond.Y,IrrTot]
# Set harvest flag
NewCond.HarvestFlag = True
return NewCond,ParamStruct,Outputs
#hide
show_doc(solution)
#export
def check_model_termination(ClockStruct,InitCond):
    """
    Decide whether the simulation should stop after the current time-step.

    *Arguments:*\n
    `ClockStruct` : `ClockStructClass` : model time parameters

    `InitCond` : `InitCondClass` : current model state

    *Returns:*

    `ClockStruct` : `ClockStructClass` : updated clock parameters (ModelTermination set)
    """
    # Terminate once the end of the current step reaches the simulation end date
    if ClockStruct.StepEndTime >= ClockStruct.SimulationEndDate:
        ClockStruct.ModelTermination = True
    else:
        ClockStruct.ModelTermination = False

    # Allow an early exit when the crop in the final simulated growing season
    # has already been harvested (matured or died)
    in_last_season = ClockStruct.SeasonCounter == ClockStruct.nSeasons - 1
    if InitCond.HarvestFlag and in_last_season:
        ClockStruct.ModelTermination = True

    return ClockStruct
#hide
show_doc(check_model_termination)
#export
def reset_initial_conditions(ClockStruct,InitCond,ParamStruct,weather):
    """
    Reset initial model conditions for the start of a growing season
    (when running the model over multiple seasons).

    *Arguments:*\n
    `ClockStruct` : `ClockStructClass` : model time parameters

    `InitCond` : `InitCondClass` : current model state (mutated in place)

    `ParamStruct` : parameter container (soil, seasonal crops, field management, CO2)

    `weather`: `np.array` : weather data for simulation period

    *Returns:*

    `InitCond` : `InitCondClass` : reset model state

    `ParamStruct` : parameter container with this season's crop (CO2 adjustment,
    GDD-to-calendar-day conversions) updated
    """
    ## Extract structures for updating ##
    Soil = ParamStruct.Soil
    Crop = ParamStruct.Seasonal_Crop_List[ClockStruct.SeasonCounter]
    FieldMngt = ParamStruct.FieldMngt
    CO2 = ParamStruct.CO2
    CO2_data = ParamStruct.CO2data

    ## Reset counters ##
    InitCond.AgeDays = 0
    InitCond.AgeDays_NS = 0
    InitCond.AerDays = 0
    InitCond.IrrCum = 0
    InitCond.DelayedGDDs = 0
    InitCond.DelayedCDs = 0
    InitCond.PctLagPhase = 0
    InitCond.tEarlySen = 0
    InitCond.GDDcum = 0
    InitCond.DaySubmerged = 0
    InitCond.IrrNetCum = 0
    InitCond.DAP = 0
    InitCond.AerDaysComp = np.zeros(int(Soil.nComp))

    ## Reset state flags ##
    InitCond.PreAdj = False
    InitCond.CropMature = False
    InitCond.CropDead = False
    InitCond.Germination = False
    InitCond.PrematSenes = False
    InitCond.HarvestFlag = False

    ## Reset harvest index state ##
    InitCond.Stage = 1
    InitCond.Fpre = 1
    InitCond.Fpost = 1
    InitCond.fpost_dwn = 1
    InitCond.fpost_upp = 1
    InitCond.HIcor_Asum = 0
    InitCond.HIcor_Bsum = 0
    InitCond.Fpol = 0
    InitCond.sCor1 = 0
    InitCond.sCor2 = 0

    # Growth stage
    InitCond.GrowthStage = 0

    # Transpiration ratio
    InitCond.TrRatio = 1

    ## Reset crop growth state ##
    InitCond.rCor = 1
    InitCond.CC = 0
    InitCond.CCadj = 0
    InitCond.CC_NS = 0
    InitCond.CCadj_NS = 0
    InitCond.B = 0
    InitCond.B_NS = 0
    InitCond.HI = 0
    InitCond.HIadj = 0
    InitCond.CCxAct = 0
    InitCond.CCxAct_NS = 0
    InitCond.CCxW = 0
    InitCond.CCxW_NS = 0
    InitCond.CCxEarlySen = 0
    InitCond.CCprev = 0
    InitCond.ProtectedSeed = 0

    ## Update CO2 concentration ##
    # Use the fixed override when supplied; otherwise look up the value for
    # the calendar year in which this season starts.
    # FIX: was `!= None` -- identity comparison with None should use `is not None`.
    if ParamStruct.CO2concAdj is not None:
        CO2.CurrentConc = ParamStruct.CO2concAdj
    else:
        Yri = pd.DatetimeIndex([ClockStruct.StepStartTime]).year[0]
        CO2.CurrentConc = CO2_data.loc[Yri]

    # CO2 weighting factor: 0 at/below the reference concentration,
    # 1 at/above 550, linear in between.
    CO2conc = CO2.CurrentConc
    CO2ref = CO2.RefConc
    if CO2conc <= CO2ref:
        fw = 0
    elif CO2conc >= 550:
        fw = 1
    else:
        fw = 1-((550-CO2conc)/(550-CO2ref))

    # Determine initial adjustment for elevated CO2
    fCO2 = (CO2conc/CO2ref)/(1+(CO2conc-CO2ref)\
                    *((1-fw)*Crop.bsted+fw*((Crop.bsted*Crop.fsink)\
                        +(Crop.bface*(1-Crop.fsink)))))

    # Interpolate the correction by crop water productivity:
    # WP >= 40 gets no correction, WP <= 20 the full correction.
    if Crop.WP >= 40:
        ftype = 0
    elif Crop.WP <= 20:
        ftype = 1
    else:
        ftype = (40-Crop.WP)/(40-20)

    # Total adjustment
    Crop.fCO2 = 1+ftype*(fCO2-1)

    ## Reset soil water conditions (if not running off-season) ##
    if ClockStruct.SimOffSeason==False:
        # Reset water content to starting conditions.
        # NOTE(review): assigns a reference to thini rather than a copy --
        # confirm downstream code never mutates th in place.
        InitCond.th = InitCond.thini
        # Reset surface storage
        if (FieldMngt.Bunds) and (FieldMngt.zBund > 0.001):
            # Get initial storage between surface bunds
            InitCond.SurfaceStorage = min(FieldMngt.BundWater,FieldMngt.zBund)
        else:
            # No surface bunds
            InitCond.SurfaceStorage = 0

    ## Update crop parameters (if in GDD mode) ##
    if Crop.CalendarType == 2:
        # Extract weather data from planting onwards (column 4 holds the date)
        wdf = weather[weather[:,4]>=ClockStruct.PlantingDates[ClockStruct.SeasonCounter]]
        #wdf = wdf[wdf[:,4]<=ClockStruct.HarvestDates[ClockStruct.SeasonCounter]]
        Tmin = wdf[:,0]
        Tmax = wdf[:,1]

        # Calculate daily growing degree days under the selected GDD method
        if Crop.GDDmethod == 1:
            # Method 1: clamp the daily mean to [Tbase, Tupp]
            Tmean = (Tmax+Tmin)/2
            Tmean[Tmean>Crop.Tupp] = Crop.Tupp
            Tmean[Tmean<Crop.Tbase] = Crop.Tbase
            GDD = Tmean-Crop.Tbase
        elif Crop.GDDmethod == 2:
            # Method 2: clamp Tmax and Tmin individually before averaging
            Tmax[Tmax>Crop.Tupp] = Crop.Tupp
            Tmax[Tmax<Crop.Tbase] = Crop.Tbase
            Tmin[Tmin>Crop.Tupp] = Crop.Tupp
            Tmin[Tmin<Crop.Tbase] = Crop.Tbase
            Tmean = (Tmax+Tmin)/2
            GDD = Tmean-Crop.Tbase
        elif Crop.GDDmethod == 3:
            # Method 3: clamp Tmax fully, cap Tmin from above only, then
            # floor the mean at Tbase
            Tmax[Tmax>Crop.Tupp] = Crop.Tupp
            Tmax[Tmax<Crop.Tbase] = Crop.Tbase
            Tmin[Tmin>Crop.Tupp] = Crop.Tupp
            Tmean = (Tmax+Tmin)/2
            Tmean[Tmean<Crop.Tbase] = Crop.Tbase
            GDD = Tmean-Crop.Tbase

        GDDcum = np.cumsum(GDD)

        assert GDDcum[-1] > Crop.Maturity, f"not enough growing degree days in simulation ({GDDcum[-1]}) to reach maturity ({Crop.Maturity})"
        Crop.MaturityCD = np.argmax((GDDcum>Crop.Maturity))+1
        assert Crop.MaturityCD < 365, "crop will take longer than 1 year to mature"

        # Convert the crop's GDD thresholds into calendar days after sowing
        # 1. Calendar days from sowing to maximum canopy cover
        Crop.MaxCanopyCD = (GDDcum>Crop.MaxCanopy).argmax()+1
        # 2. Calendar days from sowing to end of vegetative growth
        Crop.CanopyDevEndCD = (GDDcum>Crop.CanopyDevEnd).argmax()+1
        # 3. Calendar days from sowing to start of yield formation
        Crop.HIstartCD = (GDDcum>Crop.HIstart).argmax()+1
        # 4. Calendar days from sowing to end of yield formation
        Crop.HIendCD = (GDDcum>Crop.HIend).argmax()+1
        # 5. Duration of yield formation in calendar days
        Crop.YldFormCD = Crop.HIendCD-Crop.HIstartCD
        if Crop.CropType == 3:
            # 1. Calendar days from sowing to end of flowering
            FloweringEnd = (GDDcum>Crop.FloweringEnd).argmax()+1
            # 2. Duration of flowering in calendar days
            Crop.FloweringCD = FloweringEnd-Crop.HIstartCD
        else:
            # Sentinel: flowering duration is undefined for this crop type
            Crop.FloweringCD = -999

        # Update harvest index growth coefficient
        Crop = calculate_HIGC(Crop)

        # Update day to switch to linear HI build-up
        if Crop.CropType == 3:
            # Determine linear switch point and HIGC rate for fruit/grain crops
            Crop = calculate_HI_linear(Crop)
        else:
            # No linear switch for leafy vegetable or root/tuber crops
            Crop.tLinSwitch = 0
            Crop.dHILinear = 0.

    ## Write updated structures back ##
    ParamStruct.Seasonal_Crop_List[ClockStruct.SeasonCounter] = Crop
    ParamStruct.CO2 = CO2

    return InitCond,ParamStruct
#hide
show_doc(reset_initial_conditions)
#export
def update_time(ClockStruct,InitCond,ParamStruct,Outputs,weather):
    """
    Advance the model clock to the next time-step or growing season; once the
    model has terminated, finalise the raw output arrays into labelled
    pandas DataFrames instead.

    *Arguments:*\n
    `ClockStruct` : `ClockStructClass` : model time parameters

    `InitCond` : `InitCondClass` : current model state

    `ParamStruct` : parameter container (crops, soil, CO2)

    `Outputs` : output container holding the Water/Flux/Growth arrays

    `weather`: `np.array` : weather data for simulation period

    *Returns:*

    `ClockStruct` : `ClockStructClass` : updated model time parameters

    `InitCond` : `InitCondClass` : model state (reset if a new season starts)

    `ParamStruct` : updated parameter container

    `Outputs` : output container (arrays become DataFrames on termination)
    """
    ## Update time ##
    if ClockStruct.ModelTermination == False:
        if (InitCond.HarvestFlag == True) \
        and ((ClockStruct.SimOffSeason==False)):
            # End of growing season has been reached and not simulating
            # off-season soil water balance. Advance time to the start of the
            # next growing season.
            # Check if in last growing season
            if ClockStruct.SeasonCounter < ClockStruct.nSeasons-1:
                # Update growing season counter
                ClockStruct.SeasonCounter = ClockStruct.SeasonCounter+1
                # Update time-step counter: jump straight to the next
                # planting date's position in the time span
                #ClockStruct.TimeSpan = pd.Series(ClockStruct.TimeSpan)
                ClockStruct.TimeStepCounter = ClockStruct.TimeSpan.get_loc(ClockStruct.PlantingDates[ClockStruct.SeasonCounter])
                # Update start time of time-step
                ClockStruct.StepStartTime = ClockStruct.TimeSpan[ClockStruct.TimeStepCounter]
                # Update end time of time-step
                ClockStruct.StepEndTime = ClockStruct.TimeSpan[ClockStruct.TimeStepCounter + 1]
                # Reset initial conditions for start of growing season
                InitCond,ParamStruct = reset_initial_conditions(ClockStruct,InitCond,ParamStruct,weather)
        else:
            # Simulation considers off-season, so progress by one time-step
            # (one day)
            # Time-step counter
            ClockStruct.TimeStepCounter = ClockStruct.TimeStepCounter+1
            # Start of time step (beginning of current day)
            #ClockStruct.TimeSpan = pd.Series(ClockStruct.TimeSpan)
            ClockStruct.StepStartTime = ClockStruct.TimeSpan[ClockStruct.TimeStepCounter]
            # End of time step (beginning of next day)
            ClockStruct.StepEndTime = ClockStruct.TimeSpan[ClockStruct.TimeStepCounter + 1]
            # Check if in last growing season
            if ClockStruct.SeasonCounter < ClockStruct.nSeasons-1:
                # Check if upcoming day is the start of a new growing season
                if ClockStruct.StepStartTime == ClockStruct.PlantingDates[ClockStruct.SeasonCounter+1]:
                    # Update growing season counter
                    ClockStruct.SeasonCounter = ClockStruct.SeasonCounter+1
                    # Reset initial conditions for start of growing season
                    InitCond,ParamStruct = reset_initial_conditions(ClockStruct,InitCond,ParamStruct,weather)
    elif ClockStruct.ModelTermination == True:
        # Push the clock one day past the end so StepStartTime lands on the
        # final StepEndTime
        ClockStruct.StepStartTime = ClockStruct.StepEndTime
        ClockStruct.StepEndTime = ClockStruct.StepEndTime + np.timedelta64(1, 'D')

        # Convert the raw output arrays into labelled DataFrames
        Outputs.Flux = pd.DataFrame(Outputs.Flux, columns=["TimeStepCounter",\
                        "SeasonCounter","DAP","Wr","zGW",\
                        "SurfaceStorage","IrrDay",\
                        "Infl","Runoff","DeepPerc","CR",\
                        "GwIn","Es","EsPot","Tr","P"])

        # One 'th' column per soil compartment (columns beyond the first three)
        Outputs.Water =pd.DataFrame(Outputs.Water, columns=["TimeStepCounter","GrowingSeason","DAP"]\
                                    +['th'+str(i) for i in range(1,Outputs.Water.shape[1]-2)])

        Outputs.Growth = pd.DataFrame(Outputs.Growth, columns = ["TimeStepCounter",'SeasonCounter',"DAP",'GDD',\
                        'GDDcum','Zroot',\
                        'CC','CC_NS','B',\
                        'B_NS','HI','HIadj',\
                        'Y'])

    return ClockStruct,InitCond,ParamStruct,Outputs
#hide
show_doc(update_time)
#hide
from nbdev.export import notebook2script
notebook2script()
```
| github_jupyter |
```
import pandas as pd
import dataframe_image as dfi
ssrt_ttests = pd.read_csv('result_csvs/discovery/metric-SSRT_test-ttests.csv', index_col=0)
ssrt_ttests
ttests_df = ssrt_ttests.T.reset_index().rename(columns={'index':'descrip'})
def get_task_name(descrip):
    """Extract the dual-task name from a t-test description string.

    Descriptions that reduce to a bare condition code ('DC'/'DE') describe
    tests pooled across every task; those are labelled 'All'.
    """
    candidate = descrip.split('vs.')[0].split(': ')[0]
    pooled = 'DC' in candidate or 'DE' in candidate
    return 'All' if pooled else candidate
def get_test_name(descrip):
    """Map a t-test description string onto its formal contrast label.

    Any description without an explicit pairwise marker is treated as the
    averaged dual-vs-single contrast.
    """
    labelled_markers = (
        ('DCvs.0', 'Dual Context vs. Single Task'),
        ('DEvs.0', 'Dual Condition vs. Single Task'),
        ('DEvs.DC', 'Dual Condition vs. Dual Context'),
    )
    for marker, label in labelled_markers:
        if marker in descrip:
            return label
    return 'mean(Dual Context, Dual Condition) vs. Single Task'
# Display rank for each contrast in the cleaned tables (1 = first row group)
test_order = {'mean(Dual Context, Dual Condition) vs. Single Task': 1,
              'Dual Context vs. Single Task': 2,
              'Dual Condition vs. Single Task': 3,
              'Dual Condition vs. Dual Context': 4}

# Sortable string keys fixing the task display order ('aa' sorts first)
task_order = {
    'All': 'aa',
    'go_no_go': 'ab',
    'predictable_task_switching': 'ba',
    'cued_task_switching': 'bb',
    'flanker': 'ca',
    'shape_matching': 'cb',
    'n_back': 'da',
    'directed_forgetting': 'db'
}

# Machine-readable task identifiers -> formal names used in presentation tables
formal_tasks = {
    'All': 'All',
    'single_task': 'Single Task',
    'go_no_go': 'Go/No-Go',
    'predictable_task_switching': 'Predictable Task Switching',
    'cued_task_switching': 'Cued Task Switching',
    'flanker': 'Flanker',
    'shape_matching': 'Shape Matching',
    'n_back': 'n-Back',
    'directed_forgetting': 'Directed Forgetting'
}
# Annotate each test row with task/test labels plus numeric sort keys
ttests_df['Dual Task(s)'] = ttests_df['descrip'].map(get_task_name)
ttests_df['Test'] = ttests_df['descrip'].map(get_test_name)
ttests_df['task_order'] = ttests_df['Dual Task(s)'].map(task_order)
ttests_df['test_order'] = ttests_df['Test'].map(test_order)
# Order rows by contrast then task, keep the reporting columns, round for display
clean_df = ttests_df.sort_values(by=['test_order', 'task_order'])[['Test', 'Dual Task(s)', 'mean', 'd', 'p']].round({'mean': 1, 'd': 2, 'p': 3})
clean_df['Dual Task(s)'] = clean_df['Dual Task(s)'].map(formal_tasks)
clean_df = clean_df.rename(columns={'mean': 'Mean'})
clean_df = clean_df.set_index(['Test', 'Dual Task(s)'])
# Export a PNG snapshot and a CSV copy of the cleaned table
dfi.export(clean_df, 'figures/dataset-discovery_metric-SSRT_test_ttests.png')
clean_df.to_csv('cleaned_tables/dataset-discovery_metric-SSRT_test_ttests.csv')
clean_df
def load_display_ttest_table(dataset='discovery', metric='SSRT', mean_round=1):
    """Load a t-test results CSV, tidy it for presentation, export PNG/CSV
    copies, and return the cleaned DataFrame.

    `mean_round` controls the rounding applied to the 'Mean' column
    (metrics on different scales need different precision).
    """
    raw = pd.read_csv(f'result_csvs/{dataset}/metric-{metric}_test-ttests.csv', index_col=0)
    tidy = raw.T.reset_index().rename(columns={'index': 'descrip'})
    # Label each row and attach sort keys for the fixed display order
    tidy['Dual Task(s)'] = tidy['descrip'].map(get_task_name)
    tidy['Test'] = tidy['descrip'].map(get_test_name)
    tidy['task_order'] = tidy['Dual Task(s)'].map(task_order)
    tidy['test_order'] = tidy['Test'].map(test_order)
    ordered = tidy.sort_values(by=['test_order', 'task_order'])
    table = ordered[['Test', 'Dual Task(s)', 'mean', 'd', 'p']].round({'mean': mean_round, 'd': 2, 'p': 3})
    table['Dual Task(s)'] = table['Dual Task(s)'].map(formal_tasks)
    table = table.rename(columns={'mean': 'Mean'})
    table = table.set_index(['Test', 'Dual Task(s)'])
    # Export an image snapshot plus a CSV copy alongside the returned frame
    dfi.export(table, f'figures/dataset-{dataset}_metric-{metric}_test_ttests.png')
    table.to_csv(f'cleaned_tables/dataset-{dataset}_metric-{metric}_test_ttests.csv')
    return table
```
# SSRTs after exclusions
```
load_display_ttest_table(dataset='discovery', metric='SSRT_wThresh_SSDs')
load_display_ttest_table(dataset='discovery', metric='SSRT_wThresh_subs')
```
## SSDs
```
SSD_table_df = load_display_ttest_table(metric='mean_SSD')
SSD_table_df
```
# P(resp | signal)
```
pResp_table_df = load_display_ttest_table(metric='p_respond', mean_round=3)
pResp_table_df
```
# go RT
```
goRT_table_df = load_display_ttest_table(metric='mean_go_RT')
goRT_table_df
load_display_ttest_table(metric='sd_go_RT')
```
# Go ACC
```
load_display_ttest_table(metric='go_acc', mean_round=3)
```
# Simple Task Contrasts
```
dataset='discovery'
def clean_contrast(con_str):
    """Turn a raw contrast code like 'NEG-CON_' into readable 'neg - con'.

    Lowercases, drops trailing underscores, then renders '_' as a word
    separator and '-' as the contrast separator.
    """
    normalized = con_str.lower().rstrip('_')
    readable_parts = [part.replace('_', ' ') for part in normalized.split('-')]
    return ' - '.join(readable_parts)
simple_effects = pd.read_csv(f'result_csvs/{dataset}/simple_task_effects.csv', index_col=0)
simple_effects['Task'] = simple_effects['Task'].map(formal_tasks)
simple_effects = simple_effects.rename(columns={'p-value': 'p'})
simple_effects = simple_effects.query('Contrast!="pos-con_"').copy()
simple_effects['Contrast'] = simple_effects['Contrast'].apply(clean_contrast)
simple_effects['Metric'] = simple_effects['Metric'].apply(clean_contrast)
simple_effects = simple_effects[['Task', 'Contrast', 'Metric', 'Mean', 'd', 'p']].set_index('Task')#.set_index(['Task', 'Contrast', 'Metric'])
simple_effects
simple_rt_df = simple_effects.query('Metric == "rt"').round({'Mean': 1, 'd': 2, 'p': 3})
del simple_rt_df['Metric']
dfi.export(simple_rt_df, f'figures/dataset-{dataset}_metric-simpleContrastsRT_test_ttests.png')
simple_rt_df.to_csv(f'cleaned_tables/dataset-{dataset}_metric-simpleContrastsRT_test_ttests.csv')
simple_rt_df
simple_acc_df = simple_effects.query('Metric == "choice accuracy"').round({'Mean': 3, 'd': 2, 'p': 3})
del simple_acc_df['Metric']
dfi.export(simple_acc_df, f'figures/dataset-{dataset}_metric-simpleContrastsACC_test_ttests.png')
simple_acc_df.to_csv(f'cleaned_tables/dataset-{dataset}_metric-simpleContrastsACC_test_ttests.csv')
simple_acc_df
```
# Summary Means
```
# For each dual-task stop-signal variant: which trial column encodes the
# dual-task condition ('dual_col'), and which of its values correspond to the
# Dual Context (DC) and Dual Condition (DE) analysis cells. 'OTHER' lists the
# remaining values that belong to neither cell.
dual_dict = {
    'stop_signal_with_cued_task_switching': {
        'dual_col': 'cue_task_switch',
        'DC': 'cue_stay_task_stay',
        'DE': 'cue_switch_task_switch',
        'OTHER': ['cue_switch_task_stay']
    },
    'stop_signal_with_directed_forgetting': {
        'dual_col': 'directed_forgetting_condition',
        'DC': 'con',
        'DE': 'neg',
        'OTHER': ['pos']
    },
    'stop_signal_with_flanker': {
        'dual_col': 'flanker_condition',
        'DC': 'congruent',
        'DE': 'incongruent'
    },
    'stop_signal_with_go_no_go': {
        'dual_col': 'go_nogo_condition',
        'DC': 'go',
        'DE': 'nogo'
    },
    # n-back conditions are numeric load levels, hence the floats
    'stop_signal_with_n_back': {
        'dual_col': 'delay_condition',
        'DC': 1.,
        'DE': 2.,
        'OTHER': [3.]
    },
    'stop_signal_with_predictable_task_switching': {
        'dual_col': 'predictable_condition',
        'DC': 'stay',
        'DE': 'switch'
    },
    'stop_signal_with_shape_matching': {
        'dual_col': 'shape_matching_condition',
        'DC': 'control',
        'DE': 'distractor',
        'OTHER': ['DSD', 'SSS']
    },
}
def get_task(descrip):
    """Recover the task name from a condition label like 'n_back_DC'.

    The 'SE' single-task baseline maps to 'single_task'; otherwise the
    trailing condition code (e.g. '_DC') is stripped off.
    """
    if descrip == 'SE':
        return 'single_task'
    pieces = descrip.split('_')
    return '_'.join(pieces[:-1])
def get_cond(descrip):
    """Resolve the concrete task condition for a label like 'n_back_DC'.

    Returns '' for the single-task baseline ('SE'); otherwise looks up the
    trailing condition code ('DC'/'DE') for this task in `dual_dict`.
    """
    if descrip == 'SE':
        return ''
    task_name = get_task(descrip)
    cond_code = descrip.split('_')[-1]
    return dual_dict[f'stop_signal_with_{task_name}'][cond_code]
summary_means = pd.read_csv('result_csvs/discovery/summary_means.csv', index_col=0)
summary_means = summary_means.dropna().reset_index().rename(columns={'index': 'descrip'})
summary_means.insert(0, 'Task', summary_means['descrip'].apply(get_task).map(formal_tasks))
summary_means.insert(1, 'Condition', summary_means['descrip'].apply(get_cond))
del summary_means['descrip']
summary_means = summary_means.loc[:, [c for c in summary_means.columns if all(c!=ignore for ignore in ['omission_count', 'PSS_fail', 'PSS_success'])]].set_index('Task')
summary_means = summary_means.round({'SSRT': 1, 'mean_SSD': 1, 'p_respond': 2, 'mean_go_RT': 1, 'sd_go_RT': 1, 'mean_stopfail_RT': 1, 'sd_stopfail_RT': 1, 'omission_rate': 3, 'go_acc': 2, 'PSS_all': 2})
dfi.export(summary_means, f'figures/dataset-{dataset}_metric-summmaryMeans.png')
summary_means.to_csv(f'cleaned_tables/dataset-{dataset}_metric-summmaryMeans.csv')
summary_means
```
# Validation Tables with Exclusions
```
planned_tests_df = pd.read_csv('result_csvs/dataset-validation_metric-SSRT_wThresh_subs_tests-validationContrasts.csv', index_col=0)
planned_tests_df = planned_tests_df.reset_index().rename(columns={'index': 'descrip'})
def get_planned_contrasts(descrip):
    """Label a planned one-sided contrast description with its formal name."""
    is_de_vs_dc = 'DE > DC' in descrip
    return ('Dual Condition > Dual Context' if is_de_vs_dc
            else 'mean(Dual Context, Dual Condition) > Single Task')
test_order = {'mean(Dual Context, Dual Condition) > Single Task': 1,
'Dual Condition > Dual Context': 2}
planned_tests_df['Dual Task(s)'] = planned_tests_df['descrip'].map(get_task_name)
planned_tests_df['Test'] = planned_tests_df['descrip'].map(get_planned_contrasts)
planned_tests_df['task_order'] = planned_tests_df['Dual Task(s)'].map(task_order)
planned_tests_df['test_order'] = planned_tests_df['Test'].map(test_order)
planned_tests_df['Dual Task(s)'] = planned_tests_df['Dual Task(s)'].map(formal_tasks)
planned_tests_df = planned_tests_df.rename(columns={'mean': 'Mean'})
clean_df = planned_tests_df.sort_values(by=['test_order', 'task_order'])[['Test', 'Dual Task(s)', 'Mean', 'd', 'p']].round({'Mean': 1, 'd': 2, 'p': 3})
clean_df = clean_df.set_index(['Test', 'Dual Task(s)'])
dfi.export(clean_df, f'figures/dataset-validation_metric-SSRT_wThresh_subs_test_validationContrasts.png')
clean_df.to_csv(f'cleaned_tables/dataset-validation_metric-SSRT_wThresh_subs_test_validationContrasts.csv')
clean_df
planned_tests_df = pd.read_csv('result_csvs/dataset-validation_metric-SSRT_wThresh_SSDs_tests-validationContrasts.csv', index_col=0)
planned_tests_df = planned_tests_df.reset_index().rename(columns={'index': 'descrip'})
planned_tests_df['Dual Task(s)'] = planned_tests_df['descrip'].map(get_task_name)
planned_tests_df['Test'] = planned_tests_df['descrip'].map(get_planned_contrasts)
planned_tests_df['task_order'] = planned_tests_df['Dual Task(s)'].map(task_order)
planned_tests_df['test_order'] = planned_tests_df['Test'].map(test_order)
planned_tests_df['Dual Task(s)'] = planned_tests_df['Dual Task(s)'].map(formal_tasks)
planned_tests_df = planned_tests_df.rename(columns={'mean': 'Mean'})
clean_df = planned_tests_df.sort_values(by=['test_order', 'task_order'])[['Test', 'Dual Task(s)', 'Mean', 'd', 'p']].round({'Mean': 1, 'd': 2, 'p': 3})
clean_df = clean_df.set_index(['Test', 'Dual Task(s)'])
dfi.export(clean_df, f'figures/dataset-validation_metric-SSRT_wThresh_SSDs_test_validationContrasts.png')
clean_df.to_csv(f'cleaned_tables/dataset-validation_metric-SSRT_wThresh_SSDs_test_validationContrasts.csv')
clean_df
# Build the planned-contrast table for the combined ('all') dataset,
# mirroring the two dataset-validation cells above.
planned_tests_df = pd.read_csv('result_csvs/dataset-all_metric-SSRT_wThresh_subs_tests-validationContrasts.csv', index_col=0)
planned_tests_df = planned_tests_df.reset_index().rename(columns={'index': 'descrip'})
# Annotate each row with task/test labels plus numeric sort keys
planned_tests_df['Dual Task(s)'] = planned_tests_df['descrip'].map(get_task_name)
planned_tests_df['Test'] = planned_tests_df['descrip'].map(get_planned_contrasts)
planned_tests_df['task_order'] = planned_tests_df['Dual Task(s)'].map(task_order)
planned_tests_df['test_order'] = planned_tests_df['Test'].map(test_order)
planned_tests_df['Dual Task(s)'] = planned_tests_df['Dual Task(s)'].map(formal_tasks)
planned_tests_df = planned_tests_df.rename(columns={'mean': 'Mean'})
clean_df = planned_tests_df.sort_values(by=['test_order', 'task_order'])[['Test', 'Dual Task(s)', 'Mean', 'd', 'p']].round({'Mean': 1, 'd': 2, 'p': 3})
clean_df = clean_df.set_index(['Test', 'Dual Task(s)'])
# BUGFIX: the figure filename previously read '...wThresh_subss...' (doubled
# 's'), inconsistent with the CSV name below and the analogous exports above.
dfi.export(clean_df, f'figures/dataset-all_metric-SSRT_wThresh_subs_test_validationContrasts.png')
clean_df.to_csv(f'cleaned_tables/dataset-all_metric-SSRT_wThresh_subs_test_validationContrasts.csv')
clean_df
```
| github_jupyter |
```
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from keras.utils import np_utils
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D, AveragePooling2D
from keras.optimizers import Adam
import glob
from PIL import Image
import keras
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.utils import np_utils
from keras.layers.core import Flatten, Dense, Dropout, Lambda
def plots(ims, figsize=(12,6), rows=1, interp=False, titles=None):
    """Display a batch of images in a grid with `rows` rows.

    ndarray batches are cast to uint8, and channels-first layouts (where the
    last axis is not 3) are transposed to channels-last for imshow.
    `interp` toggles matplotlib's default interpolation versus 'none';
    `titles`, when given, supplies one subplot title per image.
    """
    if type(ims[0]) is np.ndarray:
        ims = np.array(ims).astype(np.uint8)
        # Channels-first batch (N, C, H, W) -> channels-last (N, H, W, C)
        if ims.shape[-1] != 3:
            ims = ims.transpose((0, 2, 3, 1))
    fig = plt.figure(figsize=figsize)
    cols = len(ims) // rows
    for idx, im in enumerate(ims):
        axes = fig.add_subplot(rows, cols, idx + 1)
        axes.axis('Off')
        if titles is not None:
            axes.set_title(titles[idx], fontsize=16)
        plt.imshow(im, interpolation=None if interp else 'none')
from keras.preprocessing import image
BATCH_SIZE = 64
PATH="data_/"
def get_fit_sample():
    """Return one 200-image batch from the validation directory, used to fit
    dataset-wide normalization statistics on an ImageDataGenerator.

    NOTE(review): assumes PATH+'valid' holds at least 200 images and that a
    single batch is representative of the whole dataset -- confirm.
    """
    gen = image.ImageDataGenerator()
    sample_batches = gen.flow_from_directory(PATH+'valid', target_size=(224,224),
            class_mode='categorical', shuffle=False, batch_size=200)
    # Labels are discarded; only the pixel data is needed for fitting
    imgs, labels = next(sample_batches)
    return imgs
gen = image.ImageDataGenerator(featurewise_std_normalization=True)
gen.fit(get_fit_sample())
val_batches = gen.flow_from_directory(PATH+'valid', target_size=(224,224),
class_mode='categorical', shuffle=True, batch_size=BATCH_SIZE)
gen = image.ImageDataGenerator(featurewise_std_normalization=True, horizontal_flip=True, channel_shift_range=100, zoom_range=0.5)
gen.fit(get_fit_sample())
batches = gen.flow_from_directory(PATH+'train', target_size=(224,224),
class_mode='categorical', shuffle=True, batch_size=BATCH_SIZE)
#imgs,labels = next(batches)
#plots(imgs[:2])
CLASSES = 2
INPUT_SHAPE = (224,224,3)
model = Sequential()
# Block 1
model.add(Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1', input_shape=INPUT_SHAPE))
model.add(Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2'))
model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool'))
# Block 2
model.add(Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1'))
model.add(Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2'))
model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool'))
# Block 3
model.add(Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1'))
model.add(Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2'))
model.add(Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3'))
model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool'))
# Block 4
model.add(Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1'))
model.add(Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2'))
model.add(Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3'))
model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool'))
# Block 5
model.add(Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1'))
model.add(Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2'))
model.add(Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3'))
model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool'))
# Classification block
model.add(Flatten(name='flatten'))
model.add(Dense(4096, activation='relu', name='fc1'))
model.add(Dropout(0.5))
model.add(Dense(4096, activation='relu', name='fc2'))
model.add(Dropout(0.5))
model.add(Dense(CLASSES, activation='softmax', name='predictions'))
from keras.optimizers import SGD
# SGD with momentum; `decay` here is Keras' per-update learning-rate decay
sgd = SGD(lr=0.01, decay=0.0005, momentum=0.9, nesterov=False)
# NOTE(review): 'mean_squared_error' with a softmax output is unusual for
# classification -- 'categorical_crossentropy' is the conventional choice.
# Confirm whether MSE was intentional before changing.
model.compile(optimizer=sgd, loss='mean_squared_error', metrics=['accuracy'])
%%time
# 100 batches/epoch for 10 epochs, validating on 10 batches each epoch
hist = model.fit_generator(batches, steps_per_epoch=100, epochs=10, validation_data=val_batches, validation_steps=10)
model.save('ConvNet-D-vgg16-aug.h5')
# http://qiita.com/TypeNULL/items/4e4d7de11ab4361d6085
loss = hist.history['loss']
val_loss = hist.history['val_loss']
nb_epoch = len(loss)
plt.plot(range(nb_epoch), loss, marker='.', label='loss')
plt.plot(range(nb_epoch), val_loss, marker='.', label='val_loss')
plt.legend(loc='best', fontsize=10)
plt.grid()
plt.xlabel('epoch')
plt.ylabel('loss')
plt.show()
```
| github_jupyter |
# Deep Q-Network (DQN)
---
In this notebook, you will implement a DQN agent with OpenAI Gym's LunarLander-v2 environment.
### 1. Import the Necessary Packages
```
import gym
import random
import torch
import numpy as np
from collections import deque
import matplotlib.pyplot as plt
%matplotlib inline
```
### 2. Instantiate the Environment and Agent
Initialize the environment in the code cell below.
```
env = gym.make('LunarLander-v2')
env.seed(0)
print('State shape: ', env.observation_space.shape)
print('Number of actions: ', env.action_space.n)
```
Please refer to the instructions in `Deep_Q_Network.ipynb` if you would like to write your own DQN agent. Otherwise, run the code cell below to load the solution files.
```
from dqn_agent import Agent
agent = Agent(state_size=8, action_size=4, seed=0)
# watch an untrained agent
state = env.reset()
for j in range(200):
action = agent.act(state)
env.render()
state, reward, done, _ = env.step(action)
if done:
break
env.close()
```
### 3. Train the Agent with DQN
Run the code cell below to train the agent from scratch. You are welcome to amend the supplied parameter values in the function to see whether you can get better performance!
Alternatively, you can skip to the next step below (**4. Watch a Smart Agent!**), to load the saved model weights from a pre-trained agent.
```
def dqn(n_episodes=2000, max_t=1000, eps_start=1.0, eps_end=0.01, eps_decay=0.995):
    """Train the global `agent` on the global `env` with Deep Q-Learning.

    Saves the local Q-network weights to 'checkpoint.pth' once the rolling
    100-episode average score reaches 200.

    Params
    ======
        n_episodes (int): maximum number of training episodes
        max_t (int): maximum number of timesteps per episode
        eps_start (float): starting value of epsilon, for epsilon-greedy action selection
        eps_end (float): minimum value of epsilon
        eps_decay (float): multiplicative factor (per episode) for decreasing epsilon
    """
    all_scores = []                       # score from every episode
    recent_scores = deque(maxlen=100)     # rolling window of the last 100 scores
    eps = eps_start                       # current exploration rate
    for episode in range(1, n_episodes + 1):
        state = env.reset()
        episode_score = 0
        for _ in range(max_t):
            action = agent.act(state, eps)
            next_state, reward, done, _ = env.step(action)
            agent.step(state, action, reward, next_state, done)
            state = next_state
            episode_score += reward
            if done:
                break
        recent_scores.append(episode_score)
        all_scores.append(episode_score)
        # Anneal exploration, never dropping below eps_end
        eps = max(eps_end, eps_decay * eps)
        print('\rEpisode {}\tAverage Score: {:.2f}'.format(episode, np.mean(recent_scores)), end="")
        if episode % 100 == 0:
            # Leave a permanent progress line every 100 episodes
            print('\rEpisode {}\tAverage Score: {:.2f}'.format(episode, np.mean(recent_scores)))
        if np.mean(recent_scores) >= 200.0:
            print('\nEnvironment solved in {:d} episodes!\tAverage Score: {:.2f}'.format(episode - 100, np.mean(recent_scores)))
            torch.save(agent.qnetwork_local.state_dict(), 'checkpoint.pth')
            break
    return all_scores
scores = dqn()
# plot the scores
fig = plt.figure()
ax = fig.add_subplot(111)
plt.plot(np.arange(len(scores)), scores)
plt.ylabel('Score')
plt.xlabel('Episode #')
plt.show()
```
### 4. Watch a Smart Agent!
In the next code cell, you will load the trained weights from file to watch a smart agent!
```
# load the weights from file
agent.qnetwork_local.load_state_dict(torch.load('checkpoint.pth'))
for i in range(3):
state = env.reset()
for j in range(200):
action = agent.act(state)
env.render()
state, reward, done, _ = env.step(action)
if done:
break
env.close()
```
### 5. Explore
In this exercise, you have implemented a DQN agent and demonstrated how to use it to solve an OpenAI Gym environment. To continue your learning, you are encouraged to complete any (or all!) of the following tasks:
- Amend the various hyperparameters and network architecture to see if you can get your agent to solve the environment faster. Once you build intuition for the hyperparameters that work well with this environment, try solving a different OpenAI Gym task with discrete actions!
- You may like to implement some improvements such as prioritized experience replay, Double DQN, or Dueling DQN!
- Write a blog post explaining the intuition behind the DQN algorithm and demonstrating how to use it to solve an RL environment of your choosing.
| github_jupyter |
```
# Load the normalized tables
import pickle
import sys
sys.setrecursionlimit(10000)
with open("normal_tables1_1.txt", "rb") as fp:
nt1 = pickle.load(fp)
with open("normal_tables1_2.txt", "rb") as fp:
nt2 = pickle.load(fp)
with open("normal_tables2_1.txt", "rb") as fp:
nt3 = pickle.load(fp)
with open("normal_tables2_2.txt", "rb") as fp:
nt4 = pickle.load(fp)
# # split tables again
import itertools
from collections import defaultdict
def splitDict(d):
    """Split *d* into two dicts: the first half of its items (in insertion
    order) and the remainder.  The first dict gets len(d) // 2 entries."""
    entries = list(d.items())
    half = len(entries) // 2
    return dict(entries[:half]), dict(entries[half:])
# save the split tables to normalize by running 4 scripts concurrently
import pickle
# https://stackoverflow.com/a/26496899
def default_to_regular(d):
    """Recursively convert a (possibly nested) defaultdict into a plain dict.

    Non-defaultdict inputs are returned unchanged.
    (Adapted from https://stackoverflow.com/a/26496899.)
    """
    if not isinstance(d, defaultdict):
        return d
    return {key: default_to_regular(value) for key, value in d.items()}
def splitDictWrite(d, name1, name2):
    """Halve *d* with splitDict and pickle each half (converted to a plain
    dict so no defaultdict factories are serialized) to name1 / name2."""
    first_half, second_half = splitDict(d)
    for payload, target in ((first_half, name1), (second_half, name2)):
        with open(target, "wb") as handle:
            pickle.dump(default_to_regular(payload), handle)
def loadDict(d):
    """Unpickle and return the object stored in the file named *d*."""
    with open(d, "rb") as handle:
        return pickle.load(handle)
# Write each loaded table dict back out in halves so four normalization
# scripts can process them concurrently.
splitDictWrite(nt1,"nt1_1","nt1_2")
splitDictWrite(nt2,"nt2_1","nt2_2")
splitDictWrite(nt3,"nt3_1","nt3_2")
splitDictWrite(nt4,"nt4_1","nt4_2")
# Reload the eight halves to verify the round trip.
nt1_1 = loadDict("nt1_1")
nt1_2 = loadDict("nt1_2")
nt2_1 = loadDict("nt2_1")
nt2_2 = loadDict("nt2_2")
nt3_1 = loadDict("nt3_1")
nt3_2 = loadDict("nt3_2")
nt4_1 = loadDict("nt4_1")
nt4_2 = loadDict("nt4_2")
# len(nt1) + len(nt2) + len(nt3) + len(nt4) == \
# Sanity check: the halves together should cover every original entry
# (the notebook displays this total; compare with the commented sum above).
len(nt1_1) + len(nt1_2) + len(nt2_1) + len(nt2_2) + len(nt3_1) + len(nt3_2) + len(nt4_1) + len(nt4_2)
# number of articles containing tables with disambiguated entities (186)
# len(nt1) + len(nt2) + len(nt3) + len(nt4)
# set up SPARQL endpoint for wikidata
from SPARQLWrapper import SPARQLWrapper, JSON
# Shared endpoint client, reused by every query helper below.
sparql = SPARQLWrapper("https://query.wikidata.org/sparql")
# resolve Wikidata entity from title
from wikitables.client import Client
client = Client("en")  # English-Wikipedia-backed client

def getWikidata(title):
    # Map a Wikipedia page title to its Wikidata entity id (the result is
    # prefixed with "wd:" by callers, so it is expected to be a bare Q-id).
    return client.fetch_wikidata(title)

def retrieveExtract(article):
    # Fetch the plain-text extract (summary) of a Wikipedia article.
    return client.fetch_extract(article)
# Features (as suggested by the authors (Emir Munoz & Aidan Hogan) of Wikitables Triples Paper)
# =============
# Table Features
# (-) 1 Number of rows
# (-) 2 Number of columns
# (-) 3 Total relations extracted
# Column Features
# (+) 4 Potential relations
# (+) 5 Unique potential relations
# (+) 6 Entity relatedness (new)
# Predicate Features
# (+) 7 Normalized unique subject count / Normalized unique object count
# Cell Features
# (-) 8 Number of entities in subject cell
# (-) 9 Number of entities in object cell
# (-) 10 String length in subject cell
# (-) 11 String length in object cell
# Predicate/Column Features
# (+) 12 Maximum between Jaro-Winkler distance and dice coefficient
# (+) 13 Number of rows where the relation holds
# (+) 14 Number of relations in KB for all possible relations
# (+) 15 Number of relations in KB for all unique relations
# Where (+) signifies a positive feature & (-) signifies a negative feature
# Predicate Features
# subject and object must be prefixed with "wd:"
# if the object is a value it must be double quoted
# getPredicates("wd:Q69","'2830'")
# we get subject -> object, and its inverse object -> subject
def getPredicates(subject, obj, number=False):
    """Query Wikidata for direct predicates linking *subject* and *obj*.

    Entities must be prefixed with "wd:"; literal values must be quoted.
    When *number* is True, only subject -> obj predicates are queried and a
    single list of predicate URIs is returned.  Otherwise both directions
    are queried via UNION and a pair (subject->obj, obj->subject) of URI
    lists is returned.
    """
    if number:
        # we do a different query and return only the non-inverse
        sparql.setQuery("""SELECT * WHERE
{
%s ?p %s .
FILTER(STRSTARTS(str(?p), "http://www.wikidata.org/prop/direct/"))
SERVICE wikibase:label { bd:serviceParam wikibase:language "en" }
}""" % (subject, obj))
        sparql.setReturnFormat(JSON)
        results = sparql.query().convert()
        predicates = list()
        for row in results["results"]["bindings"]:
            if row["p"]["type"] == "uri":
                predicates.append(row["p"]["value"])
        return predicates
    sparql.setQuery("""SELECT DISTINCT ?p1 ?p2
{
{%s ?p1 %s
FILTER(STRSTARTS(str(?p1), "http://www.wikidata.org/prop/direct/"))}
UNION {%s ?p2 %s
FILTER(STRSTARTS(str(?p2), "http://www.wikidata.org/prop/direct/"))}
SERVICE wikibase:label { bd:serviceParam wikibase:language "en" }
}""" % (subject, obj, obj, subject))
    sparql.setReturnFormat(JSON)
    results = sparql.query().convert()
    predicates_so = list()
    predicates_os = list()
    for row in results["results"]["bindings"]:
        # Each UNION row binds either ?p1 or ?p2, never both.  Guard the two
        # keys separately: the original shared try/except raised on the
        # missing "p1" key and silently dropped every ?p2-only row.
        binding = row.get("p1")
        if binding is not None and binding["type"] == "uri":
            predicates_so.append(binding["value"])
        binding = row.get("p2")
        if binding is not None and binding["type"] == "uri":
            predicates_os.append(binding["value"])
    return predicates_so, predicates_os
def checkTriple(subject, predicate, obj):
    """Return True iff the triple (subject predicate obj) exists in
    Wikidata, via a SPARQL ASK query."""
    sparql.setQuery("""ASK
{
%s %s %s .
SERVICE wikibase:label { bd:serviceParam wikibase:language "en" }
}""" % (subject, predicate, obj))
    sparql.setReturnFormat(JSON)
    # ASK queries return a single top-level boolean field.
    return sparql.query().convert()["boolean"]
# (+) 13,14,15 Normalized unique subject count / Normalized unique object count
# Predicate/ Column Features
# (+) 12 Max of dice coeffient and jaro-winkler distance
from pyjarowinkler import distance
def dice_coefficient(a,b):
if not len(a) or not len(b): return 0.0
""" quick case for true duplicates """
if a == b: return 1.0
""" if a != b, and a or b are single chars, then they can't possibly match """
if len(a) == 1 or len(b) == 1: return 0.0
""" use python list comprehension, preferred over list.append() """
a_bigram_list = [a[i:i+2] for i in range(len(a)-1)]
b_bigram_list = [b[i:i+2] for i in range(len(b)-1)]
a_bigram_list.sort()
b_bigram_list.sort()
# assignments to save function calls
lena = len(a_bigram_list)
lenb = len(b_bigram_list)
# initialize match counters
matches = i = j = 0
while (i < lena and j < lenb):
if a_bigram_list[i] == b_bigram_list[j]:
matches += 2
i += 1
j += 1
elif a_bigram_list[i] < b_bigram_list[j]:
i += 1
else:
j += 1
score = float(matches)/float(lena + lenb)
return score
def feature11(string1, string2):
    """Feature 12 score: the larger of the Jaro-Winkler distance and the
    Dice bigram coefficient between the two strings."""
    jaro_winkler = distance.get_jaro_distance(string1, string2, winkler=True, scaling=0.1)
    dice = dice_coefficient(string1, string2)
    return max(jaro_winkler, dice)
# (+) 12 No of rows that contain the subject and object
# def feature12(predicate):
# sparql.setQuery("""SELECT * WHERE
# {
# ?s %s ?o .
# FILTER(STRSTARTS(str(?s), "http://www.wikidata.org/entity/"))
# FILTER(STRSTARTS(str(?o), "http://www.wikidata.org/entity/"))
# SERVICE wikibase:label { bd:serviceParam wikibase:language "en" }
# }""" % (predicate))
# sparql.setReturnFormat(JSON)
# results = sparql.query().convert()
# return len(results["results"]["bindings"])
# functions for entity relatedness using API call
import tagme
# Read the GCUBE API token from a local file named "tagme" (first line).
with open("tagme", 'r') as file:
    token = file.readline().strip()
tagme.GCUBE_TOKEN = token
def disambig(text, min_rho=None):
    """Annotate *text* with TagMe and return {mention: entity_title}.

    Only annotations whose confidence score exceeds *min_rho* are kept;
    a None threshold keeps everything.
    """
    response = tagme.annotate(text)
    mention_to_entity = dict()
    for annotation in response.annotations:
        if min_rho is None or annotation.score > min_rho:
            mention_to_entity[str(annotation.mention)] = annotation.entity_title
    return mention_to_entity
# Get relatedness between a pair of entities specified by title.
# rels = tagme.relatedness_title(("Barack Obama", "Italy"))
# print("Obama and italy have a semantic relation of", rels.relatedness[0].rel)
import simplemediawiki
wiki = simplemediawiki.MediaWiki('https://www.wikidata.org/w/api.php')
def findProperty(string, amount=None):
    """Search Wikidata for properties matching *string* and return their ids.

    Returns at most *amount* property ids (all hits when None); the API call
    itself is capped at 10 results.
    """
    results = wiki.call({'action': 'wbsearchentities', 'search': string, 'type': 'property', 'language': 'en', 'limit': 10})
    property_ids = list()
    for rank, hit in enumerate(results['search']):
        # Stop once the requested number of ids has been collected.
        if amount and amount == rank:
            break
        property_ids.append(hit['id'])
    return property_ids
# iterate through normalized and regular table
import re
def find_number(string):
    """Return every maximal run of digits in *string*, as a list of strings.

    Uses a raw-string pattern: the original '\\d+' relied on Python passing
    unknown escapes through, which raises a DeprecationWarning on modern
    interpreters.
    """
    return re.findall(r'\d+', string)
import itertools
def get_pairs(l):
    """Return every unordered pair of elements of *l* (combination order)."""
    all_pairs = itertools.combinations(l, 2)
    return list(all_pairs)
def flatten(l):
    """Collapse one level of nesting: [[a, b], [c]] -> [a, b, c]."""
    return [element for sublist in l for element in sublist]
def getCol(value, row):
    """Locate *value* inside *row*.

    Returns the column index for a scalar cell, an (index, sub-index) tuple
    when the matching cell is a list, or None (after printing a diagnostic)
    when the value is absent.
    """
    for col, cell in enumerate(row):
        if isinstance(cell, list):
            for sub, candidate in enumerate(cell):
                if value == candidate:
                    return (col, sub)
        elif value == cell:
            return col
    # Fall-through: the value was not found anywhere in the row.
    print(value, row)
def getSOPred(string1, string2, fnt, existingtriples, row=None, section_entities=None, pos=None):
    """Find Wikidata predicates linking two table-cell strings.

    Each string that appears in *fnt* (the row's disambiguated cells) or in
    *section_entities* is resolved to a "wd:" entity; anything else is
    treated as a quoted literal.  Predicates found in the knowledge base are
    recorded as (subject_pos, predicate, object_pos) triples in
    *existingtriples* — the original body appended to the *global*
    ``existing_triples`` instead of this parameter, which only worked by
    coincidence of the notebook's naming.  Returns the set of predicate
    URIs found (empty when both strings are literals; the original returned
    None here, which crashed the callers' ``set.union``).

    NOTE(review): position tuples use the loop variable ``x`` (current row
    index) from the enclosing notebook cell's scope; this function only
    works while that global is set — confirm before reusing elsewhere.
    """
    # Avoid mutable default arguments: fresh empty lists per call.
    if row is None:
        row = list()
    if section_entities is None:
        section_entities = list()
    e1 = string1.replace("'", '"')
    e2 = string2.replace("'", '"')
    n1 = False  # True -> string1 is a plain literal, not an entity
    n2 = False  # True -> string2 is a plain literal, not an entity
    predicates = set()
    if string1 not in fnt and string1 not in section_entities:
        # not an actual entity; quote it as a literal
        e1 = "'" + e1 + "'"
        n1 = True
    else:
        # getWikidata may raise on dead links that were never parsed out;
        # fall back to treating the string as a literal.
        try:
            e1 = "wd:" + getWikidata(e1)
        except Exception:
            e1 = "'" + e1 + "'"
            n1 = True
    if string2 not in fnt and string2 not in section_entities:
        # not an actual entity; quote it as a literal
        e2 = "'" + e2 + "'"
        n2 = True
    else:
        try:
            e2 = "wd:" + getWikidata(e2)
        except Exception:
            e2 = "'" + e2 + "'"
            n2 = True
    if n1 and n2:
        # Two literals can never be joined through the knowledge base.
        return predicates
    if not n1 and not n2:
        pred_so, pred_os = getPredicates(e1, e2)
        if pred_so:
            # e1 ?p e2
            s = pos if pos else (x, getCol(str(string1), row))
            o = (x, getCol(str(string2), row))
            for p1 in pred_so:
                existingtriples.append((s, p1, o))
                predicates.add(p1)
        if pred_os:
            # e2 ?p e1
            o = pos if pos else (x, getCol(str(string1), row))
            s = (x, getCol(str(string2), row))
            for p1 in pred_os:
                existingtriples.append((s, p1, o))
                predicates.add(p1)
    elif n1 and not n2:
        pred = getPredicates(e2, e1, True)
        if pred:
            # e2 ?p e1 (entity subject, literal object)
            o = pos if pos else (x, getCol(str(string1), row))
            s = (x, getCol(str(string2), row))
            for p1 in pred:
                existingtriples.append((s, p1, o))
                predicates.add(p1)
    elif not n1 and n2:
        pred = getPredicates(e1, e2, True)
        if pred:
            # e1 ?p e2 (entity subject, literal object)
            s = pos if pos else (x, getCol(str(string1), row))
            o = (x, getCol(str(string2), row))
            for p1 in pred:
                existingtriples.append((s, p1, o))
                predicates.add(p1)
    return predicates
# helper functions to generate candidate triples
def checkPositionValid(pos, table):
    """Return True when pos = (row, (col, sub)) indexes an existing cell of *table*."""
    try:
        table[pos[0]][pos[1][0]][pos[1][1]]
        return True
    except (IndexError, KeyError, TypeError):
        # Out-of-range index, or a position shape the table cannot satisfy.
        # (Narrowed from a bare except, which also swallowed KeyboardInterrupt.)
        return False
def generateAllTuples(t, table):
    """Expand key *t* into the set of concrete positions it can occupy.

    The article key and section keys are "virtual" and map to a single
    row-less position; any other key is paired with every data row of the
    table (row 0 is the header) whose cell actually exists.
    """
    positions = set()
    if t == (None, 'article') or t[1] == 'sub':
        positions.add((None, t))
        return positions
    for row_idx in range(1, len(table)):
        candidate = (row_idx, t)
        if checkPositionValid(candidate, table):
            positions.add(candidate)
    return positions
def getElementsPosition(t):
    """Collect the position component (second element) of every tuple in *t*."""
    return {item[1] for item in t}
def getValueFromTable(pos, table, k, section_entities):
    """Dereference position *pos* to its value.

    The article position yields *k*, a section position yields the matching
    entry of *section_entities*, and any other position indexes *table*.
    """
    if pos == (None, (None, 'article')):
        # Virtual position standing for the article itself.
        return k
    if pos[1][1] == 'sub':
        # Virtual position standing for a disambiguated section entity.
        return section_entities[pos[1][0]]
    return table[pos[0]][pos[1][0]][pos[1][1]]
# triple candidate generation
def generateCandidates(xextracted,xextractedtable):
    """Suggest candidate triples from the already-extracted ones.

    xextracted: list of (subject_pos, predicate_uri, object_pos) triples
    confirmed against the knowledge base; xextractedtable: the table they
    came from.  When a (position-key, predicate) pairing holds in more than
    half of the data rows, every other compatible position is proposed as a
    candidate.  Returns (candidate_triples, candidate_predicates), with the
    confirmed triples removed from the candidates.
    """
    scount = defaultdict(int)        # (subject key, predicate) -> occurrence count
    ocount = defaultdict(int)        # (predicate, object key) -> occurrence count
    scountset_o = defaultdict(set)   # objects seen per (subject key, predicate)
    ocountset_s = defaultdict(set)   # subjects seen per (predicate, object key)
    candidate_triples = set()
    candidate_preds = set()
    # count the occurences of subject and object if > threshold then suggest candidate triples & extract features
    for t in set(xextracted):
        scount[(t[0][1], t[1].split('/')[-1])]+=1
        scountset_o[(t[0][1], t[1].split('/')[-1])].add(t[2])
        ocount[(t[1].split('/')[-1], t[2][1])]+=1
        ocountset_s[(t[1].split('/')[-1], t[2][1])].add(t[0])
    PROPERTY_STRING = "http://www.wikidata.org/prop/direct/"
    # do some set theory to find the candidate triple
    # generate all possible sets and then take away the current sets
    for k,v in scount.items():
        # keep pairings that hold on more than half of the data rows
        # (len - 1 excludes the header row)
        if (v / (len(xextractedtable) - 1)) > 0.5:
            spos = generateAllTuples(k[0], xextractedtable)
            opos = getElementsPosition(scountset_o[k])
            allo = set()
            for x in opos:
                allo = allo.union(generateAllTuples(x, xextractedtable))
            # generate cartesian product between spos and allo and insert predicate into the middle
            combos = list(itertools.product(spos, allo))
            #print(combos)
            # add to list of candidate triples
            for c in combos:
                # keep pairs on the same row, or involving a virtual (row-less) position
                if c[0][0] == c[1][0] or c[0][0] == None or c[1][0] == None:
                    candidate_triples.add((c[0], PROPERTY_STRING + k[1], c[1]))
                    candidate_preds.add(PROPERTY_STRING + k[1])
    # this might do the same thing, but haven't proved it yet
    for k,v in ocount.items():
        if (v / (len(xextractedtable) - 1)) > 0.5:
            opos = generateAllTuples(k[1], xextractedtable)
            spos = getElementsPosition(ocountset_s[k])
            alls = set()
            for x in spos:
                alls = alls.union(generateAllTuples(x, xextractedtable))
            combos = list(itertools.product(alls, opos))
            for c in combos:
                if c[0][0] == c[1][0] or c[0][0] == None or c[1][0] == None:
                    candidate_triples.add((c[0], PROPERTY_STRING + k[0], c[1]))
                    candidate_preds.add(PROPERTY_STRING + k[0])
    # drop candidates that were already extracted
    candidate_triples = candidate_triples - set(xextracted)
    return candidate_triples, candidate_preds
def getFeatures3457(len_cols, temp_table, article, existing_triples, section_entites=None):
    """Compute table features 3, 4, 5 and 7 from the candidate (s, o) pairs.

    Candidate pairs are built per data row: ordered cell<->cell pairs,
    cell<->article pairs (both orders) and, when section entities are given,
    cell<->section-entity pairs (both orders).

    Returns (feature3, feature4, feature5, feature7, rcols):
      feature3 - total candidate relations
      feature4 - candidates minus already-confirmed (s, o) pairs
      feature5 - unique candidates minus confirmed pairs
      feature7 - normalized unique subject count / normalized unique object count
      rcols    - the raw list of candidate pairs
    """
    # The original body read the globals ``section_entities`` and ``len_rows``
    # instead of its own inputs; bind them locally so the function actually
    # uses what it is given.  (The misspelled parameter name is kept for
    # keyword-call compatibility.)
    section_entities = section_entites
    len_rows = len(temp_table)
    rcols = []
    for row_idx in range(1, len_rows):
        cells = flatten(temp_table[row_idx])
        rcols += list(itertools.permutations(cells, 2))
        rcols += list(itertools.product(cells, [article]))
        rcols += list(itertools.product([article], cells))
        if section_entities is not None:
            rcols += list(itertools.product(cells, section_entities))
            rcols += list(itertools.product(section_entities, cells))
    unique_subjects = set()
    unique_objects = set()
    all_subjects = list()
    all_objects = list()
    # Tally subjects/objects over the *unique* candidate pairs.
    for subj, obj in set(rcols):
        unique_subjects.add(subj)
        unique_objects.add(obj)
        all_subjects.append(subj)
        all_objects.append(obj)
    # (s, o) pairs already confirmed in the KB, resolved to cell values.
    etriples = set()
    for triple in existing_triples:
        etriples.add((getValueFromTable(triple[0], temp_table, article, section_entities),
                      getValueFromTable(triple[2], temp_table, article, section_entities)))
    print(etriples)  # debug output kept from the original notebook
    feature3 = len(rcols)
    feature4 = len(rcols) - len(etriples)
    feature5 = len(set(rcols)) - len(etriples)
    feature7 = (len(unique_subjects) / len(all_subjects)) / (len(unique_objects) / len(all_objects))
    return feature3, feature4, feature5, feature7, rcols
# Single-article debug run: process only 'Db4o' from the nt1_2 shard.
tables = nt1_2['Db4o']
k = 'Db4o'
# for k,v in tables.items():
# print(k)
# for k1,v1 in v.items():
# One-iteration dummy loop standing in for the commented-out full iteration above.
for v in range(1):
    print(k)
    for k1,v1 in tables.items():
        # hit table
        old_table = v1["old_table"]
        new_table = v1["new_table"]
        section_title = v1["section_title"]
        len_rows = len(old_table)
        len_cols = len(old_table[0])
        temp_table = [[0 for y in range(len_cols)] for x in range(len_rows)]
        # # look at relationships between header (predicate) and cell (subject or object)
        # # header case
        # # disambiguate header (get first 3 results from wikidata search of header string)
        # header_pred = dict()
        # for y in range(len_cols):
        # header_pred[str(y)] = findProperty(old_table[0][y], 3)
        # populate temp table (copy of new table) with values from original table if it hasn't been disambiguated
        existing_triples = list() # contains list of (pos,predicate, pos)
        predicates = set()
        for x in range(len_rows):
            for y in range(len_cols):
                if not new_table[x][y]:
                    # try to make the obj a number
                    n = find_number(old_table[x][y])
                    if n:
                        temp_table[x][y] = [str(n[0])]
                    else:
                        temp_table[x][y] = [str(old_table[x][y]).replace("'",'"')]
                else:
                    temp_table[x][y] = new_table[x][y]
            # disambiguated entities of this row (non-empty cells of new_table)
            fnt = [x1 for x1 in flatten(new_table[x]) if x1!='']
            if x > 0:
                # query predicates between every pair of cell values in this data row
                pairs = get_pairs([x1 for x1 in flatten(temp_table[x]) if x1!=''])
                for p in pairs:
                    predicates = predicates.union(getSOPred(p[0], p[1], fnt, existing_triples, temp_table[x]))
        # look at relationships between article title and cells
        # look at relationships betweeen section title (if able to be disambiguated) and cells
        # first disambiguate section title by combining article title, summary and section title
        section_entities = list()
        d = disambig(k + retrieveExtract(k) + section_title, 0.1)
        for original,entitytitle in d.items():
            if original in section_title:
                section_entities.append(entitytitle)
        # skip the header row
        for x in range(1,len_rows):
            cells = [x1 for x1 in flatten(temp_table[x]) if x1!='']
            fnt = [x1 for x1 in flatten(new_table[x]) if x1!='']
            for c in cells:
                # match each cell with the header and possibly subsection title
                if section_entities:
                    for ise, se in enumerate(section_entities):
                        predicates = predicates.union(getSOPred(se, c, fnt, existing_triples, temp_table[x], section_entities, (None,(ise, "sub"))))
                    feature3, feature4, feature5, feature7, allsocombos = getFeatures3457(len_cols, temp_table, k, existing_triples, section_entities)
                else:
                    feature3, feature4, feature5, feature7, allsocombos = getFeatures3457(len_cols, temp_table, k, existing_triples)
                predicates = predicates.union(getSOPred(k, c, fnt, existing_triples, row = temp_table[x], pos = (None, (None, "article"))))
        # based on existing triples, suggest candidate triples
        # if the existing predicate happens > 50% in the same indices we suggest it
        # xextractedtable = temp_table
        # xextracted = existing_triples
        feature1 = len(temp_table) - 1  # number of data rows
        feature2 = len(temp_table[0])   # number of columns
        # retrive feature 6 later
        # features 8-12 are calculated later
        # feature 13,14,15
        uniquepotential, genpreds = generateCandidates(existing_triples,temp_table)
# NOTE(review): the two statements below were probably separate notebook cells;
# they re-run the feature extraction and display the collected triples.
getFeatures3457(len_cols, temp_table, k, existing_triples, section_entities)
existing_triples
import csv
# now that table has been recreated with only disambiguated entities
# let the magic happen
# extract all the triples and features
def addTripleCSV(d, file, mode):
    """Write one triple dict {'id', 'subject', 'predicate', 'object'} to *file*.

    Mode 'w' starts a fresh file and writes the header row first; mode 'a'
    appends a data row to an existing file.
    """
    fieldnames = ['id', 'subject', 'predicate', 'object']
    with open(file, mode, newline='') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        if mode == 'w':
            writer.writeheader()
        writer.writerow(d)
```
| github_jupyter |
# Off-policy Learning in Contextual Bandits
** *
This IPython notebook illustrates the usage of the [contextualbandits](https://www.github.com/david-cortes/contextualbandits) package's `offpolicy` module through a simulation with a public dataset.
** Small note: if the TOC here is not clickable or the math symbols don't show properly, try visualizing this same notebook from nbviewer following [this link](http://nbviewer.jupyter.org/github/david-cortes/contextualbandits/blob/master/example/offpolicy_learning.ipynb). **
** *
### Sections
[1. Problem description](#p1)
[2. Algorithms](#p2)
[3. Experiments](#p3)
[4. References](#p4)
** *
<a id="p1"></a>
## 1. Problem description
For a general description of the contextual bandits problem, see the first part of the package's guide [Online Contextual Bandits](http://nbviewer.jupyter.org/github/david-cortes/contextualbandits/blob/master/example/online_contextual_bandits.ipynb).
Unlike the `online` module mentioned above, this module deals with a slightly different problem: once we have collected (biased) data following some policy – consisting in features observed, actions chosen, rewards observed, and ideally scores or estimated rewards probabilities that the exploration policy predicted – how can we now build a different and perhaps better policy? (also known as "Off-policy learning").
This module focuses on building non-online, exploit-only policies, and unfortunately, the algorithms don't extend nicely to the case of classifiers that allow some exploration. It assumes a stationary exploration policy (non-online) for the methods to work well in theory, but in practice they can also work with data collected through online policies whose reward estimates shift over time (albeit performance is worse).
In the multi-label case with full information, this is straight-forward - we just fit the algorithm on all the data and then make predictions, but this logic doesn't extend so well to censored labels (i.e. knowing only whether the label that was chosen as correct), as the better the policy, the more biased the data becomes, and new policies might just end up imitating the old one.
The simplest approach would be to build a One-Vs-Rest classifier taking for each class only the data consisting of that action (if we know that observations always have at most 1 label, we can also take all the other data that got a reward and was not the action for which the classifier is being built as negative examples, but this is not a typical situation). However, we can also make use of the estimated rewards from the policy that generated the data in order to come up with less biased algorithms.
The approaches implemented here are just for comparison purposes. In practice, the naive One-Vs-Rest approach can perform better than the approaches described here, especially for the case of discrete rewards, and typical settings such as online advertising call for online algorithms.
** *
<a id="p2"></a>
## 2. Algorithms
The methods implemented here are:
* `OffsetTree` (see _The offset tree for learning with partial labels_).
* `DoublyRobustEstimator` (see _Doubly robust policy evaluation and learning_). Note that this method is meant for continuous rewards and doesn't work very well with discrete rewards. Also note that it is very computationally demanding.
In the author's own words:
Offset Tree:
> The Offset Tree uses the following trick, which is easiest to understand in the case of k = 2 choices . When the observed reward $r_a$ of choice a is low, we essentially pretend that the other choice $a′$ was chosen and a different reward $r_{a′}$ was observed. Precisely how this is done and why, is driven by the regret analysis.
Doubly-Robust Estimator:
> Informally, the estimator uses (estimated_reward) as a baseline and if there is data available, a correction is applied. We will see that our estimator is accurate if at least one of the estimators (reward_estimate) and (probability_estimate), is accurate, hence the name doubly robust.
Just like in the online module, these are also meta-heuristics that take a binary classification algorithm supporting sample weights as a base oracle. For the case of `DoublyRobustEstimator`, which converts the problem into cost-sensitive classification, you need to pass a regressor rather than a classifier when using method `RegressionOneVsRest` (picked by default).
(For more information, see the references section at the end)
** *
<a id="p3"></a>
## 3. Experiments
The experiments here are run on the same Bibtext dataset as in the guide for the online module.
In order to simulate a stationary (and biased) data collection policy, I will fit a logistic regression model with a sample of the **fully-labeled** data, then let it choose actions for some more data, and take those actions and rewards as input for a new policy, along with the estimated reward probabilities for the actions that were chosen. Note that this is done just to choose the actions of the data on which further policies will be built, and the full labels will not be available to these policies.
Some of the online algorithms in this package also allow an `output_score` argument to their `predict` function which can be used to generate this type of data (for more information see the individual documentation of each policy, e.g. `help(contextualbandits.online.BootstrappedTS)`).
The evaluation is done with a test set with the full labels available. For the problem of evaluating policies based on partially-labeled data see the other IPython notebook [Policy Evaluation in Contextual Bandits](http://nbviewer.jupyter.org/github/david-cortes/contextualbandits/blob/master/example/policy_evaluation.ipynb).
** *
Loading the Bibtex dataset again:
```
import pandas as pd, numpy as np, re
from sklearn.preprocessing import MultiLabelBinarizer
def parse_data(file_name):
    """Parse the Bibtex multi-label dataset file.

    Each data line is "label[,label...] feat:val feat:val ..." (lines with
    no leading digits carry only features).  Returns (features, y): a dense
    feature matrix and the binary label-indicator matrix produced by
    MultiLabelBinarizer.
    """
    features = list()
    labels = list()
    with open(file_name, 'rt') as f:
        f.readline()  # skip the header line
        for l in f:
            # Raw-string patterns: '\s' in a plain string is a deprecated escape.
            if bool(re.search(r"^[0-9]", l)):
                g = re.search(r"^(([0-9]{1,2},?)+)\s(.*)$", l)
                labels.append([int(i) for i in g.group(1).split(",")])
                # SECURITY NOTE: eval() builds a dict literal from file text.
                # Acceptable only for this trusted local dataset — never run
                # this on untrusted input (use ast.literal_eval or a parser).
                features.append(eval("{" + re.sub(r"\s", ",", g.group(3)) + "}"))
            else:
                # No labels on this line: only features.
                l = l.strip()
                labels.append([])
                features.append(eval("{" + re.sub(r"\s", ",", l) + "}"))
    features = pd.DataFrame.from_dict(features).fillna(0).iloc[:, :].values
    mlb = MultiLabelBinarizer()
    y = mlb.fit_transform(labels)
    return features, y
features, y = parse_data("data0206.txt")
print(features.shape)
print(y.shape)
```
Simulating a stationary exploration policy:
```
from sklearn.linear_model import LogisticRegression
# the 'explorer' policy will be fit with this small sample of the rows
st_seed = 0
end_seed = 100
# then it will choose actions for this larger sample
st_exploration = 0
end_exploration = 1000
# the new policy will be evaluated with a separate test set
st_test = 1001
end_test = 1893
# separating the covariates data for each case
Xseed = features[st_seed:end_seed, :]
Xexplore_sample = features[st_exploration:end_exploration, :]
Xtest = features[st_test:end_test, :]
nchoices = y.shape[1]
# now constructing an exploration policy as explained above, with fully-labeled data
explorer = LogisticRegression()
explorer.fit(Xseed, np.argmax(y[st_seed:end_seed], axis=1))
# letting the exploration policy choose actions for the new policy input
actions_explore_sample = explorer.predict(Xexplore_sample)
# reward is 1 when the chosen arm is among the observation's true labels
rewards_explore_sample = y[st_exploration:end_exploration, :]\
    [np.arange(end_exploration - st_exploration), actions_explore_sample]
# extracting the probabilities it estimated
# map each external class label to the classifier's internal column index
ix_internal_actions = {j:i for i,j in enumerate(explorer.classes_)}
ix_internal_actions = [ix_internal_actions[i] for i in actions_explore_sample]
ix_internal_actions = np.array(ix_internal_actions)
# probability the explorer assigned to the action it actually took
prob_actions_explore = explorer.predict_proba(Xexplore_sample)[np.arange(Xexplore_sample.shape[0]),
                                                               ix_internal_actions]
```
Naïve solution: separate classifiers using subsets of the data:
```
from contextualbandits.online import SeparateClassifiers
from sklearn.linear_model import LogisticRegression
# Naive baseline: one independent classifier per arm, trained only on the
# rows where that arm was chosen (no prior, no smoothing).
new_policy = SeparateClassifiers(base_algorithm=LogisticRegression(), nchoices=y.shape[1],
                                 beta_prior=None, smoothing=None)
new_policy.fit(X=Xexplore_sample, a=actions_explore_sample, r=rewards_explore_sample)
# Mean test-set reward: 1 whenever the predicted arm is a true label.
mean_reward_naive = np.mean(y[st_test:end_test, :]\
    [np.arange(end_test - st_test), new_policy.predict(Xtest)])
print("Test set mean reward - Separate Classifiers: ", mean_reward_naive)
```
Idea from this same package: use a beta prior when the sample sizes are small:
```
from contextualbandits.online import SeparateClassifiers
from sklearn.linear_model import LogisticRegression
# Variant 1: beta prior to regularize arms with few observations.
new_policy = SeparateClassifiers(base_algorithm=LogisticRegression(), nchoices=y.shape[1],
                                 beta_prior="auto")
new_policy.fit(X=Xexplore_sample, a=actions_explore_sample, r=rewards_explore_sample)
mean_reward_beta = np.mean(y[st_test:end_test, :]\
    [np.arange(end_test - st_test), new_policy.predict(Xtest)])
print("Test set mean reward - Separate Classifiers + Prior: ", mean_reward_beta)
from contextualbandits.online import SeparateClassifiers
from sklearn.linear_model import LogisticRegression
# Variant 2: (1, 2) smoothing of predicted probabilities instead of a prior.
new_policy = SeparateClassifiers(base_algorithm=LogisticRegression(), nchoices=y.shape[1],
                                 beta_prior=None, smoothing = (1,2))
new_policy.fit(X=Xexplore_sample, a=actions_explore_sample, r=rewards_explore_sample)
mean_reward_sm = np.mean(y[st_test:end_test, :]\
    [np.arange(end_test - st_test), new_policy.predict(Xtest)])
print("Test set mean reward - Separate Classifiers + Smoothing: ", mean_reward_sm)
```
Now trying the offset tree method:
```
from contextualbandits.offpolicy import OffsetTree
from sklearn.linear_model import LogisticRegression
# Offset Tree additionally needs the exploration policy's propensities (p).
new_policy = OffsetTree(base_algorithm=LogisticRegression(), nchoices=y.shape[1])
new_policy.fit(X=Xexplore_sample, a=actions_explore_sample, r=rewards_explore_sample, p=prob_actions_explore)
mean_reward_ot = np.mean(y[st_test:end_test, :][np.arange(end_test - st_test), new_policy.predict(Xtest)])
print("Test set mean reward - Offset Tree technique: ", mean_reward_ot)
```
Performance is quite similar to how it was before, and it didn't manage to beat the naive method. However, this is quite an unfair comparison, as there are many arms that the exploration policy didn't choose even once, so the offset tree has to sometimes decide between classes for which no data is available.
** *
The doubly-robust method can also be tried for the case of discrete rewards, where the reward estimates are the same probability estimates from the base algorithm. However, its performance is not as good:
```
from contextualbandits.offpolicy import DoublyRobustEstimator
from sklearn.linear_model import LogisticRegression, Ridge
# Doubly-robust estimator, plain (regression one-vs-rest, no prior/smoothing).
new_policy = DoublyRobustEstimator(base_algorithm = Ridge(),
                                   reward_estimator = LogisticRegression(),
                                   nchoices = y.shape[1],
                                   method = 'rovr', beta_prior = None, smoothing = None)
new_policy.fit(X=Xexplore_sample, a=actions_explore_sample, r=rewards_explore_sample, p=prob_actions_explore)
mean_reward_dr = np.mean(y[st_test:end_test, :][np.arange(end_test - st_test), new_policy.predict(Xtest)])
print("Test set mean reward - Doubly-Robust Estimator: ", mean_reward_dr)
# Same estimator with a beta prior on the reward estimates.
new_policy = DoublyRobustEstimator(base_algorithm = Ridge(),
                                   reward_estimator = LogisticRegression(),
                                   nchoices = y.shape[1],
                                   method = 'rovr', beta_prior = "auto", smoothing = None)
new_policy.fit(X=Xexplore_sample, a=actions_explore_sample, r=rewards_explore_sample, p=prob_actions_explore)
mean_reward_dr_prior = np.mean(y[st_test:end_test, :][np.arange(end_test - st_test), new_policy.predict(Xtest)])
print("Test set mean reward - Doubly-Robust Estimator + Prior: ", mean_reward_dr_prior)
# Same estimator with (1, 2) smoothing instead of a prior.
new_policy = DoublyRobustEstimator(base_algorithm = Ridge(),
                                   reward_estimator = LogisticRegression(),
                                   nchoices = y.shape[1],
                                   method = 'rovr', beta_prior = None, smoothing = (1, 2))
new_policy.fit(X=Xexplore_sample, a=actions_explore_sample, r=rewards_explore_sample, p=prob_actions_explore)
mean_reward_dr_sm = np.mean(y[st_test:end_test, :][np.arange(end_test - st_test), new_policy.predict(Xtest)])
print("Test set mean reward - Doubly-Robust Estimator + Smoothing: ", mean_reward_dr_sm)
```
Unfortunately, it also didn't manage to improve the estimates - which is not surprising given that the method is meant for the continuous reward scenario rather than the discrete rewards as shown here.
```
import matplotlib.pyplot as plt, pandas as pd
import seaborn as sns
from pylab import rcParams
%matplotlib inline
# Summarize the test-set mean rewards of the four approaches in a bar chart.
results = pd.DataFrame({
    'Off-policy Learning Method' : ['Naive', 'Naive + Prior', 'Naive + Smoothing', 'Offset Tree'],
    'Test set mean reward' : [mean_reward_naive, mean_reward_beta, mean_reward_sm, mean_reward_ot]
})
sns.set(font_scale = 1.3)
rcParams['figure.figsize'] = 22, 7
sns.barplot(x = "Off-policy Learning Method", y="Test set mean reward", data=results)
plt.title('Off-policy Learning on Bibtex Dataset\nBase Classifier is Logistic Regression')
plt.show()
```
** *
<a id="p4"></a>
## References:
* Beygelzimer, A., & Langford, J. (2009, June). The offset tree for learning with partial labels. In Proceedings of the 15th ACM SIGKDD international conference on Knowledge discovery and data mining (pp. 129-138). ACM.
* Dudík, M., Langford, J., & Li, L. (2011). Doubly robust policy evaluation and learning. arXiv preprint arXiv:1103.4601.
* Dudík, M., Erhan, D., Langford, J., & Li, L. (2014). Doubly robust policy evaluation and optimization. Statistical Science, 485-511.
| github_jupyter |
# Duel of sorcerers
You are witnessing an epic battle between two powerful sorcerers: Gandalf and Saruman. Each sorcerer has 10 spells of variable power in their mind and they are going to throw them one after the other. The winner of the duel will be the one who wins more of those clashes between spells. Spells are represented as a list of 10 integers whose value equals the power of the spell.
```
# Spell power lists: index i is clash i, the higher power wins that clash.
gandalf = [10, 11, 13, 30, 22, 11, 10, 33, 22, 22]
saruman = [23, 66, 12, 43, 12, 10, 44, 23, 12, 17]
```
For example:
1. The first clash is won by Saruman: 10 against 23, wins 23
2. The second clash wins Saruman: 11 against 66, wins 66
3. etc.
You will create two variables, one for each sorcerer, where the sum of clashes won will be stored. Depending on which variable is greater at the end of the duel, you will show one of the following three results on the screen:
* Gandalf wins
* Saruman wins
* Tie
<img src="images/content_lightning_bolt_big.jpg" width="400">
## Solution
```
# Assign spell power lists to variables
gandalf = [10, 11, 13, 30, 22, 11, 10, 33, 22, 22]
saruman = [23, 66, 12, 43, 12, 10, 44, 23, 12, 17]

# Assign 0 to each variable that stores the victories
gandalf_wins = 0
saruman_wins = 0

# Execution of spell clashes: the higher power wins each clash;
# equal powers score for neither sorcerer.
for gandalf_spell, saruman_spell in zip(gandalf, saruman):
    if gandalf_spell > saruman_spell:
        gandalf_wins += 1
    elif saruman_spell > gandalf_spell:
        saruman_wins += 1

# We check who has won, not forgetting the possibility of a draw.
# Print the result based on the winner.
if gandalf_wins > saruman_wins:
    print("Gandalf wins")
elif saruman_wins > gandalf_wins:
    print("Saruman wins")
else:
    print("Tie")
```
## Goals
1. Treatment of lists
2. Use of **for loop**
3. Use of conditional **if-elif-else**
4. Use of the functions **range(), len()**
5. Print
## Bonus
1. Spells now have a name and there is a dictionary that relates that name to a power.
2. A sorcerer wins if he succeeds in winning 3 spell clashes in a row.
3. Average of each of the spell lists.
4. Standard deviation of each of the spell lists.
```
# Spell name -> power lookup used to resolve each clash.
POWER = {
    'Fireball': 50,
    'Lightning bolt': 40,
    'Magic arrow': 10,
    'Black Tentacles': 25,
    'Contagion': 45
}
# Each sorcerer's sequence of named spells (one clash per index).
gandalf = ['Fireball', 'Lightning bolt', 'Lightning bolt', 'Magic arrow', 'Fireball',
           'Magic arrow', 'Lightning bolt', 'Fireball', 'Fireball', 'Fireball']
saruman = ['Contagion', 'Contagion', 'Black Tentacles', 'Fireball', 'Black Tentacles',
           'Lightning bolt', 'Magic arrow', 'Contagion', 'Magic arrow', 'Magic arrow']
```
Good luck!
```
# 1. Spells now have a name and there is a dictionary that relates that name to a power.
# variables
POWER = {
'Fireball': 50,
'Lightning bolt': 40,
'Magic arrow': 10,
'Black Tentacles': 25,
'Contagion': 45
}
gandalf = ['Fireball', 'Lightning bolt', 'Lightning bolt', 'Magic arrow', 'Fireball',
'Magic arrow', 'Lightning bolt', 'Fireball', 'Magic arrow', 'Fireball']
saruman = ['Contagion', 'Contagion', 'Black Tentacles', 'Fireball', 'Black Tentacles',
'Lightning bolt', 'Magic arrow', 'Contagion', 'Magic arrow', 'Magic arrow']
# Assign spell power lists to variables
# 2. A sorcerer wins if he succeeds in winning 3 spell clashes in a row.
# Execution of spell clashes
# check for 3 wins in a row
# check the winner
# 3. Average of each of the spell lists.
# 4. Standard deviation of each of the spell lists.
```
| github_jupyter |
<a href="https://colab.research.google.com/github/byKakayo/calc_num_py/blob/main/KarenKaoriYonea_10349471_2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
#1. Mínimos Quadrados
```
#Bibliotecas
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
#Dados
x = np.array([1872, 1890, 1900, 1920, 1940, 1950, 1960, 1970, 1980, 1991, 2000, 2010])
y = np.array([9.9, 14.3, 17.4, 30.6, 41.2, 51.9, 70.9, 94.5, 121.1, 146.9, 169.5, 190.7])
#Visualização dos dados
#Definir tamanho
plt.figure(figsize=(8,5))
#Plot dos dados
sns.scatterplot(x,y, label="Dados");
#Título
plt.title('Censo-IBGE')
#Definir eixos
plt.xlabel('Ano')
plt.ylabel('População(milhões)')
#Mínimos Quadrados - recebe os dados e o grau do polinômio
def Least_Squares(x, y, grau):
    """Fit a polynomial of degree `grau` to the points (x, y) by least squares.

    Returns the coefficients p in ascending order: p[0] + p[1]*x + ... + p[grau]*x**grau.
    """
    # Vandermonde design matrix: A[i, k] = x[i]**k
    A = np.vander(x, grau + 1, increasing=True)
    # Solve the normal equations (A^T A) p = A^T y.
    # np.linalg.solve is numerically more stable (and cheaper) than
    # explicitly forming np.linalg.inv(A.T @ A) as the original did.
    p = np.linalg.solve(A.T @ A, A.T @ y)
    # Return the coefficients
    return p
#Erro quadrático
def deviation(y, yp):
    """Sum of squared residuals between observed values y and predictions yp."""
    residuals = y - yp
    return np.sum(residuals ** 2)
#Gerar pontos
def f(x, p):
    """Evaluate the polynomial with ascending coefficients p at the points x."""
    y = np.zeros(len(x))
    # The original looped over range(0, len(p+1)); that only worked by accident
    # because p is an ndarray, where len(p + 1) == len(p). The intended bound
    # is simply len(p).
    for i in range(len(p)):
        y += p[i] * (x ** i)
    return y
#Para polinômios de grau 1 até 6
for i in range(1, 7):
    print("Polinômio de grau: ", i)
    # Least-squares coefficients for degree i (ascending order)
    p = Least_Squares(x, y, i)
    # Squared error between the data and the fitted polynomial
    print("Erro:", deviation(y,f(x, p)))
    # Grid of years on which to evaluate the fit; last grid point is 2021
    xp = np.linspace(1870, 2021, 15)
    # Fitted values on the grid
    yp = f(xp, p)
    # Plot the fit only for degrees 1, 2 and 5
    if i in (1,2,5):
        sns.lineplot(xp, yp, label="Grau "+str(i))
    # Estimated population in 2021 (xp[-1] == 2021)
    print("População em 2021:", yp[-1], "\n")
```
a) Analisando as aproximações polinomiais dos dados, podemos inferir que a melhor aproximação é a de grau 5, pois aumentando o grau temos uma diminuição do erro quadrático, porém ao chegarmos em grau 6 temos um brusco aumento do erro. Sendo assim, é possível afirmar que não necessariamente um aumento do grau leve a uma melhora na aproximação.
b) Para a melhor aproximação encontrada temos que a população estimada do Brasil em 2021 seria: 206,56 milhões.
ps: As populações estimadas para os outros graus menores que 7 estão no código acima.
#2. Equações Diferenciais Ordinárias
```
#Bibliotecas
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
#Runge-Kutta ordem 4 para modelo Kermack-McKendrick
def Runge_Kutta_4(f, y0, h, n):
    """Integrate the autonomous system y' = f(y) with classical 4th-order Runge-Kutta.

    f  : callable mapping a state vector to its derivative vector
    y0 : initial state (sequence of floats)
    h  : step size
    n  : total integration time; int(n // h) states are stored
    Returns an array of shape (int(n // h), len(y0)) holding the trajectory,
    with row 0 equal to y0.
    """
    steps = int(n // h)
    res = np.zeros((steps, len(y0)))
    y = np.asarray(y0, dtype=float)
    res[0, :] = y
    for t in range(1, steps):
        # Classical RK4 stages; each k already contains the factor h.
        # (The original computed every stage as h*f(y + k_prev*h/2), which
        # double-scales the increment by h and also uses a half step for k4;
        # that is not the RK4 scheme and degrades the order of accuracy.)
        k1 = h * f(y)
        k2 = h * f(y + k1 / 2)
        k3 = h * f(y + k2 / 2)
        k4 = h * f(y + k3)
        y = y + (k1 + 2 * k2 + 2 * k3 + k4) / 6
        res[t, :] = y
    return res
#Função que retorna o dia do pico e o número de infectados
def pico(result, h):
    """Return (day, infected count) at the first step where S <= R.

    Scans the trajectory row by row; if the susceptible compartment never
    drops to or below the removed compartment, returns (0, 0).
    """
    for step, state in enumerate(result):
        susceptible, infected, removed = state[0], state[1], state[2]
        if susceptible <= removed:
            return round(step * h), round(infected)
    return 0, 0
#Função com os parametros do item a
def fa(yt):
    """Kermack-McKendrick (SIR) derivatives for item (a): a = 0.1, b = 0.0007."""
    recovery_rate, contact_rate = 0.1, 0.0007
    s, i, _ = yt
    infections = contact_rate * s * i
    recoveries = recovery_rate * i
    return np.array([-infections, infections - recoveries, recoveries])
#Parâmetros iniciais
S0, I0, R0 = 999, 1, 0
h, n = 0.001, 70
#Visualização dos dados
plt.figure(figsize=(8,5))
plt.title('Evolução da epidemia')
plt.xlabel('Dias')
plt.ylabel('Indivíduos')
#Resultado item a
result = Runge_Kutta_4(fa, (S0, I0, R0), h, n)
print("Pico da epidemia: dia", pico(result, h)[0], "\nTotal de infectados:", pico(result, h)[1])
sns.lineplot(np.linspace(0, 70, result.shape[0]), result[:,0], label="Suscetíveis");
sns.lineplot(np.linspace(0, 70, result.shape[0]), result[:,1], label="Infectados");
sns.lineplot(np.linspace(0, 70, result.shape[0]), result[:,2], label="Removidos");
```
a) Pico da pandemia no dia 14 com 565 infectados.
```
#Função com os parametros do item b
def fb(yt):
    """Kermack-McKendrick (SIR) derivatives for item (b): a = 0.1, b = 0.0004."""
    recovery_rate, contact_rate = 0.1, 0.0004
    s, i, _ = yt
    infections = contact_rate * s * i
    recoveries = recovery_rate * i
    return np.array([-infections, infections - recoveries, recoveries])
#Parâmetros iniciais
S0, I0, R0 = 999, 1, 0
h, n = 0.001, 70
#Visualização dos dados
plt.figure(figsize=(8,5))
plt.title('Evolução da epidemia')
plt.xlabel('Dias')
plt.ylabel('Indivíduos')
#Resultado item b
result = Runge_Kutta_4(fb, (S0, I0, R0), h, n)
print("Pico da epidemia: dia", pico(result, h)[0], "\nTotal de infectados:", pico(result, h)[1])
sns.lineplot(np.linspace(0, 70, result.shape[0]), result[:,0], label="Suscetíveis");
sns.lineplot(np.linspace(0, 70, result.shape[0]), result[:,1], label="Infectados");
sns.lineplot(np.linspace(0, 70, result.shape[0]), result[:,2], label="Removidos");
```
b) Em comparação ao item a) há um deslocamento do pico da epidemia para a direita e uma diminuição da quantidade de infectados nesse pico. No sentido real, esse deslocamento seria favorável no controle da epidemia, no sentido de gestão, há mais tempo até que ocorra o pico e menos casos simultâneos.
#3. Zeros de funções e sistemas não lineares
```
#Função que descreve a elevação
def p(x):
    """Terrain elevation profile: quartic polynomial of x, in Horner form."""
    return x * (13.6 + x * (-18 + x * (7.7 - x)))
#Função que descreve o lançamento do projetil
def q(x):
    """Projectile trajectory: downward parabola launched from height 0.75."""
    return 0.75 + x * (5 - x)
#Função que calcula a distância entre o projétil e a elevação
def dist_pq(x):
    """Signed vertical gap between the terrain p(x) and the trajectory q(x)."""
    elevation = p(x)
    trajectory = q(x)
    return elevation - trajectory
#Derivada da função de distância
def dx_dist_pq(x):
    """Exact derivative of dist_pq.

    dist_pq(x) = p(x) - q(x) = -x**4 + 7.7*x**3 - 17*x**2 + 8.6*x - 0.75,
    so the derivative is -4*x**3 + 23.1*x**2 - 34*x + 8.6.
    (The original returned -4*x**3 + 23.1*x**2 - 38*x + 18.6, which is not
    the derivative of p - q. Newton's method still converged because both the
    update and the stopping test use dist_pq itself, just less efficiently.)
    """
    return -4*x**3 + 23.1*x**2 - 34*x + 8.6
#Método da bissecção
def Bisection_Method(f, a, b, tol, kmax):
    """Bisection root search for f on [a, b].

    Stops when consecutive midpoints differ by at most tol or after kmax
    iterations. Returns (approximate root, number of iterations performed).
    Assumes f changes sign on [a, b].
    """
    midpoint = (a + b) / 2
    error = 2 * tol  # force at least one iteration
    iterations = 0
    while error > tol and iterations < kmax:
        # Keep the half-interval that still contains the sign change.
        if f(a) * f(midpoint) < 0:
            b = midpoint
        else:
            a = midpoint
        previous = midpoint
        midpoint = (a + b) / 2
        error = abs(midpoint - previous)
        iterations += 1
    return midpoint, iterations
#Método de newton
def Newton_Method(f, df, x0, tol, kmax):
    """Newton-Raphson iteration starting at x0.

    Stops once the Newton step f(x)/df(x) has magnitude at most tol or after
    kmax iterations. Returns (approximate root, number of iterations).
    """
    iterations = 0
    step = f(x0) / df(x0)
    while abs(step) > tol and iterations < kmax:
        x0 -= step
        iterations += 1
        step = f(x0) / df(x0)
    return x0, iterations
#Resultado para o método da bisecção
bissec_raiz, bissec_count = Bisection_Method(dist_pq, 3.15, 3.2, 0.001, 200)
#Resultado para o método de Newton
newton_raiz, newton_count = Newton_Method(dist_pq, dx_dist_pq, 3.15, 0.001, 200)
#Valores esperados
raiz = 3.173
altura = p(raiz)
print("Metodo da bissecção:")
print(" Intervalo inicial: (3.15, 3.2)")
print(" - raiz:", bissec_raiz)
print(" - iterações:", bissec_count)
print(" - Altura:", p(bissec_raiz))
print("\nMetodo de newton:")
print(" Chute inicial: 3.15")
print(" - raiz:", newton_raiz)
print(" - iterações:", newton_count)
print(" - Altura:", p(newton_raiz))
print("\nValor esperado:")
print(" - raiz:", raiz)
print(" - Altura:", altura)
```
Para ambos os métodos foi utilizado o critério de parada, precisão 0,001, então o que temos de maior diferença nos resultados são os chutes iniciais e quantidade de iterações.
Dado o método, podemos dizer que o da bissecção têm uma dependência maior do intervalo inicial escolhido enquanto o de Newton depende mais da função matemática em si, pois é utilizada sua derivada, e foram necessárias mais iterações para chegar à mesma precisão do método da bissecção.
No geral, para ambos os métodos é preciso saber a previsão para o resultado de forma a ajustar os parâmetros.
| github_jupyter |
# Chapter 11
*Modeling and Simulation in Python*
Copyright 2021 Allen Downey
License: [Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International](https://creativecommons.org/licenses/by-nc-sa/4.0/)
```
# install Pint if necessary
try:
import pint
except ImportError:
!pip install pint
# download modsim.py if necessary
from os.path import basename, exists
def download(url):
    """Download url into the working directory, skipping files already present."""
    filename = basename(url)
    if exists(filename):
        return
    from urllib.request import urlretrieve
    local, _ = urlretrieve(url, filename)
    print('Downloaded ' + local)
download('https://raw.githubusercontent.com/AllenDowney/' +
'ModSimPy/master/modsim.py')
# import functions from modsim
from modsim import *
```
[Click here to run this chapter on Colab](https://colab.research.google.com/github/AllenDowney/ModSimPy/blob/master//chapters/chap11.ipynb)
In this chapter, we develop a model of an epidemic as it spreads in a
susceptible population, and use it to evaluate the effectiveness of
possible interventions.
My presentation of the model in the next few chapters is based on an excellent article by David Smith and Lang Moore, "The SIR Model for Spread of Disease," *Journal of Online Mathematics and its Applications*, December 2001, available at <http://modsimpy.com/sir>.
## The Freshman Plague
Every year at Olin College, about 90 new students come to campus from
around the country and the world. Most of them arrive healthy and happy, but usually at least one brings with them some kind of infectious disease. A few weeks later, predictably, some fraction of the incoming class comes down with what we call "The Freshman Plague".
In this chapter we introduce a well-known model of infectious disease,
the Kermack-McKendrick model, and use it to explain the progression of
the disease over the course of the semester, predict the effect of
possible interventions (like immunization) and design the most effective intervention campaign.
So far we have done our own modeling; that is, we've chosen physical
systems, identified factors that seem important, and made decisions
about how to represent them. In this chapter we start with an existing
model and reverse-engineer it. Along the way, we consider the modeling
decisions that went into it and identify its capabilities and
limitations.
## The SIR model
The Kermack-McKendrick model is an example of an **SIR model**,
so-named because it represents three categories of people:
- **S**: People who are "susceptible", that is, capable of
contracting the disease if they come into contact with someone who
is infected.
- **I**: People who are "infectious", that is, capable of passing
along the disease if they come into contact with someone
susceptible.
- **R**: People who are "recovered". In the basic version of the
model, people who have recovered are considered to be immune to
reinfection. That is a reasonable model for some diseases, but not
for others, so it should be on the list of assumptions to reconsider later.
Let's think about how the number of people in each category changes over time. Suppose we know that people with the disease are infectious for a period of 4 days, on average. If 100 people are infectious at a
particular point in time, and we ignore the particular time each one
became infected, we expect about 1 out of 4 to recover on any particular day.
Putting that a different way, if the time between recoveries is 4 days, the recovery rate is about 0.25 recoveries per day, which we'll denote with the Greek letter gamma, $\gamma$, or the variable name `gamma`.
If the total number of people in the population is $N$, and the fraction currently infectious is $i$, the total number of recoveries we expect per day is $\gamma i N$.
Now let's think about the number of new infections. Suppose we know that each susceptible person comes into contact with 1 person every 3 days, on average, in a way that would cause them to become infected if the other person is infected. We'll denote this contact rate with the Greek letter beta, $\beta$.
It's probably not reasonable to assume that we know $\beta$ ahead of
time, but later we'll see how to estimate it based on data from previous outbreaks.
If $s$ is the fraction of the population that's susceptible, $s N$ is
the number of susceptible people, $\beta s N$ is the number of contacts per day, and $\beta s i N$ is the number of those contacts where the other person is infectious.
In summary:
- The number of recoveries we expect per day is $\gamma i N$; dividing by $N$ yields the fraction of the population that recovers in a day, which is $\gamma i$.
- The number of new infections we expect per day is $\beta s i N$;
dividing by $N$ yields the fraction of the population that gets
infected in a day, which is $\beta s i$.
This model assumes that the population is closed; that is, no one
arrives or departs, so the size of the population, $N$, is constant.
## The SIR equations
If we treat time as a continuous quantity, we can write differential
equations that describe the rates of change for $s$, $i$, and $r$ (where $r$ is the fraction of the population that has recovered):
$$\begin{aligned}
\frac{ds}{dt} &= -\beta s i \\
\frac{di}{dt} &= \beta s i - \gamma i\\
\frac{dr}{dt} &= \gamma i\end{aligned}$$
To avoid cluttering the equations, I leave it implied that $s$ is a function of time, $s(t)$, and likewise for $i$ and $r$.
SIR models are examples of **compartment models**, so-called because
they divide the world into discrete categories, or compartments, and
describe transitions from one compartment to another. Compartments are
also called **stocks** and transitions between them are called
**flows**.
In this example, there are three stocks---susceptible, infectious, and
recovered---and two flows---new infections and recoveries. Compartment
models are often represented visually using stock and flow diagrams (see <http://modsimpy.com/stock>).
The following figure shows the stock and flow diagram for an SIR
model.

Stocks are represented by rectangles, flows by arrows. The widget in the middle of the arrows represents a valve that controls the rate of flow; the diagram shows the parameters that control the valves.
## Implementation
For a given physical system, there are many possible models, and for a
given model, there are many ways to represent it. For example, we can
represent an SIR model as a stock-and-flow diagram, as a set of
differential equations, or as a Python program. The process of
representing a model in these forms is called **implementation**. In
this section, we implement the SIR model in Python.
I'll represent the initial state of the system using a `State` object
with state variables `S`, `I`, and `R`; they represent the fraction of
the population in each compartment.
We can initialize the `State` object with the *number* of people in each compartment, assuming there is one infected student in a class of 90:
```
init = State(S=89, I=1, R=0)
show(init)
```
And then convert the numbers to fractions by dividing by the total:
```
init /= init.sum()
show(init)
```
For now, let's assume we know the time between contacts and time between
recoveries:
```
tc = 3 # time between contacts in days
tr = 4 # recovery time in days
```
We can use them to compute the parameters of the model:
```
beta = 1 / tc # contact rate in per day
gamma = 1 / tr # recovery rate in per day
```
I'll use a `System` object to store the parameters and initial
conditions. The following function takes the system parameters and returns a new `System` object:
```
def make_system(beta, gamma):
    """Build a System for the SIR model: normalized initial state plus rates.

    beta is the contact rate and gamma the recovery rate, both per day.
    """
    init = State(S=89, I=1, R=0)
    init /= init.sum()  # convert head counts to population fractions
    t_end = 7 * 14  # simulate 14 weeks, in days
    return System(init=init, t_end=t_end, beta=beta, gamma=gamma)
The default value for `t_end` is 14 weeks, about the length of a
semester.
Here's what the `System` object looks like.
```
system = make_system(beta, gamma)
show(system)
```
## The update function
At any point in time, the state of the system is represented by a
`State` object with three variables, `S`, `I` and `R`. So I'll define an update function that takes as parameters the current
time, a `State` object, and a `System` object:
```
def update_func(t, state, system):
    """Advance the SIR state by one day and return the new State.

    t is unused here but kept so all update functions share one signature.
    """
    s, i, r = state
    new_infections = system.beta * i * s
    new_recoveries = system.gamma * i
    return State(S=s - new_infections,
                 I=i + new_infections - new_recoveries,
                 R=r + new_recoveries)
```
The first line uses a feature we have not seen before, **multiple
assignment**. The value on the right side is a `State` object that
contains three values. The left side is a sequence of three variable
names. The assignment does just what we want: it assigns the three
values from the `State` object to the three variables, in order.
The variables `s`, `i` and `r`, are lowercase to distinguish them
from the state variables, `S`, `I` and `R`.
The update function computes `infected` and `recovered` as a fraction of the population, then updates `s`, `i` and `r`. The return value is a `State` that contains the updated values.
We can call `update_func` like this:
```
state = update_func(0, init, system)
show(state)
```
The result is the new `State` object.
You might notice that this version of `update_func` does not use one of its parameters, `t`. I include it anyway because update functions
sometimes depend on time, and it is convenient if they all take the same parameters, whether they need them or not.
## Running the simulation
Now we can simulate the model over a sequence of time steps:
```
def run_simulation1(system, update_func):
    """Iterate update_func from system.init for system.t_end steps.

    Only the final state is returned; intermediate states are discarded.
    """
    state = system.init
    for t in range(system.t_end):
        state = update_func(t, state, system)
    return state
```
The parameters of `run_simulation` are the `System` object and the
update function. The `System` object contains the parameters, initial
conditions, and values of `0` and `t_end`.
We can call `run_simulation` like this:
```
final_state = run_simulation1(system, update_func)
show(final_state)
```
The result indicates that after 14 weeks (98 days), about 52% of the
population is still susceptible, which means they were never infected,
almost 48% have recovered, which means they were infected at some point, and less than 1% are actively infected.
## Collecting the results
The previous version of `run_simulation` only returns the final state,
but we might want to see how the state changes over time. We'll consider two ways to do that: first, using three `TimeSeries` objects, then using a new object called a `TimeFrame`.
Here's the first version:
```
def run_simulation2(system, update_func):
    """Run the simulation, recording S, I, R as three separate TimeSeries."""
    S, I, R = TimeSeries(), TimeSeries(), TimeSeries()
    state = system.init
    S[0], I[0], R[0] = state
    for t in range(system.t_end):
        state = update_func(t, state, system)
        # unpack the new State into the three series at the next time step
        S[t + 1], I[t + 1], R[t + 1] = state
    return S, I, R
```
First, we create `TimeSeries` objects to store the results.
Next we initialize `state` and the first elements of `S`, `I` and
`R`.
Inside the loop, we use `update_func` to compute the state of the system at the next time step, then use multiple assignment to unpack the elements of `state`, assigning each to the corresponding `TimeSeries`.
At the end of the function, we return the values `S`, `I`, and `R`. This is the first example we have seen where a function returns more than one value.
We can run the function like this:
```
S, I, R = run_simulation2(system, update_func)
```
We'll use the following function to plot the results:
```
def plot_results(S, I, R):
    """Plot the three SIR time series with distinct line styles and a legend."""
    for series, line_style, name in ((S, '--', 'Susceptible'),
                                     (I, '-', 'Infected'),
                                     (R, ':', 'Resistant')):
        series.plot(style=line_style, label=name)
    decorate(xlabel='Time (days)',
             ylabel='Fraction of population')
```
And run it like this:
```
plot_results(S, I, R)
```
It takes about three weeks (21 days) for the outbreak to get going, and about five weeks (35 days) to peak. The fraction of the population that's infected is never very high, but it adds up. In total, almost half the population gets sick.
## Now with a TimeFrame
If the number of state variables is small, storing them as separate
`TimeSeries` objects might not be so bad. But a better alternative is to use a `TimeFrame`, which is another object defined in the ModSim
library.
A `TimeFrame` is a kind of a `DataFrame`, which we used earlier to store world population estimates.
Here's a more concise version of `run_simulation` using a `TimeFrame`:
```
def run_simulation(system, update_func):
    """Run the simulation, storing every state as one row of a TimeFrame."""
    frame = TimeFrame(columns=system.init.index)
    frame.loc[0] = system.init  # row 0 holds the initial conditions
    for t in range(system.t_end):
        frame.loc[t + 1] = update_func(t, frame.loc[t], system)
    return frame
```
The first line creates an empty `TimeFrame` with one column for each
state variable. Then, before the loop starts, we store the initial
conditions in the `TimeFrame` at `0`. Based on the way we've been using
`TimeSeries` objects, it is tempting to write:
```
frame[0] = system.init
```
But when you use the bracket operator with a `TimeFrame` or `DataFrame`, it selects a column, not a row.
To select a row, we have to use `loc`, like this:
```
frame.loc[0] = system.init
```
Since the value on the right side is a `State`, the assignment matches
up the index of the `State` with the columns of the `TimeFrame`; that
is, it assigns the `S` value from `system.init` to the `S` column of
`frame`, and likewise with `I` and `R`.
We use the same feature to write the loop more concisely, assigning the `State` we get from `update_func` directly to the next row of
`frame`.
Finally, we return `frame`. We can call this version of `run_simulation` like this:
```
results = run_simulation(system, update_func)
```
And plot the results like this:
```
plot_results(results.S, results.I, results.R)
```
As with a `DataFrame`, we can use the dot operator to select columns
from a `TimeFrame`.
## Summary
This chapter presents an SIR model of infectious disease and two ways to collect the results, using several `TimeSeries` objects or a single `TimeFrame`.
In the next chapter we'll use the model to explore the effect of immunization and other interventions.
But first you might want to work on these exercises.
## Exercises
**Exercise** Suppose the time between contacts is 4 days and the recovery time is 5 days. After 14 weeks, how many students, total, have been infected?
Hint: what is the change in `S` between the beginning and the end of the simulation?
```
# Solution goes here
# Solution goes here
# Solution goes here
```
| github_jupyter |
<br>
# Einblick in das Rechnen mit <i>agla</i>
von Holger Böttcher - hbomat@posteo.de
<br><br>
Diese Arbeit steht unter der freien Lizenz [CC BY-SA 4.0](https://creativecommons.org/licenses/by-sa/4.0/deed.de)
### Abitur Bayern 2019
### Geometrie Teil B, Aufgabengruppe 1
<br>
Quelle: [serlo.org](https://de.serlo.org/mathe/deutschland/bayern/gymnasium/abiturpr%C3%BCfungen-l%C3%B6sung/mathematik-abitur-bayern-2019/geometrie,-teil-b,-aufgabengruppe-1)
<br><i>Die Variablen $x_1, x_2, x_3$ sind mit $x, y, z$ bezeichnet</i>
<br>
Eine Geothermieanlage fördert durch einen Bohrkanal heißes Wasser aus einer
<br>wasserführenden Gesteinsschicht an die Erdoberfläche. In einem Modell entspricht
<br>die $xy$ - Ebene eines kartesischen Koordinatensystems der horizontal verlaufenden
<br>Erdoberfläche. Eine Längeneinheit im Koordinatensystem entspricht einem Kilometer
<br>in der Realität.
Der Bohrkanal besteht aus zwei Abschnitten, die im Modell vereinfacht
<br>durch die Strecken $[AP]$ und $[PQ]$ mit den Punkten $A\,(0\,|\,0\,|\,0), P\,(0\,|\,0\,|\,-1)$ und
<br>$Q\,(1\,|\,1\,|\,-3,5)$ beschrieben werden (vgl. Abbildung).
<img src='bilder/bayern3.png' align='left'>
<br><b>a)</b> Berechnen Sie auf der Grundlage des Modells die Gesamtlänge des Bohrkanals
<br>$\:\:\:\:$auf Meter gerundet.
<br>
<br><b>b)</b> Beim Übergang zwischen den beiden Abschnitten des Bohrkanals muss die
<br>$\:\:\:\:$Bohrrichtung um den Winkel geändert werden, der im Modell durch den
<br>$\:\:\:\:$Schnittwinkel der beiden Geraden $AP$ und $PQ$ beschrieben wird. Bestimmen Sie
<br>$\:\:\:\:$ die Größe dieses Winkels.<br><br>
Im Modell liegt die obere Begrenzungsfläche der wasserführenden Gesteinsschicht in
<br>der Ebene $E$ und die untere Begrenzungsfläche in einer zu $E$ parallelen Ebene $F$. Die
<br>Ebene $E$ enthält den Punkt $Q$. Die Strecke $[PQ]$ steht senkrecht auf der Ebene $E$
<br>(vgl. Abbildung).
<br>
<br><b>c)</b> Bestimmen Sie eine Gleichung der Ebene $E$ in Normalenform.
<br>$\:\:\:\:$(*zur Kontrolle:* $E : 4x + 4y -10z - 43 = 0$ )
<br>
<br><b>d)</b> Der Bohrkanal wird geradlinig verlängert und verlässt die wasserführende<br>
$\:\:\:\:$Gesteinsschicht in einer Tiefe von 3600 m unter der Erdoberfläche. Die<br>
$\:\:\:\:$Austrittsstelle wird im Modell als Punkt $R$ auf der Geraden $PQ$ beschrieben.<br>
$\:\:\:\:$Bestimmen Sie die Koordinaten von $R$ und ermitteln Sie die Dicke der <br>
$\:\:\:\:$wasserführenden Gesteinsschicht auf Meter gerundet.<br>
$\:\:\:\:$(*zur Kontrolle:* $x$- *und* $y$*-Koordinate von* $R:\: 1,04$)
Ein zweiter Bohrkanal wird benötigt, durch den das entnommene Wasser abgekühlt<br>
zurück in die wasserführende Gesteinsschicht geleitet wird. Der Bohrkanal soll <br>
geradlinig und senkrecht zur Erdoberfläche verlaufen. Für den Beginn des Bohrkanals <br>
an der Erdoberfläche kommen nur Bohrstellen in Betracht, die im Modell durch einen <br>
Punkt $B\,(t\,|\,-t\,|\, 0)$ mit $t \in \mathbb{R}$ beschrieben
werden können.<br><br>
<b>e)</b> Zeigen Sie rechnerisch, dass der zweite Bohrkanal die wasserführende <br>
$\:\:\:\:$Gesteinsschicht im Modell im Punkt $T\,(t \,|\, -t \,| \, -4,3)$ erreicht, und erläutern Sie,<br>
$\:\:\:\:$wie die Länge des zweiten Bohrkanals bis zur wasserführenden Gesteinsschicht<br>
$\:\:\:\:$von der Lage der zugehörigen Bohrstelle beeinflusst wird.<br><br>
<b>f)</b> Aus energetischen Gründen soll der Abstand der beiden Stellen, an denen die <br>
$\:\:\:\:$beiden Bohrkanäle auf die wasserführende Gesteinsschicht treffen, mindestens <br>
$\:\:\:\:$1500 m betragen. Entscheiden Sie auf der Grundlage des Modells, ob diese<br>
$\:\:\:\:$Bedingung für jeden möglichen zweiten Bohrkanal erfüllt wird
<br><br>
```
%run agla/start
A = O # Ursprung, vordefiniert
P = v(0, 0, -1); Q = v(1, 1, -3.5)
A, P, Q
```
### a)
```
ab = Abstand(A, P) + Abstand(P, Q)
ab, (ab*1000).n(4) # in Meter
```
### b)
```
Winkel(Gerade(A, v(A, P)), Gerade(P, v(P, Q)), d=2) # oder
Winkel(v(A, P), v(P, Q), d=2)
```
### c)
$E\,$ wird über den Stütz- und den Normalenvektor erzeugt
```
E = Ebene(Q, v(P, Q))
E.koord
```
### d)
$R\,$ wird als Punkt der Geraden $PQ$ berechnet, dessen $z$-Koordinate $-3.6$ beträgt
Die Gleichung wird aus $\:R.z = -3.6\:$ gewonnen, indem die rechte Seite zu $0$ <br>
gemacht und die linke Seite genommen wird
```
R = Gerade(P, v(P, Q)).pkt(t)
L = löse(R.z + 3.6)
L
```
Einsetzen von $t$ in $R\:$ (als Wert eines *dictionary*-Elementes) und zur<br>
Dezimaldarstellung übergehen
```
R = R.subs(t, L[t]).dez
R
```
Die Dicke ist der Abstand zwischen $Q$ und $R$
```
dicke = Abstand(Q, R)
dicke * 1000 # in Meter
```
### e)
```
B = v(-t, t, 0)
```
Ein Bohrkanal verläuft entlang einer Geraden durch $B$, die parallel zur $z$-Achse liegt
```
g = Gerade(B, v(0, 0, 1), s) # Parameter darf hier nicht t sein
g.prg
```
$T$ ist dann Schnittpunkt dieser Geraden mit $E$
```
T = g.schnitt(E)
T.dez
```
Die Länge des zweiten Bohrkanals ist konstant, sie hängt nicht von der Lage ab
```
länge = Abstand(B, T)
länge
```
Das bedeutet, dass die Gerade, auf der alle Punkte $B$ liegen, parallel zur Ebene ist
```
h = Gerade(B)
h.prg
parallel(h, E)
```
Eine kleine Zeichnung verdeutlicht die Situation
```
zeichne([h, 2], E)
```
### f)
Der Abstand von $Q$ zu einem beliebigen $T$ ist
```
Abstand(Q, T)
```
Der Ausdruck unter der Wurzel nimmt seinen kleinsten Wert bei $t=0$ an, der <br>
Abstand ist also immer größer oder gleich
```
sqrt(66/25)
```
Damit ist die Bedingung erfüllt
| github_jupyter |
```
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
import random
A = 1
T = 1
sample_rate = 100
num_random_processes = 1000
E_N0 = np.arange(-10, 20, 1)
def receiver_filter(rt, which):
    """Apply one of three receive filters to the received signal rt.

    which == 1: matched filter, rectangular pulse of amplitude A on (0, T]
    which == 2: no filtering; rt is returned unchanged
    otherwise : ramp filter h(t) = sqrt(3) * t / T on (0, T]
    Convolutions are divided by sample_rate to approximate the integral.
    """
    if which == 1:
        gt = np.fromfunction(
            lambda i: ((0 * sample_rate < i) & (i <= T * sample_rate)) * A,
            (sample_rate + 10,)).astype(float)  # np.float was removed in NumPy 1.24
        return signal.convolve(rt, gt) / sample_rate
    elif which == 2:
        return rt
    else:
        ht = np.fromfunction(
            lambda i: ((0 * sample_rate < i) & (i <= T * sample_rate))
            * ((i / sample_rate) * np.sqrt(3)) / T,
            (sample_rate + 10,)).astype(float)  # np.float was removed in NumPy 1.24
        return signal.convolve(rt, ht) / sample_rate
def random_pulse():
    """Draw the transmitted symbol amplitude: +A or -A, equally likely."""
    if random.getrandbits(1):
        return A
    return -A
def calc_N0(avg_powerDB):
    """Noise density N0 that yields the requested E/N0 (given in dB)."""
    pulse_energy = (A ** 2) * (T * sample_rate)
    ratio_linear = 10 ** (avg_powerDB / 10)
    return pulse_energy / ratio_linear
def add_AWGN(gt, avg_powerDB):
    """Add white Gaussian noise, scaled for the requested E/N0 (dB), to gt."""
    N0 = calc_N0(avg_powerDB)
    # NOTE(review): N0/2 is passed as the *standard deviation* of the noise;
    # if N0/2 is meant to be the noise variance, the scale should be
    # sqrt(N0/2) — confirm against the intended channel model.
    AWGN = np.random.normal(0, N0/2, sample_rate+10)
    return gt+AWGN
def sample_at_T(yt):
    """Sample the filter output at the symbol instant t = T."""
    symbol_index = T * sample_rate
    return yt[symbol_index]
def mak_decision(y):
    """Threshold detector: map the sampled value y to the nearer symbol."""
    if y >= 0:
        return A
    return -A
def calc_prob_error(num_random_processes, avg_powerDB, which_filter):
    """Monte Carlo estimate of the bit error rate for one receive filter.

    Simulates num_random_processes random transmissions at the given E/N0
    (in dB) and returns the fraction of wrong threshold decisions.
    """
    num_wrong_decisions = 0
    for _ in range(num_random_processes):
        magnitude = random_pulse()
        gt = np.fromfunction(
            lambda i: ((0 * sample_rate < i) & (i <= T * sample_rate)) * magnitude,
            (sample_rate + 10,)).astype(float)  # np.float was removed in NumPy 1.24
        rt = add_AWGN(gt, avg_powerDB)
        yt = receiver_filter(rt, which_filter)
        yT = sample_at_T(yt)
        out = mak_decision(yT)
        # out and magnitude are both in {A, -A}; any mismatch is a bit error
        if out != magnitude:
            num_wrong_decisions += 1
    return num_wrong_decisions / num_random_processes
def plot_out(avg_powerDB):
    """Plot the outputs of the three receive filters for one noisy +A pulse."""
    gt = np.fromfunction(
        lambda i: ((0 * sample_rate < i) & (i <= T * sample_rate)) * A,
        (sample_rate + 10,)).astype(float)  # np.float was removed in NumPy 1.24
    rt = add_AWGN(gt, avg_powerDB)
    # One subplot per receive filter, sharing the noisy received signal rt.
    outputs = [receiver_filter(rt, which) for which in (1, 2, 3)]
    fig, axes = plt.subplots(1, 3, figsize=(17, 5))
    fig.suptitle("output of receive filters with AWGN with (E/N0 = {})".format(avg_powerDB))
    for which, (ax, yt) in enumerate(zip(axes, outputs), start=1):
        ax.plot(np.stack((yt,), axis=1))
        ax.set_title("filter {}".format(which))
        ax.set(xlabel='time', ylabel='y(t)')
    plt.show()
def calc_prob_all(avg_powerDB):
    """Estimate and print the error probability of all three receive filters
    at the given E/N0 (in dB), using num_random_processes trials for each."""
    prob1 = calc_prob_error(num_random_processes, avg_powerDB, 1)
    prob2 = calc_prob_error(num_random_processes, avg_powerDB, 2)
    prob3 = calc_prob_error(num_random_processes, avg_powerDB, 3)
    print("probability of error of receive filter1 with avg_power = {} db is {}".format(avg_powerDB, prob1))
    print("probability of error of receive filter2 with avg_power = {} db is {}".format(avg_powerDB, prob2))
    print("probability of error of receive filter3 with avg_power = {} db is {}".format(avg_powerDB, prob3))
def prob_error_list(E_N0, which_filter):
    """BER estimate at every E/N0 value (dB) for the chosen receive filter."""
    return [calc_prob_error(num_random_processes, db, which_filter)
            for db in E_N0]
plot_out(avg_powerDB= -10)
calc_prob_all(avg_powerDB= -10)
plot_out(avg_powerDB= 10)
calc_prob_all(avg_powerDB= 10)
plot_out(avg_powerDB= 15)
calc_prob_all(avg_powerDB= 15)
plot_out(avg_powerDB= 20)
calc_prob_all(avg_powerDB= 20)
plot_out(avg_powerDB= 50)
calc_prob_all(avg_powerDB= 50)
prob_error1 = prob_error_list(E_N0, 1)
prob_error2 = prob_error_list(E_N0, 2)
prob_error3 = prob_error_list(E_N0, 3)
plt.plot(E_N0, prob_error1, label='receive filter1')
plt.plot(E_N0, prob_error2, label='receive filter2')
plt.plot(E_N0, prob_error3, label='receive filter3')
plt.legend()
plt.title('probability of error VS. E/N0')
plt.xlabel('E/No')
plt.ylabel('Probability of Error')
plt.show()
```
# Question 1: Is BER increasing or decreasing with E/N0, and why ?
## Our answer:
As shown in the output, BER decreases as E/N0 increases. This is because, as E/N0 grows, N0 shrinks, so the noise power becomes much smaller than the pulse power and can no longer significantly corrupt the pulse signal.
# Question 2: which has the lowest BER, and why ?
## Our answer:
As shown in the output, filter one has the lowest BER because it is the matched filter h(t) = g(T-t). As proved in the lecture, the optimal receive filter h(t) = g(T-t) maximizes the SNR at the sampling instant and therefore minimizes the BER: the output of the convolution sampled at t = T equals the full energy of the pulse, so the error probability is minimal.
| github_jupyter |
Soal 1: Bitcoin Prices
* **Line Graph**
* Tahun 2018 cenderung memberikan keuntungan yang lebih baik
```
import matplotlib.pyplot as plt

# Sorted daily bitcoin prices (USD): x holds the 2018 values, y the 2019 values.
x = [3228.7, 3430.4, 3502.5, 3570.9, 3597.2, 3616.8, 3661.4, 3677.8, 3706.8, 3785.4,
     3823.1, 3920.4, 3944.3, 4002.5, 4006.4, 4111.8, 4120.4, 4196.2, 5046.2, 5051.8,
     5265.9, 5290.2, 5621.8, 5830.9, 6167.3, 6184.3, 6231.6, 6254.8, 6321.7, 6379.1,
     6386.2, 6398.9, 6427.1, 6494.2, 6505.8, 6519.0, 6572.2, 6596.3, 6603.9, 6729.6,
     6734.8, 6765.5, 6905.7, 6938.2, 7014.3, 7080.8, 7156.2, 7189.6, 7190.3, 7262.6, 7321.5]
y = [7324.1, 7361.3, 7376.8, 7408.7, 7510.9, 7515.8, 7546.6, 7646.6, 7874.9, 7901.4,
     7957.3, 8004.4, 8027.4, 8127.3, 8208.5, 8234.1, 8245.1, 8304.4, 8459.5, 8497.3,
     8545.7, 8547.4, 8559.6, 8762.0, 8812.5, 8923.1, 9230.6, 9241.1, 9300.6, 9352.4,
     9492.1, 9594.4, 9704.3, 9853.5, 9993.0, 10131.0, 10218.1, 10337.3, 10461.1,
     10721.7, 10815.7, 10826.7, 11073.5, 11268.0, 11314.5, 11364.9, 11402.3, 11467.5,
     11906.5, 12858.9, 14292.2]
# Plot the 2019 prices (y) against the 2018 prices (x).
plt.plot(x, y)
plt.ylabel("Koordinat Y")
plt.xlabel("Koordinat X")  # fixed axis-label typo: was "tKoordinat X"
plt.title('Daftar Harga Bitcoin Pada Tahun 2018 & 2019')
plt.show()
import matplotlib.pyplot as plt

# Same Bitcoin price data as above, now drawn with circle markers at each point.
x = [3228.7, 3430.4, 3502.5, 3570.9, 3597.2, 3616.8, 3661.4, 3677.8, 3706.8, 3785.4,
     3823.1, 3920.4, 3944.3, 4002.5, 4006.4, 4111.8, 4120.4, 4196.2, 5046.2, 5051.8,
     5265.9, 5290.2, 5621.8, 5830.9, 6167.3, 6184.3, 6231.6, 6254.8, 6321.7, 6379.1,
     6386.2, 6398.9, 6427.1, 6494.2, 6505.8, 6519.0, 6572.2, 6596.3, 6603.9, 6729.6,
     6734.8, 6765.5, 6905.7, 6938.2, 7014.3, 7080.8, 7156.2, 7189.6, 7190.3, 7262.6, 7321.5]
y = [7324.1, 7361.3, 7376.8, 7408.7, 7510.9, 7515.8, 7546.6, 7646.6, 7874.9, 7901.4,
     7957.3, 8004.4, 8027.4, 8127.3, 8208.5, 8234.1, 8245.1, 8304.4, 8459.5, 8497.3,
     8545.7, 8547.4, 8559.6, 8762.0, 8812.5, 8923.1, 9230.6, 9241.1, 9300.6, 9352.4,
     9492.1, 9594.4, 9704.3, 9853.5, 9993.0, 10131.0, 10218.1, 10337.3, 10461.1,
     10721.7, 10815.7, 10826.7, 11073.5, 11268.0, 11314.5, 11364.9, 11402.3, 11467.5,
     11906.5, 12858.9, 14292.2]

plt.title('Daftar Harga Bitcoin Pada Tahun 2018 & 2019')
plt.xlabel("Koordinat X")
plt.ylabel("Koordinat Y")
plt.plot(x, y, marker='o')
plt.show()
import matplotlib.pyplot as plt

x = [3228.7, 3430.4, 3502.5, 3570.9, 3597.2, 3616.8, 3661.4, 3677.8, 3706.8, 3785.4,
     3823.1, 3920.4, 3944.3, 4002.5, 4006.4, 4111.8, 4120.4, 4196.2, 5046.2, 5051.8,
     5265.9, 5290.2, 5621.8, 5830.9, 6167.3, 6184.3, 6231.6, 6254.8, 6321.7, 6379.1,
     6386.2, 6398.9, 6427.1, 6494.2, 6505.8, 6519.0, 6572.2, 6596.3, 6603.9, 6729.6,
     6734.8, 6765.5, 6905.7, 6938.2, 7014.3, 7080.8, 7156.2, 7189.6, 7190.3, 7262.6, 7321.5]
y = [7324.1, 7361.3, 7376.8, 7408.7, 7510.9, 7515.8, 7546.6, 7646.6, 7874.9, 7901.4,
     7957.3, 8004.4, 8027.4, 8127.3, 8208.5, 8234.1, 8245.1, 8304.4, 8459.5, 8497.3,
     8545.7, 8547.4, 8559.6, 8762.0, 8812.5, 8923.1, 9230.6, 9241.1, 9300.6, 9352.4,
     9492.1, 9594.4, 9704.3, 9853.5, 9993.0, 10131.0, 10218.1, 10337.3, 10461.1,
     10721.7, 10815.7, 10826.7, 11073.5, 11268.0, 11314.5, 11364.9, 11402.3, 11467.5,
     11906.5, 12858.9, 14292.2]
# z duplicates x, so the dashed second series is simply the line y = x.
z = list(x)

plt.plot(x, y, marker='o')
plt.plot(x, z, linestyle='--')
plt.title('Daftar Harga Bitcoin Pada Tahun 2018 & 2019')
plt.xlabel("Koordinat X")
plt.ylabel("Koordinat Y")
plt.show()
```
Soal 2: Permen
* **Pie Chart**
* Peluang yang memilih permen kopiko = 15.0%
```
import matplotlib.pyplot as plt

# Candy names and how many respondents chose each one.
nama_permen = ('mentos', 'kopiko', 'golia', 'yupie', 'fishermen')
jumlah_permen = (52, 39, 78, 13, 78)
plt.pie(jumlah_permen, labels=nama_permen)
plt.show()  # fixed: `plt.show` without parentheses is a no-op statement
import matplotlib.pyplot as plt

# Same pie chart, now with percentage labels on each wedge.
nama_permen = ('mentos', 'kopiko', 'golia', 'yupie', 'fishermen')
jumlah_permen = (52, 39, 78, 13, 78)
plt.pie(jumlah_permen, labels=nama_permen, autopct= '%1.1f%%')
plt.show()  # fixed: `plt.show` without parentheses is a no-op statement
import matplotlib.pyplot as plt

# Same pie chart with custom wedge colors (one hex color per candy).
nama_permen = ('mentos', 'kopiko', 'golia', 'yupie', 'fishermen')
jumlah_permen = (52, 39, 78, 13, 78)
warna = ('#FAFAD2','#A0522D', '#AFEEEE', '#FFB6C1', '#E6E6FA')
plt.pie(jumlah_permen, labels=nama_permen, colors=warna)
plt.show()  # fixed: `plt.show` without parentheses is a no-op statement
import matplotlib.pyplot as plt

# Pie chart with percentages, custom colors, and the 'kopiko' wedge
# (index 1) pulled out via `explode` to highlight it.
nama_permen = ('mentos', 'kopiko', 'golia', 'yupie', 'fishermen')
jumlah_permen = (52, 39, 78, 13, 78)
warna = ('#FAFAD2','#A0522D', '#AFEEEE', '#FFB6C1', '#E6E6FA')
explode =(0,0.1, 0,0,0)
plt.pie(jumlah_permen, labels=nama_permen, autopct= '%1.1f%%', colors=warna, explode=explode)
plt.show()  # fixed: `plt.show` without parentheses is a no-op statement
import matplotlib.pyplot as plt

# Final version: exploded 'kopiko' wedge, percentages, colors, title and shadow.
nama_permen = ('mentos', 'kopiko', 'golia', 'yupie', 'fishermen')
jumlah_permen = (52, 39, 78, 13, 78)
warna = ('#FAFAD2','#A0522D', '#AFEEEE', '#FFB6C1', '#E6E6FA')
explode =(0,0.1, 0,0,0)
plt.title('Peluang Memilih Permen Kopiko')
plt.pie(jumlah_permen, labels=nama_permen, autopct= '%1.1f%%', colors=warna, explode=explode, shadow=True)
plt.show()  # fixed: `plt.show` without parentheses is a no-op statement
```
Soal 3: Makanan
* **Bar Chart**
1. Popularitas dari makanan penutup = Ice Cream
2. 3 makanan penutup yang harus disingkirkan = Kue Wajik, Pastel, Puding Vanila
```
import matplotlib.pyplot as plt
import numpy as np

# Dessert popularity survey: names and vote counts, plotted as plain bars.
makanan_penutup = ('Donat', 'Pastel', 'Kue Coklat', 'Ice Cream', 'Puding Vanila', 'Brownies',
                   'Puding Strawberry', 'Puding Coklat', 'Ice Cream Nutela', 'Kue Coklat-Keju',
                   'Kue Wajik', 'Kue Sus', 'Mochi')
data = (14, 5, 12, 19, 6, 8, 12, 9, 10, 17, 2, 9, 13)

posisi = np.arange(len(makanan_penutup))
plt.bar(posisi, data)
plt.show()
import matplotlib.pyplot as plt
import numpy as np

# Same bars, now with dessert names as rotated tick labels plus axis text.
makanan_penutup = ('Donat', 'Pastel', 'Kue Coklat', 'Ice Cream', 'Puding Vanila', 'Brownies',
                   'Puding Strawberry', 'Puding Coklat', 'Ice Cream Nutela', 'Kue Coklat-Keju',
                   'Kue Wajik', 'Kue Sus', 'Mochi')
data = (14, 5, 12, 19, 6, 8, 12, 9, 10, 17, 2, 9, 13)

posisi = np.arange(len(makanan_penutup))
plt.bar(posisi, data, tick_label=makanan_penutup)
plt.xticks(rotation=90)
plt.ylabel("Data Penjualan (Rp)")
plt.title('Penjualan Makanan Penutup')
plt.show()
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

# Same dessert data, now sorted by sales before plotting so the bars ascend.
makanan_penutup = ('Donat', 'Pastel', 'Kue Coklat', 'Ice Cream', 'Puding Vanila', 'Brownies',
                   'Puding Strawberry', 'Puding Coklat', 'Ice Cream Nutela', 'Kue Coklat-Keju',
                   'Kue Wajik', 'Kue Sus', 'Mochi')
data = (14, 5, 12, 19, 6, 8, 12, 9, 10, 17, 2, 9, 13)

# A DataFrame keeps (name, sales) pairs together so sorting reorders both.
df = pd.DataFrame({'Jenis': makanan_penutup, 'Penjualan': data})
df.sort_values(by='Penjualan', inplace=True)

posisi = np.arange(len(makanan_penutup))
plt.bar(posisi, df['Penjualan'], tick_label=df['Jenis'])
plt.xticks(rotation=90)
plt.ylabel("Data Penjualan (Rp)")
plt.title('Penjualan Makanan Penutup')
plt.show()
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

makanan_penutup =('Donat', 'Pastel', 'Kue Coklat', 'Ice Cream', 'Puding Vanila', 'Brownies',
'Puding Strawberry', 'Puding Coklat', 'Ice Cream Nutela', 'Kue Coklat-Keju',
'Kue Wajik', 'Kue Sus', 'Mochi')
data = (14, 5, 12, 19, 6, 8, 12, 9, 10, 17, 2, 9, 13)
data_x = np.arange(len(makanan_penutup))

# Build the sorted frame BEFORE sizing the color list: the original read
# `len(df)` before assigning df, which only worked because a previous
# notebook cell happened to have defined df already.
df = pd.DataFrame({'Jenis': makanan_penutup, 'Penjualan': data})
df.sort_values(by='Penjualan', inplace=True)

# Blue bars by default; after sorting, the first three bars are the worst
# sellers (red = candidates for removal) and the last is the best (light blue).
warna = ['#0000FF' for _ in range(len(df))]
warna[-1] = '#87CEFA'
warna[0] = '#FF0000'
warna[1] = '#FF0000'
warna[2] = '#FF0000'

plt.figure(figsize=(20, 10))
plt.bar(data_x, df['Penjualan'], tick_label=df['Jenis'], color=warna)
plt.xticks(rotation=90)
plt.ylabel("Data Penjualan (Rp)")
plt.title('Penjualan Makanan Penutup')
plt.show()
```
Soal 4: Penggunaan CPU
* **Heatmap**
1. Jam berapa pekerja biasanya makan siang = 13.00
2. Tidak, pekerja tersebut tidak bekerja pada akhir pekan
3. Pekerja mulai bekerja pada komputer mereka pada malam hari yaitu **Hari Minggu**
```
# Weekly CPU-usage heatmap: one row per day, one column per hour of the day.
import seaborn as sns

hari = ['Senin', 'Selasa', 'Rabu', 'Kamis', 'Jumat', 'Sabtu', 'Minggu']
jam = ['00.00', '01.00', '02.00', '03.00', '04.00', '05.00', '06.00', '07.00', '08.00',
       '09.00', '10.00', '11.00', '12.00', '13.00', '14.00', '15.00', '16.00', '17.00',
       '18.00', '19.00', '20.00', '21.00', '22.00', '23.00']
# 7 x 24 matrix of CPU-usage samples (one value per day/hour cell).
datapenggunaan_cpu = [
    [2, 2, 4, 2, 4, 1, 1, 4, 4, 12, 22, 23, 45, 9, 33, 56, 23, 40, 21, 6, 6, 2, 2, 3], # Senin
    [1, 2, 3, 2, 3, 2, 3, 2, 7, 22, 45, 44, 33, 9, 23, 19, 33, 56, 12, 2, 3, 1, 2, 2], # Selasa
    [2, 3, 1, 2, 4, 4, 2, 2, 1, 2, 5, 31, 54, 7, 6, 34, 68, 34, 49, 6, 6, 2, 2, 3], # Rabu
    [1, 2, 3, 2, 4, 1, 2, 4, 1, 17, 24, 18, 41, 3, 44, 42, 12, 36, 41, 2, 2, 4, 2, 4], # Kamis
    [4, 1, 2, 2, 3, 2, 5, 1, 2, 12, 33, 27, 43, 8, 38, 53, 29, 45, 39, 3, 1, 1, 3, 4], # Jumat
    [2, 3, 1, 2, 2, 5, 2, 8, 4, 2, 3, 1, 5, 1, 2, 3, 2, 6, 1, 2, 2, 1, 4, 3], # Sabtu
    [1, 2, 3, 1, 1, 3, 4, 2, 3, 1, 2, 2, 5, 3, 2, 1, 4, 2, 45, 26, 33, 2, 2, 1], # Minggu
]
# Default colormap; day names label the rows, hours label the columns.
sns.heatmap(datapenggunaan_cpu, yticklabels=hari, xticklabels = jam)
# Same CPU-usage heatmap as the previous cell, rendered with the diverging
# 'coolwarm' colormap so busy hours stand out in red against idle blue.
import seaborn as sns

hari = ['Senin', 'Selasa', 'Rabu', 'Kamis', 'Jumat', 'Sabtu', 'Minggu']
jam = ['00.00', '01.00', '02.00', '03.00', '04.00', '05.00', '06.00', '07.00', '08.00',
       '09.00', '10.00', '11.00', '12.00', '13.00', '14.00', '15.00', '16.00', '17.00',
       '18.00', '19.00', '20.00', '21.00', '22.00', '23.00']
# 7 x 24 matrix of CPU-usage samples (one value per day/hour cell).
datapenggunaan_cpu = [
    [2, 2, 4, 2, 4, 1, 1, 4, 4, 12, 22, 23, 45, 9, 33, 56, 23, 40, 21, 6, 6, 2, 2, 3], # Senin
    [1, 2, 3, 2, 3, 2, 3, 2, 7, 22, 45, 44, 33, 9, 23, 19, 33, 56, 12, 2, 3, 1, 2, 2], # Selasa
    [2, 3, 1, 2, 4, 4, 2, 2, 1, 2, 5, 31, 54, 7, 6, 34, 68, 34, 49, 6, 6, 2, 2, 3], # Rabu
    [1, 2, 3, 2, 4, 1, 2, 4, 1, 17, 24, 18, 41, 3, 44, 42, 12, 36, 41, 2, 2, 4, 2, 4], # Kamis
    [4, 1, 2, 2, 3, 2, 5, 1, 2, 12, 33, 27, 43, 8, 38, 53, 29, 45, 39, 3, 1, 1, 3, 4], # Jumat
    [2, 3, 1, 2, 2, 5, 2, 8, 4, 2, 3, 1, 5, 1, 2, 3, 2, 6, 1, 2, 2, 1, 4, 3], # Sabtu
    [1, 2, 3, 1, 1, 3, 4, 2, 3, 1, 2, 2, 5, 3, 2, 1, 4, 2, 45, 26, 33, 2, 2, 1], # Minggu
]
sns.heatmap(datapenggunaan_cpu, yticklabels=hari, xticklabels = jam, cmap='coolwarm')
```
Soal 5: Jamur
* Scatter Plot
* Letak pusat pertumbuhan jamur ada di (10, 12.5)
```
import matplotlib.pyplot as plt

# Mushroom-growth coordinates observed in the study; scatter them to see
# where the growth clusters lie.
x = [4.61, 5.08, 5.18, 7.82, 10.46, 7.66, 7.6, 9.32, 14.04, 9.95, 4.95,
     7.23, 5.21, 8.64, 10.08, 8.32, 12.83, 7.51, 7.82, 6.29, 0.04, 6.62,
     13.16, 6.34, 0.09, 10.04, 13.06, 9.54, 11.32, 7.12, -0.67, 10.5, 8.37,
     7.24, 9.18, 10.12, 12.29, 8.53, 11.11, 9.65, 9.42, 8.61, -0.67, 5.94,
     6.49, 7.57, 3.11, 8.7, 5.28, 8.28, 9.55, 8.33, 13.7, 6.65, 2.4, 3.54,
     9.19, 7.51, -0.68, 8.47, 14.82, 5.31, 14.01, 8.75, -0.57, 5.35, 10.51,
     3.11, -0.26 , 5.74, 8.33, 6.5, 13.85, 9.78, 4.91, 4.19, 14.8, 10.04,
     13.47, 3.28]
y = [-2.36, -3.41, 13.01, -2.91, -2.28, 12.83, 13.13, 11.94, 0.93,
     -2.76, 13.31, -3.57, -2.33, 12.43, -1.83, 12.32, -0.42, -3.08, -2.98,
     12.46, 8.34, -3.19, -0.47, 12.78, 2.12, -2.72, 10.64, 11.98, 12.21,
     12.52, 5.53, 11.72, 12.91, 12.56, -2.49, 12.08, -1.09, -2.89, -1.78,
     -2.47, 12.77, 12.41, 5.33, -3.23, 13.45, -3.41, 12.46, 12.1, -2.56,
     12.51, -2.37, 12.76, 9.69, 12.59, -1.12, -2.8, 12.94, -3.55, 7.33,
     12.59, 2.92, 12.7, 0.5, 12.57, 6.39, 12.84, -1.95, 11.76, 6.82, 12.44,
     13.28, -3.46, 0.7, -2.55, -2.37, 12.48, 7.26, -2.45, 0.31, -2.51]

plt.title('Penelitian Jamur')
plt.xlabel("Koordinat x")
plt.ylabel("Koordinat y")
plt.scatter(x, y)
plt.show()
```
| github_jupyter |
# Solving 10 Queens using pygenetic
In this example we are going to walk through the usage of GAEngine to solve the N-Queens problem
The objective is to place the queens on a single board such that every queen is in a safe position
<b>Each configuration of board represents a potential candidate solution for the problem</b>
## 1. Chromosome Representation
<img src="nQueens-Chromosome.png" style="width:700px;">
For the given chess board, the chromosome is encoded as the row number in which the queen is present in each column of the chess board. It can also be encoded as the column number in which the queen is present in each row of the chess board (as done in this code)
This can be easily achieved by using the `RangeFactory` of `pygenetic`. <br/>
The `RangeFactory` takes the following parameters
* minValue = minimum value a gene can take = 0 <br/>
* maxValue = maximum value a gene can take = 9 <br/>
* duplicates = if duplicates are allowed = False <br/>
* noOfGenes = number of genes in the chromosome = 10
```
from pygenetic import ChromosomeFactory
factory = ChromosomeFactory.ChromosomeRangeFactory(minValue=0,maxValue=9,noOfGenes=10,duplicates=False)
```
You can also check if the factory works as expected by calling `createChromosome` function and observing the chromosome produced by the factory
```
# Code to test if factory works as expected
for i in range(5):
print('Chromosome created: ', factory.createChromosome())
```
## 2. Fitness function
Fitness for a given chromosome is the number of non-intersecting queens for that given chess board configuration. Hence the highest fitness for a N X N chess board is N. Hence, we have a maximization GA problem with the aim of achieving fitness value N.
We can easily define such fitness functions in python taking a chromosome as a parameter
```
def fitness(board):
    """Count the queens on *board* that attack no other queen.

    board[i] is the row of the queen placed in column i. Two queens attack
    each other when they share a row (equal values) or a diagonal (row
    difference equals column difference). The maximum fitness is
    len(board): a fully safe N-queens configuration.
    """
    def _is_safe(i):
        # Queen i is safe iff no other queen shares its row or diagonal.
        return all(
            board[i] != board[j] and abs(board[i] - board[j]) != abs(i - j)
            for j in range(len(board))
            if j != i
        )

    return sum(1 for i in range(len(board)) if _is_safe(i))
```
We need then create a `GAEngine` instance from the `pygenetic` package and set the following
* `factory` = the range factory instance we had intially created
* `population_size = 500` would be a good number for this problem
* `cross_prob = 0.8`
* `mut_prob = 0.2`
* `fitness_type = ('equal', 10)` since our objective in this GA is to achieve the fitness value of 10
```
from pygenetic import GAEngine
ga = GAEngine.GAEngine(factory,population_size=500,fitness_type=('equal',10),mut_prob = 0.2,cross_prob = 0.8)
```
We can now add the fitness function we had defined to this `GAEngine` instance
```
ga.setFitnessHandler(fitness)
```
## 3. Determing other attributes of the GA
Many Standard Crossover, Mutation, Selection and Fitness functions are present in the `Utils` module of the `pygenetic` package.
```
from pygenetic import Utils
```
### Crossover
Traditional crossover methods such as 1-point and 2-point crossover cannot be used since they create duplicate genes in the offspring. In the popularly used `distinct` crossover, the first half of the chromosome is kept the same while the second half is obtained by sequentially traversing the second chromosome and adding elements only if they are not already present.
<img src="nQueens-crossover.png" style="width:700px;">
This can be done using the `addCrossoverHandler` of the pygenetic module which takes as parameters
* crossover_function = the crossover function to be used
* weight = the weightage the crossover function needs to be given (mainly used when multiple crossovers are added)
```
ga.addCrossoverHandler(Utils.CrossoverHandlers.distinct, 4)
```
### Mutation
The use of the mutation technique of `swap` as shown in the diagram also ensures that each element in the chromosome is a unique number and that there are no duplicates. This is a suitable for mutation function for this problem
<img src="nQueens-mutation.png" style="width:700px">
This can be done using the `addMutationHandler` of the pygenetic module which takes as parameters
* mutation_function = the mutation function to be used
* weight = the weightage the mutation function needs to be given (mainly used when multiple mutations are added)
```
ga.addMutationHandler(Utils.MutationHandlers.swap,2)
```
## Selection
The selection function `best` chooses the best (1 - cross_prob) percent of the population. Hence, this function is one of the possible selection handlers which can be used in our genetic algorithm
```
ga.setSelectionHandler(Utils.SelectionHandlers.best)
```
## 4. Time to Evolve
This can be easily done using the `evolve` function of the GAEngine instance. It takes the `noOfIterations` as a parameter. Let's evolve it for 100 generations
```
ga.evolve(100)
```
We can get the best member by using the `best_fitness` attribute of the `GAEngine`.
It returns a tuple having
* chromsome having best fitness
* best fitness value
```
best = ga.best_fitness
print(best)
```
We can decode the chromosome into a chess board accordingly
```
def print_board(chromosome):
    """Print the chess board encoded by *chromosome*.

    Each gene produces one row: dashes for empty squares and a 'Q' at the
    column given by the gene's value.
    """
    width = len(chromosome)
    for col in chromosome:
        left = "- " * col
        right = "- " * (width - col - 1)
        print(left + "Q " + right)
print('Best Board is')
print_board(ga.best_fitness[0])
```
## 5. Plotting the Statistics
- The functionality for plotting the best, worst, average fitness values across iterations is present in `plot_statistics` function of statistics.py module. The function takes a list of attributes to be plotted.
- These attributes can be `best-fitness`,`worst-fitness`,`avg-fitness`, `'diversity`, `mutation_rate`
- The diversity and mutation rate values over iterations can also be visualized
```
import matplotlib.pyplot as plt
fig = ga.statistics.plot_statistics(['best-fitness','worst-fitness','avg-fitness'])
plt.show()
fig = ga.statistics.plot_statistics(['diversity'])
plt.show()
fig = ga.statistics.plot_statistics(['mutation_rate'])
plt.show()
```
| github_jupyter |
# Event-Driven Simulation of M/M/1 Queues
We start with a single-server queue with a FIFO queuing discipline. For M/M/1 queue, the customer inter-arrival time and the service time are both exponentially distributed. There is only one server for the queue. When a customer arrives at the queue, he will find himself at the end of the queue, where he will wait until he gets to the front of the queue. He will enter service as soon as the server finishes with the previous customer. Then the customer will get served for some time and then leave the system.
We use simulus to simulate this scenario. Let's start with the easy way using the event-driven approach, where we handle the customer arrival and departure events, since these are the only instances when the state the queue may change.
Before we start this section, let's first run the following so that we can set up the environment and import the necessary packages.
```
import random
import numpy as np
import scipy.stats as stats
import simulus
from qmodels.rng import expon
import matplotlib.pyplot as plt
%matplotlib inline
```
## Scheduling Event Handlers
When a customer arrives at the queue, two things happen. One is that we need to schedule the arrival of the next customer. The other is that we check whether the customer can be served immediately (that is, the customer is the only one in the system upon arrival). If so, we need to schedule the departure of this customer.
In simulus, to schedule an event in the future, we use the simulator's `sched()`, which takes the name of a function (which we call the event handler), followed by the list of arguments to be passed to the event handler (we have none for this example). The 'offset' argument is a keyword argument for the `sched()` function (not for the event handler); it specifies the relative time from now the event is scheduled to happen.
The event handler for customer arrival is listed below. Note that we use a global varaible `num_in_system` to track the current number of customers in the system.
```
def arrive():
    """Event handler for a customer arrival in the event-driven M/M/1 model.

    Uses module globals: sim (the simulator), num_in_system (queue state),
    and the inter_arrival_time / service_time variate generators.
    """
    global num_in_system
    print('%g: customer arrives (num_in_system=%d->%d)' %
          (sim.now, num_in_system, num_in_system+1))

    # increment the total number of customers in system
    num_in_system += 1

    # schedule next customer's arrival
    sim.sched(arrive, offset=next(inter_arrival_time))

    # the arrived customer is the only one in system: the server is idle,
    # so service starts immediately
    if num_in_system == 1:
        # schedule the customer's departure
        sim.sched(depart, offset=next(service_time))
```
When a customer gets served, he leaves the system. After that, if there are other customers waiting, the next customer will be served immediately. For that, we need to schedule the departure of that customer.
The event handler for customer departure is listed below:
```
def depart():
    """Event handler for a customer departure (service completion).

    Uses module globals: sim, num_in_system, and the service_time generator.
    """
    global num_in_system
    print('%g: customer departs (num_in_system=%d->%d)' %
          (sim.now, num_in_system, num_in_system-1))

    # decrement the total number of customers in system
    num_in_system -= 1

    # there are remaining customers in system: start serving the next one
    if num_in_system > 0:
        # schedule the next customer's departure
        sim.sched(depart, offset=next(service_time))
```
Now we are ready to simulate the queue. We instantiate a simulator. We then create two generators, one for the inter-arrival time and the other for the service time. The two generators use separate random streams, seeded from the simulator-specific random sequence. Before we start the simulation, we schedule the first customer's arrival. And finally we run the simulation for 10 simulated seconds.
```
# Drive the simulation: seed the global RNG, create the simulator and two
# exponential variate generators (mean inter-arrival 1.2 s, mean service
# 0.8 s) on independent streams, schedule the first arrival, and run for
# 10 simulated seconds.
random.seed(13579) # global random seed
sim = simulus.simulator('mm1')
inter_arrival_time = expon(1.2, sim.rng().randrange(2**32))
service_time = expon(0.8, sim.rng().randrange(2**32))
num_in_system = 0
sim.sched(arrive, offset=next(inter_arrival_time))
sim.run(10)
```
## Statistics Collection and Plotting
The output from the previous simulation is kind of boring. Let's gather some statistics and try to plot them.
We want to collect the wait time of the customers. To do that, we need to remember the time when the customer arrives at the system and also the time when the customer departs from the system. There are many ways to do the bookkeeping, the easy way we found is to simply use a data structure to keep track of the waiting customers. We use a deque (double-ended queue). We insert the time at which a customer enters the system and remove a time when the customer leaves. The time in-between is the customers wait time (including both queuing time and time in service).
We also want to track the number of customers in the system as it changes over time. For this, we use a list and each entry in the list is a tuple consisted of the time and the number of customers enqueued at the time.
The event handlers revised with bookkeeping are listed below:
```
from collections import deque

def arrive():
    """Arrival handler with bookkeeping: record arrival time and queue length.

    Uses module globals: sim, queue (deque of arrival times),
    in_systems (list of (time, queue length) samples), and the generators.
    """
    # add the customer to the end of the queue (store the arrival time)
    queue.append(sim.now)
    in_systems.append((sim.now, len(queue)))

    # schedule next customer's arrival
    sim.sched(arrive, offset=next(inter_arrival_time))

    # the arrived customer is the only one in system
    if len(queue) == 1:
        # schedule the customer's departure
        sim.sched(depart, offset=next(service_time))

def depart():
    """Departure handler with bookkeeping: record the customer's wait time."""
    # remove a customer from the head of the queue; t is their arrival time
    t = queue.popleft()
    in_systems.append((sim.now, len(queue)))
    waits.append(sim.now-t)  # total time in system (queueing + service)

    # there are remaining customers in system
    if len(queue) > 0:
        # schedule the next customer's departure
        sim.sched(depart, offset=next(service_time))
```
Now we can run the simulation to gather the statistics. We can calculate the sample mean and standard deviation of the wait time. We can also calculate the (time-weighted) average number of customers in the system. If one plots the number of customers in system over time (which is a step function), it's the area under the curve divided by time.
```
# Re-run the simulation for 10,000 simulated seconds, collecting wait times
# and (time, queue length) samples, then report summary statistics.
queue = deque()
in_systems = [(0,0)]  # start with an empty system at time 0
waits = []

sim = simulus.simulator('mm1')
inter_arrival_time = expon(1.2, sim.rng().randrange(2**32))
service_time = expon(0.8, sim.rng().randrange(2**32))
sim.sched(arrive, offset=next(inter_arrival_time))
sim.run(10000)

print('wait times: %r...' % waits[:3])
print('number customers in systems: %r...' % in_systems[:3])

waits = np.array(waits)
print("wait time: mean=%g, stdev=%g" %
      (waits.mean(), waits.std()))

# area under curve divided by time is the
# average number of customers in system (time-weighted average of the
# step function traced by in_systems)
auc, last_t, last_l = 0, 0, 0
for t, l in in_systems:
    auc += (t-last_t)*last_l
    last_t, last_l = t, l
print("avg number of customers in system = %g" % (auc/last_t))
```
The queuing theory for M/M/1 queue tells us that in steady state, the mean wait time should be $1/(\mu - \lambda)$ and the mean number of customers in system should be $\lambda/(\mu - \lambda)$. That is, $\lambda=1/1.2=5/6$ and $\mu=1/0.8=5/4$. Therefore, the mean wait time should be 2.4, and the mean number of customers in system should be 2.0. We can use long-run simulation to obtain better steady-state estimations. But in most simulation cases, we'd be more interested in the transient behaviors of the system under study.
The data collected during simulation can also be used to generate many interesting plots. For example, we can plot the number of customers in the system as it changes over time. In the following, we only plot the first few.
```
# Step plot of the first 20 (time, queue length) samples; where='post'
# holds each level until the next event, matching the queue's step behavior.
plt.step(*zip(*in_systems[:20]), where='post')
plt.title("M/M/1 Queue")
plt.xlabel("Time")
plt.ylabel("Number Customers in System")
plt.show()
```
We can plot histogram of the wait time. And we can examine the percentage of customers who have experienced long wait time, say, more than 10 seconds.
```
# Histogram of customer wait times, then two tail statistics: the fraction
# of customers waiting more than 10 s, and the 90th-percentile wait.
plt.hist(waits, alpha=0.5, bins='auto', density=True)
plt.show()

print('%g%% customers experienced wait time more than 10 seconds,' %
      (100*(waits>10).sum()/len(waits)))
print('while 90%% of customers experienced wait time no more than %g seconds' %
      np.percentile(waits, 90))
```
The above plot shows that the mean wait time increases as we increase 'b', which increases both the mean and the standard deviation of the service time. The confidence interval also seems to be getting larger as we increase 'b'.
As 'b' increases, the service rate decreases and the difference between the arrival rate and service rate decreases as a result. It takes longer for the simulation to reach steady state. We fixed each simulation run to last for 1000 simulated seconds, which may not be sufficient for a larger 'b'.
## Source Code
The following is the source code for the event-driven M/M/1 model. To make the code more organized and reusable, we wrap the variables and both `arrive` and `depart` methods inside the class `mm1`. We also use Python's logging module to provide user with the option to turn printing on and off.
```
# %load '../qmodels/mm1.py'
import random
from collections import deque
import numpy as np
import scipy.stats as stats
import simulus
from qmodels.rng import *
__all__ = ['mm1']
import logging
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
class mm1(object):
    """Event-driven M/M/1 queue model.

    Collects per-customer wait times in self.waits and (time, queue length)
    samples in self.in_systems for later analysis.
    """

    def __init__(self, sim, mean_iat, mean_svtime):
        """Create the queue on simulator *sim* with the given mean
        inter-arrival time and mean service time; schedules the first
        arrival immediately."""
        self.sim = sim
        self.inter_arrival_time = expon(mean_iat, sim.rng().randrange(2**32))
        self.service_time = expon(mean_svtime, sim.rng().randrange(2**32))
        self.queue = deque()
        self.in_systems = [(0,0)]
        self.waits = []
        sim.sched(self.arrive, offset=next(self.inter_arrival_time))

    def arrive(self):
        '''Event handler for customer arrival.'''
        # fixed: was the global `sim.now`, which raises NameError when this
        # module is imported rather than run as a script
        log.info('%g: customer arrives (num_in_system=%d->%d)' %
                 (self.sim.now, len(self.queue), len(self.queue)+1))

        # add the customer to the end of the queue
        self.queue.append(self.sim.now)
        self.in_systems.append((self.sim.now, len(self.queue)))

        # schedule next customer's arrival
        self.sim.sched(self.arrive, offset=next(self.inter_arrival_time))

        # the arrived customer is the only one in system
        if len(self.queue) == 1:
            # schedule the customer's departure
            self.sim.sched(self.depart, offset=next(self.service_time))

    def depart(self):
        '''Event handler for customer departure.'''
        # fixed: was the global `sim.now` (see arrive)
        log.info('%g: customer departs (num_in_system=%d->%d)' %
                 (self.sim.now, len(self.queue), len(self.queue)-1))

        # remove a customer from the head of the queue
        t = self.queue.popleft()
        self.in_systems.append((self.sim.now, len(self.queue)))
        self.waits.append(self.sim.now-t)

        # there are remaining customers in system
        if len(self.queue) > 0:
            # schedule the next customer's departure
            self.sim.sched(self.depart, offset=next(self.service_time))
if __name__ == '__main__':
    # turn on logging for all messages
    logging.basicConfig()
    logging.getLogger(__name__).setLevel(logging.DEBUG)

    random.seed(13579) # global random seed
    sim = simulus.simulator('mm1') # create a simulator instance
    q = mm1(sim, 1.2, 0.8) # create the m/m/1 queue
    sim.run(10)  # run for 10 simulated seconds
```
| github_jupyter |
[View in Colaboratory](https://colab.research.google.com/github/DJCordhose/ai/blob/master/notebooks/nlp/3-gru-dropout.ipynb)
```
# Based on
# https://github.com/fchollet/deep-learning-with-python-notebooks/blob/master/6.2-understanding-recurrent-neural-networks.ipynb
import warnings
warnings.filterwarnings('ignore')
%matplotlib inline
%pylab inline
import pandas as pd
print(pd.__version__)
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
print(tf.__version__)
# let's see what compute devices we have available, hopefully a GPU
sess = tf.Session()
devices = sess.list_devices()
for d in devices:
print(d.name)
# a small sanity check, does tf seem to work ok?
hello = tf.constant('Hello TF!')
print(sess.run(hello))
from tensorflow import keras
print(keras.__version__)
# https://keras.io/datasets/#imdb-movie-reviews-sentiment-classification
max_features = 10000 # number of words to consider as features
maxlen = 500 # cut texts after this number of words (among top max_features most common words)
# each review is encoded as a sequence of word indexes
# indexed by overall frequency in the dataset
# output is 0 (negative) or 1 (positive)
imdb = tf.keras.datasets.imdb.load_data(num_words=max_features)
(raw_input_train, y_train), (raw_input_test, y_test) = imdb
# https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/sequence/pad_sequences
input_train = tf.keras.preprocessing.sequence.pad_sequences(raw_input_train, maxlen=maxlen)
input_test = tf.keras.preprocessing.sequence.pad_sequences(raw_input_test, maxlen=maxlen)
input_train.shape, input_test.shape, y_train.shape, y_test.shape
```
## GRU
```
# tf.keras.layers.GRU?
# Batch Normalization:
# https://towardsdatascience.com/batch-normalization-in-neural-networks-1ac91516821c
# https://www.quora.com/Why-does-batch-normalization-help
# Build and train a stacked-GRU binary sentiment classifier on the padded
# IMDB sequences prepared in the earlier cells.
embedding_dim = 32
dropout = 0.15
recurrent_dropout = 0.2

model = tf.keras.Sequential()
model.add(tf.keras.layers.Embedding(name='embedding', input_dim=max_features, output_dim=embedding_dim, input_length=maxlen))
# https://arxiv.org/ftp/arxiv/papers/1701/1701.05923.pdf
# n = output dimension
# m = input dimension
# Total number of parameters for
# RNN = n**2 + nm + n
# GRU = 3 × (n**2 + nm + n)
# LSTM = 4 × (n**2 + nm + n)
# return_sequences=True so the next GRU layer receives the full sequence
model.add(tf.keras.layers.GRU(name='gru1', units=32, dropout=dropout, recurrent_dropout=recurrent_dropout, return_sequences=True))
# for embedding: 32*2 ("standard deviation" parameter (gamma), "mean" parameter (beta)) trainable parameters
# and 32*2 (moving_mean and moving_variance) non-trainable parameters
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.Dropout(dropout))

# stack recurrent layers like with fc
model.add(tf.keras.layers.GRU(name='gru2', units=32))
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.Dropout(dropout))

# binary classifier head: one dense relu layer, then a sigmoid output unit
model.add(tf.keras.layers.Dense(name='fc', units=32, activation='relu'))
model.add(tf.keras.layers.Dense(name='classifier', units=1, activation='sigmoid'))

model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
model.summary()

batch_size = 1000

# train for 10 epochs, holding out 20% of the training data for validation
%time history = model.fit(input_train, y_train, epochs=10, batch_size=batch_size, validation_split=0.2)
train_loss, train_accuracy = model.evaluate(input_train, y_train, batch_size=batch_size)
train_accuracy
test_loss, test_accuracy = model.evaluate(input_test, y_test, batch_size=batch_size)
test_accuracy
# precition
model.predict(input_test[0:5])
# ground truth
y_test[0:5]
# ignore this, it is just technical code to plot decision boundaries
# Adapted from:
# http://scikit-learn.org/stable/auto_examples/neighbors/plot_classification.html
# http://jponttuset.cat/xkcd-deep-learning/
def plot_history(history, samples=100, init_phase_samples=None, plot_line=False):
    """Plot training/validation accuracy and loss from a Keras History.

    Parameters:
        history: Keras History object (needs 'acc', 'val_acc', 'loss',
            'val_loss' in history.history and 'epochs' in history.params).
        samples: approximate number of points to down-sample each curve to.
        init_phase_samples: if given, drop epochs before this index so the
            noisy initial phase does not dominate the axes.
        plot_line: if True, overlay a fitted linear-regression trend line
            (uses the module-level linear_regression helper).
    """
    epochs = history.params['epochs']

    acc = history.history['acc']
    val_acc = history.history['val_acc']
    loss = history.history['loss']
    val_loss = history.history['val_loss']

    # Guard against a zero step: the original int(epochs / samples) is 0
    # whenever samples > epochs, and iloc[::0] raises ValueError.
    every_sample = max(1, int(epochs / samples))

    acc = pd.DataFrame(acc).iloc[::every_sample, :]
    val_acc = pd.DataFrame(val_acc).iloc[::every_sample, :]
    loss = pd.DataFrame(loss).iloc[::every_sample, :]
    val_loss = pd.DataFrame(val_loss).iloc[::every_sample, :]

    if init_phase_samples:
        acc = acc.loc[init_phase_samples:]
        val_acc = val_acc.loc[init_phase_samples:]
        loss = loss.loc[init_phase_samples:]
        val_loss = val_loss.loc[init_phase_samples:]

    fig, ax = plt.subplots(nrows=2, figsize=(20,10))

    ax[0].plot(acc, 'bo', label='Training acc')
    ax[0].plot(val_acc, 'b', label='Validation acc')
    ax[0].set_title('Training and validation accuracy')
    ax[0].legend()

    if plot_line:
        x, y, _ = linear_regression(acc)
        ax[0].plot(x, y, 'bo', color='red')
        x, y, _ = linear_regression(val_acc)
        ax[0].plot(x, y, 'b', color='red')

    ax[1].plot(loss, 'bo', label='Training loss')
    ax[1].plot(val_loss, 'b', label='Validation loss')
    ax[1].set_title('Training and validation loss')
    ax[1].legend()

    if plot_line:
        x, y, _ = linear_regression(loss)
        ax[1].plot(x, y, 'bo', color='red')
        x, y, _ = linear_regression(val_loss)
        ax[1].plot(x, y, 'b', color='red')
from sklearn import linear_model
def linear_regression(data):
    """Fit an ordinary-least-squares line to a pandas object.

    The index of ``data`` is used as the x variable and its values as y.
    Returns a tuple ``(x, y_pred, coef)``: the x column vector, the fitted
    predictions for every x, and the slope coefficient(s) of the model.
    """
    x_vals = np.asarray(data.index).reshape(-1, 1)
    y_vals = data.values.reshape(-1, 1)
    fitted = linear_model.LinearRegression()
    fitted.fit(x_vals, y_vals)
    predictions = fitted.predict(x_vals)
    return x_vals, predictions, fitted.coef_
plot_history(history, samples=10)
plot_history(history, samples=10, init_phase_samples=2, plot_line=True)
```
| github_jupyter |
<h1>Table of Contents<span class="tocSkip"></span></h1>
<div class="toc"><ul class="toc-item"><li><span><a href="#Cargamos-librerias" data-toc-modified-id="Cargamos-librerias-1">Cargamos librerias</a></span><ul class="toc-item"><li><span><a href="#metricas-de-evaluacion-(sigmas)-+-funciones-de-utilidad" data-toc-modified-id="metricas-de-evaluacion-(sigmas)-+-funciones-de-utilidad-1.1">metricas de evaluacion (sigmas) + funciones de utilidad</a></span></li><li><span><a href="#Datos-de-entrenamiento!" data-toc-modified-id="Datos-de-entrenamiento!-1.2">Datos de entrenamiento!</a></span></li><li><span><a href="#usamos-🐼" data-toc-modified-id="usamos-🐼-1.3">usamos 🐼</a></span></li><li><span><a href="#preprocesamiento-para-X-y-Y" data-toc-modified-id="preprocesamiento-para-X-y-Y-1.4">preprocesamiento para X y Y</a></span></li></ul></li><li><span><a href="#ML-con-Scikit-learn" data-toc-modified-id="ML-con-Scikit-learn-2">ML con Scikit-learn</a></span><ul class="toc-item"><li><ul class="toc-item"><li><span><a href="#Regression-Logistica" data-toc-modified-id="Regression-Logistica-2.0.1">Regression Logistica</a></span><ul class="toc-item"><li><span><a href="#Coeficientes" data-toc-modified-id="Coeficientes-2.0.1.1">Coeficientes</a></span></li></ul></li></ul></li><li><span><a href="#predecir-probabilidades" data-toc-modified-id="predecir-probabilidades-2.1">predecir probabilidades</a></span></li><li><span><a href="#SGDclassifier-(Regression-Logistica)" data-toc-modified-id="SGDclassifier-(Regression-Logistica)-2.2">SGDclassifier (Regression Logistica)</a></span><ul class="toc-item"><li><span><a href="#Actividad:-Evalua!" 
data-toc-modified-id="Actividad:-Evalua!-2.2.1">Actividad: Evalua!</a></span></li></ul></li><li><span><a href="#Regularizacion" data-toc-modified-id="Regularizacion-2.3">Regularizacion</a></span></li></ul></li><li><span><a href="#Actividad:" data-toc-modified-id="Actividad:-3">Actividad:</a></span><ul class="toc-item"><li><span><a href="#Metodos-de-ensembles" data-toc-modified-id="Metodos-de-ensembles-3.1">Metodos de ensembles</a></span></li><li><span><a href="#predecir-probabilidades" data-toc-modified-id="predecir-probabilidades-3.2">predecir probabilidades</a></span></li><li><span><a href="#Modelos-de-arboles:-feature-importance" data-toc-modified-id="Modelos-de-arboles:-feature-importance-3.3">Modelos de arboles: feature importance</a></span></li><li><span><a href="#Mejorando-la-regla-de-decision" data-toc-modified-id="Mejorando-la-regla-de-decision-3.4">Mejorando la regla de decision</a></span><ul class="toc-item"><li><span><a href="#en-vez-de-0.5-usaremos-un-percentil" data-toc-modified-id="en-vez-de-0.5-usaremos-un-percentil-3.4.1">en vez de 0.5 usaremos un percentil</a></span></li></ul></li><li><span><a href="#Probabilidad-de-corte" data-toc-modified-id="Probabilidad-de-corte-3.5">Probabilidad de corte</a></span></li></ul></li><li><span><a href="#Actividad:" data-toc-modified-id="Actividad:-4">Actividad:</a></span></li></ul></div>

# Cargamos librerias
```
%matplotlib inline
%config InlineBackend.figure_format='retina'
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import scipy as sc
import pandas as pd
import sklearn
import matplotlib.pyplot as plt
import seaborn as sns
import os
from IPython.display import display
import sys
```
## metricas de evaluacion (sigmas) + funciones de utilidad

```
from sklearn.metrics import roc_curve, auc
def AMSScore(s, b):
    """Approximate Median Significance (AMS) of a signal/background split.

    ``s`` and ``b`` are the weighted counts of true-positive (signal) and
    false-positive (background) events; the constant 10 is the regularisation
    term used by the HiggsML challenge.
    """
    b_reg = 10.
    radicand = 2. * ((s + b + b_reg) * np.log(1. + s / (b + b_reg)) - s)
    return np.sqrt(radicand)
def eval_model(Y_true_train, Y_pred_train, w_train, Y_true_test, Y_pred_test, w_test):
    """Print AUC and AMS (sigma) scores for a classifier on train and test splits.

    The event weights are rescaled to the full data set: training weights by
    1/ratio and test weights by 1/(1-ratio), where ratio is the train/test
    size ratio. Returns ``(score_train, score_test)``, the AMS of each split.
    """
    # BUG FIX: 'sk' was never imported anywhere in the notebook (only
    # 'import sklearn'), so the AUC lines raised NameError.
    from sklearn.metrics import roc_auc_score
    # BUG FIX: derive the split ratio from the arguments instead of the
    # notebook globals X_train / X_test, so the function works on any data.
    ratio = float(len(Y_true_train)) / float(len(Y_true_test))
    TruePositive_train = w_train * (Y_true_train == 1.0) * (1.0 / ratio)
    TrueNegative_train = w_train * (Y_true_train == 0.0) * (1.0 / ratio)
    TruePositive_valid = w_test * (Y_true_test == 1.0) * (1.0 / (1 - ratio))
    TrueNegative_valid = w_test * (Y_true_test == 0.0) * (1.0 / (1 - ratio))
    # weighted signal (s) and background (b) counts among events predicted as signal
    s_train = sum(TruePositive_train * (Y_pred_train == 1.0))
    b_train = sum(TrueNegative_train * (Y_pred_train == 1.0))
    s_test = sum(TruePositive_valid * (Y_pred_test == 1.0))
    b_test = sum(TrueNegative_valid * (Y_pred_test == 1.0))
    score_train = AMSScore(s_train, b_train)
    score_test = AMSScore(s_test, b_test)
    print('--- Resultados --')
    # BUG FIX: score the function arguments, not the notebook globals
    # Y_train / Y_train_pred / Y_test / Y_test_pred.
    print('- AUC train: {:.3f} '.format(roc_auc_score(Y_true_train, Y_pred_train)))
    print('- AUC test : {:.3f} '.format(roc_auc_score(Y_true_test, Y_pred_test)))
    print('- AMS train: {:.3f} sigma'.format(score_train))
    print('- AMS test : {:.3f} sigma'.format(score_test))
    return score_train, score_test
def plot_roc(clf, Y_test, Y_test_prob):
    """Draw the ROC curve of ``clf`` (from its predicted probabilities)
    together with the random-guess diagonal, labelled by the classifier name."""
    false_pos, true_pos, _ = roc_curve(Y_test, Y_test_prob)
    roc_auc = auc(false_pos, true_pos)
    model_name = str(clf.__class__.__name__)
    plt.plot(false_pos, true_pos, lw=1, alpha=0.3, label=model_name)
    diagonal = np.linspace(0, 1, 100)
    plt.plot(diagonal, diagonal, lw=2, alpha=0.3, label='Suerte')
    plt.legend(loc='lower right')
    plt.xlim([0, 1])
    plt.ylim([0, 1])
    plt.tight_layout()
    return
```
## Datos de entrenamiento!
Quieres saber mas? Visita [http://higgsml.lal.in2p3.fr/documentation](http://higgsml.lal.in2p3.fr/documentation)
```
!wget
```
## usamos 🐼
```
df=pd.read_csv('datos/training.csv')
print(df.shape)
df.head(1)
```
## preprocesamiento para X y Y
```
Y = df['Label'].replace(to_replace=['s','b'],value=[1,0]).values
weights = df['Weight'].values
X = df.drop(['EventId','Label','Weight'],axis=1).values
from sklearn.model_selection import train_test_split
X_train,X_test,Y_train,Y_test,w_train,w_test = train_test_split(X,Y,weights,train_size=0.3)
print(X_train.shape,Y_train.shape,w_train.shape)
print(X_test.shape,Y_test.shape,w_test.shape)
```
# ML con Scikit-learn
![sklearn](extra/sklearn_logo.png)
### Regression Logistica
** Modelo :** $h_{\theta}(x) = g(\theta^{T}x) = g(\sum \theta_i x_i +b)$ con $g(z)=\frac{1}{1+e^{-z}}$
** optimizador, metrica?**
```
from sklearn.linear_model import LogisticRegression
clf = LogisticRegression(verbose=1)
clf.fit(X_train,Y_train)
```
#### Coeficientes
$$\sum \theta_i x_i + b $$
```
print('a = {}'.format(clf.coef_))
print('b = {}'.format(clf.intercept_))
sns.distplot(clf.coef_,kde=False)
plt.show()
```
## predecir probabilidades
```
# Predict hard labels and class-1 probabilities for both splits
Y_train_pred = clf.predict(X_train)
Y_test_pred = clf.predict(X_test)
Y_train_prob = clf.predict_proba(X_train)[:, 1]
Y_test_prob = clf.predict_proba(X_test)[:, 1]
# BUG FIX: the notebook imports 'sklearn', never 'sk' -- import the scorer directly
from sklearn.metrics import roc_auc_score
print('AUC:')
print('train: {:2.4f}'.format(roc_auc_score(Y_train, Y_train_pred)))
print('test: {:2.4f}'.format(roc_auc_score(Y_test, Y_test_pred)))
eval_model(Y_train, Y_train_pred, w_train, Y_test, Y_test_pred, w_test)
# sigmoid curve, for reference
x = np.linspace(-30, 30, 100)
plt.plot(x, 1.0 / (1 + np.exp(-x)))
plt.show()
from sklearn.metrics import roc_curve, auc
fpr, tpr, thresholds = roc_curve(Y_test, Y_test_prob)
roc_auc = auc(fpr, tpr)
plt.plot(fpr, tpr, lw=1, alpha=0.3, label=str(clf.__class__.__name__))
plt.plot(np.linspace(0, 1, 100), np.linspace(0, 1, 100), lw=2, alpha=0.3, label='Suerte')
plt.legend(loc='lower right')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.xlabel('Falsos Positivos')
# BUG FIX: the y axis of a ROC curve is the TRUE positive rate, not false negatives
plt.ylabel('Verdaderos Positivos')
plt.tight_layout()
plt.show()
```
## SGDclassifier (Regression Logistica)
** Modelo :** $h_{\theta}(x) = g(\theta^{T}x)$ con $g(z)=\frac{1}{1+e^{-z}}$
** Costo :** $$J(\theta)=-\frac{1}{m}\sum_{i=1}^{m}y^{i}\log(h_\theta(x^{i}))+(1-y^{i})\log(1-h_\theta(x^{i}))$$
** Optimizador:** Descenso de gradiente
Ojo, la derivada del costo es:
$$ \frac{\partial}{\partial\theta_{j}}J(\theta) =\sum_{i=1}^{m}(h_\theta(x^{i})-y^i)x_j^i$$
```
from sklearn.linear_model import SGDClassifier
clf = SGDClassifier(loss='log',verbose=1,max_iter=500)
clf.fit(X_train,Y_train)
```
### Actividad: Evalua!
## Regularizacion
** Costo :** $$J(\theta)=-\frac{1}{m}\sum_{i=1}^{m}y^{i}\log(h_\theta(x^{i}))+(1-y^{i})\log(1-h_\theta(x^{i}))$$
** $L2$**: $$ + \alpha \sum \theta_i^2$$
** $L1$**: $$ + \frac{\lambda}{1}\sum |\theta_i|$$
```
from sklearn.linear_model import SGDClassifier
clf = SGDClassifier(loss='log',alpha=0.5,l1_ratio=0.2,verbose=1,max_iter=500)
clf.fit(X_train,Y_train)
```
# Actividad:
* Entrena un modelo para investigar el efecto de solo usar regularizacion L2 (apaga L1)
* Entrena un modelo para investigar el efecto de solo usar regularizacion L1 (apaga L2)
* Checa histogramas de tus pesos (coef)
## Metodos de ensembles
```
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier(verbose=1)
clf.fit(X_train,Y_train)
```
## predecir probabilidades
```
Y_train_pred = clf.predict(X_train)
Y_test_pred = clf.predict(X_test)
Y_train_prob=clf.predict_proba(X_train)[:,1]
Y_test_prob =clf.predict_proba(X_test)[:,1]
eval_model(Y_train,Y_train_pred,w_train,Y_test,Y_test_pred,w_test)
plot_roc(clf,Y_test,Y_test_prob)
```
## Modelos de arboles: feature importance
```
importances = clf.feature_importances_
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(X.shape[1]):
print('{:d}. X_{:d} ({:2.4f})'.format(f + 1, indices[f], importances[indices[f]]))
# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(X.shape[1]), importances[indices], align="center")
plt.xticks(range(X.shape[1]), indices)
plt.xlim([-1, X.shape[1]])
plt.show()
```
## Mejorando la regla de decision
### en vez de 0.5 usaremos un percentil
## Probabilidad de corte
```
sns.distplot(Y_train_prob)
plt.show()
pcut = np.percentile(Y_train_prob,80)
pcut
Y_train_pred = Y_train_prob > pcut
Y_test_pred = Y_test_prob > pcut
eval_model(Y_train,Y_train_pred,w_train,Y_test,Y_test_pred,w_test)
```
# Actividad:
* Escoge algun algoritmo que no hayamos visto.
* Trata de entender la idea central en 5 minutos.
* Identifica los componentes (Modelo, funcion objectivo, optimizador)
* Entrenar un algoritmo.
* Optimizar los hiperparametros.
| github_jupyter |

## Data-X: Titanic Survival Analysis
Data from: https://www.kaggle.com/c/titanic/data
**Authors:** Several public Kaggle Kernels, edits by Alexander Fred Ojala & Kevin Li
<img src="data/Titanic_Variable.png">
# Note
Install the xgboost package in your Python environment:
try:
```
$ conda install py-xgboost
```
```
'''
# You can also install the package by running the line below
# directly in your notebook
''';
#!conda install py-xgboost --y
```
## Import packages
```
# No warnings
import warnings
warnings.filterwarnings('ignore') # Filter out warnings
# data analysis and wrangling
import pandas as pd
import numpy as np
import random as rnd
# visualization
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
# machine learning
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB # Gaussian Naive Bayes
from sklearn.linear_model import Perceptron
from sklearn.linear_model import SGDClassifier #stochastic gradient descent
from sklearn.tree import DecisionTreeClassifier
import xgboost as xgb
# Plot styling
sns.set(style='white', context='notebook', palette='deep')
plt.rcParams[ 'figure.figsize' ] = 9 , 5
```
### Define fancy plot to look at distributions
```
# Special distribution plot (will be used later)
def plot_distribution( df , var , target , **kwargs ):
    """Plot the KDE of ``var`` split by ``target``, optionally faceted.

    Optional ``row``/``col`` keyword arguments name columns of ``df`` used
    to create facet rows/columns (e.g. row='Sex').
    """
    row_facet = kwargs.get('row')
    col_facet = kwargs.get('col')
    grid = sns.FacetGrid(df, hue=target, aspect=4, row=row_facet, col=col_facet)
    grid.map(sns.kdeplot, var, shade=True)
    grid.set(xlim=(0, df[var].max()))
    grid.add_legend()
    plt.tight_layout()
```
## References to material we won't cover in detail:
* **Gradient Boosting:** http://blog.kaggle.com/2017/01/23/a-kaggle-master-explains-gradient-boosting/
* **Naive Bayes:** http://scikit-learn.org/stable/modules/naive_bayes.html
* **Perceptron:** http://aass.oru.se/~lilien/ml/seminars/2007_02_01b-Janecek-Perceptron.pdf
## Input Data
```
train_df = pd.read_csv('data/train.csv')
test_df = pd.read_csv('data/test.csv')
combine = [train_df, test_df]
# NOTE! When we change train_df or test_df the objects in combine
# will also change
# (combine is only a pointer to the objects)
# combine is used to ensure whatever preprocessing is done
# on training data is also done on test data
```
# Exploratory Data Analysis (EDA)
We will analyze the data to see how we can work with it and what makes sense.
```
train_df
print(train_df.columns)
# preview the data
train_df.head(10)
# General data statistics
train_df.describe()
# Data Frame information (null, data type etc)
train_df.info()
```
### Comment on the Data
<div class='alert alert-info'>
`PassengerId` is a random number and thus does not contain any valuable information. `Survived, Passenger Class, Age Siblings Spouses, Parents Children` and `Fare` are numerical values -- so we don't need to transform them, but we might want to group them (i.e. create categorical variables). `Sex, Embarked` are categorical features that we need to map to integer values. `Name, Ticket` and `Cabin` might also contain valuable information.
</div>
# Preprocessing Data
```
# check dimensions of the train and test datasets
print("Shapes Before: (train) (test) = ", \
train_df.shape, test_df.shape)
# Drop columns 'Ticket', 'Cabin', need to do it for both test
# and training
train_df = train_df.drop(['Ticket', 'Cabin'], axis=1)
test_df = test_df.drop(['Ticket', 'Cabin'], axis=1)
combine = [train_df, test_df]
print("Shapes After: (train) (test) =", train_df.shape, test_df.shape)
# Check if there are null values in the datasets
print(train_df.isnull().sum())
print()
print(test_df.isnull().sum())
```
# Data Preprocessing
```
train_df.head(5)
```
### Hypothesis
The Title of the person is a feature that can predict survival
```
# List example titles in Name column
train_df.Name[:5]
# from the Name column we will extract title of each passenger
# and save that in a column in the dataset called 'Title'
# if you want to match Titles or names with any other expression
# refer to this tutorial on regex in python:
# https://www.tutorialspoint.com/python/python_reg_expressions.htm
# Create new column called title
for dataset in combine:
dataset['Title'] = dataset.Name.str.extract(' ([A-Za-z]+)\.',\
expand=False)
# Double check that our titles makes sense (by comparing to sex)
pd.crosstab(train_df['Title'], train_df['Sex'])
# same for test set
pd.crosstab(test_df['Title'], test_df['Sex'])
# We see common titles like Miss, Mrs, Mr, Master are dominant, we will
# correct some Titles to standard forms and replace the rarest titles
# with single name 'Rare'
for dataset in combine:
dataset['Title'] = dataset['Title'].\
replace(['Lady', 'Countess','Capt', 'Col', 'Don', 'Dr',\
'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona'], 'Rare')
dataset['Title'] = dataset['Title'].replace('Mlle', 'Miss') #Mademoiselle
dataset['Title'] = dataset['Title'].replace('Ms', 'Miss')
dataset['Title'] = dataset['Title'].replace('Mme', 'Mrs') #Madame
# Now that we have more logical titles, and a few groups
# we can plot the survival chance for each title
train_df[['Title', 'Survived']].groupby(['Title']).mean()
# We can also plot it
sns.countplot(x='Survived', hue="Title", data=train_df, order=[1,0])
plt.xticks(range(2),['Made it','Deceased']);
# Title dummy mapping
for dataset in combine:
binary_encoded = pd.get_dummies(dataset.Title)
newcols = binary_encoded.columns
dataset[newcols] = binary_encoded
train_df.head()
train_df = train_df.drop(['Name', 'Title', 'PassengerId'], axis=1)
test_df = test_df.drop(['Name', 'Title'], axis=1)
combine = [train_df, test_df]
train_df.shape, test_df.shape
```
## Gender column
```
# Map Sex to binary categories
for dataset in combine:
dataset['Sex'] = dataset['Sex'] \
.map( {'female': 1, 'male': 0} ).astype(int)
train_df.head()
```
### Handle missing values for age
We will now guess values of age based on sex (male / female)
and socioeconomic class (1st,2nd,3rd) of the passenger.
The row indicates the sex, male = 0, female = 1
More refined estimate than only median / mean etc.
```
guess_ages = np.zeros((2,3),dtype=int) #initialize
guess_ages
# Fill the NA's for the Age columns
# with "qualified guesses"
for idx,dataset in enumerate(combine):
if idx==0:
print('Working on Training Data set\n')
else:
print('-'*35)
print('Working on Test Data set\n')
print('Guess values of age based on sex and pclass of the passenger...')
for i in range(0, 2):
for j in range(0,3):
guess_df = dataset[(dataset['Sex'] == i) \
&(dataset['Pclass'] == j+1)]['Age'].dropna()
# Extract the median age for this group
# (less sensitive) to outliers
age_guess = guess_df.median()
# Convert random age float to int
guess_ages[i,j] = int(age_guess)
print('Guess_Age table:\n',guess_ages)
print ('\nAssigning age values to NAN age values in the dataset...')
for i in range(0, 2):
for j in range(0, 3):
dataset.loc[ (dataset.Age.isnull()) & (dataset.Sex == i) \
& (dataset.Pclass == j+1),'Age'] = guess_ages[i,j]
dataset['Age'] = dataset['Age'].astype(int)
print()
print('Done!')
train_df.head()
# Split into age bands and look at survival rates
train_df['AgeBand'] = pd.cut(train_df['Age'], 5)
train_df[['AgeBand', 'Survived']].groupby(['AgeBand'], as_index=False)\
.mean().sort_values(by='AgeBand', ascending=True)
# Plot distributions of Age of passangers who survived
# or did not survive
plot_distribution( train_df , var = 'Age' , target = 'Survived' ,\
row = 'Sex' )
# Change Age column to
# map Age ranges (AgeBands) to integer values of categorical type
for dataset in combine:
dataset.loc[ dataset['Age'] <= 16, 'Age'] = 0
dataset.loc[(dataset['Age'] > 16) & (dataset['Age'] <= 32), 'Age'] = 1
dataset.loc[(dataset['Age'] > 32) & (dataset['Age'] <= 48), 'Age'] = 2
dataset.loc[(dataset['Age'] > 48) & (dataset['Age'] <= 64), 'Age'] = 3
dataset.loc[ dataset['Age'] > 64, 'Age']=4
train_df.head()
# Note we could just run
# dataset['Age'] = pd.cut(dataset['Age'], 5,labels=[0,1,2,3,4])
# remove AgeBand from before
train_df = train_df.drop(['AgeBand'], axis=1)
combine = [train_df, test_df]
train_df.head()
```
# Create variable for Family Size
How did the number of people the person traveled with impact the chance of survival?
```
# SibSp = Number of Sibling / Spouses
# Parch = Parents / Children
for dataset in combine:
dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1
# Survival chance with FamilySize
train_df[['FamilySize', 'Survived']].groupby(['FamilySize'], as_index=False).mean().sort_values(by='Survived', ascending=False)
# Plot it, 1 is survived
sns.countplot(x='Survived', hue="FamilySize", data=train_df, order=[1,0]);
# Binary variable if the person was alone or not
for dataset in combine:
dataset['IsAlone'] = 0
dataset.loc[dataset['FamilySize'] == 1, 'IsAlone'] = 1
train_df[['IsAlone', 'Survived']].groupby(['IsAlone'], as_index=False).mean()
# We will only use the binary IsAlone feature for further analysis
train_df = train_df.drop(['Parch', 'SibSp', 'FamilySize'], axis=1)
test_df = test_df.drop(['Parch', 'SibSp', 'FamilySize'], axis=1)
combine = [train_df, test_df]
train_df.head()
# We can also create new features based on intuitive combinations
for dataset in combine:
dataset['Age*Class'] = dataset.Age * dataset.Pclass
train_df.loc[:, ['Age*Class', 'Age', 'Pclass']].head(8)
```
# Port the person embarked from
Let's see how that influences chance of survival
```
# To replace Nan value in 'Embarked', we will use the mode
# in 'Embarked'. This will give us the most frequent port
# the passengers embarked from
freq_port = train_df.Embarked.dropna().mode()[0]
freq_port
# Fill NaN 'Embarked' Values in the datasets
for dataset in combine:
dataset['Embarked'] = dataset['Embarked'].fillna(freq_port)
train_df[['Embarked', 'Survived']].groupby(['Embarked'], as_index=False).mean().sort_values(by='Survived', ascending=False)
# Let's plot it
sns.countplot(x='Survived', hue="Embarked", data=train_df, order=[1,0]);
# Create categorical dummy variables for Embarked values
for dataset in combine:
binary_encoded = pd.get_dummies(dataset.Embarked)
newcols = binary_encoded.columns
dataset[newcols] = binary_encoded
train_df.head()
# Drop Embarked
for dataset in combine:
dataset.drop('Embarked', axis=1, inplace=True)
```
## Handle continuous values in the Fare column
```
# Fill the NA values in the Fares column with the median
test_df['Fare'].fillna(test_df['Fare'].dropna().median(), inplace=True)
test_df.head()
# q cut will find ranges equal to the quantile of the data
train_df['FareBand'] = pd.qcut(train_df['Fare'], 4)
train_df[['FareBand', 'Survived']].groupby(['FareBand'], as_index=False).mean().sort_values(by='FareBand', ascending=True)
for dataset in combine:
dataset.loc[ dataset['Fare'] <= 7.91, 'Fare'] = 0
dataset.loc[(dataset['Fare'] > 7.91) & \
(dataset['Fare'] <= 14.454), 'Fare'] = 1
dataset.loc[(dataset['Fare'] > 14.454) & \
(dataset['Fare'] <= 31), 'Fare'] = 2
dataset.loc[ dataset['Fare'] > 31, 'Fare'] = 3
dataset['Fare'] = dataset['Fare'].astype(int)
train_df = train_df.drop(['FareBand'], axis=1)
combine = [train_df, test_df]
train_df.head
```
## Finished
```
train_df.head(7)
# All features are approximately on the same scale
# no need for feature engineering / normalization
test_df.head(7)
# Check correlation between features
# (uncorrelated features are generally more powerful predictors)
colormap = plt.cm.viridis
plt.figure(figsize=(12,12))
plt.title('Pearson Correlation of Features', y=1.05, size=15)
sns.heatmap(train_df.astype(float).corr().round(2)\
,linewidths=0.1,vmax=1.0, square=True, cmap=colormap, \
linecolor='white', annot=True);
```
# Next Up: Machine Learning!
Now we will Model, Predict, and Choose algorithm for conducting the classification
Try using different classifiers to model and predict. Choose the best model from:
* Logistic Regression
* KNN
* SVM
* Naive Bayes
* Decision Tree
* Random Forest
* Perceptron
* XGBoost
## Setup Train and Validation Set
```
X = train_df.drop("Survived", axis=1) # Training & Validation data
Y = train_df["Survived"] # Response / Target Variable
# Since we don't have labels for the test data
# this won't be used. It's only for Kaggle Submissions
X_submission = test_df.drop("PassengerId", axis=1).copy()
print(X.shape, Y.shape)
# Split training and test set so that we test on 20% of the data
# Note that our algorithms will never have seen the validation
# data during training. This is to evaluate how good our estimators are.
np.random.seed(1337) # set random seed for reproducibility
from sklearn.model_selection import train_test_split
X_train, X_val, Y_train, Y_val = train_test_split(X, Y, test_size=0.2)
print(X_train.shape, Y_train.shape)
print(X_val.shape, Y_val.shape)
```
## Scikit-Learn general ML workflow
1. Instantiate model object
2. Fit model to training data
3. Let the model predict output for unseen data
4. Compare predicitons with actual output to form accuracy measure
# Logistic Regression
```
logreg = LogisticRegression() # instantiate
logreg.fit(X_train, Y_train) # fit
Y_pred = logreg.predict(X_val) # predict
acc_log = round(logreg.score(X_val, Y_val) * 100, 2) # evaluate
acc_log
# Support Vector Machines
svc = SVC()
svc.fit(X_train, Y_train)
Y_pred = svc.predict(X_val)
acc_svc = round(svc.score(X_val, Y_val) * 100, 2)
acc_svc
knn = KNeighborsClassifier(n_neighbors = 3)
knn.fit(X_train, Y_train)
Y_pred = knn.predict(X_val)
acc_knn = round(knn.score(X_val, Y_val) * 100, 2)
acc_knn
# Perceptron
perceptron = Perceptron()
perceptron.fit(X_train, Y_train)
Y_pred = perceptron.predict(X_val)
acc_perceptron = round(perceptron.score(X_val, Y_val) * 100, 2)
acc_perceptron
# XGBoost
gradboost = xgb.XGBClassifier(n_estimators=1000)
gradboost.fit(X_train, Y_train)
Y_pred = gradboost.predict(X_val)
acc_perceptron = round(gradboost.score(X_val, Y_val) * 100, 2)
acc_perceptron
# Random Forest
random_forest = RandomForestClassifier(n_estimators=1000)
random_forest.fit(X_train, Y_train)
Y_pred = random_forest.predict(X_val)
acc_random_forest = round(random_forest.score(X_val, Y_val) * 100, 2)
acc_random_forest
# Look at importance of features for random forest
def plot_model_var_imp( model , X , y ):
    """Horizontal bar plot of a fitted tree model's feature importances
    (10 least important features, sorted ascending) plus its training accuracy."""
    importance_df = pd.DataFrame(
        model.feature_importances_,
        columns=['Importance'],
        index=X.columns,
    )
    importance_df = importance_df.sort_values(['Importance'], ascending=True)
    importance_df[:10].plot(kind='barh')
    print ('Training accuracy Random Forest:', model.score(X, y))
plot_model_var_imp(random_forest, X_train, Y_train)
# How to create a Kaggle submission:
Y_submission = random_forest.predict(X_submission)
submission = pd.DataFrame({
"PassengerId": test_df["PassengerId"],
"Survived": Y_submission
})
submission.to_csv('titanic.csv', index=False)
```
# Legacy code (not used anymore)
```python
# Map title string values to numbers so that we can make predictions
title_mapping = {"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5}
for dataset in combine:
dataset['Title'] = dataset['Title'].map(title_mapping)
dataset['Title'] = dataset['Title'].fillna(0)
# Handle missing values
train_df.head()
```
```python
# Drop the unnecessary Name column (we have the titles now)
train_df = train_df.drop(['Name', 'PassengerId'], axis=1)
test_df = test_df.drop(['Name'], axis=1)
combine = [train_df, test_df]
train_df.shape, test_df.shape
```
```python
# Create categorical dummy variables for Embarked values
for dataset in combine:
dataset['Embarked'] = dataset['Embarked'].map( {'S': 0, 'C': 1, 'Q': 2} ).astype(int)
train_df.head()
```
| github_jupyter |
Now it's your turn to test your new knowledge of **missing values** handling. You'll probably find it makes a big difference.
# Setup
The questions will give you feedback on your work. Run the following cell to set up the feedback system.
```
# Set up code checking
import os
if not os.path.exists("../input/train.csv"):
os.symlink("../input/home-data-for-ml-course/train.csv", "../input/train.csv")
os.symlink("../input/home-data-for-ml-course/test.csv", "../input/test.csv")
from learntools.core import binder
binder.bind(globals())
from learntools.ml_intermediate.ex2 import *
print("Setup Complete")
```
In this exercise, you will work with data from the [Housing Prices Competition for Kaggle Learn Users](https://www.kaggle.com/c/home-data-for-ml-course).

Run the next code cell without changes to load the training and validation sets in `X_train`, `X_valid`, `y_train`, and `y_valid`. The test set is loaded in `X_test`.
```
import pandas as pd
from sklearn.model_selection import train_test_split
# Read the data
X_full = pd.read_csv('../input/train.csv', index_col='Id')
X_test_full = pd.read_csv('../input/test.csv', index_col='Id')
# Remove rows with missing target, separate target from predictors
X_full.dropna(axis=0, subset=['SalePrice'], inplace=True)
y = X_full.SalePrice
X_full.drop(['SalePrice'], axis=1, inplace=True)
# To keep things simple, we'll use only numerical predictors
X = X_full.select_dtypes(exclude=['object'])
X_test = X_test_full.select_dtypes(exclude=['object'])
# Break off validation set from training data
X_train, X_valid, y_train, y_valid = train_test_split(X, y, train_size=0.8, test_size=0.2,
random_state=0)
```
Use the next code cell to print the first five rows of the data.
```
X_train.head()
```
You can already see a few missing values in the first several rows. In the next step, you'll obtain a more comprehensive understanding of the missing values in the dataset.
# Step 1: Preliminary investigation
Run the code cell below without changes.
```
# Shape of training data (num_rows, num_columns)
print(X_train.shape)
# Number of missing values in each column of training data
missing_val_count_by_column = (X_train.isnull().sum())
print(missing_val_count_by_column[missing_val_count_by_column > 0])
```
### Part A
Use the above output to answer the questions below.
```
# Fill in the line below: How many rows are in the training data?
num_rows = ____
# Fill in the line below: How many columns in the training data
# have missing values?
num_cols_with_missing = ____
# Fill in the line below: How many missing entries are contained in
# all of the training data?
tot_missing = ____
# Check your answers
step_1.a.check()
#%%RM_IF(PROD)%%
num_rows = 1168
num_cols_with_missing = 3
tot_missing = 212 + 6 + 58
step_1.a.assert_check_passed()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
step_1.a.hint()
#_COMMENT_IF(PROD)_
step_1.a.solution()
```
### Part B
Considering your answers above, what do you think is likely the best approach to dealing with the missing values?
```
# Check your answer (Run this code cell to receive credit!)
step_1.b.check()
#_COMMENT_IF(PROD)_
step_1.b.hint()
```
To compare different approaches to dealing with missing values, you'll use the same `score_dataset()` function from the tutorial. This function reports the [mean absolute error](https://en.wikipedia.org/wiki/Mean_absolute_error) (MAE) from a random forest model.
```
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
# Function for comparing different approaches
def score_dataset(X_train, X_valid, y_train, y_valid):
    """Fit a 100-tree random forest (fixed seed) on the training split and
    return its mean absolute error on the validation split."""
    forest = RandomForestRegressor(n_estimators=100, random_state=0)
    forest.fit(X_train, y_train)
    predictions = forest.predict(X_valid)
    return mean_absolute_error(y_valid, predictions)
```
# Step 2: Drop columns with missing values
In this step, you'll preprocess the data in `X_train` and `X_valid` to remove columns with missing values. Set the preprocessed DataFrames to `reduced_X_train` and `reduced_X_valid`, respectively.
```
# Fill in the line below: get names of columns with missing values
____ # Your code here
# Fill in the lines below: drop columns in training and validation data
reduced_X_train = ____
reduced_X_valid = ____
# Check your answers
step_2.check()
#%%RM_IF(PROD)%%
# Get names of columns with missing values
cols_with_missing = [col for col in X_train.columns
if X_train[col].isnull().any()]
# Drop columns in training and validation data
reduced_X_train = X_train.drop(cols_with_missing, axis=1)
reduced_X_valid = X_valid.drop(cols_with_missing, axis=1)
step_2.assert_check_passed()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
step_2.hint()
#_COMMENT_IF(PROD)_
step_2.solution()
```
Run the next code cell without changes to obtain the MAE for this approach.
```
print("MAE (Drop columns with missing values):")
print(score_dataset(reduced_X_train, reduced_X_valid, y_train, y_valid))
```
# Step 3: Imputation
### Part A
Use the next code cell to impute missing values with the mean value along each column. Set the preprocessed DataFrames to `imputed_X_train` and `imputed_X_valid`. Make sure that the column names match those in `X_train` and `X_valid`.
```
from sklearn.impute import SimpleImputer
# Fill in the lines below: imputation
____ # Your code here
imputed_X_train = ____
imputed_X_valid = ____
# Fill in the lines below: imputation removed column names; put them back
imputed_X_train.columns = ____
imputed_X_valid.columns = ____
# Check your answers
step_3.a.check()
#%%RM_IF(PROD)%%
# Imputation
my_imputer = SimpleImputer()
imputed_X_train = pd.DataFrame(my_imputer.fit_transform(X_train))
imputed_X_valid = pd.DataFrame(my_imputer.transform(X_valid))
step_3.a.assert_check_failed()
#%%RM_IF(PROD)%%
# Imputation
my_imputer = SimpleImputer()
imputed_X_train = pd.DataFrame(my_imputer.fit_transform(X_train))
imputed_X_valid = pd.DataFrame(my_imputer.fit_transform(X_valid))
# Imputation removed column names; put them back
imputed_X_train.columns = X_train.columns
imputed_X_valid.columns = X_valid.columns
step_3.a.assert_check_failed()
#%%RM_IF(PROD)%%
# Imputation
my_imputer = SimpleImputer()
imputed_X_train = pd.DataFrame(my_imputer.fit_transform(X_train))
imputed_X_valid = pd.DataFrame(my_imputer.transform(X_valid))
# Imputation removed column names; put them back
imputed_X_train.columns = X_train.columns
imputed_X_valid.columns = X_valid.columns
step_3.a.assert_check_passed()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
step_3.a.hint()
#_COMMENT_IF(PROD)_
step_3.a.solution()
```
Run the next code cell without changes to obtain the MAE for this approach.
```
print("MAE (Imputation):")
print(score_dataset(imputed_X_train, imputed_X_valid, y_train, y_valid))
```
### Part B
Compare the MAE from each approach. Does anything surprise you about the results? Why do you think one approach performed better than the other?
```
# Check your answer (Run this code cell to receive credit!)
step_3.b.check()
#_COMMENT_IF(PROD)_
step_3.b.hint()
```
# Step 4: Generate test predictions
In this final step, you'll use any approach of your choosing to deal with missing values. Once you've preprocessed the training and validation features, you'll train and evaluate a random forest model. Then, you'll preprocess the test data before generating predictions that can be submitted to the competition!
### Part A
Use the next code cell to preprocess the training and validation data. Set the preprocessed DataFrames to `final_X_train` and `final_X_valid`. **You can use any approach of your choosing here!** In order for this step to be marked as correct, you need only ensure:
- the preprocessed DataFrames have the same number of columns,
- the preprocessed DataFrames have no missing values,
- `final_X_train` and `y_train` have the same number of rows, and
- `final_X_valid` and `y_valid` have the same number of rows.
```
# Preprocessed training and validation features
final_X_train = ____
final_X_valid = ____
# Check your answers
step_4.a.check()
#%%RM_IF(PROD)%%
# Imputation
final_imputer = SimpleImputer(strategy='median')
final_X_train = pd.DataFrame(final_imputer.fit_transform(X_train))
final_X_valid = pd.DataFrame(final_imputer.transform(X_valid))
# Imputation removed column names; put them back
final_X_train.columns = X_train.columns
final_X_valid.columns = X_valid.columns
step_4.a.assert_check_passed()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
step_4.a.hint()
#_COMMENT_IF(PROD)_
step_4.a.solution()
```
Run the next code cell to train and evaluate a random forest model. (*Note that we don't use the `score_dataset()` function above, because we will soon use the trained model to generate test predictions!*)
```
# Define and fit model
model = RandomForestRegressor(n_estimators=100, random_state=0)
model.fit(final_X_train, y_train)
# Get validation predictions and MAE
preds_valid = model.predict(final_X_valid)
print("MAE (Your approach):")
print(mean_absolute_error(y_valid, preds_valid))
```
### Part B
Use the next code cell to preprocess your test data. Make sure that you use a method that agrees with how you preprocessed the training and validation data, and set the preprocessed test features to `final_X_test`.
Then, use the preprocessed test features and the trained model to generate test predictions in `preds_test`.
In order for this step to be marked correct, you need only ensure:
- the preprocessed test DataFrame has no missing values, and
- `final_X_test` has the same number of rows as `X_test`.
```
# Fill in the line below: preprocess test data
final_X_test = ____
# Fill in the line below: get test predictions
preds_test = ____
# Check your answers
step_4.b.check()
#%%RM_IF(PROD)%%
# Preprocess test data
final_X_test = pd.DataFrame(final_imputer.transform(X_test))
# Get test predictions
preds_test = model.predict(final_X_test)
step_4.b.assert_check_passed()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
step_4.b.hint()
#_COMMENT_IF(PROD)_
step_4.b.solution()
```
Run the next code cell without changes to save your results to a CSV file that can be submitted directly to the competition.
```
# Save test predictions to file
output = pd.DataFrame({'Id': X_test.index,
'SalePrice': preds_test})
output.to_csv('submission.csv', index=False)
```
# Submit your results
Once you have successfully completed Step 4, you're ready to submit your results to the leaderboard! (_You also learned how to do this in the previous exercise. If you need a reminder of how to do this, please use the instructions below._)
First, you'll need to join the competition if you haven't already. So open a new window by clicking on [this link](https://www.kaggle.com/c/home-data-for-ml-course). Then click on the **Join Competition** button.

Next, follow the instructions below:
#$SUBMIT_TO_COMP$
# Keep going
Move on to learn what **[categorical variables](#$NEXT_NOTEBOOK_URL$)** are, along with how to incorporate them into your machine learning models. Categorical variables are very common in real-world data, but you'll get an error if you try to plug them into your models without processing them first!
| github_jupyter |
```
# load libs
import torch
import argparse
from torchvision import datasets, transforms
import matplotlib.pyplot as plt
import numpy as np
from data.datasets import MNIST
import torch.utils.data as data_utils
from sklearn.decomposition import PCA
import torch.nn.functional as F
from torch.autograd import Variable
print (torch.__version__)
batch_size=1
test_batch_size=1
kwargs={}
# DataLoaders over the project-local MNIST wrapper.  Images are rescaled to
# 32x32 so that five 2x2/stride-2 Saak stages reduce them to 1x1.
# NOTE(review): transforms.Scale is deprecated in modern torchvision
# (renamed to transforms.Resize) — confirm against the installed version.
train_loader=data_utils.DataLoader(MNIST(root='./data',train=True,process=False,transform=transforms.Compose([
    transforms.Scale((32,32)),
    transforms.ToTensor(),
])),batch_size=batch_size,shuffle=True,**kwargs)
test_loader=data_utils.DataLoader(MNIST(root='./data',train=False,process=False,transform=transforms.Compose([
    transforms.Scale((32,32)),
    transforms.ToTensor(),
])),batch_size=test_batch_size,shuffle=True,**kwargs)
def create_all_train_dataset():
    """Collect every training sample from ``train_loader`` into NumPy arrays.

    Returns a tuple ``(images, labels)`` where ``images`` has shape
    (N, 1, 32, 32) and ``labels`` is a list of per-sample label arrays
    (batch_size is 1, so each entry holds a single label).
    """
    images, labels = [], []
    for batch_images, batch_labels in train_loader:
        # batch size is 1: squeeze drops the singleton batch/channel axes
        images.append(np.squeeze(batch_images.numpy()))
        labels.append(batch_labels.numpy())
    # Re-insert the channel axis so the stack is (N, 1, 32, 32).
    stacked = np.expand_dims(np.array(images), axis=1)
    print('Numpy train dataset shape is {}'.format(stacked.shape))
    return stacked, labels
def create_all_test_dataset():
    """Collect every test sample from ``test_loader`` into NumPy arrays.

    Returns a tuple ``(images, labels)`` where ``images`` has shape
    (N, 1, 32, 32) and ``labels`` is a list of per-sample label arrays.
    """
    images, labels = [], []
    for batch_images, batch_labels in test_loader:
        # batch size is 1: squeeze drops the singleton batch/channel axes
        images.append(np.squeeze(batch_images.numpy()))
        labels.append(batch_labels.numpy())
    # Re-insert the channel axis so the stack is (N, 1, 32, 32).
    stacked = np.expand_dims(np.array(images), axis=1)
    print('Numpy test dataset shape is {}'.format(stacked.shape))
    return stacked, labels
def PCA_and_augment(data_in, num_key_comp):
    """Fit PCA on flattened patches and augment with sign-flipped copies.

    The input is flattened to 2-D, mean-centred, and reduced to
    ``num_key_comp`` principal components; all but the last component are
    then appended again with flipped sign (Saak-style augmentation).
    """
    flattened = data_in.reshape(data_in.shape[0], -1)
    print('PCA_and_augment: {}'.format(flattened.shape))
    # Centre the data before fitting PCA.
    centred = flattened - flattened.mean(axis=0)
    print('PCA_and_augment meanremove shape: {}'.format(centred.shape))
    pca = PCA(n_components=num_key_comp)
    pca.fit(centred)
    components = pca.components_
    # NOTE(review): the sign-flipped copies exclude components[-1]; the
    # original comment says the DC component is the one not augmented —
    # confirm which index actually holds it.
    negated = [-vec for vec in components[:-1]]
    augmented = np.vstack((components, negated))
    print('PCA_and_augment comps_complete shape: {}'.format(augmented.shape))
    return augmented
from itertools import product
def fit_pca_shape(datasets, depth):
    """Cut every image into non-overlapping 2x2 patches for PCA fitting.

    Parameters
    ----------
    datasets : np.ndarray
        Input of shape (N, C, H, W); assumes H == W and even.
    depth : int
        Saak stage index, kept for interface compatibility.  The spatial
        size is now read from ``datasets`` itself instead of assuming a
        32-pixel input halved ``depth`` times, so any square input works.

    Returns
    -------
    np.ndarray of shape (N * (H // 2) ** 2, C, 2, 2)
    """
    # Previously hard-coded as 32 / 2**depth; reading it from the data is
    # equivalent for the original pipeline and generalises to other sizes.
    length = datasets.shape[2]
    print('fit_pca_shape: length: {}'.format(length))
    starts = range(0, int(length), 2)
    stops = [i + 2 for i in starts]
    print('fit_pca_shape: idx1: {}'.format(starts))
    # All (row, col) 2x2 windows, in row-major window order.
    spans = list(zip(starts, stops))
    data_lattice = np.array([datasets[:, :, i:j, k:l]
                             for (i, j), (k, l) in product(spans, spans)])
    print('fit_pca_shape: data_lattice.shape: {}'.format(data_lattice.shape))
    # Merge the window-position axis into the sample axis.
    data = data_lattice.reshape(
        data_lattice.shape[0] * data_lattice.shape[1], data_lattice.shape[2], 2, 2)
    print('fit_pca_shape: reshape: {}'.format(data.shape))
    return data
def ret_filt_patches(aug_anchors, input_channels):
    """Reshape flat PCA anchor vectors into 2x2 convolution kernels.

    ``aug_anchors`` has shape (num_kernels, channels * 4); the result has
    shape (num_kernels, channels, 2, 2).  ``input_channels`` is unused and
    kept only for interface compatibility.
    """
    num_kernels = int(aug_anchors.shape[0])
    channels = int(aug_anchors.shape[1] / 4)
    # A single row-major reshape is equivalent to the two-step
    # (num, channels, 4) -> (num, channels, 2, 2) reshape.
    return np.reshape(aug_anchors, (num_kernels, channels, 2, 2))
def conv_and_relu(filters, datasets, stride=2):
    """Convolve ``datasets`` with ``filters`` and apply ReLU.

    Parameters
    ----------
    filters : np.ndarray, shape (out_channels, in_channels, kH, kW)
    datasets : np.ndarray, shape (N, in_channels, H, W)
    stride : int, default 2

    Returns
    -------
    tuple
        ``(relu_output, filt)``: the activated output tensor and the
        float32 filter tensor used for the convolution.
    """
    # torch.autograd.Variable is deprecated (a no-op since torch 0.4);
    # plain float tensors behave identically here.
    filt = torch.from_numpy(filters).type(torch.FloatTensor)
    data = torch.from_numpy(datasets).type(torch.FloatTensor)
    output = F.conv2d(data, filt, stride=stride)
    return F.relu(output), filt
def one_stage_saak_trans(datasets=None, depth=0, num_key_comp=5):
    """Run one Saak-transform stage: patch PCA -> strided conv -> ReLU."""
    print('one_stage_saak_trans: datasets.shape {}'.format(datasets.shape))
    n_in = datasets.shape[1]
    # Collect all 2x2 patches and learn augmented PCA kernels from them.
    patches = fit_pca_shape(datasets, depth)
    anchors = PCA_and_augment(patches, num_key_comp)
    print('one_stage_saak_trans: comps_complete: {}'.format(anchors.shape))
    kernels = ret_filt_patches(anchors, n_in)
    print('one_stage_saak_trans: filters: {}'.format(kernels.shape))
    # Apply the learned kernels as a stride-2 convolution followed by ReLU.
    relu_output, filt = conv_and_relu(kernels, datasets, stride=2)
    stage_out = relu_output.data.numpy()
    print('one_stage_saak_trans: output: {}'.format(stage_out.shape))
    return stage_out, filt, relu_output, kernels
def five_stage_saak_trans():
    """Apply five Saak stages to the train set, then reuse the learned
    filters on the test set.

    Returns ``(train_features, test_features, train_labels, test_labels)``.
    """
    learned_filters = []
    data_train, train_label = create_all_train_dataset()
    data_test, test_label = create_all_test_dataset()
    # Number of principal components kept at each stage.
    components_per_stage = [3, 4, 7, 6, 8]
    for stage, n_comp in enumerate(components_per_stage):
        print('{} stage of saak transform_train: '.format(stage))
        data_train, filt, output, f = one_stage_saak_trans(
            data_train, depth=stage, num_key_comp=n_comp)
        learned_filters.append(f)
    # The test set only gets the fixed, already-learned filters applied.
    for stage in range(5):
        print('{} stage of saak transform_test: '.format(stage))
        relu_output, filt = conv_and_relu(learned_filters[stage], data_test, stride=2)
        data_test = relu_output.data.numpy()
    return data_train, data_test, train_label, test_label
saak_train,saak_test,train_label,test_label=five_stage_saak_trans()
print(saak_train.shape)
print(saak_test.shape)
#print(saak_train[0])
saak_train = saak_train.reshape((60000,-1))
saak_test = saak_test.reshape((10000,-1))
print(saak_train.shape)
print(saak_test.shape)
#train_pca = PCA()
#train_pca.fit(saak_train)
#eng=np.cumsum(train_pca.explained_variance_ratio_)
#f_num = np.count_nonzero(eng < 0.90)
#print(f_num)
#saak_train=train_pca.transform(saak_train)[:,:f_num]
#saak_test=train_pca.transform(saak_test)[:,:f_num]
#print(saak_train.shape)
#print(saak_test.shape)
# def load_train_label():
# f = open('./data/raw/train-labels-idx1-ubyte')
# loaded = np.fromfile(file=f,dtype = np.uint8)
# loaded = loaded[8:].reshape(60000).astype(np.uint8)
# return loaded
# train_label = load_train_label()
# print(train_label.shape)
# #print(train_label[0])
# def load_test_label():
# f = open('./data/raw/t10k-labels-idx1-ubyte')
# loaded = np.fromfile(file=f,dtype = np.uint8)
# loaded = loaded[8:].reshape(10000).astype(np.uint8)
# return loaded
# test_label = load_test_label()
# print(test_label.shape)
from sklearn.svm import SVC
svm_train = SVC(
C=1.0,
cache_size=200,
class_weight=None,
coef0=0.0,
decision_function_shape='ovr',
degree=3,
gamma='auto',
kernel='rbf',
max_iter=2000,
probability=False,
random_state=None,
shrinking=True,
tol=0.001,
verbose=False,
)
svm_train.fit(saak_train,train_label)
train_result = svm_train.predict(saak_train)
test_result = svm_train.predict(saak_test)
#accuray_train=np.count_nonzero(train_result==train_label)
#accuray_test=np.count_nonzero(test_result==test_label)
#print("train_accuray is: " + str(accuray_train/60000.0))
#print("test_accuray is: " + str(accuray_test/10000.0))
# Vectorised accuracy computation: the label containers are lists of
# length-1 arrays (batch_size was 1), so flatten them with np.ravel before
# comparing elementwise against the SVM predictions.  This replaces the
# original hand-rolled Python counting loops with hard-coded 60000/10000
# sample counts; the printed output is numerically identical.
n_correct_train = int(np.count_nonzero(np.ravel(train_label) == train_result))
print("num of correct classification_train: " + str(n_correct_train))
print("accuray_train: " + str(n_correct_train / float(len(train_result))))
n_correct_test = int(np.count_nonzero(np.ravel(test_label) == test_result))
print("num of correct classification_test: " + str(n_correct_test))
print("accuray_test: " + str(n_correct_test / float(len(test_result))))
```
| github_jupyter |
```
import numpy as np
import cv2
import matplotlib.pyplot as plt
from tensorflow.keras import models
import tensorflow.keras.backend as K
import tensorflow as tf
from sklearn.metrics import f1_score
import requests
import xmltodict
import json
plateCascade = cv2.CascadeClassifier('indian_license_plate.xml')
#detect the plate and return car + plate image
def plate_detect(img):
    """Locate license plates in ``img`` using the Haar cascade.

    Returns the annotated frame and the last detected plate crop (an empty
    array when no plate was found).
    """
    annotated = img.copy()
    source = img.copy()
    plate_crop = np.array([])
    detections = plateCascade.detectMultiScale(annotated, scaleFactor=1.2, minNeighbors=7)
    for (x, y, w, h) in detections:
        # Keep the plate region and draw a slightly inset green box.
        plate_crop = source[y:y + h, x:x + w, :]
        cv2.rectangle(annotated, (x + 2, y), (x + w - 3, y + h - 5), (0, 255, 0), 3)
    return annotated, plate_crop
#normal function to display
def display_img(img):
    """Show a BGR OpenCV image with matplotlib (which expects RGB)."""
    rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    plt.imshow(rgb)
    plt.show()
def find_contours(dimensions, img) :
    """Extract candidate character images from a binarised plate image.

    Parameters
    ----------
    dimensions : sequence of 4 numbers
        [lower_width, upper_width, lower_height, upper_height] bounds used
        to keep only character-sized contours.
    img : binary (thresholded) license-plate image.

    Returns
    -------
    Array of 44x24 character images ordered left-to-right by their
    x-coordinate within the plate.
    """
    # Find all contours (RETR_TREE hierarchy, simple chain approximation).
    cntrs, _ = cv2.findContours(img.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # Approx dimensions of the contours
    lower_width = dimensions[0]
    upper_width = dimensions[1]
    lower_height = dimensions[2]
    upper_height = dimensions[3]
    # Check largest 15 contours for license plate character respectively
    cntrs = sorted(cntrs, key=cv2.contourArea, reverse=True)[:15]
    # NOTE(review): relies on 'contour.jpg' having just been written by
    # segment_characters() — an implicit file-system coupling.
    ci = cv2.imread('contour.jpg')
    x_cntr_list = []
    target_contours = []
    img_res = []
    for cntr in cntrs :
        # Bounding rectangle enclosing this contour.
        intX, intY, intWidth, intHeight = cv2.boundingRect(cntr)
        # Keep only contours whose size lies inside the character bounds.
        if intWidth > lower_width and intWidth < upper_width and intHeight > lower_height and intHeight < upper_height :
            x_cntr_list.append(intX)  # x-position, used later to sort characters
            char_copy = np.zeros((44,24))
            # Extract the character using the enclosing rectangle's coordinates.
            char = img[intY:intY+intHeight, intX:intX+intWidth]
            char = cv2.resize(char, (20, 40))
            cv2.rectangle(ci, (intX,intY), (intWidth+intX, intY+intHeight), (50,21,200), 2)
            #plt.imshow(ci, cmap='gray')
            # Invert so characters are white on black for the classifier.
            char = cv2.subtract(255, char)
            # Centre the 40x20 glyph in a 44x24 canvas with a 2px black border.
            char_copy[2:42, 2:22] = char
            char_copy[0:2, :] = 0
            char_copy[:, 0:2] = 0
            char_copy[42:44, :] = 0
            char_copy[:, 22:24] = 0
            img_res.append(char_copy) # List that stores the character's binary image (unsorted)
    # Return characters in ascending order of their x-coordinate.
    #plt.show()
    indices = sorted(range(len(x_cntr_list)), key=lambda k: x_cntr_list[k])
    img_res_copy = []
    for idx in indices:
        img_res_copy.append(img_res[idx])# stores character images according to their index
    img_res = np.array(img_res_copy)
    return img_res
def segment_characters(image) :
    """Binarise a cropped plate image and return its segmented characters.

    Pipeline: resize to 333x75, grayscale, Otsu threshold, erode (grow the
    black background), dilate (grow the white characters), white out the
    borders, then delegate contour-based extraction to find_contours().
    """
    img_lp = cv2.resize(image, (333, 75))
    img_gray_lp = cv2.cvtColor(img_lp, cv2.COLOR_BGR2GRAY)
    _, img_binary_lp = cv2.threshold(img_gray_lp, 200, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)
    img_binary_lp = cv2.erode(img_binary_lp, (3,3))
    img_binary_lp = cv2.dilate(img_binary_lp, (3,3))
    # NOTE(review): cv2.resize takes (width, height), so the array shape is
    # (75, 333): shape[0] is the HEIGHT (75) and shape[1] the WIDTH (333).
    # The names below are swapped relative to that, and the dimension
    # bounds passed to find_contours() inherit the swap — presumably the
    # pipeline was tuned against it, so it is documented rather than
    # changed.  Confirm before "fixing".
    LP_WIDTH = img_binary_lp.shape[0]
    LP_HEIGHT = img_binary_lp.shape[1]
    # Force a 3px white frame so border artefacts do not become contours.
    img_binary_lp[0:3,:] = 255
    img_binary_lp[:,0:3] = 255
    img_binary_lp[72:75,:] = 255
    img_binary_lp[:,330:333] = 255
    # Estimated character-contour size bounds for the cropped plate.
    dimensions = [LP_WIDTH/6,
                  LP_WIDTH/2,
                  LP_HEIGHT/10,
                  2*LP_HEIGHT/3]
    #plt.imshow(img_binary_lp, cmap='gray')
    #plt.show()
    # Saved for find_contours(), which re-reads it to draw annotations.
    cv2.imwrite('contour.jpg',img_binary_lp)
    # Contour-based character extraction.
    char_list = find_contours(dimensions, img_binary_lp)
    return char_list
#It is the harmonic mean of precision and recall
#Output range is [0, 1]
#Works for both multi-class and multi-label classification
def f1score(y, y_pred):
    """Micro-averaged F1 between true labels and argmax of predicted logits.

    Harmonic mean of precision and recall; output range is [0, 1].  Works
    for both multi-class and multi-label classification.
    """
    return f1_score(y, tf.math.argmax(y_pred, axis=1), average='micro')
def custom_f1score(y, y_pred):
    # Wrap the sklearn metric as a TF op so Keras can use it as a metric.
    return tf.py_function(f1score, (y, y_pred), tf.double)
model = models.load_model('license_plate_character.pkl', custom_objects= {'custom_f1score': custom_f1score})
def fix_dimension(img):
    """Replicate a 28x28 grayscale image across 3 channels as float64."""
    stacked = np.zeros((28, 28, 3))
    # Broadcasting writes the same plane into every channel at once.
    stacked[:, :, :] = img[:, :, np.newaxis]
    return stacked
def show_results(pl_char):
    """Classify each segmented character image and join them into a plate
    string."""
    alphabet = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    index_to_char = dict(enumerate(alphabet))
    predicted = []
    for glyph in pl_char:
        resized = cv2.resize(glyph, (28, 28), interpolation=cv2.INTER_AREA)
        # Model expects a (1, 28, 28, 3) batch.
        batch = fix_dimension(resized).reshape(1, 28, 28, 3)
        # NOTE(review): predict_classes() only exists on older Keras
        # Sequential models — confirm the installed TF version supports it.
        class_idx = model.predict_classes(batch)[0]
        predicted.append(index_to_char[class_idx])
    return ''.join(predicted)
# final_plate = show_results(char)
# print(final_plate)
def get_vehicle_info(plate_number):
    """Query the regcheck.org.uk API and return the parsed vehicle record.

    The plate text comes from OCR (untrusted), so it is passed via
    ``params`` and URL-encoded by requests instead of being interpolated
    into the URL.  Raises ``requests.HTTPError`` on non-2xx responses
    instead of failing later with a confusing parse error.
    """
    response = requests.get(
        "http://www.regcheck.org.uk/api/reg.asmx/CheckIndia",
        params={"RegistrationNumber": str(plate_number), "username": "tom123"},
    )
    response.raise_for_status()
    data = xmltodict.parse(response.content)
    # The XML payload embeds the actual record as a JSON string; the
    # original dumps/loads roundtrip of the whole parse tree was redundant.
    return json.loads(data['Vehicle']['vehicleJson'])
import re
def plate_info(numberPlate):
    """Validate/normalise an Indian plate string and look up its details.

    Plates longer than 10 characters are truncated to their last 10 before
    lookup; otherwise the string must match the Indian plate format, or
    ``None`` is returned.
    """
    pattern = '^[A-Z]{2}[0-9]{1,2}([A-Z])?([A-Z]*)?[0-9]{4}$'
    if len(numberPlate) > 10:
        # OCR sometimes prepends junk; keep only the trailing 10 chars.
        return get_vehicle_info(numberPlate[-10:])
    if re.match(pattern, numberPlate) is not None:
        return get_vehicle_info(numberPlate)
    return None
# Open the input video (path is user-specific; not shipped for privacy).
cam = cv2.VideoCapture('videoplay.mp4') #license_video.mp4 have to be yours, I haven't uploaded for privacy concern
if cam.isOpened() == False:
    print("Video not imported")
plate_list = []   # unique plate strings recognised so far
info_list = []    # vehicle records, parallel to plate_list
while(cam.isOpened()):
    ret, frame = cam.read()
    if ret == True:
        # Detect the plate, OCR its characters, and query vehicle details
        # only for plates not already processed.
        car_plate, plate_img = plate_detect(frame)
        cv2.imshow("License Video",car_plate)
        if len(plate_img) > 0:
            plate_char = segment_characters(plate_img)
            #print(plate_char)
            number_plate = show_results(plate_char)
            if number_plate not in plate_list:
                final_result = plate_info(number_plate)
                if final_result != None:
                    plate_list.append(number_plate)
                    info_list.append(final_result)
                    #print(final_result)
        # Esc key aborts playback.
        if cv2.waitKey(1) == 27:
            break
    else:
        break
print(info_list[0])
cam.release()
cv2.destroyAllWindows()
#For privacy reasons, the shown result is also not of the video used
```
| github_jupyter |
Diodes
===
The incident flux and the current that is generated by a photodiode subjected to it are related by
$$
\begin{equation}
\begin{split}
I(A)=&\sum_{i,j}P_{i,j}(W)R_{j}(A/W)+D(A)\\
P_{i,j}(W)=&I_{i,j}(Hz)E_{j}(\text{keV})\\
R_{j}(A/W)=&\frac{e(C)}{E_{h}(\text{keV})}[1-e^{-\mu(E_{j})\rho d}]
\end{split}
\end{equation}
$$
where $P$ is the incident power, $R$ the spectral responsivity, $D$ the dark current, $E_i$ the energy of the incident photon, $E_j$ the energy of the detected photon, $E_{h}$ the energy needed to create an electron–hole pair, and $I_{i,j}$ the detected flux of line $j$ due to line $i$; the diode has density $\rho$, mass attenuation coefficient $\mu$ and thickness $d$.
The relationship between the detected flux and the flux at the sample position is given by
$$
\begin{equation}
I_{i,j}(Hz)=I_{0}(Hz) w_i Y_{i,j} = I_{s}(Hz)\frac{w_i Y_{i,j}}{\sum_k w_k T_{s}(E_{k})}
\end{equation}
$$
with the following factors
* $I_0$: total flux before detection
* $I_s$: the total flux seen by the sample
* $T_s$: the "transmission" between source and sample (product of several transmission factors and optics efficiency)
* $w_k$: the fraction of primary photons with energy $E_{k}$
* $Y_{i,j}$: the "rate" of detected line $j$ due to source line $i$ (not including detector attenuation)
The line fractions at the sample position are
$$
\begin{equation}
\begin{split}
I_{i,s}=& I_0 w_i T_{s}(E_{i})\\
w_{i,s} =& \frac{I_{i,s}}{\sum_k I_{k,s}} = \frac{w_i T_{s}(E_{i})}{\sum_k w_k T_{s}(E_{k})}
\end{split}
\end{equation}
$$
The relationship between the flux reaching the sample and the current measured by a pn-diode can be summarized as
$$
\begin{equation}
\begin{split}
I(A)=&I_{s}(Hz)C_s(C)+D(A)\\
C_s(C) =& \frac{\sum_{i,j} w_i Y_{i,j}C_j}{\sum_k w_k T_{s}(E_{k})}\\
C_j(C) =& E_{j}(\text{keV})\frac{e(C)}{E_{h}(\text{keV})}[1-e^{-\mu(E_{j})\rho d}]\\
\end{split}
\end{equation}
$$
where $C_s$ the charge generated per photon reaching the sample and $C_j$ the charge generated per photon reaching the diode. A simplified relationship with a lookup table can be used
$$
\begin{equation}
C_s(C) = \sum_i w_i \mathrm{LUT}(E_i)
\end{equation}
$$
Finally in order to allow a fast read-out, current is converted to frequency by an oscillator
$$
\begin{equation}
I(\text{Hz})=\frac{F_{\text{max}}(Hz)}{V_{\text{max}}(V)}
\frac{V_{\text{max}}(V)}{I_{\text{max}}(A)}I(A)+F_{0}(Hz)
\end{equation}
$$
where $F_{\text{max}}$ the maximal frequency that can be detected, $F_{0}$ a fixed offset, $V_{\text{max}}$ the maximal output voltage of the ammeter and input voltage of the oscillator, $\frac{V_{\text{max}}(V)}{I_{\text{max}}(A)}$ the "gain" of the ammeter. Sometimes $I_{\text{max}}(A)$ is referred to as the diode "gain".
Absolute diode
--------------
An absolute diode has a spectral responsivity $R(A/W)$ which behaves as theoretically expected
```
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from spectrocrunch.detectors import diode
# Compare the measured spectral responsivity of the absolute (PTB) diode
# with its theoretical model.
det = diode.factory("sxm_ptb")
print(det)
# Measured responsivity at the calibration energies.
det.model = False
energy = det.menergy
R = det.spectral_responsivity(energy)
plt.plot(energy,R,marker='o',linestyle="",label='measured')
# Theoretical model on a fine energy grid.
det.model = True
energy = np.linspace(1,10,100)
R = det.spectral_responsivity(energy)
plt.plot(energy,R,label='model')
plt.legend()
plt.xlabel('Energy (keV)')
plt.ylabel('Spectral responsivity (A/W)')
plt.show()
```
Calibrated diode
----------------
The spectral responsivity $R(A/W)$ of a calibrated diode is determined by the difference in response with an absolute diode
```
# Spectral responsivity of a calibrated diode: measured points, fitted
# model, and the interpolation actually used.
det = diode.factory("sxm_idet",npop=4)
print(det)
# Measured responsivity at the calibration energies.
det.model = False
energy = det.menergy
R = det.spectral_responsivity(energy)
plt.plot(energy,R,marker='o',linestyle="",label='measured')
# Fitted model on a fine grid.
det.model = True
energy = np.linspace(1,12,100)
R = det.spectral_responsivity(energy)
plt.plot(energy,R,label='model')
# Curve actually used in subsequent calculations (measured interpolation).
det.model = False
R = det.spectral_responsivity(energy)
plt.plot(energy,R,label='used')
plt.legend(loc='best')
plt.xlabel('Energy (keV)')
plt.ylabel('Spectral responsivity (A/W)')
plt.show()
```
Direct detection
------------------
When $Y_{i,j}$ only contains transmission factors (i.e. not fluorescence/scattering from a secondary target) the diode measures $I_s$ directly. The relationship between flux $I_s(Hz)$ and diode response $I(Hz)$ is determined by the spectral responsivity which should be known (absolute diode) or calibrated (with respect to an absolute diode):
```
from spectrocrunch.optics import xray as xrayoptics
# Direct detection: response (Hz) vs energy at fixed flux, comparing the
# measured calibration and the model responsivity.
atmpath = xrayoptics.Filter(material="vacuum", thickness=1)
det = diode.factory("sxm_idet",optics=[atmpath])
print(det)
Is = 3e9  # flux at the sample position (Hz)
energy = np.linspace(2,10,100)
for gain in [1e7]:
    for model in [False,True]:
        det.gain = gain
        det.model = model
        I = det.fluxtocps(energy[:, np.newaxis], Is).to("Hz").magnitude
        plt.plot(energy,I,label="{:.0e} V/A{}".\
                 format(gain," (model)" if model else ""))
# Saturation limit of the current-to-frequency converter.
plt.gca().axhline(y=det.oscillator.Fmax.to("Hz").magnitude,label="Fmax",\
                  color='k',linestyle='--')
plt.title("Flux = {:.0e} Hz".format(Is))
plt.legend(loc='best')
plt.xlabel('Energy (keV)')
plt.ylabel('Response (Hz)')
plt.show()
```
Indirect detection
------------------
The conversion factors $Y_{i,j}$ can be calculated from the cross-sections of the secondary target.
### Without optics
When the indirect diode measures the flux downstream from the optics (or there are no optics at all), the relationship between flux and measured count-rate is known (because $T_s$ is known):
```
# Indirect detection without optics: T_s is known, so flux-to-response is
# fully determined.
atmpath = xrayoptics.Filter(material="vacuum", thickness=1)
iodet = diode.factory("sxm_iodet1",optics=[atmpath])
iodet.gain = 1e9
print(iodet.geometry)
Is = 5e9  # flux at the sample position (Hz)
energy = np.linspace(1,10,50)
I = iodet.fluxtocps(energy[:, np.newaxis], Is).to("Hz").magnitude
plt.plot(energy,I)
# Saturation limit of the current-to-frequency converter.
plt.gca().axhline(y=iodet.oscillator.Fmax.to("Hz").magnitude,label="Fmax",\
                  color='k',linestyle='--')
plt.title("Flux = {:.0e} Hz".format(Is))
plt.xlabel('Energy (keV)')
plt.ylabel('Response (Hz)')
plt.show()
```
### With optics
In case the indirect diode is upstream from the optics, transmission $T_s$ needs to be calibrated with a direct diode. This is done by measuring a changing flux at fixed energy, e.g. by scanning a series of attenuators. The flux is calculated from the direct diode and used to calibrate the response of the indirect diode:
```
# Calibrate the upstream (indirect) diode against the direct diode: the
# direct diode provides the flux reference, the indirect diode's optics
# transmission T_s is fitted from the response pairs.
iodet = diode.factory("sxm_iodet1",optics=[atmpath, "KB"])
iodet.gain = 1e8
idet = diode.factory("sxm_idet")
idet.gain = 1e6
energy = 7
# Simulated noisy responses of the direct diode -> reference flux.
idetresp = np.linspace(3e4,5e4,100)
fluxmeas = idet.cpstoflux(energy,np.random.poisson(idetresp))
# Simulated noisy responses of the indirect diode at the same flux levels.
iodetresp = np.random.poisson(np.linspace(2e5,3e5,100))
fitinfo = iodet.calibrate(iodetresp,fluxmeas,energy,caliboption="optics")
print(iodet.geometry)
plt.plot(fluxmeas,iodetresp,marker='o',linestyle="")
plt.plot(fluxmeas,iodet.fluxtocps(energy,fluxmeas))
# Annotate the plot with the fit results (slope C_s, intercept D).
label = "\n".join(["{} = {}".format(k,v) for k,v in fitinfo.items()])
plt.annotate(label,xy=(0.5,0.1),xytext=(0.5,0.1),\
             xycoords="axes fraction",textcoords="axes fraction")
plt.title("Gain = {:~.0e}".format(iodet.gain))
plt.xlabel('Flux (Hz)')
plt.ylabel('Response (Hz)')
plt.show()
```
Note that the slope is $C_s(C)$ (the charge generated per photon reaching the sample, expressed here in units of elementary charge) and the intercept $D(A)$ (the dark current of the diode).
### Manual calibration
Calibration can also be done manually for a single flux-reponse pair. The response is expected to be $I(Hz)$ but it can also be $I(A)$. If you want a linear energy interpolation, calibration can also be simplified in which case it simply stores a lookup table for $C_s(C)$.
```
#Specify quantities manually with units:
#from spectrocrunch.patch.pint import ureg
#current = ureg.Quantity(1e-8,"A")
# Compare simple (C_s lookup table) vs full (fitted T_s) calibration from
# single flux-response pairs.
for simple in [True, False]:
    iodet = diode.factory("sxm_iodet1",optics=[atmpath, "KB"],simplecalibration=simple)
    iodet.gain = 1e8
    # Calibrate with Hz-Hz pair
    cps = 100000
    flux = 1e9
    energy = 6
    iodet.calibrate(cps,flux,energy,caliboption="optics")
    current = iodet.fluxtocurrent(energy,flux)
    # Calibrate with A-Hz pair
    energy = 10
    current *= 0.5
    iodet.calibrate(current,flux,energy,caliboption="optics")
    label = "C$_s$ table" if simple else "Calibrated T$_s$"
    print(label)
    print(iodet)
    print("")
    # Plot the calibrated response over the calibrated energy range.
    energy = np.linspace(6,10,10)
    response = [iodet.fluxtocps(en,flux).magnitude for en in energy]
    plt.plot(energy,response,label=label)
plt.legend()
plt.title("Gain = {:~.0e}, Flux = {:.0e}".format(iodet.Rout,flux))
plt.xlabel('Energy (keV)')
plt.ylabel('Response (Hz)')
plt.show()
```
| github_jupyter |
# Datasets
<div style="position: absolute; right:0;top:0"><a href="./importer.ipynb" style="text-decoration: none"> <font size="5">←</font></a>
<a href="../evaluation.py.ipynb" style="text-decoration: none"> <font size="5">↑</font></a></div>
This is an overview of all currently supported datasets. Datasets marked with ° contain ground truth class labels.
Datasets marked with * are currently not working.
- [20 Newsgroup](#20-Newsgroup)°
- [ACM](#ACM)
- [ATD](#ATD)
- [Classic4](#Classic4)°
- [DBLP](#DBLP)*
- [L5 - Yahoo! Answers Manner Questions](#L5---Yahoo!-Answers-Manner-Questions)°
- [Reuters](#Reuters)°
- [TweetsLA](#TweetsLA)
- [TweetsODP](#TweetsODP)°
- [US Consumer Finance Complaints](#US-Consumer-Finance-Complaints)°
---
## 20 Newsgroup
Identifier | newsgroup
-----------|-------
Importer | [NewsgroupImporter](./newsgroup.py)
Source | https://scikit-learn.org/stable/datasets/index.html#newsgroups-dataset
Documents | 18846
Classes | 20
Data will be downloaded to `data/raw/sklearn` automatically by the import script. Parameters:
- `remove` (list of strings)
Possible values are: `headers`, `footers`, `quotes`.
See https://scikit-learn.org/0.19/modules/generated/sklearn.datasets.fetch_20newsgroups.html#sklearn.datasets.fetch_20newsgroups
## ACM
Identifier | acm
-----------|-------
Importer | [ACMImporter](./acm.py)
Source | https://dataverse.harvard.edu/dataset.xhtml?persistentId=doi:10.7910/DVN/27695
Documents | 36396
Classes | -
Download the abstract.zip file to `data/raw/acm`.
## ATD
Identifier | atd
-----------|-------
Importer | [ATDImporter](./atd.py)
Source | various
Documents | 105
Classes | -
Originally 105 files of text extracted from publications of the participants of the ATD conference in Washington D.C. 2018. It is not publicly available, but you may use this class as a generic dataset. To do so, put .txt files in the `data/raw/atd` folder, one file per document. It does not support classes.
## Classic4
Identifier | classic4
-----------|-------
Importer | [ClassicImporter](./classic4.py)
Source | http://www.dataminingresearch.com/index.php/2010/09/classic3-classic4-datasets/
Documents | 7095
Classes | 4
Download the classicdocs.rar ("*You can freely download the whole collection (1.5MB RAR file).*") and extract it.
All files should be in the `data/raw/classic4` folder.
Files are named according to their class ("cacm","cisi","cran","med") plus an integer.
## DBLP
Identifier | dblp
-----------|-------
Source | http://dblp.org/xml/release/
Documents | ?
Classes | ?
Download and extract `dblp-2018-11-01.xml` to `data/raw/dblp`.
## L5 - Yahoo! Answers Manner Questions
Identifier | yahooL5
-----------|-------
Importer | [YahooImporter](./yahoo.py)
Source | https://webscope.sandbox.yahoo.com/catalog.php?datatype=l&did=10
Documents | 142594
Classes | 24
Version | 2.0
Request and download the Webscope_L5.tgz file. Extract and copy `manner.xml` to `data/raw/yahooL5`.
## Reuters
Identifier | reuters
-----------|-------
Importer | [ReutersImporter](./reuters.py)
Source | https://www.nltk.org/data.html
Documents | 10788
Classes | 90
This dataset can be downloaded with the *Natural Language Toolkit*. Set `data/raw/nltk` as the download directory when following these [instructions](https://www.nltk.org/data.html).
In short:
1. Run `python -m nltk.downloader` from your environment.
2. Set `data/raw/nltk` as the download directory.
3. From `corpora` select `reuters`.
4. Press Download.
Parameters:
- `min_docs_per_class` (int)
Only keep classes (and corresponding documents) containing at least `min_docs_per_class` documents.
## TweetsLA
Identifier | tweetsla
-----------|-------
Importer | [TweetsLAImporter](./tweetsla.py)
Source | Twitter API
Documents | ~90.000 per day
Classes | -
Put one or multiple .zip files in the `data/raw/tweetsla` folder.
Each archive should contain one or multiple text files where each line is a tweet in json format.
If it does contain an `extended_tweet` field the full text will be used.
## TweetsODP
Identifier | tweetsodp
-----------|-------
Importer | [TweetsODPImporter](./tweetsodp.py)
Source | http://www.zubiaga.org/datasets/odptweets/
Documents | ~13000000
Classes | 15
1. Gather data
Download ODPtweets-Mar17-29.tar.bz2 and ODPtweets-Apr12-24.tar.bz2 to `data/raw/tweetsodp`.
2. Extract ids and classes
Run python evaluation.py -s tweetsodp
This will create ODPtweets-Apr12-24.txt and ODPtweets-Mar17-29.txt containing one tweet id per line.
3. Download Tweets
- Download Hydrator from https://github.com/DocNow/hydrator
- Open tweet id files from step 2 and set titles as ODPtweets-Apr12-24 and ODPtweets-Mar17-29 accordingly.
- This should create two json files, about 18GB each.
4. Cleanup (optional)
You can delete the .txt files.
5. Compress (optional)
Zip the .json files as ODPtweets-Apr12-24.json.zip and ODPtweets-Mar17-29.json.zip to save storage space.
## US Consumer Finance Complaints
Identifier | complaints
-----------|-------
Importer | [ComplaintsImporter](./complaints.py)
Source | https://www.kaggle.com/cfpb/us-consumer-finance-complaints
Documents | 66806 (715437)
Classes | 46 (90)
Copy `consumer_complaints.csv` to `data/raw/complaints`.
| github_jupyter |
# Accessing C Struct Data
This notebook illustrates the use of `@cfunc` to connect to data defined in C.
## Via CFFI
Numba can map simple C structure types (i.e. with scalar members only) into NumPy structured `dtype`s.
Let's start with the following C declarations:
```
from cffi import FFI
src = """
/* Define the C struct */
typedef struct my_struct {
int i1;
float f2;
double d3;
float af4[7];
} my_struct;
/* Define a callback function */
typedef double (*my_func)(my_struct*, size_t);
"""
ffi = FFI()
ffi.cdef(src)
```
We can create `my_struct` data by doing:
```
# Make a array of 3 my_struct
mydata = ffi.new('my_struct[3]')
ptr = ffi.cast('my_struct*', mydata)
for i in range(3):
ptr[i].i1 = 123 + i
ptr[i].f2 = 231 + i
ptr[i].d3 = 321 + i
for j in range(7):
ptr[i].af4[j] = i * 10 + j
```
Using `numba.core.typing.cffi_utils.map_type` we can convert the `cffi` type into a Numba `Record` type.
```
from numba.core.typing import cffi_utils
cffi_utils.map_type(ffi.typeof('my_struct'), use_record_dtype=True)
```
The function type can be mapped in a signature:
```
sig = cffi_utils.map_type(ffi.typeof('my_func'), use_record_dtype=True)
sig
```
and `@cfunc` can take that signature directly:
```
from numba import cfunc, carray
@cfunc(sig)
def foo(ptr, n):
    """C callback: accumulate i1 * f2 / d3 + sum(af4) over n my_struct items."""
    base = carray(ptr, n)  # view pointer as an array of my_struct
    tmp = 0
    for i in range(n):
        tmp += base[i].i1 * base[i].f2 / base[i].d3 + base[i].af4.sum()
    return tmp
```
Testing the cfunc via the `.ctypes` callable:
```
addr = int(ffi.cast('size_t', ptr))
print("address of data:", hex(addr))
result = foo.ctypes(addr, 3)
result
```
## Manually creating a Numba `Record` type
Sometimes it is useful to create a `numba.types.Record` type directly. The easiest way is to use the `Record.make_c_struct()` method. Using this method, the field offsets are calculated from the natural size and alignment of prior fields.
In the example below, we will manually create the *my_struct* structure from above.
```
from numba import types
my_struct = types.Record.make_c_struct([
# Provides a sequence of 2-tuples i.e. (name:str, type:Type)
('i1', types.int32),
('f2', types.float32),
('d3', types.float64),
('af4', types.NestedArray(dtype=types.float32, shape=(7,)))
])
my_struct
```
Here's another example to demonstrate the offset calculation:
```
padded = types.Record.make_c_struct([
('i1', types.int32),
('pad0', types.int8), # padding bytes to move the offsets
('f2', types.float32),
('pad1', types.int8), # padding bytes to move the offsets
('d3', types.float64),
])
padded
```
Notice how the byte at `pad0` and `pad1` moves the offset of `f2` and `d3`.
A function signature can also be created manually:
```
new_sig = types.float64(types.CPointer(my_struct), types.uintp)
print('signature:', new_sig)
# Our new signature matches the previous auto-generated one.
print('signature matches:', new_sig == sig)
```
| github_jupyter |
<a href="https://colab.research.google.com/github/TeachingTextMining/TextClassification/blob/main/02-SA-Transformers-Basic/02-TextClassification-with-Transformers.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
## Clasificación de textos utilizando Transformers
La clasificación de textos consiste en, dado un texto, asignarle una entre varias categorías. Algunos ejemplos de esta tarea son:
- dado un tweet, categorizar su connotación como positiva, negativa o neutra.
- dado un post de Facebook, clasificarlo como portador de un lenguaje ofensivo o no.
En la actividad exploraremos cómo utilizar soluciones *out of the box* para esta tarea incluidas en la librería [Transformers](https://huggingface.co/transformers/) y su aplicación para clasificar reviews de [IMDB](https://www.imdb.com/) sobre películas en las categorías \[$positive$, $negative$\].
Puede encontrar más información sobre este problema en [Kaggle](https://www.kaggle.com/lakshmi25npathi/imdb-dataset-of-50k-movie-reviews) y en [Large Movie Review Datase](http://ai.stanford.edu/~amaas/data/sentiment/).
**Instrucciones:**
- siga las indicaciones y comentarios en cada apartado.
**Después de esta actividad nos habremos familiarizado con:**
- seleccionar e instanciar modelos pre-entrenados para realizar clasificación de textos.
- cómo instanciar un pipeline para la clasificación de textos utilizando la librería Transformers.
- utilizar este pipeline para clasificar nuevos textos.
**Requerimientos**
- python 3.6.12 - 3.8
- tensorflow==2.3.0
- transformers==4.2.1
- pandas==1.1.5
- plotly==4.13.0
- tqdm==4.56.0
- scikit-learn==0.24.0
<a name="sec:setup"></a>
### Instalación de librerías e importación de dependencias.
Para comenzar, es preciso instalar las dependencias, realizar los imports necesarios y definir algunas funciones auxiliares.
Ejecute las siguientes casillas prestando atención a las instrucciones adicionales en los comentarios.
```
# instalar librerías. Esta casilla es últil por ejemplo si se ejecuta el cuaderno en Google Colab
# Note que existen otras dependencias como tensorflow, etc. que en este caso se encontrarían ya instaladas
%%capture
!pip install transformers==4.2.1 # porqué esta versión especifica?
print('Done!')
# reset environment
%reset -f
# para cargar datos y realizar pre-procesamiento básico
import pandas as pd
from collections import Counter
# para evaluar los modelos
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix, roc_curve, auc
from sklearn.utils.multiclass import unique_labels
# para construir gráficas y realizar análisis exploratorio de los datos
import plotly.graph_objects as go
import plotly.figure_factory as ff
import plotly.express as px
from tqdm import tqdm # para crear una barra de progreso.
# algoritmos de clasificación, tokenizadores, etc.
from transformers import TextClassificationPipeline, DistilBertTokenizer, TFDistilBertForSequenceClassification, ModelCard
from transformers.tokenization_utils import TruncationStrategy
import tripadvisor as tp
print('Done!')
# helper: run the pipeline over `data` in batches and collect per-class scores
def predict_model(model, cfg, data, batch_size=128, pref='m'):
    """Classify `data` with a TextClassificationPipeline, batch by batch.

    Parameters
    ----------
    model : callable pipeline; invoked as model(texts, truncation=...) and
        expected to return, for each text, a list of {'label', 'score'}
        dicts — one dict per class, e.g.
        [{'label': 'NEGATIVE', 'score': 0.1}, {'label': 'POSITIVE', 'score': 0.9}]
    cfg : dict whose 'truncation' entry is forwarded to the pipeline.
    data : list of texts to classify.
    batch_size : number of texts sent to the pipeline per call (keeps
        memory usage bounded).
    pref : identifier used in the output column names
        (labels_[pref], scores_[pref]_[class], ...).

    Returns
    -------
    pandas.DataFrame with one scores_* column per class plus a
    labels_[pref] column holding the highest-scoring class per row.
    """
    scores = {}
    for start in tqdm(range(0, len(data), batch_size)):
        batch_text = data[start:start + batch_size]
        results = model(batch_text, truncation=cfg['truncation'])
        for inst in results:
            for cat in inst:
                col = f'scores_{pref}_{cat["label"].lower()}'
                # setdefault replaces the original membership-test/append/else
                scores.setdefault(col, []).append(cat['score'])
    res = pd.DataFrame(scores, columns=sorted(scores))
    # label = class with the highest score; column names are
    # 'scores_<pref>_<class>', so the class name is the third '_' field
    res[f'labels_{pref}'] = res.idxmax(axis=1).apply(lambda c: c.split('_')[2])
    return res.reindex(columns=sorted(res.columns))
# helper that summarises a classification: text report plus confusion matrix
def evaluate_model(y_true, y_pred, y_score=None, pos_label='positive'):
    """Print a classification report / accuracy and plot a confusion matrix.

    Parameters
    ----------
    y_true : reference labels.
    y_pred : predicted labels.
    y_score : scores for the positive class; only used by the ROC-curve
        section, which is currently commented out.
    pos_label : label treated as the positive class for the ROC curve.
    """
    print('==== Sumario de la clasificación ==== ')
    print(classification_report(y_true, y_pred))
    print('Accuracy -> {:.2%}\n'.format(accuracy_score(y_true, y_pred)))
    # plot the confusion matrix as an annotated heatmap
    display_labels = sorted(unique_labels(y_true, y_pred), reverse=True)
    cm = confusion_matrix(y_true, y_pred, labels=display_labels)
    z = cm[::-1]  # flip rows so the first label is drawn at the top
    x = display_labels
    y = x[::-1].copy()
    z_text = [[str(y) for y in x] for x in z]  # per-cell text annotations
    # NOTE(review): confusion_matrix puts true labels on rows and predictions
    # on columns, yet the x axis below is titled 'Valor Real' (true value) —
    # the axis titles look swapped; confirm before relying on the plot.
    fig_cm = ff.create_annotated_heatmap(z, x=x, y=y, annotation_text=z_text, colorscale='Viridis')
    fig_cm.update_layout(
        height=400, width=400,
        showlegend=True,
        margin={'t':150, 'l':0},
        title={'text' : 'Matriz de Confusión', 'x':0.5, 'xanchor': 'center'},
        xaxis = {'title_text':'Valor Real', 'tickangle':45, 'side':'top'},
        yaxis = {'title_text':'Valor Predicho', 'tickmode':'linear'},
    )
    fig_cm.show()
    # # ROC curve (defined for binary classification only)
    # fig_roc = None
    # if y_score is not None:
    #     fpr, tpr, thresholds = roc_curve(y_true, y_score, pos_label=pos_label)
    #     fig_roc = px.area(
    #         x=fpr, y=tpr,
    #         title={'text' : f'Curva ROC (AUC={auc(fpr, tpr):.4f})', 'x':0.5, 'xanchor': 'center'},
    #         labels=dict(x='Ratio Falsos Positivos', y='Ratio Verdaderos Positivos'),
    #         width=400, height=400
    #     )
    #     fig_roc.add_shape(type='line', line=dict(dash='dash'), x0=0, x1=1, y0=0, y1=1)
    #
    #     fig_roc.update_yaxes(scaleanchor="x", scaleratio=1)
    #     fig_roc.update_xaxes(constrain='domain')
    #
    #     fig_roc.show()
print('Done!')
```
### Carga de datos y análisis exploratorio
El primer paso consiste en obtener los datos relacionados con nuestra tarea dejándolos en el formato adecuado. Existen diferentes opciones, entre estas:
- montar nuestra partición de Google Drive y leer un fichero desde esta.
- leer los datos desde un fichero en una carpeta local.
- leer los datos directamente de un URL.
En este caso, se encuentran en un fichero separado por comas con la siguiente estructura:
| Phrase | Sentiment|
| ------ | ------ |
| This movie is really not all that bad... | positive |
Ejecute la siguiente casilla para leer los datos.
```
# descomente las siguientes 3 líneas para leer datos desde Google Drive, asumiendo que se trata de un fichero llamado review.csv localizado dentro de una carpeta llamada 'Datos' en su Google Drive
#from google.colab import drive
#drive.mount('/content/drive')
#path = '/content/drive/MyDrive/Datos/ejemplo_review_train.csv'
# descomente la siguiente línea para leer los datos desde un archivo local, por ejemplo, asumiendo que se encuentra dentro de un directorio llamado sample_data
#path = './sample_data/ejemplo_review_train.csv'
# descomente la siguiente línea para leer datos desde un URL
path = 'https://raw.githubusercontent.com/TeachingTextMining/TextClassification/main/01-SA-Pipeline/sample_data/ejercicio_tripadvisor.csv'#'https://github.com/TeachingTextMining/TextClassification/raw/main/01-SA-Pipeline/sample_data/ejemplo_review_train.csv'
# leer los datos
data = tp.load_data_tripadvisor(path)#pd.read_csv(path, sep=',')
print(data.head())
print('Done!')
```
Una vez leídos los datos, ejecute la siguiente casilla para construir una gráfica que muestra la distribución de clases en el corpus.
```
text_col = 'Opinion' #'Phrase' # columna del dataframe que contiene el texto (depende del formato de los datos)
class_col = 'Sentiment' # columna del dataframe que contiene la clase (depende del formato de los datos)
# obtener algunas estadísticas sobre los datos
categories = sorted(data[class_col].unique(), reverse=False)
hist= Counter(data[class_col])
print(f'Total de instancias -> {data.shape[0]}')
print(f'Distribución de clases -> {{item[0]:round(item[1]/len(data[class_col]), 3) for item in sorted(hist.items(), key=lambda x: x[0])}}')
print(f'Categorías -> {categories}')
print(f'Comentario de ejemplo -> {data[text_col][0]}')
print(f'Categoría del comentario -> {data[class_col][0]}')
fig = go.Figure(layout=go.Layout(height=400, width=600))
fig.add_trace(go.Bar(x=categories, y=[hist[cat] for cat in categories]))
fig.show()
print('Done!')
```
Finalmente, ejecute la siguiente casilla para crear los conjuntos de entrenamiento y validación que se utilizarán para entrenar y validar los modelos.
```
# obtener conjuntos de entrenamiento (90%) y validación (10%)
seed = 0 # fijar random_state para reproducibilidad
train, val = train_test_split(data, test_size=.1, stratify=data[class_col], random_state=seed)
print('Done!')
```
### Carga del modelo pre-entrenado
La librería Transformers provee diferentes modelos listos para usar en la tarea de clasificación de textos. Una forma flexible de lograrlo consiste en:
- seleccionar un modelo pre-entrenado adecuado para la tarea. Podemos examinar los modelos disponibles en [https://huggingface.co/models](https://huggingface.co/models). Estaremos utilizando el llamado [distilbert-base-uncased-finetuned-sst-2-english](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english) que permite clasificar un texto en idioma inglés de acuerdo con su connotación **positiva** o **negativa**.
- instanciar el modelo y su correspondiente tokenizador.
- crear un pipeline para la clasificación de textos, en este caso utilizando la clase [TextClassificationPipeline](https://huggingface.co/transformers/main_classes/pipelines.html#transformers.TextClassificationPipeline), que conecta el modelo anterior con el tokenizador que contiene el vocabulario.
- utilizar el pipeline para clasificar textos.
Ejecute la siguiente celda para instanciar el modelo y el correspondiente tokenizador.
**Note que:**
- la práctica recomendada al crear un nuevo modelo para Transformers es hacerlo disponible mediante un fichero que contiene los elementos necesarios para su posterior uso, como son el modelo, el tokenizador y una tarjeta con metadatos sobre el modelo.
- es conveniente indagar sobre el modelo base utilizado, en este caso **DistilBert**, esto permitirá seleccionar las clases adecuadas para instanciar el modelo.
```
# configuraciones
cfg = {} # diccionario para organizar los objetos que son parámetros del modelo, etc.
cfg['framework'] = 'tf'
cfg['task'] = 'sentiment-analysis'
cfg['trained_model_name'] = 'distilbert-base-uncased-finetuned-sst-2-english'
cfg['max_length'] = 512 # máxima longitud de secuencia recomendada por DistilBERT
cfg['truncation'] = TruncationStrategy.ONLY_FIRST
# cargar el tokenizador, disponible en Transformers. Establecer model_max_length para cuando el tokenizador sea llamado, trunque automáticamente.
#En este vector coloca la información semantica de la palabra, y sus elmentos semanticamente similares.
cfg['tokenizer'] = DistilBertTokenizer.from_pretrained(cfg['trained_model_name'] , model_max_length=cfg['max_length'])
# cargar el modelo, disponible en Transformers , en la Librería?
cfg['transformer'] = TFDistilBertForSequenceClassification.from_pretrained(cfg['trained_model_name'])
cfg['modelcard'] = ModelCard.from_pretrained(cfg['trained_model_name'])
# instanciar el pipeline para la clasificación de textos
#
model = TextClassificationPipeline(model=cfg['transformer'], tokenizer=cfg['tokenizer'], modelcard=None, framework=cfg['framework'], task=cfg['task'], return_all_scores=True)
print('Done!')
```
Ejecute la siguiente celda para clasificar una frase. Alternativamente, puede modificar el texto incluyendo uno de su preferencia. Recuerde que debe ser en idioma inglés.
```
# ejemplo de texto a clasificar, # lista [texto 1, text 2, ..., texto n]
text = ['Brian De Palma\'s undeniable virtuosity can\'t really camouflage the fact that his plot here is a thinly disguised\
\"Psycho\" carbon copy, but he does provide a genuinely terrifying climax. His "Blow Out", made the next year, was an improvement.']
m_pred = predict_model(model, cfg, text, pref='m' )
# el nombre de los campos dependerá de pref al llamar a predic_model y las clases. Ver comentarios en la definición de la función
pred_labels = m_pred['labels_m'].values[0]
pred_proba = m_pred['scores_m_positive'].values[0]
print(f'\nLa categoría de la frase es -> {pred_labels}')
print(f'El score asignado a la clase positiva es -> {pred_proba:.2f}')
print('Done!')
```
### Evaluación del modelo
En este caso no ha sido necesario entrenar el modelo, no obstante, lo evaluaremos en un conjunto de reviews para los que se conoce su categoría de modo que podamos estimar el desempeño en nuevos datos.
Ejecute la siguiente casilla para evaluar el modelo en la porción de validación separada previamente.
**Notar que:**
- la salida del modelo es un diccionario con 'label' y 'score'. Debemos formatearla para poder comparar con los valores de referencia.
- para evitar problemas relacionados con el consumo de memoria, se realizará la predicción de instancias por lotes. Además, se utilizará TruncationStrategy.ONLY_FIRST para indicar al pipeline que trunque las secuencias con longitud mayor a la recomendada por el modelo.
```
# predecir y evaluar conjunto de validación con el modelo
data = val
true_labels = data[class_col]
m_pred = predict_model(model, cfg, data[text_col].to_list(), batch_size=128, pref='m')
# el nombre de los campos dependerá de pref al llamar a predic_model y las clases. Ver comentarios en la definición de la función
evaluate_model(true_labels, m_pred['labels_m'], m_pred['scores_m_positive'], 'positive')
print('Done!')
```
### Predicción de nuevos datos
Una vez evaluado el modelo para estimar su rendimiento en nuestro problema, podemos utilizarlo para predecir nuevos datos. En el ejemplo, utilizaremos la porción de prueba preparada inicialmente.
Ejecute la siguiente casilla para cargar los datos, descomentando las instrucciones necesarias según sea el caso.
```
# descomente las siguientes 3 líneas para leer datos desde Google Drive, asumiendo que se trata de un fichero llamado review.csv localizado dentro de una carpeta llamada 'Datos' en su Google Drive
#from google.colab import drive
#drive.mount('/content/drive')
#path = '/content/drive/MyDrive/Datos/ejemplo_review_train.csv'
# descomente la siguiente línea para leer los datos desde un archivo local, por ejemplo, asumiendo que se encuentra dentro de un directorio llamado sample_data
#path = './sample_data/ejemplo_review_train.csv'
# descomente la siguiente línea para leer datos desde un URL
path = 'https://github.com/TeachingTextMining/TextClassification/raw/main/02-SA-Transformers-Basic/sample_data/ejemplo_review_test.csv'
# leer los datos
new_data = pd.read_csv(path, sep=',')
print('Done!')
```
Ejecute la siguiente celda para clasificar los textos. Tenga en cuenta que, en dependencia del entorno de ejecución, la cantidad de textos y su longitud, la ejecución puede tardar varios minutos o requerir una cantidad de memoria no disponible.
```
# predecir los datos de prueba
m_pred = predict_model(model, cfg, new_data[text_col].to_list(), batch_size=128, pref='m')
pred_labels = m_pred['labels_m']
# obtener algunas estadísticas sobre la predicción en el conjunto de pruebas
categories = sorted(pred_labels.unique(), reverse=True)
hist = Counter(pred_labels.values)
fig = go.Figure(layout=go.Layout(height=400, width=600))
fig.add_trace(go.Bar(x=categories, y=[hist[cat] for cat in categories]))
fig.show()
print('Done!')
```
| github_jupyter |
# Final Project
## Daniel Blessing
## Can we use historical data from professional league of legends games to try and predict the results of future contests?
## Load Data
```
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier # ensemble models we're trying out
from sklearn.model_selection import train_test_split # train test split for CV
from sklearn.metrics import accuracy_score, f1_score # two evalutaion metrics for binary classification
from sklearn.pipeline import Pipeline # robust pipeline
from sklearn.impute import *
from sklearn.preprocessing import *
from sklearn.compose import * # preprocessing importance
from sklearn.base import BaseEstimator #for randomized CV
from sklearn.model_selection import RandomizedSearchCV
# data pocessing
import numpy as np
import pandas as pd
# load data from a local file; if missing or unparsable, download from github
filename = 'Dev.csv'
remote_location = 'https://raw.githubusercontent.com/Drblessing/predcting_LoL/master/Lol.csv'
try:
    # Local version
    df = pd.read_csv(filename, index_col=0)
except (FileNotFoundError, pd.errors.ParserError):
    # BUGFIX: the original `except FileNotFoundError or ParserError:` only
    # caught FileNotFoundError — `or` evaluates to its first truthy operand,
    # it does not build a union of exception types (and ParserError was never
    # imported). A tuple of types is the correct form.
    # Grab the remote file and save it
    df = pd.read_csv(remote_location, index_col=0)
    df.to_csv(filename)

# create X, y datasets and train/test split them
y = df['bResult']                  # target: 1 if the blue team won
df = df.drop(columns=['bResult'])
X = df
X_train, X_val, y_train, y_val = train_test_split(X, y, train_size=0.8, shuffle=True, random_state=42)
# Legacy Feature Engineering
'''pregame_data = Lol[['bResult','blueTopChamp','blueJungleChamp','blueMiddleChamp','blueADCChamp','blueSupportChamp',
'blueBans','redTopChamp','redJungleChamp','redMiddleChamp','redADCChamp','redSupportChamp','redBans']]
# process list of bans into individual columns
pregame_data_b = pregame_data.assign(names=pregame_data.blueBans.str.split(","))
pregame_data_r = pregame_data.assign(names=pregame_data.redBans.str.split(","))
blue_bans = pregame_data_b.names.apply(pd.Series)
red_bans = pregame_data_r.names.apply(pd.Series)
blue_names = {0: "b1", 1: "b2",2:"b3",3:"b4",4:"b5"}
red_names = {0:"r1",1:"r2",2:"r3",3:"r4",4:"r5"}
blue_bans = blue_bans.rename(columns=blue_names)
red_bans = red_bans.rename(columns=red_names)
pregame_data = pd.concat([pregame_data, blue_bans,red_bans], axis=1)
# drop legacy columns
pregame_data = pregame_data.drop(columns = ['blueBans','redBans'])
# define y and drop it
y = pregame_data['bResult']
pregame_data = pregame_data.drop(columns = ['bResult'])
# fix blue bans strings
pregame_data.b1 = pregame_data.b1.str.replace('[','').str.replace(']','').str.replace("'",'')
pregame_data.b2 = pregame_data.b2.str.replace('[','').str.replace(']','').str.replace("'",'')
pregame_data.b3 = pregame_data.b3.str.replace('[','').str.replace(']','').str.replace("'",'')
pregame_data.b4 = pregame_data.b4.str.replace('[','').str.replace(']','').str.replace("'",'')
pregame_data.b5 = pregame_data.b5.str.replace('[','').str.replace(']','').str.replace("'",'')
# fix red bans strings
pregame_data.r1 = pregame_data.r1.str.replace('[','').str.replace(']','').str.replace("'",'')
pregame_data.r2 = pregame_data.r2.str.replace('[','').str.replace(']','').str.replace("'",'')
pregame_data.r3 = pregame_data.r3.str.replace('[','').str.replace(']','').str.replace("'",'')
pregame_data.r4 = pregame_data.r4.str.replace('[','').str.replace(']','').str.replace("'",'')
pregame_data.r5 = pregame_data.r5.str.replace('[','').str.replace(']','').str.replace("'",'')''';
```
## Visuatlizations
```
# visualizations
import matplotlib.pyplot as plt
from collections import Counter
x = ['Blue win','Red win']
heights = [0,0]
heights[0] = sum(y)
heights[1] = len(y) - sum(y)
plt.bar(x,heights);
plt.ylabel('Number of Games won');
plt.xlabel('Team');
plt.title('Number of wins by team color in competitive LoL 2015-2017');
# check general accuracy of naive model
bw = sum(y)/len(y)
print(f'Percentage of games won by blue team: {bw*100:.2f} %')
# load champs
champs = Counter(X['blueADCChamp'])
l = champs.keys()
v = champs.values()
# get rid of one off champs
l = [l_ for l_ in l if champs[l_] > 1]
v = [v_ for v_ in v if v_ > 1]
plt.pie(v,labels=l);
plt.title('Distribution of ADC champs for competitive Lol 2015-2017')
```
## Model Building
```
# define categorical variables, all of our data
categorical_columns = (X.dtypes == object)
# impute missing values and hot encode categories
cat_pipe = Pipeline([('imputer', SimpleImputer(strategy = 'constant', fill_value='Unknown', add_indicator=True)),
('ohe', OneHotEncoder(handle_unknown='ignore'))])
# process categorical variables
preprocessing = ColumnTransformer([('categorical', cat_pipe, categorical_columns)], remainder='passthrough')
# Helper class for RandomizedSearchCV
class DummyEstimator(BaseEstimator):
    """Placeholder estimator for the 'clf' pipeline step.

    RandomizedSearchCV swaps this out for the real classifiers listed in the
    search space, so the methods only need to exist; they intentionally do
    nothing.
    """

    # Accept the standard (X, y) estimator signature so Pipeline.fit() does
    # not raise TypeError if the placeholder is ever fitted directly — the
    # original fit(self)/score(self) took no data arguments and broke the
    # scikit-learn estimator contract.
    def fit(self, X=None, y=None):
        pass

    def score(self, X=None, y=None):
        pass
# create pipeline
pipe = Pipeline(steps = [('preprocessing', preprocessing),
('clf', DummyEstimator())])
search_space = [
{'clf': [ExtraTreesClassifier(n_jobs=-1,random_state=42)],
'clf__criterion': ['gini', 'entropy'],
'clf__min_samples_leaf': np.linspace(1, 30, 5, dtype=int),
'clf__bootstrap': [True, False],
'clf__class_weight': [None, 'balanced', 'balanced_subsample'],
'clf__n_estimators': np.linspace(50, 500, 8, dtype=int)},
{'clf': [RandomForestClassifier(n_jobs=-1,random_state=42)],
'clf__criterion': ['gini', 'entropy'],
'clf__min_samples_leaf': np.linspace(1, 10, 4, dtype=int),
'clf__bootstrap': [True, False],
'clf__class_weight': [None, 'balanced', 'balanced_subsample'],
'clf__n_estimators': np.linspace(50, 300, 5, dtype=int)}]
gs = RandomizedSearchCV(pipe,
search_space,
scoring='accuracy', # accuracy for game prediction
n_iter=30,
cv=5,
n_jobs=-1)
gs.fit(X, y);
gs.best_score_, gs.best_params_
# Results:
'''
(0.5510498687664042,
{'clf__n_estimators': 178,
'clf__min_samples_leaf': 30,
'clf__criterion': 'gini',
'clf__class_weight': None,
'clf__bootstrap': True,
'clf': ExtraTreesClassifier(bootstrap=True, min_samples_leaf=30, n_estimators=178,
n_jobs=-1, random_state=42)})'''
```
## Evaluation Metric
```
pipe = Pipeline([('preprocessing', preprocessing),
('clf', ExtraTreesClassifier(
bootstrap = True,
min_samples_leaf = 15,
n_estimators = 114,
n_jobs = -1,
criterion = 'gini',
class_weight = None,
random_state=42))])
pipe.fit(X_train,y_train);
y_pred = pipe.predict(X_val)
accuracy = accuracy_score(y_val,y_pred)
f1 = f1_score(y_val,y_pred)
print(f"accuracy: {accuracy:,.6f}")
print(f"f1: {f1:,.6f}")
```
## Results
```
print(f'Model accuracy: {accuracy*100:.2f} %')
print(f'Naive accuracy: {bw*100:.2f} %')
print(f'Prediction improvement from model: {abs(bw-accuracy)*100:.2f} %')
```
| github_jupyter |
```
%matplotlib inline
import os
import time
import torch
from torch.autograd import Variable
from torchvision import datasets, transforms
import numpy as np
import scipy.io
import warnings
warnings.filterwarnings("ignore")
import matplotlib.pyplot as plt
# import scipy.misc
from PIL import Image
import cv2
from darknet import Darknet
import dataset
from utils import *
from MeshPly import MeshPly
# Create new directory
def makedirs(path):
    """Create directory `path` (including parents) if it does not exist.

    Uses exist_ok=True instead of the original check-then-create, which had
    a race window between os.path.exists() and os.makedirs().
    """
    os.makedirs(path, exist_ok=True)
def valid(datacfg, modelcfg, weightfile):
    """Validate a single-shot 6D pose network on a LINEMOD-style dataset.

    Parameters:
        datacfg: .data config file with dataset paths, camera intrinsics
            and image dimensions.
        modelcfg: darknet .cfg file describing the network architecture.
        weightfile: trained weights to load.

    Prints per-stage timings plus 2D-projection, ADD (10% of the model
    diameter) and 5cm/5deg accuracies; when `save` is enabled, dumps
    per-frame ground-truth and predicted poses under `backupdir`.
    """
    def truths_length(truths, max_num_gt=50):
        # Ground-truth rows are zero-padded: the first row whose second
        # field is 0 marks the end of the real labels.
        for i in range(max_num_gt):
            if truths[i][1] == 0:
                return i
        return max_num_gt  # FIX: original returned None when all rows were used

    # Parse configuration files
    data_options = read_data_cfg(datacfg)
    valid_images = data_options['valid']
    meshname = data_options['mesh']
    backupdir = data_options['backup']
    name = data_options['name']
    gpus = data_options['gpus']
    # Camera intrinsics and image size
    fx = float(data_options['fx'])
    fy = float(data_options['fy'])
    u0 = float(data_options['u0'])
    v0 = float(data_options['v0'])
    im_width = int(data_options['width'])
    im_height = int(data_options['height'])
    if not os.path.exists(backupdir):
        makedirs(backupdir)

    # Parameters
    seed = int(time.time())
    os.environ['CUDA_VISIBLE_DEVICES'] = gpus
    torch.cuda.manual_seed(seed)
    save = False        # dump per-frame R/t/corner files to disk
    visualize = True    # draw gt (green) and predicted (blue) boxes
    testtime = True     # print per-stage timings
    num_classes = 1
    testing_samples = 0.0
    # The 12 edges of a 3D bounding box, as pairs of corner indices
    edges_corners = [[0, 1], [0, 2], [0, 4], [1, 3], [1, 5], [2, 3], [2, 6], [3, 7], [4, 5], [4, 6], [5, 7], [6, 7]]
    if save:
        makedirs(backupdir + '/test')
        makedirs(backupdir + '/test/gt')
        makedirs(backupdir + '/test/pr')

    # Accumulators for errors and (optionally) saved predictions
    testing_error_trans = 0.0
    testing_error_angle = 0.0
    testing_error_pixel = 0.0
    errs_2d = []
    errs_3d = []
    errs_trans = []
    errs_angle = []
    errs_corner2D = []
    preds_trans = []
    preds_rot = []
    preds_corners2D = []
    gts_trans = []
    gts_rot = []
    gts_corners2D = []

    # Read object model information, get 3D bounding box corners
    mesh = MeshPly(meshname)
    vertices = np.c_[np.array(mesh.vertices), np.ones((len(mesh.vertices), 1))].transpose()
    corners3D = get_3D_corners(vertices)
    try:
        # BUGFIX: this read `options['diam']` — an undefined name — so the
        # original bare `except:` swallowed the NameError and always
        # recomputed the diameter from the mesh, ignoring the config value.
        diam = float(data_options['diam'])
    except KeyError:
        diam = calc_pts_diameter(np.array(mesh.vertices))

    # Read intrinsic camera parameters
    intrinsic_calibration = get_camera_intrinsic(u0, v0, fx, fy)

    # Get validation file names
    with open(valid_images) as fp:
        tmp_files = fp.readlines()
        valid_files = [item.rstrip() for item in tmp_files]

    # Specify model, load pretrained weights, pass to GPU and set the module in evaluation mode
    model = Darknet(modelcfg)
    model.print_network()
    model.load_weights(weightfile)
    model.cuda()
    model.eval()
    test_width = model.test_width
    test_height = model.test_height
    num_keypoints = model.num_keypoints

    # Get the parser for the test dataset
    valid_dataset = dataset.listDataset(valid_images,
                                        shape=(test_width, test_height),
                                        shuffle=False,
                                        transform=transforms.Compose([transforms.ToTensor(),]))

    # Specify the number of workers for multiple processing, get the dataloader for the test dataset
    kwargs = {'num_workers': 4, 'pin_memory': True}
    test_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=1, shuffle=False, **kwargs)

    logging(" Testing {}...".format(name))  # `logging` is utils.logging, not the stdlib module
    logging(" Number of test samples: %d" % len(test_loader.dataset))

    # Iterate through test batches (batch size for test data is 1)
    count = 0
    for batch_idx, (data, target) in enumerate(test_loader):
        # Images: CHW tensor -> HWC numpy array for plotting
        img = data[0, :, :, :]
        img = img.numpy().squeeze()
        img = np.transpose(img, (1, 2, 0))
        t1 = time.time()
        # Pass data to GPU
        data = data.cuda()
        target = target.cuda()
        # Wrap tensors in Variable class, set volatile=True for inference mode and to use minimal memory during inference
        data = Variable(data, volatile=True)
        t2 = time.time()
        # Forward pass
        output = model(data).data
        t3 = time.time()
        # Using confidence threshold, eliminate low-confidence predictions
        all_boxes = get_region_boxes(output, num_classes, num_keypoints)
        t4 = time.time()
        # Evaluation: iterate through all batch elements (batch size is 1,
        # hence the single-element zip)
        for box_pr, target in zip([all_boxes], [target[0]]):
            # For each image, get all the targets (for multiple object pose estimation, there might be more than 1 target per image)
            truths = target.view(-1, num_keypoints*2+3)
            # Get how many objects are present in the scene
            num_gts = truths_length(truths)
            # Iterate through each ground-truth object
            for k in range(num_gts):
                box_gt = list()
                for j in range(1, 2*num_keypoints+1):
                    box_gt.append(truths[k][j])
                box_gt.extend([1.0, 1.0])
                box_gt.append(truths[k][0])

                # Denormalize the corner predictions
                corners2D_gt = np.array(np.reshape(box_gt[:18], [9, 2]), dtype='float32')
                corners2D_pr = np.array(np.reshape(box_pr[:18], [9, 2]), dtype='float32')
                corners2D_gt[:, 0] = corners2D_gt[:, 0] * im_width
                corners2D_gt[:, 1] = corners2D_gt[:, 1] * im_height
                corners2D_pr[:, 0] = corners2D_pr[:, 0] * im_width
                corners2D_pr[:, 1] = corners2D_pr[:, 1] * im_height
                preds_corners2D.append(corners2D_pr)
                gts_corners2D.append(corners2D_gt)

                # Compute corner prediction error
                corner_norm = np.linalg.norm(corners2D_gt - corners2D_pr, axis=1)
                corner_dist = np.mean(corner_norm)
                errs_corner2D.append(corner_dist)

                # Compute [R|t] by pnp
                R_gt, t_gt = pnp(np.array(np.transpose(np.concatenate((np.zeros((3, 1)), corners3D[:3, :]), axis=1)), dtype='float32'), corners2D_gt, np.array(intrinsic_calibration, dtype='float32'))
                R_pr, t_pr = pnp(np.array(np.transpose(np.concatenate((np.zeros((3, 1)), corners3D[:3, :]), axis=1)), dtype='float32'), corners2D_pr, np.array(intrinsic_calibration, dtype='float32'))

                # Compute translation error
                trans_dist = np.sqrt(np.sum(np.square(t_gt - t_pr)))
                errs_trans.append(trans_dist)

                # Compute angle error
                angle_dist = calcAngularDistance(R_gt, R_pr)
                errs_angle.append(angle_dist)

                # Compute pixel error
                Rt_gt = np.concatenate((R_gt, t_gt), axis=1)
                Rt_pr = np.concatenate((R_pr, t_pr), axis=1)
                proj_2d_gt = compute_projection(vertices, Rt_gt, intrinsic_calibration)
                proj_2d_pred = compute_projection(vertices, Rt_pr, intrinsic_calibration)
                proj_corners_gt = np.transpose(compute_projection(corners3D, Rt_gt, intrinsic_calibration))
                proj_corners_pr = np.transpose(compute_projection(corners3D, Rt_pr, intrinsic_calibration))
                norm = np.linalg.norm(proj_2d_gt - proj_2d_pred, axis=0)
                pixel_dist = np.mean(norm)
                errs_2d.append(pixel_dist)

                if visualize:
                    # Visualize: ground truth in green, prediction in blue
                    plt.xlim((0, im_width))
                    plt.ylim((0, im_height))
                    # plt.imshow(scipy.misc.imresize(img, (im_height, im_width)))
                    plt.imshow(cv2.resize(img, (im_width, im_height)))
                    # Projections
                    for edge in edges_corners:
                        plt.plot(proj_corners_gt[edge, 0], proj_corners_gt[edge, 1], color='g', linewidth=3.0)
                        plt.plot(proj_corners_pr[edge, 0], proj_corners_pr[edge, 1], color='b', linewidth=3.0)
                    plt.gca().invert_yaxis()
                    plt.show()

                # Compute 3D distances
                transform_3d_gt = compute_transformation(vertices, Rt_gt)
                transform_3d_pred = compute_transformation(vertices, Rt_pr)
                norm3d = np.linalg.norm(transform_3d_gt - transform_3d_pred, axis=0)
                vertex_dist = np.mean(norm3d)
                errs_3d.append(vertex_dist)

                # Sum errors
                testing_error_trans += trans_dist
                testing_error_angle += angle_dist
                testing_error_pixel += pixel_dist
                testing_samples += 1
                count = count + 1

                if save:
                    preds_trans.append(t_pr)
                    gts_trans.append(t_gt)
                    preds_rot.append(R_pr)
                    gts_rot.append(R_gt)
                    np.savetxt(backupdir + '/test/gt/R_' + valid_files[count][-8:-3] + 'txt', np.array(R_gt, dtype='float32'))
                    np.savetxt(backupdir + '/test/gt/t_' + valid_files[count][-8:-3] + 'txt', np.array(t_gt, dtype='float32'))
                    np.savetxt(backupdir + '/test/pr/R_' + valid_files[count][-8:-3] + 'txt', np.array(R_pr, dtype='float32'))
                    np.savetxt(backupdir + '/test/pr/t_' + valid_files[count][-8:-3] + 'txt', np.array(t_pr, dtype='float32'))
                    np.savetxt(backupdir + '/test/gt/corners_' + valid_files[count][-8:-3] + 'txt', np.array(corners2D_gt, dtype='float32'))
                    np.savetxt(backupdir + '/test/pr/corners_' + valid_files[count][-8:-3] + 'txt', np.array(corners2D_pr, dtype='float32'))

        t5 = time.time()

    # Compute 2D projection error, 6D pose error, 5cm5degree error
    px_threshold = 5 # 5 pixel threshold for 2D reprojection error is standard in recent sota 6D object pose estimation works
    eps = 1e-5
    acc = len(np.where(np.array(errs_2d) <= px_threshold)[0]) * 100. / (len(errs_2d)+eps)
    acc3d10 = len(np.where(np.array(errs_3d) <= diam * 0.1)[0]) * 100. / (len(errs_3d)+eps)
    # (the original computed acc5cm5deg twice; the duplicate line was removed)
    acc5cm5deg = len(np.where((np.array(errs_trans) <= 0.05) & (np.array(errs_angle) <= 5))[0]) * 100. / (len(errs_trans)+eps)
    corner_acc = len(np.where(np.array(errs_corner2D) <= px_threshold)[0]) * 100. / (len(errs_corner2D)+eps)
    mean_err_2d = np.mean(errs_2d)
    mean_corner_err_2d = np.mean(errs_corner2D)
    nts = float(testing_samples)

    if testtime:
        print('-----------------------------------')
        print(' tensor to cuda : %f' % (t2 - t1))
        print(' forward pass : %f' % (t3 - t2))
        print('get_region_boxes : %f' % (t4 - t3))
        print(' prediction time : %f' % (t4 - t1))
        print(' eval : %f' % (t5 - t4))
        print('-----------------------------------')

    # Print test statistics
    logging('Results of {}'.format(name))
    logging(' Acc using {} px 2D Projection = {:.2f}%'.format(px_threshold, acc))
    logging(' Acc using 10% threshold - {} vx 3D Transformation = {:.2f}%'.format(diam * 0.1, acc3d10))
    logging(' Acc using 5 cm 5 degree metric = {:.2f}%'.format(acc5cm5deg))
    logging(" Mean 2D pixel error is %f, Mean vertex error is %f, mean corner error is %f" % (mean_err_2d, np.mean(errs_3d), mean_corner_err_2d))
    logging(' Translation error: %f m, angle error: %f degree, pixel error: % f pix' % (testing_error_trans/nts, testing_error_angle/nts, testing_error_pixel/nts) )

    if save:
        predfile = backupdir + '/predictions_linemod_' + name + '.mat'
        scipy.io.savemat(predfile, {'R_gts': gts_rot, 't_gts':gts_trans, 'corner_gts': gts_corners2D, 'R_prs': preds_rot, 't_prs':preds_trans, 'corner_prs': preds_corners2D})
datacfg = 'cfg/ape.data'
modelcfg = 'cfg/yolo-pose.cfg'
weightfile = 'backup/ape/model_backup.weights'
valid(datacfg, modelcfg, weightfile)
```
| github_jupyter |
```
%load_ext autoreload
%pylab inline
%autoreload 2
import seaborn as sns
import pandas as pd
import numpy as np
import sys
sys.path.append('..')
import tensorflow as tf
from tuning_manifold.fnp_model import Predictor
from tuning_manifold.util import negloglik, pearson
tfk = tf.keras
# construct a model with architecture matching the saved file
neurons = 16
stimuli = 2048 # make this longer because we draw additional samples to measure prediction
cell_latent_dim = 24
image_shape = [36, 64, 1]  # stimulus frames: 36 x 64 pixels, 1 channel
# Architecture spec consumed by Predictor — presumably [kernel, filters]
# conv stages plus feature width / padding / normalization; confirm against
# tuning_manifold.fnp_model.
architecture = [[17,16],[5,8],[3,4],[3,4],16,'same','batch']
# Two inputs: per-trial neural responses and the matching stimulus frames.
inputs = [tfk.Input([stimuli, neurons], name='responses'), tfk.Input([stimuli, *image_shape], name='stimuli')]
predictor = Predictor(cell_latent_dim=cell_latent_dim, architecture=architecture, cummulative=True, contrastive_weight=0, l2_weight=0)
model = tfk.Model(inputs, predictor(inputs))
model.compile(loss=negloglik, metrics=[pearson, 'mse'], optimizer=tf.optimizers.Adam(learning_rate=1e-3, clipnorm=10))
# Restore pretrained weights; the architecture above must match the file.
model.load_weights('fnp_mouse_visual')
# Load data into memory. Follow instruction in the PNO directory to
# download the test dataset
from dataset import FileTreeDataset
dat = FileTreeDataset('../pno/data/Lurz2020/static20457-5-9-preproc0', 'images', 'responses')
# Materialize every trial's stimulus and response into dense arrays.
trials = range(len(dat))
stimuli = np.stack([dat[i][0][0] for i in trials], axis=0)
responses = np.stack([dat[i][1] for i in trials], axis=0)
# compare to the same units used in the PNO experiment
test_units = np.load('20457-5-9_test_units.npy')
# NOTE: `trials` is rebound here from an index range to a boolean mask.
trials = dat.trial_info.tiers == 'train' # this indicates not repeated
# Add a leading batch axis (and trailing channel axis for the stimuli);
# keep only responses from the units shared with the PNO experiment.
test_stimuli = stimuli[np.newaxis, trials, ..., np.newaxis].astype(np.float32)
test_responses = responses[np.newaxis, trials, ...][..., np.isin(dat.neurons.unit_ids, test_units) ].astype(np.float32)
# Can use the version of these in the predictor to use samples from
# distribution, or these to sample the mean (when testing)
import tensorflow as tf
import tensorflow_probability as tfp
from tuning_manifold.fnp_model import DeepSetSimple, HigherRankOperator, image_to_distribution
from tuning_manifold.util import interpolate_bilinear
tfk = tf.keras
tfpl = tfp.layers
# layers[2] of the compiled model is the Predictor instance built earlier.
predictor = model.layers[2]
location_predictor = predictor.location_predictor
# Replace stochastic location sampling with the distribution mean (adds a
# singleton "sample" axis so downstream shapes stay compatible).
heatmap_to_dist_mean = tf.keras.layers.Lambda(lambda x: tf.expand_dims(image_to_distribution(x).mean(), axis=1))
mean_location_predictor = tfk.Sequential([
# Perform convolution on each g-response image and output flattened version
location_predictor.layers[0],
# Exclusive set collapse
DeepSetSimple(True),
# Take the collapsed image and convert to distribution
HigherRankOperator(heatmap_to_dist_mean)
], name='mean_location_predictor')
feature_mlp = predictor.feature_mlp
# Swap the stochastic MVN layer for one whose convert_to_tensor_fn is the
# mean, making the summary deterministic at test time.
# NOTE(review): item assignment into `feature_mlp.layers[5]` may not rewire
# the underlying Keras graph (`layers` can be a derived list) — confirm the
# replacement actually takes effect.
feature_mlp.layers[5] = tfpl.MultivariateNormalTriL(cell_latent_dim, convert_to_tensor_fn=lambda x: x.mean())
# Infer, for each neuron, its spatial readout location and a summary feature
# vector from an observation set of (responses, stimuli) pairs.
# Returns (sample_locations, cell_summary) and optionally the interpolated
# image features when return_im_feat is True.
def compute_summary(predictor, inputs, return_im_feat=False, samples=1):
responses, stimuli = inputs
# convolve input stimuli
g = predictor.im_conv_wrapper(stimuli)
gr = predictor.crc([responses, g])
# Deterministic (mean) readout locations — see mean_location_predictor above.
sample_locations = mean_location_predictor(gr)
# extract the image feature for each trial x neuron estimate of the location
# NOTE(review): the next two unpackings rebind `stimuli` and `samples`,
# shadowing the input / parameter — so the `samples=1` argument is never
# actually used. Verify this is intended.
bs, stimuli, Ny, Nx, Nc = g.shape
bs, stimuli, neurons, samples, coordinates = sample_locations.shape
tf.assert_equal(coordinates, 2)
im_feat = interpolate_bilinear(tf.reshape(g, [-1, Ny, Nx, Nc]),
tf.reshape(sample_locations, [-1, neurons * samples, 2]))
im_feat = tf.reshape(im_feat, [-1, stimuli, neurons, samples, Nc])
# construct vector for each trial that includes information about the responses
# and the feature, including a STA type response
response_samples = tf.tile(responses[:, :, :, None, None], [1, 1, 1, samples, 1])
x2 = tf.concat([im_feat, im_feat * response_samples, response_samples], axis=-1)
# then let those interact through an MLP and then compute an average feature.
# again for trial N this is computed only using information from the other
# trials. This should compute a summary statistics describing a neuron (other
# than the spatial location) based on those other trials.
cell_summary = feature_mlp(x2)
if not return_im_feat:
return sample_locations, cell_summary
else:
return sample_locations, cell_summary, im_feat
def compute_rs(model, inputs, max_trial=1000, trials=[10, 25, 50, 100, 250, 500, 1000]):
    """Measure held-out prediction quality (Pearson r per neuron) as a
    function of observation-set size.

    The first ``max_trial`` trials form the observation set used to infer each
    neuron's readout location and summary vector; the remaining trials are
    predicted from those inferred quantities and correlated with the
    measured responses.

    Parameters
    ----------
    model : keras Model whose last layer is the Predictor.
    inputs : (responses, stimuli) pair of arrays shaped [1, n_trials, ...].
    max_trial : number of leading trials used as observations.
    trials : observation-set sizes K at which to evaluate. Read-only here,
        so the mutable default list is safe.

    Returns
    -------
    (trials, rs) where ``rs`` has shape [len(trials), n_neurons].
    """
    # Bug fix: `import scipy` alone does not guarantee the `scipy.stats`
    # submodule is loaded; import it explicitly so `scipy.stats.pearsonr`
    # is always available (it previously worked only when another library
    # had already imported scipy.stats).
    import scipy.stats
    responses, stimuli = inputs
    r = responses[:, :max_trial, ...]
    s = stimuli[:, :max_trial, ...]
    predictor = model.layers[-1]
    sample_location, cell_summary = compute_summary(predictor, (r, s))
    # Convolve the held-out stimuli once; the features are reused for every K.
    im_conv = predictor.im_conv_wrapper.operator
    g = im_conv(stimuli[0, max_trial:])
    rs = []
    for trial in trials:
        # NOTE(review): locations are taken from the final (-1) observation
        # index regardless of `trial`, while the summary uses `trial - 1`
        # observations — presumably intentional (locations converge quickly),
        # but worth confirming.
        trial_sample_locations = sample_location[0, -1, :, 0, :]
        # Per-neuron linear readout weights (w) and bias (b) inferred from
        # the first `trial` observations.
        w, b = cell_summary[0, trial-1, :, 0, :-1], cell_summary[0, trial-1, :, 0, -1]
        w = tf.expand_dims(w, 0)
        b = tf.expand_dims(b, 0)
        locations = tf.reshape(trial_sample_locations, [1, trial_sample_locations.shape[0], trial_sample_locations.shape[-1]])
        locations = tf.tile(locations, [g.shape[0], 1, 1])
        im_feat = interpolate_bilinear(g, locations)
        # Linear readout followed by elu + 1 to keep predicted rates positive.
        t = tf.reduce_sum(tf.multiply(im_feat, w), axis=-1) + b
        t = tf.nn.elu(t) + 1
        r = [scipy.stats.pearsonr(responses[0, max_trial:, i], t[:, i].numpy())[0] for i in range(t.shape[1])]
        rs.append(r)
    return trials, np.array(rs)
# Evaluate prediction quality in groups of 10 neurons at a time (memory
# friendly), then pool the per-group results.
all_rs = []
for i in np.arange(0, 1000, 10):
trials, rs = compute_rs(model, (test_responses[:, :, i:i+10], test_stimuli),
max_trial=1024, trials=np.arange(25,1025,25))
all_rs.append(rs)
# Stack per-group results along the neuron axis: shape [n_sizes, n_neurons].
all_rs = np.concatenate(all_rs, axis=1)
# Mean Pearson r vs. observation-set size, log-scaled x axis.
plt.semilogx(trials, np.mean(all_rs, axis=1), 'k.-')
plt.xlabel('Observation set size (K)')
plt.ylabel('Pearson R');
sns.despine(trim=False)
```
| github_jupyter |
# Scikit-Learn Classification
- Pandas Documentation: http://pandas.pydata.org/
- Scikit Learn Documentation: http://scikit-learn.org/stable/documentation.html
- Seaborn Documentation: http://seaborn.pydata.org/
```
import pandas as pd
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
```
## 1. Read data from Files
```
df = pd.read_csv('../data/geoloc_elev.csv')
```
## 2. Quick Look at the data
```
type(df)
df.info()
df.head()
df.tail()
df.describe()
df['source'].value_counts()
df['target'].value_counts()
```
## 3. Visual exploration
```
import seaborn as sns
sns.pairplot(df, hue='target')
```
## 4. Define target
```
y = df['target']
y.head()
```
## 5. Feature engineering
```
raw_features = df.drop('target', axis='columns')
raw_features.head()
```
### 1-hot encoding
```
X = pd.get_dummies(raw_features)
X.head()
```
## 6. Train/Test split
```
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size = 0.3, random_state=0)
```
## 7. Fit a Decision Tree model
```
from sklearn.tree import DecisionTreeClassifier
model = DecisionTreeClassifier(max_depth=3, random_state=0)
model.fit(X_train, y_train)
```
## 8. Accuracy score on benchmark, train and test sets
```
from sklearn.metrics import confusion_matrix, classification_report
y_pred = model.predict(X_test)
cm = confusion_matrix(y_test, y_pred)
pd.DataFrame(cm,
index=["Miss", "Hit"],
columns=['pred_Miss', 'pred_Hit'])
print(classification_report(y_test, y_pred))
```
## 10. Feature Importances
```
importances = pd.Series(model.feature_importances_, index=X.columns)
importances.plot(kind='barh')
```
## 11. Display the decision boundary
```
hticks = np.linspace(-2, 2, 101)
vticks = np.linspace(-2, 2, 101)
aa, bb = np.meshgrid(hticks, vticks)
not_important = np.zeros((len(aa.ravel()), 4))
ab = np.c_[aa.ravel(), bb.ravel(), not_important]
c = model.predict(ab)
cc = c.reshape(aa.shape)
ax = df.plot(kind='scatter', c='target', x='lat', y='lon', cmap='bwr')
ax.contourf(aa, bb, cc, cmap='bwr', alpha=0.2)
```
## Exercise
Iterate and improve on the decision tree model. Now you have a basic pipeline example. How can you improve the score? Try some of the following:
1. change some of the initialization parameters of the decision tree and re-run the code.
- Does the score change?
- Does the decision boundary change?
2. try some other model like Logistic Regression, Random Forest, SVM, Naive Bayes or any other model you like from [here](http://scikit-learn.org/stable/auto_examples/classification/plot_classifier_comparison.html)
3. what's the highest score you can get?
*Copyright © 2017 Francesco Mosconi & CATALIT LLC. All rights reserved.*
| github_jupyter |
_Lambda School Data Science — Model Validation_
# Begin the modeling process
Objectives
- Train/Validate/Test split
- Cross-Validation
- Begin with baselines
## Why care about model validation?
Rachel Thomas, [How (and why) to create a good validation set](https://www.fast.ai/2017/11/13/validation-sets/)
> An all-too-common scenario: a seemingly impressive machine learning model is a complete failure when implemented in production. The fallout includes leaders who are now skeptical of machine learning and reluctant to try it again. How can this happen?
> One of the most likely culprits for this disconnect between results in development vs results in production is a poorly chosen validation set (or even worse, no validation set at all).
Owen Zhang, [Winning Data Science Competitions](https://www.slideshare.net/OwenZhang2/tips-for-data-science-competitions/8)
> Good validation is _more important_ than good models.
James, Witten, Hastie, Tibshirani, [An Introduction to Statistical Learning](http://www-bcf.usc.edu/~gareth/ISL/), Chapter 2.2, Assessing Model Accuracy
> In general, we do not really care how well the method works training on the training data. Rather, _we are interested in the accuracy of the predictions that we obtain when we apply our method to previously unseen test data._ Why is this what we care about?
> Suppose that we are interested in developing an algorithm to predict a stock’s price based on previous stock returns. We can train the method using stock returns from the past 6 months. But we don’t really care how well our method predicts last week’s stock price. We instead care about how well it will predict tomorrow’s price or next month’s price.
> On a similar note, suppose that we have clinical measurements (e.g. weight, blood pressure, height, age, family history of disease) for a number of patients, as well as information about whether each patient has diabetes. We can use these patients to train a statistical learning method to predict risk of diabetes based on clinical measurements. In practice, we want this method to accurately predict diabetes risk for _future patients_ based on their clinical measurements. We are not very interested in whether or not the method accurately predicts diabetes risk for patients used to train the model, since we already know which of those patients have diabetes.
### We'll look at 4 methods of model validation
- Performance estimation
- 2-way holdout method (**train/test split**)
- (Repeated) k-fold **cross-validation without independent test set**
- Model selection (hyperparameter optimization) and performance estimation ← ***We usually want to do this***
- 3-way holdout method (**train/validation/test split**)
- (Repeated) k-fold **cross-validation with independent test set**
<img src="https://sebastianraschka.com/images/blog/2018/model-evaluation-selection-part4/model-eval-conclusions.jpg" width="600">
Source: https://sebastianraschka.com/blog/2018/model-evaluation-selection-part4.html
## Why begin with baselines?
[My mentor](https://www.linkedin.com/in/jason-sanchez-62093847/) [taught me](https://youtu.be/0GrciaGYzV0?t=40s):
>***Your first goal should always, always, always be getting a generalized prediction as fast as possible.*** You shouldn't spend a lot of time trying to tune your model, trying to add features, trying to engineer features, until you've actually gotten one prediction, at least.
> The reason why that's a really good thing is because then ***you'll set a benchmark*** for yourself, and you'll be able to directly see how much effort you put in translates to a better prediction.
> What you'll find by working on many models: some effort you put in, actually has very little effect on how well your final model does at predicting new observations. Whereas some very easy changes actually have a lot of effect. And so you get better at allocating your time more effectively.
My mentor's advice is echoed and elaborated in several sources:
[Always start with a stupid model, no exceptions](https://blog.insightdatascience.com/always-start-with-a-stupid-model-no-exceptions-3a22314b9aaa)
> Why start with a baseline? A baseline will take you less than 1/10th of the time, and could provide up to 90% of the results. A baseline puts a more complex model into context. Baselines are easy to deploy.
[Measure Once, Cut Twice: Moving Towards Iteration in Data Science](https://blog.datarobot.com/measure-once-cut-twice-moving-towards-iteration-in-data-science)
> The iterative approach in data science starts with emphasizing the importance of getting to a first model quickly, rather than starting with the variables and features. Once the first model is built, the work then steadily focuses on continual improvement.
[*Data Science for Business*](https://books.google.com/books?id=4ZctAAAAQBAJ&pg=PT276), Chapter 7.3: Evaluation, Baseline Performance, and Implications for Investments in Data
> *Consider carefully what would be a reasonable baseline against which to compare model performance.* This is important for the data science team in order to understand whether they indeed are improving performance, and is equally important for demonstrating to stakeholders that mining the data has added value.
## Baseline is an overloaded term
Baseline has multiple meanings, as discussed in the links above.
#### The score you'd get by guessing a single value
> A baseline for classification can be the most common class in the training dataset.
> A baseline for regression can be the mean of the training labels. —[Will Koehrsen](https://twitter.com/koehrsen_will/status/1088863527778111488)
#### The score you'd get by guessing in a more granular way
> A baseline for time-series regressions can be the value from the previous timestep.
#### Fast, first models that beat guessing
What my mentor was talking about.
#### Complete, tuned "simpler" model
Can be simpler mathematically and computationally. For example, Logistic Regression versus Deep Learning.
Or can be simpler for the data scientist, with less work. For example, a model with less feature engineering versus a model with more feature engineering.
#### Minimum performance that "matters"
To go to production and get business value.
#### Human-level performance
Your goal may be to match, or nearly match, human performance, but with better speed, cost, or consistency.
Or your goal may be to exceed human performance.
## Weather data — mean baseline
Let's try baselines for regression.
You can [get Past Weather by Zip Code from Climate.gov](https://www.climate.gov/maps-data/dataset/past-weather-zip-code-data-table). I downloaded the data for my town: Normal, Illinois.
```
%matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
url = 'https://raw.githubusercontent.com/rrherr/baselines/master/weather/weather-normal-il.csv'
weather = pd.read_csv(url, parse_dates=['DATE']).set_index('DATE')
weather['2015':'2018'].TMAX.plot()
plt.title('Daily high temperature in Normal, IL');
```
Over the years, across the seasons, the average daily high temperature in my town is about 63 degrees.
```
weather.TMAX.mean()
```
If I predicted that every day, the high will be 63 degrees, I'd be off by about 19 degrees on average.
```
from sklearn.metrics import mean_absolute_error
# Climatology-style baseline: predict the all-time mean TMAX for every day,
# then measure the mean absolute error of that constant prediction.
predicted = [weather.TMAX.mean()] * len(weather)
mean_absolute_error(weather.TMAX, predicted)
```
But, with time series data like this, we can get a better baseline.
*Data Science for Business* explains,
> Weather forecasters have two simple—but not simplistic—baseline models that they compare against. ***One (persistence) predicts that the weather tomorrow is going to be whatever it was today.*** The other (climatology) predicts whatever the average historical weather has been on this day from prior years. Each model performs considerably better than random guessing, and both are so easy to compute that they make natural baselines of comparison. Any new, more complex model must beat these.
Let's predict that the weather tomorrow is going to be whatever it was today. Which is another way of saying that the weather today is going to be whatever it was yesterday.
We can engineer this feature with one line of code, using the pandas [`shift`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.shift.html) function.
This new baseline is off by less than 6 degress on average.
```
# Persistence baseline: predict that today's high equals yesterday's high.
# shift(1) moves each TMAX value down one row, aligning yesterday's value
# with today's date.
weather['TMAX_yesterday'] = weather.TMAX.shift(1)
weather.dropna(inplace=True) # Drops the first date, because it doesn't have a "yesterday"
mean_absolute_error(weather.TMAX, weather.TMAX_yesterday)
```
## Adult Census Income — Train/Test Split — majority class baseline
Load the data. It has already been split into train and test.
https://archive.ics.uci.edu/ml/datasets/adult
```
# Column names for the UCI Adult census dataset (the raw files have no header).
names = ['age',
'workclass',
'fnlwgt',
'education',
'education-num',
'marital-status',
'occupation',
'relationship',
'race',
'sex',
'capital-gain',
'capital-loss',
'hours-per-week',
'native-country',
'income']
train = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data', header=None, names=names)
# skiprows=[0]: the test file's first line is a banner comment, not a data row.
test = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test', header=None, names=names, skiprows=[0])
train.shape, test.shape
```
Assign to X and y
```
# Binary target: True when income is ">50K". Note the raw label strings carry
# a leading space, and the test file's labels have a trailing period
# (' >50K.'), hence the two different comparison strings below.
X_train = train.drop(columns='income')
y_train = train.income == ' >50K'
X_test = test.drop(columns='income')
y_test = test.income == ' >50K.'
X_train.shape, X_test.shape, y_train.shape, y_test.shape
```
Majority class baseline
```
# TODO
```
`y_pred` has the same shape as `y_test`
```
# TODO
```
all predictions are the majority class
```
# TODO
```
Baseline accuracy is __% by guessing the majority class for every prediction
```
# TODO
```
This makes sense, because the majority class occurs __% of the time in the test dataset
```
# TODO
```
## Adult Census Income — Train/Test Split — fast first models
Scikit-learn expects no nulls...
```
def no_nulls(df):
    """Return True when *df* contains no missing values in any column."""
    per_column_nulls = df.isnull().sum()
    return not any(count for count in per_column_nulls)
```
Scikit-learn also expects all numeric features. (No strings / "object" datatypes.) ...
```
def all_numeric(df):
    """Return True when every column of *df* has a numeric dtype."""
    from pandas.api.types import is_numeric_dtype
    non_numeric_cols = [col for col in df if not is_numeric_dtype(df[col])]
    return len(non_numeric_cols) == 0
```
Instead of encoding nonnumeric features, we can just exclude them from the training data.
```
# TODO
```
We'll do the same with the test data.
```
# TODO
```
Then fit a Logistic Regression on the training data (only the numeric features).
Test accuracy improves to ___
```
# TODO
```
Let's try scaling our data first.
Test accuracy improves to ___
```
# TODO
```
One-hot-encode all the categorical featues.
Install the Category Encoder library
https://github.com/scikit-learn-contrib/categorical-encoding
```
!pip install category_encoders
import category_encoders as ce
```
Add the library's OneHotEncoder to a pipeline.
Before, we fit on `X_train_numeric` and predict on `X_test_numeric`.
Now, we fit on `X_train` and predict on `X_test` (the original dataframes which include categorical columns)
Test accuracy improves to ___
```
# TODO
```
Visualize coefficients
```
plt.figure(figsize=(10,30))
plt.title('Coefficients')
coefficients = pipeline.named_steps['logisticregression'].coef_[0]
feature_names = pipeline.named_steps['onehotencoder'].transform(X_train).columns
pd.Series(coefficients, feature_names).sort_values().plot.barh(color='gray');
```
## Adult Census Income — Cross Validation with independent test set — fast first models
`cross_val_score(pipeline, X_train, y_train, cv=10)` repeats this process 10 times:
- Use 9/10 of the training data to fit the model pipeline
- Use 1/10 of the training data to predict and score the model pipeline
The test data is not used here — it remains independent, held out.
```
# TODO
```
`cross_val_score(cv=10)` returns 10 scores
```
# TODO
```
<img src="https://sebastianraschka.com/images/blog/2016/model-evaluation-selection-part3/loocv.png" width="400">
Source: https://sebastianraschka.com/blog/2016/model-evaluation-selection-part3.html
The scores have low variance.
```
# TODO
```
<img src="https://sebastianraschka.com/images/blog/2016/model-evaluation-selection-part2/visual_bias_variance.png" width="400">
Source: https://sebastianraschka.com/blog/2016/model-evaluation-selection-part2.html
## Adult Census Income — Train/Validation/Test split — fast first models
How to get from a two-way split ...
```
X_train.shape, X_test.shape, y_train.shape, y_test.shape
```
... to a three-way split?
Can use the `sklearn.model_selection.train_test_split` function to split the training data into training and validation data.
```
from sklearn.model_selection import train_test_split
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train)
X_train.shape, X_val.shape, X_test.shape, y_train.shape, y_val.shape, y_test.shape
```
Fit on the training set.
Predict and score with the validation set.
Do not use the test set.
```
pipeline.fit(X_train, y_train)
y_pred = pipeline.predict(X_val)
accuracy_score(y_val, y_pred)
```
## What to do with the test set?
Hold it out. Keep it in a "vault." Don't touch it until you're done with your models.
Here's one way to save your test set for later, with the feather file format.
https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#io-feather
```
!pip install -U feather-format
X_test.reset_index(drop=True).to_feather('X_test.feather')
pd.DataFrame(y_test).reset_index(drop=True).to_feather('y_test.feather')
```
You can save the files from Google Colab to your local machine
```
from google.colab import files
files.download('X_test.feather')
files.download('y_test.feather')
```
You can delete the variables from your notebook's runtime
```
del X_test, y_test
```
## Why hold out an independent test set?
Owen Zhang, [Winning Data Science Competitions](https://www.slideshare.net/OwenZhang2/tips-for-data-science-competitions)
- There are many ways to overfit. Beware of "multiple comparison fallacy." There is a cost in "peeking at the answer."
- Good validation is _more important_ than good models. Simple training/validation split is _not_ enough. When you looked at your validation result for the Nth time, you are training models on it.
- If possible, have "holdout" dataset that you do not touch at all during model build process. This includes feature extraction, etc.
- What if holdout result is bad? Be brave and scrap the project.
Hastie, Tibshirani, and Friedman, [The Elements of Statistical Learning](http://statweb.stanford.edu/~tibs/ElemStatLearn/), Chapter 7: Model Assessment and Selection
> If we are in a data-rich situation, the best approach is to randomly divide the dataset into three parts: a training set, a validation set, and a test set. The training set is used to fit the models; the validation set is used to estimate prediction error for model selection; the test set is used for assessment of the generalization error of the final chosen model. Ideally, the test set should be kept in a "vault," and be brought out only at the end of the data analysis. Suppose instead that we use the test-set repeatedly, choosing the model with the smallest test-set error. Then the test set error of the final chosen model will underestimate the true test error, sometimes substantially.
Andreas Mueller and Sarah Guido, [Introduction to Machine Learning with Python](https://books.google.com/books?id=1-4lDQAAQBAJ&pg=PA270)
> The distinction between the training set, validation set, and test set is fundamentally important to applying machine learning methods in practice. Any choices made based on the test set accuracy "leak" information from the test set into the model. Therefore, it is important to keep a separate test set, which is only used for the final evaluation. It is good practice to do all exploratory analysis and model selection using the combination of a training and a validation set, and reserve the test set for a final evaluation - this is even true for exploratory visualization. Strictly speaking, evaluating more than one model on the test set and choosing the better of the two will result in an overly optimistic estimate of how accurate the model is.
Hadley Wickham, [R for Data Science](https://r4ds.had.co.nz/model-intro.html#hypothesis-generation-vs.hypothesis-confirmation)
> There is a pair of ideas that you must understand in order to do inference correctly:
> 1. Each observation can either be used for exploration or confirmation, not both.
> 2. You can use an observation as many times as you like for exploration, but you can only use it once for confirmation. As soon as you use an observation twice, you’ve switched from confirmation to exploration.
> This is necessary because to confirm a hypothesis you must use data independent of the data that you used to generate the hypothesis. Otherwise you will be over optimistic. There is absolutely nothing wrong with exploration, but you should never sell an exploratory analysis as a confirmatory analysis because it is fundamentally misleading.
> If you are serious about doing an confirmatory analysis, one approach is to split your data into three pieces before you begin the analysis.
## Should you shuffle?
Sometimes yes, sometimes no. Experts may seem to give conflicting advice! Partly, that perception is true. Partly, the right choices depend on your data and your goals.
[Hastie, Tibshirani, and Friedman](http://statweb.stanford.edu/~tibs/ElemStatLearn/) write,
> randomly divide the dataset into three parts: a training set, a validation set, and a test set.
But [Rachel Thomas](
https://www.fast.ai/2017/11/13/validation-sets/) asks, "When is a random subset not good enough?" and gives many good examples.
> If your data is a time series, choosing a random subset of the data will be both too easy (you can look at the data both before and after the dates your are trying to predict) and not representative of most business use cases (where you are using historical data to build a model for use in the future).
> In the Kaggle distracted driver competition, the independent data are pictures of drivers at the wheel of a car, and the dependent variable is a category such as texting, eating, or safely looking ahead. If you were the insurance company building a model from this data, note that you would be most interested in how the model performs on drivers you haven’t seen before (since you would likely have training data only for a small group of people).
> A similar dynamic was at work in the Kaggle fisheries competition to identify the species of fish caught by fishing boats in order to reduce illegal fishing of endangered populations. The test set consisted of boats that didn’t appear in the training data. This means that you’d want your validation set to include boats that are not in the training set.
[Owen Zhang](https://www.slideshare.net/OwenZhang2/tips-for-data-science-competitions/10) recommends using your most recent data for your hold out test set, instead of choosing your test set with random shuffling. But he says you are free to use random splits to train and tune models.
> Make validation dataset as realistic as possible. Usually this means "out-of-time" validation. You are free to use "in-time" random split to build models, tune parameters, etc. But hold out data should be out-of-time.
Note that Owen Zhang's slide could be confusing, because of ambiguous terminology:
- What he calls "validation dataset" and "hold out data" is what we're calling "test set."
- He also says "cross-validation" is an "exception to the rule" to use "when data is extremely small." There he is talking about "Cross-validation _without_ independent test set", _not_ "Cross-validation _with_ independent test set."
**Sometimes you need to shuffle, like in this next example:**
## Iris flowers — Train/Validation/Test split?
Load the Iris dataset
```
from sklearn.datasets import load_iris
iris = load_iris()
X = iris.data
y = iris.target
```
What would happen if you didn't shuffle this data?
```
y
```
Let's try it!
We'll do a train/validation/test split, with and without random shuffling.
[Rachel Thomas]( https://www.fast.ai/2017/11/13/validation-sets/) points out that "sklearn has a `train_test_split` method, but no `train_validation_test_split`."
So we can write our own:
```
def train_validation_test_split(
    X, y, train_size=0.8, val_size=0.1, test_size=0.1,
    random_state=None, shuffle=True):
    """Split features and labels into train / validation / test sets.

    Works like sklearn's ``train_test_split`` but produces a three-way split
    by splitting twice. The three size fractions must sum to 1.

    Returns: X_train, X_val, X_test, y_train, y_val, y_test.
    """
    # Bug fix: use a tolerance instead of exact float equality. Valid inputs
    # such as train=0.7, val=0.2, test=0.1 sum to 0.9999999999999999 in
    # binary floating point and would fail `== 1`.
    assert abs(train_size + val_size + test_size - 1.0) < 1e-9
    # First carve off the test set from the full data...
    X_train_val, X_test, y_train_val, y_test = train_test_split(
        X, y, test_size=test_size, random_state=random_state, shuffle=shuffle)
    # ...then split the remainder; val_size is rescaled because it is now a
    # fraction of the train+val pool, not of the whole dataset.
    X_train, X_val, y_train, y_val = train_test_split(
        X_train_val, y_train_val, test_size=val_size/(train_size+val_size),
        random_state=random_state, shuffle=shuffle)
    return X_train, X_val, X_test, y_train, y_val, y_test
```
Split Iris data into train/validation/test sets, _without_ random shuffling
```
X_train, X_val, X_test, y_train, y_val, y_test = train_validation_test_split(
X, y, shuffle=False)
```
Look at the train, validation, and test targets
```
y_train
y_val
y_test
```
Split Iris data into train/validation/test sets, _with_ random shuffling
```
X_train, X_val, X_test, y_train, y_val, y_test = train_validation_test_split(
X, y, shuffle=True)
```
Look at the train, validation, and test targets
```
y_train
y_val
y_test
```
That's better, but there's just not enough data for a three-way split. In fact, there's not much data for a two-way split. The Iris dataset is a rare example where you probably do want to use cross-validation _without_ an independent test set.
## Bank Marketing — shuffled or split by time?
https://archive.ics.uci.edu/ml/datasets/Bank+Marketing
The data is related with direct marketing campaigns of a Portuguese banking institution. The marketing campaigns were based on phone calls. Often, more than one contact to the same client was required, in order to access if the product (bank term deposit) would be ('yes') or not ('no') subscribed.
bank-additional-full.csv with all examples (41188) and 20 inputs, **ordered by date (from May 2008 to November 2010)**
```
!wget https://archive.ics.uci.edu/ml/machine-learning-databases/00222/bank-additional.zip
!unzip bank-additional.zip
%cd bank-additional
bank = pd.read_csv('bank-additional-full.csv', sep=';')
X = bank.drop(columns='y')
y = bank['y'] == 'yes'
```
### Shuffled split?
```
X_train, X_val, X_test, y_train, y_val, y_test = train_validation_test_split(X, y, shuffle=True)
[array.shape for array in (X_train, X_val, X_test, y_train, y_val, y_test)]
y_train.mean(), y_val.mean(), y_test.mean()
```
### Split by time?
```
X_train, X_val, X_test, y_train, y_val, y_test = train_validation_test_split(X, y, shuffle=False)
[array.shape for array in (X_train, X_val, X_test, y_train, y_val, y_test)]
y_train.mean(), y_val.mean(), y_test.mean()
y.rolling(500).mean().plot();
```
***The "right" choice here is unclear, but either way, it will make a big difference!***
# ASSIGNMENT options
- Replicate the lesson code. [Do it "the hard way" or with the "Benjamin Franklin method."](https://docs.google.com/document/d/1ubOw9B3Hfip27hF2ZFnW3a3z9xAgrUDRReOEo-FHCVs/edit)
- Apply the lesson to other datasets you've worked with before, and compare results.
- Choose how to split the Bank Marketing dataset. Train and validate baseline models.
- Get weather data for your own area and calculate both baselines. _"One (persistence) predicts that the weather tomorrow is going to be whatever it was today. The other (climatology) predicts whatever the average historical weather has been on this day from prior years."_ What is the mean absolute error for each baseline? What if you average the two together?
- When would this notebook's pipelines fail? How could you fix them? Add more [preprocessing](https://scikit-learn.org/stable/modules/preprocessing.html) and [imputation](https://scikit-learn.org/stable/modules/impute.html) to your [pipelines](https://scikit-learn.org/stable/modules/compose.html) with scikit-learn.
- [This example from scikit-learn documentation](https://scikit-learn.org/stable/auto_examples/compose/plot_column_transformer_mixed_types.html) demonstrates its improved `OneHotEncoder` and new `ColumnTransformer` objects, which can replace functionality from third-party libraries like category_encoders and sklearn-pandas. Adapt this example, which uses Titanic data, to work with another dataset.
| github_jupyter |
# LSTM Stock Predictor Using Closing Prices
In this notebook, you will build and train a custom LSTM RNN that uses a 10 day window of Bitcoin closing prices to predict the 11th day closing price.
You will need to:
1. Prepare the data for training and testing
2. Build and train a custom LSTM RNN
3. Evaluate the performance of the model
## Data Preparation
In this section, you will need to prepare the training and testing data for the model. The model will use a rolling 10 day window to predict the 11th day closing price.
You will need to:
1. Use the `window_data` function to generate the X and y values for the model.
2. Split the data into 70% training and 30% testing
3. Apply the MinMaxScaler to the X and y values
4. Reshape the X_train and X_test data for the model. Note: The required input format for the LSTM is:
```python
reshape((X_train.shape[0], X_train.shape[1], 1))
```
```
# --- Setup: imports, reproducibility seeds, and raw data loading ---
import numpy as np
import pandas as pd
%matplotlib inline
# Set the random seed for reproducibility
# Note: This is for the homework solution, but it is good practice to comment this out and run multiple experiments to evaluate your model
from numpy.random import seed
seed(1)
from tensorflow import random
random.set_seed(2)
# Load the fear and greed sentiment data for Bitcoin
# (index on the date column so the two frames can be joined by date below)
# NOTE(review): `infer_datetime_format` is deprecated in recent pandas — verify against the pinned pandas version.
df = pd.read_csv('btc_sentiment.csv', index_col="date", infer_datetime_format=True, parse_dates=True)
# Drop the categorical label column; only the numeric fng_value is kept
df = df.drop(columns="fng_classification")
df.head()
# Load the historical closing prices for Bitcoin (keep only the 'Close' series)
df2 = pd.read_csv('btc_historic.csv', index_col="Date", infer_datetime_format=True, parse_dates=True)['Close']
df2 = df2.sort_index()  # ensure chronological order before joining/windowing
df2.tail()
# Join the data into a single DataFrame (inner join keeps only dates present in both)
df = df.join(df2, how="inner")
df.tail()
df.head()
# This function accepts the column number for the features (X) and the target (y)
# It chunks the data up with a rolling window of Xt-n to predict Xt
# It returns a numpy array of X and y
def window_data(df, window, feature_col_number, target_col_number):
    """Build rolling-window samples from a DataFrame.

    Each sample X[i] holds `window` consecutive values of the feature column
    (rows i .. i+window-1), and y[i] is the target-column value at row
    i+window (the "next" observation after the window).

    Args:
        df: source DataFrame, rows in chronological order.
        window: number of past observations per sample.
        feature_col_number: positional index of the feature column.
        target_col_number: positional index of the target column.

    Returns:
        (X, y): X with shape (n_samples, window); y with shape (n_samples, 1).
    """
    X = []
    y = []
    # Fix: iterate up to len(df) - window. The original bound
    # `len(df) - window - 1` silently dropped the final usable
    # window/target pair (off-by-one).
    for i in range(len(df) - window):
        features = df.iloc[i:(i + window), feature_col_number]
        target = df.iloc[(i + window), target_col_number]
        X.append(features)
        y.append(target)
    return np.array(X), np.array(y).reshape(-1, 1)
# Predict Closing Prices using a 10 day window of previous closing prices
# Then, experiment with window sizes anywhere from 1 to 10 and see how the model performance changes
window_size = 10
# Column index 0 is the 'fng_value' column
# Column index 1 is the `Close` column
feature_column = 1
target_column = 1
# Feature and target are both the Close column: predict the next close from the prior 10 closes.
X, y = window_data(df, window_size, feature_column, target_column)
# Use 70% of the data for training and the remainder for testing
# (chronological split, no shuffling — required for time series)
split = int(0.7 * len(X))
X_train = X[: split]
X_test = X[split:]
y_train = y[: split]
y_test = y[split:]
from sklearn.preprocessing import MinMaxScaler
# Use the MinMaxScaler to scale data between 0 and 1.
# Creating a MinMaxScaler object
scaler = MinMaxScaler()
# Fitting the MinMaxScaler object with the features data X
# NOTE(review): fitting on the full X (train + test rows) leaks the test-set
# min/max into the transform; fitting on X_train only would avoid this.
scaler.fit(X)
# Scaling the features training and testing sets
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
# Fitting the MinMaxScaler object with the target data Y
# (re-fits the same scaler object: it now holds the y min/max, which is what
# the later inverse_transform calls rely on)
scaler.fit(y)
# Scaling the target training and testing sets
y_train_scaled = scaler.transform(y_train)
y_test_scaled = scaler.transform(y_test)
# Reshape the features for the model
# LSTM expects (samples, time steps, features); here features = 1
X_train_scaled = X_train_scaled.reshape((X_train_scaled.shape[0], X_train_scaled.shape[1], 1))
X_test_scaled = X_test_scaled.reshape((X_test_scaled.shape[0], X_test_scaled.shape[1], 1))
```
---
## Build and Train the LSTM RNN
In this section, you will design a custom LSTM RNN and fit (train) it using the training data.
You will need to:
1. Define the model architecture
2. Compile the model
3. Fit the model to the training data
### Hints:
You will want to use the same model architecture and random seed for both notebooks. This is necessary to accurately compare the performance of the FNG model vs the closing price model.
```
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense, Dropout

# LSTM RNN for next-day closing-price regression:
# three stacked LSTM layers (50 units each), dropout after every layer,
# and a single-unit regression head.
# - return_sequences must be True on every LSTM except the last one.
# - The dropouts help prevent overfitting.
# - Input shape is (time steps, indicators) = (window size, 1); batched
#   inputs arrive as Samples/TimeSteps/Features.
number_units = 50
dropout_fraction = 0.2
stacked_layers = 3

model = Sequential()
for layer_idx in range(stacked_layers):
    lstm_kwargs = {"units": number_units}
    if layer_idx == 0:
        # First layer declares the input shape for the whole network.
        lstm_kwargs["input_shape"] = (X_train_scaled.shape[1], 1)
    if layer_idx < stacked_layers - 1:
        lstm_kwargs["return_sequences"] = True
    model.add(LSTM(**lstm_kwargs))
    model.add(Dropout(dropout_fraction))
# Output layer: one unit (scaled price regression)
model.add(Dense(1))

# Compile the model
model.compile(optimizer="adam", loss="mean_squared_error")
# Summarize the model
model.summary()

# Train the model: at least 10 epochs, no shuffling (time series).
# Experiment with the batch size, but a smaller batch size is recommended.
model.fit(X_train_scaled, y_train_scaled, epochs=50, shuffle=False, batch_size=10, verbose=1)
```
---
## Model Performance
In this section, you will evaluate the model using the test data.
You will need to:
1. Evaluate the model using the `X_test` and `y_test` data.
2. Use the X_test data to make predictions
3. Create a DataFrame of Real (y_test) vs predicted values.
4. Plot the Real vs predicted values as a line chart
### Hints
Remember to apply the `inverse_transform` function to the predicted and y_test values to recover the actual closing prices.
```
# Evaluate the model (reports MSE loss on the scaled test set)
model.evaluate(X_test_scaled, y_test_scaled)
# Make some predictions
predicted = model.predict(X_test_scaled)
# Undo the MinMax scaling to recover dollar prices.
# NOTE(review): `scaler` was last fitted on y, so inverse_transform maps back
# to the target's original scale — do not re-fit the scaler in between.
predicted_prices = scaler.inverse_transform(predicted)
real_prices = scaler.inverse_transform(y_test_scaled.reshape(-1, 1))
# Create a DataFrame of Real and Predicted values
# (index: last len(real_prices) dates of df — valid because the test rows are
# the final contiguous slice of the chronologically split data)
stocks = pd.DataFrame({
    "Real": real_prices.ravel(),
    "Predicted": predicted_prices.ravel()
}, index = df.index[-len(real_prices): ])
stocks.head()
# Plot the real vs predicted values as a line chart
stocks.plot(title="Real Vs. Predicted Prices")
```
| github_jupyter |
# Markov-Chain Monte Carlo (MCMC)
## Lotka-Volterra Model
\begin{equation}
\begin{split}
\frac{du}{dt} =& (\alpha-\beta v)\,u\\
\frac{dv}{dt} =& (-\gamma+\delta u)\,v
\end{split}
\end{equation}
```
import argparse
import os
import matplotlib
import matplotlib.pyplot as plt
from jax.experimental.ode import odeint
import jax.numpy as jnp
from jax.random import PRNGKey
import numpyro
import numpyro.distributions as dist
from numpyro.examples.datasets import LYNXHARE, load_dataset
from numpyro.infer import MCMC, NUTS, Predictive
def dz_dt(z, t, theta):
    """
    Right-hand side of the Lotka–Volterra equations. Real positive parameters
    `alpha`, `beta`, `gamma`, `delta` describe the interaction of two species
    (prey u = z[0], predator v = z[1]). `t` is unused but required by the
    odeint callback signature.
    """
    prey = z[0]
    predator = z[1]
    alpha = theta[..., 0]
    beta = theta[..., 1]
    gamma = theta[..., 2]
    delta = theta[..., 3]
    prey_rate = (alpha - beta * predator) * prey
    predator_rate = (-gamma + delta * prey) * predator
    return jnp.stack([prey_rate, predator_rate])
def model(N, y=None):
    """
    NumPyro model: Lotka-Volterra dynamics with log-normal observation noise.

    :param int N: number of measurement times
    :param numpy.ndarray y: measured populations with shape (N, 2),
        given in log scale (the likelihood below is on jnp.log(z));
        None for prior/posterior-predictive sampling.
    """
    # initial population (positive by construction via LogNormal)
    z_init = numpyro.sample("z_init", dist.LogNormal(jnp.log(10), 1), sample_shape=(2,))
    # measurement times (one observation per year index 0..N-1)
    ts = jnp.arange(float(N))
    # parameters alpha, beta, gamma, delta of dz_dt, truncated at 0 to keep
    # the interaction rates positive
    theta = numpyro.sample(
        "theta",
        dist.TruncatedNormal(low=0., loc=jnp.array([0.5, 0.05, 1.5, 0.05]),
                             scale=jnp.array([0.5, 0.05, 0.5, 0.05])))
    # integrate dz/dt, the result will have shape N x 2
    z = odeint(dz_dt, z_init, ts, theta, rtol=1e-5, atol=1e-3, mxstep=500)
    # measurement errors, we expect that measured hare has larger error than measured lynx
    # (rate 1 vs rate 2 -> larger expected sigma for column 0 / hare)
    sigma = numpyro.sample("sigma", dist.Exponential(jnp.array([1, 2])))
    # measured populations (in log scale)
    numpyro.sample( "y" , dist.Normal(jnp.log(z), sigma), obs=y)
# --- MCMC configuration: 2 chains, 200 warmup + 1000 kept samples, on CPU ---
device = 'cpu'
num_samples = 1000
num_chains = 2
num_warmup = 200
numpyro.set_platform(device)
numpyro.set_host_device_count(num_chains)
# Load the hare/lynx pelt dataset bundled with numpyro
_, fetch = load_dataset(LYNXHARE, shuffle=False)
year, data = fetch() # data is in hare -> lynx order
# use dense_mass for better mixing rate
# NOTE(review): num_warmup/num_samples are passed positionally here; recent
# numpyro releases require them as keyword arguments — verify against the
# pinned numpyro version.
mcmc = MCMC(NUTS(model, dense_mass=True),
            num_warmup, num_samples, num_chains=num_chains, progress_bar= True)
# Observations are passed in log scale, matching the model's likelihood
mcmc.run(PRNGKey(1), N=data.shape[0], y=jnp.log(data))
mcmc.print_summary()
# predict populations: posterior-predictive draws of log-populations
ax = plt.figure(figsize=(15.5, 7.2))
y_pred = Predictive(model, mcmc.get_samples())(PRNGKey(2), data.shape[0])["y"]
pop_pred = jnp.exp(y_pred)  # back to counts
# mean plus 10%/90% quantiles -> an 80% posterior-predictive band
mu, pi = jnp.mean(pop_pred, 0), jnp.percentile(pop_pred, (10, 90), 0)
plt.plot(year, data[:, 0], "ko", mfc="none", ms=4, label="true hare", alpha=0.67)
plt.plot(year, data[:, 1], "bx", label="true lynx")
plt.plot(year, mu[:, 0], "k-.", label="pred hare", lw=1, alpha=0.67)
plt.plot(year, mu[:, 1], "b--", label="pred lynx")
plt.fill_between(year, pi[0, :, 0], pi[1, :, 0], color="k", alpha=0.2)
plt.fill_between(year, pi[0, :, 1], pi[1, :, 1], color="b", alpha=0.3)
plt.gca().set(ylim=(0, 160), xlabel="year", ylabel="population (in thousands)")
plt.title("Posterior predictive (80% CI) with predator-prey pattern.")
plt.legend()
plt.tight_layout()
plt.show()
mcmc.print_summary()
# Trace plots (left column) and marginal histograms (right column) for the
# four Lotka-Volterra parameters alpha, beta, gamma, delta.
params = mcmc.get_samples()['theta']
fig, ax = plt.subplots(4, 2, figsize=(20, 40))
for i in range(4):
    ax[i, 0].plot(params[:num_samples, i])
    ax[i, 1].hist(params[:num_samples, i])
    ax[i, 0].set_title('Trace plot')
    ax[i, 0].set_xlabel('Chain Iter')
    # Fix: the 'Value' y-label belongs on the trace plot (column 0); the
    # original set ax[i,1]'s ylabel to 'Value' and then immediately
    # overwrote it with 'Freq', leaving column 0 unlabeled.
    ax[i, 0].set_ylabel('Value')
    ax[i, 1].set_ylabel('Freq')
plt.show()
# (removed stray no-op expression `params.all` — a leftover cell artifact
# that evaluated a bound method without calling it)
```
| github_jupyter |
# mpra_analyze_SL
Margaret Guo
12/10/2020
```
# --- Setup: MPRAnalyze plus tidyverse helpers used throughout the analysis ---
library(MPRAnalyze)
library(tidyverse)
library(pheatmap)
# library(mpra)
# library(QuASAR)
# library(BiocParallel)
# library(stringr)
# library(reshape2)
# library(ggplot2)
# library(gridExtra)
# library(VennDiagram)
# Sanity-check the working directory before reading the relative paths below
getwd()
list.files(getwd())
# RNA count matrix: first CSV column supplies the row names
count_rna = read.csv('dedup_counts/count_matrix.csv',row.names=1)
head(count_rna)
colnames(count_rna)
# Collapse a barcode-level MiSeq count dataframe into:
#   [[1]] colAnnon   - per-column annotations (allele, bc_id, barcode_allelic factor)
#   [[2]] countTable - SNP x allele_barcode count table (NA counts replaced by 0)
# `miseq_out` must contain: id ("chr.loc.alleleInfo.letter.bcId") and count.
get_tables = function(miseq_out){
    miseq_out$id = as.character(miseq_out$id)
    # NOTE(review): grepl() below reads the *outer* miseq_out$id vector, which
    # only lines up row-for-row because no rows have been added or dropped by
    # the preceding pipeline steps — confirm before reordering this pipeline.
    miseq_out = miseq_out%>%
    select(id, count)%>%
    separate(id, c("chr", "loc", "allele_info", "letter", "bc_id"), sep='[.]')%>%
    mutate(allele = ifelse(grepl("Ref",miseq_out$id),'ref','alt'))%>%
    mutate(column = str_c(allele, bc_id, sep = '_'))%>%
    mutate(snp_name = str_c(chr, loc, sep = '_'))%>%
    select(snp_name, count, column, allele, bc_id )%>%
    # sum counts over duplicate (snp_name, column) rows
    group_by(snp_name, column)%>%
    summarise(count = sum(count),
             allele = unique(allele),
             bc_id = unique(bc_id))%>%
    ungroup()
    # column annotations: one row per allele_barcode column, rownames = column
    colAnnon = miseq_out%>%
    select( -snp_name,-count)%>%
    mutate(barcode_allelic = as.factor(str_c(allele, bc_id, sep = '_')))%>%
    distinct()%>%
    arrange(column)%>%
    column_to_rownames('column')
    # wide count table: rows = SNPs, columns = allele_barcode, missing -> 0
    countTable = miseq_out%>%
    select(-allele, -bc_id)%>%
    spread(column,count)%>%
    replace(is.na(.), 0)%>%
    arrange(snp_name)%>%
    column_to_rownames('snp_name')
    # countTable = as.matrix(countTable)
    return(list(colAnnon, countTable))
}
# Same as get_tables(), but keeps a `batch` column: count columns are named
# allele_bcid_batch so replicates from different batches stay separate, and
# colAnnon additionally carries batch for MPRAnalyze depth estimation.
# `miseq_out` must contain: id, count, and batch.
get_tables_withbatch = function(miseq_out){
    miseq_out$id = as.character(miseq_out$id)
    # NOTE(review): as in get_tables(), grepl() reads the outer miseq_out$id
    # and relies on the row order being unchanged up to that point.
    miseq_out = miseq_out%>%
    select(id, count,batch)%>%
    separate(id, c("chr", "loc", "allele_info", "letter", "bc_id"), sep='[.]')%>%
    mutate(allele = ifelse(grepl("Ref",miseq_out$id),'ref','alt'))%>%
    mutate(column = str_c(allele, bc_id, batch, sep = '_'))%>%
    mutate(snp_name = str_c(chr, loc, sep = '_'))%>%
    select(snp_name, count, column, allele, bc_id, batch )%>%
    # sum counts over duplicate (snp_name, column, batch) rows
    group_by(snp_name, column, batch)%>%
    summarise(count = sum(count),
             allele = unique(allele),
             bc_id = unique(bc_id))%>%
    ungroup()
    # column annotations: allele, bc_id, batch, plus barcode_allelic factor
    colAnnon = miseq_out%>%
    select( -snp_name,-count)%>%
    mutate(barcode_allelic = as.factor(str_c(allele, bc_id, sep = '_')))%>%
    distinct()%>%
    arrange(column)%>%
    column_to_rownames('column')
    # wide count table: rows = SNPs, columns = allele_bcid_batch, missing -> 0
    countTable = miseq_out%>%
    select(-allele, -bc_id, -batch)%>%
    spread(column,count)%>%
    replace(is.na(.), 0)%>%
    arrange(snp_name)%>%
    column_to_rownames('snp_name')
    # countTable = as.matrix(countTable)
    return(list(colAnnon, countTable))
}
# read_rna_count_csv = function(rna_count_csv){
# }
# read in dna: plasmid (DNA) library counts from the MiSeq QC run
dna_df = read.csv('../miseq_mg_081019/libpsych_plasmid_qc_701_501_count.csv')
result_dna = get_tables(dna_df)
colAnnon_dna = result_dna[[1]]  # per-column allele/barcode annotations
dnaCount = result_dna[[2]]      # SNP x allele_barcode DNA count table
colAnnon_dna
```
### IMPORTANT ADD IN TISSUE SPECIFIC INFO BELOW
#### HEK293T
save_prefix = 'mpraanalyze_HEK293_'
sample_ids = c('HEK293_1', 'HEK293_2', 'HEK293_3')
#### Astrocytes (AST)
save_prefix = 'mpraanalyze_AST1_'
sample_ids = c('AST_1', 'AST_2')
#### H9-D0
save_prefix = 'mpraanalyze_H9D0_'
sample_ids = c('H9_D0_1A', 'H9_D0_1B', 'H9_D0_2')
#### H9-D2
save_prefix = 'mpraanalyze_H9D2_'
sample_ids = c('H9_D2_1', 'H9_D2_2', 'H9_D2_3', 'H9_D2_4')
#### H9-D4 (removed H9_D4_3A because of clustering issues)
save_prefix = 'mpraanalyze_H9D4_'
sample_ids = c('H4_D4_1', 'H9_D4_2', 'H9_D4_3B')
#### H9-D10 (removed H9_D10_2 because of clustering issues)
save_prefix = 'mpraanalyze_H9D10_'
sample_ids = c('H9_D10_1', 'H9_D10_4', 'H9_D10_3')
#### SL
save_prefix = 'mpraanalyze_SL_'
sample_ids = c('SL_1', 'SL_2', 'SL_3A', 'SL_3B', 'SL_4')
#### SLC
save_prefix = 'mpraanalyze_SLC_'
sample_ids = c('SLC_1', 'SLC_2', 'SLC_3', 'SLC_4')
```
#### SL
save_prefix = 'D_mpraanalyze_barcode_allelic/mpraanalyze_SL_'
sample_ids = c('SL_1', 'SL_2', 'SL_3A', 'SL_3B', 'SL_4')
tissue = 'SL'
# Accumulate per-replicate RNA counts (both sequencing lanes summed per id)
rna_df= data.frame()
for (sample_id in sample_ids){
    # read in each lane
    rna_df_rep_L1 = read.csv(paste0('dedup_counts/',sample_id, '_L1_libpsych_virus_dupumi_count.csv'))
    rna_df_rep_L1 = dplyr::select(rna_df_rep_L1, id, count)
    rna_df_rep_L2 = read.csv(paste0('dedup_counts/',sample_id, '_L2_libpsych_virus_dupumi_count.csv'))
    rna_df_rep_L2 = dplyr::select(rna_df_rep_L2, id, count)
    # merge lanes by summing and add a variable indicating batch
    # (full_join keeps ids seen in only one lane; their missing count -> 0)
    rna_df_rep = full_join(rna_df_rep_L1,rna_df_rep_L2,by='id')
    rna_df_rep$count.x[is.na(rna_df_rep$count.x)] = 0
    rna_df_rep$count.y[is.na(rna_df_rep$count.y)] = 0
    rna_df_rep$count= rna_df_rep$count.x +rna_df_rep$count.y
    # batch label = trailing token of the sample id (e.g. "1", "3A")
    rna_df_rep$batch= tail(strsplit(sample_id,'_')[[1]],n=1)
    #add to overall dataframe
    rna_df = rbind(rna_df, rna_df_rep)
}
# get overall dataframe and annotations (batched for MPRAnalyze, unbatched for QC)
result_rna = get_tables_withbatch(rna_df)
colAnnon_rna = result_rna[[1]]
rnaCount = result_rna[[2]]
result_rna_nobatch = get_tables(rna_df)
colAnnon_rna_nobatch = result_rna_nobatch[[1]]
rnaCount_nobatch = result_rna_nobatch[[2]]
# Spot check one SNP against a previously reported value
# NOTE(review): this assumes columns 1:10 are alt_* and 11:20 are ref_*
# (alphabetical arrange in get_tables) — verify before relying on the slices.
snp="chr3_50374568"
rnaCount_nobatch[snp,]
log(sum(as.matrix(rnaCount_nobatch[snp,])[,11:20])/sum(as.matrix(rnaCount_nobatch[snp,])[,1:10]))# log ref/alt
#1.201612 reported
# filter dna for those found in rna
dnaCount_new = dnaCount %>%
    rownames_to_column('snpname') %>%
    filter(row.names(dnaCount) %in% row.names(rnaCount))%>%
    column_to_rownames('snpname')
# filter rna for those found in dna
rnaCount_new = rnaCount %>%
    rownames_to_column('snpname') %>%
    filter(row.names(rnaCount) %in% row.names(dnaCount_new))%>%
    column_to_rownames('snpname')
# filter rna (no-batch version) for those found in dna
rnaCount_nobatch_new = rnaCount_nobatch %>%
    rownames_to_column('snpname') %>%
    filter(row.names(rnaCount_nobatch) %in% row.names(dnaCount_new))%>%
    column_to_rownames('snpname')
# align row and column order with the DNA table
rnaCount_nobatch_new = rnaCount_nobatch_new[rownames(dnaCount_new),colnames(dnaCount_new)]
# dimension sanity checks
dim(dnaCount)
dim(rnaCount)
dim(dnaCount_new)
dim(rnaCount_new)
dim(rnaCount_nobatch_new)
colAnnon_dna
```
# MPRAnalyze
```
# create MpraObject from the aligned DNA/RNA count matrices and annotations
obj <- MpraObject(dnaCounts = as.matrix(dnaCount_new), rnaCounts = as.matrix(rnaCount_new),
                  dnaAnnot = colAnnon_dna, rnaAnnot = colAnnon_rna)
# ## Data Normalization
# ## If the library factors are different for the DNA and RNA data, separate
# ## estimation of these factors is needed. We can also change the estimation
# ## method (Upper quartile by default)
# DNA: a single library (no lib.factor), upper-quartile depth estimation
obj <- estimateDepthFactors(obj, #lib.factor = c("allele", "bc_id"),
                            which.lib = "dna",
                            depth.estimator = "uq")
# RNA: one library per batch (replicate), upper-quartile depth estimation
obj <- estimateDepthFactors(obj, lib.factor = c("batch"),#lib.factor = c("allele"),
                            which.lib = "rna",
                            depth.estimator = "uq")
# obj <- estimateDepthFactors(obj, lib.factor = c("allele"),
#                             which.lib = "rna",
#                             depth.estimator = "uq")
```
## Quantitative Analysis
These estimates can then be used to identify and classify SNPs that induce a higher transcription rate (ASE aka Allele specific expression)
```
# obj <- analyzeQuantification(obj = obj,
# dnaDesign = ~ bc_id + allele,
# rnaDesign = ~ allele+batch)
```
We can now extract the transcription rate estimates from the model, denoted 'alpha values' in the MPRAnalyze model, and use the testing functionality to test for activity. Extracting alpha values is done with the `getAlpha` function, which will provide separate values per factor if a factor is provided. In this case we want a separate alpha estimate by condition:
```
# ##extract alpha values from the fitted model
# alpha <- getAlpha(obj, by.factor = "allele")
# ##visualize the estimates
# par(bg='white')
# boxplot(alpha)
```
## Comparative Analysis
MPRAnalyze also supports comparative analyses, in this case: identifying enhancers that are differentially active between conditions. While we can do this indirectly by taking the quantification results and identifying enhancers that are active in one condition but not the other, a direct comparative analysis is more sensitive, and allows identification of enhancers that are more or less active, avoiding the binarization of activity. MPRAnalyze also leverages negative controls to establish the null differential behavior, thereby correcting any systemic bias that may be present in the data. In terms of syntax, this analysis is done very similarly to quantification, with an additional reduced model that describes the null hypothesis. In this case, the null hypothesis is no differential activity between conditions, so the reduced model is an empty model (intercept only)
2212 steps for model takes around ~5-10 min to run
```
# --- Comparative analysis: LRT for allele-differential activity ---
# Time the model fit (full model: RNA ~ allele; reduced/null: intercept only)
ptm <- proc.time()
obj <- suppressMessages(analyzeComparative(obj = obj,
                       dnaDesign = ~ barcode_allelic,
                       rnaDesign = ~ allele,
                       reducedDesign = ~ 1))
proc.time() - ptm
##
# with the fitted model, we can now test for differential activity, by calling testLrt
res <- testLrt(obj)
head(res)
summary(res)
# for filtering out the snps with too few barcodes
# NOTE(review): the original comment said "< 5 bc" but the threshold used
# below is num_bc > 3 — confirm which cutoff is intended.
bc_filt_num = 3
# per-SNP-allele barcode counts from the raw (pre-collapse) RNA rows
rna_df_up_filt = rna_df%>%
    separate(id, c("chr", "loc", "allele_info", "letter", "bc_id"), sep='[.]')%>%
    group_by(chr,loc, allele_info, letter)%>%
    summarise(num_bc = n())%>%
    ungroup()%>%
    filter(num_bc>bc_filt_num) %>%
    mutate(snp_name = str_c(chr, loc, sep = '_'))
snp_names_up_filt = rna_df_up_filt$snp_name
length(snp_names_up_filt)
summary(rna_df_up_filt$num_bc)
ggplot(rna_df_up_filt,aes(x=num_bc))+geom_bar()
# # METHOD 1 dna normalized fold change
# res$normalt = log2(rowSums(rnaCount_nobatch_new[,1:10]+1)/rowSums(dnaCount_new[,1:10]+1))
# res$normref = log2(rowSums(rnaCount_nobatch_new[,11:20]+1)/rowSums(dnaCount_new[,11:20]+1))
# res$logFC = log2(res$normalt/res$normref)
#METHOD 2: just log2 ref/alt in rna
# res$logFC = log2(rowSums(rnaCount_nobatch_new[,1:10]+1)/rowSums(rnaCount_nobatch_new[,11:20]+1))
# # trimming
# res$pval = pmax(res$pval, 1e-6)
# res$logFC = pmin(res$logFC, 10)
# res$logFC = pmax(res$logFC, 1e-6)
dim(res)
# keep only barcode-filtered SNPs; rank by -log10(p) * logFC; drop NA rows
res = res%>%
    rownames_to_column()%>%
    filter(rowname %in% snp_names_up_filt)%>%
    mutate(neglog10_p = -log10(pval))%>%
    mutate(neglog10_p_x_logFC = neglog10_p*logFC)%>%
    arrange(desc(neglog10_p_x_logFC))%>%
    drop_na()
# When the hypothesis testing is simple (two-condition comparison), a fold-change estimate is also available:
## plot log Fold-Change
par(bg='white')
plot(density(res$logFC))
## plot volcano (on screen, then again into a PDF)
par(bg='white')
plot(res$logFC, -log10(res$pval))
pdf(paste0(save_prefix, 'volcano.pdf'))
par(bg='white')
plot(res$logFC, -log10(res$pval))
dev.off()
## plot qqplot of observed vs uniform-theoretical p-values
# pdf(paste0(save_prefix, 'volcano.pdf'))
par(bg='white')
qq_df = data.frame(emp_pval = sort(res$pval))
qq_df$theoretical_quantile = cume_dist(qq_df$emp_pval)
ggplot(qq_df, aes(x = -log10(theoretical_quantile), y = -log10(emp_pval)))+
    geom_point(size=1) +
    geom_abline(slope=1, intercept = 0, color = scales::brewer_pal(palette = "Set1")(1)[1]) +
    labs(x = "-log10 theoretical p-value", y = "-log10 observed p-value",
        title = paste0(tissue, " QQplot")) + ylim(0, 30)+
    cowplot::theme_cowplot()
ggsave(paste0(save_prefix, 'qqplot.pdf'))
# plot(1:length(-log10(res$pval)), -log10(res$pval))
# dev.off()
```
# annotate
```
# --- Annotate significant hits with library metadata and save outputs ---
lib_csv = '../../psych_lib_info.csv'
lib_df = read.csv(lib_csv)
# name_abbr ("chr_pos") matches the rowname format used in `res`
lib_df = mutate(lib_df, name_abbr = str_c(Chr, Position, sep='_'))
head(lib_df)
# thres = 0.05/dim(res)[1]
# keep FDR-significant SNPs
res_filt = res %>%
    # filter(pval < 0.05)
    filter(fdr<0.05)
dim(res)
dim(res_filt)
res_filt = res_filt %>%
    left_join(lib_df, by=c('rowname'='name_abbr'))
res_filt
## plot volcano with significant hits highlighted in red
# pdf(paste0(save_prefix, 'volcano.pdf'))
# par(bg='white')
ggplot(res, aes(x=logFC, y = -log10(pval))) + geom_point()+
    geom_point(data=res_filt, color='red')+
    labs(x = "log fold change", y = "-log10 p-value",
        title = paste0(tissue, " volcano plot")) + xlim(-5, 5)+ ylim(0,30)+
    cowplot::theme_cowplot()
# plot(res$logFC, -log10(res$pval),title(paste0(tissue, 'Volcano Plot')))
ggsave(paste0(save_prefix, 'volcano.pdf'))
# dev.off()
# write full and filtered result tables
write.csv(res,paste0(save_prefix, 'comp.csv'))
write.csv(res_filt,paste0(save_prefix, 'comp_filt.csv'))
# genes associated with the significant SNPs (from the library annotation)
sort(unique(res_filt$eGene))
sort(unique(res_filt$GeneName))
```
| github_jupyter |
## Work
### 請結合前面的知識與程式碼,比較不同的 regularization 的組合對訓練的結果與影響:如 dropout, regularizers, batch-normalization 等
```
import os
import keras
import itertools
# Disable GPU (force CPU-only TensorFlow)
os.environ["CUDA_VISIBLE_DEVICES"] = ""
# CIFAR-10: (images, integer labels) tuples for train and test
train, test = keras.datasets.cifar10.load_data()
## Data preprocessing
def preproc_x(x, flatten=True):
    """Normalize pixel values to [0, 1]; optionally flatten each sample to 1-D."""
    normalized = x / 255.
    if not flatten:
        return normalized
    return normalized.reshape((len(normalized), -1))
def preproc_y(y, num_classes=10):
    """One-hot encode integer class labels; leave already-encoded arrays untouched."""
    is_label_column = y.shape[-1] == 1
    return keras.utils.to_categorical(y, num_classes) if is_label_column else y
x_train, y_train = train
x_test, y_test = test
# Preproc the inputs: scale to [0, 1] and flatten 32x32x3 images to 3072-vectors
x_train = preproc_x(x_train)
x_test = preproc_x(x_test)
# Preproc the outputs: one-hot encode the 10 class labels
y_train = preproc_y(y_train)
y_test = preproc_y(y_test)
# Regularization building blocks used by build_mlp below
from keras.regularizers import l1
from keras.layers import Dropout
from keras.layers import BatchNormalization
"""
建立神經網路,並加入 L1 或 L2
"""
def build_mlp(input_shape, output_units=10, num_neurons=[512, 256, 128], l1_ratio=1e-4, drp_ratio=0.2):
input_layer = keras.layers.Input(input_shape)
for i, n_units in enumerate(num_neurons):
if i == 0:
x = keras.layers.Dense(units=n_units,
activation="relu",
name="hidden_layer"+str(i+1),
kernel_regularizer=l1(l1_ratio))(input_layer)
else:
x = keras.layers.Dense(units=n_units,
activation="relu",
name="hidden_layer"+str(i+1),
kernel_regularizer=l1(l1_ratio))(x)
x = Dropout(drp_ratio)(x)
x = BatchNormalization()(x)
out = keras.layers.Dense(units=output_units, activation="softmax", name="output")(x)
model = keras.models.Model(inputs=[input_layer], outputs=[out])
return model
## Hyperparameter settings
LEARNING_RATE = 1e-3
EPOCHS = 50
BATCH_SIZE = 256
MOMENTUM = 0.95
L1_EXP = 1e-2
DROP_RATIO = 0.25
# NOTE(review): this second assignment overrides BATCH_SIZE = 256 above, so
# 1024 is the value actually used — probably a leftover from experimenting.
BATCH_SIZE = 1024
model = build_mlp(input_shape=x_train.shape[1:], l1_ratio=L1_EXP, drp_ratio=DROP_RATIO)
model.summary()
# SGD with Nesterov momentum
optimizer = keras.optimizers.SGD(lr=LEARNING_RATE, nesterov=True, momentum=MOMENTUM)
model.compile(loss="categorical_crossentropy", metrics=["accuracy"], optimizer=optimizer)
model.fit(x_train, y_train,
          epochs=EPOCHS,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          shuffle=True)
# Collect results from the training history
train_loss = model.history.history["loss"]
valid_loss = model.history.history["val_loss"]
train_acc = model.history.history["accuracy"]
valid_acc = model.history.history["val_accuracy"]
import matplotlib.pyplot as plt
%matplotlib inline
# Loss curves: train vs validation per epoch
plt.plot(range(len(train_loss)), train_loss, label="train loss")
plt.plot(range(len(valid_loss)), valid_loss, label="valid loss")
plt.legend()
plt.title("Loss")
plt.show()
# Accuracy curves: train vs validation per epoch
plt.plot(range(len(train_acc)), train_acc, label="train accuracy")
plt.plot(range(len(valid_acc)), valid_acc, label="valid accuracy")
plt.legend()
plt.title("Accuracy")
plt.show()
```
| github_jupyter |
[source](../../api/alibi_detect.ad.adversarialae.rst)
# Adversarial Auto-Encoder
## Overview
The adversarial detector follows the method explained in the [Adversarial Detection and Correction by Matching Prediction Distributions](https://arxiv.org/abs/2002.09364) paper. Usually, autoencoders are trained to find a transformation $T$ that reconstructs the input instance $x$ as accurately as possible with loss functions that are suited to capture the similarities between x and $x'$ such as the mean squared reconstruction error. The novelty of the adversarial autoencoder (AE) detector relies on the use of a classification model-dependent loss function based on a distance metric in the output space of the model to train the autoencoder network. Given a classification model $M$ we optimise the weights of the autoencoder such that the [KL-divergence](https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence) between the model predictions on $x$ and on $x'$ is minimised. Without the presence of a reconstruction loss term $x'$ simply tries to make sure that the prediction probabilities $M(x')$ and $M(x)$ match without caring about the proximity of $x'$ to $x$. As a result, $x'$ is allowed to live in different areas of the input feature space than $x$ with different decision boundary shapes with respect to the model $M$. The carefully crafted adversarial perturbation which is effective around x does not transfer to the new location of $x'$ in the feature space, and the attack is therefore neutralised. Training of the autoencoder is unsupervised since we only need access to the model prediction probabilities and the normal training instances. We do not require any knowledge about the underlying adversarial attack and the classifier weights are frozen during training.
The detector can be used as follows:
* An adversarial score $S$ is computed. $S$ equals the K-L divergence between the model predictions on $x$ and $x'$.
* If $S$ is above a threshold (explicitly defined or inferred from training data), the instance is flagged as adversarial.
* For adversarial instances, the model $M$ uses the reconstructed instance $x'$ to make a prediction. If the adversarial score is below the threshold, the model makes a prediction on the original instance $x$.
This procedure is illustrated in the diagram below:

The method is very flexible and can also be used to detect common data corruptions and perturbations which negatively impact the model performance. The algorithm works well on tabular and image data.
## Usage
### Initialize
Parameters:
* `threshold`: threshold value above which the instance is flagged as an adversarial instance.
* `encoder_net`: `tf.keras.Sequential` instance containing the encoder network. Example:
```python
encoder_net = tf.keras.Sequential(
[
InputLayer(input_shape=(32, 32, 3)),
Conv2D(32, 4, strides=2, padding='same',
activation=tf.nn.relu, kernel_regularizer=l1(1e-5)),
Conv2D(64, 4, strides=2, padding='same',
activation=tf.nn.relu, kernel_regularizer=l1(1e-5)),
Conv2D(256, 4, strides=2, padding='same',
activation=tf.nn.relu, kernel_regularizer=l1(1e-5)),
Flatten(),
Dense(40)
]
)
```
* `decoder_net`: `tf.keras.Sequential` instance containing the decoder network. Example:
```python
decoder_net = tf.keras.Sequential(
[
InputLayer(input_shape=(40,)),
Dense(4 * 4 * 128, activation=tf.nn.relu),
Reshape(target_shape=(4, 4, 128)),
Conv2DTranspose(256, 4, strides=2, padding='same',
activation=tf.nn.relu, kernel_regularizer=l1(1e-5)),
Conv2DTranspose(64, 4, strides=2, padding='same',
activation=tf.nn.relu, kernel_regularizer=l1(1e-5)),
Conv2DTranspose(3, 4, strides=2, padding='same',
activation=None, kernel_regularizer=l1(1e-5))
]
)
```
* `ae`: instead of using a separate encoder and decoder, the AE can also be passed as a `tf.keras.Model`.
* `model`: the classifier as a `tf.keras.Model`. Example:
```python
inputs = tf.keras.Input(shape=(input_dim,))
outputs = tf.keras.layers.Dense(output_dim, activation=tf.nn.softmax)(inputs)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
```
* `hidden_layer_kld`: dictionary with as keys the number of the hidden layer(s) in the classification model which are extracted and used during training of the adversarial AE, and as values the output dimension for the hidden layer. Extending the training methodology to the hidden layers is optional and can further improve the adversarial correction mechanism.
* `model_hl`: instead of passing a dictionary to `hidden_layer_kld`, a list with tf.keras models for the hidden layer K-L divergence computation can be passed directly.
* `w_model_hl`: Weights assigned to the loss of each model in `model_hl`. Also used to weight the K-L divergence contribution for each model in `model_hl` when computing the adversarial score.
* `temperature`: Temperature used for model prediction scaling. Temperature <1 sharpens the prediction probability distribution which can be beneficial for prediction distributions with high entropy.
* `data_type`: can specify data type added to metadata. E.g. *'tabular'* or *'image'*.
Initialized adversarial detector example:
```python
from alibi_detect.ad import AdversarialAE
ad = AdversarialAE(
encoder_net=encoder_net,
decoder_net=decoder_net,
model=model,
temperature=0.5
)
```
### Fit
We then need to train the adversarial detector. The following parameters can be specified:
* `X`: training batch as a numpy array.
* `loss_fn`: loss function used for training. Defaults to the custom adversarial loss.
* `w_model`: weight on the loss term minimizing the K-L divergence between model prediction probabilities on the original and reconstructed instance. Defaults to 1.
* `w_recon`: weight on the mean squared error reconstruction loss term. Defaults to 0.
* `optimizer`: optimizer used for training. Defaults to [Adam](https://arxiv.org/abs/1412.6980) with learning rate 1e-3.
* `epochs`: number of training epochs.
* `batch_size`: batch size used during training.
* `verbose`: boolean whether to print training progress.
* `log_metric`: additional metrics whose progress will be displayed if verbose equals True.
* `preprocess_fn`: optional data preprocessing function applied per batch during training.
```python
ad.fit(X_train, epochs=50)
```
The threshold for the adversarial score can be set via ```infer_threshold```. We need to pass a batch of instances $X$ and specify what percentage of those we consider to be normal via `threshold_perc`. Even if we only have normal instances in the batch, it might be best to set the threshold value a bit lower (e.g. $95$%) since the model could have misclassified training instances leading to a higher score if the reconstruction picked up features from the correct class or some instances might look adversarial in the first place.
```python
ad.infer_threshold(X_train, threshold_perc=95, batch_size=64)
```
### Detect
We detect adversarial instances by simply calling `predict` on a batch of instances `X`. We can also return the instance level adversarial score by setting `return_instance_score` to True.
The prediction takes the form of a dictionary with `meta` and `data` keys. `meta` contains the detector's metadata while `data` is also a dictionary which contains the actual predictions stored in the following keys:
* `is_adversarial`: boolean whether instances are above the threshold and therefore adversarial instances. The array is of shape *(batch size,)*.
* `instance_score`: contains instance level scores if `return_instance_score` equals True.
```python
preds_detect = ad.predict(X, batch_size=64, return_instance_score=True)
```
### Correct
We can immediately apply the procedure sketched out in the above diagram via ```correct```. The method also returns a dictionary with `meta` and `data` keys. On top of the information returned by ```detect```, 3 additional fields are returned under `data`:
* `corrected`: model predictions by following the adversarial detection and correction procedure.
* `no_defense`: model predictions without the adversarial correction.
* `defense`: model predictions where each instance is corrected by the defense, regardless of the adversarial score.
```python
preds_correct = ad.correct(X, batch_size=64, return_instance_score=True)
```
## Examples
### Image
[Adversarial detection on CIFAR10](../../examples/ad_ae_cifar10.nblink)
| github_jupyter |
<h1>Table of Contents<span class="tocSkip"></span></h1>
<div class="toc"><ul class="toc-item"></ul></div>
<h1>Table of Contents<span class="tocSkip"></span></h1>
<div class="toc"><ul class="toc-item"></ul></div>
```
# default_exp text.symbols
# export
""" from https://github.com/keithito/tacotron """
"""
Defines the set of symbols used in text input to the model.
The default is a set of ASCII characters that works well for English or text that has been run through Unidecode. For other data, you can modify _characters. See TRAINING_DATA.md for details. """
from uberduck_ml_dev.text import cmudict
# Symbol inventories for the different text front-ends.  Each underscore-
# prefixed string enumerates one category of characters; the SYMBOL_SETS
# built from them define the model vocabularies.
_pad = "_"
_punctuation_nvidia_taco2 = "!'(),.:;? "
_punctuation = "!'\",.:;? "
_math = "#%&*+-/[]()"
_special = "@©°½—₩€$"
_special_nvidia_taco2 = "-"
_accented = "áçéêëñöøćž"
_numbers = "0123456789"
_letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
_letters_ipa = "ɑɐɒæɓʙβɔɕçɗɖðʤəɘɚɛɜɝɞɟʄɡɠɢʛɦɧħɥʜɨɪʝɭɬɫɮʟɱɯɰŋɳɲɴøɵɸθœɶʘɹɺɾɻʀʁɽʂʃʈʧʉʊʋⱱʌɣɤʍχʎʏʑʐʒʔʡʕʢǀǁǂǃˈˌːˑʼʴʰʱʲʷˠˤ˞↓↑→↗↘'̩'ᵻ"

# Prepend "@" to ARPAbet symbols to ensure uniqueness (some are the same as
# uppercase letters):
_arpabet = ["@" + s for s in cmudict.valid_symbols]

# Export all symbols:
symbols = (
    list(_pad + _punctuation + _math + _special + _accented + _numbers + _letters)
    + _arpabet
)
# Symbol set following NVIDIA's Tacotron2 conventions (reduced punctuation,
# "-" as the only special character).
symbols_nvidia_taco2 = (
    [_pad]
    + list(_special_nvidia_taco2)
    + list(_punctuation_nvidia_taco2)
    + list(_letters)
    + _arpabet
)
# Default set extended with the IPA characters.
symbols_with_ipa = symbols + list(_letters_ipa)
# Reduced set used by Grad-TTS.
grad_tts_symbols = list(_pad + "-" + "!'(),.:;? " + _letters) + _arpabet

# Names under which the symbol sets are registered.
DEFAULT_SYMBOLS = "default"
IPA_SYMBOLS = "ipa"
NVIDIA_TACO2_SYMBOLS = "nvidia_taco2"
GRAD_TTS_SYMBOLS = "gradtts"

SYMBOL_SETS = {
    DEFAULT_SYMBOLS: symbols,
    IPA_SYMBOLS: symbols_with_ipa,
    NVIDIA_TACO2_SYMBOLS: symbols_nvidia_taco2,
    GRAD_TTS_SYMBOLS: grad_tts_symbols,
}

# export
import re

# Forward (symbol -> integer id) lookup tables, one per symbol set.
symbol_to_id = {
    DEFAULT_SYMBOLS: {s: i for i, s in enumerate(SYMBOL_SETS[DEFAULT_SYMBOLS])},
    IPA_SYMBOLS: {s: i for i, s in enumerate(SYMBOL_SETS[IPA_SYMBOLS])},
    NVIDIA_TACO2_SYMBOLS: {
        s: i for i, s in enumerate(SYMBOL_SETS[NVIDIA_TACO2_SYMBOLS])
    },
    GRAD_TTS_SYMBOLS: {s: i for i, s in enumerate(SYMBOL_SETS[GRAD_TTS_SYMBOLS])},
}
# Reverse (integer id -> symbol) lookup tables.
id_to_symbol = {
    DEFAULT_SYMBOLS: {i: s for i, s in enumerate(SYMBOL_SETS[DEFAULT_SYMBOLS])},
    IPA_SYMBOLS: {i: s for i, s in enumerate(SYMBOL_SETS[IPA_SYMBOLS])},
    NVIDIA_TACO2_SYMBOLS: {
        i: s for i, s in enumerate(SYMBOL_SETS[NVIDIA_TACO2_SYMBOLS])
    },
    GRAD_TTS_SYMBOLS: {i: s for i, s in enumerate(SYMBOL_SETS[GRAD_TTS_SYMBOLS])},
}

# Matches text of the form "head{BODY}tail" — presumably used to locate
# {ARPAbet} spans embedded in input text; verify against callers.
curly_re = re.compile(r"(.*?)\{(.+?)\}(.*)")
# Splits text into word tokens (Latin letters incl. accented, with optional
# apostrophe suffix) vs. non-word chunks / {...} groups.
words_re = re.compile(
    r"([a-zA-ZÀ-ž]+['][a-zA-ZÀ-ž]{1,2}|[a-zA-ZÀ-ž]+)|([{][^}]+[}]|[^a-zA-ZÀ-ž{}]+)"
)
def symbols_to_sequence(symbols, symbol_set=DEFAULT_SYMBOLS, ignore_symbols=("_", "~")):
    """Map an iterable of symbols to their integer ids.

    Args:
        symbols: iterable of single-symbol strings (characters or
            "@"-prefixed ARPAbet tokens).
        symbol_set: key into ``symbol_to_id`` selecting the vocabulary.
        ignore_symbols: symbols to drop even when they are in the
            vocabulary.  The default is a tuple rather than the original
            mutable list (mutable default arguments are a Python footgun);
            membership semantics are unchanged, and callers may still pass
            a list.

    Returns:
        list of int ids; symbols missing from the vocabulary are skipped.
    """
    return [
        symbol_to_id[symbol_set][s]
        for s in symbols
        if should_keep_symbol(s, symbol_set, ignore_symbols)
    ]
def arpabet_to_sequence(text, symbol_set=DEFAULT_SYMBOLS):
    """Convert a space-separated ARPAbet string into a sequence of ids.

    Every whitespace-separated token is prefixed with "@" so it addresses
    the ARPAbet entries of the vocabulary (see ``_arpabet``).
    """
    prefixed_tokens = []
    for token in text.split():
        prefixed_tokens.append("@" + token)
    return symbols_to_sequence(prefixed_tokens, symbol_set=symbol_set)
def should_keep_symbol(s, symbol_set=DEFAULT_SYMBOLS, ignore_symbols=("_", "~")):
    """Return True if ``s`` is in the vocabulary and not an ignored symbol.

    The ``ignore_symbols`` default is a tuple rather than the original
    mutable list (mutable defaults are shared between calls); membership
    semantics are unchanged and callers may still pass a list.
    """
    return s in symbol_to_id[symbol_set] and s not in ignore_symbols
# Quick sanity checks of the word-splitting regex on a few inputs.
print(words_re.findall("The quick"))
print(words_re.findall("I'm blue,"))
print(words_re.findall("L'monj'ello"))
print(words_re.findall("{ S IY } { EH M }"))

# Space and punctuation are in the vocabulary; newline is not.
assert should_keep_symbol(" ")
assert not should_keep_symbol("\n")
assert should_keep_symbol(".")

# NOTE: arpabet_to_sequence does not properly handle whitespace, it should take single words only.
assert (
    len(arpabet_to_sequence("{ S IY } { EH M } { Y UW } { D IH K SH AH N EH R IY }"))
    == 15
)
assert arpabet_to_sequence("{ S IY }") == [168, 148]
# But symbols_to_sequence handles whitespace
assert len(symbols_to_sequence("C M U Dictionary")) == 16
arpabet_to_sequence("{ H AH1 N D R IH D}")
len(SYMBOL_SETS["default"])
```
| github_jupyter |
# **Testing for Structural Breaks in Time Series Data with a Chow Test**
## **I. Introduction**
I've written a bit on forecasting future stock prices and distributions of future stock prices. I'm proud of the models I built for those articles, but they will eventually be no more predictive than a monkey throwing darts at a board. Perhaps they'll perform worse.
This will happen because the underlying system, of which we are modeling an aspect, will change. For an extreme example, a company whose stock we are trying to model goes out of business. The time series just ends. For a more subtle example, let's look at the relationship between oil prices and dollar exchange rates.
I took historical real USD exchange rates measured against a broad basket of currencies and oil prices (WTI) going from January 1986 to February 2019 and indexed them to January 2000. I then took the natural logarithm of each, because this would give us the growth rate if we differenced the data and is a common transformation with time series data (and for dealing with skewed variables in non-time series analysis).
As you can see, they appear inversely related over time. When one goes up, the other goes down. This makes sense because when people outside the US want to buy oil, they often need to use USD for the transaction. Oil prices rise and they need to exchange more of their domestic currency to buy the same amount. This in turn strengthens the dollar and the exchange rate goes down as demand for USD increases and supply of foreign currencies increase. (An exchange rate of 1 means it takes 1 USD to buy 1 unit of foreign currency. If it is 2, it takes 2 USD to buy 1 unit of foreign currency. If it is 0.5, 1 USD buys 2 units of the foreign currency).
But, does the inverse relationship remain constant over time? Are there periods where a movement in one corresponds to a larger movement in the other relative to other times? This type of change in the relationship between oil prices and USD exchange rates could occur for a variety of reasons. For example, a major currency crisis across a region driving up demand for safe USD, while reducing demand for oil as the economy weakens. Perhaps a bunch of currencies disappear and one major one forms as the countries join a monetary union, like the EU.
```
# for linear algebra and mathematical functions
import numpy as np
# for dataframe manipulation
import pandas as pd
# for data visualization
import matplotlib.pyplot as plt
# for setting plot size
import matplotlib as mpl
# for changing the plot size in the Jupyter Notebook output
%matplotlib inline
# sets the default plot size to 12x8 (width, height in inches) for all
# subsequent figures in this notebook
mpl.rcParams['figure.figsize'] = (12,8)
# reads in data on historical oil prices and dollar exchange rates
full_data = pd.read_csv('Oil Data.csv')


def _jan2000_base(col):
    """Scalar value of `col` in January 2000, used as the index base.

    Uses a single .loc[mask, col] lookup (avoids chained indexing) and
    .iloc[0] to extract the scalar explicitly — calling float() on a
    length-1 ndarray, as the original code did, is deprecated in recent
    NumPy releases.  Must be called while Date is still the raw 'MM-YYYY'
    string, i.e. before pd.to_datetime below.
    """
    return float(full_data.loc[full_data.Date == '01-2000', col].iloc[0])


# Real Trade Weighted U.S. Dollar Index: Broad, Goods — indexed to January 2000
full_data['broad_r'] = full_data.TWEXBPA / _jan2000_base('TWEXBPA')
full_data['ebroad_r'] = np.log(full_data.broad_r)

# Real Trade Weighted U.S. Dollar Index: Major Currencies, Goods — indexed to January 2000
full_data['major_r'] = full_data.TWEXMPA / _jan2000_base('TWEXMPA')
full_data['emajor_r'] = np.log(full_data.major_r)

# Real Trade Weighted U.S. Dollar Index: Other Important Trading Partners,
# Goods — indexed to January 2000
full_data['oitp_r'] = full_data.TWEXOPA / _jan2000_base('TWEXOPA')
full_data['eoitp_r'] = np.log(full_data.oitp_r)

# Crude Oil Prices: West Texas Intermediate (WTI) - Cushing, Oklahoma —
# adjusted for inflation prior to indexing; note the base is the *nominal*
# January 2000 price, matching the original computation
full_data['po_r'] = full_data.MCOILWTICO / (full_data.Fred_CPIAUCNS / 100) / _jan2000_base('MCOILWTICO')
full_data['epo_r'] = np.log(full_data.po_r)

# creates a column for month
full_data.Date = pd.to_datetime(full_data.Date)
full_data['month'] = full_data.Date.dt.month

# creates a list of all the variables of interest
variables_to_keep = ['epo_r', 'Date', 'month', 'ebroad_r', 'emajor_r', 'eoitp_r']
# creates a new dataframe containing only the variables of interest
my_data = full_data[variables_to_keep]

# creates dummy variables for each month, dropping January to avoid multicollinearity
my_data = pd.concat([my_data, pd.get_dummies(my_data.month, drop_first = True)], axis = 1)

# sets the Date as the index
my_data.index = pd.DatetimeIndex(my_data.Date)

# drops these columns for a tidy data set
my_data = my_data.drop(['month', 'Date'], axis = 1)
# the code below plots the real oil price growth rate with the USD vs Broad Currency Basket
# exchange growth rate

# series to plot: monthly dates on x, the two log-indexed series on y
time = my_data.index
epo_r = my_data.epo_r
ebroad_r = my_data.ebroad_r

fig, ax1 = plt.subplots()

# left axis: log oil prices, drawn in red
color = 'tab:red'
ax1.set_xlabel('Date (Monthly)')
ax1.set_ylabel('Natural Log of Oil Prices', color = color)
ax1.plot(time, epo_r, color=color)
ax1.tick_params(axis = 'y', labelcolor = color)

ax2 = ax1.twinx()  # instantiate a second axes that shares the same x-axis

# right axis: log exchange rate, drawn in blue
color = 'tab:blue'
ax2.set_ylabel('Natural Log of USD Exchange Rate vs. Broad Currency Basket',
               color = color)  # we already handled the x-label with ax1
ax2.plot(time, ebroad_r, color = color)
ax2.tick_params(axis = 'y', labelcolor = color)

plt.title('Natural Log of Oil Prices and USD Exchange Rates indexed to January 2000')
fig.tight_layout()  # otherwise the right y-label is slightly clipped
plt.show()
```
## **II. Detecting a Suspected Break at a Known Date: The Chow Test**
The Chow Test tests if the true coefficients in two different regression models are equal. The null hypothesis is they are equal and the alternative hypothesis is they are not. Another way of saying this is that the null hypothesis is the model before the possible break point is the same as the model after the possible break point. The alternative hypothesis is the model fitting each periods are different.
It formally tests this by performing an F-test on the Chow Statistic which is (RSS_pooled - (RSS1 + RSS2))/(number of independent variables plus 1 for the constant) divided by (RSS1 + RSS2)/(Number of observations in subsample 1 + Number of observations in subsample 2 - 2*(number of independent variables plus 1 for the constant).
Each of the models (pooled, 1, 2) must have normally distributed errors with mean 0, as well as independent and identically distributed errors, to satisfy the Gauss-Markov assumptions.
I use the Chow test to test for a structural break at the introduction of the Euro in January 1999. This seems a reasonable possible structural break, because a handful of major currencies, and some minor ones, disappeared and a new very important currency was created. The creation of the Euro certainly qualifies as a major shock to currency markets and perhaps to the oil vs. dollar relationship.
```
#imports the chowtest package as ct, which is written by yours truly
import chowtest as ct
```
Translating the independently and identically distributed residual requirement to English translates as constant mean and variance without serial correlation in the residuals. To test for this, I tested for auto-correlation and heteroskedasticity in my residuals. I did the same tests on their growth rates (the difference in natural logs). I also plotted the residuals and estimated their mean.
The auto-correlation function plots strongly suggest that the residuals from the simple OLS model have strong auto-correlation, while the residuals from the OLS of the growth rates are not auto-correlated.
```
# sets the plot size to 12x8
mpl.rcParams['figure.figsize'] = (12,8)

# to reduce typing, I saved ebroad_r as X and epo_r as y
X = pd.DataFrame(my_data[['ebroad_r']])
y = pd.DataFrame(my_data.epo_r)

# to reduce typing, I saved the differenced ebroad_r as X_diff and epo_r as y_diff
# (a first difference of logs approximates the growth rate)
X_diff = X.diff().dropna()
y_diff = y.diff().dropna()

# saves the residuals from the undifferenced X and y OLS model
un_diffed_resids = ct.linear_residuals(X, y).residuals
# saves the residuals from the differenced X and y OLS model
diffed_resids = ct.linear_residuals(X_diff, y_diff).residuals

# plots the ACF for the residuals of the OLS regression of epo_r on ebroad_r
pd.plotting.autocorrelation_plot(un_diffed_resids)
plt.show()

# plots the ACF for the residuals of the OLS regression of the differenced epo_r on
# differenced ebroad_r
pd.plotting.autocorrelation_plot(diffed_resids)
plt.show()
```
The Breusch-Pagan Test shows that heteroskedasticity is present in the OLS model. It is also present in the model of growth rates, but is much less severe.
```
from statsmodels.stats.diagnostic import het_breuschpagan

# het_breuschpagan returns (LM stat, LM p-value, F stat, F p-value);
# indices 2 and 3 below are the F-test results.  Each test is now run once
# and reused (the original called het_breuschpagan twice per model).
# NOTE(review): statsmodels expects the explanatory matrix passed here to
# include a constant column — confirm X / X_diff match that convention.

# tests for heteroskedasticity in the full-sample residuals (levels model)
bp_levels = het_breuschpagan(un_diffed_resids, X)
print('F-statistic for the Breusch-Pagan Test the OLS model: ' +
      str(bp_levels[2]))
print('p-value for the Breusch-Pagan F-Test the OLS model: ' +
      str(bp_levels[3]))

# tests for heteroskedasticity in the growth-rate model residuals
# (label below fixed: the original printed "R-Test" instead of "F-Test")
bp_growth = het_breuschpagan(diffed_resids, X_diff)
print('F-statistic for the Breusch-Pagan Test the growth rate OLS model: ' +
      str(bp_growth[2]))
print('p-value for the Breusch-Pagan F-Test the growth rate OLS model: ' +
      str(bp_growth[3]))
```
The histograms of residuals show a bell-curve shape to the residuals of the OLS model looking at growth rates. The histogram of residuals for the regular OLS model show a possibly double-humped shape.
```
# sets the plot size to 12x8
mpl.rcParams['figure.figsize'] = (12,8)
# plots the histogram of residuals from the levels (undifferenced) model
plt.hist(un_diffed_resids)

# sets the plot size to 12x8
mpl.rcParams['figure.figsize'] = (12,8)
# plots the histogram of residuals from the growth-rate (differenced) model
plt.hist(diffed_resids)
```
The normality tests for the residuals from each model are both failures.
```
# imports the normality test from scipy.stats
# (D'Agostino-Pearson test; null hypothesis: the sample is normal)
from scipy.stats import normaltest

# performs the normality test on the residuals from the non-differenced OLS model
print(normaltest(un_diffed_resids))
# performs the normality test on the residuals from the differenced OLS model
print(normaltest(diffed_resids))
```
Despite failing the normality tests, the mean of the residuals of both models are essentially 0. The model of growth rates has residuals that are independently distributed and bell-shaped based on the ACF plot, even though there is evidence of heteroskedasticity at the 0.05 significance level. For these reasons, I will proceed with my analysis using the growth rate model and assume my Chow Test result will be robust to the non-normality of residuals.
```
# Both means should be approximately 0 if the OLS fits are unbiased.
print('Mean of OLS residuals: ' + str(np.mean(un_diffed_resids)))
print('Mean of OLS model of growth rate residuals: ' + str(np.mean(diffed_resids)))
```
I come to the same conclusion for the models estimating before and after the split dates and proceed with the Chow Test.
```
# sets the plot size to 12x8
mpl.rcParams['figure.figsize'] = (12,8)

# split dates bracketing the introduction of the Euro (January 1999):
# the pre-period ends at `stop`, the post-period starts at `start`
stop = '1999-01-01'
start = '1999-02-01'

# Growth-rate OLS residuals on each sub-sample, computed once and reused —
# the original re-fit the model for every diagnostic below.
pre_resids = ct.linear_residuals(X_diff.loc[:stop], y_diff.loc[:stop]).residuals
post_resids = ct.linear_residuals(X_diff.loc[start:], y_diff.loc[start:]).residuals

# --- pre-Euro diagnostics ---
# plots the ACF for the pre-period residuals of the growth-rate model
pd.plotting.autocorrelation_plot(pre_resids)
plt.show()

# sets the plot size to 12x8
mpl.rcParams['figure.figsize'] = (12,8)

# Breusch-Pagan heteroskedasticity test on the pre-period residuals
bp_pre = het_breuschpagan(pre_resids, X_diff.loc[:stop])
print('F-statistic for the Breusch-Pagan Test the growth rate OLS model: ' +
      str(bp_pre[2]))
print('p-value for the Breusch-Pagan F-Test the growth rate OLS model: ' +
      str(bp_pre[3]))
print('Mean of OLS model of growth rate residuals pre-Euro: ' +
      str(np.mean(pre_resids)))

# plots the histogram of the pre-period residuals
plt.hist(pre_resids)
plt.show()  # fixed: the original `plt.show` (no parentheses) was a no-op

# sets the plot size to 12x8
mpl.rcParams['figure.figsize'] = (12,8)

# --- post-Euro diagnostics ---
# plots the ACF for the post-period residuals of the growth-rate model
pd.plotting.autocorrelation_plot(post_resids)
plt.show()

# Breusch-Pagan heteroskedasticity test on the post-period residuals
bp_post = het_breuschpagan(post_resids, X_diff.loc[start:])
print('F-statistic for the Breusch-Pagan Test the growth rate OLS model: ' +
      str(bp_post[2]))
print('p-value for the Breusch-Pagan F-Test the growth rate OLS model: ' +
      str(bp_post[3]))
# fixed label: this is the post-Euro sub-sample (original said "pre-Euro")
print('Mean of OLS model of growth rate residuals post-Euro: ' +
      str(np.mean(post_resids)))

# plots the histogram of the post-period residuals
plt.hist(post_resids)
```
The result of the Chow Test is a Chow Test statistic of about 4.24 tested against an F-distribution with 2 and 394 degrees of freedom. The p-value is about 0.0009, meaning if the models before and after the split date are actually the same and we did an infinite number of trials, 0.09% of our results would show this level of difference in the models due to sampling error. It is safe to say that the model of real oil price and dollar exchange growth rates is different pre-Euro and post-Euro introduction.
```
# performs the Chow Test at the Euro-introduction split using the
# growth-rate (differenced-log) series; X_diff and y_diff already hold
# X.diff().dropna() and y.diff().dropna(), so reuse them instead of
# re-differencing as the original did.
ct.ChowTest(X_diff, y_diff, stop, start)
```
| github_jupyter |
### About
The goal of this script is to process a few common keyphrase datasets, including
- **Tokenize**: by default using method from Meng et al. 2017, which fits more for academic text since it splits strings by hyphen etc. and makes tokens more fine-grained.
- keep [_<>,\(\)\.\'%]
- replace digits with < digit >
- split by [^a-zA-Z0-9_<>,#&\+\*\(\)\.\'%]
- **Determine present/absent phrases**: determine whether a phrase appears verbatim in a text. This is believed a very important step for the evaluation of keyphrase-related tasks, since in general extraction methods cannot recall any phrases that don't appear in the source text.
```
import os
import sys
import re
import json
import numpy as np
from collections import defaultdict
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
sys.path.append(module_path)
module_path = os.path.abspath(os.path.join('../onmt'))
if module_path not in sys.path:
sys.path.append(module_path)
import kp_evaluate
import onmt.keyphrase.utils as utils
# All datasets this script knows how to process; the second assignment
# deliberately narrows the run to the small test sets (kp20k and
# stackexchange are large).
dataset_names = ['inspec', 'krapivin', 'nus', 'semeval', 'kp20k', 'duc', 'stackexchange']
dataset_names = ['inspec', 'krapivin', 'nus', 'semeval', 'duc']
json_base_dir = '/Users/memray/project/kp/OpenNMT-kpg/data/keyphrase/json/'  # path to the json folder

for dataset_name in dataset_names:
    print(dataset_name)
    input_json_path = os.path.join(json_base_dir, dataset_name, '%s_test.json' % dataset_name)
    output_json_path = os.path.join(json_base_dir, dataset_name, '%s_test_meng17token.json' % dataset_name)

    # per-dataset counters: docs seen / docs with >=1 present phrase /
    # docs with >=1 absent phrase
    doc_count, present_doc_count, absent_doc_count = 0, 0, 0
    # per-document keyphrase counts (total / present / absent)
    tgt_num, present_tgt_num, absent_tgt_num = [], [], []

    # with open(input_json_path, 'r') as input_json, open(output_json_path, 'w') as output_json:
    with open(input_json_path, 'r') as input_json:
        for json_line in input_json:
            json_dict = json.loads(json_line)

            # stackexchange uses different field names; normalize them
            if dataset_name == 'stackexchange':
                json_dict['abstract'] = json_dict['question']
                json_dict['keywords'] = json_dict['tags']
                del json_dict['question']
                del json_dict['tags']

            title = json_dict['title']
            abstract = json_dict['abstract']
            fulltext = json_dict['fulltext'] if 'fulltext' in json_dict else ''
            keywords = json_dict['keywords']
            if isinstance(keywords, str):
                keywords = keywords.split(';')
                json_dict['keywords'] = keywords

            # remove all the abbreviations/acronyms in parentheses in keyphrases
            keywords = [re.sub(r'\(.*?\)|\[.*?\]|\{.*?\}', '', kw) for kw in keywords]

            # tokenize text (Meng et al. 2017 tokenization)
            title_token = utils.meng17_tokenize(title)
            abstract_token = utils.meng17_tokenize(abstract)
            fulltext_token = utils.meng17_tokenize(fulltext)
            keywords_token = [utils.meng17_tokenize(kw) for kw in keywords]

            # replace numbers with <digit> placeholders
            # (k=2 — presumably a digit-length threshold; see utils)
            title_token = utils.replace_numbers_to_DIGIT(title_token, k=2)
            abstract_token = utils.replace_numbers_to_DIGIT(abstract_token, k=2)
            fulltext_token = utils.replace_numbers_to_DIGIT(fulltext_token, k=2)
            keywords_token = [utils.replace_numbers_to_DIGIT(kw, k=2) for kw in keywords_token]

            src_token = title_token + ["."] + abstract_token + fulltext_token
            tgts_token = keywords_token

            # split tgts by present/absent
            src_seq = src_token
            tgt_seqs = tgts_token
            present_tgt_flags, _, _ = utils.if_present_duplicate_phrases(src_seq, tgt_seqs)
            present_tgts = [tgt for tgt, present in zip(tgt_seqs, present_tgt_flags) if present]
            # BUG FIX: the original filtered with `if ~present`.  With plain
            # Python bools, ~True == -2 and ~False == -1 (both truthy), which
            # would classify *every* phrase as absent.  `not present` is
            # correct for both Python and NumPy booleans.
            absent_tgts = [tgt for tgt, present in zip(tgt_seqs, present_tgt_flags) if not present]

            doc_count += 1
            if present_tgts:
                present_doc_count += 1
            if absent_tgts:
                absent_doc_count += 1
            tgt_num.append(len(tgt_seqs))
            present_tgt_num.append(len(present_tgts))
            absent_tgt_num.append(len(absent_tgts))

            # write to output json
            tokenized_dict = {'src': src_token, 'tgt': tgts_token,
                              'present_tgt': present_tgts, 'absent_tgt': absent_tgts}
            json_dict['meng17_tokenized'] = tokenized_dict
            # output_json.write(json.dumps(json_dict) + '\n')

    print('#doc=%d, #present_doc=%d, #absent_doc=%d, #tgt=%d, #present=%d, #absent=%d, %%absent=%.3f%%'
          % (doc_count, present_doc_count, absent_doc_count,
             sum(tgt_num), sum(present_tgt_num), sum(absent_tgt_num),
             sum(absent_tgt_num) / sum(tgt_num) * 100.0))
```
### source=title+abstract
```
inspec
#doc=500, #present_doc=497, #absent_doc=381, #tgt=4913, #present=3858, #absent=1055
krapivin
#doc=460, #present_doc=437, #absent_doc=417, #tgt=2641, #present=1485, #absent=1156
nus
#doc=211, #present_doc=207, #absent_doc=195, #tgt=2461, #present=1263, #absent=1198
semeval
#doc=100, #present_doc=100, #absent_doc=99, #tgt=1507, #present=671, #absent=836
kp20k
#doc=19987, #present_doc=19048, #absent_doc=16357, #tgt=105181, #present=66595, #absent=38586
duc
#doc=308, #present_doc=308, #absent_doc=38, #tgt=2484, #present=2421, #absent=63
stackexchange
#doc=16000, #present_doc=13475, #absent_doc=10984, #tgt=43131, #present=24809, #absent=18322
```
### source=title+abstract+fulltext
```
inspec
#doc=500, #present_doc=497, #absent_doc=381, #tgt=4913, #present=3858, #absent=1055
krapivin
#doc=460, #present_doc=460, #absent_doc=238, #tgt=2641, #present=2218, #absent=423
nus
#doc=211, #present_doc=211, #absent_doc=126, #tgt=2461, #present=2158, #absent=303
semeval
#doc=100, #present_doc=100, #absent_doc=65, #tgt=1507, #present=1373, #absent=134
duc
#doc=308, #present_doc=308, #absent_doc=38, #tgt=2484, #present=2421, #absent=63
```
| github_jupyter |
# Assignment #1 - Multilayer Perceptron
Deep Learning / Spring 1399, Iran University of Science and Technology
---
**Please pay attention to these notes:**
<br><br>
- **Assignment Due:** 1398/12/19 23:59:00
- If you need any additional information, please review the assignment page on the course website.
- The items you need to answer are highlighted in red and the coding parts you need to implement are denoted by:
```
########################################
# Put your implementation here #
########################################
```
- We always recommend co-operation and discussion in groups for assignments. However, each student has to finish all the questions by him/herself. If our matching system identifies any sort of copying, you'll be responsible for consequences. So, please mention his/her name if you have a team-mate.
- Students who audit this course should submit their assignments like other students to be qualified for attending the rest of the sessions.
- Finding any sort of copying will zero down that assignment grade and also will be counted as two negative assignment for your final score.
- When you are ready to submit, please follow the instructions at the end of this notebook.
- If you have any questions about this assignment, feel free to drop us a line. You may also post your questions on the course Forum page.
- You must run this notebook on the Google Colab platform; it depends on the Google Colab VM for some of the dependencies.
- **Before starting to work on the assignment Please fill your name in the next section *AND Remember to RUN the cell.* **
<br>
Assignment Page: [https://iust-deep-learning.github.io/982/assignments/01_Multilayer_Perceptron](https://iust-deep-learning.github.io/982/assignments/01_Multilayer_Perceptron)
Course Forum: [https://groups.google.com/forum/#!forum/dl982/](https://groups.google.com/forum/#!forum/dl982/)
---
Fill your information here & run the cell
```
#@title Enter your information & "RUN the cell!!" { run: "auto" }
# Colab form fields — the trailing #@param markers are read by Colab to
# render the form; do not edit them.
student_id = 0#@param {type:"integer"}
student_name = "" #@param {type:"string"}
Your_Github_account_Email = "" #@param {type:"string"}

print("your student id:", student_id)
print("your name:", student_name)

from pathlib import Path
# Directory where this assignment's artifacts are saved for submission.
ASSIGNMENT_PATH = Path('asg01')
ASSIGNMENT_PATH.mkdir(parents=True, exist_ok=True)
```
## 1. MLP from Scratch
In this assignment, you will explore and implement the properties of a primary deep learning model called ***multilayer perceptron(MLP)***. Basically, the goal of an MLP is to learn a non-linear mapping from inputs to outputs. We can show this mapping as $y = f(x; \theta)$ , where $x$ is the input and $\theta$ is a vector of all the parameters in the network, which we're trying to learn.
As you see in the figure, every MLP network consists of an input layer, an output layer, and one or more hidden layers in between. Each layer consists of one or more cells called Neurons. In every Neuron, a dot product between the inputs of the cell and a weight vector is calculated. The result of the dot product then goes through a non-linear function (activation function e.g. $tanh$ or $sigmoid$) and gives us the output of the neuron.
<center>
<img src=https://github.com/mehrdad-naser-73/982/raw/master/static_files/assignments/asg01_assets/pics/MLP.jpg width="500" align="center">
</center>
<br>
Throughout this assignment, inputs will be matrices of shape $b \times M$, where $b$ is the batch size and $M$ is the number of input features. <br>
As for the equations, let's compute the output of the $i$th layer:
$$A^i = f(A^{i-1}w^i + b^i)$$
Imagine that $(i-1)$th and $i$th layer have sizes of $n$ and $p$ respectively. The dimensions of weight and bias will be as follows:
<br><br>
$$w^{n\times p} , b^{1\times p}$$
<br>
Numpy is the only package you're allowed to use for implementing your MLP in this assignment, so let's import it in the cell below!
```
import numpy as np
```
### 1.1 Activation Functions
Now let's implement some activation functions! Linear, Relu and Sigmoid are the functions that we'll need in this assignment. Note that you should also implement their derivatives since you'll need them later for back-propagation.
```
# Identity activation, provided as a reference implementation.
def linear(x, deriv=False):
    """Identity activation: f(x) = x; its derivative is 1 element-wise."""
    if deriv:
        return np.ones_like(x)
    return x
def relu(x, deriv=False):
    """Rectified linear unit, element-wise.

    Args:
        x: A numpy array of any shape
        deriv: True or False. determines if we want the derivative of the function or not.
    Returns:
        relu_out: A numpy array of the same shape as x.
                  relu(x) = max(x, 0); the derivative is 1 where x > 0 and
                  0 elsewhere (the subgradient at exactly 0 is taken to be
                  0 — TODO confirm against the reference test data).
    """
    x = np.asarray(x)
    if deriv:
        # indicator of the positive region, cast back to the input dtype
        relu_out = (x > 0).astype(x.dtype)
    else:
        relu_out = np.maximum(x, 0)
    return relu_out
def sigmoid(x, deriv=False):
    """Logistic sigmoid, element-wise.

    Args:
        x: A numpy array of any shape
        deriv: True or False. determines if we want the derivative of the function or not.
    Returns:
        sig_out: A numpy array of the same shape as x.
                 sigmoid(x) = 1 / (1 + exp(-x)); its derivative is
                 sigmoid(x) * (1 - sigmoid(x)).
    """
    # Cast to float so exp() never sees integer input; exp(-x) can overflow
    # for very negative x, but the resulting inf still yields the correct
    # limit value 0 (NumPy may emit a RuntimeWarning).
    s = 1.0 / (1.0 + np.exp(-np.asarray(x, dtype=float)))
    sig_out = s * (1.0 - s) if deriv else s
    return sig_out
# Test your implementation
# Downloads reference inputs/outputs and checks both the forward values and
# the derivatives of relu and sigmoid against them.
!wget -q https://github.com/iust-deep-learning/982/raw/master/static_files/assignments/asg01_assets/act_test.npy
x, relu_out, sig_out = np.load('act_test.npy', allow_pickle=True)
assert np.allclose( relu_out[0], relu(x, deriv=True), atol=1e-6, rtol=1e-5) and np.allclose(relu_out[1], relu(x, deriv=False), atol=1e-6, rtol=1e-5)
assert np.allclose(sig_out[0], sigmoid(x, deriv=True), atol=1e-6, rtol=1e-5) and np.allclose(sig_out[1], sigmoid(x, deriv=False), atol=1e-6, rtol=1e-5)
```
**Question**: Why do activation functions have to be non-linear? Could any non-linear function be used as an activation function?
<font color=red>Write your answers here</font>
### 1.2 Forward Propagation
Now let's implement our MLP class. This class handles adding layers and doing the forward propagation. Here are the attributes of this class:
<br> - __parameters__: A list of dictionaries in the form of _{'w': weight, 'b': bias}_ where _weight_ and _bias_ are weight matrix and bias vector of a layer.
<br>- __act_funcs__: A list of activation functions used in the corresponding layer.
<br>- __activations__: A list of matrices each corresponding to the output of each layer.
<br>- __layer_ins__: A list of matrices each corresponding to the input of each layer.
<br> Note that we store inputs and outputs of the layers because we'll need them later for implementing the back-propagation algorithm.
You only need to complete the _feed_forward_ function in the MLP class. This function performs forward propagation on the input.
```
class MLP:
    """A minimal multilayer perceptron built on NumPy arrays.

    Layers are stored as parallel lists: ``parameters`` holds each layer's
    weight/bias dict, ``act_funcs`` its activation, while ``layer_ins`` and
    ``activations`` cache the pre- and post-activation values of the most
    recent forward pass (needed later for back-propagation).
    """

    def __init__(self, input_dim):
        """
        Args:
            input_dim: An integer determining the input dimension of the MLP
        """
        self.input_dim = input_dim
        self.parameters = []
        self.act_funcs = []
        self.activations = []
        self.layer_ins = []

    def add_layer(self, layer_size, act_func=linear):
        """
        Add layers to the MLP using this function

        Args:
            layer_size: An integer determining the number of neurons in the layer
            act_func: A function applied to the units in the layer
        """
        # Size of the previous layer of the mlp (input_dim for the first layer)
        prev_size = self.input_dim if not self.parameters else self.parameters[-1]['w'].shape[-1]
        # Weight scale used in He initialization: sqrt(2 / fan_in)
        weight_scale = np.sqrt(2 / prev_size)
        # initializing the weights and bias of the layer
        weight = np.random.normal(size=(prev_size, layer_size)) * weight_scale
        bias = np.ones(layer_size) * 0.1
        # Add weights and bias of the layer to the parameters of the MLP
        self.parameters.append({'w': weight, 'b': bias})
        # Add the layer's activation function
        self.act_funcs.append(act_func)

    def feed_forward(self, X):
        """
        Propagate the inputs forward using this function

        Args:
            X: A numpy array of shape (b, input_dim) where b is the batch size and input_dim is the dimension of the input
        Returns:
            mlp_out: A numpy array of shape (b, out_dim) where b is the batch size and out_dim is the dimension of the output
        """
        # reset the per-pass caches
        self.activations = []
        self.layer_ins = []
        mlp_out = X
        for layer, act in zip(self.parameters, self.act_funcs):
            # pre-activation: A^{i-1} w^i + b^i  (cached for back-prop)
            z = mlp_out @ layer['w'] + layer['b']
            self.layer_ins.append(z)
            # post-activation output of the layer (cached for back-prop)
            mlp_out = act(z)
            self.activations.append(mlp_out)
        return mlp_out
# Test your implementation
import pickle
!wget -q https://github.com/iust-deep-learning/982/raw/master/static_files/assignments/asg01_assets/mlptest.pkl

# Shape checks on a random batch: layer_ins/activations must hold one
# (batch, layer_size) array per layer, and the output equals the last activation.
x = np.random.normal(size=(512, 100))
mlp = MLP(100)
mlp.add_layer(64, relu)
mlp.add_layer(32, relu)
out = mlp.feed_forward(x)
assert len(mlp.parameters) == 2
assert mlp.activations[0].shape == tuple([512, 64]) and mlp.layer_ins[0].shape == tuple([512, 64])
assert mlp.activations[1].shape == tuple([512, 32]) and mlp.layer_ins[1].shape == tuple([512, 32])
assert out.shape == tuple([512, 32])
assert np.array_equal(mlp.activations[-1], out)

# Value check against downloaded reference parameters/outputs.
x, out, parameters = pickle.load(open('mlptest.pkl', 'rb'))
mlp.parameters = parameters
assert np.allclose( out, mlp.feed_forward(x), atol=1e-6, rtol=1e-5)
```
__Question__: In the _add_layer_ function of the MLP class, we used a method called _He initialization_ to initialize the weights. Explain how this method can help with the training of an MLP?
<font color=red>Write your answers here</font>
### 1.3 Loss Function
In the previous sections, we implemented an MLP that accepts an input $x$ and propagates it forward and produces an output $\hat{y}$. The next step in implementing our MLP is to see how good our network's output $\hat{y}$ is compared to the target output $y$! This is where the loss function comes in. This function gets $y$ and $\hat{y}$ as its inputs and returns a scaler as its output. This scaler indicates how good current parameters of the network are. <br>
The choice of this function depends on the task, e.g. regression or binary classification. Since you'll be doing a multiclass classification later in this assignment, let's implement the cross-entropy function. Cross-entropy is the function mostly used for classification tasks, but to use it in a multiclass setting, the network's outputs must be passed through a softmax activation function and the target output must be in one-hot encoded format.<br>
<center>
<img src=https://github.com/mehrdad-naser-73/982/raw/master/static_files/assignments/asg01_assets/pics/Capture.PNG width="500" align="center">
</center>
<br>
$$Softmax(\hat{y})_i = \frac{e^{\hat{y}_i}}{\sum^{C}_j e^{\hat{y}_j}} $$ <br>
$$ Cross Entropy(y, \hat{y}) = -\sum_i^C {y_i log(Softmax(\hat{y})_i)}$$
Where $y$ and $\hat{y}$ are two one-hot encoded vectors. $y$ is a single target label and $\hat{y}$ is a single output.<br>
Now let's first implement the softmax activation function! Note that the above formulas are for a single sample, however you should implement the batch version!
```
def softmax(y_hat):
    """
    Apply softmax to the inputs.

    Args:
        y_hat: A numpy array of shape (b, out_dim) where b is the batch size and
            out_dim is the output dimension of the network (number of classes).
    Returns:
        soft_out: A numpy array of shape (b, out_dim) whose rows are
            non-negative and sum to 1.
    """
    # Subtract the row-wise maximum before exponentiating. Softmax is
    # invariant to adding a constant to every logit, and this standard trick
    # prevents overflow for large logits (e.g. 1000).
    shifted = y_hat - np.max(y_hat, axis=-1, keepdims=True)
    exps = np.exp(shifted)
    soft_out = exps / np.sum(exps, axis=-1, keepdims=True)
    return soft_out
# Verify the softmax implementation: shapes are preserved, every row sums
# to 1, and constant rows map to a uniform distribution.
logits = np.random.normal(size=(100, 5))
probs = softmax(logits)
assert logits.shape == probs.shape
assert all([(row_sum - 1.) < 1e-5 for row_sum in np.sum(probs, axis=1)])
logits = np.array([[10, 10, 10, 10], [0, 0, 0, 0]])
uniform = np.array([[0.25, 0.25, 0.25, 0.25], [0.25, 0.25, 0.25, 0.25]])
assert np.allclose(softmax(logits), uniform, atol=1e-6, rtol=1e-5)
```
Now implement the categorical cross-entropy function ("categorical" refers to multiclass classification). Note that the inputs are in batches, so the loss of a batch of samples will be the average of losses of samples in the batch.
```
def categorical_cross_entropy(y, y_soft):
    """
    Compute the categorical cross entropy loss.

    Args:
        y: A numpy array of shape (b, out_dim). One-hot encoded target labels.
        y_soft: A numpy array of shape (b, out_dim). Output of the softmax
            activation function (rows are probability distributions).
    Returns:
        loss: A scalar of type float. Average loss over the batch.
    """
    # Clip probabilities away from zero so log() never yields -inf/nan for
    # (near-)zero entries; a floor of 1e-12 is far below any reportable loss.
    clipped = np.clip(y_soft, 1e-12, 1.0)
    # Per-sample loss is the negative log-probability of the true class
    # (the one-hot y selects a single term); np.mean averages over the batch.
    loss = float(np.mean(-np.sum(y * np.log(clipped), axis=-1)))
    return loss
# Check categorical cross entropy against a pre-computed reference value.
targets = np.array([[1, 0, 0], [0, 0, 1], [1, 0, 0], [0, 1, 0]])
logits = np.array([[10, 1, 1], [0, -1, 9], [100, -9, 9], [0.1, 12, 10]])
probs = softmax(logits)
assert round(categorical_cross_entropy(targets, probs), 3) == 0.032
```
Great! You have implemented both softmax and categorical cross-entropy functions. Now instead of applying softmax activation function to the output layer of the MLP and then using categorical cross-entropy as loss function, we can merge these two steps and make a softmax categorical cross-entropy loss function and use linear activation function in the output layer! The reason behind this is that the gradient of the softmax categorical cross-entropy loss with respect to the MLP's output is efficiently calculated as:
<br>
$$ Softmax(\hat{y}) - y$$
for a single sample. Here $\hat{y}$ is the MLP's output and $y$ is the target output (labels).<br>
Now let's implement the softmax categorical cross-entropy function!
```
def softmax_categorical_cross_entropy(y, y_hat, return_grad=False):
    """
    Softmax and categorical cross entropy fused into one loss function.

    Args:
        y: A numpy array of shape (b, out_dim). Target labels of network.
        y_hat: A numpy array of shape (b, out_dim). Raw (linear) output of the
            network's last layer.
        return_grad: If True return gradient of the loss with respect to y_hat.
            If False just return the loss.
    Returns:
        loss: A scalar of type float (average loss over a batch), or the
            gradient array of shape (b, out_dim) when return_grad is True.
    """
    probabilities = softmax(y_hat)
    if return_grad:
        # Gradient of the fused loss w.r.t. the logits is (softmax(y_hat) - y),
        # divided by the batch size because the loss is a batch average.
        return (probabilities - y) / y.shape[0]
    return categorical_cross_entropy(y, probabilities)
```
### 1.4 Back-Propagation
After calculating the loss of the MLP, we need to propagate this loss back to the hidden layers in order to calculate the gradient of the loss with respect to the weights and biases of the network. The algorithm used to calculate these gradients is called back-propagation or simply backprop. Backprop uses chain rule to compute the gradients of the network parameters. Now let's go over the steps of this algorithm (This is the fully matrix-based version):
- calculate gradient of the loss with respect to $\hat{y}$
<br> $g \longleftarrow \nabla_\hat{y} Loss$
- for each layer $L$ starting from the output layer: <br>
   $g \longleftarrow g \odot f^\prime(input^{(L)})$   ($input^{(L)}$ is the input of $L$th layer and $f$ is the activation function)<br>
   $\nabla_{b^{(L)}}Loss \longleftarrow \sum_i^{batch} {g_i}$ <br>
   $\nabla_{w^{(L)}}Loss \longleftarrow output^{(L-1)T}g$   ($output^{(L-1)}$ is the output of $(L-1)$th layer ) <br>
   $g \longleftarrow gw^{(L)T}$
Check [this](http://neuralnetworksanddeeplearning.com/chap2.html) for a detailed explanation of the back-propagation algorithm.
Now implement the back-propagation algorithm!
```
def mlp_gradients(mlp, loss_function, x, y):
"""
Compute the gradient of loss with respect to mlp's weights and biases
Args:
mlp: An object of MLP class
loss_function: A function used as loss function of the mlp
x: A numpy array of shape (batch_size, input_dim). The mlp's input
y: A numpy array of shape (batch_size, num_classes). Target labels
Returns:
gradients: A list of dictionaries {'w': dw, 'b': db} corresponding to the dictionaries in mlp.parameters
dw is the gradient of loss with respect to the weights of the layer
db is the gradient of loss with respect to the bias of the layer
"""
gradients = []
### get the output of the network
# NOTE(review): assumes mlp.feed_forward(x) was called beforehand so that
# mlp.activations holds the activations of the most recent forward pass.
y_hat = mlp.activations[-1]
num_layers = len(mlp.parameters)
### compute gradient of the loss with respect to network output
g = loss_function(y, y_hat, return_grad=True)
### You'll need the input in the last step of backprop so let's make a new list with x in the beginning
# activations[i] is then the INPUT of layer i, and activations[i+1] its output.
activations = [x] + mlp.activations
# Walk the layers from the output back to the input (backprop proper).
for i in reversed(range(num_layers)):
########################################
#         Put your implementation here #
########################################
# TODO (per the algorithm above): multiply g by the activation derivative,
# sum g over the batch for db, use activations[i].T @ g for dw, then
# propagate g through the layer's weights; prepend {'w': dw, 'b': db}
# so gradients line up with mlp.parameters.
return gradients
# Test your implementation
import pickle
# Fetch reference gradients pre-computed for a fixed MLP, input and labels.
!wget -q https://github.com/iust-deep-learning/982/raw/master/static_files/assignments/asg01_assets/grad_test.zip
!unzip grad_test.zip
x = np.load('grad_x.npy')
y = np.load('grad_y.npy')
mlp = pickle.load(open('grad_mlp_test.pkl', 'rb'))
expected_grads = pickle.load(open('grads', 'rb'))
# Run a forward pass first so mlp.activations is populated for backprop.
mlp.feed_forward(x)
grads = mlp_gradients(mlp, softmax_categorical_cross_entropy, x, y)
# Every layer's weight and bias gradients must match the reference values.
assert all([np.allclose(eg['w'], g['w'], atol=1e-6, rtol=1e-5) and
np.allclose(eg['b'], g['b'], atol=1e-6, rtol=1e-5)
for eg, g in zip(expected_grads, grads)])
```
### 1.5 Optimization
Now that we've computed the gradients of the parameters of our MLP, we should optimize these parameters using the gradients in order for the network to produce better outputs. <br>
Gradient descent is an optimization method that iteratively moves the parameters in the opposite direction of their gradients. Below is the update rule for gradient descent:
<br><br>
$$ w \leftarrow w - \alpha \nabla_wLoss$$
<br>
Where $\alpha$ is the learning rate hyperparameter.<br>
There are three main variants of gradient descent: stochastic gradient descent, mini-batch gradient descent and batch gradient descent. <br>
Mini-batch gradient descent is the most used variant in practice and that's what we'll use in this assignment
Let's perform a step of gradient descent on a simple MLP!
```
# Toy example: one step of vanilla gradient descent on a small random MLP.
x = np.random.normal(size=(16, 10))
# Identity matrix as labels: each of the 16 samples gets a distinct class.
y = np.eye(16)
lr = 0.1
### Define the mlp
mlp = MLP(x.shape[-1])
mlp.add_layer(16)
mlp.add_layer(8)
mlp.add_layer(y.shape[-1])
### compute mlp's output
y_hat = mlp.feed_forward(x)
### print current loss
print("loss before gradient descent: ", softmax_categorical_cross_entropy(y, y_hat))
### Compute gradients of the mlp's parameters
grads = mlp_gradients(mlp, softmax_categorical_cross_entropy, x, y)
### perform gradient descent: w <- w - lr * dw, b <- b - lr * db
mlp.parameters = [{'w':p['w']-lr*g['w'], 'b':p['b']-lr*g['b']} for g, p in zip(grads, mlp.parameters)]
### compute mlp's output again after gradient descent
y_hat = mlp.feed_forward(x)
### print loss after gradient descent
print("loss after gradient descent: ", softmax_categorical_cross_entropy(y, y_hat))
```
__Question__: Do gradient descent steps always decrease the loss? Why? (Hint: toy with the learning rate in the example above!)
<font color=red>Write your answers here</font>
Instead of using gradient descent, we'll be using an extension of it called gradient descent with momentum. So instead of updating the parameters based only on current gradients, we take into account the gradients from previous steps! This way, parameter updates will have lower variance and convergence will be faster and smoother.
$$ v \leftarrow \gamma v - \alpha \nabla_wLoss$$
$$ w \leftarrow w + v$$
Where $w$ denotes the MLP's weights and $v$ is called the velocity, which is essentially a weighted average of all previous gradients.<br>
Here $\gamma$ determines how fast effects of the previous gradients fade and $\alpha$ is the learning rate.
Now let's implement the SGD class!
```
class SGD:
    """Mini-batch gradient descent optimizer with classical momentum.

    Per parameter tensor the update rule is:
        v <- momentum * v - lr * grad
        w <- w + v
    """

    def __init__(self, lr=0.01, momentum=0.9):
        """
        Args:
            lr: learning rate of the SGD optimizer
            momentum: momentum of the SGD optimizer
        """
        self.lr = lr
        self.momentum = momentum
        ### initialize velocity
        # Velocity is created lazily on the first step() call (the parameter
        # shapes are unknown here). It mirrors mlp.parameters: a list of
        # {'w': ..., 'b': ...} dictionaries.
        self.velocity = []

    def step(self, parameters, grads):
        """
        Perform a gradient descent step.

        Args:
            parameters: A list of dictionaries {'w': weights , 'b': bias}. MLP's parameters.
            grads: A list of dictionaries {'w': dw, 'b': db}. Gradient of MLP's
                parameters — the output of the "mlp_gradients" function.
        Returns:
            Updated_parameters: A list of dictionaries {'w': weights , 'b': bias}.
                MLP's parameters after one momentum-SGD step.
        """
        # Lazily allocate zero velocity buffers matching the parameter shapes.
        if not self.velocity:
            self.velocity = [{'w': np.zeros_like(p['w']), 'b': np.zeros_like(p['b'])}
                             for p in parameters]
        Updated_parameters = []
        for v, p, g in zip(self.velocity, parameters, grads):
            # v <- momentum * v - lr * grad  (decaying average of past gradients)
            v['w'] = self.momentum * v['w'] - self.lr * g['w']
            v['b'] = self.momentum * v['b'] - self.lr * g['b']
            # w <- w + v
            Updated_parameters.append({'w': p['w'] + v['w'], 'b': p['b'] + v['b']})
        return Updated_parameters
```
## 2. Classifying Kannada Handwritten Digits
In this part of the assignment, you'll use the MLP you implemented in the first part to classify Kannada handwritten digits!<br> This dataset consists of 60000 images of handwritten digits in Kannada script.<br>
You can check [this](https://github.com/vinayprabhu/Kannada_MNIST) github repository for more information about the dataset.
let's download the dataset:
```
# Download and extract the Kannada-MNIST training data (train.csv).
!wget -q https://github.com/iust-deep-learning/982/raw/master/static_files/assignments/asg01_assets/kannada.zip
!unzip kannada.zip
import pandas as pd
import matplotlib.pyplot as plt
train = pd.read_csv('train.csv')
# First column is the digit label; the remaining columns are flattened pixels.
train.head()
```
As you can see, the first column of the dataframe is the label, and the rest of the columns are the pixels. Let's put the dataset in numpy arrays. Also, we must normalize the pixel values to [0,1] range to help the convergence of our MLP model.
```
# Column 0 holds the labels; scale pixel values from [0, 255] to [0, 1]
# to help convergence.
x = train.values[:, 1:]/255.
y = train.values[:, 0]
# Show one sample; pixels are flattened, so reshape back to 28x28.
plt.imshow(x[10000].reshape(28, 28))
```
As we are doing a multiclass classification, the labels must be in one-hot encoded format. <br>
```
def one_hot_encoder(y):
    """Convert an array of integer class labels into one-hot encoded rows."""
    labels = y.reshape(-1)
    num_classes = np.max(labels) + 1
    # Row i of the identity matrix is the one-hot vector for class i, so
    # fancy-indexing the identity with the labels yields the full encoding.
    return np.eye(num_classes)[labels]
```
Now let's transform the labels into one-hot encoded format!
```
# Convert integer labels to one-hot rows, as required by the cross-entropy loss.
y = one_hot_encoder(y)
```
We've implemented the _get_mini_batches_ function below. This function transforms the dataset into multiple batches. We need this function because we'll be doing mini-batch gradient descent.
```
import math
def get_mini_batches(x, y, batch_size, shuffle=True):
    """Yield successive (x, y) mini-batches covering the whole dataset once.

    Args:
        x: array of shape (num_samples, num_features)
        y: array of shape (num_samples, num_classes)
        batch_size: samples per batch; the final batch may be smaller
        shuffle: if True, visit the samples in a fresh random order
    Yields:
        (x_batch, y_batch) tuples.
    """
    idx = np.arange(len(x))
    # Bug fix: the shuffle flag was previously ignored and the data was always
    # shuffled; now shuffle=False yields batches in the original order.
    if shuffle:
        np.random.shuffle(idx)
    steps = math.ceil(len(x)/batch_size)
    x, y = x[idx, :], y[idx, :]
    for i in range(steps):
        yield (x[i*batch_size: (i+1)*batch_size], y[i*batch_size: (i+1)*batch_size])
```
Evaluation metrics are used to measure the performance of a model after training. The choice of this metric depends on factors like the nature of the task (e.g classification or regression) or a dataset's characteristics (e.g class imbalance). For multiclass classification with balanced classes, accuracy is a reasonable choice.
We've implemented the accuracy function in the cell below:
```
def accuracy(y, y_hat):
    """Fraction of samples whose predicted class matches the target class."""
    true_classes = np.argmax(y, axis=-1)
    predicted_classes = np.argmax(y_hat, axis=-1)
    return (true_classes == predicted_classes).mean()
```
Now let's split the dataset into train and validation sets:
```
from sklearn.model_selection import train_test_split
# Stratified split keeps class proportions similar in train and validation sets.
x_train, x_val, y_train, y_val = train_test_split(x, y, stratify=y)
```
Everything is now ready for training our MLP! Create your MLP model in the cell below. The choice of the number of layers, their sizes and their activation functions is up to you.
```
# Build the classifier on top of the flattened pixel input.
mlp = MLP(x_train.shape[-1])
########################################
#         Put your implementation here #
########################################
# TODO: add hidden layers via mlp.add_layer(...) and a final layer sized to
# the number of classes (y_train.shape[-1]) with a linear activation, to
# match the fused softmax cross-entropy loss used during training.
```
Let's set some hyper-parameters. Feel free to change these hyper-parameters however you see fit!
```
# Training hyper-parameters (feel free to tune).
epochs = 10
Batch_size = 1024  # samples per mini-batch
sgd_lr = 0.1  # learning rate for the SGD optimizer
sgd_momentum = 0.9  # momentum coefficient for the SGD optimizer
```
Now let's train the network!
```
from tqdm import tqdm_notebook
### Define the optimizer
optimizer = SGD(lr=sgd_lr, momentum=sgd_momentum)
# Per-epoch history of losses and accuracies, collected for plotting later.
train_loss, val_loss, train_accs, val_accs = [], [], [], []
for i in range(epochs):
# A fresh (re-shuffled) batch generator is created for every epoch.
mini_batches = get_mini_batches(x_train, y_train, Batch_size)
for xx, yy in tqdm_notebook(mini_batches, desc='epoch {}'.format(i+1)):
### forward propagation
mlp.feed_forward(xx)
### compute gradients
grads = mlp_gradients(mlp, softmax_categorical_cross_entropy, xx, yy)
### optimization
mlp.parameters = optimizer.step(mlp.parameters, grads)
# End-of-epoch evaluation on the full train and validation sets.
y_hat = mlp.feed_forward(x_train)
y_hat_val = mlp.feed_forward(x_val)
val_loss.append(softmax_categorical_cross_entropy(y_val, y_hat_val))
train_loss.append(softmax_categorical_cross_entropy(y_train, y_hat))
train_acc = accuracy(y_train, y_hat)*100
val_acc = accuracy(y_val, y_hat_val)*100
train_accs.append(train_acc)
val_accs.append(val_acc)
print("training acc: {:.2f} %".format(train_acc))
print("test acc: {:.2f} %".format(val_acc))
```
Let's visualize accuracy and loss for train and validation sets during training:
```
# Plot per-epoch loss curves for the training and validation sets.
for curve, curve_label in ((train_loss, 'train'), (val_loss, 'val')):
    plt.plot(list(range(len(curve))), curve, label=curve_label)
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend()
plt.show()

# Plot per-epoch accuracy curves for the training and validation sets.
for curve, curve_label in ((train_accs, 'train'), (val_accs, 'val')):
    plt.plot(list(range(len(curve))), curve, label=curve_label)
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend()
plt.show()
```
__Question__: Looking at loss and accuracy plots, how would you describe your model in terms of bias and variance?
<font color=red>Write your answers here</font>
```
```
# Submission
Congratulations! You finished the assignment & you're ready to submit your work. Please follow the instructions:
1. Check and review your answers. Make sure all of the cell outputs are what you want.
2. Select File > Save.
3. Run **Make Submission** cell, It may take several minutes and it may ask you for your credential.
4. Run **Download Submission** cell to obtain your submission as a zip file.
5. Grab the downloaded file (`dl_asg01__xx__xx.zip`) and upload it via https://forms.gle/2dogVcZhfBvBC1aM6
**Note:** We need your GitHub token to create (if it doesn't already exist) a new repository to store the learned model data. A Google Drive token also enables us to download the current notebook & create the submission. If you are interested, feel free to check our code.
## Make Submission (Run the cell)
```
#@title
# Submission helper: pushes the assignment results to a personal GitHub repo
# and packages the notebook (ipynb/html/txt) into a zip file for upload.
# NOTE(review): relies on notebook-level globals defined in earlier cells
# (student_id, student_name, Your_Github_account_Email, ASSIGNMENT_PATH).
! pip install -U --quiet PyDrive > /dev/null
! wget -q https://github.com/github/hub/releases/download/v2.10.0/hub-linux-amd64-2.10.0.tgz
import os
import time
import yaml
import json
from google.colab import files
from IPython.display import Javascript
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials
asg_name = 'assignment_1'
# Javascript snippet that forces Jupyter to save a checkpoint of the notebook.
script_save = '''
require(["base/js/namespace"],function(Jupyter) {
Jupyter.notebook.save_checkpoint();
});
'''
repo_name = 'iust-deep-learning-assignments'
submission_file_name = 'dl_asg01__%s__%s.zip'%(student_id, student_name.lower().replace(' ', '_'))
# Install GitHub's "hub" CLI and configure it with the student's identity.
! tar xf hub-linux-amd64-2.10.0.tgz
! cd hub-linux-amd64-2.10.0/ && chmod a+x install && ./install
! hub config --global hub.protocol https
! hub config --global user.email "$Your_Github_account_Email"
! hub config --global user.name "$student_name"
! hub api --flat -X GET /user
! hub api -F affiliation=owner -X GET /user/repos > repos.json
# Create the submission repository if it does not exist yet, otherwise reuse it.
repos = json.load(open('repos.json'))
repo_names = [r['name'] for r in repos]
has_repository = repo_name in repo_names
if not has_repository:
get_ipython().system_raw('! hub api -X POST -F name=%s /user/repos > repo_info.json' % repo_name)
repo_info = json.load(open('repo_info.json'))
repo_url = repo_info['clone_url']
else:
for r in repos:
if r['name'] == repo_name:
repo_url = r['clone_url']
# Embed the OAuth token in the clone URL so git push needs no password prompt.
stream = open("/root/.config/hub", "r")
token = list(yaml.load_all(stream))[0]['github.com'][0]['oauth_token']
repo_url_with_token = 'https://'+token+"@" +repo_url.split('https://')[1]
# Clone the repo, copy the assignment results in, commit, and push.
! git clone "$repo_url_with_token"
! cp -r "$ASSIGNMENT_PATH" "$repo_name"/
! cd "$repo_name" && git add -A
! cd "$repo_name" && git commit -m "Add assignment 01 results"
! cd "$repo_name" && git push -u origin master
# Metadata bundled with the submission zip.
sub_info = {
'student_id': student_id,
'student_name': student_name,
'repo_url': repo_url,
'asg_dir_contents': os.listdir(str(ASSIGNMENT_PATH)),
'dateime': str(time.time()),
'asg_name': asg_name
}
json.dump(sub_info, open('info.json', 'w'))
Javascript(script_save)
# Fetch the current notebook from Google Drive and convert it to .py and .html.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)
file_id = drive.ListFile({'q':"title='%s.ipynb'"%asg_name}).GetList()[0]['id']
downloaded = drive.CreateFile({'id': file_id})
downloaded.GetContentFile('%s.ipynb'%asg_name)
! jupyter nbconvert --to script "$asg_name".ipynb > /dev/null
! jupyter nbconvert --to html "$asg_name".ipynb > /dev/null
# Zip everything into the final submission artifact.
! zip "$submission_file_name" "$asg_name".ipynb "$asg_name".html "$asg_name".txt info.json > /dev/null
print("##########################################")
print("Done! Submisson created, Please download using the bellow cell!")
```
## Download Submission (Run the cell)
```
# Trigger a browser download of the zip created by the submission cell.
files.download(submission_file_name)
```
| github_jupyter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.