code
stringlengths 2.5k
150k
| kind
stringclasses 1
value |
|---|---|
# Statistics & Data Analysis
## Req
#### Import Requirements
##### HTML formatting
```
from IPython.display import HTML

# Single consolidated stylesheet for DataFrame display. The original cell
# built this up in four incremental HTML() calls (the first of which was
# missing its closing brace); the final rule set below is a superset of all
# of them, so one call is enough.
HTML("""<style type="text/css">
table.dataframe td, table.dataframe th {
    max-width: none;
    white-space: normal;
    line-height: normal;
    padding: 0.3em 0.5em;
}
</style>
""")

import numpy as np
import pandas as pd
import scipy
import matplotlib.pyplot as plt
from pandas.api.types import CategoricalDtype
# NOTE(review): the wildcard imports below make it hard to tell where names
# such as `wilcoxon`, `shapiro` and `ggplot` come from; prefer explicit
# imports when this notebook is cleaned up.
from plotnine import *
from scipy.stats import *
import scikit_posthocs as sp

# Load the pre-processed dataset produced upstream. Assumed schema: a
# 'treatment' column plus the *VAR2/*WT2 and total* measurement columns
# used below -- TODO confirm against the notebook that writes NewCols.csv.
data = pd.read_csv("./NewCols.csv")
```
## Calculating the differences between the normalized values
```
# Split the dataset by treatment arm and save each subset.
data_control = data[data["treatment"] == "baseline"]
data_control.to_csv("./control.csv")
data_treatment = data[data["treatment"] == "intravenous LPS"]
# BUG FIX: this previously wrote data_control to lps.csv; the LPS file
# should contain the treatment rows.
data_treatment.to_csv("./lps.csv")

# Work on an explicit copy so the diff_* column assignments below do not
# trigger SettingWithCopyWarning / silently mutate a view of `data`.
procData = data_treatment.copy()

# Paired differences (treatment - control) for each measured quantity.
# Column order matters: the loc slice "diff_AVAR2":"diff_totalVar" used
# later relies on this insertion order.
# NOTE(review): this assumes the control and treatment subsets have the
# same number of rows in matching (paired) order -- TODO confirm upstream.
for col in ["AVAR2", "CVAR2", "AWT2", "CWT2", "total2",
            "totalA", "totalC", "totalWT", "totalVar"]:
    procData["diff_" + col] = (
        np.array(data_treatment[col]) - np.array(data_control[col])).tolist()

procData.to_csv("./procData.csv")

# Carry the control group's test-group labels alongside the treatment rows
# (rename on a fresh frame avoids the inplace-rename-on-a-slice warning).
newDF = data_control[["testGroup", "tg2"]].rename(
    columns={"testGroup": "c_tg", "tg2": "c_tg2"})
newDF.index = procData.index
procData = pd.concat([procData, newDF], axis=1)
```
#### Difference Table
```
# Show every row of procData when a frame is displayed (no truncation).
pd.set_option('display.max_rows', procData.shape[0]+1)
# Column-slice all of the paired-difference columns (relies on the diff_*
# columns having been inserted contiguously, AVAR2 first, totalVar last).
diff_data = procData.loc[ :,"diff_AVAR2":"diff_totalVar" ]
diff_data.to_csv("./diffData.csv")
# Summary statistics of the differences. These are bare expressions:
# in a notebook each one's value is rendered; in a plain script they are
# computed and discarded.
diff_data.describe()
diff_data.var()
diff_data.std()
diff_data.skew()
diff_data.kurtosis().tolist()
diff_data.kurtosis()
```
## Graph Data -
```
from plotnine import *

# For each metric: box plot by treatment with jittered points, followed by a
# paired Wilcoxon signed-rank test (alpha = 0.05) between the two arms.
# The repeated `a = 0.05` assignments are notebook leftovers; `a` is only
# used in the printed comparisons further down.
ggplot(data, aes(x='treatment', y='AWT2') ) + geom_boxplot() + geom_jitter(data,aes(colour='treatment',shape='treatment'))
a = 0.05
wilcoxon(data_control["AWT2"],data_treatment["AWT2"])
ggplot(data, aes(x='treatment', y='CWT2') ) + geom_boxplot() + geom_jitter(data,aes(colour='treatment',shape='treatment'))
a = 0.05
wilcoxon(data_control["CWT2"],data_treatment["CWT2"])
ggplot(data, aes(x='treatment', y='AVAR2') ) + geom_boxplot() + geom_jitter(data,aes(colour='treatment',shape='treatment'))
a = 0.05
wilcoxon(data_control["AVAR2"],data_treatment["AVAR2"])
ggplot(data, aes(x='treatment', y='CVAR2') ) + geom_boxplot() + geom_jitter(data,aes(colour='treatment',shape='treatment'))
a = 0.05
wilcoxon(data_control["CVAR2"],data_treatment["CVAR2"])
# Trim total2 to its 5th-95th percentile range before plotting.
removed_outliers = data.total2.between(data.total2.quantile(.05), data.total2.quantile(.95))
data_total= data[removed_outliers]
ggplot(data_total, aes(x='treatment',y="total2" ), ) + geom_boxplot(outlier_shape = "") + geom_jitter(data_total,aes(y="total2",colour='treatment',shape='treatment') ) + ggtitle("QQ Plot of IRAK-1 expression per GbP") + xlab("Treatment") + ylab("Total IRAK-1 Levels per Gigabase pair") + ylim(data_total.total2.quantile(.05), data_total.total2.quantile(.95))
a = 0.05
# One-sample Wilcoxon: is the median paired difference in total2 zero?
wilcoxon(diff_data["diff_total2"])
# Same percentile trim on the paired differences themselves.
removed_outliers_diffData = diff_data.diff_total2.between(diff_data.diff_total2.quantile(.05), diff_data.diff_total2.quantile(.95))
difftotalData=diff_data[removed_outliers_diffData]
ggplot(difftotalData, aes( x='0',y='diff_total2') ) + geom_boxplot() + geom_point(color="red") + ylim(difftotalData.diff_total2.quantile(.05), difftotalData.diff_total2.quantile(.95)) + ggtitle("QQ Plot of changes in IRAK-1 levels per Gbp") + xlab("Treatment") + ylab("Changes in IRAK-1 Levels per Gigabase pair")
# Pair treatment total2 with control total2 per subject for a scatter +
# linear fit (assumes the two subsets are row-aligned -- TODO confirm).
data_plot = data_treatment
controlData = data_control['total2']
controlData
data_plot["ctrl_total2"]=controlData.to_list()
data_plot
from sklearn.linear_model import LinearRegression
model = LinearRegression().fit(data_plot.total2.to_numpy().reshape((-1, 1)), data_plot.ctrl_total2)
# R^2 of the treatment-vs-control total2 regression.
r_sq= model.score(data_plot.total2.to_numpy().reshape((-1, 1)), data_plot.ctrl_total2)
print('coefficient of determination:', r_sq)
print('intercept:', model.intercept_)
print('slope:', model.coef_)
ggplot(data_plot,aes(x='total2',y='ctrl_total2') ) + geom_point() + geom_smooth(method='lm')
from sklearn import linear_model
lm = linear_model.LinearRegression()
# Shapiro-Wilk normality checks on both arms and on the differences
# (motivates using the non-parametric Wilcoxon tests above).
shapiro_test = shapiro(data_control['total2'])
shapiro_test
shapiro_test = shapiro(data_treatment['total2'])
shapiro_test
shapiro_test = shapiro(diff_data['diff_total2'])
shapiro_test
ggplot(data, aes(x='treatment', y='totalVar') ) + geom_boxplot() + geom_jitter(data,aes(colour='treatment',shape='treatment'))
a = 0.05
wilcoxon(diff_data["diff_totalVar"])
ggplot(data, aes(x='treatment', y='totalWT') ) + geom_boxplot() + geom_jitter(data,aes(colour='treatment',shape='treatment'))
a = 0.05
wilcoxon(diff_data["diff_totalWT"])
ggplot(data, aes(x='treatment', y='totalA') ) + geom_boxplot() + geom_jitter(data,aes(colour='treatment',shape='treatment'))
a = 0.05
wilcoxon(diff_data["diff_totalA"])
ggplot(data, aes(x='treatment', y='totalC') ) + geom_boxplot() + geom_jitter(data,aes(colour='treatment',shape='treatment'))
a = 0.05
wilcoxon(diff_data["diff_totalC"])
```
## Statistics
### Total 2 Comparison
#### Wilcoxon non-parametric
```
# Paired Wilcoxon signed-rank test on total2 at significance level alpha.
a = 0.05
w, p = wilcoxon(data_control["total2"], data_treatment["total2"])
print(w, p)
if p < a:
    print("As P " + str(p) + " is less than a: " + str(a))
    print("we reject the Null Hypothesis.")
    print("There is a significant difference between the groups")
else:
    # BUG FIX: `p` is a float and was previously concatenated to a str
    # without str(), raising TypeError whenever this branch ran.
    print("As P " + str(p) + " is larger than a: " + str(a))
    print("we FAIL TO reject the Null Hypothesis.")
    print("There is NOT a significant difference between the groups")
```
#### Friedman's ANOVA
```
sp.posthoc_nemenyi_friedman(diff_data)
```
Friedman Test
### other
```
# Paired Wilcoxon tests on the A/C ratios (totals, VAR2 and WT2 variants),
# alpha = 0.05 throughout.
a = 0.05
w, p = wilcoxon((data_control["totalA"] / data_control["totalC"]),
                (data_treatment["totalA"] / data_treatment["totalC"]))
print(w, p)
w, p = wilcoxon((data_control["AVAR2"] / data_control["CVAR2"]),
                (data_treatment["AVAR2"] / data_treatment["CVAR2"]))
print(w, p)
w, p = wilcoxon((data_control["AWT2"] / data_control["CWT2"]),
                (data_treatment["AWT2"] / data_treatment["CWT2"]))
print(w, p)

# Distribution plots of the treatment-group metadata columns.
ggplot()+geom_histogram(procData,aes(x="tg2"))
ggplot()+geom_histogram(procData,aes(x="mutant"))
ggplot()+geom_bar(procData,aes(x="spliceVariant",fill="mutant"))
ggplot()+geom_col(procData,aes(x="spliceVariant",y="diff_totalA/diff_totalC",fill="mutant"))

a = 0.05
# ROBUSTNESS FIX: the mask was previously built from `data` (which contains
# BOTH treatment arms) and used to index `procData` (treatment rows only),
# relying on pandas index alignment across frames. Build the mask from
# procData itself so the filter is self-contained.
diff_data = procData[(procData["totalC"] > 0) & (procData["totalA"] > 0)]
ggplot()+geom_histogram(diff_data,aes(x="tg2"))
# One-sample Wilcoxon on the C/A ratio of the remaining rows.
w, p = wilcoxon((diff_data["totalC"]) / (diff_data["totalA"]))
print(w, p)

a = 0.05
w, p = wilcoxon(data_control["total2"], data_treatment["total2"])
print(w, p)
```
TODO — 2 graphs:
1. Do the table
2. Black and white
3. Make sure it's not sloppy
4. Control, LPS & difference
correlation plot for each patient - total2 & diff_total2
Look for A/C ratios
ggplot(data_plot,aes(x='total2',y='ctrl_total2') ) + geom_point(colour) + geom_smooth(method='lm')
|
github_jupyter
|
<a name="top"></a>
<div style="width:1000 px">
<div style="float:right; width:98 px; height:98px;">
<img src="https://raw.githubusercontent.com/Unidata/MetPy/master/metpy/plots/_static/unidata_150x150.png" alt="Unidata Logo" style="height: 98px;">
</div>
<h1>Basic Time Series Plotting</h1>
<h3>Unidata Python Workshop</h3>
<div style="clear:both"></div>
</div>
<hr style="height:2px;">
<div style="float:right; width:250 px"><img src="http://matplotlib.org/_images/date_demo.png" alt="METAR" style="height: 300px;"></div>
## Overview:
* **Teaching:** 45 minutes
* **Exercises:** 30 minutes
### Questions
1. How can we obtain buoy data from the NDBC?
1. How are plots created in Python?
1. What features does Matplotlib have for improving our time series plots?
1. How can multiple y-axes be used in a single plot?
### Objectives
1. <a href="#loaddata">Obtaining data</a>
1. <a href="#basictimeseries">Basic timeseries plotting</a>
1. <a href="#multiy">Multiple y-axes</a>
<a name="loaddata"></a>
## Obtaining Data
To learn about time series analysis, we first need to find some data and get it into Python. In this case we're going to use data from the [National Data Buoy Center](http://www.ndbc.noaa.gov). We'll use the [pandas](http://pandas.pydata.org) library for our data subset and manipulation operations after obtaining the data with siphon.
Each buoy has many types of data available; you can read all about them in the [NDBC Web Data Guide](https://www.ndbc.noaa.gov/docs/ndbc_web_data_guide.pdf). There is a mechanism in siphon to see which data types are available for a given buoy.
```
from siphon.simplewebservice.ndbc import NDBC

# List which data products NDBC serves for buoy 46042.
data_types = NDBC.buoy_data_types('46042')
print(data_types)
```
In this case, we'll just stick with the standard meteorological data. The "realtime" data from NDBC contains approximately 45 days of data from each buoy. We'll retrieve that record for buoy 46042 and then do some cleaning of the data.
```
# Download ~45 days of realtime standard meteorological data for buoy 46042.
df = NDBC.realtime_observations('46042')
df.tail()
```
Let's get rid of the columns with all missing data. We could use the `drop` method and manually name all of the columns, but that would require us to know which are all `NaN` and that sounds like manual labor - something that programmers hate. Pandas has the `dropna` method that allows us to drop rows or columns where any or all values are `NaN`. In this case, let's drop all columns with all `NaN` values.
```
# Drop columns that contain only missing values.
df = df.dropna(axis='columns', how='all')
df.head()
```
<div class="alert alert-success">
<b>EXERCISE</b>:
<ul>
<li>Use the realtime_observations method to retrieve supplemental data for buoy 41002. **Note**: assign the data to something other than df or you'll have to rerun the data download cell above. We suggest using the name supl_obs.</li>
</ul>
</div>
```
# Your code goes here
# supl_obs =
```
#### Solution
```
# %load solutions/get_obs.py
```
Finally, we need to trim down the data. The file contains 45 days worth of observations. Let's look at the last week's worth of data.
```
import pandas as pd

# Keep only the most recent 7 days of observations.
idx = df.time >= (pd.Timestamp.utcnow() - pd.Timedelta(days=7))
df = df[idx]
df.head()
```
We're almost ready, but now the index column is not that meaningful. It starts at a non-zero row, which is fine with our initial file, but let's re-zero the index so we have a nice clean data frame to start with.
```
# Re-zero the integer index after the time filtering above.
df.reset_index(drop=True, inplace=True)
df.head()
```
<a href="#top">Top</a>
<hr style="height:2px;">
<a name="basictimeseries"></a>
## Basic Timeseries Plotting
Matplotlib is a python 2D plotting library which produces publication quality figures in a variety of hardcopy formats and interactive environments across platforms. We're going to learn the basics of creating timeseries plots with matplotlib by plotting buoy wind, gust, temperature, and pressure data.
```
# Convention for import of the pyplot interface
import matplotlib.pyplot as plt
# Set-up to have matplotlib use its support for notebook inline plots
%matplotlib inline
```
We'll start by plotting the windspeed observations from the buoy.
```
# Bump the default font size for readability.
plt.rc('font', size=12)
fig, ax = plt.subplots(figsize=(10, 6))
# Specify how our lines should look
ax.plot(df.time, df.wind_speed, color='tab:orange', label='Windspeed')
# Label axes and title, draw a grid, and add a legend (the trailing
# semicolon suppresses the notebook's echo of the legend object).
ax.set_xlabel('Time')
ax.set_ylabel('Speed (m/s)')
ax.set_title('Buoy Wind Data')
ax.grid(True)
ax.legend(loc='upper left');
```
Our x axis labels look a little crowded - let's try only labeling each day in our time series.
```
# Helpers to format and locate ticks for dates
from matplotlib.dates import DateFormatter, DayLocator

# Set the x-axis to do major ticks on the days and label them like '07/20'
ax.xaxis.set_major_locator(DayLocator())
ax.xaxis.set_major_formatter(DateFormatter('%m/%d'))
# Re-display the existing figure with the updated tick formatting.
fig
```
Now we can add wind gust speeds to the same plot as a dashed yellow line.
```
# Overlay gust speeds as a dashed line on the same axes.
# Use linestyle keyword to style our plot
ax.plot(df.time, df.wind_gust, color='tab:olive', linestyle='--',
        label='Wind Gust')
# Redisplay the legend to show our new wind gust line
ax.legend(loc='upper left')
fig
```
<div class="alert alert-success">
<b>EXERCISE</b>:
<ul>
<li>Create your own figure and axes (<code>myfig, myax = plt.subplots(figsize=(10, 6))</code>) which plots temperature.</li>
<li>Change the x-axis major tick labels to display the shortened month and date (i.e. 'Sep DD' where DD is the day number). Look at the
<a href="https://docs.python.org/3.6/library/datetime.html#strftime-and-strptime-behavior">
table of formatters</a> for help.
<li>Make sure you include a legend and labels!</li>
<li><b>BONUS:</b> try changing the <code>linestyle</code>, e.g., a blue dashed line.</li>
</ul>
</div>
```
# Your code goes here
```
#### Solution
<div class="alert alert-info">
<b>Tip</b>:
If your figure goes sideways as you try multiple things, try running the notebook up to this point again
by using the Cell -> Run All Above option in the menu bar.
</div>
```
# %load solutions/basic_plot.py
```
<a href="#top">Top</a>
<hr style="height:2px;">
<a name="multiy"></a>
## Multiple y-axes
What if we wanted to plot another variable in vastly different units on our plot? <br/>
Let's return to our wind data plot and add pressure.
```
# plot pressure data on same figure
# NOTE(review): pressure shares the wind-speed y-axis here, which squashes
# both curves -- the twin-axis version below fixes this.
ax.plot(df.time, df.pressure, color='black', label='Pressure')
ax.set_ylabel('Pressure')
ax.legend(loc='upper left')
fig
```
That is less than ideal. We can't see detail in the data profiles! We can create a twin of the x-axis and have a secondary y-axis on the right side of the plot. We'll create a totally new figure here.
```
fig, ax = plt.subplots(figsize=(10, 6))
# Secondary y-axis sharing the same time (x) axis.
axb = ax.twinx()
# Same as above
ax.set_xlabel('Time')
ax.set_ylabel('Speed (m/s)')
ax.set_title('Buoy Data')
ax.grid(True)
# Plotting on the first y-axis
ax.plot(df.time, df.wind_speed, color='tab:orange', label='Windspeed')
ax.plot(df.time, df.wind_gust, color='tab:olive', linestyle='--', label='Wind Gust')
ax.legend(loc='upper left');
# Plotting on the second y-axis
axb.set_ylabel('Pressure (hPa)')
axb.plot(df.time, df.pressure, color='black', label='pressure')
# Day-granularity ticks labelled like 'Sep 01'.
ax.xaxis.set_major_locator(DayLocator())
ax.xaxis.set_major_formatter(DateFormatter('%b %d'))
We're closer, but the data are plotting over the legend and not included in the legend. That's because the legend is associated with our primary y-axis. We need to append that data from the second y-axis.
```
# Combined figure: wind on the left axis, pressure on the right, and a
# single legend that merges the handles from both axes.
fig, ax = plt.subplots(figsize=(10, 6))
axb = ax.twinx()
# Same as above
ax.set_xlabel('Time')
ax.set_ylabel('Speed (m/s)')
ax.set_title('Buoy 41056 Wind Data')
ax.grid(True)
# Plotting on the first y-axis
ax.plot(df.time, df.wind_speed, color='tab:orange', label='Windspeed')
ax.plot(df.time, df.wind_gust, color='tab:olive', linestyle='--', label='Wind Gust')
# Plotting on the second y-axis
axb.set_ylabel('Pressure (hPa)')
axb.plot(df.time, df.pressure, color='black', label='pressure')
ax.xaxis.set_major_locator(DayLocator())
ax.xaxis.set_major_formatter(DateFormatter('%b %d'))
# Handling of getting lines and labels from all axes for a single legend
lines, labels = ax.get_legend_handles_labels()
lines2, labels2 = axb.get_legend_handles_labels()
axb.legend(lines + lines2, labels + labels2, loc='upper left');
```
<div class="alert alert-success">
<b>EXERCISE</b>:
Create your own plot that has the following elements:
<ul>
<li>A blue line representing the wave height measurements.</li>
<li>A green line representing wind speed on a secondary y-axis</li>
<li>Proper labels/title.</li>
<li>**Bonus**: Make the wave height data plot as points only with no line. Look at the documentation for the linestyle and marker arguments.</li>
</ul>
</div>
```
# Your code goes here
```
#### Solution
```
# %load solutions/adv_plot.py
```
<a href="#top">Top</a>
<hr style="height:2px;">
|
github_jupyter
|
```
import os
import numpy as np
import tensorflow as tf
from tensorflow.python.keras.datasets import mnist
# NOTE(review): tf.contrib / tfe and the enable_eager_execution /
# set_random_seed calls below are TensorFlow 1.x APIs; this notebook will
# not run under TF 2.x without porting.
from tensorflow.contrib.eager.python import tfe
# enable eager mode
tf.enable_eager_execution()
# fix seeds for reproducible weight initialisation
tf.set_random_seed(0)
np.random.seed(0)
# directory for model checkpoints
if not os.path.exists('weights/'):
    os.makedirs('weights/')
# constants
units = 64          # LSTM units per layer
batch_size = 256
epochs = 2
num_classes = 10    # MNIST digit classes
# dataset loading
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# scale pixel values to [0, 1]
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = x_train.reshape((-1, 28, 28))  # 28 timesteps, 28 inputs / timestep
x_test = x_test.reshape((-1, 28, 28))  # 28 timesteps, 28 inputs / timestep
# one hot encode the labels. convert back to numpy as we cannot use a combination of numpy
# and tensors as input to keras
y_train_ohe = tf.one_hot(y_train, depth=num_classes).numpy()
y_test_ohe = tf.one_hot(y_test, depth=num_classes).numpy()
print('x train', x_train.shape)
print('y train', y_train_ohe.shape)
print('x test', x_test.shape)
print('y test', y_test_ohe.shape)
```
# Bi-Directional LSTM
Writing a Bi-directional LSTM in keras is super simple with the Bidirectional wrapper. However the speed of such a model is slower than expected.
Some fixes for it are to use the GPU implementation for all the cells, and to unroll the entire RNN before hand. In normal Keras and Tensorflow, unrolling the RNN yields significant speed improvements since the symbolic loop is replaced with the unrolled graph representation of the RNN.
In Eager, I don't believe it is doing much to help with the speed.
```
class BiRNN(tf.keras.Model):
    """Multi-layer bidirectional LSTM classifier (TF 1.x eager + tf.keras)."""

    def __init__(self, units, num_classes, merge_mode='concat', num_layers=1):
        super(BiRNN, self).__init__()
        # implementation=2 is the GPU-oriented LSTM kernel; fall back to 1 on CPU
        self.impl = 1 if tfe.num_gpus() == 0 else 2
        self.cells = [tf.keras.layers.LSTMCell(units, implementation=self.impl) for _ in range(num_layers)]
        self.rnn = tf.keras.layers.RNN(self.cells, unroll=True)  # slower if not unrolled - probably because it is using K.rnn() internally.
        self.bidirectional = tf.keras.layers.Bidirectional(self.rnn, merge_mode=merge_mode)
        self.classifier = tf.keras.layers.Dense(num_classes)

    def call(self, inputs, training=None, mask=None):
        # run the bidirectional RNN over the input sequence, then classify
        x = self.bidirectional(inputs)
        output = self.classifier(x)
        # softmax op does not exist on the gpu, so always use cpu
        with tf.device('/cpu:0'):
            output = tf.nn.softmax(output)
        return output


device = '/cpu:0' if tfe.num_gpus() == 0 else '/gpu:0'
with tf.device(device):
    # build model and optimizer
    model = BiRNN(units, num_classes, num_layers=2)
    model.compile(optimizer=tf.train.AdamOptimizer(0.01), loss='categorical_crossentropy',
                  metrics=['accuracy'])
    # TF Keras tries to use entire dataset to determine shape without this step when using .fit()
    # Fix = Use exactly one sample from the provided input dataset to determine input/output shape/s for the model
    dummy_x = tf.zeros((1, 28, 28))
    model._set_inputs(dummy_x)
    # train
    model.fit(x_train, y_train_ohe, batch_size=batch_size, epochs=epochs,
              validation_data=(x_test, y_test_ohe), verbose=1)
    # evaluate on test set
    scores = model.evaluate(x_test, y_test_ohe, batch_size, verbose=1)
    print("Final test loss and accuracy :", scores)
    # persist the trained weights with the eager-mode Saver
    saver = tfe.Saver(model.variables)
    saver.save('weights/07_01_bi_rnn/weights.ckpt')
```
|
github_jupyter
|
This notebook contains a prototype for a workflow that would allow you to compare observations that were sampled in discrete time to the model output in continuous time. Only the first 14 cells work, and even then they are so unbelievably slow as to be almost entirely useless.
```
import sys
sys.path.append('/ocean/kflanaga/MEOPAR/analysis-keegan/notebooks/Tools')
import numpy as np
import numpy.polynomial.polynomial as poly
import matplotlib.pyplot as plt
import os
import math
import pandas as pd
from erddapy import ERDDAP
import netCDF4 as nc
import datetime as dt
from salishsea_tools import evaltools as et, viz_tools, places
import gsw
import matplotlib.gridspec as gridspec
import matplotlib as mpl
import matplotlib.dates as mdates
import cmocean as cmo
import scipy.interpolate as sinterp
import pickle
import cmocean
import json
import f90nml
import xarray as xr
import datetime as dt
import Keegan_eval_tools as ket
from collections import OrderedDict
fs=16
mpl.rc('xtick', labelsize=fs)
mpl.rc('ytick', labelsize=fs)
mpl.rc('legend', fontsize=fs)
mpl.rc('axes', titlesize=fs)
mpl.rc('axes', labelsize=fs)
mpl.rc('figure', titlesize=fs)
mpl.rc('font', size=fs)
mpl.rc('font', family='sans-serif', weight='normal', style='normal')
import warnings
#warnings.filterwarnings('ignore')
from IPython.display import Markdown, display
%matplotlib inline
```
```
# Analysis configuration: model run, year, and data locations.
year=2010
modelversion='nowcast-green.201905'
PATH= '/results2/SalishSea/nowcast-green.201905/'
datadir='/ocean/eolson/MEOPAR/obs/WADE/ptools_data/ecology'
##### Loading in pickle file data
saveloc='/ocean/kflanaga/MEOPAR/savedData/WADE_nutribot_pickles'
with open(os.path.join(saveloc,f'data_WADE_{modelversion}_{year}.pkl'),'rb') as hh:
    data=pickle.load(hh)
#creating new dictionaries that make it easy to call on specific stations.
datstat=dict()
for ind, istation in enumerate(data.Station.unique()):
    datstat[istation]=data.loc[data.Station == istation]
%%time
# Index the daily model result files for the chosen year.
start= dt.datetime(2010,1,1)
end=dt.datetime(2010,12,31) # the code called below (evaltools.index_model_files) includes the end date
# in the values returned
basedir='/results2/SalishSea/nowcast-green.201905/'
nam_fmt='nowcast'
flen=1 # files contain 1 day of data each
ftype= 'ptrc_T' # load bio files
tres=24 # 1: hourly resolution; 24: daily resolution <- try changing to 1 and loading hourly data
flist=et.index_model_files(start,end,basedir,nam_fmt,flen,ftype,tres)
# flist contains: paths: file paths; t_0: timestamp of start of each file; t_n: timestamp of start of next file
# get model i,j of location S3 from places
ij,ii=places.PLACES['S3']['NEMO grid ji']
ik=2 # choose surface level
# overridden immediately below with the grid indices of station BUD005
ii=data[data.Station == 'BUD005'].i.unique()[0]
ij=data[data.Station == 'BUD005'].j.unique()[0]
ik=2
# lazily open all the daily bio files as a single xarray dataset
bio=xr.open_mfdataset(flist['paths'])
%%time
tt=bio.time_counter
NO23=bio.nitrate.isel(deptht=ik,y=ij,x=ii) #.cell will give closest to two meters
#this is where we have the depth problem.
def TsByStation_ind2 (df,datstat,regions,obsvar,modvar,year,ylim,figsize=(14,40),loc='lower left',depth=5):
    """Plot per-station time series of observed vs. modelled values.

    For every station in the requested basins, draws the shallow
    (Z <= `depth`) observations of `obsvar` for `year` as open circles and
    the model variable `modvar` as a line, two station panels per row.

    NOTE(review): `datstat` is accepted but never used, and the model grid
    indices are looked up in the module-level `data` frame (and `bio`
    dataset) rather than the `df` argument -- presumably leftovers; confirm
    before reusing this function with a different `df`.

    params:
        df      - observation DataFrame with Basin/Station/dtUTC/Z columns
        datstat - unused (see note above)
        regions - list of basin names whose stations are plotted
        obsvar  - observation column to plot
        modvar  - model variable name in the module-level `bio` dataset
        year    - calendar year to plot
        ylim    - y-axis limits applied to every panel
        depth   - maximum observation depth (m) to include
    """
    # collect the stations belonging to the requested basins, flattened
    stations=[]
    for r in regions:
        sta0=df[df['Basin']==r].Station.unique()
        stations.append(sta0)
    stations = [val for sublist in stations for val in sublist]
    # one panel per station, two panels per row
    fig,ax=plt.subplots(math.ceil(len(stations)/2),2,figsize=figsize)
    new_stat = [stations[i:i+2] for i in range(0, len(stations), 2)]
    for si,axi in zip(new_stat,ax):
        for sj,axj in zip(si,axi):
            #The creation of the observed data points
            ps=[]
            obs0=et._deframe(df.loc[(df['dtUTC'] >= dt.datetime(year,1,1))&(df['dtUTC']<= dt.datetime(year,12,31))&(df['Station']==sj)&(df['Z']<=depth),[obsvar]])
            time0=et._deframe(df.loc[(df['dtUTC'] >= dt.datetime(year,1,1))&(df['dtUTC']<= dt.datetime(year,12,31))&(df['Station']==sj)&(df['Z']<=depth),['dtUTC']])
            p0,=axj.plot(time0,obs0,'.',color='blue',label=f'Observed {obsvar}',marker='o',fillstyle='none')
            ps.append(p0)
            # The creation of the model data line
            ii=data[data.Station == sj].i.unique()[0]
            ij=data[data.Station == sj].j.unique()[0]
            ik=0  # level 0 here, while ik=2 was used outside -- TODO confirm intended depth
            tt=bio.time_counter
            NO23=bio[modvar].isel(deptht=ik,y=ij,x=ii)
            p0,=axj.plot(tt,NO23,'-',color='darkorange',label='Nitrate')
            ps.append(p0)
            #labeling and formatting
            axj.set_ylabel('Concentration ($\mu$M)')
            axj.set_xlim(tt[0],tt[-1])
            axj.legend(handles=ps,prop={'size': 10},loc=loc)
            axj.set_xlabel(f'Date',fontsize=13)
            axj.set_ylabel(f'{obsvar} ($\mu$M)',fontsize=13)
            axj.set_title(f'{df[df.Station==sj].Basin.unique()[0]} ({sj})', fontsize=13)
            axj.set_ylim(ylim)
            yearsFmt = mdates.DateFormatter('%d %b')
            axj.xaxis.set_major_formatter(yearsFmt)
            for tick in axj.xaxis.get_major_ticks():
                tick.label.set_fontsize(13)
            for tick in axj.yaxis.get_major_ticks():
                tick.label.set_fontsize(13)
            plt.tight_layout()
            plt.setp(axj.get_xticklabels(), rotation=30, horizontalalignment='right')
# Plot observed NO23 vs. modelled nitrate for all Hood Canal Basin stations
# in the chosen year, then release the multi-file dataset handle.
obsvar='NO23'
modvar='nitrate'
regions=['Hood Canal Basin']
lims=(0,40)
TsByStation_ind2(data,datstat,regions,obsvar,modvar,year,lims,figsize=(14,14),loc='lower left')
bio.close()
```
Hmmm The fact that there are multiple different points at different depths make this technique mostly useless. Even if I fix it so that there are multiple lines or something, it will take so long it will be almost useless. Perhaps If I only look at observations at a certain depth it can be at least a little helpful.
```
# Now we are actually loading everything from a website/ online database instead of from our own results storage.
# First request: griddap model biology fields (nitrate) over a 2-month window.
server = "https://salishsea.eos.ubc.ca/erddap"
protocol = "griddap"
dataset_id = "ubcSSg3DBiologyFields1hV19-05"
response = "nc"
variables = [
    "nitrate",
    "time",
]
# ~4 km expressed in degrees of latitude / longitude (longitude scaled at 50N)
fourkmlat = 4/110.574
fourkmlon = 4/(111.320*np.cos(50*np.pi/180.))
lon, lat = places.PLACES['S3']['lon lat']
constraints = {
    "time>=": "2015-02-01T00:00:00Z",
    "time<=": "2015-04-01T00:00:00Z",
}
print(constraints)
obs = ERDDAP(server=server, protocol=protocol,)
obs.dataset_id = dataset_id
obs.variables = variables
obs.constraints = constraints
obs
print(obs.get_download_url())
# Download into a pandas DataFrame indexed by time, dropping missing rows.
obs_pd = obs.to_pandas(index_col="time (UTC)", parse_dates=True,).dropna()
obs_pd
# Second request: tabledap (point-observation) chlorophyll data within
# ~4 km of location S3 over the same window.
server = "https://salishsea.eos.ubc.ca/erddap"
protocol = "tabledap"
dataset_id = "ubcONCTWDP1mV18-01"
response = "nc"
variables = [
    "latitude",
    "longitude",
    "chlorophyll",
    "time",
]
fourkmlat = 4/110.574
fourkmlon = 4/(111.320*np.cos(50*np.pi/180.))
lon, lat = places.PLACES['S3']['lon lat']
constraints = {
    "time>=": "2015-02-01T00:00:00Z",
    "time<=": "2015-04-01T00:00:00Z",
    "latitude>=": lat - fourkmlat,
    "latitude<=": lat + fourkmlat,
    "longitude>=": lon - fourkmlon,
    "longitude<=": lon + fourkmlon,
}
print(constraints)
obs = ERDDAP(server=server, protocol=protocol,)
obs.dataset_id = dataset_id
obs.variables = variables
obs.constraints = constraints
obs_pd = obs.to_pandas(index_col="time (UTC)", parse_dates=True,).dropna()
obs_pd
```
|
github_jupyter
|
<a href="https://colab.research.google.com/github/adamuas/intuitive_intro_to_ann_ml/blob/master/Section_1_Implement_your_own_neuron_from_scratch.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# An Intuitive Introduction to Artificial Neural Networks and Machine Learning
This notebook walks you through the implementation of a neuron, its activation function (i.e. a function to model the excitation stage of the neuron) and its output function (i.e. a function to model the firing stage of the neuron).
```
# First off lets start by importing some useful python packages
import pandas as pd
import numpy as np
from sklearn.datasets import load_digits, load_diabetes
from matplotlib import pylab as plt
# install tensorflow 2
import tensorflow as tf
%matplotlib inline
print('tensorflow version:{}'.format(tf.__version__))
```
## Section 1 - Implementing your own neuron
### Section 1.1 - The Neuron
```
class Neuron:
    """A single artificial neuron.

    Holds a weight vector (dendrite connection strengths), a bias, and two
    pluggable functions: one that combines the inputs into an action
    potential (excitation stage) and one that turns that potential into the
    neuron's output (firing stage).
    """

    def __init__(self, n_inputs, bias=1.0, weight_fn=None, activation_fn=None):
        """Create a neuron with randomly initialised weights.

        params:
            n_inputs - number of input connections/weights
            bias - bias of the neuron
            weight_fn - input combination function (models excitation)
            activation_fn - output function (models firing)
        """
        # one random weight per input connection (dendrite strengths)
        self.weights = np.random.randn(n_inputs)
        self.bias = bias
        self.n_inputs = n_inputs
        self.activation_fn = activation_fn
        self.weight_fn = weight_fn

    def stimulate(self, inputs, verbose=False):
        """Propagate `inputs` through the neuron and return its output.

        params:
            inputs - input signals for the neuron
            verbose - when True, print the intermediate values

        returns:
            the neuron's output signal (0 when no activation_fn is set)
        """
        firing_value = 0
        excitation = 0
        # excitation stage: combine inputs and weights, if a combiner is set
        if self.weight_fn:
            excitation = self.weight_fn(weights=self.weights, inputs=inputs)
        # the bias always shifts the action potential
        excitation = excitation + self.bias
        if verbose:
            print('Action Potential: {}'.format(excitation))
        # firing stage: squash the action potential into an output signal
        if self.activation_fn:
            firing_value = self.activation_fn(action_potential=excitation)
        if verbose:
            print('Output Value: {}'.format(firing_value))
        return firing_value

    def __repr__(self):
        """Return a readable description of this neuron's configuration."""
        return "<neuron>\nweights: {}\nbias: {}\nactivation_fn: {}\n weight_fn:{}\n</neuron>".format(
            self.weights, self.bias, self.activation_fn, self.weight_fn)
```
### Section 1.2 - Weight Function (Model's Input Combination Function)
```
def weighted_sum(weights, inputs):
    """Combine *inputs* with *weights* into a single action potential.

    Models the excitation stage of the artificial neuron: the action
    potential grows with how strongly the stimulus lines up with the
    connection strengths.

    parameters:
        weights - connection strengths of the neuron
        inputs - stimulus values (features used to make the decision)

    returns:
        the neuron's action potential (the plain weighted sum; 0 for
        empty inputs)
    """
    return sum(weight_i * input_i
               for input_i, weight_i in zip(inputs, weights))
```
**Things to think about:**
* Why use weighted sum?
### Section 1.2 - Activation Function (Model's firing stage)
```
def sigmoid(action_potential):
    """Squash *action_potential* into the open interval (0, 1).

    Models the firing stage of the neuron: a strongly excited neuron fires
    a value close to 1.0, an inhibited one a value close to 0.0.

    return:
        the output value of the neuron
    """
    return 1.0 / (1.0 + np.exp(-action_potential))
```
**Things to think about**:
* Why a sigmoid activation function?
#### Behaviour of Sigmoids
```
# Sweep integer action potentials 0..14 through the sigmoid and plot the
# resulting saturation curve.
X = range(15)
y = [sigmoid(x) for x in X]
df = pd.DataFrame.from_dict({'X': X, 'y':y})
df.head()
df.plot(x='X',y='y')
plt.title('Behaviour Of Sigmoids')
```
The Sigmoid output function allows us to squash the output values of neuron's while preserving their magnitudes. So with greater output values we get closer and closer to 1.0,and with smaller and smaller output values we get closer to 0.0
## (5) Stimulate Our Neuron
We will be stimulating our neuron with the Iris dataset. It is a simple dataset that has measures of the iris plant such as the petal length, and width along with the corresponding species of the iris plant. It is a gentle introduction to simple datasets.
### 5.1 - Load the Iris Dataset from Sklearn
```
# lets import the iris dataset.
from sklearn.datasets import load_iris

# load the iris dataset and print its bundled description
data = load_iris()
print(data['DESCR'])
```
### 5.2 - Create our Neuron
Here we will specify the number of input features as the number of attributes we have available in the Iris dataset because we will like to use those attributes to make predictions about the Iris plant.
```
# Specify the number of input features we want the neuron to consider:
# the Iris dataset describes each sample with 4 attributes.
IRIS_FEATURES = 4
neuron = Neuron(n_inputs=IRIS_FEATURES, weight_fn=weighted_sum, activation_fn=sigmoid)
print(neuron)
```
### 5.3 - Stimulate the neuron with the iris Dataset
Here we will use a few samples of the iris dataset to stimulate the neuron.
```
# Feature matrix and class labels from the Iris bundle.
inputs = data['data']
targets = data['target']

# Stimulate the neuron with the first few samples of the dataset.
# (The targets are not needed here — the untrained neuron just produces
# an output for each input; the per-sample target_i local was unused.)
num_samples = 5
for input_i in inputs[:num_samples]:
    print('input: {}'.format(input_i))
    output_i = neuron.stimulate(input_i)
    print('output: {}'.format(output_i))
```
|
github_jupyter
|
```
from IPython.core.display import HTML

def css_styling():
    """Load the notebook's custom CSS and return it as a renderable HTML object."""
    # Use a context manager so the file handle is always closed
    # (the original open() call leaked the handle).
    with open("./styles/custom.css", "r") as f:
        styles = f.read()
    return HTML(styles)

css_styling()
```
# Introduction to Version Control
This is an introductory guide to the basic functions of Git version control software and the GitHub code hosting site that we will use during the Introduction to Programming Python course.
The examples in this guide will show you how to:
-
## Git
__What is Git?__
Git is *version control* software.
__What is version control software?__
Software that tracks and manages changes to a project without overwriting any part of the project.
Typically, when you save a file, for example a word document, you either:
- overwrite the previous version (save)
- save the file under a new name (save as)
This means we either:
- Lose the previous version
- End up with multiple files
In programming we often want to:
- make a small change to our program
- test our change works before moving on.
- easily revert to a previous version if we don't like the changes
It makes sense to incrementally save our work as we go along.
That way, if we break something we can just go back to the previous version.
But this can lead to many files:
<img src="../../../ILAS_seminars/intro to python/img/many_files_same_name.png" alt="Drawing" style="width: 300px;"/>
How can we tell what each one does?
We could try giving them meaningful names:
<img src="../../../ILAS_seminars/intro to python/img/many_files.gif" alt="Drawing" style="width: 300px;"/>
But the name can only tell us a little bit of information...
...before they start getting really long!
<img src="../../../ILAS_seminars/intro to python/img/many_files_different_names.png" alt="Drawing" style="width: 300px;"/>
Things get very confusing!
And many files take up lots of space on your computer.
### How Git works
Git creates a folder in the same directory as your file.
The directory containing both the file being tracked and the Git folder is now referred to as a repository or "repo".
(The folder is hidden.)
The folder being tracked by git is referred to as a repository or "repo".
You can keep any type of file in a repository (code files, text files, image files....).
It logs changes you make to the file.
It keeps track of multiple files within this directory.
It stores a *commit message* with each change you *commit*, saying what you changed:
<img src="../../../ILAS_seminars/intro to python/img/git_commit_.png" alt="Drawing" style="width: 300px;"/>
So if you make a mistake, you can just reset to a previous version.
<img src="../../../ILAS_seminars/intro to python/img/git_reset.png" alt="Drawing" style="width: 300px;"/>
When you commit changes, Git does not save two versions of the same file.
Git only saves the __difference__ between two files.
This minimises the amount of space that tracking your changes takes up on your computer,
__Example:__ Between files r3 and r4, the information saved is
> -juice <br>
> +soup
<img src="../../../ILAS_seminars/intro to python/img/git_diff.png" alt="Drawing" style="width: 500px;"/>
### Advantages and Disadvantages of Git
A __great thing__ about git is that it was made by programmers for programmers.
Professional developers and most other professionals who write code, use git (or other version control software) to manage their files, workflow and collaborations.
It has an enormous range of functionality.
A __problem__ with so much freedom is that it can be easy to get things wrong.
Git can be difficult to use.
To keep things nice and easy we will learn only the basics of using Git.
Even this basic understanding will give you essential skills that are used every day by professional programmers.
A __problem__ with Git is that it was made by programmers for programmers.
We have to use the command line (or Terminal) to access it.
There is no user interface.
It can be difficult to visualise what is going on.
<img src="../../../ILAS_seminars/intro to python/img/git_command_line.png" alt="Drawing" style="width: 500px;"/>
## GitHub
To provide a visual interface we can use an online *host site* to store and view code...
A repo can be a local folder on your computer.
A repo can also be a storage space on GitHub or another online host site.
<img src="../../../ILAS_seminars/intro to python/img/github-logo.jpg" alt="Drawing" style="width: 200px;"/>
GitHub.com is a "code hosting site".
It provides a visual interface to view code, the changes (*commits*) and to share and collaborate with others.
There are many code hosting sites, however Github has a large community of users.
So for programmers, it works like a social media site like Facebook or instagram.
<img src="../../../ILAS_seminars/intro to python/img/github-logo.jpg" alt="Drawing" style="width: 200px;"/>
Let's start by downloading your interactive textbook from github.com
Open a web browser and go to:
https://github.com/hphilamore/ILAS_python
This is a __repository__.
It is an online directory where this project, the textbook, is stored.
We can look at previous versions of the code by selecting *commits*...
We can easily view the difference ("diff") between the previous and current version.
You are going to download a personal copy of the textbook to you user area.
Please log on to the computer.
### Introduction to the Command Line.
We are going to download the textbook using the command line.
To open the terminal:
- press "win key" + "R"
- type: __cmd__
- press enter
A terminal will launch.
The *command prompt* will say something like:
C:¥Users¥Username:
The C tells us that we are on the C drive of the computer.
Lets switch to the M drive where the user (you!) can save files.
In the terminal type:
>`M:`
...and press enter.
You should see the command prompt change.
<img src="../../../ILAS_seminars/intro to python/img/KUterminalMdrive.png" alt="Drawing" style="width: 700px;"/>
To see what is on the M drive type:
>`dir`
..and press enter.
You will see all the folders in your personal user area.
Double click on the computer icon on the desktop.
Double click on Home Directory (M:).
You should see the same folders as those listed in the terminal.
To navigate to documents type:
>`cd Documents`
cd stands for "change directory".
We can move down the filesystem of the computer by typing:
>`cd`
followed by the name of the folder we want to move to.
The folder must be:
- on the same branch
- one step from our current location
<img src="../../../ILAS_seminars/intro to python/img/directory_tree.gif" alt="Drawing" style="width: 200px;"/>
Type:
>`dir`
again to view the contents of your Documents folder.
To move back up by one step, type:
>`cd ..`
Try this now.
<img src="../../../ILAS_seminars/intro to python/img/directory_tree.gif" alt="Drawing" style="width: 200px;"/>
We can move by more than one step by separating the names of the folders using the symbol:
¥
(note, this is \ or / on US and European computers, depending on the operating system)
For example, now try navigating to any folder in your Documents folder by typing:
>`cd Documents¥folder_name`
where `folder_name` is the name of the folder to move to.
And now let's go back to the main Documents folder by typing:
> cd ..
Type:
>`dir`
again to view the contents of your Documents folder.
## 'Cloning' the Textbook Using Git
Go to the Github site we opened earlier.
We are going to download a copy of the textbook from an online *repository*.
This is referred to as *cloning*.
This will allow you to work on the textbook and save it locally on a computer.
Click the button "Clone or download" and copy the link by pressing Ctrl + C
<img src="../../../ILAS_seminars/intro to python/img/clone-url.png" alt="Drawing" style="width: 500px;"/>
In the terminal type `git clone`. After the word `clone` leave a space and then paste the URL that you just copied.:
> `git clone` [PASTE COPIED URL HERE]
`Clone` copies all the files from the repository at the URL you have entered.
In the terminal type:
> `dir`
A folder called "ILAS_python" should have appeared.
Go into the folder and view the content by typing:
>`cd ILAS_python`
><br>`dir`
Hint: If you start typing a folder name and press "tab", the folder name autocompletes! Try it for yourself e.g. in the Documents directory type:
>`cd ILAS`
then press "tab".
The textbook files should now have appeared in your Documents folder.
## Creating an Online Github Account
The __online Github repository__ that you cloned the textbook from belongs to me.
You are going to create your own online Github user account.
You will use Github to update the online version of your textbook with the changes you make to the version stored locally on the university M drive.
This means you can easily access it from outside the Kyoto University system, for example, to complete your homework.
I will use your online repositories to view your work and check your progress during the course.
Open https://github.com
Click "Sign in" at the top right hand corner.
<img src="../../../ILAS_seminars/intro to python/img/github_signup.png" alt="Drawing" style="width: 500px;"/>
Follow the steps to create an account, the same way as you would for a social media site for example.
Choose a user name, email address, password.
<img src="../../../ILAS_seminars/intro to python/img/github-signup.png" alt="Drawing" style="width: 300px;"/>
Use the confirmation email to complete your registration.
## Creating an Online GitHub Repository
Now we are going to set up your first online repository.
Click the + sign in the top right corner.
Choose "New repository".
<img src="../../../ILAS_seminars/intro to python/img/github_newrepo.png" alt="Drawing" style="width: 500px;"/>
Choose a repository name (e.g. Python Textbook, python_textbook, Intro_to_python)
<img src="../../../ILAS_seminars/intro to python/img/github_namerepo.jpg" alt="Drawing" style="width: 300px;"/>
Leave the other settings as they are for now.
We will learn about these later in the course.
Click the button "Create repository".
<img src="../../../ILAS_seminars/intro to python/img/github_create_repo.jpg" alt="Drawing" style="width: 300px;"/>
## Adding Files to an Online Github Repository
We are now going to link your local repository (stored on the computer on the M drive) to your online repository (stored at github.com).
In the terminal, make sure you are __inside__ the folder named ILAS_python.
If you are not, then navigate to the folder using
>`cd`
Enter the username that you registered when setting up your account on GitHub:
>`git config --global user.name "username"`
Enter the email address that you registered when setting up your account on GitHub:
>`git config --global user.email "your_email@youremail.com"`
Copy the URL of your repo from the "Quick setup" section.
<img src="../../../ILAS_seminars/intro to python/img/github_copyurl.png" alt="Drawing" style="width: 300px;"/>
__NOTE__
<br>Earlier we copied the URL of __my repository__ (https://github.com/hphilamore/ILAS_python.git).
<br>We used it to tell the computer where to copy files __from__.
<br>Now we are copying the URL of __your repository__(https://github.com/yourGithub/yourRepo.git).
<br>We will now use a similar procedure to tell the computer where it should copy files __to__.
First we will disconnect your local repo from __my__ online repo.
<br>The command `git remote rm` removes (`rm`) a remote (`remote`) URL from your local repository.
<br>Type:
>`git remote rm origin`
(*origin* is a name that was given by default to the URL you cloned the repository from).
Second we will connect your local repo to __your__ online repo.
<br>The command `git remote add` connects (`add`) a remote (`remote`) URL to your local repository using:
- a name for you remote (let's use origin, again)
- a URL (the URL you just copied)
<br>Type:
>`git remote add origin` [PASTE COPIED URL HERE]
<br>The command `git push -u` uploads (pushes) the contents of your local repository to a remote repository:
- a remote name (ours is "origin")
- a *branch* of your repository (this is a more advanced feature of GitHub. We will use the default branch only. It is called "master")
<br>Type:
>`git push -u origin master`
A new window should open.
Enter your github login details:
<img src="../../../ILAS_seminars/intro to python/img/GitHubLogin.png" alt="Drawing" style="width: 200px;"/>
Return to the teminal. You may be prompted to enter your login details a second time:
<img src="../../../ILAS_seminars/intro to python/img/GitHubTermLogin.png" alt="Drawing" style="width: 500px;"/>
You should see a few lines of code appear, ending with the message:
>`Branch master set up to track remote branch master from origin`
Now look again at your online GitHub page.
Click on the "code" tab to reload the page.
<img src="../../../ILAS_seminars/intro to python/img/github_code.png" alt="Drawing" style="width: 300px;"/>
The textbook should now have appeared in your online repository.
## Tracking changes using Git
Throughout this course you will develop new skills by completing excercises in the interactive textbook.
At the end of the course you will have all of your notes and practise excercises in one place, accessible from almost anywhere.
Each time you make changes to the textbook, save it and exit it, either in class or at home, track the changes using git and sync them with the online repository. The following sections will show how to do this.
We are now going to:
- use Git to record the changes you make to the textbook.
- upload it to your online GitHub repository so that you can access it online.
Git has a two-step process for saving changes.
1. Select files for which to log changes (__"add"__)
1. Log changes (__"commit"__)
This is an advanced feature.
For now, we will just learn to __add__ all the files in our directory (rather than selecting individual files).
When files have been __added__ but not yet __commited__, we say they have been *staged*.
<img src="../../../ILAS_seminars/intro to python/img/git_simple_workflow.png" alt="Drawing" style="width: 500px;"/>
In the terminal type:
>`git add -A`
to take a snapshot of the changes to all (`-A`) the files in your local directory.
<br>This is held at the index (stage).
Then:
>`git commit -m "A short message explaining your changes"`
to save the changes with a message (`-m`) you can refer to to remind you what you changed.
<br>
To avoid losing any changes, these commands are usually executed in immediate succession.
To see the commit you just made type:
>`git log`
You will see the message you write and a long number.
We can return to this point in the your work at any time by referencing this number.
Type:
>`q`
to exit the log commit log.
## Updating your Online GitHub Repository
We have updated the Git repository held on the computer.
The last thing we need to do is to update your online repository.
We do this using the `push` command.
<img src="../../../ILAS_seminars/intro to python/img/git-local-remote-workflow-cropped.png" alt="Drawing" style="width: 500px;"/>
You used the `push` command when you originally uploaded the textbook to your repository.
Enter exactly the same code into the terminal:
Type:
git push -u origin master
Enter your GitHub login details when prompted.
Go to the web browser and open the file 0_Introduction.
Scroll down to where you made the change.
Hint: look for the marker:
<img src="../../../ILAS_seminars/intro to python/img/change.jpg" alt="Drawing" style="width: 100px;"/>
<a id='InstallingSoftwareHomeUse'></a>
## Installing Git for Home Use
It is highly recommended that you download and install the software we will use in class:
- Jupyter notebook (anaconda)
- Git
You will need to use this software to complete homework assigments and prepare for the exam.
Both are free to download and install.
When running Git you do not need to use the
Anaconda (which includes Jupyter notebook) can be downloaded from: https://www.anaconda.com/download/
><br>Python 3.6 version and Python 2.7 version are available.
><br>Choose Python 3.6 version
Git can be downloaded from: https://github.com/git-for-windows/git/releases/tag/v2.14.1.windows.1
>Choose Git-2.14.1-64-bit.exe if you have a 64 bit operating system.
><br> Choose Git-2.14.1-32-bit.exe if you have a 32 bit operating system.
An easy to follow download wizard will launch for each piece of software.
__NOTE:__ The procedure to install git on your personal computer is different from the method we have used in the "Installing Git" Section of this seminar.
## Installing Git for On-Campus Use
Git is only available in the computer lab (Room North wing 21, Academic Center Bldg., Yoshida-South Campus).
To use Git on a Kyoto University computer outside of the computer lab you need install Git in your local user area:
Download the Git program from here:
https://github.com/git-for-windows/git/releases/tag/v2.14.1.windows.1
The version you need is:
PortableGit-2.14.1-32-bit.7z.exe
When prompted, choose to __run the file__ 実行(R).
<img src="../../../ILAS_seminars/intro to python/img/GitHubInstallRun.png" alt="Drawing" style="width: 200px;"/>
When prompted, change the location to save the file to:
M:¥Documents¥PortableGit
<img src="../../../ILAS_seminars/intro to python/img/GitLocation.png" alt="Drawing" style="width: 200px;"/>
Press OK
The download may take some time.
Once the download has completed...
To open the terminal:
- press "win key" + "R"
- type: __cmd__
- press enter
In the terminal type:
>`M:`
...and press enter, to switch to the M drive.
You should see the command prompt change.
<img src="../../../ILAS_seminars/intro to python/img/KUterminalMdrive.png" alt="Drawing" style="width: 700px;"/>
To navigate to documents type:
>`cd Documents`
cd stands for "change directory".
You should now see a folder called PortableGit in the contents list of __Documents__ folder.
Type:
>cd PortableGit
to move into your PortableGit folder.
To check git has installed type:
>`git-bash.exe`
A new terminal window will open. In this window type:
>`git --version`
If Git has installed, the version of the program will be displayed. You should see something like this:
<img src="../../../ILAS_seminars/intro to python/img/git-version.gif" alt="Drawing" style="width: 500px;"/>
Close the window.
The final thing we need to do is to tell the computer where to look for the Git program.
Move one step up from the Git folder. In the original terminal window, type:
> `cd ..`
Now enter the following in the terminal:
> PATH=M:¥Documents¥PortableGit¥bin;%PATH%
(you may need to have your keyboard set to JP to achieve this)
<img src="../../../ILAS_seminars/intro to python/img/windows_change_lang.png" alt="Drawing" style="width: 400px;"/>
You can type this or __copy and paste__ it from the README section on the github page we looked at earlier.
<img src="img/readme_.png" alt="Drawing" style="width: 500px;"/>
__Whenever you use Git on a Kyoto University computer outside of the computer lab (Room North wing 21, Academic Center Bldg., Yoshida-South Campus), you must first open a terminal and type the line of code above to tell the computer where to look for the Git program.__
The program Git has its own terminal commands.
Each one starts with the word `git`
You can check git is working by typing:
>`git status`
You should see something like this:
<img src="../../../ILAS_seminars/intro to python/img/git-version.gif" alt="Drawing" style="width: 500px;"/>
## Creating a Local Repository on your Personal Computer
In addition to the local copy of the textbook you have stored on the Kyoto University M drive, you are going to make a local copy of the interactive textbook on your personal computer so that you can make and save changes, for example when doing your homework.
This is the same process as when you initially *cloned* the textbook from the ILAS_python online repository, except this time you will clone the textbook from your personal GitHub repository.
On your personal computer...
If you have not already installed Git, then go back to <a href='#InstallingSoftwareHomeUse'>Installing Software for Home Use.</a>
Open the terminal:
__On Windows:__
- press "win key" + "R"
- type: __cmd__
- press enter
__On Mac:__
- Open the "Terminal" application
__On Linux:__
- Open the "Terminal" application
or
- press "Ctrl" + "Alt" + "T"
Using `cd`, navigate to where you want the folder containing the textbook to appear.
In a web browser, open your personal GitHub page that you created earlier.
Navigate to the online repository to which you uploaded the textbook.
*Note that you can view the textbook online on the GitHub site.*
Click the button "Clone or download" and copy the link by pressing Ctrl + C
<img src="../../../ILAS_seminars/intro to python/img/clone-url.png" alt="Drawing" style="width: 500px;"/>
In the terminal type `git clone`. After the word `clone` leave a space and then paste the URL that you just copied.:
> `git clone` [PASTE COPIED URL HERE]
In the terminal type:
> `dir`
A folder called "ILAS_python" should have appeared.
Go into the folder and view the content by typing:
>`cd ILAS_python`
><br>`dir`
You can now open the notebooks stored in the repository in Jupyter Notebook to complete your homework.
## Syncronising Repositories
You now have three repositiories.
<br>__Two Local Repositories__
- Kyoto University M drive
- your personal computer
<br>__One Online Repository__
- GitHub
Each repository contains a copy of the interactive Python textbook.
We want to keep the three repositories *syncronised*. The version of the textbook in each repository should be the same. When we make changes to the textbook, either using a Kyoto University computer or your personal computer, the changes should be added to both the online repository and the other local repository.
The online repository can be accessed from either a Kyoto University computer or your personal computer. It is less easy for the two local repositories to access each other.
Therefore we will use GitHub as a central repository that we use to pass changes between the two local repositories.
<img src="img/syncing_repos.png" alt="Drawing" style="width: 300px;"/>
### Pushing and Pulling
This syncronisation is done using the Git commands __`push`__ and __`pull`__.
When you have competed your homework on your personal computer, you __`push`__ the changes to GitHub.
<img src="img/syncing_repos_home_push.png" alt="Drawing" style="width: 300px;"/>
We use the process we have already learnt to push changes from a local repository to our online repository.
Let's recap....
#### Pushing Changes to an Online Repository
Open a terminal:
>__On Windows:__
> - press "win key" + "R"
> - type: __cmd__
> - press enter
>
> __On Mac:__
> - Open the "Terminal" application
>
> __On Linux:__
> - Open the "Terminal" application
>or
> - press "Ctrl" + "Alt" + "T"
Using `cd`, navigate to where you want the folder containing the textbook to appear.
In the terminal type:
>`git add -A`
>`git commit -m "A short message explaining your changes"`
>`git push origin master`
Enter your GitHub login details when prompted.
Your online remote repository should now have been updated.
#### Pulling Changes to a Local Repository
After commiting the changes made on your personal computer to GitHub (e.g. homework), the next time you log on to a Kyoto University computer, the local repository will be *behind* (not up-to-date with):
- the online repository on GitHub
- the local repository on your personal computer
You need to __pull__ the changes from the online repository to your local repository on the M drive.
<img src="img/syncing_repos_home_to_class.png" alt="Drawing" style="width: 300px;"/>
Open a terminal.
Using `cd`, navigate to *inside* the folder in which your textbook is stored.
Type:
>`git pull master`
Enter your GitHub login details when prompted.
The local repository on the M drive should have now been updated with the changes you made using your personal computer.
At the end of the seminar, you will once again update the online repository with the changes you make in-class.
<img src="img/syncing_repos_class_push.png" alt="Drawing" style="width: 300px;"/>
When you get home you must __pull__ the changes before starting this week's homework.
<img src="img/syncing_repos_class_to_home.png" alt="Drawing" style="width: 300px;"/>
#### Resolving Clashes
If the online repository is *ahead* of the local repository you are currently working on (ie. it has been updated from another repository), you are required to __pull__ the updates from the online repository to your current local repository before you can __push__ from the current local repository to the online repository.
Therefore, it is best practise to always:
- __push__ your changes at the end of a work session
- __pull__ your changes at the begining of a work session
If you begin working on a local repository *before* pulling the most recent changes, you may cause clashes between the version held locally and the version held online.
When you pull the changes from online, these clashes create errors which can be difficult to resolve.
<br>(*We will cover some possible solutions if/when clashes occur later in the course*).
To avoid clashes:
- stick to the fomat of the textbook and use the boxes provided when completeing your answers to the review questions (the importance of this is explained in the next section)
- follow the __push__...__pull__...__push__... working format.
## Pulling the Homework Solutions from an "Upstream" Repository
Sometimes you may want to pull changes from an online repository other than your personal GitHub repository.
For example, the original repository from which you cloned the textbook will change during the course:
- example solutions to the previous seminars review exercisess will be added weekly.
- new chapters will be added before the second half of the course begins.
To __pull__ these changes, incorporating them into your local version of the textbook, you need to connect your local repository to the online repositories where these changes will be held.
### Adding an Online Repository
First check that your local and online repositories are syncronised by __pulling__ and __pushing__ as necessary.
Open a terminal.
Using `cd`, navigate to *inside* the folder in which your textbook is stored.
Type:
>`git remote add upstream https://github.com/hphilamore/ILAS_python.git`
We have connected (`remote add`) the online repository from which we originally cloned the textbook to our local repository and called it `upstream` to distinguish it from our main online repository, `origin`.
Type:
>`git fetch upstream`
>`git merge upstream/master master`
Any changes made to the original version of the textbook should now be incorporated with your local version.
<br>*To avoid clashes between the two versions it is particularly important to stick to the format of the textbook and use the boxes provided when completeing your answers to the review questions.*
Lastly, remember to push your changes to your personal online repository.
<br>Type:
>`git push origin master`
__NOTE:__ You only need to add the repository `upstream` once. Add this to the local repository on both your personal computer and the local repository on the Kyoto University system.
After the remote repository has been added, each time you want to pull changes from `upstream` to your local repository simply:
- First check that your local and online repositories are syncronised by __pulling__ and __pushing__ as necessary.
- Navigate to *inside* the folder in which your textbook is stored using the terminal.
- Type:
>`git fetch upstream`
>`git merge upstream/master master`
>`git push origin master`
|
github_jupyter
|
# Trace Simple Image Classifier
Task: trace and explain the dimensionality of each tensor in a simple image classifier.
## Setup
```
# Pull in fastai's vision stack and the fastbook helpers (these star
# imports also re-export torch's `nn`, PIL's `Image`, matplotlib, etc.).
from fastai.vision.all import *
from fastbook import *
# Render greyscale images as dark ink on a light background.
matplotlib.rc('image', cmap='Greys')
```
Get some example digits from the MNIST dataset.
```
path = untar_data(URLs.MNIST_SAMPLE)
threes = (path/'train'/'3').ls().sorted()
sevens = (path/'train'/'7').ls().sorted()
len(threes), len(sevens)
```
Here is one image:
```
example_3 = Image.open(threes[1])
example_3
```
To prepare to use it as input to a neural net, we first convert integers from 0 to 255 into floating point numbers between 0 and 1.
```
example_3_tensor = tensor(example_3).float() / 255
example_3_tensor.shape
height, width = example_3_tensor.shape
```
Our particular network will ignore the spatial relationship between the features; later we'll learn about network architectures that do pay attention to spatial neighbors. So we'll *flatten* the image tensor into 28\*28 values.
```
example_3_flat = example_3_tensor.view(width * height)
example_3_flat.shape
```
## Task
We'll define a simple neural network (in the book, a 3-vs-7 classifier) as the sequential combination of 3 layers. First we define each layer:
```
# Define the layers. This is where you'll try changing constants.
linear_1 = nn.Linear(in_features=784, out_features=30)
relu_layer = nn.ReLU()
linear_2 = nn.Linear(in_features=30, out_features=1)
```
Then we put them together in sequence.
```
simple_net = nn.Sequential(
linear_1,
relu_layer,
linear_2
)
```
Each of `nn.Linear`, `nn.ReLU`, and `nn.Sequential` is a PyTorch *module*. We can *call* a module with some input data to get the output data:
```
simple_net(example_3_flat)
```
Your turn:
1. Obtain the same result as the line above by applying each layer in sequence.
The outputs of each layer are called *activations*, so we can name the variables `act1` for the activations of layer 1, and so forth. Each `act` will be a function of the previous `act` (or the `inp`ut, for the first layer.)
```
inp = example_3_flat
act1 = ...
act2 = ...
act3 = ...
```
2. Evaluate `act1`, `act2`, and `act3`. (Code already provided; look at the results.)
```
act1
act2
act3
```
2. Evaluate the `shape` of `act1`, `act2`, and `act3`.
```
# your code here
```
3. Write expressions for the shapes of each activation in terms of `linear_1.in_features`, `linear_2.out_features`, etc. (ignore the `torch.Size(` part)
```
linear_1.in_features
act1_shape = [...]
act2_shape = [...]
act3_shape = [...]
assert list(act1_shape) == list(act1.shape)
assert list(act2_shape) == list(act2.shape)
assert list(act3_shape) == list(act3.shape)
```
4. Evaluate the `shape` of `linear_1.weight`, `linear_1.bias`, and the same for `linear_2`. Write expressions that give the value of each shape in terms of the `in_features` and other parameters.
```
print(f"Linear 1: Weight shape is {list(linear_1.weight.shape)}, bias shape is {list(linear_1.bias.shape)}")
print(f"Linear 2: Weight shape is {list(linear_2.weight.shape)}, bias shape is {list(linear_2.bias.shape)}")
linear_1_weight_shape = [...]
linear_1_bias_shape = [...]
linear_2_weight_shape = [...]
linear_2_bias_shape = [...]
assert list(linear_1_weight_shape) == list(linear_1.weight.shape)
assert list(linear_1_bias_shape) == list(linear_1.bias.shape)
assert list(linear_2_weight_shape) == list(linear_2.weight.shape)
assert list(linear_2_bias_shape) == list(linear_2.bias.shape)
```
## Analysis
1. Try changing each of the constants provided to the `nn.Linear` modules. Identify an example of:
1. A constant that can be freely changed in the neural net definition.
2. A constant that cannot be changed because it depends on the input.
3. A pair of constants that must be changed together.
*your answer here*
2. Describe the relationship between the values in `act1` and `act2`.
*your answer here*
3. In a concise but complete sentence, describe the shapes of the parameters of the `Linear` layer (`weight` and `bias`).
*your answer here*
|
github_jupyter
|
# ANCOM: WGS
```
library(tidyverse)
library(magrittr)
source("/Users/Cayla/ANCOM/scripts/ancom_v2.1.R")
```
## T2
```
t2 <- read_csv('https://github.com/bryansho/PCOS_WGS_16S_metabolome/raw/master/DESEQ2/WGS/T2/T2_filtered_greater_00001.csv')
head(t2,n=1)
t2.meta <- read_csv('https://github.com/bryansho/PCOS_WGS_16S_metabolome/raw/master/DESEQ2/WGS/T2/Deseq2_T2_mapping.csv')
head(t2.meta,n=1)
# subset data
t2.meta.PvL <- t2.meta %>% filter(Treatment == 'Placebo' | Treatment == 'Let')
t2.PvL <- t2 %>% select(X1, any_of(t2.meta.PvL$Sample)) %>% column_to_rownames('X1')
t2.meta.LvLCH <- t2.meta %>% filter(Treatment == 'Let' | Treatment == 'CoL')
t2.LvLCH <- t2 %>% select(X1, any_of(t2.meta.LvLCH$Sample)) %>% column_to_rownames('X1')
```
### Placebo vs. Let
```
# Data Preprocessing
# feature_table is a df/matrix with features as rownames and samples in columns
feature_table <- t2.PvL
# character vector/column containing sample IDs
sample_var <- "Sample"
# grouping variable to detect structural zeros and outliers
group_var <- "Treatment"
# 0 < fraction < 1. For each feature, observations with proportion of mixture
# distribution < out_cut will be detected as outlier zeros;
# > (1 - out_cut) will be detected as outlier values
out_cut <- 0.05
# 0 < fraction < 1. Features with proportion of zeros > zero_cut are removed.
zero_cut <- 0.90
# samples with library size < lib_cut will be excluded in the analysis
lib_cut <- 0
# TRUE indicates a taxon would be classified as a structural zero in the
# corresponding experimental group using its asymptotic lower bound. More
# specifically, ```neg_lb = TRUE``` indicates you are using both criteria
# stated in section 3.2 of [ANCOM-II]
# (https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5682008/) to detect structural
# zeros; Otherwise, ```neg_lb = FALSE``` will only use the equation 1 in
# section 3.2 of [ANCOM-II](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5682008/)
# for declaring structural zeros.
neg_lb <- TRUE
prepro <- feature_table_pre_process(feature_table, t2.meta.PvL, sample_var, group_var,
out_cut, zero_cut, lib_cut, neg_lb)
# Preprocessed feature table
feature_table1 <- prepro$feature_table
# Preprocessed metadata
meta_data1 <- prepro$meta_data
# Structural zero info
struc_zero1 <- prepro$structure_zeros
# Run ANCOM
# name of the main variable of interest (character)
main_var <- "Treatment"
p_adj_method <- "BH" # number of taxa > 10, therefore use Benjamini-Hochberg correction
alpha <- 0.05
# character string representing the formula for adjustment
adj_formula <- NULL
# character string representing the formula for random effects in lme
rand_formula <- NULL
t_start <- Sys.time()
res <- ANCOM(feature_table1, meta_data1, struc_zero1, main_var, p_adj_method,
alpha, adj_formula, rand_formula)
t_end <- Sys.time()
t_end - t_start
# write output to file
# output contains the "W" statistic for each taxa - a count of the number of times
# the null hypothesis is rejected for each taxa
# detected_x are logicals indicating detection at specified FDR cut-off
write_csv(res$out, "2021-07-25_WGS_T2_PvL_ANCOM_data.csv")
n_taxa <- ifelse(is.null(struc_zero1), nrow(feature_table1), sum(apply(struc_zero1, 1, sum) == 0))
res$fig + scale_y_continuous(sec.axis = sec_axis(~ . * 100 / n_taxa, name = 'W proportion'))
ggsave(filename = paste(lubridate::today(),'volcano_WGS_T2_PvL.pdf',sep='_'), bg = 'transparent', device = 'pdf', dpi = 'retina')
# to find most significant taxa, I will sort the data
# 1) y (W statistic)
# 2) according to the absolute value of CLR mean difference
sig <- res$fig$data %>%
mutate(taxa_id = str_split_fixed(res$fig$data$taxa_id, pattern='s_', n=2)[,2]) %>% # remove leading 's_'
arrange(desc(y), desc(abs(x))) %>%
filter(y >= (0.7*n_taxa), !is.na(taxa_id)) # keep significant taxa, remove unidentified taxa
write.csv(sig, paste(lubridate::today(),'SigFeatures_WGS_T2_PvL.csv',sep='_'))
# save features with W > 0
non.zero <- res$fig$data %>%
arrange(desc(y), desc(abs(x))) %>%
mutate(taxa_id = str_split_fixed(res$fig$data$taxa_id, pattern='s_', n=2)[,2], # remove leading 's_'
W.proportion = y/(n_taxa-1)) %>% # add W
filter(y > 0) %>%
rowid_to_column()
write.csv(non.zero, paste(lubridate::today(),'NonZeroW_Features_WGS_T2_PvL.csv',sep='_'))
# plot top 20 taxa
sig %>%
slice_head(n=20) %>%
ggplot(aes(x, taxa_id)) +
geom_point(aes(size = 1)) +
theme_bw(base_size = 16) +
guides(size = FALSE) +
labs(x = 'CLR Mean Difference', y = NULL)
ggsave(filename = paste(lubridate::today(),'Top20_WGS_T2_PvL.pdf',sep='_'), bg = 'transparent', device = 'pdf', dpi = 'retina', width = 10)
```
### Let v Let-co-housed
```
# Data Preprocessing
feature_table <- t2.LvLCH
sample_var <- "Sample"
group_var <- "Treatment"
out_cut <- 0.05
zero_cut <- 0.90
lib_cut <- 0
neg_lb <- TRUE
prepro <- feature_table_pre_process(feature_table, t2.meta.LvLCH, sample_var, group_var,
out_cut, zero_cut, lib_cut, neg_lb)
# Preprocessed feature table
feature_table2 <- prepro$feature_table
# Preprocessed metadata
meta_data2 <- prepro$meta_data
# Structural zero info
struc_zero2 <- prepro$structure_zeros
# Run ANCOM
# name of the main variable of interest (character)
main_var <- "Treatment"
p_adj_method <- "BH" # number of taxa > 10, therefore use Benjamini-Hochberg correction
alpha <- 0.05
# character string representing the formula for adjustment
adj_formula <- NULL
# character string representing the formula for random effects in lme
rand_formula <- NULL
t_start <- Sys.time()
res2 <- ANCOM(feature_table2, meta_data2, struc_zero2, main_var, p_adj_method,
alpha, adj_formula, rand_formula)
t_end <- Sys.time()
t_end - t_start
# write output to file
# output contains the "W" statistic for each taxa - a count of the number of times
# the null hypothesis is rejected for each taxa
# detected_x are logicals indicating detection at specified FDR cut-off
write_csv(res2$out, "2021-07-25_WGS_T2_LvLCH_ANCOM_data.csv")
n_taxa <- ifelse(is.null(struc_zero2), nrow(feature_table2), sum(apply(struc_zero2, 1, sum) == 0))
res2$fig + scale_y_continuous(sec.axis = sec_axis(~ . * 100 / n_taxa, name = 'W proportion'))
ggsave(filename = paste(lubridate::today(),'volcano_WGS_T2_LvLCH.pdf',sep='_'), bg = 'transparent', device = 'pdf', dpi = 'retina')
# save features with W > 0
non.zero <- res2$fig$data %>%
arrange(desc(y), desc(abs(x))) %>%
mutate(taxa_id = str_split_fixed(res2$fig$data$taxa_id, pattern='s_', n=2)[,2], # remove leading 's_'
W.proportion = y/(n_taxa-1)) %>% # add W
filter(y > 0) %>%
rowid_to_column()
write.csv(non.zero, paste(lubridate::today(),'NonZeroW_Features_WGS_T2_LvLCH.csv',sep='_'))
# to find most significant taxa, I will sort the data
# 1) y (W statistic)
# 2) according to the absolute value of CLR mean difference
sig <- res2$fig$data %>%
mutate(taxa_id = str_split_fixed(res2$fig$data$taxa_id, pattern='s_', n=2)[,2]) %>% # remove leading 's_'
arrange(desc(y), desc(abs(x))) %>%
filter(y >= (0.7*n_taxa), !is.na(taxa_id)) # keep significant taxa, remove unidentified taxa
write.csv(sig, paste(lubridate::today(),'SigFeatures_WGS_T2_LvLCH.csv',sep='_'))
# plot top 20 taxa
sig %>%
slice_head(n=20) %>%
ggplot(aes(x, taxa_id)) +
geom_point(aes(size = 1)) +
theme_bw(base_size = 16) +
guides(size = FALSE) +
labs(x = 'CLR Mean Difference', y = NULL)
ggsave(filename = paste(lubridate::today(),'Top20_WGS_T2_LvLCH.pdf',sep='_'), bg = 'transparent', device = 'pdf', dpi = 'retina', width = 10)
```
## T5
```
t5 <- read_csv('https://github.com/bryansho/PCOS_WGS_16S_metabolome/raw/master/DESEQ2/WGS/T5/T5_filtered_greater_00001.csv')
head(t5,n=1)
t5.meta <- read_csv('https://github.com/bryansho/PCOS_WGS_16S_metabolome/raw/master/DESEQ2/WGS/T5/Deseq2_T5_mapping.csv')
head(t5.meta,n=1)
# subset data
t5.meta.PvL <- t5.meta %>% filter(Treatment == 'Placebo' | Treatment == 'Let')
t5.PvL <- t5 %>% select(X1, any_of(t5.meta.PvL$SampleID)) %>% column_to_rownames('X1')
t5.meta.LvLCH <- t5.meta %>% filter(Treatment == 'Let' | Treatment == 'CoL')
t5.LvLCH <- t5 %>% select(X1, any_of(t5.meta.LvLCH$SampleID)) %>% column_to_rownames('X1')
```
### Placebo v Let
```
# Data Preprocessing
feature_table <- t5.PvL
sample_var <- "SampleID"
group_var <- "Treatment"
out_cut <- 0.05
zero_cut <- 0.90
lib_cut <- 0
neg_lb <- TRUE
prepro <- feature_table_pre_process(feature_table, t5.meta.PvL, sample_var, group_var,
out_cut, zero_cut, lib_cut, neg_lb)
# Preprocessed feature table
feature_table3 <- prepro$feature_table
# Preprocessed metadata
meta_data3 <- prepro$meta_data
# Structural zero info
struc_zero3 <- prepro$structure_zeros
# Run ANCOM
# name of the main variable of interest (character)
main_var <- "Treatment"
p_adj_method <- "BH" # number of taxa > 10, therefore use Benjamini-Hochberg correction
alpha <- 0.05
# character string representing the formula for adjustment
adj_formula <- NULL
# character string representing the formula for random effects in lme
rand_formula <- NULL
t_start <- Sys.time()
res3 <- ANCOM(feature_table3, meta_data3, struc_zero3, main_var, p_adj_method,
alpha, adj_formula, rand_formula)
t_end <- Sys.time()
t_end - t_start
# write output to file
# output contains the "W" statistic for each taxa - a count of the number of times
# the null hypothesis is rejected for each taxa
# detected_x are logicals indicating detection at specified FDR cut-off
write_csv(res3$out, "2021-07-25_WGS_T5_PvL_ANCOM_data.csv")
n_taxa <- ifelse(is.null(struc_zero3), nrow(feature_table3), sum(apply(struc_zero3, 1, sum) == 0))
res3$fig + scale_y_continuous(sec.axis = sec_axis(~ . * 100 / n_taxa, name = 'W proportion'))
ggsave(filename = paste(lubridate::today(),'volcano_WGS_T5_PvL.pdf',sep='_'), bg = 'transparent', device = 'pdf', dpi = 'retina')
# save features with W > 0
non.zero <- res3$fig$data %>%
arrange(desc(y), desc(abs(x))) %>%
mutate(taxa_id = str_split_fixed(res3$fig$data$taxa_id, pattern='s_', n=2)[,2], # remove leading 's_'
W.proportion = y/(n_taxa-1)) %>% # add W
filter(y > 0) %>%
rowid_to_column()
write.csv(non.zero, paste(lubridate::today(),'NonZeroW_Features_WGS_T5_PvL.csv',sep='_'))
# to find most significant taxa, I will sort the data
# 1) y (W statistic)
# 2) according to the absolute value of CLR mean difference
sig <- res3$fig$data %>%
mutate(taxa_id = str_split_fixed(res3$fig$data$taxa_id, pattern='s_', n=2)[,2]) %>% # remove leading 's_'
arrange(desc(y), desc(abs(x))) %>%
filter(y >= (0.7*n_taxa), !is.na(taxa_id)) # keep significant taxa, remove unidentified taxa
write.csv(sig, paste(lubridate::today(),'SigFeatures_WGS_T5_PvL.csv',sep='_'))
# plot top 20 taxa
sig %>%
slice_head(n=20) %>%
ggplot(aes(x, taxa_id)) +
geom_point(aes(size = 1)) +
theme_bw(base_size = 16) +
guides(size = FALSE) +
labs(x = 'CLR Mean Difference', y = NULL)
ggsave(filename = paste(lubridate::today(),'Top20_WGS_T5_PvL.pdf',sep='_'), bg = 'transparent', device = 'pdf', dpi = 'retina', width = 10)
```
### Let v Let-co-housed
```
# Data Preprocessing
feature_table <- t5.LvLCH
sample_var <- "SampleID"
group_var <- "Treatment"
out_cut <- 0.05
zero_cut <- 0.90
lib_cut <- 0
neg_lb <- TRUE
prepro <- feature_table_pre_process(feature_table, t5.meta.LvLCH, sample_var, group_var,
out_cut, zero_cut, lib_cut, neg_lb)
# Preprocessed feature table
feature_table4 <- prepro$feature_table
# Preprocessed metadata
meta_data4 <- prepro$meta_data
# Structural zero info
struc_zero4 <- prepro$structure_zeros
# Run ANCOM
# name of the main variable of interest (character)
main_var <- "Treatment"
p_adj_method <- "BH" # number of taxa > 10, therefore use Benjamini-Hochberg correction
alpha <- 0.05
# character string representing the formula for adjustment
adj_formula <- NULL
# character string representing the formula for random effects in lme
rand_formula <- NULL
t_start <- Sys.time()
res4 <- ANCOM(feature_table4, meta_data4, struc_zero4, main_var, p_adj_method,
alpha, adj_formula, rand_formula)
t_end <- Sys.time()
t_end - t_start
# write output to file
# output contains the "W" statistic for each taxa - a count of the number of times
# the null hypothesis is rejected for each taxa
# detected_x are logicals indicating detection at specified FDR cut-off
write_csv(res4$out, "2021-07-25_WGS_T5_LvLCH_ANCOM_data.csv")
n_taxa <- ifelse(is.null(struc_zero4), nrow(feature_table4), sum(apply(struc_zero4, 1, sum) == 0))
res4$fig + scale_y_continuous(sec.axis = sec_axis(~ . * 100 / n_taxa, name = 'W proportion'))
ggsave(filename = paste(lubridate::today(),'volcano_WGS_T5_LvLCH.pdf',sep='_'), bg = 'transparent', device = 'pdf', dpi = 'retina')
# save features with W > 0
non.zero <- res4$fig$data %>%
arrange(desc(y), desc(abs(x))) %>%
mutate(taxa_id = str_split_fixed(res4$fig$data$taxa_id, pattern='s_', n=2)[,2], # remove leading 's_'
W.proportion = y/(n_taxa-1)) %>% # add W
filter(y > 0) %>%
rowid_to_column()
write.csv(non.zero, paste(lubridate::today(),'NonZeroW_Features_WGS_T5_LvLCH.csv',sep='_'))
# to find most significant taxa, I will sort the data
# 1) y (W statistic)
# 2) according to the absolute value of CLR mean difference
sig <- res4$fig$data %>%
mutate(taxa_id = str_split_fixed(res4$fig$data$taxa_id, pattern='s_', n=2)[,2]) %>% # remove leading 's_'
arrange(desc(y), desc(abs(x))) %>%
filter(y >= (0.7*n_taxa), !is.na(taxa_id)) # keep significant taxa, remove unidentified taxa
write.csv(sig, paste(lubridate::today(),'SigFeatures_WGS_T5_LvLCH.csv',sep='_'))
# plot top 20 taxa
sig %>%
slice_head(n=20) %>%
ggplot(aes(x, taxa_id)) +
geom_point(aes(size = 1)) +
theme_bw(base_size = 16) +
guides(size = FALSE) +
labs(x = 'CLR Mean Difference', y = NULL)
ggsave(filename = paste(lubridate::today(),'Top20_WGS_T5_LvLCH.pdf',sep='_'), bg = 'transparent', device = 'pdf', dpi = 'retina', width=10)
```
|
github_jupyter
|
```
import json
import os
from pathlib import Path
import matplotlib
matplotlib.rcParams['font.family'] = ['Noto Serif CJK JP']
import matplotlib.pyplot as plt
import pandas as pd
from sklearn import datasets
from sklearn.metrics import brier_score_loss
from sklearn.calibration import calibration_curve
ROOT = Path('/home/kylehiroyasu/programming/masters_thesis/stochastic-YOLO/results_21_09_2021')
files = [f for f in os.listdir(ROOT) if 'csv' in f]
files.sort()
files
files = [
'ccpd_blur.csv',
'ccpd_blur_dropout.csv',
'ccpd_blur_ensemble.csv',
'ccpd_challenge.csv',
'ccpd_challenge_dropout.csv',
'ccpd_challenge_ensemble.csv',
'ccpd_db.csv',
'ccpd_db_dropout.csv',
'ccpd_db_ensemble.csv',
'ccpd_fn.csv',
'ccpd_fn_dropout.csv',
'ccpd_fn_ensemble.csv',
'ccpd_rotate.csv',
'ccpd_rotate_dropout.csv',
'ccpd_rotate_ensemble.csv',
'ccpd_tilt.csv',
'ccpd_tilt_dropout.csv',
'ccpd_tilt_ensemble.csv',
'ccpd.csv',
'ccpd_dropout.csv',
'ccpd_ensemble.csv',
'ccpd_weather.csv',
'ccpd_weather_dropout.csv',
'ccpd_weather_ensemble.csv'
]
groups = ['blur', 'challenge', 'db', 'fn', 'rotate', 'tilt', 'val']
groups = ['Blur', 'Challenge', 'DB', 'FN', 'Rotate', 'Tilt', 'Base', 'Weather']
def load_data(path: str):
all_predictions = []
with open(path, mode='r') as f:
for line in f.readlines():
prediction = json.loads(line)
for correct, confidence, bbv, entropy in zip(prediction['correct'], prediction['confidence'], prediction['bounding_box_variance'], prediction['entropy']):
data = {
'image_name': prediction['image_name'],
'correct': correct[0],
'confidence': confidence,
'bounding_box_variance':bbv,
'entropy': entropy
}
all_predictions.append(data)
return pd.DataFrame(all_predictions)
def plot_calibration_curve(data_dict: dict, dataset_name: str, fig_index):
fig = plt.figure(fig_index, figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for name, df in data_dict.items():
y_test = df.correct
prob_pos = df.confidence
clf_score = brier_score_loss(y_test, prob_pos, pos_label=1)
#print("%s:" % name)
#print("\tBrier: %1.3f" % (clf_score))
#print("\tPrecision: %1.3f" % precision_score(y_test, y_pred))
#print("\tRecall: %1.3f" % recall_score(y_test, y_pred))
#print("\tF1: %1.3f\n" % f1_score(y_test, y_pred))
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
# ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
# label="%s (%1.3f)" % (name, clf_score))
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label=name)
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title(f'Calibration plots {dataset_name} (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
plt.savefig(f'{dataset_name}_detection.svg')
datasets = []
for i in range(0, len(files), 3):
data = load_data(ROOT/files[i])
dropout_data = load_data(ROOT/files[i+1])
ensemble_data = load_data(ROOT/files[i+2])
datasets.append({
'Normal': data,
'MC-Dropout (10 samples)': dropout_data,
'Ensemble (N=3)': ensemble_data
})
all_results = []
for group, data in zip(groups, datasets):
for k, df in data.items():
df['model'] = k
df['dataset'] = group
all_results.append(df)
dataset_results = pd.concat(all_results)
mean_results = dataset_results.groupby(by=['dataset','model']).mean()
mean_results
mean_results.entropy.plot.barh()
summary = pd.read_csv(ROOT/'all_test_results.csv', index_col=0)
dataset = {
'ccpd_blur.data': 'Blur',
'ccpd_challenge.data': 'Challenge',
'ccpd_db.data': 'DB',
'ccpd_fn.data': 'FN',
'ccpd_rotate.data': 'Rotate',
'ccpd_tilt.data': 'Tilt',
'ccpd.data': 'Base',
'ccpd_weather.data':'Weather'
}
model = {
'ensemble': 'Ensemble (N=3)',
'dropout': 'MC-Dropout (10 samples)',
'normal': 'Normal'
}
rename_columns = ['Data', 'do', 'Model','MP', 'MR', 'MAP', 'MF1']
summary.columns = rename_columns
summary['Data'] = summary.Data.apply(lambda d: dataset[d])
summary['Model'] = summary.Model.apply(lambda d: model[d])
summary_piv = summary.pivot(index='Model', columns='Data', values='MAP')
summary_piv
print(summary_piv.to_latex(float_format="%0.3f"))
results = mean_results.reset_index()
results['id'] = results.dataset + results.model
summary
summary['id'] = summary.Data + summary.Model
merged = pd.merge(summary, results, on='id')
merged
fig, axs = plt.subplots(ncols=3, sharex=True, sharey='row', figsize=(15,5))
for i, (name, group) in enumerate(merged.groupby(by='Model')):
group.plot.scatter(x='entropy', y='MAP',xlabel='Average Entropy', ax=axs[i])
axs[i].set_title(name)
plt.savefig(f'map_vs_entropy.svg')
fig, axs = plt.subplots(ncols=3, sharex=True, sharey='row', figsize=(15,5))
for i, (name, group) in enumerate(merged.groupby(by='Model')):
group.plot.scatter(x='bounding_box_variance', y='MAP',xlabel='Average Bounding Box Variance', ax=axs[i])
axs[i].set_title(name)
plt.savefig(f'map_vs_variance.svg')
#fig = plt.figure(figsize=(20, 20))
#axes.Axes(fig, (0,0,3,8), sharex=True)
fig, axs = plt.subplots(len(groups), 3, sharex=True, sharey='row', figsize=(15,15))
for i, (name, data_dict) in enumerate(zip(groups, datasets)):
for j, (key, df) in enumerate(data_dict.items()):
#df.groupby(by=['correct'])['entropy'].plot.hist(bins=10, ax=axs[i,j], alpha=.5)
df.groupby(by=['correct'])['confidence'].plot.hist(bins=10, ax=axs[i,j], alpha=.5)
for i, k in enumerate(data_dict.keys()):
axs[0, i].set_title(k, fontsize=14)
for i, k in enumerate(groups):
axs[i, 0].set_ylabel(k, rotation=0, fontsize=14, labelpad=100)
lines, labels = fig.axes[-1].get_legend_handles_labels()
fig.legend(lines, labels, loc = 'upper right')
plt.tight_layout()
plt.savefig(f'detection_hist.svg')
plt.show()
def plot_calibration_curve(data_dict: dict, dataset_name: str, ax1):
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for name, df in data_dict.items():
y_test = df.correct
prob_pos = df.confidence
clf_score = brier_score_loss(y_test, prob_pos, pos_label=1)
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label=name)
ax1.set_ylabel("Fraction of positives")
ax1.set_xlabel("Predicted Probability")
ax1.set_ylim([-0.05, 1.05])
ax1.set_title(f'{dataset_name}')
fig, axs = plt.subplots(4, 2, sharex=True, sharey=True, figsize=(15,15))
for i, (name, data_dict) in enumerate(zip(groups, datasets)):
row = i % 4
col = int(i/4)
plot_calibration_curve(data_dict=data_dict, dataset_name=name, ax1=axs[row, col])
lines, labels = fig.axes[-1].get_legend_handles_labels()
fig.legend(lines, labels, loc = 'upper right',borderaxespad=0.)
fig
plt.tight_layout()
plt.savefig(f'detection_calibration.svg')
```
|
github_jupyter
|
<h1>Table of Contents<span class="tocSkip"></span></h1>
<div class="toc"><ul class="toc-item"></ul></div>
```
!pip install tensorflow-addons
!pip install lifelines
!pip install scikit-plot
import tensorflow as tf
import tensorflow_addons as tfa
from tensorflow import keras
from sklearn.model_selection import train_test_split
from keras import backend as K
from tensorflow.keras.layers import StringLookup
from tqdm.keras import TqdmCallback
from tqdm.auto import tqdm
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use('default')
plt.style.use('seaborn-white')
models = tf.keras.models
layers = tf.keras.layers
preprocessing = tf.keras.preprocessing
tqdm.pandas()
def mse_nan(y_true, y_pred):
masked_true = tf.where(tf.math.is_nan(y_true), tf.zeros_like(y_true), y_true)
masked_pred = tf.where(tf.math.is_nan(y_true), tf.zeros_like(y_true), y_pred)
return K.mean(K.square(masked_pred - masked_true), axis=-1)
def get_optimizer():
optimizer = tf.keras.optimizers.Adam()
return optimizer
def get_model(num_shared=2, units=64, rate=0.3, loss_weights=None):
sm = layers.Input(shape=(100, ), name='D_Inp')
aa = layers.Input(shape=(1000, ), name='T_Inp')
emsm0 = layers.Embedding(53,
128,
trainable=True,
name='D_Emb',
mask_zero=True)(sm)
emaa0 = layers.Embedding(22,
128,
trainable=True,
name='T_Emb',
mask_zero=True)(aa)
cnvsm1 = layers.Conv1D(32, 3, name='D_L1')(emsm0)
cnvaa1 = layers.Conv1D(32, 3, name='T_L1')(emaa0)
cnvsm2 = layers.Conv1D(64, 3, name='D_L2')(cnvsm1)
cnvaa2 = layers.Conv1D(64, 3, name='T_L2')(cnvaa1)
cnvsm3 = layers.Conv1D(96, 3, name='D_L3')(cnvsm2)
cnvaa3 = layers.Conv1D(96, 3, name='T_L3')(cnvaa2)
gmpsm = layers.GlobalMaxPool1D(name='D_Gmp')(cnvsm2)
gmpaa = layers.GlobalMaxPool1D(name='T_Gmp')(cnvaa2)
C1 = layers.concatenate([gmpsm, gmpaa], axis=-1, name='C1')
S1 = layers.Dense(512, activation='relu', name='S1')(C1)
S1 = layers.Dropout(rate)(S1)
S2 = layers.Dense(512, activation='relu', name='S2')(S1)
S2 = layers.Dropout(rate)(S2)
S3 = layers.Dense(512, activation='relu', name='S3')(S2)
S3 = layers.Dropout(rate)(S3)
Kd = layers.Dense(units, activation='relu', name='S1_Kd')(S3)
Kd = layers.Dropout(rate)(Kd)
Ki = layers.Dense(units, activation='relu', name='S1_Ki')(S3)
Ki = layers.Dropout(rate)(Ki)
IC50 = layers.Dense(units, activation='relu', name='S1_IC50')(S3)
IC50 = layers.Dropout(rate)(IC50)
EC50 = layers.Dense(units, activation='relu', name='S1_EC50')(S3)
EC50 = layers.Dropout(rate)(EC50)
IA = layers.Dense(units, activation='relu', name='S1_IA')(S3)
IA = layers.Dropout(rate)(IA)
pH = layers.Dense(units, activation='relu', name='S1_pH')(S3)
pH = layers.Dropout(rate)(pH)
out1 = layers.Dense(1, activation='linear', name='Kd')(Kd)
out2 = layers.Dense(1, activation='linear', name='Ki')(Ki)
out3 = layers.Dense(1, activation='linear', name='IC50')(IC50)
out4 = layers.Dense(1, activation='linear', name='EC50')(EC50)
out5 = layers.Dense(1, activation='sigmoid', name='IA')(IA)
out6 = layers.Dense(1, activation='linear', name='pH')(pH)
model = models.Model(inputs=[sm, aa],
outputs=[out1, out2, out3, out4, out5, out6])
losses = {
"Kd": mse_nan,
"Ki": mse_nan,
"IC50": mse_nan,
"EC50": mse_nan,
"pH": mse_nan,
"IA": "binary_crossentropy",
}
metrics = {"IA": tf.keras.metrics.AUC()}
model.compile(loss=losses, optimizer=get_optimizer(), metrics=metrics, loss_weights=loss_weights)
model.summary()
return model
tf.keras.backend.clear_session()
np.random.seed(7)
tf.random.set_seed(7)
loss_weights = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
model = get_model(rate=0.3, loss_weights=loss_weights)
tf.keras.utils.plot_model(model, rankdir='LR',
show_shapes=True,
show_layer_activations=True)
CHARPROTSET = dict([('A', 1), ('G', 2), ('L', 3), ('M', 4), ('S', 5), ('T', 6),
('E', 7), ('Q', 8), ('P', 9), ('F', 10), ('R', 11),
('V', 12), ('D', 13), ('I', 14), ('N', 15), ('Y', 16),
('H', 17), ('C', 18), ('K', 19), ('W', 20), ('X', 21)])
CHARCANSMISET = dict([(')', 1), ('(', 2), ('1', 3), ('C', 4), ('c', 5),
('O', 6), ('2', 7), ('N', 8), ('=', 9), ('n', 10),
('3', 11), ('-', 12), ('4', 13), ('F', 14), ('S', 15),
('[', 16), (']', 17), ('l', 18), ('H', 19), ('s', 20),
('#', 21), ('o', 22), ('5', 23), ('B', 24), ('r', 25),
('+', 26), ('6', 27), ('P', 28), ('.', 29), ('I', 30),
('7', 31), ('e', 32), ('i', 33), ('a', 34), ('8', 35),
('K', 36), ('A', 37), ('9', 38), ('T', 39), ('g', 40),
('R', 41), ('Z', 42), ('%', 43), ('0', 44), ('u', 45),
('V', 46), ('b', 47), ('t', 48), ('L', 49), ('*', 50),
('d', 51), ('W', 52)])
class Gen:
def __init__(self,
data,
map_smiles,
map_aa,
shuffle=True,
test_only=False,
len_drug=100,
len_target=1000,
window=False):
self.data = data
self.map_smiles = map_smiles
self.map_aa = map_aa
self.shuffle = shuffle
self.test_only = test_only
self.len_drug = len_drug
self.len_target = len_target
self.size = self.data.shape[0]
self.inds = list(range(self.size))
if self.shuffle:
random.shuffle(self.inds)
self.window = window
self.gen = self._get_inputs()
def _get_inputs(self):
seen = 0
while seen < self.size:
ind = self.inds[seen]
sample = self.data.iloc[ind, :].values.tolist()
sample[0] = self.map_smiles[sample[0]]
sample[1] = self.map_aa[sample[1]]
if self.window:
ld = max(0, (len(sample[0]) - self.len_drug))
lt = max(0, (len(sample[1]) - self.len_target))
dstart = random.randint(0, ld)
tstart = random.randint(0, lt)
sample[0] = sample[0][dstart:dstart + self.len_drug]
sample[1] = sample[1][tstart:dstart + self.len_target]
yield sample
seen += 1
if seen == self.size:
if self.shuffle:
random.shuffle(self.inds)
seen = 0
def get_batch(self, batch_size):
while True:
BATCH = []
for _ in range(batch_size):
sample = next(self.gen)
for k, value in enumerate(sample):
if len(BATCH) < (k+1):
BATCH.append([])
BATCH[k].append(value)
BATCH[0] = preprocessing.sequence.pad_sequences(BATCH[0], self.len_drug)
BATCH[1] = preprocessing.sequence.pad_sequences(BATCH[1], self.len_target)
for k in range(2, len(BATCH)):
BATCH[k] = np.array(BATCH[k]).flatten()
if not self.test_only:
yield [BATCH[0], BATCH[1]], [BATCH[k] for k in range(2, len(BATCH))]
else:
yield [BATCH[0], BATCH[1]], [BATCH[k]*0 for k in range(2, len(BATCH))]
data = pd.read_csv("data_full_05_pH.zip", compression='zip')
order = [
'smiles', 'target', 'p1Kd', 'p1Ki', 'p1IC50', 'p1EC50', 'is_active', 'pH'
]
data = data[order]
data = data.sample(frac=1, random_state = 7)
data.head()
data.dropna().shape
SMILES = {}
for smiles in tqdm(data['smiles'].unique()):
SMILES[smiles] = [CHARCANSMISET[s] for s in smiles]
AA = {}
for aa in tqdm(data['target'].unique()):
AA[aa] = [CHARPROTSET[a.upper()] for a in aa]
X_train, X_test = train_test_split(data, test_size=0.1, shuffle=True, random_state = 7, stratify=data['is_active'])
X_train, X_valid = train_test_split(X_train, test_size=0.1, shuffle=True, random_state = 7, stratify=X_train['is_active'])
X_train.shape[0], X_test.shape[0], X_valid.shape[0]
X_train.head()
batch_size = 128
trg = Gen(X_train, SMILES, AA)
trg = trg.get_batch(batch_size)
vag = Gen(X_valid, SMILES, AA)
vag = vag.get_batch(batch_size)
# for batch in trg:
# break
# batch
steps_per_epoch = X_train.shape[0] // batch_size
valid_steps = X_valid.shape[0] // batch_size
filepath = "{epoch:02d}-{val_loss:.2f}.h5"
checkpoint = tf.keras.callbacks.ModelCheckpoint(filepath,
monitor='val_loss',
verbose=1,
save_best_only=False,
mode='auto',
save_weights_only=True)
history = model.fit(trg,
validation_data=vag,
steps_per_epoch=steps_per_epoch,
validation_steps=valid_steps,
verbose=0,
callbacks=[TqdmCallback(), checkpoint],
epochs=50)
model.load_weights('45-5.30.h5')
# !rm *.h5 -r
# history.history
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='valid')
plt.xlabel('Epoch')
plt.title('Loss on train-valid subsets')
plt.legend()
def get_batch_size(S):
    """Return the largest divisor of S that is at most 64.

    Used to pick a test batch size that evenly divides the number of test
    samples, so ``model.predict`` covers every sample in whole batches.
    """
    mbs = 1
    # Upper bound must be inclusive: the original `range(1, min(64, S))`
    # skipped both 64 and S itself, so e.g. S=128 returned 32 instead of 64
    # and a prime S < 64 returned 1 instead of S.
    for i in range(1, min(64, S) + 1):
        if S % i == 0:
            mbs = i
    assert S % mbs == 0
    return mbs
# Predict on the test set with a batch size that divides it exactly, so no
# test sample is dropped by the steps calculation.
mbs = get_batch_size(X_test.shape[0])
mbs
teg = Gen(X_test, SMILES, AA, shuffle=False, test_only=True)
teg = teg.get_batch(mbs)
prediction = model.predict(teg, steps=X_test.shape[0]//mbs, verbose=1)
from sklearn.metrics import mean_squared_error
from lifelines.utils import concordance_index
from scipy import stats
def get_scores(y_true, y_pred):
    """Summarise regression quality as a printable multi-line string.

    Reports RMSE, MSE, concordance index, and Pearson/Spearman correlation,
    each rounded to 3 decimal places.
    """
    mse_val = np.round(mean_squared_error(y_true, y_pred), 3)
    rmse_val = np.round(mse_val ** 0.5, 3)
    ci_val = np.round(concordance_index(y_true, y_pred), 3)
    pearson_r = np.round(stats.pearsonr(y_true, y_pred)[0], 3)
    spearman_r = np.round(stats.spearmanr(y_true, y_pred)[0], 3)
    return (
        f"rmse={rmse_val}, mse={mse_val},\n"
        f"pearson={pearson_r}, spearman={spearman_r},\n"
        f"ci={ci_val}"
    )
# Scatter true vs. predicted for each output head, dropping rows whose true
# value is missing, and annotate the plot with the score summary.
for k, col in enumerate(
        ['p1Kd', 'p1Ki', 'p1IC50', 'p1EC50', 'is_active', 'pH']):
    plt.scatter(X_test[col], prediction[k], alpha=0.7, c='k')
    plt.xlabel('true')
    plt.ylabel('predicted')
    y_true = X_test[col][X_test[col].notna()]
    y_pred = prediction[k][X_test[col].notna()].ravel()
    plt.title(col + ":\n" + get_scores(y_true, y_pred))
    plt.show() # 74.6
# ROC curve for the is_active head: build an (n, 2) class-probability array
# [P(negative), P(positive)] from the single sigmoid output column.
import scikitplot as skplt
p = prediction[-2].ravel().tolist()
probas = np.zeros((len(p),2))
probas[:,1] = p
probas[:,0] = 1
probas[:,0] = probas[:,0] - p
skplt.metrics.plot_roc_curve(X_test['is_active'].values.ravel().tolist(), probas)
plt.show()
plt.hist(prediction[-2].ravel(), bins=32, edgecolor='w', color='k', alpha=0.7);
```
|
github_jupyter
|
```
import pandas as pd
import numpy as np
from sklearn.model_selection import cross_val_score
from collections import Counter
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import GradientBoostingClassifier
from lightgbm import LGBMClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from xgboost import XGBClassifier
from preproc3 import na, encode, split, binarize, shuffle_in_unison, scale
from imblearn.over_sampling import SMOTE
# explicitly require this experimental feature
from sklearn.experimental import enable_hist_gradient_boosting # noqa
# now you can import normally from ensemble
from sklearn.ensemble import HistGradientBoostingClassifier
# Paths to the competition data and output folders.
DATA='ugrin2020-vehiculo-usado-multiclase/'
TRAIN=DATA+'train.csv'
TEST=DATA+'test.csv'
PREPROCESSED_DATA='preprocessed_data/'
RESULTS='results/'
train = pd.read_csv(TRAIN) # load training data
test = pd.read_csv(TEST) # load test data
# Drop the id column: it must not be used as a predictor.
test_ids = test['id']
del test['id']
del train['id']
# Rename the "Año" (year) column so it can be handled without non-ASCII issues.
train.rename(columns = {'Año':'Anio'}, inplace = True)
test.rename(columns = {'Año':'Anio'}, inplace = True)
train_label = train.Precio_cat
del train['Precio_cat']
# Stratified hold-out validation split.
train2, val, train2_label, val_label = train_test_split(train, train_label, stratify=train_label, test_size=0.25, random_state=42)
train2['Precio_cat']=train2_label
# na/encode/split/binarize/shuffle_in_unison/scale come from the local
# preproc3 module.
train2, val = na(train2, val)
val['label']=val_label
# Drop the rare 'Electric' fuel category and remaining NAs from validation.
val=val[val.Combustible!='Electric']
val=val.dropna()
val_label=val.label
del val['label']
train2, val = encode (train2, val)
train2, train2_label, val = split(train2, val)
train2, val = binarize(train2, val)
# Oversample minority classes on the training split only (never on val).
train2, train2_label = SMOTE(random_state=25).fit_resample(train2, train2_label)
shuffle_in_unison(train2, train2_label)
train2, val = scale(train2, val)
#np.savez_compressed(PREPROCESSED_DATA+'binScale-val', train2, train2_label, val, val_label)
# Grid search over HistGradientBoostingClassifier hyper-parameters, scored by
# hold-out validation accuracy.
param_grid = {'max_iter': [75, 100, 150, 200, 300], 'max_leaf_nodes': [27, 29, 31, 33], 'learning_rate': [0.08, 0.1, 0.12]}
rows = []
for iters in param_grid['max_iter']:
    for leaf in param_grid['max_leaf_nodes']:
        for lr in param_grid['learning_rate']:
            print(iters, leaf, lr)
            model = HistGradientBoostingClassifier(max_iter=iters, max_leaf_nodes=leaf, learning_rate=lr)
            model.fit(train2, train2_label)
            # Collect rows in a list: DataFrame.append was deprecated in
            # pandas 1.4 and removed in 2.0, and per-row append is quadratic.
            rows.append([iters, leaf, lr, accuracy_score(val_label, model.predict(val))])
results = pd.DataFrame(rows, columns=['iter', 'leaf', 'lr', 'acc'])
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
    display(results.sort_values(by='acc', ascending=False))
# Inspect the default XGBoost parameters, then grid-search tree count/depth,
# scored by hold-out validation accuracy.
model = XGBClassifier(n_jobs=4)
model
param_grid = {'n_estimators': [75, 100, 150, 200, 300, 400, 500], 'max_depth': [3, 8, 14, 26, None]}
rows = []
for n in param_grid['n_estimators']:
    for d in param_grid['max_depth']:
        print(n, d)
        model = XGBClassifier(n_estimators=n, max_depth=d, n_jobs=4, eval_metric='mlogloss')
        model.fit(train2, train2_label)
        # DataFrame.append was removed in pandas 2.0; collect rows instead.
        rows.append([n, d, accuracy_score(val_label, model.predict(val))])
results = pd.DataFrame(rows, columns=['n', 'd', 'acc'])
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
    display(results.sort_values(by='acc', ascending=False))
# Grid search over the SVC regularisation parameter C.
param_grid = {'C': [0.25, 0.5, 1, 2.5, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 60, 70]}
rows = []
for c in param_grid['C']:
    print(c)
    model = SVC(C=c)
    model.fit(train2, train2_label)
    # DataFrame.append was removed in pandas 2.0; collect rows instead.
    rows.append([c, accuracy_score(val_label, model.predict(val))])
results = pd.DataFrame(rows, columns=['C', 'acc'])
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
    display(results.sort_values(by='acc', ascending=False))
# Grid search over MLP architecture and regularisation. Single-layer sizes
# are written as 1-tuples: `(50)` is just the int 50, `(50,)` is the intended
# tuple (sklearn accepts both, but the tuple form is unambiguous).
param_grid = {'hidden_layer_sizes': [(50,), (100,), (150,), (200,), (250,), (50, 50), (100, 100), (150, 150), (200, 200), (250, 250)], 'early_stopping': [True, False], 'alpha': [0.00005, 0.0001, 0.00015]}
rows = []
for s in param_grid['hidden_layer_sizes']:
    for early in param_grid['early_stopping']:
        for a in param_grid['alpha']:
            print(s, early, a)
            model = MLPClassifier(hidden_layer_sizes=s, alpha=a, early_stopping=early, max_iter=1000)
            model.fit(train2, train2_label)
            # DataFrame.append was removed in pandas 2.0; collect rows instead.
            rows.append([s, early, a, accuracy_score(val_label, model.predict(val))])
results = pd.DataFrame(rows, columns=['shape', 'early', 'alpha', 'acc'])
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
    display(results.sort_values(by='acc', ascending=False))
# Grid search over GradientBoostingClassifier hyper-parameters.
param_grid = {'n_estimators': [450, 500, 550, 600], 'learning_rate': [0.1, 0.125, 0.15, 0.175, 0.2], 'subsample': [0.8, 0.9], 'max_depth': [2, 3, 4]}
rows = []
# NOTE: the [3:4] slice deliberately explores only n_estimators=600 here
# (the earlier values were presumably covered in a previous run).
for n in param_grid['n_estimators'][3:4]:
    for lr in param_grid['learning_rate']:
        for s in param_grid['subsample']:
            for d in param_grid['max_depth']:
                print(n, lr, s, d)
                model = GradientBoostingClassifier(n_estimators=n, learning_rate=lr, subsample=s, max_depth=d)
                model.fit(train2, train2_label)
                # DataFrame.append was removed in pandas 2.0; collect rows.
                rows.append([n, lr, s, d, accuracy_score(val_label, model.predict(val))])
results = pd.DataFrame(rows, columns=['n', 'lr', 's', 'd', 'acc'])
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
    display(results.sort_values(by='acc', ascending=False))
# Grid search over LightGBM hyper-parameters (max_depth=-1 means unlimited).
param_grid = {'learning_rate': [0.07, 0.08, 0.1, 0.12], 'n_estimators': [125, 150, 200], 'num_leaves': [25, 27, 29, 31], 'max_depth': [3, 8, -1]}
rows = []
for n in param_grid['n_estimators']:
    for lr in param_grid['learning_rate']:
        for leaves in param_grid['num_leaves']:
            for d in param_grid['max_depth']:
                print(n, lr, leaves, d)
                model = LGBMClassifier(n_estimators=n, learning_rate=lr, num_leaves=leaves, max_depth=d)
                model.fit(train2, train2_label)
                # DataFrame.append was removed in pandas 2.0; collect rows.
                rows.append([n, lr, leaves, d, accuracy_score(val_label, model.predict(val))])
results = pd.DataFrame(rows, columns=['n', 'lr', 'leaves', 'd', 'acc'])
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
    display(results.sort_values(by='acc', ascending=False))
# Retrain the last-configured model on the oversampled training split and
# check hold-out accuracy.
model.fit(train2, train2_label)
pred = model.predict(val)
accuracy_score(val_label, pred)
# BUG FIX: `label` was never defined anywhere in the notebook; the full
# training labels live in `train_label`.
# NOTE(review): `train` is the raw, un-encoded frame here — confirm the model
# can consume it, or cross-validate on the preprocessed features instead.
scores = cross_val_score(model, train, train_label, cv=5)
print(scores)
print(np.mean(scores))
```
|
github_jupyter
|
# TensorFlow BYOM: Train with Custom Training Script, Compile with Neo, and Deploy on SageMaker
In this notebook you will compile a trained model using Amazon SageMaker Neo. This notebook is similar to the [TensorFlow MNIST training and serving notebook](https://github.com/aws/amazon-sagemaker-examples/blob/master/sagemaker-python-sdk/tensorflow_script_mode_training_and_serving/tensorflow_script_mode_training_and_serving.ipynb) in terms of its functionality. You will complete the same classification task, however this time you will compile the trained model using the SageMaker Neo API on the backend. SageMaker Neo will optimize your model to run on your choice of hardware. At the end of this notebook you will setup a real-time hosting endpoint in SageMaker for your SageMaker Neo compiled model using the TensorFlow Model Server. Note: This notebooks requires Sagemaker Python SDK v2.x.x or above.
### Set up the environment
```
# Standard SageMaker setup: a session plus the notebook's IAM execution role.
import os
import sagemaker
from sagemaker import get_execution_role
sagemaker_session = sagemaker.Session()
role = get_execution_role()
```
### Download the MNIST dataset
```
# Download MNIST via the TF1 contrib helper and convert each split to
# TFRecord files under ./data using the notebook's local utils module.
import utils
from tensorflow.contrib.learn.python.learn.datasets import mnist
import tensorflow as tf
data_sets = mnist.read_data_sets("data", dtype=tf.uint8, reshape=False, validation_size=5000)
utils.convert_to(data_sets.train, "train", "data")
utils.convert_to(data_sets.validation, "validation", "data")
utils.convert_to(data_sets.test, "test", "data")
```
### Upload the data
We use the ```sagemaker.Session.upload_data``` function to upload our datasets to an S3 location. The return value, `inputs`, identifies that location — we will use it later when we start the training job.
```
inputs = sagemaker_session.upload_data(path="data", key_prefix="data/DEMO-mnist")
```
# Construct a script for distributed training
Here is the full code for the network model:
```
!cat 'mnist.py'
```
The script here is and adaptation of the [TensorFlow MNIST example](https://github.com/tensorflow/models/blob/master/official/vision/image_classification/mnist_main.py). It provides a ```model_fn(features, labels, mode)```, which is used for training, evaluation and inference. See [TensorFlow MNIST training and serving notebook](https://github.com/aws/amazon-sagemaker-examples/blob/master/sagemaker-python-sdk/tensorflow_script_mode_training_and_serving/tensorflow_script_mode_training_and_serving.ipynb) for more details about the training script.
## Create a training job using the sagemaker.TensorFlow estimator
```
from sagemaker.tensorflow import TensorFlow
mnist_estimator = TensorFlow(
entry_point="mnist.py",
role=role,
framework_version="1.15.3",
py_version="py3",
training_steps=1000,
evaluation_steps=100,
instance_count=2,
instance_type="ml.c4.xlarge",
)
mnist_estimator.fit(inputs)
```
The **```fit```** method will create a training job in two **ml.c4.xlarge** instances. The logs above will show the instances doing training, evaluation, and incrementing the number of **training steps**.
In the end of the training, the training job will generate a saved model for TF serving.
# Deploy the trained model to prepare for predictions (the old way)
The deploy() method creates an endpoint which serves prediction requests in real-time.
```
mnist_predictor = mnist_estimator.deploy(initial_instance_count=1, instance_type="ml.m4.xlarge")
```
## Invoking the endpoint
```
import numpy as np
import json
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
# Send the first 10 test images to the endpoint and compare the predicted
# digit with the true label.
for i in range(10):
    data = mnist.test.images[i].tolist()
    # Follow https://www.tensorflow.org/tfx/serving/api_rest guide to format input to the model server
    predict_response = mnist_predictor.predict({"instances": np.asarray(data).tolist()})
    print("========================================")
    label = np.argmax(mnist.test.labels[i])
    print("label is {}".format(label))
    prediction = np.argmax(predict_response["predictions"])
    print("prediction is {}".format(prediction))
```
## Deleting the endpoint
```
sagemaker.Session().delete_endpoint(mnist_predictor.endpoint)
```
# Deploy the trained model using Neo
Now the model is ready to be compiled by Neo to be optimized for our hardware of choice. We are using the ``TensorFlowEstimator.compile_model`` method to do this. For this example, our target hardware is ``'ml_c5'``. You can change this to other supported target hardware if you prefer.
## Compiling the model
The ``input_shape`` is the definition for the model's input tensor and ``output_path`` is where the compiled model will be stored in S3. **Important. If the following command result in a permission error, scroll up and locate the value of execution role returned by `get_execution_role()`. The role must have access to the S3 bucket specified in ``output_path``.**
```
# Compile the trained model for the ml_c5 target with SageMaker Neo.
# input_shape matches the flattened MNIST input tensor.
output_path = "/".join(mnist_estimator.output_path.split("/")[:-1])
optimized_estimator = mnist_estimator.compile_model(
    target_instance_family="ml_c5",
    input_shape={"data": [1, 784]},  # batch size 1, 784 flattened 28x28 greyscale pixels
    output_path=output_path,
    framework="tensorflow",
    framework_version="1.15.3",
)
```
## Set image uri (Temporarily required)
Image URI: aws_account_id.dkr.ecr.aws_region.amazonaws.com/sagemaker-inference-tensorflow:1.15.3-instance_type-py3
Refer to the table on the bottom [here](https://docs.aws.amazon.com/sagemaker/latest/dg/neo-deployment-hosting-services-container-images.html) to get aws account id and region mapping
```
# Temporary workaround: pin the Neo TF inference container image explicitly
# (account-id/region mapping comes from the AWS docs table linked above).
optimized_estimator.image_uri = (
    "301217895009.dkr.ecr.us-west-2.amazonaws.com/sagemaker-inference-tensorflow:1.15.3-cpu-py3"
)
```
## Deploying the compiled model
```
# Deploy the Neo-compiled model on the compilation target family (c5).
optimized_predictor = optimized_estimator.deploy(
    initial_instance_count=1, instance_type="ml.c5.xlarge"
)
```
## Invoking the endpoint
```
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
# Same sanity check as before, now against the Neo-optimized endpoint.
for i in range(10):
    data = mnist.test.images[i].tolist()
    # Follow https://www.tensorflow.org/tfx/serving/api_rest guide to format input to the model server
    predict_response = optimized_predictor.predict({"instances": np.asarray(data).tolist()})
    print("========================================")
    label = np.argmax(mnist.test.labels[i])
    print("label is {}".format(label))
    prediction = np.argmax(predict_response["predictions"])
    print("prediction is {}".format(prediction))
```
## Deleting endpoint
```
sagemaker.Session().delete_endpoint(optimized_predictor.endpoint)
```
|
github_jupyter
|
[](https://pythonista.io)
[*D3.js*](https://d3js.org/) es una biblioteca de Javascript especializada en la creación de documentos orientados a datos (Data Driven Documents) capaz de acceder a los recursos de un documento HTML mediante selecciones.
*D3.js* no contiene herramientas específicas para crear gráficos, pero es capaz de acceder a los estilos de un elemento, así como de crear y modificar elementos SVG y Canvas.
## Inclusión de *D3.js* en un documento HTML.
Existen varias formas de acceder a la biblioteca, dependiendo del estilo de programación.
Es posible acceder a la documentación de *D3.js* en la siguiente liga:
https://github.com/d3/d3
**Nota:** Al momento de escribir este documento, la versión 5 es la más reciente de *D3.js*.
### Inclusión de mediante el elemento *<script>*.
La forma más común de incluir la biblioteca es haciendo referencia a una URL desde la que se puede obtener la bibilioteca.
``` html
<script src="https://d3js.org/d3.v5.js"></script>
```
También es posible acceder a la versión minimizada de la biblioteca.
```html
<script src="https://d3js.org/d3.v5.min.js"></script>
```
Del mismo modo, es posible descargar el archivo y cargarlo de forma local.
**Ejemplo:**
El documento [html/d3-1.html](html/d3-1.html) contiene el siguiente código:
```html
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Ejemplo 1</title>
<script src="https://d3js.org/d3.v5.min.js"></script>
</head>
<body>
<h1>Ejemplo básico de <em>D3.js</em></h1>
<p>El siguiente párrafo será modificado mediante el uso de la biblioteca <i>D3.js</i></p>
<p id="muestra">¡Hola, mundo!</p>
<script>
d3.select("#muestra").
style("background-color", "gray").
style("color", "white").
style("font-size", "150%").
style("text-align", "center");
</script>
</body>
</html>
```
### Inclusión como módulo.
La biblioteca es compatible con el formato de módulos de ECMAScript 2015.
## *D3.js* en una notebook de Jupyter con un kernel de iPython.
En versiones recientes de Jupyter, no es posible utilizar el elemento *<script>* dentro de una celda usando el comando mágico *%%html%%*.
### Uso de *RequireJS*.
[*RequireJS*](https://requirejs.org/) es una herramienta que permite importar diversos paquetes de Javascript como si fueran módulos. Ya se encuentra incluida en las notebooks de Jupyter y puede ser invocada como *require*.
**Nota:** En este capítulo se explorarán sólo las funcionalidades necesarias de *RequireJS* para acceder a *D3.js*.
```
%%javascript
// Teach RequireJS where to fetch D3 v5 (minified build) from.
require.config({
    paths: {
        d3: 'https://d3js.org/d3.v5.min'
    }
})
```
**Ejemplo:**
```
%%html
<p id="muestra">Hola, mundo.</p>
%%javascript
// Register the D3 v5 CDN path, then load it and restyle the #muestra
// paragraph created by the %%html cell above.
require.config({
    paths: {
        d3: 'https://d3js.org/d3.v5.min'
    }
})
require(["d3"], function(d3){
    d3.select("#muestra").
        style("background-color", "gray").
        style("color", "white").
        style("font-size", "150%").
        style("text-align", "center");
})
```
## El objeto *d3*.
El objeto *d3* es el componente básico mediante el cual se hace uso de las funcionalidades de la biblioteca sobre los elementos de un documento HTML.
El objeto *d3* cuenta con múltiples métodos que regresan a su vez objetos *d3*, a los cuales se les pueden ir añadiendo métodos.
```
d3.<método 1>(<argumentos 1>).<método 2>(<argumentos 2>)...<método n>(<argumentos n>);
```
Debido a que javascript no es sensible a los retornos de línea y para facilitar la lectura, se sugiere utilizar una estructura como la siguiente:
```
d3.<método 1>(<argumentos 1>).
<método 2>(<argumentos 2>).
...
<método n>(<argumentos n>);
```
<p style="text-align: center"><a rel="license" href="http://creativecommons.org/licenses/by/4.0/"><img alt="Licencia Creative Commons" style="border-width:0" src="https://i.creativecommons.org/l/by/4.0/80x15.png" /></a><br />Esta obra está bajo una <a rel="license" href="http://creativecommons.org/licenses/by/4.0/">Licencia Creative Commons Atribución 4.0 Internacional</a>.</p>
<p style="text-align: center">© José Luis Chiquete Valdivieso. 2019.</p>
|
github_jupyter
|
# Data Science Session 4
John Michael Hernandez Valerio is inviting you to a scheduled Zoom meeting.
Topic: Rafael's Data Science Class 4
Time: Mar 29, 2021 08:00 AM Beijing, Shanghai
Join Zoom Meeting
https://us04web.zoom.us/j/75939938727?pwd=dVJhTXNydTV2TGxJUVZ1QVZaUnByUT09
Meeting ID: 759 3993 8727
Passcode: KNa2R4
### Today's Class
- Concate strings: using + vs , (comma separate string)
- Story telling with Strings and `input()`
### Next Class
- Intro to Dataframe
- Intro to Pandas Library
- Search for an string in a string.
- What is a CSV file and how to read it.
- What is a xsl or xsls file and how to read it.
**Important:** It is necessary to mention that some information may appear to be missing from the class video and from this document; the reason is that the course has been structured for the specific needs of the student ([Rafael Mesa](https://www.linkedin.com/in/rafael-mesa-rodriguez-2a1298124/ "Rafael's LinkedIn profile")).
<font size="4" color="blue" face="verdana"> <B>Concate strings: using + vs , (comma separate string) </B></font>
Python provides several methods of formatting strings in the **`print()`** function beyond **string addition**
**`print()`** provides using **commas** to combine stings for output
by comma separating strings **`print()`** will output each separated by a space by default
<font size="4" color="#B24C00" face="verdana"> <B>Example</B></font>
**print 3 strings on the same line using commas inside the print() function**
```
# [ ] print 3 strings on the same line using commas inside the print() function
print("Rafael")
print("John")
print("Daniel")
# '+' concatenates with no separator, so spaces must be added explicitly.
print("Rafael" + "John" + "Daniel")
print("Rafael " + "John " + "Daniel")
print("Rafael" +" "+ "John" +" "+ "Daniel")
print("Rafael " + "John " + "Daniel")
print("Rafael" +" "+ "John" +" "+ "Daniel")
# ',' passes separate arguments; print() inserts a space between them.
print("Rafael" , "John" , "Daniel")
```
<font size="4" color="#B24C00" face="verdana"> <B>Example: Concatenating multiple elements using comma</B></font>
```
# review and run code
# review and run code
# Comma-separated print arguments are space-separated automatically, so
# literals and input() results mix without manual spacing.
time_PU= input("What time do you want to go to the party")
location= "Puente de la 17"
print("I will pick you up @",time_PU,"for the party.", "wait for me at",location)
```
<font size="4" color="#B24C00" face="verdana"> <B>Task 1</B></font>
Create a new markdown cell below and explain what is the difference between using the addition sign (+) vs using comma (,) to concatenate elements in a print function
<font size="4" color="#B24C00" face="verdana"> <B>Task 2</B></font>
Create a new code cell below and provide an example of the explaination given in the previous task
<font size="4" color="#B24C00" face="verdana"> <B>Task 3</B></font>
## Program: How many for the training?
Create a program that prints out a reservation for a training class. Gather the name of the party, the number attending and the time.
>**example** of what input/output might look like:
```
enter name for contact person for training group: Hiroto Yamaguchi
enter the total number attending the course: 7
enter the training time selected: 3:25 PM
------------------------------
Reminder: training is schedule at 3:25 PM for the Hiroto Yamaguchi group of 7 attendees
Please arrive 10 minutes early for the first class
```
Design and Create your own reminder style
- **[ ]** get user input for variables:
- **owner**: name of person the reservation is for
- **num_people**: how many are attending
- **training_time**: class time
- **[ ]** create an integer variable **min_early**: number of minutes early the party should arrive
- **[ ]** using comma separation, print reminder text
- use all of the variables in the text
- use additional strings as needed
- use multiple print statements to format message on multiple lines (optional)
```
# [ ] get input for variables: owner, num_people, training_time - use descriptive prompt text
# [ ] get input for variables: owner, num_people, training_time - use descriptive prompt text
# NOTE: the right-hand sides are intentionally blank — this is an exercise
# template for the student to complete; the cell will not run as-is.
owner =
num_people =
training_time =
# [ ] create a integer variable min_early and "hard code" the integer value (e.g. - 5, 10 or 15)
min_early =
# [ ] print reminder text using all variables & add additional strings - use comma separated print formatting
```
<font size="4" color="#B24C00" face="verdana"> <B>Example: Telling a story using strings</B></font>
Run the cell below and answer the questions to see the result.
```
#initialize the variables
girldescription = " "
boydescription = " "
walkdescription = " "
girlname = " "
boyname = " "
animal = " "
gift = " "  # NOTE(review): collected below but never used in the story
answer = " "
#Ask the user to specify values for the variables
print(":) Welcome to the Awesome Stories game!!!!")
print("Please answer the following questions and we will create a story for you\n")
girlname = input("Enter a girl's name: ")
boyname = input("Enter a boy's name: " )
animal = input("Name a type of animal: " )
gift = input("Name something you find in the bathroom: ")
girldescription = input("Enter a description of a flower: ")
boydescription = input("Enter a description of a car: ")
walkdescription = input("Enter a description of how you might dance: " )
answer = input("What would you say to someone who gave you a cow: ")
#Display the story
#Don't forget to format the strings when they are displayed
print()
print ("Once upon a time,")
print("there was a girl named " + girlname.capitalize() + ".")
print("One day, " + girlname.capitalize() + " was walking " + walkdescription.lower() + " down the street.")
print("Then she met a " + boydescription.lower() + " boy named " + boyname.capitalize() + ".")
print("He said, 'You are really " + girldescription.lower() + "!'")
print("She said '" + answer.capitalize() + ", " + boyname.capitalize() + ".'")
print("Then they both rode away on a " + animal.lower() + " and lived happily ever after.")
#cl
#initialize the variables
girldescription = " "  # NOTE(review): used in the story but never collected below, prints blank
boydescription = " "  # NOTE(review): initialized but never used
walkdescription = " "  # NOTE(review): initialized but never used
girlname = " "
girlprofession= " "
boyname = " "
animal = " "
answer = " "  # NOTE(review): used in the story but never collected below, prints blank
action_on_thestreet = ""
#Ask the user to specify values for the variables
print(":) Welcome to the Awesome Stories game!!!!")
print("Please answer the following questions and we will create a story for you\n")
girlname = input("Enter a girl's name: ")
girlprofession= input("Enter the profession that the girl want to study")
boyname = input("Enter a boy's name: " )
animal = input("Name a type of animal: " )
boyprofession = input("Enter the profession: ")
newyorkstreet = input("Enter a famous street of new york: " )
action_on_thestreet= input("the action they where doing on the street")
#Display the story
#Don't forget to format the strings when they are displayed
print()
print ("In 2012 in the city of New Work,")
print("there was a girl named",girlname.capitalize(),"that have a dream to be a great", girlprofession + ".")
print("with the vision to save people with cancer disease")
print("12 years later in 2025")
# NOTE(review): the commented-out line below would raise TypeError if
# re-enabled — newyorkstreet() calls a string as a function.
#print(girlname.capitalize(), "was walking",newyorkstreet(),"visiting the New York Stock Exchange with her friend that came from cameron", action_on_thestreet.lower(),".")
print("By coincidence",boyprofession.lower(),"that she knew walkout of a building","his name was",boyname.capitalize(),"that boy was a cancer pacient stage 4 that she had cure",".")
print("He said, Do you are",girlname.capitalize(), girldescription.lower(),'that save my life'"!")
print("She said OMG you are", boyname, answer.capitalize() + ", " + boyname.capitalize() + ".'")
print("Then they both decided to meet up later at" + animal.lower() + "to go have dinner after.")
#cl
```
<font size="4" color="#B24C00" face="verdana"> <B>Task 4</B></font>
Run the cell below.
```
#Run this and see what happens
message= "tU TIENES la Pampara enceDIA en DS."
print("Option 1:", message,"\n")
print("Option 2:", message.lower(),"\n")
print("Option 3:", message.upper(),"\n")
print("Option 4:", message.capitalize(),"\n")
print("Option 5:", message.title(),"\n")
```
### Explain what happened in the cell above.
```
print("cincO"=="cinco")
```
### Print the variable message and answer: Is the message (the size of its letters) different from the beggining?
|
github_jupyter
|
# Bento Activity Recognition Tutorial:
This notebook has been designed for the Bento activity recognition challenge competition with the aim of providing basic knowledge of Human Activity Recognition using MOCAP data.
It has been made by Nazmun Nahid.
# Library import:
Here we are going to use pandas(https://pandas.pydata.org/docs/user_guide/index.html), numpy(https://numpy.org/devdocs/user/whatisnumpy.html) and matplotlib(https://matplotlib.org/stable/contents.html).
```
import pandas as pd
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
# Default figure size for every plot in this notebook.
plt.rcParams["figure.figsize"] = (19,15)
```
# Read Data:
First, we have to load the data in the data frame.
```
df=pd.read_csv('/content/drive/MyDrive/Tutorial/Tutorial.csv')
```
Now let's check what information the data contains!
```
df.head()
```
So, here we can see in the data file there are many rows and columns. Do you want to know the exact number of rows and columns?
```
df.shape
```
# Data Visualization:
Now, let's see how the data looks like!
```
# Plot every numeric channel, then the per-activity sample counts.
df.plot()
df['activity'].value_counts().plot.bar()
```
# Pre-processing:
In the preprocessing stage we need to first focus on the missing values. Let's check if our data have any missing values.
```
# Total missing values, then the per-column breakdown.
df.isnull().sum().sum()
print(df.isnull().sum())
```
We have some missing values. So, we have to keep that in mind while handling the data. To work with this data we will divide the whole data into smaller segments.
```
def segmentation(x_data, overlap_rate, time_window):
    """Cut a DataFrame into fixed-length, possibly overlapping windows.

    The sliding-window step is (1 - overlap_rate) * time_window, so e.g.
    overlap_rate=0.5 advances half a window at a time. Each window's label
    is the 'activity' value at the window's first row. Trailing windows may
    be shorter than time_window. x_data is expected to have a default
    RangeIndex (label lookup by start position).

    Returns (windows, labels) as parallel lists.
    """
    step = int((1 - overlap_rate) * time_window)
    windows = []
    labels = []
    for start in range(0, x_data.shape[0], step):
        windows.append(x_data[start:start + time_window])
        labels.append(x_data['activity'][start])
    return windows, labels
#Segmentation with 50% overlap and a 350-sample window
# Interpolate gaps first, then zero-fill anything interpolation cannot reach
# (e.g. leading NaNs), so every window is fully numeric.
df1_itpl=df.interpolate()
#replace missing values with 0
df1_itpl=df1_itpl.fillna(0)
[seg, seg_label]=segmentation(df1_itpl,0.5,350)
```
# Feature Extraction:
There are many types of features. For ease of use we have shown only some very common features.
```
def get_features(x_data):
    """Return [std, mean, max, min] for every column of x_data, flattened.

    The std uses the population formula (ddof=0). Values are emitted in
    column order, four statistics per column, as one flat list.
    """
    feats = []
    for col in x_data.columns:
        series = x_data[col]
        feats.extend([
            series.std(ddof=0),   # population standard deviation
            np.average(series),   # mean
            np.max(series),       # maximum
            np.min(series),       # minimum
        ])
    return feats
#set list
# Build the per-window feature matrix and its parallel label list, dropping
# the identifier/label columns before computing statistics.
features_list=[]
label_list=[]
for j in range(0,len(seg)):
    #extract only xyz columns
    frame1=seg[j].drop(columns=['subject_id','activity'])
    #Get features and label for each elements
    features_list.append(get_features(frame1))
    label_list.append(seg_label[j])
```
Now we have a feature list and a label list. The next step is classification.
# Training:
For classification there are several models. Here we are using one of the most commonly used model Random Forest.
```
from sklearn.ensemble import RandomForestClassifier
# 500-tree random forest, using all CPU cores.
model_ml = RandomForestClassifier(n_estimators=500,n_jobs=-1)
```
Here we only have one subject. So, we will divide data from this subject into train and test file to evaluate the results. For more than one subject you can also put one subject in testing and others in training.
```
from sklearn.model_selection import train_test_split
# 70/30 random split of windows (single subject, so no subject-wise split).
X_train, X_test, y_train, y_test = train_test_split(features_list, label_list, test_size=0.3, random_state=42)
```
Now let's train the model!
```
model_ml.fit(X_train, y_train)
```
The training is complete but how can we see the results? For that we will here use classification report with which we can see the accuracy, precision, recall and f1 score. We will also use confusion matrix for the evaluation.
```
from sklearn.metrics import classification_report
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.metrics import confusion_matrix
# Per-class precision/recall/F1 plus a confusion-matrix plot on the test set.
y_predict = model_ml.predict(X_test)
print(classification_report(y_test, y_predict))
#confusion_matrix(y_test, y_predict)
# plot_confusion_matrix was deprecated in scikit-learn 1.0 and removed in
# 1.2; ConfusionMatrixDisplay.from_estimator is the drop-in replacement.
ConfusionMatrixDisplay.from_estimator(model_ml, X_test, y_test)
plt.show()
```
We have successfully completed learning to read the data, visualize data, pre-processing, feature extraction, classification and evaluation of the generated model. Now it's your turn to generate a model following these steps and predict the labels of the test data. Best of luck!
|
github_jupyter
|
# Date+Time Basics
**Inhalt:** Mit Zeit-Datentyp umgehen
**Nötige Skills:** Erste Schritte mit Pandas
**Lernziele:**
- Text in Zeit konvertieren
- Zeit in Text konvertieren
- Zeit-Informationen extrahieren
- Einfache Zeit-Operationen
## Libraries
```
import pandas as pd
from datetime import datetime
from datetime import timedelta
```
## Zeitformat Codes
Extrakt, die volle Liste: http://strftime.org/. Diese Format-Codes brauchen wir, um mit Daten zu arbeiten.
| Code | Description | *Example* |
|--------|---------|--------|
| **`%a`** | Weekday as locale’s abbreviated name. | *Mon* |
| **`%A`** | Weekday as locale’s full name. | *Monday* |
| **`%d`** | Day of the month as a zero-padded decimal number. | *30* |
| **`%-d`** | Day of the month as a decimal number. (Platform specific) | *30* |
| **`%b`** | Month as locale’s abbreviated name. | *Sep* |
| **`%B`** | Month as locale’s full name. | *September* |
| **`%m`** | Month as a zero-padded decimal number. | *09* |
| **`%-m`** | Month as a decimal number. (Platform specific) | *9* |
| **`%y`** | Year without century as a zero-padded decimal number. | *13* |
| **`%Y`** | Year with century as a decimal number. | *2013* |
| **`%H`** | Hour (24-hour clock) as a zero-padded decimal number. | *07* |
| **`%-H`** | Hour (24-hour clock) as a decimal number. (Platform specific) | *7* |
| **`%I`** | Hour (12-hour clock) as a zero-padded decimal number. | *07* |
| **`%-I`** | Hour (12-hour clock) as a decimal number. (Platform specific) | *7* |
| **`%p`** | Locale’s equivalent of either AM or PM. | *AM* |
| **`%M`** | Minute as a zero-padded decimal number. | *06* |
| **`%-M`** | Minute as a decimal number. (Platform specific) | *6* |
| **`%S`** | Second as a zero-padded decimal number. | *05* |
| **`%-S`** | Second as a decimal number. (Platform specific) | *5* |
| **`%j`** | Day of the year as a zero-padded decimal number. | *273* |
| **`%-j`** | Day of the year as a decimal number. (Platform specific) | *273* |
| **`%U`** | Week number of the year (Sunday as the first day of the week) as a zero padded decimal number. All days in a new year preceding the first Sunday are considered to be in week 0. | *39* |
| **`%W`** | Week number of the year (Monday as the first day of the week) as a decimal number. All days in a new year preceding the first Monday are considered to be in week 0. | *39* |
| **`%c`** | Locale’s appropriate date and time representation. | *Mon Sep 30 07:06:05 2013* |
| **`%x`** | Locale’s appropriate date representation. | *09/30/13* |
| **`%X`** | Locale’s appropriate time representation. | *07:06:05* |
| **`%%`** | A literal '%' character. | *%*
## Text to Time
Eine häufige Situation, wenn man von irgendwo Daten importiert:
- Wir haben einen bestimmten String, zB: "1981-08-23"
- Wir wollen den String in ein Datetime-Objekt verwandeln, um sie zu analysieren
- Dazu benutzen wir die Pandas-Funktion `to_datetime()`
```
my_birthday_date = pd.to_datetime('1981-08-23', format='%Y-%m-%d')
#funktion heisst .to_datetime: nimm den Textstring und mach ein Datum daraus.
```
Das Ergebnis wird uns als "Timestamp" angezeigt.
```
my_birthday_date
```
Die Funktion erkennt einige Standardformate automatisch
```
my_date = pd.to_datetime('1972-03-11 13:42:25')
my_date
```
**Platz** zum ausprobieren. Kreiere ein Datetime-Objekt aus folgenden Strings:
```
# Beispiel: '23.08.1981'
my_date = pd.to_datetime('23.08.1981', format='%d.%m.%Y')
my_date
# Do it yourself: 'Aug 23, 1981'
dat1 = pd.to_datetime("Aug 23, 1981", format="%b %d, %Y")
dat1
# '18.01.2016, 18:25 Uhr'
dat2 = pd.to_datetime("18.01.2016, 18:25 Uhr" , format= "%d.%m.%Y, %H:%M Uhr")
dat2
# '5. May 2014'
dat3 = pd.to_datetime("5. May 2014", format='%d. %B %Y')
dat3
("5. Mai 2014").replace("Mai", "May")
# '5. Mai 2014'
my_date = pd.to_datetime('5. Mai 2014'.replace('Mai','May'), format='%d. %B %Y')
my_date
```
## Time to Text
Brauchen wir typischerweise bei der Anzeige oder beim Export von Daten
- Wir haben bereits ein Datetime-Objekt erstellt
- jetzt wollen wir es nach einem bestimmten Schema anzeigen
- dafür dient die Funktion `strftime()`, die jedes Datetime-Objekt hat
Das Datums-Ojbekt haben wir bereits:
```
my_date = pd.to_datetime('1981-08-23 08:15:25')
```
Als Text:
```
my_text = my_date.strftime(format='%Y-%m-%d')
# .strftime() (wie string for time), formatiere eine Zeit (Timestamp) als String. Gib ein sauberes Datum aus
my_text
```
**Quiz**: Lass `strftime()` den folgenden Text ausgeben:
```
# Beispiel: 'Aug 23, 1981'
my_text = my_date.strftime(format="%b %d, %Y")
my_text
# Do it yourself: #'23.8.81, 08:15:25'
# Fix: the original format had a stray apostrophe inside the string and used
# a zero-padded month (%m) with a four-digit year (%Y), producing
# "23.08.1981, 08:15:25'" instead of the requested text.
# %-m drops the zero padding (platform specific), %y is the two-digit year.
text1 = my_date.strftime(format="%d.%-m.%y, %H:%M:%S")
text1
# 'Sunday, 23. of August 1981, 8:15 AM'
# Fix: the original format omitted %Y, so the year "1981" required by the
# target output was missing.
text2 = my_date.strftime(format="%A, %d. of %B %Y, %-I:%M %p")
text2
```
## Time properties
`strftime()` ist nicht die einzige Möglichkeit, Daten als Text anzuzeigen.
Wir können auch direkt einzelne Eigenschaften eines Datetime-Objekts abfragen.
Taken from https://pandas.pydata.org/pandas-docs/stable/timeseries.html
| Property | Description |
|----------|------------|
| **`.year`** | - The year of the datetime |
| **`.month`** | - The month of the datetime |
| **`.day`** | - The days of the datetime |
| **`.hour`** | - The hour of the datetime |
| **`.minute`** | - The minutes of the datetime |
| **`.second`** | - The seconds of the datetime |
| **`.microsecond`** | - The microseconds of the datetime |
| **`.nanosecond`** | - The nanoseconds of the datetime |
| **`.date`** | - Returns datetime.date (does not contain timezone information) |
| **`.time`** | - Returns datetime.time (does not contain timezone information) |
| **`.dayofyear`** | - The ordinal day of year |
| **`.weekofyear`** | - The week ordinal of the year |
| **`.week`** | - The week ordinal of the year |
| **`.dayofweek`** | - The number of the day of the week with Monday=0, Sunday=6 |
| **`.weekday`** | - The number of the day of the week with Monday=0, Sunday=6 |
| **`.weekday_name`** | - The name of the day in a week (ex: Friday) |
| **`.quarter`** | - Quarter of the date: Jan-Mar = 1, Apr-Jun = 2, etc. |
| **`.days_in_month`** | - The number of days in the month of the datetime |
| **`.is_month_start`** | - Logical indicating if first day of month (defined by frequency) |
| **`.is_month_end`** | - Logical indicating if last day of month (defined by frequency) |
| **`.is_quarter_start`** | - Logical indicating if first day of quarter (defined by frequency) |
| **`.is_quarter_end`** | - Logical indicating if last day of quarter (defined by frequency) |
| **`.is_year_start`** | - Logical indicating if first day of year (defined by frequency) |
| **`.is_year_end`** | - Logical indicating if last day of year (defined by frequency) |
| **`.is_leap_year`** | - Logical indicating if the date belongs to a leap year |
Das funktioniert dann ganz einfach:
```
my_date.year
my_date.day
my_date.is_month_start
```
**Quiz**:
```
# In welcher Jahreswoche liegt unser Datum `my_date`?
my_date.weekofyear
# Um was für einen Wochentag handelt es sich (Zahl)?
my_date.dayofweek
```
## Zeitintervalle
"Timedelta" ist ein spezieller Datentyp, der kein Datum, sondern einen Zeitintervall modelliert.
Wir können diesen Datentyp z.B. für Vergleiche zwischen zwei Daten brauchen.
Die folgenden Intervalle stehen uns dabei zur Verfügung:
**`weeks`** - Wochen
**`days`** - Tage
**`hours`** - Stunden
**`minutes`** - Minuten
**`seconds`** - Sekunden
**`microseconds`** - Mikrosekunden
Ein Intervall erstellen wir mit der Funktion `timedelta()`
```
d = timedelta(days=2)
d
d = timedelta(hours=1)
d
```
Wir können die Argumente beliebig kombinieren
```
d = timedelta(days=3, hours=10, minutes=25, seconds=10)
d
```
Wir können ein Zeitintervall zu einem Datetime-Objekt addieren oder subtrahieren:
```
my_date + d
my_date - d
```
Ein Timedelta erhalten wir auch, wenn wir die Differenz von zwei Daten bilden:
```
my_date_1 = pd.to_datetime('1981-08-23', format='%Y-%m-%d')
my_date_2 = pd.to_datetime('1981-08-25', format='%Y-%m-%d')
d = my_date_2 - my_date_1
d
```
Die Info erhalten wir wiederum, indem wir die Eigenschaft abfragen:
```
d.days
```
**Quiz:** Wie viele Tage liegen zwischen folgenden Daten?
```
my_string_1 = '2001/09/11'
my_string_2 = '2016/11/09'
#Antwort
my_date_1 = pd.to_datetime(my_string_1, format='%Y/%m/%d')
my_date_2 = pd.to_datetime(my_string_2, format='%Y/%m/%d')
d = my_date_2 - my_date_1
d.days
```
**Quiz:** Ich werde ab dem 1. Januar 2019 um 0:00 Uhr während 685648 Sekunden keinen Alkohol trinken. An welchem Datum greife ich wieder zum Glas?
```
#Antwort
#Startdatum als Variable kreieren
neujahr= pd.to_datetime("2019-01-01", format="%Y-%m-%d")
#Timedelta kreieren
d= timedelta(seconds=685648)
#Variable für Datum und Timedelta erstellen
trinkstart = neujahr + d
#Resultat ausgeben als String
trinkstart.strftime(format="%Y-%m-%d")
```
## Hier und Jetzt
Last but not least: eine Funktion, die uns das aktuelle Datum samt Zeit angibt:
```
jetzt = datetime.today()
jetzt
```
Wir können dieses Datum wie jedes andere Datum auch anzeigen:
```
jetzt.strftime(format='%Y-%m-%d: %H:%M:%S')
```
Wir können auch damit herumrechnen:
```
d = timedelta(days=1)
(jetzt - d).strftime(format='%Y-%m-%d: %H:%M:%S')
```
|
github_jupyter
|
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from igp2 import AgentState
from igp2.data.data_loaders import InDDataLoader
from igp2.data.episode import Frame
from igp2.data.scenario import InDScenario, ScenarioConfig
from igp2.opendrive.map import Map
from igp2.opendrive.plot_map import plot_map
from core.feature_extraction import FeatureExtractor, GoalDetector
from core.goal_generator import GoalGenerator
from core import feature_extraction
print(feature_extraction.__file__)
odr_results = pd.read_csv('../predictions/heckstrasse_trained_trees_test.csv')
odr_results.shape
lanelet_results = pd.read_csv('../../GRIT-lanelet/predictions/heckstrasse_trained_trees_test.csv')
lanelet_results.shape
odr_results = pd.read_csv('../predictions/frankenberg_trained_trees_test.csv')
odr_results
lanelet_results = pd.read_csv('../../GRIT-lanelet/predictions/frankenberg_trained_trees_test.csv')
lanelet_results
lanelet_results[['episode', 'agent_id']].drop_duplicates()
odr_results[['episode', 'agent_id']].drop_duplicates()
lanelet_agents = lanelet_results.agent_id.drop_duplicates()
odr_agents = odr_results.agent_id.drop_duplicates()
lanelet_agents.isin(odr_agents).sum()
lanelet_agents.shape
odr_agents.isin(lanelet_agents).sum()
odr_agents.shape
lanelet_agents.loc[~lanelet_agents.isin(odr_agents)]
lanelet_results.loc[lanelet_results.agent_id==75]
```
Why does opendrive have more vehicles? e.g agent 1.
Different goal locations? - yes
Bicycles are also included - needs fixing
Why are some agents included in lanelet2 but not odr? e.g. 75 - vehicle misses goal slightly to the right. Should goal detection be based on dist along lane rather than pointgoal? or increase goal radius to match lane width
Baseline acc goes down at final point (1.0 of traj obs)
```
%cd ..
scenario_name = 'frankenberg'
scenario_map = Map.parse_from_opendrive(f"scenarios/maps/{scenario_name}.xodr")
scenario_config = ScenarioConfig.load(f"scenarios/configs/{scenario_name}.json")
scenario = InDScenario(scenario_config)
episode_idx = 5
episode = scenario.load_episode(episode_idx)
agent = episode.agents[75]
fig, ax = plt.subplots(figsize=(10, 6))
plot_map(scenario_map, ax=ax)
path = agent.trajectory.path
ax.plot(path[:, 0], path[:, 1])
ax.plot(*scenario_config.goals[0], 'o')
scenario_name = 'heckstrasse'
scenario_map = Map.parse_from_opendrive(f"scenarios/maps/{scenario_name}.xodr")
scenario_config = ScenarioConfig.load(f"scenarios/configs/{scenario_name}.json")
scenario = InDScenario(scenario_config)
goal_detector = GoalDetector(scenario.config.goals)
episode_idx = 0
episode = scenario.load_episode(episode_idx)
agent = episode.agents[0]
agent_goals, goal_frame_idxes = goal_detector.detect_goals(agent.trajectory)
agent_goals
trajectory = agent.trajectory
feature_extractor = FeatureExtractor(scenario_map)
for idx in range(0, len(agent.trajectory.path)):
typed_goals = feature_extractor.get_typed_goals(agent.trajectory.slice(0, idx+1), scenario.config.goals)
print(idx, [g is not None for g in typed_goals])
typed_goals
agent.trajectory.path[68]
ax = plot_map(scenario_map)
ax.plot(*agent.trajectory.path[68], 'o')
ax.plot([20],[-60], 'o')
scenario_map.lanes_at(agent.trajectory.path[0], max_distance=3)
lanes = scenario_map.lanes_within_angle(agent.trajectory.path[0],
agent.trajectory.heading[0],
threshold=np.pi/4, max_distance=3)
print(lanes)
ax = plot_map(scenario_map)
for lane in lanes:
ax.plot(*list(zip(*[x for x in lane.midline.coords])))
goal_point = np.array((62.2, -47.3))
idx = 70
best_lane = scenario_map.best_lane_at(agent.trajectory.path[idx],
agent.trajectory.heading[idx],
max_distance=3, goal_point=goal_point)
print(best_lane)
ax = plot_map(scenario_map)
ax.plot(*list(zip(*[x for x in best_lane.midline.coords])))
data = pd.read_csv('data/heckstrasse_e0.csv')
goals_10 = data.loc[data.fraction_observed==1.0].value_counts('agent_id')
goals_09 = data.loc[data.fraction_observed==0.9].value_counts('agent_id')
(goals_10 > goals_09).sum()
predictions = pd.read_csv('predictions/heckstrasse_prior_baseline_test.csv')
predictions
predictions.loc[predictions.fraction_observed==1.0].model_correct.mean()
predictions.loc[predictions.fraction_observed==0.9].model_correct.mean()
idx = predictions.loc[predictions.fraction_observed==0.9].set_index('agent_id').model_correct \
!= predictions.loc[predictions.fraction_observed==1.0].set_index('agent_id').model_correct
idx.loc[idx]
predictions.loc[predictions.agent_id==15]
```
Problem: Wrong goal type inferred at the last minute - why? G1 assigned goal type turn-left
```
data.loc[data.agent_id==15]
# lane id -1 on road 6, heckstrasse - detected as goal G1 - this must be junction NE to SE
# take into account trajectory history when detecting lane? Is this done for lanelet2 GRIT? e.g. previous lanelet
ax = plot_map(scenario_map)
lane = scenario_map.get_lane(7, -1)
ax.plot(*list(zip(*[x for x in lane.midline.coords])))
ax.plot([36.0], [-27.0], 'o')
heading = -0.6367160078810041
speed = 15.915689301070186
scenario_name = 'round'
scenario_map = Map.parse_from_opendrive(f"scenarios/maps/{scenario_name}.xodr")
scenario_config = ScenarioConfig.load(f"scenarios/configs/{scenario_name}.json")
scenario = InDScenario(scenario_config)
episode_idx = 0
episode = scenario.load_episode(episode_idx)
print(len(agent_goals))
for g in agent_goals:
print(g)
pwd
odr_results = pd.read_csv('predictions/round_trained_trees_test.csv')
odr_results.shape
lanelet_results = pd.read_csv('../GRIT-lanelet/predictions/round_trained_trees_test.csv')
lanelet_results.shape
odr_results[['episode', 'agent_id', 'fraction_observed']]
# isin with multiple columns?
episode = 4
fraction_observerd = 0.8
odr_samples = odr_results.loc[(odr_results.episode == episode)
& (odr_results.fraction_observed == fraction_observerd)].set_index('agent_id')
lanelet_samples = lanelet_results.loc[(lanelet_results.episode == episode)
& (lanelet_results.fraction_observed == fraction_observerd)].set_index('agent_id')
lanelet_samples
odr_samples
odr_samples = odr_samples.join(lanelet_samples.model_correct, rsuffix='_ll')
odr_samples.loc[odr_samples.model_correct != odr_samples.model_correct_ll]
```
|
github_jupyter
|
```
#!jupyter nbextension enable --py widgetsnbextension --sys-prefix
#!jupyter serverextension enable voila --sys-prefix
%matplotlib widget
import ipywidgets as widgets
import matplotlib.pyplot as plt
import numpy as np
from IPython.display import display, clear_output
output1 = widgets.Output()
output2 = widgets.Output()
output3 = widgets.Output()
output4 = widgets.Output()
# create some x data
#x = np.linspace(0, 2 * np.pi, 100)
e1 = 0.25 #m
e2 = 0.05
k1 = 0.2 #W/(m.K)
k2 = 0.1
he2 = 40
Q=400 #W/m3
La = np.linspace(0,e1,10)
Lb = np.linspace(0,e2,10)
#Ltot = np.linspace(0,e1+e2,20)
Te = 300 #K
qpp = Q*e1
def T_B(Qa, ea, h, eb, kb):
    """Boundary temperatures of wall B.

    The heat flux q = Qa*ea generated in wall A crosses wall B and leaves by
    convection (coefficient h) into ambient air at the module-level
    temperature ``Te``.

    Returns (T_right, T_left): the outer (convective) face temperature and
    the A/B interface temperature, in that order.
    """
    flux = Qa * ea                   # steady-state heat flux through wall B
    t_right = Te + flux / h          # convective boundary at the outer face
    t_left = t_right + flux * eb / kb  # conduction drop across wall B
    return t_right, t_left
def T_A(Qa, ea, ka, h, eb, kb):
    """Temperature profile across the composite wall (wall A then wall B).

    Parameters
    ----------
    Qa : volumetric heat generation in wall A [W/m3]
    ea, ka : thickness [m] and conductivity [W/(m.K)] of wall A
    h : convection coefficient at the outer face of wall B
    eb, kb : thickness and conductivity of wall B

    Returns
    -------
    (L, Ta) : lists of x positions and temperatures — 10 points with a
    parabolic profile in wall A followed by 9 points with a linear profile
    in wall B.  Relies on the module-level ambient temperature ``Te``.
    """
    x = np.linspace(0, ea, 10)
    x2 = np.linspace(0, eb, 10)
    Ta = []
    L = []
    TbR, TbL = T_B(Qa, ea, h, eb, kb)
    # Parabolic profile in the heat-generating wall A.
    # FIX: the original expression used the global Q here instead of the Qa
    # parameter; harmless while callers always pass Q, but it silently
    # ignored the argument.  Use Qa consistently.
    for i in range(len(x)):
        Ta.append(-Qa*x[i]*x[i]/(2*ka) + ((TbL - Te)/ea + Qa*ea/(2*ka))*x[i] + Te)
        L.append(x[i])
    # Linear interpolation between TbL (interface) and TbR (outer face)
    # across wall B; starts at i=1 to avoid duplicating the interface point.
    for i in range(1, len(x2)):
        Ta.append((TbR-TbL)*(x[len(x)-1]+x2[i])/x2[len(x2)-1]+(TbL*(x[len(x)-1]+x2[len(x2)-1])-TbR*x[len(x)-1])/x2[len(x2)-1])
        L.append(x[len(x)-1]+x2[i])
    return L, Ta
Ltot, Ttot = T_A(Q,e1,k1, he2, e2, k2)
# default line color
initial_color = '#FF00DD'
with output1:
fig, ax = plt.subplots(constrained_layout=True, figsize=(8, 4))
# move the toolbar to the bottom
fig.canvas.toolbar_position = 'bottom'
ax.grid(True)
#line, = ax.plot(x_list, q_fin(x_list,k1,hi1,he1), initial_color, label='Q')
line, = ax.plot(Ltot, Ttot, color='b', label='T')
ax.set_xlim(0,0.425)
ax.set_ylim(299,400)
ax.set_xlabel('x')
ax.set_ylabel('T (K)')
ax.legend()
#output1
text_0 = widgets.HTML(value="<p>Consider the case of a composite wall - Wall A and Wall B with Wall A being heat generating. Assume there is not heat resistance between boundaries of Wall A and B. The left side of the Wall A is at ambient temperature of Tair = Tw = 300 K. Wall A has parameters La, Ka, with heat generation Q=400 W/m3. Wall B has parameters Lb, Kb, and h.</p>")
vbox_text = widgets.VBox([text_0])
# create some control elements
la_slider = widgets.FloatSlider(value=e1, min=0.15, max=0.35, step=0.05, description='La')
lb_slider = widgets.FloatSlider(value=e2, min=0.015, max=0.075, step=0.005, description='Lb')
ka_slider = widgets.FloatSlider(value=k1, min=0.05, max=2, step=0.05, description='Ka')
kb_slider = widgets.FloatSlider(value=k2, min=0.05, max=2, step=0.05, description='Kb')
heb_slider = widgets.FloatSlider(value=he2, min=30, max=50, step=1, description='h')
#Q_slider = widgets.FloatSlider(value=Q, min=390, max=500, step=10, description='Q')
# callback functions
def update1(change):
    """Slider callback: recompute the temperature profile and redraw.

    `change` is the ipywidgets change dict and is ignored — the current
    values are read directly from the sliders.  Q stays fixed (its slider
    is commented out above).
    """
    # Recompute the full A+B profile with the current slider values.
    xnew, ynew = T_A(Q,la_slider.value,ka_slider.value, heb_slider.value, lb_slider.value, kb_slider.value)
    line.set_xdata(xnew)
    line.set_ydata(ynew)
    fig.canvas.draw()  # force an immediate redraw of the figure canvas
button = widgets.Button(description="Reset")
def on_button_clicked(b):
    """Reset-button callback: restore every slider to its initial value.

    Assigning to ``.value`` triggers the observers registered on these
    sliders (see below), which redraw the plot via update1.
    """
    with output1:
        la_slider.value = e1
        lb_slider.value = e2
        ka_slider.value = k1
        kb_slider.value = k2
        heb_slider.value = he2
button.on_click(on_button_clicked)
la_slider.observe(update1, 'value')
lb_slider.observe(update1, 'value')
ka_slider.observe(update1, 'value')
kb_slider.observe(update1, 'value')
heb_slider.observe(update1, 'value')
#Q_slider.observe(update1, 'value')
clear_output()
controls = widgets.VBox([vbox_text, la_slider, lb_slider, ka_slider, kb_slider, heb_slider, button])
page = widgets.HBox([controls, output1])
display(page)
```
|
github_jupyter
|
# $$User\ Defined\ Metrics\ Tutorial$$
[](https://colab.research.google.com/github/catboost/tutorials/blob/master/custom_loss/custom_loss_and_metric_tutorial.ipynb)
# Contents
* [1. Introduction](#1.\-Introduction)
* [2. Classification](#2.\-Classification)
* [3. Regression](#3.\-Regression)
* [4. Multiclassification](#4.\-Multiclassification)
# 1. Introduction
CatBoost allows you to create your own loss functions and metrics and pass them to a model. To do this you should implement classes with special interfaces.
##### Interface for user defined objectives:
```
class UserDefinedObjective(object):
    """Interface template for a user-defined CatBoost objective.

    Pass an instance as ``loss_function=...`` to a CatBoost model; CatBoost
    calls ``calc_ders_range`` during training to obtain loss derivatives.
    """
    def calc_ders_range(self, approxes, targets, weights):
        """Return a list of (der1, der2) pairs, one per processed object."""
        # approxes, targets, weights are indexed containers of floats
        # (containers which have only __len__ and __getitem__ defined).
        # weights parameter can be None.
        #
        # To understand what these parameters mean, assume that there is
        # a subset of your dataset that is currently being processed.
        # approxes contains current predictions for this subset,
        # targets contains target values you provided with the dataset.
        #
        # This function should return a list of pairs (der1, der2), where
        # der1 is the first derivative of the loss function with respect
        # to the predicted value, and der2 is the second derivative.
        pass
class UserDefinedMultiClassObjective(object):
    """Interface template for a user-defined multiclass CatBoost objective.

    Pass an instance as ``loss_function=...``; CatBoost calls
    ``calc_ders_multi`` once per object during training.
    """
    def calc_ders_multi(self, approxes, target, weight):
        """Return (der1, der2) for a single object across all class dims."""
        # approxes - indexed container of floats with predictions
        # for each dimension of single object
        # target - contains a single expected value
        # weight - contains weight of the object
        #
        # This function should return a tuple (der1, der2), where
        # - der1 is a list-like object of first derivatives of the loss function with respect
        # to the predicted value for each dimension.
        # - der2 is a matrix of second derivatives.
        pass
```
##### Interface for user defined metrics:
```
class UserDefinedMetric(object):
    """Interface template for a user-defined CatBoost evaluation metric.

    Pass an instance as ``eval_metric=...`` to a CatBoost model.
    """
    def is_max_optimal(self):
        """Return True if larger metric values are better."""
        # Returns whether great values of metric are better
        pass
    def evaluate(self, approxes, target, weight):
        """Return the (error, weight_sum) pair for a batch of objects."""
        # approxes is a list of indexed containers
        # (containers with only __len__ and __getitem__ defined),
        # one container per approx dimension.
        # Each container contains floats.
        # weight is a one dimensional indexed container.
        # target is a one dimensional indexed container.
        # weight parameter can be None.
        # Returns pair (error, weights sum)
        pass
    def get_final_error(self, error, weight):
        """Combine accumulated error and weight into the final metric value."""
        # Returns final value of metric based on error and weight
        pass
```
Below we consider examples of user defined metrics for different types of tasks. We will use the following variables:
<center>$a$ - approx value</center>
<center>$p$ - probability</center>
<center>$t$ - target</center>
<center>$w$ - weight</center>
```
# import neccessary packages
from catboost import CatBoostClassifier, CatBoostRegressor
import numpy as np
from sklearn.datasets import make_classification, make_regression
from sklearn.model_selection import train_test_split
```
# 2. Classification
Note: for binary classification problems approxes are not equal to probabilities. Probabilities are calculated from approxes using sigmoid function.
<h4><center>$p=\frac{1}{1 + e^{-a}}=\frac{e^a}{1 + e^a}$</center></h4>
As an example, let's take Logloss metric which is defined by the following formula:
<h4><center>$Logloss_i = -{w_i * (t_i * log(p_i) + (1 - t_i) * log(1 - p_i))}$</center></h4>
<h4><center>$Logloss = \frac{\sum_{i=1}^{N}{Logloss_i}}{\sum_{i=1}^{N}{w_i}}$</center></h4>
This metric has derivative and can be used as objective. The derivatives of Logloss for single object are defined by the following formulas:
<h4><center>$\frac{\delta(Logloss_i)}{\delta(a)} = w_i * (t_i - p_i)$</center></h4>
<h4><center>$\frac{\delta^2(Logloss_i)}{\delta(a^2)} = -w_i * p_i * (1 - p_i)$</center></h4>
Below you can see implemented Logloss objective and metric.
```
class LoglossObjective(object):
    """Logloss objective implementing CatBoost's calc_ders_range interface."""

    def calc_ders_range(self, approxes, targets, weights):
        """Return [(der1, der2), ...] of Logloss w.r.t. each raw approx.

        der1 = w * (t - p), der2 = -w * p * (1 - p), with p = sigmoid(approx).
        ``weights`` may be None (unit weights).
        """
        assert len(approxes) == len(targets)
        if weights is not None:
            assert len(weights) == len(approxes)
        ders = []
        for i, (approx, target) in enumerate(zip(approxes, targets)):
            exp_a = np.exp(approx)
            prob = exp_a / (1 + exp_a)  # sigmoid of the raw approx
            w = 1.0 if weights is None else weights[i]
            ders.append((w * (target - prob), w * (-prob * (1 - prob))))
        return ders
class LoglossMetric(object):
    """Logloss eval metric implementing CatBoost's custom-metric interface."""

    def get_final_error(self, error, weight):
        """Weighted mean logloss (tiny epsilon guards division by zero)."""
        return error / (weight + 1e-38)

    def is_max_optimal(self):
        """Lower logloss is better."""
        return False

    def evaluate(self, approxes, target, weight):
        """Return (sum of weighted logloss terms, sum of weights).

        ``approxes`` holds a single container of raw scores; probabilities
        are obtained via the sigmoid.  ``weight`` may be None.
        """
        assert len(approxes) == 1
        assert len(target) == len(approxes[0])
        preds = approxes[0]
        err_total = 0.0
        w_total = 0.0
        for i, a in enumerate(preds):
            exp_a = np.exp(a)
            prob = exp_a / (1 + exp_a)
            w = 1.0 if weight is None else weight[i]
            w_total += w
            err_total += -w * (target[i] * np.log(prob) + (1 - target[i]) * np.log(1 - prob))
        return err_total, w_total
```
Below there are examples of training with built-in Logloss function and our Logloss objective and metric. As we can see, the results are the same.
```
X, y = make_classification(n_classes=2, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
model1 = CatBoostClassifier(iterations=10, loss_function='Logloss', eval_metric='Logloss',
learning_rate=0.03, bootstrap_type='Bayesian', boost_from_average=False,
leaf_estimation_iterations=1, leaf_estimation_method='Gradient')
model1.fit(X_train, y_train, eval_set=(X_test, y_test))
model2 = CatBoostClassifier(iterations=10, loss_function=LoglossObjective(), eval_metric=LoglossMetric(),
learning_rate=0.03, bootstrap_type='Bayesian', boost_from_average=False,
leaf_estimation_iterations=1, leaf_estimation_method='Gradient')
model2.fit(X_train, y_train, eval_set=(X_test, y_test))
```
# 3. Regression
For regression approxes don't need any transformations. As an example of regression loss function and metric we take well-known RMSE which is defined by the following formulas:
<h3><center>$RMSE = \sqrt{\frac{\sum_{i=1}^{N}{w_i * (t_i - a_i)^2}}{\sum_{i=1}^{N}{w_i}}}$</center></h3>
<h4><center>$\frac{\delta(RMSE_i)}{\delta(a)} = w_i * (t_i - a_i)$</center></h4>
<h4><center>$\frac{\delta^2(RMSE_i)}{\delta(a^2)} = -w_i$</center></h4>
```
class RmseObjective(object):
    """RMSE objective implementing CatBoost's calc_ders_range interface."""

    def calc_ders_range(self, approxes, targets, weights):
        """Return [(der1, der2), ...] for the squared-error loss.

        der1 = w * (t - a), der2 = -w.  ``weights`` may be None.
        """
        assert len(approxes) == len(targets)
        if weights is not None:
            assert len(weights) == len(approxes)
        if weights is None:
            return [(t - a, -1) for a, t in zip(approxes, targets)]
        return [((t - a) * w, -1 * w)
                for a, t, w in zip(approxes, targets, weights)]
class RmseMetric(object):
    """RMSE eval metric implementing CatBoost's custom-metric interface."""

    def get_final_error(self, error, weight):
        """Square root of the weighted mean squared error."""
        return np.sqrt(error / (weight + 1e-38))

    def is_max_optimal(self):
        """Lower RMSE is better."""
        return False

    def evaluate(self, approxes, target, weight):
        """Return (weighted sum of squared errors, sum of weights).

        ``approxes`` holds a single container of predictions; ``weight``
        may be None (unit weights).
        """
        assert len(approxes) == 1
        assert len(target) == len(approxes[0])
        sq_err = 0.0
        w_total = 0.0
        for i, (a, t) in enumerate(zip(approxes[0], target)):
            w = 1.0 if weight is None else weight[i]
            w_total += w
            sq_err += w * ((a - t) ** 2)
        return sq_err, w_total
```
Below there are examples of training with built-in RMSE function and our RMSE objective and metric. As we can see, the results are the same.
```
X, y = make_regression(random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
model1 = CatBoostRegressor(iterations=10, loss_function='RMSE', eval_metric='RMSE',
learning_rate=0.03, bootstrap_type='Bayesian', boost_from_average=False,
leaf_estimation_iterations=1, leaf_estimation_method='Gradient')
model1.fit(X_train, y_train, eval_set=(X_test, y_test))
model2 = CatBoostRegressor(iterations=10, loss_function=RmseObjective(), eval_metric=RmseMetric(),
learning_rate=0.03, bootstrap_type='Bayesian', boost_from_average=False,
leaf_estimation_iterations=1, leaf_estimation_method='Gradient')
model2.fit(X_train, y_train, eval_set=(X_test, y_test))
```
# 4. Multiclassification
Note: for multiclassification problems approxes are not equal to probabilities. Usually approxes are transformed to probabilities using Softmax function.
<h3><center>$p_{i,c} = \frac{e^{a_{i,c}}}{\sum_{j=1}^k{e^{a_{i,j}}}}$</center></h3>
<center>$p_{i,c}$ - the probability that $x_i$ belongs to class $c$</center>
<center>$k$ - number of classes</center>
<center>$a_{i,j}$ - approx for object $x_i$ for class $j$</center>
Let's implement MultiClass objective that is defined as follows:
<h3><center>$MultiClass_i = w_i * \log{p_{i,t_i}}$</center></h3>
<h3><center>$MultiClass = \frac{\sum_{i=1}^{N}Multiclass_i}{\sum_{i=1}^{N}w_i}$</center></h3>
<h3><center>$\frac{\delta(Multiclass_i)}{\delta{a_{i,c}}} = \begin{cases}
w_i-\frac{w_i*e^{a_{i,c}}}{\sum_{j=1}^{k}e^{a_{i,j}}}, & \mbox{if } c = t_i \\
-\frac{w_i*e^{a_{i,c}}}{\sum_{j=1}^{k}e^{a_{i,j}}}, & \mbox{if } c \neq t_i
\end{cases}$</center></h3>
<h3><center>$\frac{\delta^2(Multiclass_i)}{\delta{a_{i,c_1}}\delta{a_{i,c_2}}} = \begin{cases}
\frac{w_i*e^{2*a_{i,c_1}}}{(\sum_{j=1}^{k}e^{a_{i,j}})^2} - \frac{w_i*e^{a_{i, c_1}}}{\sum_{j=1}^{k}e^{a_{i,j}}}, & \mbox{if } c_1 = c_2 \\
\frac{w_i*e^{a_{i,c_1}+a_{i,c_2}}}{(\sum_{j=1}^{k}e^{a_{i,j}})^2}, & \mbox{if } c_1 \neq c_2
\end{cases}$</center></h3>
```
class MultiClassObjective(object):
    """MultiClass (softmax cross-entropy) objective for CatBoost."""

    def calc_ders_multi(self, approx, target, weight):
        """Return (grad, hess) for one object.

        grad[c]     = w * ([c == target] - p_c)
        hess[c1,c2] = w * (p_c1 * p_c2 - [c1 == c2] * p_c1)
        where p is the softmax of ``approx``.
        """
        shifted = np.array(approx) - max(approx)  # shift by max for numerical stability
        exps = np.exp(shifted)
        probs = exps / exps.sum()
        dim = len(probs)
        grad = [weight * ((1.0 if c == target else 0.0) - probs[c])
                for c in range(dim)]
        hess = [[weight * (probs[c1] * probs[c2] - (probs[c1] if c1 == c2 else 0.0))
                 for c2 in range(dim)]
                for c1 in range(dim)]
        return (grad, hess)
class AccuracyMetric(object):
    """Accuracy eval metric for multiclass models (argmax over approx dims)."""

    def get_final_error(self, error, weight):
        """Weighted accuracy (tiny epsilon guards division by zero)."""
        return error / (weight + 1e-38)

    def is_max_optimal(self):
        """Higher accuracy is better."""
        return True

    def evaluate(self, approxes, target, weight):
        """Return (weighted count of correct predictions, sum of weights).

        ``approxes`` has one container per class; argmax over axis 0 picks
        the highest-scoring class for each object.  ``weight`` may be None.
        """
        predicted = np.argmax(approxes, axis=0)
        hits = 0.0
        w_total = 0.0
        for i, t in enumerate(target):
            w = 1.0 if weight is None else weight[i]
            w_total += w
            if predicted[i] == t:
                hits += w
        return hits, w_total
```
Below there are examples of training with built-in MultiClass function and our MultiClass objective. As we can see, the results are the same.
```
X, y = make_classification(n_samples=1000, n_features=50, n_informative=40, n_classes=5, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
model1 = CatBoostClassifier(iterations=10, loss_function='MultiClass', eval_metric='Accuracy',
learning_rate=0.03, bootstrap_type='Bayesian', boost_from_average=False,
leaf_estimation_iterations=1, leaf_estimation_method='Newton', classes_count=5)
model1.fit(X_train, y_train, eval_set=(X_test, y_test))
model2 = CatBoostClassifier(iterations=10, loss_function=MultiClassObjective(), eval_metric=AccuracyMetric(),
learning_rate=0.03, bootstrap_type='Bayesian', boost_from_average=False,
leaf_estimation_iterations=1, leaf_estimation_method='Newton', classes_count=5)
model2.fit(X_train, y_train, eval_set=(X_test, y_test))
```
|
github_jupyter
|
# Nombre: Oscar Esaú Peralta Rosales
## Procesamiento de Lenguaje Natural
## Práctica 3: Bolsas de Términos y esquemas de pesado
### Lectura simple de datos
```
import os
import re
import math
from keras.preprocessing.text import Tokenizer
def get_texts_from_file(path_corpus, path_truth):
    """Load raw tweets and their labels from two parallel text files.

    Each line of ``path_corpus`` is one tweet; the matching line of
    ``path_truth`` is its label.  Lines are returned as-is, including the
    trailing newline.
    """
    with open(path_corpus, "r") as f_corpus, open(path_truth, "r") as f_truth:
        texts = [line for line in f_corpus]
        labels = [line for line in f_truth]
    return texts, labels
tr_txt, tr_y = get_texts_from_file("./mex_train.txt", "./mex_train_labels.txt")
```
### Estadisticas Simples
```
tr_y = list(map(int, tr_y))
from collections import Counter
import matplotlib.pyplot as plt
%matplotlib inline
print(Counter(tr_y))
plt.hist(tr_y, bins=len(set(tr_y)))
plt.ylabel('Users');
plt.xlabel('Class');
```
# Un ojo a los datos
```
tr_txt[:10]
```
### Construcción simple del vocabulario
```
import nltk
corpus_palabras = []
for doc in tr_txt:
corpus_palabras += doc.split()
#print(corpus_palabras)
fdist = nltk.FreqDist(corpus_palabras)
fdist
len(fdist)
def sortFreqDict(freqdict):
    """Return (count, word) pairs sorted by descending count.

    Ties on the count are broken by descending word (full-tuple ordering).
    """
    pairs = [(count, word) for word, count in freqdict.items()]
    return sorted(pairs, reverse=True)
V = sortFreqDict(fdist)
V = V[:5000]
dict_indices = dict()
cont = 0
for weight, word in V:
dict_indices[word] = cont
cont += 1
```
### Bolsa de Términos
```
import numpy as np
def build_bow_tr(tr_txt, V, dict_indices):
    """Binary bag-of-words matrix: one row per document, one column per term.

    ``dict_indices`` maps each vocabulary word to its column; words outside
    the vocabulary are ignored.  An entry is 1 if the word occurs in the
    document (any number of times), 0 otherwise.
    """
    BOW = np.zeros((len(tr_txt), len(V)), dtype=int)
    for row, doc in enumerate(tr_txt):
        # Only presence matters for the binary weighting, so a set of
        # distinct whitespace tokens is enough.
        for token in set(doc.split()):
            col = dict_indices.get(token)
            if col is not None:
                BOW[row, col] = 1
    return BOW
```
### Debug?
```
tr_txt[10]
fdist_doc = nltk.FreqDist(tr_txt[10].split())
fdist_doc
```
### Bolsa de Terminos en Validación
```
BOW_tr=build_bow_tr(tr_txt, V, dict_indices)
print(V[:10])
import sys
import numpy
numpy.set_printoptions(threshold=sys.maxsize)
#print(BOW[10])
val_txt, val_y = get_texts_from_file("./mex_val.txt", "./mex_val_labels.txt")
val_y = list(map(int, val_y))
from collections import Counter
import matplotlib.pyplot as plt
%matplotlib inline
print(Counter(val_y))
plt.hist(val_y, bins=len(set(val_y)))
plt.ylabel('Users');
plt.xlabel('Class');
val_txt[:10]
BOW_val=build_bow_tr(val_txt, V, dict_indices)
```
### Clasificación
```
import csv
import argparse
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import accuracy_score, confusion_matrix, f1_score, precision_recall_fscore_support, roc_auc_score
from sklearn import metrics, preprocessing
import numpy as np
from sklearn import svm, datasets
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
# Linear SVM with C tuned by 5-fold cross-validation on macro-F1;
# class_weight='balanced' compensates for the class imbalance above.
parameters = {'C': [.05, .12, .25, .5, 1, 2, 4]}
svr = svm.LinearSVC(class_weight='balanced')
grid = GridSearchCV(estimator=svr, param_grid=parameters, n_jobs=8, scoring="f1_macro", cv=5)
grid.fit(BOW_tr, tr_y)
y_pred = grid.predict(BOW_val)
p, r, f, _ = precision_recall_fscore_support(val_y, y_pred, average='macro', pos_label=None)
print(confusion_matrix(val_y, y_pred))
print(metrics.classification_report(val_y, y_pred))
```
### Errores
```
# Collect the indices of misclassified validation documents...
incorrect = []
for e in zip(val_y,y_pred,range(len(val_y))):
    #print(e[0])
    #print(e[1])
    if e[0] != e[1]:
        incorrect += [e[2]]
# ...and print the errors whose text contains the word "madre".
for e in incorrect:
    case = e
    if "madre" in val_txt[case].strip():
        print("Texto: ", val_txt[case].strip())
        print("Truth: ", val_y[case])
        print("Pred: ", y_pred[case])
        #print("PredProba: ", y_pred_proba[case])
```
# Tu turno:
## Realiza los siguientes ejercicios en esta clase:
## 1) Bolsa de Palabras con frecuencia y clasifique: Haga bolsa de palabras en dónde cada término tenga frecuencia bruta en lugar de pesado binario
```
def build_bow_tr_frec(tr_txt, V, dict_indices):
    """Bag of words with raw term frequencies: cell (d, w) holds how
    many times vocabulary word w occurs in document d."""
    BOW = np.zeros((len(tr_txt), len(V)), dtype=int)
    for doc_idx, doc in enumerate(tr_txt):
        for word, freq in Counter(doc.split()).items():
            if word in dict_indices:
                BOW[doc_idx, dict_indices[word]] = freq
    return BOW
# Exercise 1: same LinearSVC pipeline, now on raw-count features.
BOW_txt_frec = build_bow_tr_frec(tr_txt, V, dict_indices)
BOW_val_frec = build_bow_tr_frec(val_txt, V, dict_indices)
parameters = {'C': [.05, .12, .25, .5, 1, 2, 4]}
svr = svm.LinearSVC(class_weight='balanced')
grid = GridSearchCV(estimator=svr, param_grid=parameters, n_jobs=8, scoring="f1_macro", cv=5)
grid.fit(BOW_txt_frec, tr_y)
y_pred = grid.predict(BOW_val_frec)
p, r, f, _ = precision_recall_fscore_support(val_y, y_pred, average='macro', pos_label=None)
print(confusion_matrix(val_y, y_pred))
print(metrics.classification_report(val_y, y_pred))
```
## 2) Bolsa de Palabras con frecuencia normalizada y clasifique: Haga bolsa de palabras en dónde cada término tenga frecuencia normalizada a sumar 1 por documento
```
def build_bow_tr_frec_norm(tr_txt, V, dict_indices):
    """Bag of words with term frequencies normalized to sum to 1 per
    document (the stated goal of exercise 2).

    Fix: the original divided by len(fdist_doc) - the number of
    *distinct* tokens - so rows did not sum to 1.  The correct
    denominator is the document's total token count.  (Rows may still
    sum to slightly less than 1 when some tokens are out-of-vocabulary,
    since those columns are dropped.)
    """
    BOW = np.zeros((len(tr_txt), len(V)), dtype=np.float64)
    for doc_idx, doc in enumerate(tr_txt):
        counts = Counter(doc.split())
        total = sum(counts.values())  # total tokens, not distinct types
        if total == 0:
            continue  # empty document: leave its row as zeros
        for word, freq in counts.items():
            if word in dict_indices:
                BOW[doc_idx, dict_indices[word]] = freq / total
    return BOW
# Exercise 2: classify on per-document normalized frequencies.
BOW_txt_frec_norm = build_bow_tr_frec_norm(tr_txt, V, dict_indices)
BOW_val_frec_norm = build_bow_tr_frec_norm(val_txt, V, dict_indices)
parameters = {'C': [.05, .12, .25, .5, 1, 2, 4]}
svr = svm.LinearSVC(class_weight='balanced')
grid = GridSearchCV(estimator=svr, param_grid=parameters, n_jobs=8, scoring="f1_macro", cv=5)
grid.fit(BOW_txt_frec_norm, tr_y)
y_pred = grid.predict(BOW_val_frec_norm)
p, r, f, _ = precision_recall_fscore_support(val_y, y_pred, average='macro', pos_label=None)
print(confusion_matrix(val_y, y_pred))
print(metrics.classification_report(val_y, y_pred))
```
## 3) Bolsa de Palabras Normalizada con la norma del vector (un vector unitario por documento)
```
def build_bow_tr_frec_norm(tr_txt, V, dict_indices):
    """L2-normalized bag of words: each document row is a unit vector
    (exercise 3).

    Simplification: the original first divided every count by the
    number of distinct tokens in the document; that per-row constant
    is cancelled by the L2 normalization, so raw counts are used
    directly.  Rows with no in-vocabulary words are left as zeros.
    """
    BOW = np.zeros((len(tr_txt), len(V)), dtype=np.float64)
    for doc_idx, doc in enumerate(tr_txt):
        for word, freq in Counter(doc.split()).items():
            if word in dict_indices:
                BOW[doc_idx, dict_indices[word]] = freq
        norm = np.linalg.norm(BOW[doc_idx])
        if norm > 0:  # avoid dividing an all-zero row by zero
            BOW[doc_idx] /= norm
    return BOW
# Exercise 3: classify on L2-normalized (unit-vector) rows.
BOW_txt_frec_norm = build_bow_tr_frec_norm(tr_txt, V, dict_indices)
BOW_val_frec_norm = build_bow_tr_frec_norm(val_txt, V, dict_indices)
parameters = {'C': [.05, .12, .25, .5, 1, 2, 4]}
svr = svm.LinearSVC(class_weight='balanced')
grid = GridSearchCV(estimator=svr, param_grid=parameters, n_jobs=8, scoring="f1_macro", cv=5)
grid.fit(BOW_txt_frec_norm, tr_y)
y_pred = grid.predict(BOW_val_frec_norm)
p, r, f, _ = precision_recall_fscore_support(val_y, y_pred, average='macro', pos_label=None)
print(confusion_matrix(val_y, y_pred))
print(metrics.classification_report(val_y, y_pred))
```
## 4) Bolsa de Palabras con TFIDF y clasifique
```
def build_bow_tr_tfidf(tr_txt, V, dict_indices):
    """TF-IDF bag of words (exercise 4).

    TF is the raw count divided by the number of distinct tokens in the
    document (as in the normalized variant above); IDF is
    log(N / (df + 1)), where df is the number of documents containing
    the word.

    Fixes vs. the original:
    - the df counter was reset inside the corpus scan (`count = 0` in
      the loop body), so df was always 0 or 1 depending only on the
      *last* document; it is now accumulated correctly;
    - the whole corpus was rescanned for every (document, word) pair;
      df is now precomputed once;
    - math is imported locally so this cell does not depend on an
      earlier cell having imported it.

    NOTE(review): when called on a validation set this recomputes IDF
    from that set instead of reusing training IDF - kept as-is to
    preserve the notebook's interface.
    """
    import math
    N = len(tr_txt)
    BOW = np.zeros((N, len(V)), dtype=np.float64)
    doc_counts = [Counter(tr.split()) for tr in tr_txt]
    # Document frequency of every word, computed once over the corpus.
    df = Counter()
    for counts in doc_counts:
        df.update(counts.keys())
    for doc_idx, counts in enumerate(doc_counts):
        n_types = len(counts)
        for word, freq in counts.items():
            if word in dict_indices:
                tf = freq / n_types
                idf = math.log(N / (df[word] + 1))
                BOW[doc_idx, dict_indices[word]] = tf * idf
    return BOW
# Exercise 4: classify on TF-IDF features.
BOW_txt_frec_norm = build_bow_tr_tfidf(tr_txt, V, dict_indices)
BOW_val_frec_norm = build_bow_tr_tfidf(val_txt, V, dict_indices)
parameters = {'C': [.05, .12, .25, .5, 1, 2, 4]}
svr = svm.LinearSVC(class_weight='balanced')
grid = GridSearchCV(estimator=svr, param_grid=parameters, n_jobs=8, scoring="f1_macro", cv=5)
grid.fit(BOW_txt_frec_norm, tr_y)
y_pred = grid.predict(BOW_val_frec_norm)
p, r, f, _ = precision_recall_fscore_support(val_y, y_pred, average='macro', pos_label=None)
print(confusion_matrix(val_y, y_pred))
print(metrics.classification_report(val_y, y_pred))
```
## 5) (Opcional) Mismo que anterior pero normalizando TFIDF con la norma del vector
```
def build_bow_tr_tfidf_norm(tr_txt, V, dict_indices):
    """TF-IDF bag of words with each document row scaled to unit L2
    norm (exercise 5).

    Fixes vs. the original:
    - the df counter was reset inside the corpus scan, so df was
      always 0 or 1; it is now accumulated once, correctly;
    - the corpus was rescanned for every (document, word) pair; df is
      now precomputed once;
    - math is imported locally so this cell does not depend on an
      earlier cell having imported it.
    """
    import math
    N = len(tr_txt)
    BOW = np.zeros((N, len(V)), dtype=np.float64)
    doc_counts = [Counter(tr.split()) for tr in tr_txt]
    # Document frequency of every word, computed once over the corpus.
    df = Counter()
    for counts in doc_counts:
        df.update(counts.keys())
    for doc_idx, counts in enumerate(doc_counts):
        n_types = len(counts)
        for word, freq in counts.items():
            if word in dict_indices:
                tf = freq / n_types
                idf = math.log(N / (df[word] + 1))
                BOW[doc_idx, dict_indices[word]] = tf * idf
        norm = np.linalg.norm(BOW[doc_idx])
        if norm > 0:  # avoid dividing an all-zero row by zero
            BOW[doc_idx] /= norm
    return BOW
# Exercise 5: classify on unit-norm TF-IDF features.
BOW_txt_frec_norm = build_bow_tr_tfidf_norm(tr_txt, V, dict_indices)
BOW_val_frec_norm = build_bow_tr_tfidf_norm(val_txt, V, dict_indices)
parameters = {'C': [.05, .12, .25, .5, 1, 2, 4]}
svr = svm.LinearSVC(class_weight='balanced')
grid = GridSearchCV(estimator=svr, param_grid=parameters, n_jobs=8, scoring="f1_macro", cv=5)
grid.fit(BOW_txt_frec_norm, tr_y)
y_pred = grid.predict(BOW_val_frec_norm)
p, r, f, _ = precision_recall_fscore_support(val_y, y_pred, average='macro', pos_label=None)
print(confusion_matrix(val_y, y_pred))
print(metrics.classification_report(val_y, y_pred))
```
## 6) (Opcional) Bolsa de Palabras con Top 10000 palabras más frecuentes
```
def build_bow_top(tr_txt, V, dict_indices, top=1000, jump=0):
    """Frequency bag of words restricted, per document, to the words
    ranked [jump, jump+top) by descending in-document frequency.

    NOTE(review): the top/jump cut is applied per document, not to the
    global vocabulary - confirm that is the intended reading of
    exercises 6 and 7.
    """
    BOW = np.zeros((len(tr_txt), len(V)), dtype=int)
    for doc_idx, doc in enumerate(tr_txt):
        counts = Counter(doc.split())
        ranked = sorted(counts.items(), key=lambda item: item[1], reverse=True)
        for word, freq in ranked[jump:jump + top]:
            if word in dict_indices:
                BOW[doc_idx, dict_indices[word]] = freq
    return BOW
# Exercise 6: top-10000 most frequent words.  NOTE(review): the cut is
# per document and the vocabulary built above has only 5000 words, so
# top=10000 never actually truncates anything here - confirm intent.
BOW_txt_frec_norm = build_bow_top(tr_txt, V, dict_indices, top=10000)
BOW_val_frec_norm = build_bow_top(val_txt, V, dict_indices, top=10000)
parameters = {'C': [.05, .12, .25, .5, 1, 2, 4]}
svr = svm.LinearSVC(class_weight='balanced')
grid = GridSearchCV(estimator=svr, param_grid=parameters, n_jobs=8, scoring="f1_macro", cv=5)
grid.fit(BOW_txt_frec_norm, tr_y)
y_pred = grid.predict(BOW_val_frec_norm)
p, r, f, _ = precision_recall_fscore_support(val_y, y_pred, average='macro', pos_label=None)
print(confusion_matrix(val_y, y_pred))
print(metrics.classification_report(val_y, y_pred))
```
## 7) (Opcional) Bolsa de Palabras descartando las top 1000 palabras más frecuentes y tomando las siguientes 5000
```
# Exercise 7: skip the 1000 most frequent words and take the next 5000.
# NOTE(review): build_bow_top slices per *document*; short texts have
# far fewer than 1000 distinct tokens, so this slice is likely empty
# and the features all zero - the exercise probably intends a cut on
# the global vocabulary.  Verify before trusting these results.
BOW_txt_frec_norm = build_bow_top(tr_txt, V, dict_indices, top=5000, jump=1000)
BOW_val_frec_norm = build_bow_top(val_txt, V, dict_indices, top=5000, jump=1000)
parameters = {'C': [.05, .12, .25, .5, 1, 2, 4]}
svr = svm.LinearSVC(class_weight='balanced')
grid = GridSearchCV(estimator=svr, param_grid=parameters, n_jobs=8, scoring="f1_macro", cv=5)
grid.fit(BOW_txt_frec_norm, tr_y)
y_pred = grid.predict(BOW_val_frec_norm)
p, r, f, _ = precision_recall_fscore_support(val_y, y_pred, average='macro', pos_label=None)
print(confusion_matrix(val_y, y_pred))
print(metrics.classification_report(val_y, y_pred))
```
|
github_jupyter
|
[Table of Contents](./table_of_contents.ipynb)
# The Extended Kalman Filter
```
# Notebook boilerplate: apply the book's matplotlib/CSS styling.
from __future__ import division, print_function
%matplotlib inline
#format the book
import book_format
book_format.set_style()
```
We have developed the theory for the linear Kalman filter. Then, in the last two chapters we broached the topic of using Kalman filters for nonlinear problems. In this chapter we will learn the Extended Kalman filter (EKF). The EKF handles nonlinearity by linearizing the system at the point of the current estimate, and then the linear Kalman filter is used to filter this linearized system. It was one of the very first techniques used for nonlinear problems, and it remains the most common technique.
The EKF provides significant mathematical challenges to the designer of the filter; this is the most challenging chapter of the book. I do everything I can to avoid the EKF in favor of other techniques that have been developed to filter nonlinear problems. However, the topic is unavoidable; all classic papers and a majority of current papers in the field use the EKF. Even if you do not use the EKF in your own work you will need to be familiar with the topic to be able to read the literature.
## Linearizing the Kalman Filter
The Kalman filter uses linear equations, so it does not work with nonlinear problems. Problems can be nonlinear in two ways. First, the process model might be nonlinear. An object falling through the atmosphere encounters drag which reduces its acceleration. The drag coefficient varies based on the velocity the object. The resulting behavior is nonlinear - it cannot be modeled with linear equations. Second, the measurements could be nonlinear. For example, a radar gives a range and bearing to a target. We use trigonometry, which is nonlinear, to compute the position of the target.
For the linear filter we have these equations for the process and measurement models:
$$\begin{aligned}\dot{\mathbf x} &= \mathbf{Ax} + w_x\\
\mathbf z &= \mathbf{Hx} + w_z
\end{aligned}$$
Where $\mathbf A$ is the system dynamics matrix. Using the state space methods covered in the **Kalman Filter Math** chapter these equations can be transformed into
$$\begin{aligned}\bar{\mathbf x} &= \mathbf{Fx} \\
\mathbf z &= \mathbf{Hx}
\end{aligned}$$
where $\mathbf F$ is the *fundamental matrix*. The noise $w_x$ and $w_z$ terms are incorporated into the matrices $\mathbf R$ and $\mathbf Q$. This form of the equations allows us to compute the state at step $k$ given a measurement at step $k$ and the state estimate at step $k-1$. In earlier chapters I built your intuition and minimized the math by using problems describable with Newton's equations. We know how to design $\mathbf F$ based on high school physics.
For the nonlinear model the linear expression $\mathbf{Fx} + \mathbf{Bu}$ is replaced by a nonlinear function $f(\mathbf x, \mathbf u)$, and the linear expression $\mathbf{Hx}$ is replaced by a nonlinear function $h(\mathbf x)$:
$$\begin{aligned}\dot{\mathbf x} &= f(\mathbf x, \mathbf u) + w_x\\
\mathbf z &= h(\mathbf x) + w_z
\end{aligned}$$
You might imagine that we could proceed by finding a new set of Kalman filter equations that optimally solve these equations. But if you remember the charts in the **Nonlinear Filtering** chapter you'll recall that passing a Gaussian through a nonlinear function results in a probability distribution that is no longer Gaussian. So this will not work.
The EKF does not alter the Kalman filter's linear equations. Instead, it *linearizes* the nonlinear equations at the point of the current estimate, and uses this linearization in the linear Kalman filter.
*Linearize* means what it sounds like. We find a line that most closely matches the curve at a defined point. The graph below linearizes the parabola $f(x)=x^2-2x$ at $x=1.5$.
```
# Plot the parabola f(x) = x^2 - 2x and its linearization at x = 1.5.
import kf_book.ekf_internal as ekf_internal
ekf_internal.show_linearization()
```
If the curve above is the process model, then the dotted lines shows the linearization of that curve for the estimate $x=1.5$.
We linearize systems by taking the derivative, which finds the slope of a curve:
$$\begin{aligned}
f(x) &= x^2 -2x \\
\frac{df}{dx} &= 2x - 2
\end{aligned}$$
and then evaluating it at $x$:
$$\begin{aligned}m &= f'(x=1.5) \\&= 2(1.5) - 2 \\&= 1\end{aligned}$$
Linearizing systems of differential equations is similar. We linearize $f(\mathbf x, \mathbf u)$, and $h(\mathbf x)$ by taking the partial derivatives of each to evaluate $\mathbf F$ and $\mathbf H$ at the point $\mathbf x_t$ and $\mathbf u_t$. We call the partial derivative of a matrix the [*Jacobian*](https://en.wikipedia.org/wiki/Jacobian_matrix_and_determinant). This gives us the discrete state transition matrix and measurement model matrix:
$$
\begin{aligned}
\mathbf F
&= {\frac{\partial{f(\mathbf x_t, \mathbf u_t)}}{\partial{\mathbf x}}}\biggr|_{{\mathbf x_t},{\mathbf u_t}} \\
\mathbf H &= \frac{\partial{h(\bar{\mathbf x}_t)}}{\partial{\bar{\mathbf x}}}\biggr|_{\bar{\mathbf x}_t}
\end{aligned}
$$
This leads to the following equations for the EKF. I put boxes around the differences from the linear filter:
$$\begin{array}{l|l}
\text{linear Kalman filter} & \text{EKF} \\
\hline
& \boxed{\mathbf F = {\frac{\partial{f(\mathbf x_t, \mathbf u_t)}}{\partial{\mathbf x}}}\biggr|_{{\mathbf x_t},{\mathbf u_t}}} \\
\mathbf{\bar x} = \mathbf{Fx} + \mathbf{Bu} & \boxed{\mathbf{\bar x} = f(\mathbf x, \mathbf u)} \\
\mathbf{\bar P} = \mathbf{FPF}^\mathsf{T}+\mathbf Q & \mathbf{\bar P} = \mathbf{FPF}^\mathsf{T}+\mathbf Q \\
\hline
& \boxed{\mathbf H = \frac{\partial{h(\bar{\mathbf x}_t)}}{\partial{\bar{\mathbf x}}}\biggr|_{\bar{\mathbf x}_t}} \\
\textbf{y} = \mathbf z - \mathbf{H \bar{x}} & \textbf{y} = \mathbf z - \boxed{h(\bar{x})}\\
\mathbf{K} = \mathbf{\bar{P}H}^\mathsf{T} (\mathbf{H\bar{P}H}^\mathsf{T} + \mathbf R)^{-1} & \mathbf{K} = \mathbf{\bar{P}H}^\mathsf{T} (\mathbf{H\bar{P}H}^\mathsf{T} + \mathbf R)^{-1} \\
\mathbf x=\mathbf{\bar{x}} +\mathbf{K\textbf{y}} & \mathbf x=\mathbf{\bar{x}} +\mathbf{K\textbf{y}} \\
\mathbf P= (\mathbf{I}-\mathbf{KH})\mathbf{\bar{P}} & \mathbf P= (\mathbf{I}-\mathbf{KH})\mathbf{\bar{P}}
\end{array}$$
We don't normally use $\mathbf{Fx}$ to propagate the state for the EKF as the linearization causes inaccuracies. It is typical to compute $\bar{\mathbf x}$ using a suitable numerical integration technique such as Euler or Runge Kutta. Thus I wrote $\mathbf{\bar x} = f(\mathbf x, \mathbf u)$. For the same reasons we don't use $\mathbf{H\bar{x}}$ in the computation for the residual, opting for the more accurate $h(\bar{\mathbf x})$.
I think the easiest way to understand the EKF is to start off with an example. Later you may want to come back and reread this section.
## Example: Tracking an Airplane
This example tracks an airplane using ground based radar. We implemented a UKF for this problem in the last chapter. Now we will implement an EKF for the same problem so we can compare both the filter performance and the level of effort required to implement the filter.
Radars work by emitting a beam of radio waves and scanning for a return bounce. Anything in the beam's path will reflect some of the signal back to the radar. By timing how long it takes for the reflected signal to get back to the radar the system can compute the *slant distance* - the straight line distance from the radar installation to the object.
The relationship between the radar's slant range distance $r$ and elevation angle $\epsilon$ with the horizontal position $x$ and altitude $y$ of the aircraft is illustrated in the figure below:
```
# Diagram relating slant range r and elevation angle to position/altitude.
ekf_internal.show_radar_chart()
```
This gives us the equalities:
$$\begin{aligned}
\epsilon &= \tan^{-1} \frac y x\\
r^2 &= x^2 + y^2
\end{aligned}$$
### Design the State Variables
We want to track the position of an aircraft assuming a constant velocity and altitude, and measurements of the slant distance to the aircraft. That means we need 3 state variables - horizontal distance, horizontal velocity, and altitude:
$$\mathbf x = \begin{bmatrix}\mathtt{distance} \\\mathtt{velocity}\\ \mathtt{altitude}\end{bmatrix}= \begin{bmatrix}x \\ \dot x\\ y\end{bmatrix}$$
### Design the Process Model
We assume a Newtonian, kinematic system for the aircraft. We've used this model in previous chapters, so by inspection you may recognize that we want
$$\mathbf F = \left[\begin{array}{cc|c} 1 & \Delta t & 0\\
0 & 1 & 0 \\ \hline
0 & 0 & 1\end{array}\right]$$
I've partitioned the matrix into blocks to show the upper left block is a constant velocity model for $x$, and the lower right block is a constant position model for $y$.
However, let's practice finding these matrices. We model systems with a set of differential equations. We need an equation in the form
$$\dot{\mathbf x} = \mathbf{Ax} + \mathbf{w}$$
where $\mathbf{w}$ is the system noise.
The variables $x$ and $y$ are independent so we can compute them separately. The differential equations for motion in one dimension are:
$$\begin{aligned}v &= \dot x \\
a &= \ddot{x} = 0\end{aligned}$$
Now we put the differential equations into state-space form. If this was a second or greater order differential system we would have to first reduce them to an equivalent set of first degree equations. The equations are first order, so we put them in state space matrix form as
$$\begin{aligned}\begin{bmatrix}\dot x \\ \ddot{x}\end{bmatrix} &= \begin{bmatrix}0&1\\0&0\end{bmatrix} \begin{bmatrix}x \\
\dot x\end{bmatrix} \\ \dot{\mathbf x} &= \mathbf{Ax}\end{aligned}$$
where $\mathbf A=\begin{bmatrix}0&1\\0&0\end{bmatrix}$.
Recall that $\mathbf A$ is the *system dynamics matrix*. It describes a set of linear differential equations. From it we must compute the state transition matrix $\mathbf F$. $\mathbf F$ describes a discrete set of linear equations which compute $\mathbf x$ for a discrete time step $\Delta t$.
A common way to compute $\mathbf F$ is to use the power series expansion of the matrix exponential:
$$\mathbf F(\Delta t) = e^{\mathbf A\Delta t} = \mathbf{I} + \mathbf A\Delta t + \frac{(\mathbf A\Delta t)^2}{2!} + \frac{(\mathbf A \Delta t)^3}{3!} + ... $$
$\mathbf A^2 = \begin{bmatrix}0&0\\0&0\end{bmatrix}$, so all higher powers of $\mathbf A$ are also $\mathbf{0}$. Thus the power series expansion is:
$$
\begin{aligned}
\mathbf F &=\mathbf{I} + \mathbf At + \mathbf{0} \\
&= \begin{bmatrix}1&0\\0&1\end{bmatrix} + \begin{bmatrix}0&1\\0&0\end{bmatrix}\Delta t\\
\mathbf F &= \begin{bmatrix}1&\Delta t\\0&1\end{bmatrix}
\end{aligned}$$
This is the same result used by the kinematic equations! This exercise was unnecessary other than to illustrate finding the state transition matrix from linear differential equations. We will conclude the chapter with an example that will require the use of this technique.
### Design the Measurement Model
The measurement function takes the state estimate of the prior $\bar{\mathbf x}$ and turns it into a measurement of the slant range distance. We use the Pythagorean theorem to derive:
$$h(\bar{\mathbf x}) = \sqrt{x^2 + y^2}$$
The relationship between the slant distance and the position on the ground is nonlinear due to the square root. We linearize it by evaluating its partial derivative at $\mathbf x_t$:
$$
\mathbf H = \frac{\partial{h(\bar{\mathbf x})}}{\partial{\bar{\mathbf x}}}\biggr|_{\bar{\mathbf x}_t}
$$
The partial derivative of a matrix is called a Jacobian, and takes the form
$$\frac{\partial \mathbf H}{\partial \bar{\mathbf x}} =
\begin{bmatrix}
\frac{\partial h_1}{\partial x_1} & \frac{\partial h_1}{\partial x_2} &\dots \\
\frac{\partial h_2}{\partial x_1} & \frac{\partial h_2}{\partial x_2} &\dots \\
\vdots & \vdots
\end{bmatrix}
$$
In other words, each element in the matrix is the partial derivative of the function $h$ with respect to the $x$ variables. For our problem we have
$$\mathbf H = \begin{bmatrix}{\partial h}/{\partial x} & {\partial h}/{\partial \dot{x}} & {\partial h}/{\partial y}\end{bmatrix}$$
Solving each in turn:
$$\begin{aligned}
\frac{\partial h}{\partial x} &= \frac{\partial}{\partial x} \sqrt{x^2 + y^2} \\
&= \frac{x}{\sqrt{x^2 + y^2}}
\end{aligned}$$
and
$$\begin{aligned}
\frac{\partial h}{\partial \dot{x}} &=
\frac{\partial}{\partial \dot{x}} \sqrt{x^2 + y^2} \\
&= 0
\end{aligned}$$
and
$$\begin{aligned}
\frac{\partial h}{\partial y} &= \frac{\partial}{\partial y} \sqrt{x^2 + y^2} \\
&= \frac{y}{\sqrt{x^2 + y^2}}
\end{aligned}$$
giving us
$$\mathbf H =
\begin{bmatrix}
\frac{x}{\sqrt{x^2 + y^2}} &
0 &
\frac{y}{\sqrt{x^2 + y^2}}
\end{bmatrix}$$
This may seem daunting, so step back and recognize that all of this math is doing something very simple. We have an equation for the slant range to the airplane which is nonlinear. The Kalman filter only works with linear equations, so we need to find a linear equation that approximates $\mathbf H$. As we discussed above, finding the slope of a nonlinear equation at a given point is a good approximation. For the Kalman filter, the 'given point' is the state variable $\mathbf x$ so we need to take the derivative of the slant range with respect to $\mathbf x$. For the linear Kalman filter $\mathbf H$ was a constant that we computed prior to running the filter. For the EKF $\mathbf H$ is updated at each step as the evaluation point $\bar{\mathbf x}$ changes at each epoch.
To make this more concrete, let's now write a Python function that computes the Jacobian of $h$ for this problem.
```
from math import sqrt
def HJacobian_at(x):
    """Return the 1x3 Jacobian of the slant-range measurement h
    evaluated at state x = [position, velocity, altitude]."""
    px, alt = x[0], x[2]
    rng = sqrt(px**2 + alt**2)
    # dh/d(velocity) is 0: slant range does not depend on velocity.
    return array([[px / rng, 0., alt / rng]])
```
Finally, let's provide the code for $h(\bar{\mathbf x})$:
```
def hx(x):
    """Return the slant-range measurement that state x would produce
    (Pythagoras on horizontal distance x[0] and altitude x[2])."""
    horiz, alt = x[0], x[2]
    return (horiz * horiz + alt * alt) ** 0.5
```
Now let's write a simulation for our radar.
```
from numpy.random import randn
import math
class RadarSim:
""" Simulates the radar signal returns from an object
flying at a constant altityude and velocity in 1D.
"""
def __init__(self, dt, pos, vel, alt):
self.pos = pos
self.vel = vel
self.alt = alt
self.dt = dt
def get_range(self):
""" Returns slant range to the object. Call once
for each new measurement at dt time from last call.
"""
# add some process noise to the system
self.vel = self.vel + .1*randn()
self.alt = self.alt + .1*randn()
self.pos = self.pos + self.vel*self.dt
# add measurement noise
err = self.pos * 0.05*randn()
slant_dist = math.sqrt(self.pos**2 + self.alt**2)
return slant_dist + err
```
### Design Process and Measurement Noise
The radar measures the range to a target. We will use $\sigma_{range}= 5$ meters for the noise. This gives us
$$\mathbf R = \begin{bmatrix}\sigma_{range}^2\end{bmatrix} = \begin{bmatrix}25\end{bmatrix}$$
The design of $\mathbf Q$ requires some discussion. The state $\mathbf x= \begin{bmatrix}x & \dot x & y\end{bmatrix}^\mathtt{T}$. The first two elements are position (down range distance) and velocity, so we can use `Q_discrete_white_noise` noise to compute the values for the upper left hand side of $\mathbf Q$. The third element of $\mathbf x$ is altitude, which we are assuming is independent of the down range distance. That leads us to a block design of $\mathbf Q$ of:
$$\mathbf Q = \begin{bmatrix}\mathbf Q_\mathtt{x} & 0 \\ 0 & \mathbf Q_\mathtt{y}\end{bmatrix}$$
### Implementation
`FilterPy` provides the class `ExtendedKalmanFilter`. It works similarly to the `KalmanFilter` class we have been using, except that it allows you to provide a function that computes the Jacobian of $\mathbf H$ and the function $h(\mathbf x)$.
We start by importing the filter and creating it. The dimension of `x` is 3 and `z` has dimension 1.
```python
from filterpy.kalman import ExtendedKalmanFilter
rk = ExtendedKalmanFilter(dim_x=3, dim_z=1)
```
We create the radar simulator:
```python
radar = RadarSim(dt, pos=0., vel=100., alt=1000.)
```
We will initialize the filter near the airplane's actual position:
```python
rk.x = array([radar.pos, radar.vel-10, radar.alt+100])
```
We assign the system matrix using the first term of the Taylor series expansion we computed above:
```python
dt = 0.05
rk.F = eye(3) + array([[0, 1, 0],
[0, 0, 0],
[0, 0, 0]])*dt
```
After assigning reasonable values to $\mathbf R$, $\mathbf Q$, and $\mathbf P$ we can run the filter with a simple loop. We pass the functions for computing the Jacobian of $\mathbf H$ and $h(x)$ into the `update` method.
```python
for i in range(int(20/dt)):
z = radar.get_range()
rk.update(array([z]), HJacobian_at, hx)
rk.predict()
```
Adding some boilerplate code to save and plot the results we get:
```
from filterpy.common import Q_discrete_white_noise
from filterpy.kalman import ExtendedKalmanFilter
from numpy import eye, array, asarray
import numpy as np
dt = 0.05
# EKF with state [position, velocity, altitude] and a scalar
# slant-range measurement.
rk = ExtendedKalmanFilter(dim_x=3, dim_z=1)
radar = RadarSim(dt, pos=0., vel=100., alt=1000.)
# make an imperfect starting guess
rk.x = array([radar.pos-100, radar.vel+100, radar.alt+1000])
# F = I + A*dt: first-order expansion of the constant-velocity model
# derived in the text above.
rk.F = eye(3) + array([[0, 1, 0],
                       [0, 0, 0],
                       [0, 0, 0]]) * dt
range_std = 5.  # meters
rk.R = np.diag([range_std**2])
# Block-diagonal Q: discrete white noise for the (pos, vel) pair;
# altitude is modeled as independent, so its variance sits at Q[2,2].
rk.Q[0:2, 0:2] = Q_discrete_white_noise(2, dt=dt, var=0.1)
rk.Q[2,2] = 0.1
rk.P *= 50
xs, track = [], []
for i in range(int(20/dt)):
    z = radar.get_range()
    track.append((radar.pos, radar.vel, radar.alt))
    # update() takes the Jacobian function and h(x) so the measurement
    # is re-linearized at every step.
    rk.update(array([z]), HJacobian_at, hx)
    xs.append(rk.x)
    rk.predict()
xs = asarray(xs)
track = asarray(track)
time = np.arange(0, len(xs)*dt, dt)
ekf_internal.plot_radar(xs, track, time)
```
## Using SymPy to compute Jacobians
Depending on your experience with derivatives you may have found the computation of the Jacobian difficult. Even if you found it easy, a slightly more difficult problem easily leads to very difficult computations.
As explained in Appendix A, we can use the SymPy package to compute the Jacobian for us.
```
import sympy
from IPython.display import display
sympy.init_printing(use_latex='mathjax')
# Symbolic state [x, x_vel, y] and measurement h = sqrt(x^2 + y^2);
# SymPy's jacobian() reproduces the H derived by hand above.
x, x_vel, y = sympy.symbols('x, x_vel y')
H = sympy.Matrix([sympy.sqrt(x**2 + y**2)])
state = sympy.Matrix([x, x_vel, y])
J = H.jacobian(state)
display(state)
display(J)
```
This result is the same as the result we computed above, and with much less effort on our part!
## Robot Localization
It's time to try a real problem. I warn you that this section is difficult. However, most books choose simple, textbook problems with simple answers, and you are left wondering how to solve a real world problem.
We will consider the problem of robot localization. We already implemented this in the **Unscented Kalman Filter** chapter, and I recommend you read it now if you haven't already. In this scenario we have a robot that is moving through a landscape using a sensor to detect landmarks. This could be a self driving car using computer vision to identify trees, buildings, and other landmarks. It might be one of those small robots that vacuum your house, or a robot in a warehouse.
The robot has 4 wheels in the same configuration used by automobiles. It maneuvers by pivoting the front wheels. This causes the robot to pivot around the rear axle while moving forward. This is nonlinear behavior which we will have to model.
The robot has a sensor that measures the range and bearing to known targets in the landscape. This is nonlinear because computing a position from a range and bearing requires square roots and trigonometry.
Both the process model and measurement models are nonlinear. The EKF accommodates both, so we provisionally conclude that the EKF is a viable choice for this problem.
### Robot Motion Model
At a first approximation an automobile steers by pivoting the front tires while moving forward. The front of the car moves in the direction that the wheels are pointing while pivoting around the rear tires. This simple description is complicated by issues such as slippage due to friction, the differing behavior of the rubber tires at different speeds, and the need for the outside tire to travel a different radius than the inner tire. Accurately modeling steering requires a complicated set of differential equations.
For lower speed robotic applications a simpler *bicycle model* has been found to perform well. This is a depiction of the model:
```
# Diagram of the bicycle steering model used for the robot.
ekf_internal.plot_bicycle()
```
In the **Unscented Kalman Filter** chapter we derived these equations:
$$\begin{aligned}
\beta &= \frac d w \tan(\alpha) \\
x &= x - R\sin(\theta) + R\sin(\theta + \beta) \\
y &= y + R\cos(\theta) - R\cos(\theta + \beta) \\
\theta &= \theta + \beta
\end{aligned}
$$
where $\theta$ is the robot's heading.
You do not need to understand this model in detail if you are not interested in steering models. The important thing to recognize is that our motion model is nonlinear, and we will need to deal with that with our Kalman filter.
### Design the State Variables
For our filter we will maintain the position $x,y$ and orientation $\theta$ of the robot:
$$\mathbf x = \begin{bmatrix}x \\ y \\ \theta\end{bmatrix}$$
Our control input $\mathbf u$ is the velocity $v$ and steering angle $\alpha$:
$$\mathbf u = \begin{bmatrix}v \\ \alpha\end{bmatrix}$$
### Design the System Model
We model our system as a nonlinear motion model plus noise.
$$\bar x = f(x, u) + \mathcal{N}(0, Q)$$
Using the motion model for a robot that we created above, we can expand this to
$$\bar{\begin{bmatrix}x\\y\\\theta\end{bmatrix}} = \begin{bmatrix}x\\y\\\theta\end{bmatrix} +
\begin{bmatrix}- R\sin(\theta) + R\sin(\theta + \beta) \\
R\cos(\theta) - R\cos(\theta + \beta) \\
\beta\end{bmatrix}$$
We find $\mathbf F$ by taking the Jacobian of $f(x, u)$.
$$\mathbf F = \frac{\partial f(x, u)}{\partial x} =\begin{bmatrix}
\frac{\partial f_1}{\partial x} &
\frac{\partial f_1}{\partial y} &
\frac{\partial f_1}{\partial \theta}\\
\frac{\partial f_2}{\partial x} &
\frac{\partial f_2}{\partial y} &
\frac{\partial f_2}{\partial \theta} \\
\frac{\partial f_3}{\partial x} &
\frac{\partial f_3}{\partial y} &
\frac{\partial f_3}{\partial \theta}
\end{bmatrix}
$$
When we calculate these we get
$$\mathbf F = \begin{bmatrix}
1 & 0 & -R\cos(\theta) + R\cos(\theta+\beta) \\
0 & 1 & -R\sin(\theta) + R\sin(\theta+\beta) \\
0 & 0 & 1
\end{bmatrix}$$
We can double check our work with SymPy.
```
import sympy
from sympy.abc import alpha, x, y, v, w, R, theta
from sympy import symbols, Matrix
sympy.init_printing(use_latex="mathjax", fontsize='16pt')
time = symbols('t')
# Bicycle-model motion over one step: d = distance travelled,
# beta = turn angle, r = turn radius (see equations above).
d = v*time
beta = (d/w)*sympy.tan(alpha)
r = w/sympy.tan(alpha)
fxu = Matrix([[x-r*sympy.sin(theta) + r*sympy.sin(theta+beta)],
              [y+r*sympy.cos(theta)- r*sympy.cos(theta+beta)],
              [theta+beta]])
# F: Jacobian of the motion model w.r.t. the state (x, y, theta).
F = fxu.jacobian(Matrix([x, y, theta]))
F
```
That looks a bit complicated. We can use SymPy to substitute terms:
```
# reduce common expressions
B, R = symbols('beta, R')
F = F.subs((d/w)*sympy.tan(alpha), B)
F.subs(w/sympy.tan(alpha), R)
```
This form verifies that the computation of the Jacobian is correct.
Now we can turn our attention to the noise. Here, the noise is in our control input, so it is in *control space*. In other words, we command a specific velocity and steering angle, but we need to convert that into errors in $x, y, \theta$. In a real system this might vary depending on velocity, so it will need to be recomputed for every prediction. I will choose this as the noise model; for a real robot you will need to choose a model that accurately depicts the error in your system.
$$\mathbf{M} = \begin{bmatrix}\sigma_{vel}^2 & 0 \\ 0 & \sigma_\alpha^2\end{bmatrix}$$
If this was a linear problem we would convert from control space to state space using the by now familiar $\mathbf{FMF}^\mathsf T$ form. Since our motion model is nonlinear we do not try to find a closed form solution to this, but instead linearize it with a Jacobian which we will name $\mathbf{V}$.
$$\mathbf{V} = \frac{\partial f(x, u)}{\partial u} = \begin{bmatrix}
\frac{\partial f_1}{\partial v} & \frac{\partial f_1}{\partial \alpha} \\
\frac{\partial f_2}{\partial v} & \frac{\partial f_2}{\partial \alpha} \\
\frac{\partial f_3}{\partial v} & \frac{\partial f_3}{\partial \alpha}
\end{bmatrix}$$
These partial derivatives become very difficult to work with. Let's compute them with SymPy.
```
V = fxu.jacobian(Matrix([v, alpha]))
V = V.subs(sympy.tan(alpha)/w, 1/R)
V = V.subs(time*v/R, B)
V = V.subs(time*v, 'd')
V
```
This should give you an appreciation of how quickly the EKF becomes mathematically intractable.
This gives us the final form of our prediction equations:
$$\begin{aligned}
\mathbf{\bar x} &= \mathbf x +
\begin{bmatrix}- R\sin(\theta) + R\sin(\theta + \beta) \\
R\cos(\theta) - R\cos(\theta + \beta) \\
\beta\end{bmatrix}\\
\mathbf{\bar P} &=\mathbf{FPF}^{\mathsf T} + \mathbf{VMV}^{\mathsf T}
\end{aligned}$$
This form of linearization is not the only way to predict $\mathbf x$. For example, we could use a numerical integration technique such as *Runge Kutta* to compute the movement
of the robot. This will be required if the time step is relatively large. Things are not as cut and dried with the EKF as for the Kalman filter. For a real problem you have to carefully model your system with differential equations and then determine the most appropriate way to solve that system. The correct approach depends on the accuracy you require, how nonlinear the equations are, your processor budget, and numerical stability concerns.
### Design the Measurement Model
The robot's sensor provides a noisy bearing and range measurement to multiple known locations in the landscape. The measurement model must convert the state $\begin{bmatrix}x & y&\theta\end{bmatrix}^\mathsf T$ into a range and bearing to the landmark. If $\mathbf p$
is the position of a landmark, the range $r$ is
$$r = \sqrt{(p_x - x)^2 + (p_y - y)^2}$$
The sensor provides bearing relative to the orientation of the robot, so we must subtract the robot's orientation from the bearing to get the sensor reading, like so:
$$\phi = \arctan(\frac{p_y - y}{p_x - x}) - \theta$$
Thus our measurement model $h$ is
$$\begin{aligned}
\mathbf z& = h(\bar{\mathbf x}, \mathbf p) &+ \mathcal{N}(0, R)\\
&= \begin{bmatrix}
\sqrt{(p_x - x)^2 + (p_y - y)^2} \\
\arctan(\frac{p_y - y}{p_x - x}) - \theta
\end{bmatrix} &+ \mathcal{N}(0, R)
\end{aligned}$$
This is clearly nonlinear, so we need to linearize $h$ at $\mathbf x$ by taking its Jacobian. We compute that with SymPy below.
```
px, py = symbols('p_x, p_y')
z = Matrix([[sympy.sqrt((px-x)**2 + (py-y)**2)],
[sympy.atan2(py-y, px-x) - theta]])
z.jacobian(Matrix([x, y, theta]))
```
Now we need to write that as a Python function. For example we might write:
```
from math import sqrt
def H_of(x, landmark_pos):
    """Jacobian of the measurement function h(x) -- range and bearing to
    landmark_pos -- evaluated at the state column vector x = [[x], [y], [theta]]."""
    dx = landmark_pos[0] - x[0, 0]
    dy = landmark_pos[1] - x[1, 0]
    hyp = dx**2 + dy**2        # squared distance to the landmark
    dist = sqrt(hyp)
    return array(
        [[-dx / dist, -dy / dist,  0.],
         [ dy / hyp,  -dx / hyp,  -1.]])
```
We also need to define a function that converts the system state into a measurement.
```
from math import atan2
def Hx(x, landmark_pos):
    """Measurement function: map state x = [[x], [y], [theta]] to the
    [[range], [bearing]] a sensor at that pose would report for the landmark."""
    dx = landmark_pos[0] - x[0, 0]
    dy = landmark_pos[1] - x[1, 0]
    rng = sqrt(dx**2 + dy**2)
    brg = atan2(dy, dx) - x[2, 0]   # bearing relative to robot heading
    return array([[rng],
                  [brg]])
```
### Design Measurement Noise
It is reasonable to assume that the noise of the range and bearing measurements are independent, hence
$$\mathbf R=\begin{bmatrix}\sigma_{range}^2 & 0 \\ 0 & \sigma_{bearing}^2\end{bmatrix}$$
### Implementation
We will use `FilterPy`'s `ExtendedKalmanFilter` class to implement the filter. Its `predict()` method uses the standard linear equations for the process model. Ours is nonlinear, so we will have to override `predict()` with our own implementation. I'll want to also use this class to simulate the robot, so I'll add a method `move()` that computes the position of the robot which both `predict()` and my simulation can call.
The matrices for the prediction step are quite large. While writing this code I made several errors before I finally got it working. I only found my errors by using SymPy's `evalf` function. `evalf` evaluates a SymPy `Matrix` with specific values for the variables. I decided to demonstrate this technique to you, and used `evalf` in the Kalman filter code. You'll need to understand a couple of points.
First, `evalf` uses a dictionary to specify the values. For example, if your matrix contains an `x` and `y`, you can write
```python
M.evalf(subs={x:3, y:17})
```
to evaluate the matrix for `x=3` and `y=17`.
Second, `evalf` returns a `sympy.Matrix` object. Use `numpy.array(M).astype(float)` to convert it to a NumPy array. `numpy.array(M)` creates an array of type `object`, which is not what you want.
Here is the code for the EKF:
```
from filterpy.kalman import ExtendedKalmanFilter as EKF
from numpy import array, sqrt
class RobotEKF(EKF):
    """EKF for the bicycle-model robot.

    State is [x, y, theta]^T; control input u is [velocity, steering angle].
    The motion model is nonlinear, so predict() is overridden: the
    state-transition Jacobian F and control Jacobian V are built
    symbolically with SymPy in __init__ and evaluated numerically with
    evalf() on every predict step.  move() implements the exact motion
    model and is also used by the simulation.
    """
    def __init__(self, dt, wheelbase, std_vel, std_steer):
        """dt: prediction period; wheelbase: axle separation;
        std_vel / std_steer: control-noise standard deviations."""
        EKF.__init__(self, 3, 2, 2)  # dim_x=3, dim_z=2, dim_u=2
        self.dt = dt
        self.wheelbase = wheelbase
        self.std_vel = std_vel
        self.std_steer = std_steer
        # Symbolic bicycle motion model f(x, u).
        a, x, y, v, w, theta, time = symbols(
            'a, x, y, v, w, theta, t')
        d = v*time                    # distance travelled in one step
        beta = (d/w)*sympy.tan(a)     # change in heading
        r = w/sympy.tan(a)            # turn radius
        self.fxu = Matrix(
            [[x-r*sympy.sin(theta)+r*sympy.sin(theta+beta)],
             [y+r*sympy.cos(theta)-r*sympy.cos(theta+beta)],
             [theta+beta]])
        # Jacobians w.r.t. the state (F) and the control input (V).
        self.F_j = self.fxu.jacobian(Matrix([x, y, theta]))
        self.V_j = self.fxu.jacobian(Matrix([v, a]))
        # Save the substitution dictionary and its symbols for predict().
        # x and y do not appear in either Jacobian, so 0 is fine for them.
        self.subs = {x: 0, y: 0, v:0, a:0,
                     time:dt, w:wheelbase, theta:0}
        self.x_x, self.x_y, = x, y
        self.v, self.a, self.theta = v, a, theta
    def predict(self, u):
        """Advance the state by dt under control u = [vel, steering angle]
        and propagate the covariance with the linearized model."""
        # Propagate the state with the exact nonlinear motion model.
        self.x = self.move(self.x, u, self.dt)
        self.subs[self.theta] = self.x[2, 0]
        self.subs[self.v] = u[0]
        self.subs[self.a] = u[1]
        # Evaluate the symbolic Jacobians at the current state/control.
        F = array(self.F_j.evalf(subs=self.subs)).astype(float)
        V = array(self.V_j.evalf(subs=self.subs)).astype(float)
        # covariance of motion noise in control space
        # NOTE(review): ** binds tighter than *, so the first entry is
        # std_vel * (u[0]**2), not (std_vel*u[0])**2 -- confirm which
        # variance model is intended.
        M = array([[self.std_vel*u[0]**2, 0],
                   [0, self.std_steer**2]])
        self.P = np.dot(F, self.P).dot(F.T) + np.dot(V, M).dot(V.T)
    def move(self, x, u, dt):
        """Return x advanced by dt under control u (no noise).
        tan/sin/cos come from `math`, imported in a later notebook cell."""
        hdg = x[2, 0]
        vel = u[0]
        steering_angle = u[1]
        dist = vel * dt
        if abs(steering_angle) > 0.001: # is robot turning?
            beta = (dist / self.wheelbase) * tan(steering_angle)
            r = self.wheelbase / tan(steering_angle) # radius
            dx = np.array([[-r*sin(hdg) + r*sin(hdg + beta)],
                           [r*cos(hdg) - r*cos(hdg + beta)],
                           [beta]])
        else: # moving in straight line
            dx = np.array([[dist*cos(hdg)],
                           [dist*sin(hdg)],
                           [0]])
        return x + dx
```
Now we have another issue to handle. The residual is notionally computed as $y = z - h(x)$ but this will not work because our measurement contains an angle in it. Suppose z has a bearing of $1^\circ$ and $h(x)$ has a bearing of $359^\circ$. Naively subtracting them would yield an angular difference of $-358^\circ$, whereas the correct value is $2^\circ$. We have to write code to correctly compute the bearing residual.
```
def residual(a, b):
    """Measurement residual a - b for [range, bearing] measurements.

    The range component subtracts directly; the bearing difference is
    wrapped into (-pi, pi] so that e.g. 1 deg minus 359 deg yields
    +2 deg rather than -358 deg.
    """
    d = a - b
    d[1] = d[1] % (2 * np.pi)   # map bearing difference into [0, 2*pi)
    if d[1] > np.pi:            # shift the upper half down
        d[1] -= 2 * np.pi
    return d
```
The rest of the code runs the simulation and plots the results, and shouldn't need too much comment by now. I create a variable `landmarks` that contains the landmark coordinates. I update the simulated robot position 10 times a second, but run the EKF only once per second. This is for two reasons. First, we are not using Runge Kutta to integrate the differental equations of motion, so a narrow time step allows our simulation to be more accurate. Second, it is fairly normal in embedded systems to have limited processing speed. This forces you to run your Kalman filter only as frequently as absolutely needed.
```
from filterpy.stats import plot_covariance_ellipse
from math import sqrt, tan, cos, sin, atan2
import matplotlib.pyplot as plt
dt = 1.0
def z_landmark(lmark, sim_pos, std_rng, std_brg):
    """Simulate one noisy [[range], [bearing]] observation of landmark
    `lmark` from the simulated pose `sim_pos` (column vector [x, y, theta]).
    Gaussian noise is scaled by std_rng and std_brg respectively."""
    dx = lmark[0] - sim_pos[0, 0]
    dy = lmark[1] - sim_pos[1, 0]
    rng = np.sqrt(dx**2 + dy**2) + randn() * std_rng
    brg = atan2(dy, dx) - sim_pos[2, 0] + randn() * std_brg
    return np.array([[rng],
                     [brg]])
def ekf_update(ekf, z, landmark):
    """One EKF measurement update for a single landmark observation z.

    Supplies filterpy's update() with the measurement Jacobian (H_of),
    the measurement function (Hx) and the angle-aware residual.
    NOTE(review): args=(landmark) is just `landmark` (no trailing comma),
    not a 1-tuple; filterpy appears to accept either form -- confirm.
    """
    ekf.update(z, HJacobian=H_of, Hx=Hx,
               residual=residual,
               args=(landmark), hx_args=(landmark))
def run_localization(landmarks, std_vel, std_steer,
                     std_range, std_bearing,
                     step=10, ellipse_step=20, ylim=None):
    """Simulate the robot and run the EKF against it, plotting the track
    and covariance ellipses.

    landmarks : (N, 2) array of landmark positions
    std_vel, std_steer : control-noise std devs passed to RobotEKF
    std_range, std_bearing : measurement-noise std devs
    step : run the EKF predict/update every `step` simulation ticks
    ellipse_step : draw covariance ellipses every `ellipse_step` ticks
    ylim : optional (ymin, ymax) for the plot

    Returns the EKF so the caller can inspect the final state/covariance.
    Uses the module-level `dt` and the helpers z_landmark / ekf_update.
    """
    ekf = RobotEKF(dt, wheelbase=0.5, std_vel=std_vel,
                   std_steer=std_steer)
    ekf.x = array([[2, 6, .3]]).T # x, y, steer angle
    ekf.P = np.diag([.1, .1, .1])
    ekf.R = np.diag([std_range**2, std_bearing**2])
    sim_pos = ekf.x.copy() # simulated position
    # steering command (vel, steering angle radians)
    u = array([1.1, .01])
    plt.figure()
    plt.scatter(landmarks[:, 0], landmarks[:, 1],
                marker='s', s=60)
    track = []
    for i in range(200):
        # Simulation advances at dt/10 to keep integration error small;
        # the filter itself only runs every `step` ticks.
        sim_pos = ekf.move(sim_pos, u, dt/10.) # simulate robot
        track.append(sim_pos)
        if i % step == 0:
            ekf.predict(u=u)
            if i % ellipse_step == 0:
                # prior (predict-step) covariance
                plot_covariance_ellipse(
                    (ekf.x[0,0], ekf.x[1,0]), ekf.P[0:2, 0:2],
                    std=6, facecolor='k', alpha=0.3)
            x, y = sim_pos[0, 0], sim_pos[1, 0]
            for lmark in landmarks:
                z = z_landmark(lmark, sim_pos,
                               std_range, std_bearing)
                ekf_update(ekf, z, lmark)
            if i % ellipse_step == 0:
                # posterior (update-step) covariance
                plot_covariance_ellipse(
                    (ekf.x[0,0], ekf.x[1,0]), ekf.P[0:2, 0:2],
                    std=6, facecolor='g', alpha=0.8)
    track = np.array(track)
    plt.plot(track[:, 0], track[:,1], color='k', lw=2)
    plt.axis('equal')
    plt.title("EKF Robot localization")
    if ylim is not None: plt.ylim(*ylim)
    plt.show()
    return ekf
landmarks = array([[5, 10], [10, 5], [15, 15]])
ekf = run_localization(
landmarks, std_vel=0.1, std_steer=np.radians(1),
std_range=0.3, std_bearing=0.1)
print('Final P:', ekf.P.diagonal())
```
I have plotted the landmarks as solid squares. The path of the robot is drawn with a black line. The covariance ellipses for the predict step are light gray, and the covariances of the update are shown in green. To make them visible at this scale I have set the ellipse boundary at 6$\sigma$.
We can see that there is a lot of uncertainty added by our motion model, and that most of the error is in the direction of motion. We determine that from the elongated shape of the error ellipses. After a few steps we can see that the filter incorporates the landmark measurements and the errors improve.
I used the same initial conditions and landmark locations in the UKF chapter. The UKF achieves much better accuracy in terms of the error ellipse. Both perform roughly as well as far as their estimate for $\mathbf x$ is concerned.
Now let's add another landmark.
```
landmarks = array([[5, 10], [10, 5], [15, 15], [20, 5]])
ekf = run_localization(
landmarks, std_vel=0.1, std_steer=np.radians(1),
std_range=0.3, std_bearing=0.1)
plt.show()
print('Final P:', ekf.P.diagonal())
```
The uncertainty in the estimates near the end of the track is smaller. We can see the effect that multiple landmarks have on our uncertainty by only using the first two landmarks.
```
ekf = run_localization(
landmarks[0:2], std_vel=1.e-10, std_steer=1.e-10,
std_range=1.4, std_bearing=.05)
print('Final P:', ekf.P.diagonal())
```
The estimate quickly diverges from the robot's path after passing the landmarks. The covariance also grows quickly. Let's see what happens with only one landmark:
```
ekf = run_localization(
landmarks[0:1], std_vel=1.e-10, std_steer=1.e-10,
std_range=1.4, std_bearing=.05)
print('Final P:', ekf.P.diagonal())
```
As you probably suspected, one landmark produces a very bad result. Conversely, a large number of landmarks allows us to make very accurate estimates.
```
landmarks = array([[5, 10], [10, 5], [15, 15], [20, 5], [15, 10],
[10,14], [23, 14], [25, 20], [10, 20]])
ekf = run_localization(
landmarks, std_vel=0.1, std_steer=np.radians(1),
std_range=0.3, std_bearing=0.1, ylim=(0, 21))
print('Final P:', ekf.P.diagonal())
```
### Discussion
I said that this was a real problem, and in some ways it is. I've seen alternative presentations that used robot motion models that led to simpler Jacobians. On the other hand, my model of the movement is also simplistic in several ways. First, it uses a bicycle model. A real car has two sets of tires, and each travels on a different radius. The wheels do not grip the surface perfectly. I also assumed that the robot responds instantaneously to the control input. Sebastian Thrun writes in *Probabilistic Robotics* that this simplified model is justified because the filters perform well when used to track real vehicles. The lesson here is that while you have to have a reasonably accurate nonlinear model, it does not need to be perfect to operate well. As a designer you will need to balance the fidelity of your model with the difficulty of the math and the CPU time required to perform the linear algebra.
Another way in which this problem was simplistic is that we assumed that we knew the correspondence between the landmarks and measurements. But suppose we are using radar - how would we know that a specific signal return corresponded to a specific building in the local scene? This question hints at SLAM algorithms - simultaneous localization and mapping. SLAM is not the point of this book, so I will not elaborate on this topic.
## UKF vs EKF
In the last chapter I used the UKF to solve this problem. The difference in implementation should be very clear. Computing the Jacobians for the state and measurement models was not trivial despite a rudimentary motion model. A different problem could result in a Jacobian which is difficult or impossible to derive analytically. In contrast, the UKF only requires you to provide a function that computes the system motion model and another for the measurement model.
There are many cases where the Jacobian cannot be found analytically. The details are beyond the scope of this book, but you will have to use numerical methods to compute the Jacobian. That undertaking is not trivial, and you will spend a significant portion of a master's degree at a STEM school learning techniques to handle such situations. Even then you'll likely only be able to solve problems related to your field - an aeronautical engineer learns a lot about Navier Stokes equations, but not much about modelling chemical reaction rates.
So, UKFs are easy. Are they accurate? In practice they often perform better than the EKF. You can find plenty of research papers that prove that the UKF outperforms the EKF in various problem domains. It's not hard to understand why this would be true. The EKF works by linearizing the system model and measurement model at a single point, and the UKF uses $2n+1$ points.
Let's look at a specific example. Take $f(x) = x^3$ and pass a Gaussian distribution through it. I will compute an accurate answer using a monte carlo simulation. I generate 50,000 points randomly distributed according to the Gaussian, pass each through $f(x)$, then compute the mean and variance of the result.
The EKF linearizes the function by taking the derivative to find the slope at the evaluation point $x$. This slope becomes the linear function that we use to transform the Gaussian. Here is a plot of that.
```
import kf_book.nonlinear_plots as nonlinear_plots
nonlinear_plots.plot_ekf_vs_mc()
```
The EKF computation is rather inaccurate. In contrast, here is the performance of the UKF:
```
nonlinear_plots.plot_ukf_vs_mc(alpha=0.001, beta=3., kappa=1.)
```
Here we can see that the computation of the UKF's mean is accurate to 2 decimal places. The standard deviation is slightly off, but you can also fine tune how the UKF computes the distribution by using the $\alpha$, $\beta$, and $\kappa$ parameters for generating the sigma points. Here I used $\alpha=0.001$, $\beta=3$, and $\kappa=1$. Feel free to modify them to see the result. You should be able to get better results than I did. However, avoid over-tuning the UKF for a specific test. It may perform better for your test case, but worse in general.
|
github_jupyter
|
# Kinetic Energy
Mean and Eddy Kinetic Energy
## Theory
For a hydrostatic ocean like MOM5, the relevant kinetic energy per mass is
$$ KE = \frac{1}{2} (u^2 + v^2).$$
The vertical velocity component, $w$, does not appear in the mechanical energy budget. It is very much subdominant. But more fundamentally, it simply does not appear in the mechanical energy budget for a hydrostatic ocean.
For a non-steady fluid, we can define the time-averaged kinetic energy as the __total kinetic energy__, TKE
$$ TKE = \left< K \right > = \frac{1}{T} \int_0^T \frac{1}{2} \left( u^2 + v^2 \right) dt $$
It is useful to decompose the velocity in the mean and time varying components
$$ u = \bar{u} + u'$$
The __mean kinetic energy__ is the energy associated with the mean flow
$$ MKE = \frac{1}{2} \left( \bar{u}^2 + \bar{v}^2 \right) $$
The kinetic energy of the time varying component is the __eddy kinetic energy__, EKE. This quantity can be obtained by
substracting the velocity means and calculating the kinetic energy of the
perturbation velocity quantities.
$$ EKE = \left< \frac{1}{2} \left( \left(u - \left<u\right>\right)^2 +
\left(v - \left<v\right>\right)^2
\right) \right> $$
MKE and EKE partition the total kinetic energy
$$TKE = EKE + MKE $$
## Calculation
We start by importing some useful packages.
```
%matplotlib inline
import cosima_cookbook as cc
import matplotlib.pyplot as plt
import numpy as np
import cmocean as cm
import xarray as xr
from dask.distributed import Client
```
Start up a dask cluster.
```
client = Client(n_workers=6)
client
```
Create a database session and select an experiment. Here we choose an experiment which has daily velocities saved for the Southern Ocean.
```
session = cc.database.create_session()
expt = '01deg_jra55v13_ryf9091'
```
While not difficult to write down, this is fairly involved computation since to compute the eddy kinetic energy requires both the velocity and the mean of the velocity components. Since the dataset is large, we want to avoid loading all of the velocity data into memory at the same time.
To calculate EKE, we need horizontal velocities $u$ and $v$, preferably saved at `1 daily` frequency (or perhaps `5 daily`). You can check whether your experiment has that kind of data:
```
varlist = cc.querying.get_variables(session, expt,frequency = '1 daily')
varlist
```
### Example
For example, let's calculate the mean and eddy kinetic energy over the last year of this particular model run:
```
start_time = '1970-01-01'
```
Here we build datasets for the variables u and v
```
u = cc.querying.getvar(expt,'u',session,ncfile='ocean_daily_3d_u_%.nc',start_time = start_time)
v = cc.querying.getvar(expt,'v',session,ncfile='ocean_daily_3d_v_%.nc',start_time = start_time)
```
The kinetic energy is given by
$$ KE = \frac{1}{2} (u^2 + v^2)$$
we construct the following expression:
```
KE = 0.5*(u**2 + v**2)
```
You may notice that this line takes only a moment to run. The calculation is not (yet) being run. Rather, XArray needs to broadcast the squares of the velocity fields together to determine the final shape of KE.
```
print(KE.shape)
```
This is too large to store locally. We need to reduce the data in some way.
The mean kinetic energy is calculated by this function
```
def calculate_MKE(KE):
    """Reduce pointwise kinetic energy to depth-integrated mean KE:
    average over the 'time' dimension, then sum over 'st_ocean' depth."""
    time_mean = KE.mean('time')
    return time_mean.sum('st_ocean')
```
While we could try and compute this DataArray using the new mapblocks function:
```
MKE = xr.map_blocks(calculate_MKE, KE)
MKE.data
MKE.plot()
KE.isel(time=1).sum('st_ocean').plot(vmax=10)
```
## Mean Kinetic Energy
For the mean kinetic energy, we need to average the velocities over time.
```
u_mean = u.mean('time')
v_mean = v.mean('time')
MKE = 0.5*(u_mean**2 + v_mean**2)
MKE = MKE.sum('st_ocean')
MKE.plot(vmax=1)
```
## Eddy Kinetic Energy
We calculate the perturbation velocities
```
u_ = u - u_mean
v_ = v - v_mean
EKE = 0.5 * (u_**2 + v_**2)
EKE = EKE.mean('time').sum(['st_ocean'])
EKE = cc.compute_by_block(EKE)
EKE.plot(vmax=1)
plt.show()
```
### Functions
```
from joblib import Memory
memory = Memory(cachedir='/g/data1/v45/cosima-cookbook/',verbose=0)
```
Here are functions for calculating both MKE and EKE.
```
@memory.cache
def calc_mke(expt, n=6):
    """Depth-integrated mean kinetic energy for experiment `expt`.

    Loads u and v from the first `n` matching ocean output files,
    time-averages each component, forms 0.5*(u_mean^2 + v_mean^2) and
    sums it over the st_ocean depth dimension.  The result is cached on
    disk by joblib, keyed on (expt, n).
    """
    print('Opening datasets...')
    u = cc.get_nc_variable(expt, 'ocean__\d+_\d+.nc', 'u',
                           time_units = 'days since 2000-01-01',
                           n=n)
    v = cc.get_nc_variable(expt, 'ocean__\d+_\d+.nc', 'v',
                           time_units = 'days since 2000-01-01',
                           n=n)
    print('Preparing computation...')
    # Lazy xarray expressions; nothing is evaluated until compute_by_block.
    u_mean = u.mean('time')
    v_mean = v.mean('time')
    MKE = 0.5*(u_mean**2 + v_mean**2)
    MKE = MKE.sum('st_ocean')
    print('Calculating...')
    MKE = cc.compute_by_block(MKE)
    return MKE
%%time
MKE = calc_mke('KDS75', n=6)
@memory.cache
def calc_eke(expt, n=6):
    """Depth-integrated eddy kinetic energy for experiment `expt`.

    EKE is computed from the perturbation velocities u' = u - <u> and
    v' = v - <v>: the time mean of 0.5*(u'^2 + v'^2), summed over depth.
    The result is cached on disk by joblib, keyed on (expt, n).
    """
    print('Opening datasets...')
    u = cc.get_nc_variable(expt, 'ocean__\d+_\d+.nc', 'u',
                           time_units = 'days since 2000-01-01',
                           n=n)
    v = cc.get_nc_variable(expt, 'ocean__\d+_\d+.nc', 'v',
                           time_units = 'days since 2000-01-01',
                           n=n)
    print('Preparing computation...')
    u_mean = u.mean('time')
    v_mean = v.mean('time')
    # perturbation (eddy) velocities
    u_ = u - u_mean
    v_ = v - v_mean
    EKE = 0.5 * (u_**2 + v_**2)
    EKE = EKE.mean('time')
    EKE = EKE.sum(['st_ocean'])
    print('Calculating...')
    EKE = cc.compute_by_block(EKE)
    return EKE
%%time
EKE = calc_eke('KDS75', n=6)
EKE.plot(vmax=1)
plt.show()
%%time
EKE = calc_eke('KDS75', n=72)
EKE.plot(vmax=1)
plt.show()
```
## Using dask directly
Note: Does not (yet) work, but the performance gains suggest that it may be worth further debugging for production use.
```
import netCDF4
import dask.array as da
import dataset
@memory.cache
def calc_mke_dask(expt, n=6):
    """Depth-integrated MKE computed with dask arrays over raw netCDF files.

    Queries the cookbook database for the first `n` 'u' files of `expt`,
    builds lazily-chunked dask arrays for u and v, reduces them, then
    copies the result into an xarray template (built from a one-file read)
    so the return value carries coordinates.
    """
    db = dataset.connect(cc.netcdf_index.database_url)
    res = db.query('SELECT ncfile'
                   ' from ncfiles'
                   ' where variable = "u"'
                   ' AND experiment = "%s"'
                   ' AND basename_pattern = "ocean__\d+_\d+.nc"'
                   ' ORDER BY ncfile'
                   ' LIMIT %d' % (expt, n),
                   )
    rows = list(res)
    ncfiles = [row['ncfile'] for row in rows]
    # one chunk per time step: (1, 7, 300, 400) in (time, depth, y, x)
    u_dataarrays = [da.from_array(netCDF4.Dataset(ncfile, 'r')['u'],
                                  chunks=(1,7,300,400)) for ncfile in ncfiles]
    u = da.concatenate(u_dataarrays, axis=0)
    v_dataarrays = [da.from_array(netCDF4.Dataset(ncfile, 'r')['v'],
                                  chunks=(1,7,300,400)) for ncfile in ncfiles]
    v = da.concatenate(v_dataarrays, axis=0)
    u_mean = u.mean(axis=0)   # time mean
    v_mean = v.mean(axis=0)
    MKE = 0.5*(u_mean**2 + v_mean**2)
    MKE = MKE.sum(axis=0)     # depth sum (the time axis is already gone)
    MKE = cc.compute_by_block(MKE)
    # Build an xarray template with the right coords/shape and fill it.
    temp = cc.get_nc_variable(expt, 'ocean__\d+_\d+.nc', 'u',
                              time_units = 'days since 2000-01-01',
                              n=1)
    template = temp.mean('time').sum('st_ocean')
    result = xr.zeros_like(template).compute()
    result[:] = MKE
    result.name = 'MKE'
    return result
%%time
MKE = calc_mke_dask('KDS75', n=6)
@memory.cache
def calc_eke_dask(expt, n=72):
    """Depth-integrated EKE computed with dask arrays over raw netCDF files.

    Same pipeline as calc_mke_dask but subtracts the time-mean velocities
    first, so the reduction is over perturbation (eddy) kinetic energy.
    Cached on disk by joblib, keyed on (expt, n).
    """
    db = dataset.connect(cc.netcdf_index.database_url)
    res = db.query('SELECT ncfile'
                   ' from ncfiles'
                   ' where variable = "u"'
                   ' AND experiment = "%s"'
                   ' AND basename_pattern = "ocean__\d+_\d+.nc"'
                   ' ORDER BY ncfile'
                   ' LIMIT %d' % (expt, n)
                   )
    ncfiles = [row['ncfile'] for row in res]
    # one chunk per time step: (1, 7, 300, 400) in (time, depth, y, x)
    u_dataarrays = [da.from_array(netCDF4.Dataset(ncfile, 'r')['u'],
                                  chunks=(1,7,300,400)) for ncfile in ncfiles]
    u = da.concatenate(u_dataarrays, axis=0)
    v_dataarrays = [da.from_array(netCDF4.Dataset(ncfile, 'r')['v'],
                                  chunks=(1,7,300,400)) for ncfile in ncfiles]
    v = da.concatenate(v_dataarrays, axis=0)
    u_mean = u.mean(axis=0)   # time mean
    v_mean = v.mean(axis=0)
    # perturbation velocities
    u_ = u - u_mean
    v_ = v - v_mean
    EKE = 0.5 * (u_**2 + v_**2)
    EKE = EKE.mean(axis=0)    # time mean of eddy KE
    EKE = EKE.sum(axis=0)     # depth sum (the time axis is already gone)
    EKE = cc.compute_by_block(EKE)
    # Build an xarray template with the right coords/shape and fill it.
    temp = cc.get_nc_variable(expt, 'ocean__\d+_\d+.nc', 'u',
                              time_units = 'days since 2000-01-01',
                              n=1)
    template = temp.mean('time').sum('st_ocean')
    result = xr.zeros_like(template).compute()
    result[:] = EKE
    result.name = 'EKE'
    return result
%%time
EKE = calc_eke_dask('KDS75', 2)
EKE.plot(vmax=1)
plt.show()
```
## Visualization
```
%matplotlib inline
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
# Plot in basemap
plt.figure(figsize=(15,6))
lev = np.arange(0, 1.0, 0.05)
map = Basemap(projection='mbtfpq',
lon_0 = -100, resolution='l')
map.drawcoastlines(linewidth=0.25)
map.fillcontinents(color='gray',lake_color='gray')
map.drawparallels(np.arange(-60.,61.,30.),
labels=[True,False,False,False])
map.drawmeridians(np.arange(-180.,181.,90.),
labels=[False,False,False,True])
expt = 'KDS75'
dsx = calc_eke(expt, n=6)
x=dsx.xu_ocean[:]
y=dsx.yu_ocean[:]
lon, lat = np.meshgrid(x, y)
X, Y = map(lon,lat)
map.contourf(X, Y, dsx.data,
cmap=plt.cm.hot,
levels=lev,
extend='both')
cb = plt.colorbar(orientation='vertical',shrink = 0.7)
cb.ax.set_xlabel('m^2 s$^{-2}$')
plt.title('Eddy KE {}'.format(expt))
```
|
github_jupyter
|
```
import numpy as np, pandas as pd
import matplotlib.pyplot as plt
# Exception to avoiding sklearn: used only to split the dataset
from sklearn import model_selection
# Create a simple dataset
X =pd.DataFrame( np.linspace(0.1,1,1001))
test = X
# NOTE(review): `test = X` is an alias, so the thresholding below
# overwrites X in place -- X and Y end up as identical 0/1 columns.
# Presumably X was meant to keep the continuous values; confirm intent.
test[test >=0.85] = 1
test[test < 0.85] = 0
# thus the dataset is such that if an observation is >= 0.85, it is a positive case
Y = test
X_train, X_test, Y_train, Y_test = model_selection.train_test_split(X,Y)
# prepend an intercept column of ones
X_train.insert(0,'ones',np.ones(Y_train.shape))
# how balanced is the dataset?
print("Positive Class count: ",X_train[X_train[X_train.columns[1]]==1].count()[0])
print("Negative Class count: ",X_train[X_train[X_train.columns[1]]==0].count()[0])
def Logistic_Regression(X_train, Y_train, alpha=0.001, iter=100):
    """Fit a logistic regression model with batch gradient descent.

    Parameters
    ----------
    X_train : pd.DataFrame
        Design matrix; should already contain a column of ones for the
        intercept term.
    Y_train : pd.DataFrame
        Binary (0/1) responses in the first column, row-aligned with X_train.
    alpha : float
        Learning rate.
    iter : int
        Number of gradient-descent iterations.  (Parameter name kept for
        backward compatibility even though it shadows the builtin.)

    Returns
    -------
    (theta, Rl, iter)
        Fitted weights (indexed by X_train's columns), the cross-entropy
        cost recorded at each iteration, and the iteration count.
    """
    theta = np.ones(X_train.shape[1]) / 100   # small non-zero start
    m = X_train.shape[0]
    y = Y_train.iloc[:, 0]  # response as a Series, row-aligned with X_train
    Rl = []
    for _ in range(iter):
        ht = (theta * X_train).sum(axis=1)    # linear predictor per row
        Yp = 1 / (1 + np.exp(-ht))            # sigmoid
        # Vectorized cross-entropy cost (replaces the per-row Python loop).
        J = -(y.values * np.log(Yp.values)) - (1 - y.values) * np.log(1 - Yp.values)
        Rl.append(J.sum())
        # Bug fix: the original computed `Yp - Y_train` (Series minus
        # DataFrame), which aligns the Series row index against the
        # frame's *columns* and produces a misaligned residual.
        # Subtracting the row-aligned Series gives the correct gradient.
        err = Yp - y
        theta = theta - (alpha * X_train.T.dot(err)) / m
    return theta, Rl, iter
params=Logistic_Regression(X_train, Y_train,2,30)
plt.title("Gradient descent")
X= np.arange(params[2])
plt.plot(X,params[1])
plt.xlabel('Iterations')
plt.ylabel('Cost')
plt.show()
def predict(x = X_test, theta = params[0]):
    """Return sigmoid probabilities for each observation (row) of x under
    the weight vector theta.  Defaults are the module-level test split
    and the weights fitted above (bound at definition time)."""
    linear_term = (theta * x).sum(axis=1)
    return 1 / (1 + np.exp(-linear_term))
#Test the model
# change for X observations
X_tr = X_test
Yp = predict(X_tr)
# change for y responses
Y_tr = Y_test
#Change threshold value to get improve performance
Yp[Yp>0.5] = 1
Yp[Yp<=0.5] = 0
X_terms = np.arange(X_tr.shape[0])
#some fun visualization
plt.title('Actual Vs Predicted')
plt.xlabel('Observations')
plt.ylabel('Predicted Class')
plt.plot(X_terms, Yp)
plt.plot(X_terms, Y_tr)
plt.show()
```
Model Metrics:
```
# Line up actual vs. predicted labels side by side for counting.
Predictions = pd.DataFrame(Y_tr.values)
Predictions.insert(1,1,Yp.values)
Predictions.columns=['Actual','Predicted']
# Metric calculations: confusion-matrix cell counts
TP=Predictions[(Predictions['Actual'] == 1 ) & (Predictions['Predicted'] == 1)].count()[0]
TN=Predictions[(Predictions['Actual'] == 0) & (Predictions['Predicted'] == 0)].count()[0]
FP=Predictions[(Predictions['Actual'] == 0 ) & (Predictions['Predicted'] == 1)].count()[0]
FN=Predictions[(Predictions['Actual'] == 1 ) & (Predictions['Predicted'] == 0)].count()[0]
# Create the confusion Matrix (rows: predicted class, columns: actual class)
confusion = pd.DataFrame(np.array([[TP, FP],[FN,TN]]))
confusion.columns = ['Actual_Positives','Actual_Negatives']
confusion.index = ['Predicted_Positives','Predicted_Negatives']
# Marginal totals
Predicted_Positive= TP + FP
Predicted_Negative= TN + FN
Actual_Positive= TP + FN
Actual_Negative= TN + FP
# Standard classification metrics
Accuracy= (TP + TN)/(Actual_Positive + Actual_Negative)
Precision= TP / Predicted_Positive
#Recall or Sensitivity
Recall = TP / Actual_Positive
Specificity = TN / Actual_Negative
F1_Score = (2*Precision * Recall) / (Precision + Recall)
```
Print all the metrics
```
# Bug fix: this previously referenced undefined names
# (True_Positives, False_Positives, False_Negatives, True_Negatives)
# and raised NameError -- the counts computed above are TP/FP/FN/TN.
a = np.array([[TP, FP], [FN, TN]])
print(confusion)
print('Accuracy: ',Accuracy)
print('Precision: ', Precision)
print('Recall: ',Recall)
print('Specificity: ', Specificity)
print('F1_score: %.2F'%F1_Score)
```
|
github_jupyter
|
```
import os
import sys
import random
import math
import re
import time
import numpy as np
import cv2
import matplotlib
import matplotlib.pyplot as plt
# Root directory of the project
ROOT_DIR = os.getenv("MRCNN_HOME", "/Mask_RCNN")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn.config import Config
from mrcnn import utils
import mrcnn.model as modellib
from mrcnn import visualize
from mrcnn.model import log
%matplotlib inline
# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
# Local path to trained weights file
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Download COCO trained weights from Releases if needed
if not os.path.exists(COCO_MODEL_PATH):
utils.download_trained_weights(COCO_MODEL_PATH)
DATA_PATH = "/data/RCNNTanks256Train/Yanbu"
DATA_PATH = os.path.join("E:", os.sep, "RCNNTanks256Train")
IMG_SIZE = 256
# set tf backend to allow memory to grow, instead of claiming everything
import tensorflow as tf
import keras
def get_session():
    """Return a TensorFlow session whose GPU memory footprint grows on
    demand instead of claiming all GPU memory up front."""
    cfg = tf.ConfigProto()
    cfg.gpu_options.allow_growth = True
    return tf.Session(config=cfg)
# set the modified tf session as backend in keras
keras.backend.tensorflow_backend.set_session(get_session())
```
## Configurations
```
class MRConfig(Config):
    """Configuration for training Mask R-CNN on the tank dataset.

    Derives from the base Config class and overrides values for
    256x256 single-class (tank vs. background) tiles.
    """
    # Give the configuration a recognizable name
    NAME = "tank"
    # NOTE(review): the base Config normally derives BATCH_SIZE from
    # GPU_COUNT * IMAGES_PER_GPU (= 8 here); keep these three in sync.
    BATCH_SIZE = 8
    GPU_COUNT = 1
    IMAGES_PER_GPU = 8
    # Number of classes (including background)
    NUM_CLASSES = 1 + 1
    # Use small images for faster training. Set the limits of the small side
    # the large side, and that determines the image shape.
    IMAGE_MIN_DIM = IMG_SIZE
    IMAGE_MAX_DIM = IMG_SIZE
    # Use smaller anchors because our image and objects are small
    RPN_ANCHOR_SCALES = (8, 16, 32, 64, 128)
    # RPN_ANCHOR_SCALES = (10, 20, 40, 80, 160)
    # Ratios of anchors at each cell (width/height)
    # A value of 1 represents a square anchor, and 0.5 is a wide anchor
    # RPN_ANCHOR_RATIOS = [0.25, 1, 4]
    # Loss weights for more precise optimization.
    # Can be used for R-CNN training setup.
    # LOSS_WEIGHTS = {
    #     "rpn_class_loss": 1.,
    #     "rpn_bbox_loss": 1.,
    #     "mrcnn_class_loss": 1.,
    #     "mrcnn_bbox_loss": 1.,
    #     "mrcnn_mask_loss": 1.1
    # }
    # Reduce training ROIs per image because the images are small and have
    # few objects. Aim to allow ROI sampling to pick 33% positive ROIs.
    TRAIN_ROIS_PER_IMAGE = 256
    # ROI_POSITIVE_RATIO = 0.5 #makes no positive effect
    # Max number of final detections
    DETECTION_MAX_INSTANCES = 100
    # Maximum number of ground truth instances to use in one image
    MAX_GT_INSTANCES = 100
    # Use a small epoch since the data is simple
    STEPS_PER_EPOCH = 100
    # use small validation steps since the epoch is small
    VALIDATION_STEPS = 1
    # Image mean (RGB) -- presumably computed from this dataset's tiles;
    # verify if the training data changes.
    MEAN_PIXEL = np.array([131.84381436753546, 125.43039054432134, 113.32320930217874])
    LEARNING_RATE = 1.e-4
    WEIGHT_DECAY = 1.e-5
config = MRConfig()
config.display()
```
## Notebook Preferences
```
def get_ax(rows=1, cols=1, size=8):
    """Create a ``rows`` x ``cols`` grid of Matplotlib axes.

    Each panel is ``size`` x ``size`` inches. Returns the axes first and the
    owning figure second (note the reversed order versus ``plt.subplots``).
    """
    figure, axes = plt.subplots(rows, cols, figsize=(size * cols, size * rows))
    return axes, figure
import glob
import skimage
class MRDataset(utils.Dataset):
    """Dataset of image tiles with per-instance mask label rasters.

    Layout assumed under ``dataset_dir`` (inferred from load()):
      * ``images/*.tif``            — the input tiles
      * ``labels/<class_name>/<same filename>`` — label raster per class,
        where pixel value i > 0 marks the pixels of instance i.
    """

    def load(self, dataset_dir):
        """Register every ``*.tif`` image found under ``dataset_dir``.

        Parameters:
            dataset_dir: root directory containing ``images/`` and ``labels/``.
        """
        # Add classes (id 0 is reserved for background by the base Dataset)
        self.add_class("tank", 1, "C_TankCompleted")
        # Remember where images and label rasters live for load_mask().
        self._image_dir = os.path.join(dataset_dir, "images/")
        self._label_dir = os.path.join(dataset_dir, "labels/")
        for i, f in enumerate(glob.glob(os.path.join(self._image_dir, "*.tif"))):
            _, filename = os.path.split(f)
            # Width/height are taken from the config, i.e. tiles are assumed
            # to be square IMAGE_MAX_DIM images — TODO confirm.
            self.add_image("tank",
                           image_id=i,
                           path=f,
                           width=config.IMAGE_MAX_DIM,
                           height=config.IMAGE_MAX_DIM,
                           filename=filename)

    def load_mask(self, image_id):
        """Return instance masks and class ids for one image.

        Returns:
            masks: bool array of shape [H, W, num_instances].
            class_ids: int32 array of length num_instances.
        """
        info = self.image_info[image_id]
        fname = info["filename"]
        # Shape to use for the all-background fallback mask. The original
        # referenced m_src here, which is undefined when no label file was
        # readable for ANY class — that raised NameError.
        fallback_shape = (info["height"], info["width"])
        masks = []
        class_ids = []
        # Loop through all classes, loading and processing the corresponding
        # label raster for this image when one exists.
        for ci in self.class_info:
            class_name = ci["name"]
            class_id = ci["id"]
            try:
                m_src = skimage.io.imread(
                    os.path.join(self._label_dir, class_name, fname))
            except OSError:
                # No readable label file for this class. (The original used a
                # bare except here, which also hid unrelated errors such as
                # KeyboardInterrupt.)
                continue
            fallback_shape = m_src.shape
            # Make an individual mask for each instance id present (> 0).
            # The original also re-checked np.any(m == i) before appending,
            # but i comes from np.unique(m_src), so that was always true.
            for i in np.unique(m_src):
                if i > 0:
                    m = np.zeros(m_src.shape)
                    m[m_src == i] = i
                    masks.append(m)
                    class_ids.append(class_id)
        if len(masks) == 0:
            # No instances at all: emit a single empty mask.
            # NOTE(review): labelling an empty mask with class id 1 mirrors the
            # original behavior — confirm downstream code expects this.
            masks.append(np.zeros(fallback_shape))
            class_ids.append(1)
        masks = np.stack(masks, axis=-1)
        # np.bool was removed in NumPy 1.24; the builtin bool is equivalent.
        return masks.astype(bool), np.array(class_ids, dtype=np.int32)

    def image_reference(self, image_id):
        """Return the image path (used for debugging/reporting)."""
        info = self.image_info[image_id]
        if info["source"] == "tank":
            return info["path"]
        else:
            super(self.__class__).image_reference(self, image_id)
# Build the three dataset splits. DATA_PATH is defined earlier in the
# notebook; each subdirectory is one acquisition scene.
# Training dataset
dataset_train = MRDataset()
dataset_train.load(os.path.join(DATA_PATH, "Karachi20170515_060830"))
dataset_train.prepare()
# Validation dataset
dataset_val = MRDataset()
dataset_val.load(os.path.join(DATA_PATH, "Paradip20170210_043931"))
dataset_val.prepare()
# Test dataset
dataset_test = MRDataset()
dataset_test.load(os.path.join(DATA_PATH, "Karachi20170718_055349"))
dataset_test.prepare()
# Load and display random samples
# NOTE(review): np.random.choice samples WITH replacement by default, so the
# same image id can appear more than once here.
dataset = dataset_test
image_ids = np.random.choice(dataset.image_ids, 4)
# for ii in dataset.image_info:
#     if ii['filename'] == '000005160.tif':
#         image_ids = [ii['id']]
#         break
for image_id in image_ids:
    image = dataset.load_image(image_id)
    mask, class_ids = dataset.load_mask(image_id)
    visualize.display_top_masks(image, mask, class_ids, dataset.class_names)
    #print(dataset.image_info[image_id]["filename"])
    #log("mask", mask)
    #log("class_ids", class_ids)
    #print(class_ids)
```
## Create Model
```
# Create model in training mode
model = modellib.MaskRCNN(mode="training",
                          config=config,
                          model_dir=MODEL_DIR)
# Which weights to start with?
# "imagenet": ImageNet-pretrained backbone; "coco": MS COCO weights minus the
# class-specific heads; "last": resume from the most recent checkpoint.
init_with = "imagenet" # imagenet, coco, or last
if init_with == "imagenet":
    model.load_weights(model.get_imagenet_weights(), by_name=True)
elif init_with == "coco":
    # Load weights trained on MS COCO, but skip layers that
    # are different due to the different number of classes
    # See README for instructions to download the COCO weights
    model.load_weights(COCO_MODEL_PATH, by_name=True,
                       exclude=["mrcnn_class_logits",
                                "mrcnn_bbox_fc",
                                "mrcnn_bbox",
                                "mrcnn_mask"])
elif init_with == "last":
    # Load the last model you trained and continue training
    model_path = model.find_last()
    print("Loading weights from ", model_path)
    model.load_weights(model_path, by_name=True)
```
## Training
Train in two stages:
1. Only the heads. Here we're freezing all the backbone layers and training only the randomly initialized layers (i.e. the ones that we didn't use pre-trained weights from MS COCO). To train only the head layers, pass `layers='heads'` to the `train()` function.
2. Fine-tune all layers. For this simple example it's not necessary, but we're including it to show the process. Simply pass `layers="all"` to train all layers.
```
# Stage 1: train only the randomly-initialized head branches.
# Train the head branches
# Passing layers="heads" freezes all layers except the head
# layers. You can also pass a regular expression to select
# which layers to train by name pattern.
model.train(dataset_train,
            dataset_val,
            learning_rate=config.LEARNING_RATE,
            epochs=10,
            layers='heads')
# Stage 2: fine-tune all layers at a much lower learning rate, with
# augmentation applied on the fly.
# Fine tune all layers
# Passing layers="all" trains all layers. You can also
# pass a regular expression to select which layers to
# train by name pattern.
#image augmentation: https://github.com/aleju/imgaug
import imgaug as ia
from imgaug import augmenters as iaa
# Wrap an augmenter so it is applied to ~50% of the images.
sometimes = lambda aug: iaa.Sometimes(0.5, aug)
seqAug = iaa.Sequential(
    [
        # apply the following augmenters to most images
        iaa.Fliplr(0.5), # horizontally flip 50% of all images
        iaa.Flipud(0.5), # vertically flip 50% of all images
        # crop images by -10% to 10% of their height/width
        # sometimes(iaa.CropAndPad( # !!! Looks like memory corruption is happening somewhere in this C++ impl
        #     percent=(-0.1, 0.1),
        #     pad_mode=ia.ALL,
        #     pad_cval=0
        # )),
        sometimes(iaa.Affine(
            scale={"x": (0.8, 1.2), "y": (0.8, 1.2)}, # scale images to 80-120% of their size, individually per axis
            translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)}, # translate by -20 to +20 percent (per axis)
            rotate=(-175, 175), # rotate by -175 to +175 degrees
            shear=(-16, 16), # shear by -16 to +16 degrees
            order=[0, 1], # use nearest neighbour or bilinear interpolation (fast)
            cval=0, # if mode is constant, use a cval = 0
            mode=ia.ALL # use any of scikit-image's warping modes (see 2nd image from the top for examples)
        ))
    ],
    random_order=True
)
model.train(dataset_train, dataset_val,
            learning_rate=config.LEARNING_RATE / 100.0,
            epochs=180,
            layers="all",
            augmentation=seqAug
        )
```
## Detection
```
class InferenceConfig(MRConfig):
    """Inference-time overrides of MRConfig.

    One image per batch, a high confidence floor, and aggressive NMS so that
    overlapping detections of the same tank are suppressed.
    """
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1
    # Discard detections scored below 0.9
    DETECTION_MIN_CONFIDENCE = 0.9
    # Suppress boxes with IoU above 0.2 against a higher-scored box
    DETECTION_NMS_THRESHOLD = 0.2
inference_config = InferenceConfig()
# Recreate the model in inference mode
model = modellib.MaskRCNN(mode="inference",
                          config=inference_config,
                          model_dir=MODEL_DIR)
# Get path to saved weights
# Either set a specific path or find last trained weights
model_path = model.find_last()
# model_path = os.path.join(ROOT_DIR, "./logs/r5_imgaug_roi1000_20180608T1627/mask_rcnn_r5_imgaug_roi1000__0570.h5")
# Load trained weights (fill in path to trained weights here)
# NOTE(review): assert is stripped under `python -O`; an explicit raise would
# be more robust for input validation.
assert model_path != "", "Provide path to trained weights"
print("Loading weights from ", model_path)
model.load_weights(model_path, by_name=True)
# Test on a random image
dataset = dataset_test
image_id = random.choice(dataset.image_ids)
# for ii in dataset.image_info:
#     if ii['filename'] == '000005160.tif':
#         image_id = ii['id']
#         break
original_image, image_meta, gt_class_id, gt_bbox, gt_mask =\
    modellib.load_image_gt(dataset, inference_config,
                           image_id, use_mini_mask=False)
# log("original_image", original_image)
# log("image_meta", image_meta)
# log("gt_class_id", gt_class_id)
# log("gt_bbox", gt_bbox)
# log("gt_mask", gt_mask)
# print("image_id: ", image_id)
# print(dataset.image_info[image_id])
results = model.detect([original_image], verbose=1)
r = results[0]
#if r["masks"].shape[2] > 0:
#    log("masks", r["masks"])
# Show ground truth (left panel) next to the model's detections (right panel).
ax, fig = get_ax(1,2)
visualize.display_instances(original_image, gt_bbox, gt_mask, gt_class_id,
                            dataset.class_names, ax=ax[0])
visualize.display_instances(original_image, r['rois'], r['masks'], r['class_ids'],
                            dataset.class_names, r['scores'], ax=ax[1])
# Single-image average precision at the default IoU threshold.
AP, precisions, recalls, overlaps =\
    utils.compute_ap(gt_bbox, gt_class_id, gt_mask,
                     r["rois"], r["class_ids"], r["scores"], r['masks'])
print(AP)
fig.savefig("tank1.png")
```
## Evaluation
```
# Compute VOC-style mAP @ IoU=0.5 over the whole validation set.
dataset = dataset_val
image_ids = dataset.image_ids
APs = []  # per-image average precision
pss = []  # per-image precision arrays
rcs = []  # per-image recall arrays
ops = []  # per-image overlap (IoU) matrices
for image_id in image_ids:
    # Load image and ground truth data
    image, image_meta, gt_class_id, gt_bbox, gt_mask =\
        modellib.load_image_gt(dataset, inference_config,
                               image_id, use_mini_mask=False)
    # Run object detection on the raw image. (The original also built
    # molded_images here via modellib.mold_image, but never used it —
    # model.detect molds internally — so that wasted work is removed.)
    results = model.detect([image], verbose=0)
    r = results[0]
    # Compute AP for this image and accumulate the per-image statistics.
    AP, precisions, recalls, overlaps =\
        utils.compute_ap(gt_bbox, gt_class_id, gt_mask,
                         r["rois"], r["class_ids"], r["scores"], r['masks'])
    APs.append(AP)
    pss.append(precisions)
    rcs.append(recalls)
    ops.append(overlaps)
print("mAP: ", np.mean(APs))
```
|
github_jupyter
|
Complex Laplacian and its eigenmodes are parameterized by $\alpha$ and $k$.
---
Theory and math behind the eigenmodes:
The simplest possible dynamic behavior of a damped system is the first order differential equation with one term, and it's rate of exponential decay is governed by a rate contant $\beta$:
\begin{equation}
\label{dampedSys}
\frac{dx_{1}(t)}{dt} = -\beta x_{1}(t)
\end{equation}
Where $x_{1}(t)$ is the average neuronal activation signal between all neurons within a region of interest. Thus we can interpret the above equation as the refractory period after neural discharge. But when viewing the brain as a network of interconnected regions, we want to introduce activities originating from other regions:
\begin{equation}
\frac{dx_{i}(t)}{dt} = -\beta (x_{i}(t) - \frac{\alpha}{\pmb{deg_i}} \sum_{i,j} c_{i,j} x_{j}(t-\tau^{\nu}_{i,j}))
\end{equation}
The above equation introduces a connectivity $c_{i,j}$ term, which is normalized by a diagonal degree matrix and scaled by a coupling term $\alpha$. $\alpha$ acts as both a coupling constant as well as a parameter to distinguish the rate of diffusion from connected regions from $\beta$. By introducing connectivity, we also have to take into account the distance between each connected regions, therefore the term $\tau^{\nu}_{i,j}$ is introduced as delay, and is computed by dividing fiber tract distance between regions and signal transmission velocity. Now if we transform the above equation into the Fourier domain, we obtain the following complex expression:
\begin{equation}
\begin{aligned}
j\omega X(\omega)_{i} = -\beta X(\omega)_{i} + \frac{\alpha}{\pmb{deg_i}} \sum_j c_{i,j} e^{-j\omega \tau^{\nu}_{i,j}} X(\omega)\\
j\omega \bar{X}(\omega) = -\beta (I - \alpha \Delta^{-\frac{1}{2}} C^{*}(\omega)) \bar{X}(\omega)\\
j\omega \bar{X}(\omega) = -B\mathcal{L}\bar{X}(\omega)\\
\end{aligned}
\end{equation}
Here, we introduced a complex component to our structural connectivity term as delays become phases in the Fourier domain, specifically, $x(t-\tau^{\nu}_{i,j}) \to e^{-j\omega \tau^{\nu}_{i,j}} X(\omega)$, thus we can define a complex connectivity as a function of angular frequency $\omega$ as $C(\omega) = \frac{1}{\pmb{deg}}C^{*}(\omega)$, where $C^{*}(\omega) = c_{i,j}e^{-j\omega \tau^{\nu}_{i,j}}$. By redefining the connectivity term from above, the complex Laplacian $\mathcal{L}(\omega)$ is then defined as $\mathcal{L}(\omega) = I - \alpha C(\omega)$. Next we decompose the complex Laplacian matrix $\mathcal{L}$ into it's eigen modes and eigen values:
\begin{equation}
\mathcal{L}(\omega) = \pmb{U}(\omega)\pmb{\Lambda}(\omega)\pmb{U}(\omega)^H
\end{equation}
Where $\pmb{\Lambda}(\omega) = diag([\lambda_1(\omega), ... , \lambda_N(\omega)])$ is a diagonal matrix consisting of the eigen values of the complex Laplacian matrix at angular frequency $\omega$, and $\pmb{U}(\omega)$ are the eigen modes of the complex Laplacian matrix at angular frequency $\omega$. We are going to see how these eigenmodes behave in their parameter space:
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# spectrome imports
from spectrome.brain import Brain
from spectrome.utils import functions, path
from spectrome.forward import eigenmode, runforward
# Some house keeping:
data_dir = "../data"
# Define frequency range of interest
fmin = 2 # 2Hz - 45Hz signal range, filter for this with hbp
fmax = 45
fvec = np.linspace(fmin, fmax, 40)
# Load Pablo's Yeo 2017 canonical network maps
fc_dk = np.load("../data/com_dk.npy", allow_pickle=True).item()
fc_dk_normalized = pd.read_csv("../data/DK_dictionary_normalized.csv").set_index(
    "Unnamed: 0"
)
# Define variables for analysis:
alpha_vec = np.linspace(
    0.5, 4.5, 17
)  # np.linspace(0.5,5,10) # coupling strength values we are going to explore
k_vec = np.linspace(0, 100, 11) # wave numbers we are going to explore
num_fc = 7 # 7 canonical networks
num_emode = 86 # number of eigenmodes, we are using 86 region DK atlas
default_k = 20 # default wave number
# NOTE(review): default_alpha = 0.1 lies outside alpha_vec's [0.5, 4.5]
# sweep range — confirm this is intentional.
default_alpha = 0.1 # default alpha
# define list of canonical network names and re-order the dictionary using these names:
# (the "\n" in the display names wraps the subplot titles onto two lines)
fc_names = [
    "Limbic",
    "Default",
    "Visual",
    "Fronto \n parietal",
    "Somato \n motor",
    "Dorsal \n Attention",
    "Ventral \n Attention",
]
fc_dk_normalized = fc_dk_normalized.reindex(
    [
        "Limbic",
        "Default",
        "Visual",
        "Frontoparietal",
        "Somatomotor",
        "Dorsal_Attention",
        "Ventral_Attention",
    ]
).fillna(0)
# turbo color map
turbo = functions.create_turbo_colormap()
```
#### Explore varying coupling strength while keeping wave number default first
```
## Create Brain object from spectrome
alpha_brain = Brain.Brain()
alpha_brain.add_connectome(data_dir)
alpha_brain.reorder_connectome(alpha_brain.connectome, alpha_brain.distance_matrix)
alpha_brain.bi_symmetric_c()
alpha_brain.reduce_extreme_dir()
## Compute correlation values:
# alpha_corr[e, n, a] = Spearman R between eigenmode e and network n at
# coupling strength alpha_vec[a], with the wave number held at default_k.
alpha_corr = np.zeros((num_emode, num_fc, len(alpha_vec)))
for a_ind in np.arange(0, len(alpha_vec)):
    alpha_brain.decompose_complex_laplacian(alpha=alpha_vec[a_ind], k=default_k)
    alpha_corr[:, :, a_ind] = eigenmode.get_correlation_df(
        alpha_brain.norm_eigenmodes, fc_dk_normalized, method="spearman"
    )
## set-up some visualization details
dynamic_range = [0.35, 0.65]  # for colormap
# for coupling strength labels and number of ticks on x-axis:
alpha_labels = np.linspace(0.5, 4.5, 3)
n_ticks = 3
## PLOT
with plt.style.context("seaborn-paper"):
    alpha_corr_fig, alpha_ax = plt.subplots(1, 7, figsize=(7.0, 4.0), sharey=True)
    for i, ax in enumerate(alpha_corr_fig.axes):
        im = ax.imshow(alpha_corr[:, i, :], vmin=0, vmax=1, cmap=turbo, aspect="auto")
        ax.xaxis.set_major_locator(
            plt.LinearLocator(numticks=n_ticks)
        )  # LinearLocator(numticks = n_ticks)
        # NOTE(review): tick labels are hard-coded without fixing the y locator;
        # they can drift from the true values if Matplotlib picks other ticks.
        ax.set_yticklabels([0, 1, 10, 20, 30, 40, 50, 60, 70, 80])
        ax.xaxis.tick_top()
        ax.set_xticklabels(alpha_labels, linespacing=0.2)
        im.set_clim(dynamic_range)
    plt.suptitle("Coupling Strength", fontsize=12, fontweight="bold", y=1.025)
    #cbar_ax = alpha_corr_fig.add_axes([1, 0.15, 0.03, 0.7])
    #cb = alpha_corr_fig.colorbar(im, cax=cbar_ax, extend="both")
    #alpha_corr_fig.add_subplot(1, 1, 1, frameon=False)
    #plt.tick_params(labelcolor="none", which="both", axis = "both", top="off", bottom="off", left="off", right="off")
    #plt.grid(False)
    #plt.ylabel("Eigenmode Number", fontsize=12)
    alpha_corr_fig.text(-0.008, 0.25, 'Eigenmode Number', rotation = 'vertical', fontsize = 12)
    plt.tight_layout(w_pad = 0.30)
    plt.savefig("../figures/fig4/coupling_strength.png", dpi=300, bbox_inches="tight")
```
#### Set to default $\alpha$ and explore wave number now:
```
## Brain object with spectrome
k_brain = Brain.Brain()
k_brain.add_connectome(data_dir)
k_brain.reorder_connectome(k_brain.connectome, k_brain.distance_matrix)
k_brain.bi_symmetric_c()
k_brain.reduce_extreme_dir()
# preallocate empty correlation df
# k_corr[e, n, j] = Spearman R between eigenmode e and network n at wave
# number k_vec[j], with alpha held at default_alpha.
k_corr = np.zeros((num_emode, num_fc, len(k_vec)))
## Compute correlations
for k in np.arange(0, len(k_vec)):
    k_brain.decompose_complex_laplacian(alpha=default_alpha, k=k_vec[k], num_ev=86)
    k_corr[:, :, k] = eigenmode.get_correlation_df(
        k_brain.norm_eigenmodes, fc_dk_normalized, method="spearman"
    )
n_ticks = 3
k_labels = [0, 50, 100]
## PLOT
with plt.style.context("seaborn-paper"):
    k_corr_fig, k_ax = plt.subplots(1, 7, figsize=(6.25, 4.0), sharey=True)
    for i, ax in enumerate(k_corr_fig.axes):
        im = ax.imshow(k_corr[:, i, :], vmin=0, vmax=1, cmap=turbo, aspect="auto")
        ax.xaxis.set_major_locator(
            plt.LinearLocator(numticks=n_ticks)
        )  # LinearLocator(numticks = n_ticks)
        # NOTE(review): hard-coded tick labels; see comment in the alpha sweep.
        ax.set_yticklabels([0, 1, 10, 20, 30, 40, 50, 60, 70, 80])
        ax.xaxis.tick_top()
        ax.set_xticklabels(k_labels)
        im.set_clim(dynamic_range)
        if i < 3:
            ax.set_title(fc_names[i], y=-0.08, fontsize=8, weight="bold")
        else:
            ax.set_title(fc_names[i], y=-0.12, fontsize=8, weight="bold")
    plt.suptitle("Wave Number", fontsize=12, fontweight="bold", y=1.025)
    cbar_ax = k_corr_fig.add_axes([1, 0.15, 0.03, 0.7])
    cb = k_corr_fig.colorbar(im, cax=cbar_ax, extend="both")
    cb.set_label(r'Spatial Similarity to $\Psi_{CFN}$')
    #k_corr_fig.add_subplot(1, 1, 1, frameon=False)
    #plt.tick_params(labelcolor="none", top="off", bottom="off", left="off", right="off")
    #plt.grid(False)
    #plt.ylabel("Eigenmode Number", fontsize=12)
    k_corr_fig.text(-0.008, 0.35, 'Eigenmode Number', rotation = 'vertical', fontsize = 12)
    plt.tight_layout(w_pad = 0.25)
    plt.savefig("../figures/fig4/wave_number.png", dpi=300, bbox_inches="tight")
```
Complex Laplacian Eigenmodes
---
Find the highest spatial correlation values achieved by the best performing eigenmodes for each canonical network:
Compute Spearman correlation values:
```
# pre-allocate an array for spearman R of best performing eigenmodes:
# params_bestr[a, k, n] = best (max over eigenmodes) Spearman R for network n
# at coupling strength alpha_vec[a] and wave number k_vec[k].
params_bestr = np.zeros((len(alpha_vec), len(k_vec), num_fc))
# Create brain object from spectrome with HCP connectome:
hcp_brain = Brain.Brain()
hcp_brain.add_connectome(data_dir)
hcp_brain.reorder_connectome(hcp_brain.connectome, hcp_brain.distance_matrix)
hcp_brain.bi_symmetric_c()
hcp_brain.reduce_extreme_dir()
# for each network, scan through alpha and k values, compute all eigenmode's spearman R
# then select the best performing eigenmode's spearman R
for i in np.arange(0, num_fc):
    print('Computing for {} network'.format(fc_dk_normalized.index[i]))
    for a_ind in np.arange(0, len(alpha_vec)):
        for k_ind in np.arange(0, len(k_vec)):
            # get eigenmodes of complex laplacian:
            hcp_brain.decompose_complex_laplacian(alpha = alpha_vec[a_ind], k = k_vec[k_ind])
            # compute spearman correlation
            spearman_eig = eigenmode.get_correlation_df(
                hcp_brain.norm_eigenmodes, fc_dk_normalized.iloc[[i]], method = 'spearman'
            )
            params_bestr[a_ind, k_ind, i] = np.max(spearman_eig.values)
```
Visualize in heatmap:
```
dynamic_range = [0.30, 0.65]
k_ticks = 11
k_labels = np.linspace(0, 100, 11).astype(int)
a_ticks = 3
a_labels = np.linspace(0.5, 4.5, 3)
with plt.style.context("seaborn-paper"):
corr_fig, corr_ax = plt.subplots(1,7, figsize = (8,5))
for i, ax in enumerate(corr_fig.axes):
im = ax.imshow(np.transpose(params_bestr[:,:,i]), vmin = 0, vmax = 1, cmap = turbo, aspect = 'auto')
ax.yaxis.set_major_locator(plt.LinearLocator(numticks = k_ticks))
ax.xaxis.tick_top()
ax.set_yticklabels(k_labels)
ax.xaxis.set_major_locator(plt.LinearLocator(numticks = a_ticks))
ax.set_xticklabels(a_labels)
im.set_clim(dynamic_range)
if i < 3:
ax.set_title(fc_names[i], y=-0.08, fontsize=8, weight="bold")
else:
ax.set_title(fc_names[i], y=-0.12, fontsize=8, weight="bold")
plt.suptitle('Coupling Strength', fontsize = 12, y = 1)
cbar_ax = corr_fig.add_axes([1, 0.15, 0.03, 0.7])
cb = corr_fig.colorbar(im, cax=cbar_ax, extend="both")
corr_fig.add_subplot(1, 1, 1, frameon=False)
plt.tick_params(labelcolor="none", top="off", bottom="off", left="off", right="off")
plt.grid(False)
plt.ylabel('Wave Number', fontsize = 12)
plt.tight_layout()
plt.savefig('../figures/fig5/param_bestr.png', dpi = 300, bbox_inches = 'tight')
```
Note - global coupling doesn't affect the best performing eigenmode but may change which eigenmode is the best performing eigenmode as well as the other eigenmodes.
Split the wave number parameter into oscillatory frequency and signal transmission velocity since wave number $k$ is defined as $k = \frac{2 \pi f}{\nu}$. Then perform the same exploratory exercise as above:
```
# Split k into oscillatory frequency f and transmission velocity nu
# (k = 2*pi*f / nu) and sweep both.
# define parameter ranges:
freq_vec = np.linspace(2, 47, 46)
nu_vec = np.linspace(1, 20, 21)
# define plotting visuals
dynamic_range = [0.3, 0.7]
f_ticks = 6
f_labels = np.linspace(2, 47, 6).astype(int)
nu_ticks = 3
# NOTE(review): nu_labels start at 0.5 while nu_vec starts at 1 — the x-axis
# labels may not match the data; verify.
nu_labels = np.linspace(0.5, 20, 3).astype(int)
#pre-allocate array for results
# k_bestr[f, v, n] = best eigenmode Spearman R for network n at frequency
# freq_vec[f] and velocity nu_vec[v].
k_bestr = np.zeros((len(freq_vec), len(nu_vec), num_fc))
# compute spearman Rs:
for i in np.arange(0, num_fc):
    print('Computing for {} network'.format(fc_dk_normalized.index[i]))
    for f_ind in np.arange(0, len(freq_vec)):
        for v_ind in np.arange(0, len(nu_vec)):
            # get eigenmodes of complex laplacian:
            hcp_brain.decompose_complex_laplacian(alpha = default_alpha, k = None, f = freq_vec[f_ind], speed = nu_vec[v_ind])
            # compute spearman correlation
            spearman_eig = eigenmode.get_correlation_df(
                hcp_brain.norm_eigenmodes, fc_dk_normalized.iloc[[i]], method = 'spearman'
            )
            k_bestr[f_ind, v_ind, i] = np.max(spearman_eig.values)
# Plot as above:
with plt.style.context("seaborn-paper"):
    k_fig, k_ax = plt.subplots(1,7, figsize = (8,4))
    for i, ax in enumerate(k_fig.axes):
        im = ax.imshow(k_bestr[:,:,i], vmin = 0, vmax = 1, cmap = turbo, aspect = 'auto')
        ax.yaxis.set_major_locator(plt.LinearLocator(numticks = f_ticks))
        ax.xaxis.tick_top()
        ax.set_yticklabels(f_labels)
        ax.xaxis.set_major_locator(plt.LinearLocator(numticks = nu_ticks))
        ax.set_xticklabels(nu_labels)
        im.set_clim(dynamic_range)
        if i < 3:
            ax.set_title(fc_names[i], y=-0.08, fontsize=8, weight="bold")
        else:
            ax.set_title(fc_names[i], y=-0.12, fontsize=8, weight="bold")
    plt.suptitle('Transmission Velocity (m/s)', fontsize = 12, y = 1)
    cbar_ax = k_fig.add_axes([1, 0.15, 0.03, 0.7])
    cb = k_fig.colorbar(im, cax=cbar_ax, extend="both")
    k_fig.add_subplot(1, 1, 1, frameon=False)
    plt.tick_params(labelcolor="none", top="off", bottom="off", left="off", right="off")
    plt.grid(False)
    plt.ylabel('Frequency (Hz)', fontsize = 12)
    plt.tight_layout()
    plt.savefig('../figures/fig5/k_bestr.png', dpi = 300, bbox_inches = 'tight')
```
|
github_jupyter
|
```
%matplotlib inline
```
Loading data in PyTorch
=======================
PyTorch features extensive neural network building blocks with a simple,
intuitive, and stable API. PyTorch includes packages to prepare and load
common datasets for your model.
Introduction
------------
At the heart of PyTorch data loading utility is the
`torch.utils.data.DataLoader <https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader>`__
class. It represents a Python iterable over a dataset. Libraries in
PyTorch offer built-in high-quality datasets for you to use in
`torch.utils.data.Dataset <https://pytorch.org/docs/stable/data.html#torch.utils.data.Dataset>`__.
These datasets are currently available in:
* `torchvision <https://pytorch.org/docs/stable/torchvision/datasets.html>`__
* `torchaudio <https://pytorch.org/audio/datasets.html>`__
* `torchtext <https://pytorch.org/text/datasets.html>`__
with more to come.
Using the Yesno dataset from ``torchaudio.datasets.YESNO``, we will
demonstrate how to effectively and efficiently load data from a PyTorch
``Dataset`` into a PyTorch ``DataLoader``.
Setup
-----
Before we begin, we need to install ``torchaudio`` to have access to the
dataset.
::
pip install torchaudio
Steps
-----
1. Import all necessary libraries for loading our data
2. Access the data in the dataset
3. Loading the data
4. Iterate over the data
5. [Optional] Visualize the data
1. Import necessary libraries for loading our data
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
For this recipe, we will use ``torch`` and ``torchaudio``. Depending on
what built-in datasets you use, you can also install and import
``torchvision`` or ``torchtext``.
```
import torch
import torchaudio
```
2. Access the data in the dataset
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The Yesno dataset in ``torchaudio`` features sixty recordings of one
individual saying yes or no in Hebrew; with each recording being eight
words long (`read more here <https://www.openslr.org/1/>`__).
``torchaudio.datasets.YESNO`` creates a dataset for YesNo.
::
torchaudio.datasets.YESNO(
root,
url='http://www.openslr.org/resources/1/waves_yesno.tar.gz',
folder_in_archive='waves_yesno',
download=False,
transform=None,
target_transform=None)
Each item in the dataset is a tuple of the form: (waveform, sample_rate,
labels).
You must set a ``root`` for the Yesno dataset, which is where the
training and testing dataset will exist. The other parameters are
optional, with their default values shown. Here is some additional
useful info on the other parameters:
```
# * ``download``: If true, downloads the dataset from the internet and puts it in root directory. If dataset is already downloaded, it is not downloaded again.
# * ``transform``: Using transforms on your data allows you to take it from its source state and transform it into data that’s joined together, de-normalized, and ready for training. Each library in PyTorch supports a growing list of transformations.
# * ``target_transform``: A function/transform that takes in the target and transforms it.
#
# Let’s access our Yesno data:
#
# A data point in Yesno is a tuple (waveform, sample_rate, labels) where labels
# is a list of integers with 1 for yes and 0 for no.
yesno_data_trainset = torchaudio.datasets.YESNO('./', download=True)
# Pick data point number 3 to see an example of the the yesno_data:
n = 3
waveform, sample_rate, labels = yesno_data[n]
print("Waveform: {}\nSample rate: {}\nLabels: {}".format(waveform, sample_rate, labels))
```
When using this data in practice, it is best practice to provision the
data into a “training” dataset and a “testing” dataset. This ensures
that you have out-of-sample data to test the performance of your model.
3. Loading the data
~~~~~~~~~~~~~~~~~~~~~~~
Now that we have access to the dataset, we must pass it through
``torch.utils.data.DataLoader``. The ``DataLoader`` combines the dataset
and a sampler, returning an iterable over the dataset.
```
# Wrap the dataset in a DataLoader: one example per batch, shuffled order.
data_loader = torch.utils.data.DataLoader(
    yesno_data,
    batch_size=1,
    shuffle=True,
)
```
4. Iterate over the data
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Our data is now iterable using the ``data_loader``. This will be
necessary when we begin training our model! You will notice that now
each data entry in the ``data_loader`` object is converted to a tensor
containing tensors representing our waveform, sample rate, and labels.
```
# Draw a single batch from the loader; each field comes back as a tensor.
# `data` stays bound afterwards — the visualization cell below reuses it.
data = next(iter(data_loader))
print("Data: ", data)
print("Waveform: {}\nSample rate: {}\nLabels: {}".format(data[0], data[1], data[2]))
```
5. [Optional] Visualize the data
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
You can optionally visualize your data to further understand the output
from your ``DataLoader``.
```
import matplotlib.pyplot as plt
# `data` is the batch drawn from data_loader above; `waveform` is the tensor
# fetched directly from the dataset in the earlier cell.
print(data[0][0].numpy())
plt.figure()
# transpose to (samples, channels) so each channel plots as one line
plt.plot(waveform.t().numpy())
```
Congratulations! You have successfully loaded data in PyTorch.
Learn More
----------
Take a look at these other recipes to continue your learning:
- `Defining a Neural Network <https://pytorch.org/tutorials/recipes/recipes/defining_a_neural_network.html>`__
- `What is a state_dict in PyTorch <https://pytorch.org/tutorials/recipes/recipes/what_is_state_dict.html>`__
|
github_jupyter
|
# Large Scale Training with VISSL Training (mixed precision, LARC, ZeRO etc)
In this tutorial, we show configuration settings that users can set for training large models.
You can make a copy of this tutorial by `File -> Open in playground mode` and make changes there. DO NOT request access to this tutorial.
# Using LARC
LARC (Large Batch Training of Convolutional Networks) is a technique proposed by **Yang You, Igor Gitman, Boris Ginsburg** in https://arxiv.org/abs/1708.03888 for improving the convergence of large batch size trainings.
LARC uses the ratio between gradient and parameter magnitudes to calculate an adaptive local learning rate for each individual parameter.
See the [LARC paper](<https://arxiv.org/abs/1708.03888>) for calculation of learning rate. In practice, it modifies the gradients of parameters as a proxy
for modifying the learning rate of the parameters.
## How to enable LARC
VISSL supports the LARC implementation from [NVIDIA's Apex LARC](https://github.com/NVIDIA/apex/blob/master/apex/parallel/LARC.py). To use LARC, users need to set config option
:code:`OPTIMIZER.use_larc=True`. VISSL exposes LARC parameters that users can tune. Full list of LARC parameters exposed by VISSL:
```yaml
OPTIMIZER:
name: "sgd"
use_larc: False # supported for SGD only for now
larc_config:
clip: False
eps: 1e-08
trust_coefficient: 0.001
```
**NOTE:** LARC is currently supported for SGD optimizer only in VISSL.
# Using Apex
In order to use Apex, VISSL provides `anaconda` and `pip` packages of Apex (compiled with Optimized C++ extensions/CUDA kernels). The Apex
packages are provided for all versions of `CUDA (9.2, 10.0, 10.1, 10.2, 11.0), PyTorch >= 1.4 and Python >=3.6 and <=3.9`.
Follow VISSL's instructions to [install apex in pip](https://github.com/facebookresearch/vissl/blob/master/INSTALL.md#step-2-install-pytorch-opencv-and-apex-pip) and instructions to [install apex in conda](https://github.com/facebookresearch/vissl/blob/master/INSTALL.md#step-3-install-apex-conda).
# Using Mixed Precision
Many self-supervised approaches leverage mixed precision training by default for better training speed and reducing the model memory requirement.
For this, we use [NVIDIA Apex Library with AMP](https://nvidia.github.io/apex/amp.html#o1-mixed-precision-recommended-for-typical-use).
Users can tune the AMP level to the levels supported by NVIDIA. See [this for details on Apex amp levels](https://nvidia.github.io/apex/amp.html#opt-levels).
To use Mixed precision training, one needs to set the following parameters in configuration file:
```yaml
MODEL:
AMP_PARAMS:
USE_AMP: True
# Use O1 as it is robust and stable than O3. If you want to use O3, we recommend
# the following setting:
# {"opt_level": "O3", "keep_batchnorm_fp32": True, "master_weights": True, "loss_scale": "dynamic"}
AMP_ARGS: {"opt_level": "O1"}
```
# Using ZeRO
**ZeRO: Memory Optimizations Toward Training Trillion Parameter Models** is a technique developed by **Samyam Rajbhandari, Jeff Rasley, Olatunji Ruwase, Yuxiong He** in [this paper](https://arxiv.org/abs/1910.02054).
When training models with billions of parameters, GPU memory becomes a bottleneck. ZeRO can offer 4x to 8x reductions in memory, thus allowing larger models to fit in memory.
## How ZeRO works?
Memory requirement of a model can be broken down roughly into:
1. activations memory
2. model parameters
3. parameters momentum buffers (optimizer state)
4. parameters gradients
ZeRO *shards* the optimizer state and the parameter gradients onto different devices and reduces the memory needed per device.
## How to use ZeRO in VISSL?
VISSL uses [FAIRScale](https://github.com/facebookresearch/fairscale)_ library which implements ZeRO in PyTorch.
Using VISSL in ZeRO involves no code changes and can simply be done by setting some configuration options in the yaml files.
In order to use ZeRO, user needs to set `OPTIMIZER.name=zero` and nest the desired optimizer (for example SGD) settings in `OPTIMIZER.base_optimizer`.
An example for using ZeRO with LARC and SGD optimization:
```yaml
OPTIMIZER:
name: zero
base_optimizer:
name: sgd
use_larc: False
larc_config:
clip: False
trust_coefficient: 0.001
eps: 0.00000001
weight_decay: 0.000001
momentum: 0.9
nesterov: False
```
**NOTE**: ZeRO works seamlessly with LARC and mixed precision training. Using ZeRO with activation checkpointing is not yet enabled primarily due to manual gradient reduction need for activation checkpointing.
# Using Stateful Data Sampler
## Issue with PyTorch DataSampler for large data training
PyTorch default [torch.utils.data.distributed.DistributedSampler](https://github.com/pytorch/pytorch/blob/master/torch/utils/data/distributed.py#L12) is the default sampler used for many trainings. However, it becomes limiting to use this sampler in case of large batch size trainings for 2 reasons:
- Using PyTorch `DataSampler`, each trainer shuffles the full data (assuming shuffling is used) and then each trainer gets a view of this shuffled data. If the dataset is large (100 millions, 1 billion or more), generating very large permutation
on each trainer can lead to large CPU memory consumption per machine. Hence, it becomes difficult to use the PyTorch default `DataSampler` when user wants to train on large data and for several epochs (for example: 10 epochs of 100M images).
- When using PyTorch `DataSampler` and the training is resumed, the sampler will serve the full dataset. However, in case of large data trainings (like 1 billion images or more), one mostly trains for 1 epoch only.
In such cases, when the training resumes from the middle of the epoch, the sampler will serve the full 1 billion images which is not what we want.
To solve both the above issues, VISSL provides a custom sampler `StatefulDistributedSampler` which inherits from the PyTorch `DistributedSampler` and fixes the above issues in the following manner:
- Sampler creates the view of the data per trainer and then shuffles only the data that trainer is supposed to view. This keeps the CPU memory requirement expected.
- Sampler adds a member `start_iter` which tracks what iteration number of the given epoch model is at. When the training is resumed, the `start_iter` will be properly set to the last iteration number and the sampler will serve only the remainder of data.
## How to use VISSL custom DataSampler
Using the VISSL-provided custom sampler `StatefulDistributedSampler` is extremely easy and involves simply setting the correct configuration options as below:
```yaml
DATA:
TRAIN:
USE_STATEFUL_DISTRIBUTED_SAMPLER: True
TEST:
USE_STATEFUL_DISTRIBUTED_SAMPLER: True
```
**NOTE**: Users can use `StatefulDistributedSampler` for only the training dataset and use the PyTorch default `DataSampler` if desired i.e. it is not mandatory to use the same sampler type for all data splits.
# Activation Checkpointing
Activation checkpointing is a very powerful technique to reduce the memory requirement of a model. This is especially useful when training very large models with billions of parameters.
## How it works?
Activation checkpointing trades compute for memory. It discards intermediate activations during the forward pass, and recomputes them during the backward pass. In
our experiments, using activation checkpointing, we observe negligible compute overhead in memory-bound settings while getting big memory savings.
In summary, this technique offers 2 benefits:
- saves gpu memory that can be used to fit large models
- allows increasing training batch size for a given model
We recommend users to read the documentation available [here](https://pytorch.org/docs/stable/checkpoint.html) for further details on activation checkpointing.
## How to use activation checkpointing in VISSL?
VISSL integrates activation checkpointing implementation directly from PyTorch available [here](https://pytorch.org/docs/stable/checkpoint.html).
Using activation checkpointing in VISSL is extremely easy and doable with simple settings in the configuration file. The settings required are as below:
```yaml
MODEL:
ACTIVATION_CHECKPOINTING:
# whether to use activation checkpointing or not
USE_ACTIVATION_CHECKPOINTING: True
# how many times the model should be checkpointed. User should tune this parameter
# and find the number that offers best memory saving and compute tradeoff.
NUM_ACTIVATION_CHECKPOINTING_SPLITS: 8
DISTRIBUTED:
# if True, does the gradient reduction in DDP manually. This is useful during the
# activation checkpointing and sometimes saving the memory from the pytorch gradient
# buckets.
MANUAL_GRADIENT_REDUCTION: True
```
|
github_jupyter
|
# Logistic Regression with a Neural Network mindset
Welcome to your first (required) programming assignment! You will build a logistic regression classifier to recognize cats. This assignment will step you through how to do this with a Neural Network mindset, and so will also hone your intuitions about deep learning.
**Instructions:**
- Do not use loops (for/while) in your code, unless the instructions explicitly ask you to do so.
**You will learn to:**
- Build the general architecture of a learning algorithm, including:
- Initializing parameters
- Calculating the cost function and its gradient
- Using an optimization algorithm (gradient descent)
- Gather all three functions above into a main model function, in the right order.
## 1 - Packages ##
First, let's run the cell below to import all the packages that you will need during this assignment.
- [numpy](https://www.numpy.org) is the fundamental package for scientific computing with Python.
- [h5py](http://www.h5py.org) is a common package to interact with a dataset that is stored on an H5 file.
- [matplotlib](http://matplotlib.org) is a famous library to plot graphs in Python.
- [PIL](http://www.pythonware.com/products/pil/) and [scipy](https://www.scipy.org/) are used here to test your model with your own picture at the end.
```
import numpy as np
import matplotlib.pyplot as plt
import h5py
import scipy
import imageio
from PIL import Image
from scipy import ndimage
from lr_utils import load_dataset
from skimage.transform import resize
%matplotlib inline
```
## 2 - Overview of the Problem set ##
**Problem Statement**: You are given a dataset ("data.h5") containing:
- a training set of m_train images labeled as cat (y=1) or non-cat (y=0)
- a test set of m_test images labeled as cat or non-cat
- each image is of shape (num_px, num_px, 3) where 3 is for the 3 channels (RGB). Thus, each image is square (height = num_px) and (width = num_px).
You will build a simple image-recognition algorithm that can correctly classify pictures as cat or non-cat.
Let's get more familiar with the dataset. Load the data by running the following code.
```
# Loading the data (cat/non-cat)
train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset()
```
We added "_orig" at the end of image datasets (train and test) because we are going to preprocess them. After preprocessing, we will end up with train_set_x and test_set_x (the labels train_set_y and test_set_y don't need any preprocessing).
Each line of your train_set_x_orig and test_set_x_orig is an array representing an image. You can visualize an example by running the following code. Feel free also to change the `index` value and re-run to see other images.
```
# Example of a picture
index = 34
plt.imshow(test_set_x_orig[index])
print ("y = " + str(test_set_y[:, index]) + ", it's a '" + classes[np.squeeze(test_set_y[:, index])].decode("utf-8") + "' picture.")
```
Many software bugs in deep learning come from having matrix/vector dimensions that don't fit. If you can keep your matrix/vector dimensions straight you will go a long way toward eliminating many bugs.
**Exercise:** Find the values for:
- m_train (number of training examples)
- m_test (number of test examples)
- num_px (= height = width of a training image)
Remember that `train_set_x_orig` is a numpy-array of shape (m_train, num_px, num_px, 3). For instance, you can access `m_train` by writing `train_set_x_orig.shape[0]`.
```
### START CODE HERE ### (≈ 3 lines of code)
m_train = train_set_x_orig.shape[0]
m_test = test_set_x_orig.shape[0]
num_px = train_set_x_orig.shape[1]
### END CODE HERE ###
print ("Number of training examples: m_train = " + str(m_train))
print ("Number of testing examples: m_test = " + str(m_test))
print ("Height/Width of each image: num_px = " + str(num_px))
print ("Each image is of size: (" + str(num_px) + ", " + str(num_px) + ", 3)")
print ("train_set_x shape: " + str(train_set_x_orig.shape))
print ("train_set_y shape: " + str(train_set_y.shape))
print ("test_set_x shape: " + str(test_set_x_orig.shape))
print ("test_set_y shape: " + str(test_set_y.shape))
```
**Expected Output for m_train, m_test and num_px**:
<table style="width:15%">
<tr>
<td>**m_train**</td>
<td> 209 </td>
</tr>
<tr>
<td>**m_test**</td>
<td> 50 </td>
</tr>
<tr>
<td>**num_px**</td>
<td> 64 </td>
</tr>
</table>
For convenience, you should now reshape images of shape (num_px, num_px, 3) in a numpy-array of shape (num_px $*$ num_px $*$ 3, 1). After this, our training (and test) dataset is a numpy-array where each column represents a flattened image. There should be m_train (respectively m_test) columns.
**Exercise:** Reshape the training and test data sets so that images of size (num_px, num_px, 3) are flattened into single vectors of shape (num\_px $*$ num\_px $*$ 3, 1).
A trick when you want to flatten a matrix X of shape (a,b,c,d) to a matrix X_flatten of shape (b$*$c$*$d, a) is to use:
```python
X_flatten = X.reshape(X.shape[0], -1).T # X.T is the transpose of X
```
```
# Reshape the training and test examples
### START CODE HERE ### (≈ 2 lines of code)
#train_set_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[1] * train_set_x_orig.shape[2] * 3, train_set_x_orig.shape[0])
#test_set_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[1] * test_set_x_orig.shape[2] * 3, test_set_x_orig.shape[0])
train_set_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T
test_set_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T
### END CODE HERE ###
print ("train_set_x_flatten shape: " + str(train_set_x_flatten.shape))
print ("train_set_y shape: " + str(train_set_y.shape))
print ("test_set_x_flatten shape: " + str(test_set_x_flatten.shape))
print ("test_set_y shape: " + str(test_set_y.shape))
print ("sanity check after reshaping: " + str(train_set_x_flatten[0:5,0]))
```
**Expected Output**:
<table style="width:35%">
<tr>
<td>**train_set_x_flatten shape**</td>
<td> (12288, 209)</td>
</tr>
<tr>
<td>**train_set_y shape**</td>
<td>(1, 209)</td>
</tr>
<tr>
<td>**test_set_x_flatten shape**</td>
<td>(12288, 50)</td>
</tr>
<tr>
<td>**test_set_y shape**</td>
<td>(1, 50)</td>
</tr>
<tr>
<td>**sanity check after reshaping**</td>
<td>[17 31 56 22 33]</td>
</tr>
</table>
To represent color images, the red, green and blue channels (RGB) must be specified for each pixel, and so the pixel value is actually a vector of three numbers ranging from 0 to 255.
One common preprocessing step in machine learning is to center and standardize your dataset, meaning that you substract the mean of the whole numpy array from each example, and then divide each example by the standard deviation of the whole numpy array. But for picture datasets, it is simpler and more convenient and works almost as well to just divide every row of the dataset by 255 (the maximum value of a pixel channel).
<!-- During the training of your model, you're going to multiply weights and add biases to some initial inputs in order to observe neuron activations. Then you backpropogate with the gradients to train the model. But, it is extremely important for each feature to have a similar range such that our gradients don't explode. You will see that more in detail later in the lectures. !-->
Let's standardize our dataset.
```
train_set_x = train_set_x_flatten/255.
test_set_x = test_set_x_flatten/255.
```
<font color='blue'>
**What you need to remember:**
Common steps for pre-processing a new dataset are:
- Figure out the dimensions and shapes of the problem (m_train, m_test, num_px, ...)
- Reshape the datasets such that each example is now a vector of size (num_px \* num_px \* 3, 1)
- "Standardize" the data
## 3 - General Architecture of the learning algorithm ##
It's time to design a simple algorithm to distinguish cat images from non-cat images.
You will build a Logistic Regression, using a Neural Network mindset. The following Figure explains why **Logistic Regression is actually a very simple Neural Network!**
<img src="images/LogReg_kiank.png" style="width:650px;height:400px;">
**Mathematical expression of the algorithm**:
For one example $x^{(i)}$:
$$z^{(i)} = w^T x^{(i)} + b \tag{1}$$
$$\hat{y}^{(i)} = a^{(i)} = sigmoid(z^{(i)})\tag{2}$$
$$ \mathcal{L}(a^{(i)}, y^{(i)}) = - y^{(i)} \log(a^{(i)}) - (1-y^{(i)} ) \log(1-a^{(i)})\tag{3}$$
The cost is then computed by summing over all training examples:
$$ J = \frac{1}{m} \sum_{i=1}^m \mathcal{L}(a^{(i)}, y^{(i)})\tag{6}$$
**Key steps**:
In this exercise, you will carry out the following steps:
- Initialize the parameters of the model
- Learn the parameters for the model by minimizing the cost
- Use the learned parameters to make predictions (on the test set)
- Analyse the results and conclude
## 4 - Building the parts of our algorithm ##
The main steps for building a Neural Network are:
1. Define the model structure (such as number of input features)
2. Initialize the model's parameters
3. Loop:
- Calculate current loss (forward propagation)
- Calculate current gradient (backward propagation)
- Update parameters (gradient descent)
You often build 1-3 separately and integrate them into one function we call `model()`.
### 4.1 - Helper functions
**Exercise**: Using your code from "Python Basics", implement `sigmoid()`. As you've seen in the figure above, you need to compute $sigmoid( w^T x + b) = \frac{1}{1 + e^{-(w^T x + b)}}$ to make predictions. Use np.exp().
```
# GRADED FUNCTION: sigmoid
def sigmoid(z):
    """
    Compute the logistic sigmoid of z.

    Arguments:
    z -- A scalar or numpy array of any size.

    Return:
    s -- sigmoid(z), elementwise, same shape as z
    """
    # sigmoid(z) = 1 / (1 + e^(-z)); np.exp broadcasts over arrays.
    return 1.0 / (1.0 + np.exp(-z))
print ("sigmoid([0, 2]) = " + str(sigmoid(np.array([0,2]))))
```
**Expected Output**:
<table>
<tr>
<td>**sigmoid([0, 2])**</td>
<td> [ 0.5 0.88079708]</td>
</tr>
</table>
### 4.2 - Initializing parameters
**Exercise:** Implement parameter initialization in the cell below. You have to initialize w as a vector of zeros. If you don't know what numpy function to use, look up np.zeros() in the Numpy library's documentation.
```
# GRADED FUNCTION: initialize_with_zeros
def initialize_with_zeros(dim):
    """
    Create zero-initialized logistic-regression parameters.

    Argument:
    dim -- size of the w vector we want (number of input features)

    Returns:
    w -- numpy array of zeros with shape (dim, 1)
    b -- bias, initialized to the scalar 0
    """
    w, b = np.zeros((dim, 1)), 0

    # Sanity checks: column-vector weights, numeric scalar bias.
    assert w.shape == (dim, 1)
    assert isinstance(b, (float, int))

    return w, b
dim = 2
w, b = initialize_with_zeros(dim)
print ("w = " + str(w))
print ("b = " + str(b))
```
**Expected Output**:
<table style="width:15%">
<tr>
<td> ** w ** </td>
<td> [[ 0.]
[ 0.]] </td>
</tr>
<tr>
<td> ** b ** </td>
<td> 0 </td>
</tr>
</table>
For image inputs, w will be of shape (num_px $\times$ num_px $\times$ 3, 1).
### 4.3 - Forward and Backward propagation
Now that your parameters are initialized, you can do the "forward" and "backward" propagation steps for learning the parameters.
**Exercise:** Implement a function `propagate()` that computes the cost function and its gradient.
**Hints**:
Forward Propagation:
- You get X
- You compute $A = \sigma(w^T X + b) = (a^{(1)}, a^{(2)}, ..., a^{(m-1)}, a^{(m)})$
- You calculate the cost function: $J = -\frac{1}{m}\sum_{i=1}^{m}y^{(i)}\log(a^{(i)})+(1-y^{(i)})\log(1-a^{(i)})$
Here are the two formulas you will be using:
$$ \frac{\partial J}{\partial w} = \frac{1}{m}X(A-Y)^T\tag{7}$$
$$ \frac{\partial J}{\partial b} = \frac{1}{m} \sum_{i=1}^m (a^{(i)}-y^{(i)})\tag{8}$$
```
# GRADED FUNCTION: propagate
def propagate(w, b, X, Y):
    """
    Compute the logistic-regression cost and its gradients (one forward/backward pass).

    Arguments:
    w -- weights, a numpy array of size (num_px * num_px * 3, 1)
    b -- bias, a scalar
    X -- data of size (num_px * num_px * 3, number of examples)
    Y -- true "label" vector (containing 0 if non-cat, 1 if cat) of size (1, number of examples)

    Returns:
    grads -- dict with "dw" (gradient of the loss w.r.t. w, same shape as w)
             and "db" (gradient of the loss w.r.t. b, a scalar)
    cost -- negative log-likelihood cost for logistic regression
    """
    m = X.shape[1]  # number of examples

    # Forward pass: activations, then the averaged cross-entropy cost.
    A = sigmoid(np.dot(w.T, X) + b)
    cost = - 1/m * np.sum(Y * np.log(A) + (1 - Y) * np.log(1 - A))

    # Backward pass: analytic gradients of the cost.
    dw = 1 / m * np.dot(X, (A - Y).T)
    db = 1 / m * np.sum(A - Y)

    assert dw.shape == w.shape
    assert db.dtype == float
    cost = np.squeeze(cost)  # collapse to a bare scalar
    assert cost.shape == ()

    return {"dw": dw, "db": db}, cost
w, b, X, Y = np.array([[1.],[2.]]), 2., np.array([[1.,2.,-1.],[3.,4.,-3.2]]), np.array([[1,0,1]])
grads, cost = propagate(w, b, X, Y)
print ("dw = " + str(grads["dw"]))
print ("db = " + str(grads["db"]))
print ("cost = " + str(cost))
```
**Expected Output**:
<table style="width:50%">
<tr>
<td> ** dw ** </td>
<td> [[ 0.99845601]
[ 2.39507239]]</td>
</tr>
<tr>
<td> ** db ** </td>
<td> 0.00145557813678 </td>
</tr>
<tr>
<td> ** cost ** </td>
<td> 5.801545319394553 </td>
</tr>
</table>
### 4.4 - Optimization
- You have initialized your parameters.
- You are also able to compute a cost function and its gradient.
- Now, you want to update the parameters using gradient descent.
**Exercise:** Write down the optimization function. The goal is to learn $w$ and $b$ by minimizing the cost function $J$. For a parameter $\theta$, the update rule is $ \theta = \theta - \alpha \text{ } d\theta$, where $\alpha$ is the learning rate.
```
# GRADED FUNCTION: optimize
def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost = False):
    """
    Optimize w and b by running batch gradient descent.

    Arguments:
    w -- weights, a numpy array of size (num_px * num_px * 3, 1)
    b -- bias, a scalar
    X -- data of shape (num_px * num_px * 3, number of examples)
    Y -- true "label" vector (containing 0 if non-cat, 1 if cat), of shape (1, number of examples)
    num_iterations -- number of gradient-descent steps to run
    learning_rate -- step size for each parameter update
    print_cost -- True to print the loss every 100 steps

    Returns:
    params -- dictionary containing the optimized weights w and bias b
    grads -- dictionary containing the final gradients of the weights and bias
    costs -- list of the cost recorded every 100 iterations (for the learning curve)
    """
    costs = []

    for i in range(num_iterations):
        # One forward/backward pass at the current parameters.
        grads, cost = propagate(w, b, X, Y)
        dw, db = grads["dw"], grads["db"]

        # Gradient-descent update rule.
        w = w - learning_rate * dw
        b = b - learning_rate * db

        # Record (and optionally report) the cost every 100 iterations.
        if i % 100 == 0:
            costs.append(cost)
            if print_cost:
                print ("Cost after iteration %i: %f" %(i, cost))

    params = {"w": w, "b": b}
    grads = {"dw": dw, "db": db}

    return params, grads, costs
params, grads, costs = optimize(w, b, X, Y, num_iterations= 100, learning_rate = 0.009, print_cost = False)
print ("w = " + str(params["w"]))
print ("b = " + str(params["b"]))
print ("dw = " + str(grads["dw"]))
print ("db = " + str(grads["db"]))
```
**Expected Output**:
<table style="width:40%">
<tr>
<td> **w** </td>
<td>[[ 0.19033591] [ 0.12259159]] </td>
</tr>
<tr>
<td> **b** </td>
<td> 1.92535983008 </td>
</tr>
<tr>
<td> **dw** </td>
<td> [[ 0.67752042] [ 1.41625495]] </td>
</tr>
<tr>
<td> **db** </td>
<td> 0.219194504541 </td>
</tr>
</table>
**Exercise:** The previous function will output the learned w and b. We are able to use w and b to predict the labels for a dataset X. Implement the `predict()` function. There are two steps to computing predictions:
1. Calculate $\hat{Y} = A = \sigma(w^T X + b)$
2. Convert the entries of a into 0 (if activation <= 0.5) or 1 (if activation > 0.5), stores the predictions in a vector `Y_prediction`. If you wish, you can use an `if`/`else` statement in a `for` loop (though there is also a way to vectorize this).
```
# GRADED FUNCTION: predict
def predict(w, b, X):
    '''
    Predict whether the label is 0 or 1 using learned logistic regression parameters (w, b)

    Arguments:
    w -- weights, a numpy array of size (num_px * num_px * 3, 1)
    b -- bias, a scalar
    X -- data of size (num_px * num_px * 3, number of examples)

    Returns:
    Y_prediction -- a numpy array (vector) containing all predictions (0/1) for the examples in X
    '''
    m = X.shape[1]
    w = w.reshape(X.shape[0], 1)

    # Probabilities of a cat being present in each picture.
    A = sigmoid(np.dot(w.T, X) + b)

    # Vectorized thresholding instead of a per-element Python loop:
    # 1.0 where the activation is strictly greater than 0.5, else 0.0.
    Y_prediction = (A > 0.5).astype(float)

    assert Y_prediction.shape == (1, m)

    return Y_prediction
w = np.array([[0.1124579],[0.23106775]])
b = -0.3
X = np.array([[1.,-1.1,-3.2],[1.2,2.,0.1]])
print ("predictions = " + str(predict(w, b, X)))
```
**Expected Output**:
<table style="width:30%">
<tr>
<td>
**predictions**
</td>
<td>
[[ 1. 1. 0.]]
</td>
</tr>
</table>
<font color='blue'>
**What to remember:**
You've implemented several functions that:
- Initialize (w,b)
- Optimize the loss iteratively to learn parameters (w,b):
- computing the cost and its gradient
- updating the parameters using gradient descent
- Use the learned (w,b) to predict the labels for a given set of examples
## 5 - Merge all functions into a model ##
You will now see how the overall model is structured by putting together all the building blocks (functions implemented in the previous parts) together, in the right order.
**Exercise:** Implement the model function. Use the following notation:
- Y_prediction_test for your predictions on the test set
- Y_prediction_train for your predictions on the train set
- w, costs, grads for the outputs of optimize()
```
# GRADED FUNCTION: model
def model(X_train, Y_train, X_test, Y_test, num_iterations = 2000, learning_rate = 0.5, print_cost = False):
    """
    Builds the logistic regression model by calling the functions implemented previously.

    Arguments:
    X_train -- training set represented by a numpy array of shape (num_px * num_px * 3, m_train)
    Y_train -- training labels represented by a numpy array (vector) of shape (1, m_train)
    X_test -- test set represented by a numpy array of shape (num_px * num_px * 3, m_test)
    Y_test -- test labels represented by a numpy array (vector) of shape (1, m_test)
    num_iterations -- hyperparameter representing the number of iterations to optimize the parameters
    learning_rate -- hyperparameter representing the learning rate used in the update rule of optimize()
    print_cost -- Set to true to print the cost every 100 iterations

    Returns:
    d -- dictionary containing information about the model.
    """
    # Start from zero parameters; one weight per input feature.
    w, b = initialize_with_zeros(X_train.shape[0])

    # Learn the parameters by gradient descent, then keep the optimized values.
    params, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost)
    w, b = params["w"], params["b"]

    # Predictions on both splits with the learned parameters.
    Y_prediction_train = predict(w, b, X_train)
    Y_prediction_test = predict(w, b, X_test)

    # Accuracy = 100% minus the mean absolute prediction error.
    print("train accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))
    print("test accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))

    return {
        "costs": costs,
        "Y_prediction_test": Y_prediction_test,
        "Y_prediction_train": Y_prediction_train,
        "w": w,
        "b": b,
        "learning_rate": learning_rate,
        "num_iterations": num_iterations,
    }
```
Run the following cell to train your model.
```
d = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 2000, learning_rate = 0.005, print_cost = True)
```
**Expected Output**:
<table style="width:40%">
<tr>
<td> **Cost after iteration 0 ** </td>
<td> 0.693147 </td>
</tr>
<tr>
<td> <center> $\vdots$ </center> </td>
<td> <center> $\vdots$ </center> </td>
</tr>
<tr>
<td> **Train Accuracy** </td>
<td> 99.04306220095694 % </td>
</tr>
<tr>
<td>**Test Accuracy** </td>
<td> 70.0 % </td>
</tr>
</table>
**Comment**: Training accuracy is close to 100%. This is a good sanity check: your model is working and has high enough capacity to fit the training data. Test accuracy is 70%. It is actually not bad for this simple model, given the small dataset we used and that logistic regression is a linear classifier. But no worries, you'll build an even better classifier next week!
Also, you see that the model is clearly overfitting the training data. Later in this specialization you will learn how to reduce overfitting, for example by using regularization. Using the code below (and changing the `index` variable) you can look at predictions on pictures of the test set.
```
# Example of a picture that was wrongly classified.
index = 13
plt.imshow(test_set_x[:,index].reshape((num_px, num_px, 3)))
print ("y = " + str(test_set_y[0,index]) + ", you predicted that it is a \"" + classes[int(d["Y_prediction_test"][0,index])].decode("utf-8") + "\" picture.")
#print ("y = " + str(test_set_y[0,index]) + ", you predicted that it is a \"" + str(d["Y_prediction_test"][0,index]) + "\" picture.")
```
Let's also plot the cost function and the gradients.
```
# Plot learning curve (with costs)
costs = np.squeeze(d['costs'])
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('iterations (per hundreds)')
plt.title("Learning rate =" + str(d["learning_rate"]))
plt.show()
```
**Interpretation**:
You can see the cost decreasing. It shows that the parameters are being learned. However, you see that you could train the model even more on the training set. Try to increase the number of iterations in the cell above and rerun the cells. You might see that the training set accuracy goes up, but the test set accuracy goes down. This is called overfitting.
## 6 - Further analysis (optional/ungraded exercise) ##
Congratulations on building your first image classification model. Let's analyze it further, and examine possible choices for the learning rate $\alpha$.
#### Choice of learning rate ####
**Reminder**:
In order for Gradient Descent to work you must choose the learning rate wisely. The learning rate $\alpha$ determines how rapidly we update the parameters. If the learning rate is too large we may "overshoot" the optimal value. Similarly, if it is too small we will need too many iterations to converge to the best values. That's why it is crucial to use a well-tuned learning rate.
Let's compare the learning curve of our model with several choices of learning rates. Run the cell below. This should take about 1 minute. Feel free also to try different values than the three we have initialized the `learning_rates` variable to contain, and see what happens.
```
learning_rates = [0.01, 0.001, 0.0001]
models = {}
for i in learning_rates:
print ("learning rate is: " + str(i))
models[str(i)] = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 1500, learning_rate = i, print_cost = False)
print ('\n' + "-------------------------------------------------------" + '\n')
for i in learning_rates:
plt.plot(np.squeeze(models[str(i)]["costs"]), label= str(models[str(i)]["learning_rate"]))
plt.ylabel('cost')
plt.xlabel('iterations (hundreds)')
legend = plt.legend(loc='upper center', shadow=True)
frame = legend.get_frame()
frame.set_facecolor('0.90')
plt.show()
```
**Interpretation**:
- Different learning rates give different costs and thus different predictions results.
- If the learning rate is too large (0.01), the cost may oscillate up and down. It may even diverge (though in this example, using 0.01 still eventually ends up at a good value for the cost).
- A lower cost doesn't mean a better model. You have to check if there is possibly overfitting. It happens when the training accuracy is a lot higher than the test accuracy.
- In deep learning, we usually recommend that you:
- Choose the learning rate that better minimizes the cost function.
- If your model overfits, use other techniques to reduce overfitting. (We'll talk about this in later videos.)
## 7 - Test with your own image (optional/ungraded exercise) ##
Congratulations on finishing this assignment. You can use your own image and see the output of your model. To do that:
1. Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub.
2. Add your image to this Jupyter Notebook's directory, in the "images" folder
3. Change your image's name in the following code
4. Run the code and check if the algorithm is right (1 = cat, 0 = non-cat)!
```
## START CODE HERE ## (PUT YOUR IMAGE NAME)
my_image = "my_image4.jpg" # change this to the name of your image file
## END CODE HERE ##
# We preprocess the image to fit your algorithm.
fname = "images/" + my_image
image = np.array(imageio.imread(fname))
print(image.shape)
my_image = resize(image, (num_px,num_px)).reshape((1, num_px*num_px*3)).T
my_predicted_image = predict(d["w"], d["b"], my_image)
plt.imshow(image)
print("y = " + str(np.squeeze(my_predicted_image)) + ", your algorithm predicts a \"" + classes[int(np.squeeze(my_predicted_image)),].decode("utf-8") + "\" picture.")
```
<font color='blue'>
**What to remember from this assignment:**
1. Preprocessing the dataset is important.
2. You implemented each function separately: initialize(), propagate(), optimize(). Then you built a model().
3. Tuning the learning rate (which is an example of a "hyperparameter") can make a big difference to the algorithm. You will see more examples of this later in this course!
Finally, if you'd like, we invite you to try different things on this Notebook. Make sure you submit before trying anything. Once you submit, things you can play with include:
- Play with the learning rate and the number of iterations
- Try different initialization methods and compare the results
- Test other preprocessings (center the data, or divide each row by its standard deviation)
Bibliography:
- http://www.wildml.com/2015/09/implementing-a-neural-network-from-scratch/
- https://stats.stackexchange.com/questions/211436/why-do-we-normalize-images-by-subtracting-the-datasets-image-mean-and-not-the-c
|
github_jupyter
|
# Funciones generadoras
Por regla general, cuando queremos crear una lista de algún tipo, lo que hacemos es crear la lista vacía, y luego, con un bucle, recorrer varios elementos e ir añadiéndolos a la lista si cumplen una condición:
```
[numero for numero in [0,1,2,3,4,5,6,7,8,9,10] if numero % 2 == 0 ]
```
También vimos cómo era posible utilizar la función **range()** para generar dinámicamente la lista en la memoria, es decir, no teníamos que crearla en el propio código, sino que se interpretaba sobre la marcha:
```
[numero for numero in range(0,11) if numero % 2 == 0 ]
```
La verdad es que **range()** es una especie de función generadora. Por regla general las funciones devuelven un valor con **return**, pero la peculiaridad de los generadores es que van *cediendo* valores sobre la marcha, en tiempo de ejecución.
La función generadora **range(0,11)**, empieza cediendo el **0**, luego se procesa el for comprobando si es par y lo añade a la lista, en la siguiente iteración se cede el **1**, se procesa el for se comprueba si es par, en la siguiente se cede el **2**, etc.
Con esto se logra ocupar el mínimo de espacio en la memoria y podemos generar listas de millones de elementos sin necesidad de almacenarlos previamente.
Veamos a ver cómo crear una función generadora de pares:
```
def pares(n):
    """Generator yielding, in order, the even numbers from 0 to n (inclusive)."""
    numero = 0
    while numero <= n:
        yield numero
        numero += 2
pares(10)
```
Como vemos, en lugar de utilizar el **return**, la función generadora utiliza el **yield**, que significa ceder. Tomando un número busca todos los pares desde 0 hasta el número+1 sirviéndonos de un range().
Sin embargo, fijaros que al imprimir el resultado, éste nos devuelve un objeto de tipo generador.
De la misma forma que recorremos un **range()** podemos utilizar el bucle for para recorrer todos los elementos que devuelve el generador:
```
for numero in pares(10):
print(numero)
```
Utilizando comprensión de listas también podemos crear una lista al vuelo:
```
[numero for numero in pares(10)]
```
Sin embargo el gran potencial de los generadores no es simplemente crear listas, de hecho como ya hemos visto, el propio resultado no es una lista en sí mismo, sino una secuencia iterable con un montón de características únicas.
## Iteradores
Por tanto las funciones generadoras devuelven un objeto que soporta un protocolo de iteración. ¿Qué nos permite hacer? Pues evidentemente controlar el proceso de generación. Teniendo en cuenta que cada vez que la función generadora cede un elemento, queda suspendida y se retoma el control hasta que se le pide generar el siguiente valor.
Así que vamos a tomar nuestro ejemplo de pares desde otra perspectiva, como si fuera un iterador manual, así veremos exactamente a lo que me refiero:
```
pares = pares(3)
```
Bien, ahora tenemos un iterador de pares con todos los números pares entre el 0 y el 3. Vamos a conseguir el primer número par:
```
next(pares)
```
Como vemos la función integrada **next()** nos permite acceder al siguiente elemento de la secuencia. Pero no sólo eso, si volvemos a ejecutarla...
```
next(pares)
```
¡Ahora devuelve el segundo! ¿No os recuerda esto al puntero de los ficheros? Cuando leíamos una línea, el puntero pasaba a la siguiente y así sucesivamente. Pues aquí igual.
¿Y qué pasaría si intentamos acceder al siguiente, aún sabiendo que entre el 0 y el 3 sólo tenemos los pares 0 y 2?
```
next(pares)
```
Pues que nos da un error porque se ha acabado la secuencia, así que tomad nota y capturad la excepción si váis a utilizarlas sin saber exactamente cuantos elementos os devolverá el generador.
Así que la pregunta que nos queda es ¿sólo es posible iterar secuencias generadas al vuelo? Vamos a probar con una lista:
```
lista = [1,2,3,4,5]
next(lista)
```
¿Quizá con una cadena?
```
cadena = "Hola"
next(cadena)
```
Pues no, no podemos iterar ninguna colección como si fuera una secuencia. Sin embargo, hay una función muy interesante que nos permite covertir las cadenas y algunas colecciones a iteradores, la función **iter()**:
```
lista = [1,2,3,4,5]
lista_iterable = iter(lista)
print( next(lista_iterable) )
print( next(lista_iterable) )
print( next(lista_iterable) )
print( next(lista_iterable) )
print( next(lista_iterable) )
cadena = "Hola"
cadena_iterable = iter(cadena)
print( next(cadena_iterable) )
print( next(cadena_iterable) )
print( next(cadena_iterable) )
print( next(cadena_iterable) )
```
Muy bien, ahora ya sabemos qué son las funciones generadoras, cómo utilizarlas, y también cómo convertir algunos objetos a iteradores. Os sugiero probar por vuestra cuenta más colecciones a ver si encontráis alguna más que se pueda iterar.
|
github_jupyter
|
<img src="images/QISKit-c copy.gif" alt="Note: In order for images to show up in this jupyter notebook you need to select File => Trusted Notebook" width="250 px" align="left">
# Hadamard Action: Approach 2
## Jupyter Notebook 2/3 for the Teach Me QISKIT Tutorial Competition
- Connor Fieweger
<img src="images/hadamard_action.png" alt="Note: In order for images to show up in this jupyter notebook you need to select File => Trusted Notebook" width="750 px" align="left">
Another approach to showing equivalence of the presented circuit diagrams is to represent the operators on the qubits as matrices and the qubit states as column vectors. The output is found by applying the matrix that represents the action of the circuit onto the initial state column vector to find the final state column vector. Since the numpy library already enables making linear algebra computations such as these, we'll use that to employ classical programming in order to understand this quantum circuit.
```
import numpy as np
```
## Circuit i)
For i), the initial state of the input is represented by the tensor product of the two input qubits in the initial register. This is given by:
$$\Psi = \psi_1 \otimes \psi_2$$
Where each $\psi$ can be either 0 or 1
This results in the following input states for $\Psi$: |00>, |01>, |10>, or |11>. Represented by column vectors, these are:
$$\text{|00>} = \left(\begin{array}{c}
1 \\
0 \\
0 \\
0
\end{array}\right)$$
$$\text{|01>} = \left(\begin{array}{c}
0 \\
1 \\
0 \\
0
\end{array}\right)$$
$$\text{|10>} = \left(\begin{array}{c}
0 \\
0 \\
1 \\
0
\end{array}\right)$$
$$\text{|11>} = \left(\begin{array}{c}
0 \\
0 \\
0 \\
1
\end{array}\right)$$
```
# These column vectors can be stored in numpy arrays so that we can operate
# on them with the circuit diagram's corresponding matrix (which is to be evaluated)
# as follows:
zero_zero = np.array([[1],[0],[0],[0]])
zero_one = np.array([[0],[1],[0],[0]])
one_zero = np.array([[0],[0],[1],[0]])
one_one = np.array([[0],[0],[0],[1]])
Psi = {'zero_zero': zero_zero, 'zero_one': zero_one, 'one_zero': one_zero, 'one_one': one_one}
# ^We can conveniently store all possible input states in a dictionary and then print to check the representations:
for key, val in Psi.items():
print(key, ':', '\n', val)
```
The action of the circuit gates on this state is simply the CNOT operator. For a control qubit on line 1 and a subject qubit on line 2, CNOT is given by the 4x4 matrix (as discussed in the appendix notebook):
$$CNOT_1 = \left[\begin{array}{cccc}
1 & 0 & 0 & 0 \\
0 & 1 & 0 & 0 \\
0 & 0 & 0 & 1 \\
0 & 0 & 1 & 0
\end{array}\right]$$
This matrix is the operator that describes the effect of the circuit on the initial state. By taking $CNOT_1$|$\Psi$> = |$\Psi$'>, then, the final state for i) can be found.
```
# storing CNOT as a numpy array:
CNOT_1 = np.matrix([[1, 0, 0, 0],[0, 1, 0, 0],[0, 0, 0, 1],[0, 0, 1, 0]])
print(CNOT_1)
print('FINAL STATE OF i):')
#Apply CNOT to each possible state for |Psi> to find --> |Psi'>
for key, val in Psi.items():
print(key, 'becomes..\n', CNOT_1*val)
```
As one can see, the first set of two states (00, 01) has stayed the same, while the second (10, 11) has flipped to (11, 10). This is readily understood when considering the defining logic of the CNOT gate - the subject qubit on line 2 is flipped if the control qubit on line 1 in the state |1> (this is also referred to as the control qubit being 'on'). Summatively, then, the action of i) is given by: [|00>,|01>,|10>,|11>] --> [|00>,|01>,|11>,|10>].
## Circuit ii)
For ii), a similar examination of the input states and the result when the circuit operation matrix is applied to these states can be done. The input states are the same as those in i), so we can just use the variable 'Psi' that we stored earlier. In order to find the matrix representation of the circuit, a little more depth in considering the matrix that represents the gates is required as follows:
First, consider the parallel application of the Hadamard gate 'H' to each wire. In order to represent this as an operator on the two-qubit-tensor-space state ('$\Psi$'), one needs to take the tensor product of the single-qubit-space's ('$\psi$') Hadamard with itself: $H \otimes H = H^{\otimes 2}$
As discussed in the appendix notebook, this is given by:
$$\text{H}^{\otimes 2} = \frac{1}{2}\left[\begin{array}{cccc}
1 & 1 & 1 & 1 \\
1 & -1 & 1 & -1 \\
1 & 1 & -1 & -1 \\
1 & -1 & -1 & 1
\end{array}\right]$$
This is then the first matrix to consider when finding the matrix that represents the action of circuit ii).
```
# storing this in a numpy array:
H_2 = .5*np.matrix([[1, 1, 1, 1],[1, -1, 1, -1],[1, 1, -1, -1],[1, -1, -1, 1]])
print('H_2:')
print(H_2)
```
The next operation on the qubits is a CNOT controlled by line 2. This is given by the 4x4 matrix (also in the appendix notebook):
$$CNOT_2 = \left[\begin{array}{cccc}
1 & 0 & 0 & 0 \\
0 & 0 & 0 & 1 \\
0 & 0 & 1 & 0 \\
0 & 1 & 0 & 0
\end{array}\right]$$
This is then the second matrix to consider in finding the matrix that represents the action of circuit ii).
```
# storing this in a numpy array:
CNOT_2 = np.matrix([[1, 0, 0, 0],[0, 0, 0, 1],[0, 0, 1, 0],[0, 1, 0, 0]])
```
Finally, the set of parallel hadamard matrices as given by $H^{\otimes 2}$ is again applied to the two-qubit-space. With this, all matrices that contribute to the circuit's action have been found. Applying each operator to the state as one reads the circuit diagram from left to right, one finds: $(H^{\otimes 2})(CNOT_2)(H^{\otimes 2})\Psi = \Psi'$. The $(H^{\otimes 2})(CNOT_2)(H^{\otimes 2})$ term can be evaluated through matrix multiplication to a single 4x4 matrix that represents the entire circuit as an operator, let's call it 'A'.
```
A = H_2*CNOT_2*H_2
print(A)
```
This representation should look familiar, no?
```
print(CNOT_1)
```
Just to double check:
```
for key, val in Psi.items():
print(key, 'becomes...\n', A*val)
```
The action of i) and ii) are evidently the same then $\square$.
|
github_jupyter
|
### Verify Installation
```
import torch
# get Pytorch version
torch.__version__
# import torchvision
import torchvision
# get torchvision version
torchvision.__version__
# checking if cuda is available
torch.cuda.is_available()
# get number of cuda/gpu devices
torch.cuda.device_count()
# get cuda/gpu device id
torch.cuda.current_device()
# get cuda/gpu device name
torch.cuda.get_device_name(0)
```
### Tensor Data Type
```
# define a tensor with default data type
x = torch.ones(2, 2)
print(x)
print(x.dtype)
```
### Specify Data Type
```
# define a tensor
x = torch.ones(2, 2, dtype=torch.int8)
print(x)
print(x.dtype)
```
### Change Tensor data type
```
# define a tensor with type torch.uint8
x=torch.ones(1,dtype=torch.uint8)
print(x.dtype)
# change a tesnor data type
x=x.type(torch.float)
print(x.dtype)
```
### Converting Tensors to NumPy arrays
```
# define a tensor
x=torch.rand(2,2)
print(x)
print(x.dtype)
# convert tensor to numpy array
y=x.numpy()
print(y)
print(y.dtype)
```
### Converting NumPy arrays to Tensors
```
# import NumPy
import numpy as np
# define a NumPy array
x=np.zeros((2,2),dtype=np.float32)
print(x)
print(x.dtype)
# convert array to PyTorch Tensor
y=torch.from_numpy(x)
print(y)
print(y.dtype)
```
### Move Tensors between devices
```
# define a tensor
x=torch.tensor([1.5, 2])
print(x)
print(x.device)
# move tensor onto GPU
# define a cuda/gpu device
device = torch.device("cuda:0")
x = x.to(device)
print(x)
print(x.device)
# move tensor onto cpu
# define a cpu device
device = torch.device("cpu")
x = x.to(device)
print(x)
print(x.device)
# define a tensor on device
device = torch.device("cuda:0")
x = torch.ones(2,2, device=device)
print(x)
```
## Loading and Processing Data
```
from torchvision import datasets
# path to store data and/or load from
path2data="./data"
# loading training data
train_data=datasets.MNIST(path2data, train=True, download=True)
# extract data and targets
x_train, y_train=train_data.data,train_data.targets
print(x_train.shape)
print(y_train.shape)
# loading validation data
val_data=datasets.MNIST(path2data, train=False, download=True)
# extract data and targets
x_val,y_val=val_data.data, val_data.targets
print(x_val.shape)
print(y_val.shape)
```
### Display images
```
from torchvision import utils
import matplotlib.pyplot as plt
import numpy as np
%matplotlib inline
# First, add a dimension to tensor to become B*C*H*W
if len(x_train.shape)==3:
x_train=x_train.unsqueeze(1)
print(x_train.shape)
if len(x_val.shape)==3:
x_val=x_val.unsqueeze(1)
# make a grid of 40 images, 8 images per row
x_grid=utils.make_grid(x_train[:40], nrow=8, padding=2)
print(x_grid.shape)
# helper function to display images
def show(img):
    """Display a C*H*W image tensor with matplotlib."""
    # Tensor -> ndarray, then reorder axes from C*H*W to H*W*C as imshow expects.
    arr = np.transpose(img.numpy(), (1, 2, 0))
    plt.imshow(arr, interpolation='nearest')
# call helper function
show(x_grid)
```
### Transform Data
```
from torchvision import transforms
# loading MNIST training dataset
train_data=datasets.MNIST(path2data, train=True, download=True)
# define transformations
data_transform = transforms.Compose([transforms.RandomHorizontalFlip(p=1),
transforms.RandomVerticalFlip(p=1),
transforms.ToTensor(),
])
# get a sample image from training dataset
img = train_data[0][0]
# tranform sample image
img_tr=data_transform(img)
# convert tensor to numpy array
img_tr_np=img_tr.numpy()
# show original and transformed images
plt.subplot(1,2,1)
plt.imshow(img,cmap="gray")
plt.title("original")
plt.subplot(1,2,2)
plt.imshow(img_tr_np[0],cmap="gray");
plt.title("transformed")
# define transformations
data_transform = transforms.Compose([
transforms.RandomHorizontalFlip(1),
transforms.RandomVerticalFlip(1),
transforms.ToTensor(),])
# Loading MNIST training data with on-the-fly transformations
train_data=datasets.MNIST(path2data, train=True, download=True,
transform=data_transform )
```
### Wrap Tensors into Dataset
```
from torch.utils.data import TensorDataset
# wrap tensors into a dataset
train_ds = TensorDataset(x_train, y_train)
val_ds = TensorDataset(x_val, y_val)
for x,y in train_ds:
print(x.shape,y.item())
break
```
### Iterate Over Dataset
```
from torch.utils.data import DataLoader
# create a data loader from dataset
train_dl = DataLoader(train_ds, batch_size=8)
val_dl = DataLoader(val_ds, batch_size=8)
# iterate over batches
for xb,yb in train_dl:
print(xb.shape)
print(yb.shape)
break
# your training code will be here!
```
## Building Models
```
from torch import nn
# input tensor dimension 64*1000
input_tensor = torch.randn(64, 1000)
# linear layer with 1000 inputs and 100 outputs
linear_layer = nn.Linear(1000, 100)
# output of the linear layer
output = linear_layer(input_tensor)
print(output.size())
```
### Define models using nn.Sequential
```
from torch import nn
# define a two-layer model
model = nn.Sequential(
nn.Linear(4, 5),
nn.ReLU(), # relu is not shown in the figure.
nn.Linear(5, 1),)
print(model)
```
## Define models using nn.Module
```
import torch.nn.functional as F
class Net(nn.Module):
    """Small CNN for 28x28 grayscale digits: two conv/pool stages then two FC layers."""

    def __init__(self):
        super(Net, self).__init__()
        # Feature extractor: 1 -> 8 -> 16 channels, 5x5 kernels, stride 1.
        self.conv1 = nn.Conv2d(1, 8, 5, 1)
        self.conv2 = nn.Conv2d(8, 16, 5, 1)
        # Classifier head: flattened 4*4*16 features -> 100 -> 10 classes.
        self.fc1 = nn.Linear(4*4*16, 100)
        self.fc2 = nn.Linear(100, 10)

    def forward(self, x):
        out = F.max_pool2d(F.relu(self.conv1(x)), 2, 2)
        out = F.max_pool2d(F.relu(self.conv2(out)), 2, 2)
        out = out.view(-1, 4*4*16)
        out = self.fc2(F.relu(self.fc1(out)))
        # Log-probabilities over the 10 classes (pairs with nn.NLLLoss).
        return F.log_softmax(out, dim=1)
model = Net()
print(model)
```
### Move Model to Device
```
print(next(model.parameters()).device)
device = torch.device("cuda:0")
model.to(device)
print(next(model.parameters()).device)
```
### Show model summary
```
from torchsummary import summary
summary(model, input_size=(1, 28, 28))
```
## Loss Function
```
# define the negative log likelihood loss
loss_func = nn.NLLLoss(reduction="sum")
for xb, yb in train_dl:
# move batch to cuda device
xb=xb.type(torch.float).to(device)
yb=yb.to(device)
# get model output
out=model(xb)
# calculate loss value
loss = loss_func(out, yb)
print (loss.item())
break
# compute gradients
loss.backward()
```
## Optimizer
```
from torch import optim
# define Adam optimizer
opt = optim.Adam(model.parameters(), lr=1e-4)
# update model parameters
opt.step()
# set gradients to zero
opt.zero_grad()
```
## Training and Validation
```
import numpy as np
def metrics_batch(target, output):
    """Count how many rows of `output` predict (via argmax) their `target` class."""
    predicted = output.argmax(dim=1, keepdim=True)
    matches = predicted.eq(target.view_as(predicted))
    return matches.sum().item()
def loss_batch(loss_func, xb, yb, yb_h, opt=None):
    """Return (loss value, correct-prediction count) for one batch.

    If an optimizer is supplied, also backpropagate and update the model.
    Note: `xb` is accepted for interface compatibility but not used here.
    """
    batch_loss = loss_func(yb_h, yb)
    corrects = metrics_batch(yb, yb_h)
    # Training mode: backprop, step, then clear gradients for the next batch.
    if opt is not None:
        batch_loss.backward()
        opt.step()
        opt.zero_grad()
    return batch_loss.item(), corrects
def loss_epoch(model, loss_func, dataset_dl, opt=None):
    """Run the model over a full dataloader; return per-sample mean loss and metric.

    Relies on the module-level `device` for tensor placement and on
    `loss_batch` for the per-batch work (training if `opt` is given).
    """
    running_loss = 0.0
    running_metric = 0.0
    n_samples = len(dataset_dl.dataset)
    for xb, yb in dataset_dl:
        # Move the batch onto the compute device; inputs cast to float.
        xb = xb.type(torch.float).to(device)
        yb = yb.to(device)
        preds = model(xb)
        loss_b, metric_b = loss_batch(loss_func, xb, yb, preds, opt)
        running_loss += loss_b
        if metric_b is not None:
            running_metric += metric_b
    # Normalise by dataset size to obtain per-sample averages.
    return running_loss / n_samples, running_metric / n_samples
def train_val(epochs, model, loss_func, opt, train_dl, val_dl):
    """Train for `epochs` epochs, running a no-grad validation pass after each."""
    for epoch in range(epochs):
        # Training pass: parameters are updated via the optimizer.
        model.train()
        train_loss, train_metric = loss_epoch(model, loss_func, train_dl, opt)
        # Validation pass: eval mode, gradients disabled.
        model.eval()
        with torch.no_grad():
            val_loss, val_metric = loss_epoch(model, loss_func, val_dl)
        accuracy = 100 * val_metric
        print("epoch: %d, train loss: %.6f, val loss: %.6f, accuracy: %.2f" % (
            epoch, train_loss, val_loss, accuracy))
import numpy as np
# call train_val function
num_epochs=5
train_val(num_epochs, model, loss_func, opt, train_dl, val_dl)
```
## Store and Load Models
```
import torch
# define path2weights
path2weights="./models/weights.pt"
# store state_dict to file
torch.save(model.state_dict(), path2weights)
```
### Method 1
```
# define model: weights are randomly initiated
_model = Net()
# load weights from file
weights=torch.load(path2weights)
# set weights to model: weights are set with the store values
_model.load_state_dict(weights)
# set model in eval mode for deployment
_model.eval()
# model model to cuda device for accelerated computation
_model.to(device)
```
### Method 2
```
# define a path2model
path2model="./models/model.pt"
# store model and weights into local file
torch.save(model,path2model)
# define model: weights are randomly initiated
_model = Net()
# load model and weights from local file
_model=torch.load(path2model)
# set model in eval mode for deployment
_model.eval()
# move model to cuda device for accelerated computation
_model.to(device)
```
## Deploy Models
```
# x is a data point with C*H*W shape
n=100
x= x_val[n]
y=y_val[n]
print(x.shape)
plt.imshow(x.numpy()[0],cmap="gray")
# we use unsqueeze to expand dimensions to 1*C*H*W
x= x.unsqueeze(0)
# convert to torch.float32
x=x.type(torch.float)
# move to cuda device
x=x.to(device)
# get model output
output=_model(x)
# get predicted class
pred = output.argmax(dim=1, keepdim=True)
print (pred.item(),y.item())
```
|
github_jupyter
|
# Introduction to Python
An introduction to Python for middle and high school students using Python 3 syntax.

## Getting started
We're assuming that you already have Python 3.6 or higher installed. If not, go to Python.org to download the latest for your operating system. Verify that you have the correct version by opening a terminal or command prompt and running
```
$ python --version
Python 3.6.0
```
# Your First Program: Hello, World!
Open the Interactive DeveLopment Environment (IDLE) and write the famous Hello World program. Open IDLE and you'll be in an interactive shell.
```
print('Hello, World!')
```
Choose *File > New Window*. An empty window will appear with *Untitled* in the menu bar. Enter the following code into the new shell window. Choose *File > Save*. Save as `hello.py`, which is known as a python `module`. Choose *Run > Run module* to run the file
## Calculating with Python
Mathematical operators:
* Addition: +
* Subtraction: -
* Multiplication: *
Try these
* `3 * 4`
```
3*4
```
Division:
* Floating point `/`
* Integer `//`
Try these:
* `5/4`
* `1/0`
* `3//4`
* `5//4`
```
3//4
# Exponents
2**3
# Modulus
5%4
```
### Type function
There's lots more available via the standard library and third party packages. To see the type of the result, use the *type* function. For example `type(3//4)` returns `int`
## Order of Operations
Python reads left to right. Higher precedence operators are applied before lower precedence operators. Operators below are listed lowest precedence at the top.
| Operator | Description |
|----------------------------------------------|-----------------------------------------------------------|
| or | Boolean OR |
| and | Boolean AND |
| not | Boolean NOT |
| in, not in, is, is not, `<`, `<=`, `>`, `>=`, `!=`, `==` | Comparison, including membership tests and identity tests |
| `+`, `-` | Addition and Subtraction |
| `*`, `/`, `//`, `%` | Multiplication, division, integer division, remainder |
| `**` | Exponentiation |
Calculate the result of `5 + 1 * 4`.
We override the precedence using parentheses, which are evaluated from the innermost out.
Calculate the result of `(5+1) * 4`.
> Remember that multiplication and division always go before
addition and subtraction, unless parentheses are used to control
the order of operations.
```
(2 + 2) ** 3
```
## Variables
Variables are like labels so that we can refer to things by a recognizable name.
```
fred = 10 + 5
type(fred)
fred = 10 / 5
type(fred)
fred * 55 + fred
joe = fred * 55
joe
joe
fred
joe = fred
fred = joe
```
### Valid variable names
Variables begin with a letter followed by a combination of letters, numbers and underscores
* `jim`
* `other_jim`
* `other_jim99`
### Invalid variable names: don't meet requirements
* `symbol$notallowed`
* `5startswithnumber`
### Invalid variable names: reserved words
| Reserved words | | | | |
|----------------|----------|--------|----------|-------|
| None | continue | for | lambda | try |
| True | def | from | nonlocal | while |
| and | del | global | not | with |
| as | elif | if | or | yield |
| break | except | in | raise | |
### Referring to a previous result
You can use the `_` variable to refer to the result of a previous calculation when working in the shell.
```
ends_with_9 = 9
a = 6
b = 4
my_var = 7
num_apples * 65
doesntexist
```
## User Input
We can get keyboard input using the `input` function
```
name = input("What's your name? ")
print("Hi ", name)
```
## Strings
* Strings are immutable objects in python, meaning they can't be modified once created, but they can be used to create new strings.
* Strings should be surrounded with a single quote `'` or double quote `"`. The general rule is to use the single quote unless you plan to use something called *interpolation*
### Formatting
Strings support templating and formatting.
```
id("bar")
fred = "bar"
id(fred)
"this string is %s" % ('formatted')
"this string is also {message}. The message='{message}' can be used more than once".format(message='formatted')
# Called string concatenation
"this string is "+ 'concatenated'
## Conditionals
`if (condition):`
`elif (condition):`
`else (optional condition):`
aa = False
if aa:
print('a is true')
else:
print ('aa is not true')
aa = 'wasdf'
if aa == 'aa':
print('first condition')
elif aa == 'bb':
print('second condition')
else:
print('default condition')
```
## Data Structures
* Lists `[]`
Lists are orderings of things where each thing corresponds to an index starting at 0.
Example `[1, 2, 3]` where 1 is at index 0, 2 is at index 1 and 3 is at index 2.
* Tuples `()`
Tuples are like lists, only you can't
* Dictionaries `{}`
key value pairs
## Comprehension
Lists can be constructed using comprehension logic
```
[(a, a*2) for a in range(10)]
```
We can use conditionals as well
```
[(a, a*2) for a in range(10) if a < 8]
```
Additional topics
* python modules and packages
* functions
* classes and methods
* generators
* pip and the python packaging land
* virtualenvs
|
github_jupyter
|
Good morning! You have completed the math trail on car plate numbers in a somewhat (semi-)automated way.
Can you actually solve the same tasks with code? Read on and you will be amazed how empowering programming can be to help make mathematics learning more efficient and productive! :)
# Task
Given the incomplete car plate number `SLA9??2H`
Find the missing ?? numbers.
A valid Singapore car plate number typically starts with 3 letters, followed by 4 digits and ending with a 'check' letter.
For example, for the valid car plate number is 'SDG6136T',
- The first letter is 'S' for Singapore.
- The next two letters and the digits are used to compute the check letter, using the following steps:
- Ignoring the first letter 'S', the letters are converted to their positions in the alphabet. For example, 'D' is 4, 'G' is 7 and 'M' is 13.
- The converted letters and the digits form a sequence of 6 numbers. For example, 'DG6136' will give (4, 7, 6, 1, 3, 6).
- The sequence of 6 numbers is multiplied term by term by the sequence of 6 weights (9, 4, 5, 4, 3, 2) respectively, summed up and then divided by 19 to obtain the remainder.
- For example, '476136' will give 4x9 + 7x4 + 6x5 + 1x4 + 3x3 + 6x2 = 119, and this leaves a remainder of 5 after dividing by 19.
- The 'check' letter is obtained by referring to the following table. Thus the check letter corresponding to remainder 5 is T.
```
| Remainder | 'check' letter | Remainder | 'check' letter | Remainder | 'check' letter |
| 0 | A | 7 | R | 13 | H |
| 1 | Z | 8 | P | 14 | G |
| 2 | Y | 9 | M | 15 | E |
| 3 | X | 10 | L | 16 | D |
| 4 | U | 11 | K | 17 | C |
| 5 | T | 12 | J | 18 | B |
| 6 | S | | | | |
```
Reference: https://sgwiki.com/wiki/Vehicle_Checksum_Formula
Pseudocode
```
FOR i = 0 to 99
    Car_Plate = 'SLA9' + str(i) + '2H'
IF Check_Letter(Car_Plate) is True
print (Car_Plate) on screen
ENDIF
NEXT
```
```
# we need to store the mapping from A to 1, B to 2, etc.
# for the letters part of the car plate number
# a dictionary is good for this purpose
letter_map = {}
for i in range(26):  # exactly 26 letters; range(27) would also map chr(ord('Z')+1) == '[' to 27
    char = chr(ord('A') + i)
    letter_map[char] = i + 1
#print(letter_map) # this will output {'A':1, 'B':2, 'C':3, ..., 'Z':26}
# we also need to store the mapping from remainders to the check letter
# and we can also use a dictionary! :)
check_map = {0:'A', 1:'Z', 2:'Y', 3:'X', 4:'U', 5:'T', 6:'S', 7:'R', 8:'P', \
             9:'M', 10:'L', 11:'K', 12:'J', 13:'H', 14:'G', 15:'E', 16:'D', \
             17:'C', 18:'B'}
# we define a reusable Boolean function to generate the check letter and
# check if it matches the last letter of the car plate number
def check_letter(car_plate):
    """Return True if the last character of `car_plate` is the correct check letter.

    `car_plate` excludes the leading 'S': two letters, four digits, then the
    check letter, e.g. 'DG6136T'. The weighted digit sum mod 19 indexes the
    expected check letter.
    """
    # Self-contained lookup tables (previously fragile module-level globals):
    # letter -> alphabet position (A=1 .. Z=26)
    letters = {chr(ord('A') + i): i + 1 for i in range(26)}
    # weighted-sum remainder (mod 19) -> expected check letter
    checks = {0: 'A', 1: 'Z', 2: 'Y', 3: 'X', 4: 'U', 5: 'T', 6: 'S', 7: 'R',
              8: 'P', 9: 'M', 10: 'L', 11: 'K', 12: 'J', 13: 'H', 14: 'G',
              15: 'E', 16: 'D', 17: 'C', 18: 'B'}
    weights = [9, 4, 5, 4, 3, 2]
    total = 0
    for i in range(len(car_plate) - 1):
        if i < 2:  # first two characters are letters
            num = letters[car_plate[i]]
        else:      # remaining characters (except the check letter) are digits
            num = int(car_plate[i])
        total += num * weights[i]
    remainder = total % 19
    return checks[remainder] == car_plate[-1]
#main
car_plate = 'DG6136T' # you can use this to verify the given example
if check_letter(car_plate):
print('S' + car_plate, car_plate[3:5])
print()
for i in range(100): # this loop repeats 100 times for you! :)
car_plate = 'LA9' + str(i).zfill(2) + '2H' # 'LA9002H', 'LA9012H', ...
if check_letter(car_plate):
print('S' + car_plate, car_plate[3:5])
#main
for i in range(100):
car_plate = 'LA' + str(i).zfill(2) + '68Y'
if check_letter(car_plate):
print('S' + car_plate, car_plate[2:4])
'0'.zfill(2)
```
# Challenge
- How many car_plate numbers start with SMV and end with D?
```
#main
count = 0
for i in range(10000):
car_plate = 'MV' + str(i).zfill(4) + 'D'
if check_letter(car_plate):
count += 1
print(count)
#main
wanted = []
for i in range(10000):
car_plate = 'MV' + str(i).zfill(4) + 'D'
if check_letter(car_plate):
print('S' + car_plate, end=' ')
wanted.append('S' + car_plate)
print(len(wanted))
```
# More challenges!
Suggest one or more variations of problems you can solve with car plate numbers using the power of Python programming. Some ideas include:
* Check if a given car plate number is valid
* Which valid car plate numbers have a special property (eg prime number, contains at least two '8' digits, does not contain the lucky number 13, etc.)
* If there are the same number of available car plate numbers each series (eg SMV and SMW)
* (your idea here)
Submit a pull request with your ideas and/or code to contribute to learning Mathematics using programming to benefit the world! :)
```
```
# This is really more than car plate numbers!
You have just learned an application of mathematics called modulus arithmetic in generating check letters/digits. Do you know that actually the following are also applications of modulus arithmetic?
* Singapore NRIC numbers (http://www.ngiam.net/NRIC/NRIC_numbers.ppt)
* international ISBNs (https://en.wikipedia.org/wiki/International_Standard_Book_Number)
* credit card numbers (https://en.wikipedia.org/wiki/Luhn_algorithm)
* universal product codes (https://en.wikipedia.org/wiki/Universal_Product_Code)
Can you research on other applications modulus arithmetic has? Better still, contribute by submitting Python code to unleash the power of automation!
You can submit a pull request by doing one of the following:
- suggesting a new application for modulus arithmetic
- creating a new .py file
- uploading an existing .py file
We look forward to your pull requests! :)
|
github_jupyter
|
# Using a new function to evaluate or evaluating a new acquisition function
In this notebook we describe how to integrate a new fitness function to the testing framework as well as how to integrate a new acquisition function.
```
import numpy as np
import matplotlib.pyplot as plt
# add the egreedy module to the path (one directory up from this)
import sys, os
sys.path.append(os.path.realpath(os.path.pardir))
```
## New fitness function
The `perform_experiment` function in the `optimizer` class, used to carry out the optimisation runs (see its docstring and `run_all_experiments.py` for usage examples), imports a fitness **class**. This class, when instantiated is also callable. The class is imported from the `test_problems` module. Therefore, the easiest way to incorporate your own fitness function is to add it to the `test_problems` module by creating a python file in the `egreedy/test_problems/` directory and adding a line importing it into the namespace (see `egreedy/test_problems/__init__.py` for examples) so that it can be directly imported from `test_problems`.
If, for example, your fitness class is called `xSquared` and is located in the file `xs.py`, you would place the python file in the directory `egreedy/test_problems` and add the line:
```
from .xs import xSquared
```
to the `egreedy/test_problems/__init__.py` file.
We will now detail how to structure your fitness class and show the required class methods by creating a new fitness class for the function
\begin{equation}
f( \mathbf{x} ) = \sum_{i=1}^2 x_i^2,
\end{equation}
where $\mathbf{x} \in [-5, 5]^2.$
```
class xSquared:
    r"""Example fitness class.

    .. math::
        f(x) = \sum_{i=1}^2 x_i^2

    This demonstration class shows all the required attributes and
    functionality of the fitness function class.
    """

    def __init__(self):
        """Initialisation function.

        This is called when the class is instantiated and sets up its
        attributes: problem dimensionality, box constraints, known optima
        and the (optional) constraint function.
        """
        # problem dimensionality
        self.dim = 2
        # lower and upper bounds for each dimension (must be numpy.ndarray)
        self.lb = np.array([-5., -5.])
        self.ub = np.array([5., 5.])
        # location(s) of the optima, shape (n_optima, dim)
        # (fixed: previously np.array([0.]), which did not match dim == 2)
        self.xopt = np.array([[0., 0.]])
        # its/their fitness value(s)
        self.yopt = np.array([0.])
        # callable constraint function for the problem - should return
        # True if the argument value is **valid** - if no constraint function
        # is required then this can take the value of None
        self.cf = None

    def __call__(self, x):
        """Evaluate the function at one or more points.

        Parameters
        ----------
        x : numpy.ndarray
            A single point of shape (dim,) or a batch of shape (N, dim).

        Returns
        -------
        numpy.ndarray
            Function value(s) of shape (N,), e.g.

            >>> f = xSquared()
            >>> f(np.array([2., 2.]))
            array([8.])
        """
        # ensure the input is at least 2d: a one-dimensional vector is
        # reshaped to (1, dim) so both call styles share one code path
        x = np.atleast_2d(x)
        # sum of squares along each row
        return np.sum(np.square(x), axis=1)
```
This class can then either be placed in the directories discussed above and used for evaluating multiple techniques on it or used for testing purposes.
### Optimising the new test function with an acquistion function
The following code outlines how to optimise your newly created test function with the $\epsilon$-greedy with Pareto front selection ($\epsilon$-PF) algorithm.
```
from pyDOE2 import lhs
from egreedy.optimizer import perform_BO_iteration
# ---- instantiate the test problem
f = xSquared()
# ---- Generate testing data by Latin hypercube sampling across the domain
# rule of thumb: 2d initial samples for a d-dimensional problem
n_training = 2 * f.dim
# LHS sample in [0, 1]^2 and rescale to problem domain
Xtr = lhs(f.dim, n_training, criterion='maximin')
Xtr = (f.ub - f.lb) * Xtr + f.lb
# expensively evaluate and ensure shape is (n_training, 1)
Ytr = np.reshape(f(Xtr), (n_training, 1))
# ---- Select an acquisition function with optimiser.
# In this case we select e-greedy with Pareto front selection (e-PF)
# known as eFront.
#
# All the acquisition functions have the same parameters:
# lb : lower-bound constraints (numpy.ndarray)
# ub : upper-bound constraints (numpy.ndarray)
# acq_budget : max number of calls to the GP model
# cf : callable constraint function that returns True if
#      the argument vector is VALID. Optional, has a value of None
#      if not used
# acquisition_args : optional dictionary containing key:value pairs
#                    of arguments to a specific acquisition function.
#                    e.g. for an e-greedy method then the dict
#                    {'epsilon': 0.1} would dictate the epsilon value.
# e-greedy with Pareto front selection (e-PF), known as eFront
from egreedy.acquisition_functions import eFront
# instantiate the optimiser with a budget of 5000d and epsilon = 0.1
acq_budget = 5000 * f.dim
acquisition_args = {'epsilon': 0.1}
acq_func = eFront(lb=f.lb, ub=f.ub, cf=None, acq_budget=acq_budget,
                  acquisition_args=acquisition_args)
# ---- Perform the bayesian optimisation loop for a total budget of 20
# function evaluations (including those used for LHS sampling)
total_budget = 20
while Xtr.shape[0] < total_budget:
    # perform one iteration of BO:
    # this returns the new location and function value (Xnew, Ynew) as well
    # as the trained model used to select the new location
    Xnew, Ynew, model = perform_BO_iteration(Xtr, Ytr,f, acq_func, verbose=True)
    # augment the training data and repeat
    Xtr = np.concatenate((Xtr, np.atleast_2d(Xnew)))
    Ytr = np.concatenate((Ytr, np.atleast_2d(Ynew)))
    print('Best function value so far: {:g}'.format(np.min(Ytr)))
    print()
```
The plot below shows the difference between the best seen function value and the true minimum, i.e. $|f^\star - f_{min}|$, over each iteration.
```
# Plot |f* - f_min|: the gap between the true optimum (f.yopt) and the
# best observation so far, accumulated over iterations (log y-axis).
fig, ax = plt.subplots(1, 1, figsize=(8, 4))
ax.semilogy(np.minimum.accumulate(np.abs(Ytr - f.yopt)))
ax.set_xlabel('Iteration', fontsize=15)
# raw string: '\s' in '\star' is an invalid escape in a normal literal
ax.set_ylabel(r'$|f^\star - f_{min}|$', fontsize=15)
plt.show()
```
## New acquisition function
We now detail how to create your own acquisition function class and integrate it into the testing suite.
In a similar manner to the fitness function classes, the acquisition function classes are imported from the `egreedy.acquisition_functions` module, with the specific classes available determined by the `__init__.py` file in the same module.
If, for example, your acquisition function class is called `greed` and is located in the file `gr.py`, you would place the python file in the directory `egreedy/acquisition_functions` and add the line:
```
from .gr import greed
```
to the `egreedy/acquisition_functions/__init__.py` file.
The python file `egreedy/acquisition_functions/acq_func_optimisers.py` contains base classes for the acquisition function classes. We will now demonstrate how to implement two simple acquisition functions and then show how to optimise one of the test functions included in the suite.
The `BaseOptimiser` class is the base acquisition function class that implements the standard interface for acquisition function optimizers. It only contains an initialisation function with several arguments:
- lb: lower-bound constraint
- ub: upper-bound constraint
- acq_budget : maximum number of calls to the Gaussian Process
- cf : callable constraint function that returns True if the argument decision vector is VALID (optional, default value: None)
- acquisition_args : Optional dictionary containing additional arguments that are unpacked into key=value arguments for an internal acquisition function; e.g. {'epsilon':0.1}.
The `ParetoFrontOptimiser` class implements the base class as well as an additional function named `get_front(model)` that takes in a GPRegression model from GPy and approximates its Pareto front of model prediction and uncertainty. It returns the decision vectors belonging to the members of the front, an array containing their corresponding predicted values, and an array containing the prediction uncertainty.
We first create a simple acquisition function, extending the base class, that generates uniform samples in space and uses the Gaussian Process's mean prediction to select the best (lowest value) predicted location.
```
from egreedy.acquisition_functions.acq_func_optimisers import BaseOptimiser
class greedy_sample(BaseOptimiser):
    """Greedy function that uniformly samples the GP posterior
    and returns the location with the best (lowest) mean predicted value.
    """
    # note we do not need to implement an __init__ method because the
    # base class already does this. Here we will include a commented
    # version for clarity.
    # def __init__(self, lb, ub, acq_budget, cf=None, acquisition_args={}):
    #     self.lb = lb
    #     self.ub = ub
    #     self.cf = cf
    #     self.acquisition_args = acquisition_args
    #     self.acq_budget = acq_budget
    def __call__(self, model):
        """Returns the location with the best (lowest) predicted value
        after uniformly sampling decision space.
        """
        # generate samples across the box-constrained domain.
        # BUGFIX: use self.acq_budget -- the bare name acq_budget only
        # resolved via a notebook global defined in an earlier cell.
        X = np.random.uniform(self.lb, self.ub,
                              size=(self.acq_budget, self.lb.size))
        # evaluate them with the gp (mean and marginal variance)
        mu, sigmasqr = model.predict(X, full_cov=False)
        # find the index of the best (lowest) posterior mean
        argmin = np.argmin(mu.flat)
        # return the best found value
        return X[argmin, :]
from egreedy.acquisition_functions.acq_func_optimisers import ParetoFrontOptimiser
class greedy_pfront(ParetoFrontOptimiser):
    """Exploitative method that calculates the approximate Pareto front
    of a GP model and returns the Pareto set member that has the best
    (lowest) predicted value.
    """
    # the inherited __init__ is sufficient; no extra state is needed.
    def __call__(self, model):
        """Returns the location with the best (lowest) predicted value
        from the approximate Pareto set of the GP's predicted value and
        its corresponding uncertainty.
        """
        # locations, posterior means and uncertainties of the members
        # of the approximate Pareto front
        locations, means, uncertainties = self.get_front(model)
        # index of the member with the lowest predicted value
        best = np.argmin(means.flat)
        return locations[best, :]
```
We now create a similar script to the one used above in the function example. This time we will optimise the `push4` function included in the test suite and load the training data associated with the first run carried out by each of the techniques evaluated in the paper.
Note that in this case the training data contains arguments to be passed into the function during instantiation. This is because the `push4` runs are evaluated on a *problem instance* basis.
```
from egreedy.optimizer import perform_BO_iteration
from egreedy import test_problems
# ---- optimisation run details
problem_name = 'push4'
run_no = 1
acq_budget = 5000 * 4  # because the problem dimensionality is 4
total_budget = 25
# ---- load the training data for this (problem, run) pair
data_file = f'../training_data/{problem_name:}_{run_no:}.npz'
with np.load(data_file, allow_pickle=True) as data:
    Xtr = data['arr_0']
    Ytr = data['arr_1']
    # optional per-instance arguments stored with the run (push4 is
    # evaluated on a problem-instance basis)
    if 'arr_2' in data:
        f_optional_arguments = data['arr_2'].item()
    else:
        f_optional_arguments = {}
# ---- instantiate the test problem
f_class = getattr(test_problems, problem_name)
f = f_class(**f_optional_arguments)
# ---- instantiate the acquisition function we created earlier.
# BUGFIX: define acquisition_args explicitly -- previously this line
# silently relied on the dict left over from the e-PF example cell.
acquisition_args = {'epsilon': 0.1}
acq_func = greedy_sample(lb=f.lb, ub=f.ub, cf=None, acq_budget=acq_budget,
                         acquisition_args=acquisition_args)
while Xtr.shape[0] < total_budget:
    # perform one iteration of BO:
    # this returns the new location and function value (Xnew, Ynew) as well
    # as the trained model used to select the new location
    Xnew, Ynew, model = perform_BO_iteration(Xtr, Ytr, f, acq_func, verbose=True)
    # augment the training data and repeat
    Xtr = np.concatenate((Xtr, np.atleast_2d(Xnew)))
    Ytr = np.concatenate((Ytr, np.atleast_2d(Ynew)))
    print('Best function value so far: {:g}'.format(np.min(Ytr)))
    print()
fig, ax = plt.subplots(1, 1, figsize=(8, 4))
ax.plot(np.minimum.accumulate(np.abs(Ytr - f.yopt)))
ax.set_xlabel('Iteration', fontsize=15)
# raw string: '\s' in '\star' is an invalid escape in a normal literal
ax.set_ylabel(r'$|f^\star - f_{min}|$', fontsize=15)
plt.show()
```
|
github_jupyter
|
```
from dgpsi import dgp, kernel, combine, lgp, path, emulator, Poisson, Hetero, NegBin
import numpy as np
import matplotlib.pyplot as plt
```
# Example 1 on heteroskedastic Gaussian likelihood
```
n=12
X=np.linspace(0,1,n)[:,None]
#Create some replications of input positions so that each input position will have six different outputs. Note that SI has linear complexity with number of replications.
for i in range(5):
    X=np.concatenate((X,np.linspace(0,1,n)[:,None]),axis=0)
f1= lambda x: -1. if x<0.5 else 1. #True mean function, which is a step function
f2= lambda x: np.exp(1.5*np.sin((x-0.3)*7.)-6.5) #True variance function, which has higher values around 0.5 but low values around boundaries
Y=np.array([np.random.normal(f1(x),np.sqrt(f2(x))) for x in X]) #Generate stochastic outputs according to f1 and f2
z=np.linspace(0,1.,200)[:,None]
Yz=np.array([f1(x) for x in z]).flatten()
plt.plot(z,Yz) #Plot true mean function
plt.scatter(X,Y,color='r')
#Create a 2-layered DGP + Hetero model: the two layer-2 GPs feed the
#heteroskedastic Gaussian likelihood (mean and log-variance, see the
#column extraction below)
layer1=[kernel(length=np.array([0.5]),name='matern2.5')]
layer2=[kernel(length=np.array([0.2]),name='matern2.5',scale_est=1,connect=np.arange(1)),
        kernel(length=np.array([0.2]),name='matern2.5',scale_est=1,connect=np.arange(1))]
layer3=[Hetero()]
#Construct the DGP + Hetero model
all_layer=combine(layer1,layer2,layer3)
m=dgp(X,[Y],all_layer)
#Train the model
m.train(N=500)
#Construct the emulator
final_layer_obj=m.estimate()
emu=emulator(final_layer_obj)
#Make predictions across all layers so we can extract predictions for the mean and variance functions
mu,var=emu.predict(z, method='mean_var',full_layer=True)
#Visualize the overall model prediction (last layer) with 2-sigma bands
s=np.sqrt(var[-1])
u=mu[-1]+2*s
l=mu[-1]-2*s
p=plt.plot(z,mu[-1],color='r',alpha=1,lw=1)
p1=plt.plot(z,u,'--',color='g',lw=1)
p1=plt.plot(z,l,'--',color='g',lw=1)
plt.scatter(X,Y,color='black')
plt.plot(z,Yz)
#Visualize the prediction for the mean function (column 0 of the penultimate layer)
mu_mean=mu[-2][:,0]
var_mean=var[-2][:,0]
s=np.sqrt(var_mean)
u=mu_mean+2*s
l=mu_mean-2*s
p=plt.plot(z,mu_mean,color='r',alpha=1,lw=1)
p1=plt.plot(z,u,'--',color='g',lw=1)
p1=plt.plot(z,l,'--',color='g',lw=1)
plt.plot(z,Yz,color='black',lw=1)
#Visualize the prediction for the log(variance) function (column 1 of the penultimate layer)
mu_var=mu[-2][:,1]
var_var=var[-2][:,1]
s=np.sqrt(var_var)
u=mu_var+2*s
l=mu_var-2*s
p=plt.plot(z,mu_var,color='r',alpha=1,lw=1)
p1=plt.plot(z,u,'--',color='g',lw=1)
p1=plt.plot(z,l,'--',color='g',lw=1)
plt.plot(z,np.array([np.log(f2(x)) for x in z]).reshape(-1,1),color='black',lw=1)
```
# Example 2 on heteroskedastic Gaussian likelihood
```
#Load and visualize the motorcycle dataset
X=np.loadtxt('./mc_input.txt').reshape(-1,1)
Y=np.loadtxt('./mc_output.txt').reshape(-1,1)
#Min-max scale the input to [0,1] and standardise the output
X=(X-np.min(X))/(np.max(X)-np.min(X))
Y=(Y-Y.mean())/Y.std()
plt.scatter(X,Y)
#Construct a 2-layered DGP + Hetero model (two layer-2 GPs feed the
#heteroskedastic likelihood, as in Example 1)
layer1=[kernel(length=np.array([0.5]),name='sexp')]
layer2=[]
for _ in range(2):
    k=kernel(length=np.array([0.2]),name='sexp',scale_est=1,connect=np.arange(1))
    layer2.append(k)
layer3=[Hetero()]
all_layer=combine(layer1,layer2,layer3)
m=dgp(X,[Y],all_layer)
#Train the model
m.train(N=500)
#Construct the emulator
final_layer_obj=m.estimate()
emu=emulator(final_layer_obj)
#Make predictions over [0,1]
z=np.linspace(0,1,100)[:,None]
mu,var=emu.predict(z, method='mean_var')
#Visualize the predictions with 2-sigma bands
s=np.sqrt(var)
u=mu+2*s
l=mu-2*s
p=plt.plot(z,mu,color='r',alpha=1,lw=1)
p1=plt.plot(z,u,'--',color='g',lw=1)
p1=plt.plot(z,l,'--',color='g',lw=1)
plt.scatter(X,Y,color='black')
```
# Example 3 on Poisson likelihood
```
#Generate some data with replications
n=10
X=np.linspace(0,.3,n)[:,None]
for _ in range(4):
    X=np.concatenate((X,np.linspace(0,.3,n)[:,None]),axis=0)
#The region (0.35, 1] is only sampled once (no replications)
X=np.concatenate((X,np.linspace(0.35,1,n)[:,None]),axis=0)
f= lambda x: np.exp(np.exp(-1.5*np.sin(1/((0.7*0.8*(1.5*x+0.1)+0.3)**2)))) #True Poisson mean function
Y=np.array([np.random.poisson(f(x)) for x in X]).reshape(-1,1)
z=np.linspace(0,1.,200)[:,None]
Yz=np.array([f(x) for x in z]).flatten()
test_Yz=np.array([np.random.poisson(f(x)) for x in z]).reshape(-1,1) #generate some testing output data
plt.plot(z,Yz)
plt.scatter(X,Y,color='r')
#Train a GP + Poisson model
layer1=[kernel(length=np.array([0.5]),name='matern2.5',scale_est=1)]
layer2=[Poisson()]
all_layer=combine(layer1,layer2)
m=dgp(X,[Y],all_layer)
m.train(N=500)
#Visualize the results
final_layer_obj=m.estimate()
emu=emulator(final_layer_obj)
mu,var=emu.predict(z, method='mean_var',full_layer=True) #Make mean-variance prediction
samp=emu.predict(z, method='sampling') #Draw some samples to obtain the quantiles of the overall model
quant=np.quantile(np.squeeze(samp), [0.05,0.5,0.95],axis=1) #Compute sample-based quantiles
fig, (ax1, ax2) = plt.subplots(1,2, figsize=(15,4))
ax1.set_title('Predicted and True Poisson Mean')
ax1.plot(z,Yz,color='black')
ax1.plot(z,mu[-1],'--',color='red',alpha=0.8,lw=3)
ax1.plot(z,quant[0,:],'--',color='b',lw=1)
ax1.plot(z,quant[1,:],'--',color='b',lw=1)
ax1.plot(z,quant[2,:],'--',color='b',lw=1)
#The penultimate layer holds the latent GP, i.e. the logged Poisson mean
mu_gp, var_gp=mu[-2], var[-2]
s=np.sqrt(var_gp)
u,l =mu_gp+2*s, mu_gp-2*s
ax2.set_title('Predicted and True logged Poisson Mean')
ax2.plot(z,mu_gp,color='r',alpha=1,lw=1)
ax2.plot(z,u,'--',color='g',lw=1)
ax2.plot(z,l,'--',color='g',lw=1)
ax2.plot(z,np.log(Yz),color='black',lw=1)
print('The negative log-likelihood of predictions is', emu.nllik(z,test_Yz)[0])
#Train a 2-layered DGP + Poisson model
layer1=[kernel(length=np.array([0.5]),name='matern2.5')]
layer2=[kernel(length=np.array([0.1]),name='matern2.5',scale_est=1,connect=np.arange(1))]
layer3=[Poisson()]
all_layer=combine(layer1,layer2,layer3)
m=dgp(X,[Y],all_layer)
m.train(N=500)
#Visualize the results
final_layer_obj=m.estimate()
emu=emulator(final_layer_obj)
mu,var=emu.predict(z, method='mean_var',full_layer=True) #Make mean-variance prediction
samp=emu.predict(z, method='sampling') #Draw some samples to obtain the quantiles of the overall model
quant=np.quantile(np.squeeze(samp), [0.05,0.5,0.95],axis=1) #Compute sample-based quantiles
fig, (ax1, ax2) = plt.subplots(1,2, figsize=(15,4))
ax1.set_title('Predicted and True Poisson Mean')
ax1.plot(z,Yz,color='black')
ax1.plot(z,mu[-1],'--',color='red',alpha=0.8,lw=3)
ax1.plot(z,quant[0,:],'--',color='b',lw=1)
ax1.plot(z,quant[1,:],'--',color='b',lw=1)
ax1.plot(z,quant[2,:],'--',color='b',lw=1)
mu_gp, var_gp=mu[-2], var[-2]
s=np.sqrt(var_gp)
u,l =mu_gp+2*s, mu_gp-2*s
ax2.set_title('Predicted and True logged Poisson Mean')
ax2.plot(z,mu_gp,color='r',alpha=1,lw=1)
ax2.plot(z,u,'--',color='g',lw=1)
ax2.plot(z,l,'--',color='g',lw=1)
ax2.plot(z,np.log(Yz),color='black',lw=1)
print('The negative log-likelihood of predictions is', emu.nllik(z,test_Yz)[0])
```
# Example 4 on Negative Binomial likelihood
The Negative Binomial pmf in dgpsi is defined by
$$p_Y(y;\mu,\sigma)=\frac{\Gamma(y+\frac{1}{\sigma})}{\Gamma(1/{\sigma})\Gamma(y+1)}\left(\frac{\sigma\mu}{1+\sigma\mu}\right)^y\left(\frac{1}{1+\sigma\mu}\right)^{1/{\sigma}}$$
with mean $0<\mu<\infty$ and dispersion $0<\sigma<\infty$, which correspond to numpy's negative binomial parameters $n$ and $p$ via $n=1/\sigma$ and $p=1/(1+\mu\sigma)$.
```
#Generate some data from the Negative Binomial distribution.
n=30
X=np.linspace(0,1,n)[:,None]
for _ in range(5):
    X=np.concatenate((X,np.linspace(0,1,n)[:,None]),axis=0)
f1= lambda x: 1/np.exp(2) if x<0.5 else np.exp(2) #True mean function
f2= lambda x: np.exp(6*x**2-3) #True dispersion function
#numpy parameterisation: n=1/sigma and p=1/(1+mu*sigma), see the pmf above
Y=np.array([np.random.negative_binomial(1/f2(x),1/(1+f1(x)*f2(x))) for x in X]).reshape(-1,1)
Xt=np.linspace(0,1.,200)[:,None]
Yt=np.array([f1(x) for x in Xt]).flatten()
plt.plot(Xt,Yt)
plt.scatter(X,Y,color='r')
#Train a 2-layered DGP (one GP in the first layer and two in the second corresponding to the mean and dispersion parameters) + NegBin model
layer1=[kernel(length=np.array([0.5]),name='matern2.5')]
layer2=[kernel(length=np.array([0.02]),name='matern2.5',scale_est=1,connect=np.arange(1)),
        kernel(length=np.array([0.02]),name='matern2.5',scale_est=1,connect=np.arange(1))]
layer3=[NegBin()]
all_layer=combine(layer1,layer2,layer3)
m=dgp(X,[Y],all_layer)
m.train(N=500)
#Visualize the results
final_layer_obj=m.estimate()
emu=emulator(final_layer_obj)
mu,var=emu.predict(Xt, method='mean_var',full_layer=True) #Make mean-variance prediction
samp=emu.predict(Xt, method='sampling') #Draw some samples to obtain the quantiles of the overall model
quant=np.quantile(np.squeeze(samp), [0.05,0.5,0.95],axis=1) #Compute sample-based quantiles
fig, (ax1, ax2, ax3) = plt.subplots(1,3, figsize=(15,4))
ax1.set_title('Predicted and True NegBin Mean')
ax1.plot(Xt,Yt,color='black')
ax1.plot(Xt,mu[-1],'--',color='red',alpha=0.8,lw=3)
ax1.plot(Xt,quant[0,:],'--',color='b',lw=1)
ax1.plot(Xt,quant[1,:],'--',color='b',lw=1)
ax1.plot(Xt,quant[2,:],'--',color='b',lw=1)
#Column 0 of the penultimate layer is the latent GP for the logged mean
mu_gp, var_gp=mu[-2][:,0], var[-2][:,0]
s=np.sqrt(var_gp)
u,l =mu_gp+2*s, mu_gp-2*s
ax2.set_title('Predicted and True logged NegBin Mean')
ax2.plot(Xt,mu_gp,color='r',alpha=1,lw=1)
ax2.plot(Xt,u,'--',color='g',lw=1)
ax2.plot(Xt,l,'--',color='g',lw=1)
ax2.plot(Xt,np.log(Yt),color='black',lw=1)
#Column 1 is the latent GP for the logged dispersion
mu_gp, var_gp=mu[-2][:,1], var[-2][:,1]
s=np.sqrt(var_gp)
u,l =mu_gp+2*s, mu_gp-2*s
ax3.set_title('Predicted and True logged NegBin Dispersion')
ax3.plot(Xt,mu_gp,color='r',alpha=1,lw=1)
ax3.plot(Xt,u,'--',color='g',lw=1)
ax3.plot(Xt,l,'--',color='g',lw=1)
ax3.plot(Xt,np.array([np.log(f2(x)) for x in Xt]).reshape(-1,1),color='black',lw=1)
```
|
github_jupyter
|
## Sentiment Analysis - Tweets
I have downloaded a dataset of tweets from Analytics Vidhya. I'll be implementing my own sentiment analysis trainer using this dataset and a bunch of tools that I learnt recently.
```
import pandas as pd
import spacy
import numpy as np
# medium English pipeline: ships with word vectors (needed for token.vector later)
nlp = spacy.load('en_core_web_md')
dataset = 'datasets/tweets.csv'
dataframe = pd.read_csv(dataset)
dataframe.head()
```
Let's just use spacy to tokenize, remove stop words and generate vectors for the rest
```
def tokenize(text):
    """Tokenise *text* with spaCy and keep only informative tokens.

    Stop words, punctuation, digits, whitespace and out-of-vocabulary
    tokens are discarded. Returns None (instead of an empty list) when
    nothing survives, so callers can drop those rows via .notna().
    """
    kept = [
        tok
        for tok in nlp(text)
        if not (tok.is_stop or tok.is_punct or tok.is_digit
                or tok.is_space or tok.is_oov)
    ]
    return kept if kept else None
dataframe["tokens"] = dataframe["text"].apply(tokenize)
dataframe["tokens"]
# tokenize returns None for tweets with no informative tokens; drop those rows
dataframe = dataframe[dataframe["tokens"].notna()]
dataframe
from sklearn.preprocessing import LabelEncoder
# encode the airline_sentiment string labels as integer class targets
target = LabelEncoder().fit_transform(dataframe["airline_sentiment"])
target.shape
```
## Spacy Vectors
I am going to try two approaches to generating vectors. The first is a lazy approach. I'll just assume the tweet is a valid english sentence (which it certainly is not) and generate a vector using spacy. The second is where I will clean up the tweet and take the mean of the vectors for each remnant token.
```
def vectorise(text):
    """Return spaCy's whole-document vector for the raw tweet text."""
    return nlp(text).vector

dataframe["vectors"] = dataframe["text"].apply(vectorise)
def mean_vector_for_tokens(list_of_tokens):
    """Average the word vectors of the given tokens.

    Returns None for an empty token list (so pandas marks the row as
    missing); otherwise the element-wise mean of the token vectors.
    """
    vectors = [tok.vector for tok in list_of_tokens]
    if not vectors:
        return None
    return np.mean(vectors, axis=0)
dataframe["mv"] = dataframe["tokens"].apply(mean_vector_for_tokens)
# expand each vector into one column per dimension so sklearn can consume it
train_set = dataframe["mv"].apply(pd.Series)
train_set.shape
train_set_2 = dataframe["vectors"].apply(pd.Series)
train_set_2.shape
from sklearn.model_selection import train_test_split
# stratify keeps the class proportions identical in the train and validation splits
x_train, x_valid, y_train, y_valid = train_test_split(train_set, target, random_state=20, stratify=target)
print(x_train.shape, y_train.shape)
print(x_valid.shape, y_valid.shape)
x_train2, x_valid2, y_train2, y_valid2 = train_test_split(train_set_2, target, random_state=20, stratify=target)
print(x_train2.shape, y_train2.shape)
print(x_valid2.shape, y_valid2.shape)
```
## Classifier Approaches
I am going to try three different classifier models with the above two vectors and see how they perform.
1. Logistic Regression
2. Decision Tree Classifier
3. Simple SVM Classifier
```
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
# naming convention throughout: suffix 1 = token mean vectors ("mv"),
# suffix 2 = whole-tweet spaCy vectors ("vectors")
lgmodel1 = LogisticRegression(max_iter=1000)
lgmodel1.fit(x_train, y_train)
lgmodel2 = LogisticRegression(max_iter=1000)
lgmodel2.fit(x_train2, y_train2)
predictions = {
    "train1": lgmodel1.predict(x_train),
    "valid1": lgmodel1.predict(x_valid),
    "train2": lgmodel2.predict(x_train2),
    "valid2": lgmodel2.predict(x_valid2),
}
# train vs valid accuracies side by side to eyeball overfitting
accuracy_lg = {
    "train1": accuracy_score(y_train, predictions["train1"]),
    "valid1": accuracy_score(y_valid, predictions["valid1"]),
    "train2": accuracy_score(y_train2, predictions["train2"]),
    "valid2": accuracy_score(y_valid2, predictions["valid2"])
}
accuracy_lg
from sklearn.tree import DecisionTreeClassifier
dtcmodel1 = DecisionTreeClassifier()
dtcmodel1.fit(x_train, y_train)
dtcmodel2 = DecisionTreeClassifier()
dtcmodel2.fit(x_train2, y_train2)
predictions = {
    "train1": dtcmodel1.predict(x_train),
    "valid1": dtcmodel1.predict(x_valid),
    "train2": dtcmodel2.predict(x_train2),
    "valid2": dtcmodel2.predict(x_valid2),
}
accuracy_dtc = {
    "train1": accuracy_score(y_train, predictions["train1"]),
    "valid1": accuracy_score(y_valid, predictions["valid1"]),
    "train2": accuracy_score(y_train2, predictions["train2"]),
    "valid2": accuracy_score(y_valid2, predictions["valid2"])
}
accuracy_dtc
from sklearn import svm
svcmodel1 = svm.SVC()
svcmodel1.fit(x_train, y_train)
svcmodel2 = svm.SVC()
svcmodel2.fit(x_train2, y_train2)
predictions = {
    "train1": svcmodel1.predict(x_train),
    "valid1": svcmodel1.predict(x_valid),
    "train2": svcmodel2.predict(x_train2),
    "valid2": svcmodel2.predict(x_valid2),
}
accuracy_svc = {
    "train1": accuracy_score(y_train, predictions["train1"]),
    "valid1": accuracy_score(y_valid, predictions["valid1"]),
    "train2": accuracy_score(y_train2, predictions["train2"]),
    "valid2": accuracy_score(y_valid2, predictions["valid2"])
}
accuracy_svc
```
## Results
From the above runs, we see that the best case performance is only about 80% accurate. Decision Tree seems to overfit based on how well it performs on the training data. On the other hand, I don't really know if I need to pass other parameters to improve its performance at this time. I'll park that for later.
We know that we have taken an extremely simple approach here. The vector generation is actually in vain. It removes quite a bit of information from the tweet text itself and doesn't take the meta data into account. But before exploring meta data, I am going to repeat this with a simple Tf-IDf vectoriser and then again with Word2Vec to see how they perform given just the above information.
### Summary of the experiment so far
Approach |Logistic Regression | Decision Tree | SVM
----------------|-------|-------|-----------------------
Tweet as Vector | 76.5% | 63.8% | 77.9%
Tokens Vector | 79.7% | 63.9% | 80%
|
github_jupyter
|
<a href="https://colab.research.google.com/github/mlelarge/dataflowr/blob/master/Notebooks/02_backprop_full_colab.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Simple implementation of backprop
Here we implement a simple backpropagation algorithm with `numpy` for the following problem:
We generate points $(x_i,y_i)$ where $y_i = \exp(w^* x_i + b^*)$, i.e. $y_i$ is obtained by applying a deterministic function to $x_i$ with parameters $w^*$ and $b^*$. Our goal is to recover the parameters $w^*$ and $b^*$ from the observations $(x_i,y_i)$.
To do this, we use SGD to minimize $\sum_i \left(y_i - \exp(w x_i + b)\right)^2$ with respect to $w$ and $b$.
```
import numpy as np
import matplotlib.pyplot as plt
# ground-truth parameters w* and b* that SGD should recover below
w, b = 0.5, 2
xx = np.arange(0,1,.01)
yy = np.exp(w*xx+b)
plt.plot(yy)
```
Following what we just saw in the course, you need to implement each of the basic operations: `(.*w), (.+b), exp(.)` with a forward method, a backward method and a step method.
```
class add_bias(object):
    """Bias node: forward computes x + b, with b trainable by SGD."""
    def __init__(self, b):
        # trainable bias
        self.b = b
    def forward(self, x):
        return x + self.b
    def backward(self, grad):
        # d(x+b)/db = d(x+b)/dx = 1, so the incoming gradient is both
        # stored for the parameter update and propagated backward as-is
        self.grad = grad
        return self.grad
    def step(self, learning_rate):
        # one SGD update on the bias
        self.b = self.b - learning_rate * self.grad
class multiplication_weight(object):
    """Weight node: forward computes w * x, with w trainable by SGD."""
    def __init__(self, w):
        # initialize with a weight w
        self.w = w
    def forward(self, x):
        # cache the input: it is needed for the weight gradient
        self.saved_x = x
        return self.w*x
    def backward(self, grad):
        # gradient w.r.t. the weight (used by step): dL/dw = x * grad
        self.grad = self.saved_x*grad
        # BUGFIX: the gradient propagated backward must be the gradient
        # w.r.t. the *input*, dL/dx = w * grad (the original returned the
        # weight gradient; unnoticed here only because this node is first
        # in the chain, so its backward output was never consumed)
        return self.w*grad
    def step(self, learning_rate):
        # one SGD update on the weight
        self.w -= learning_rate*self.grad
class my_exp(object):
    """Exponential node: forward computes exp(x); no trainable parameters."""
    def forward(self, x):
        # cache exp(x), which doubles as its own derivative
        self.saved_exp = np.exp(x)
        return np.exp(x)
    def backward(self, grad):
        # chain rule: d exp(x)/dx = exp(x)
        return self.saved_exp * grad
    def step(self, learning_rate):
        # nothing to train
        pass
```
Now, you will need to compose sequentially these operations and here you need to code a class composing operations. This class will have a forward, a backward and a step method and also a compute_loss method.
```
class my_composition(object):
    """Sequential composition of elementary operations.

    Each layer must expose forward/backward/step; layers are stored in
    forward (input-to-output) order.
    """
    def __init__(self, layers):
        self.layers = layers
    def forward(self, x):
        """Push x through every layer in order and return the output."""
        out = x
        for op in self.layers:
            out = op.forward(out)
        return out
    def compute_loss(self, y, y_est):
        """L2 loss (y - y_est)^2.

        Also caches the gradient of the loss w.r.t. the *first*
        argument (2*(y - y_est)) for the subsequent backward() call.
        """
        residual = y - y_est
        self.loss_grad = 2 * residual
        return residual ** 2
    def backward(self):
        """Backpropagate the cached loss gradient through the layers in reverse."""
        grad = self.loss_grad
        for op in reversed(self.layers):
            grad = op.backward(grad)
    def step(self, learning_rate):
        """Apply one SGD update to every layer's parameters."""
        for op in self.layers:
            op.step(learning_rate)
```
Now you need to code the 'training' loop. Keep track of the loss, weight and bias computed at each epoch.
```
# model: exp(w*x + b) with w and b initialised to 1
my_fit = my_composition([multiplication_weight(1), add_bias(1), my_exp()])
learning_rate = 1e-4
losses = []
ws = []
bs = []
for i in range(5000):
    # take a random index; BUGFIX: the original randint(1, len(xx))
    # silently never sampled the first data point (high is exclusive,
    # but low must be 0 to include index 0)
    j = np.random.randint(0, len(xx))
    # you can compare with deterministic cycling:
    # j = i % len(xx)
    # compute the estimated value of y with the current parameters
    y_est = my_fit.forward(xx[j])
    # compute the loss and save it.  compute_loss caches the gradient
    # w.r.t. its FIRST argument, so the estimate must be passed first
    # for the later step() to descend (not ascend) the loss
    loss = my_fit.compute_loss(y_est, yy[j])
    losses.append(loss)
    # backprop, update the parameters and record their trajectory
    my_fit.backward()
    my_fit.step(learning_rate)
    ws.append(my_fit.layers[0].w)
    bs.append(my_fit.layers[1].b)
my_fit.layers[0].w
my_fit.layers[1].b
plt.plot(losses)
plt.plot(bs)
plt.plot(ws)
```
|
github_jupyter
|
# Sitios dinámicos y Selenium
```
import requests
from bs4 import BeautifulSoup
url = 'https://www.latam.com/es_co/apps/personas/booking?fecha1_dia=13&fecha1_anomes=2020-10&auAvailability=1&ida_vuelta=ida&vuelos_origen=Bogot%C3%A1&from_city1=BOG&vuelos_destino=Miami&to_city1=BUE&flex=1&vuelos_fecha_salida_ddmmaaaa=06/09/2020&cabina=Y&nadults=1&nchildren=0&ninfants=0&cod_promo=&stopover_outbound_days=0&stopover_inbound_days=0&application=#/'
r = requests.get(url)
r.status_code
# parse the static HTML; the flight results are rendered client-side by JS,
# so they will not appear here -- hence the switch to Selenium below
s = BeautifulSoup(r.text, 'lxml')
print(s.prettify())
```
Vemos que la respuesta de la página no contiene la información que necesitamos, ya que la misma aparece después de ejecutar código JS que está en la respuesta.
# SELENIUM
Selenium es una herramienta que nos permitirá controlar un navegador y podremos utilizar las funcionalidades del motor de JS para cargar el contenido que no viene en el HTML de la página. Para esto necesitamos el módulo `webdriver`.
```
from selenium import webdriver
```
## Paso 1: Instanciar un **driver** del navegador
```
# Browser options: run Chrome in incognito mode
options = webdriver.ChromeOptions()
options.add_argument('--incognito')
# instantiate the driver pointing at the locally downloaded chromedriver binary
driver = webdriver.Chrome(executable_path='../chrome-driver/chromedriver', options=options)
driver
```
## Paso 2: Hacer que el navegador cargue la página web.
```
# load the page in the controlled browser; its JS runs and renders the flight list
driver.get(url)
```
## Paso 3: Extraer la información de la página.
```
# each flight result is an <li class="flight"> element
vuelos = driver.find_elements_by_xpath('//li[@class="flight"]')
vuelos
vuelo = vuelos[0]
vuelo
# departure time
vuelo.find_element_by_xpath('.//div[@class="departure"]/time').get_attribute('datetime')
# arrival time
vuelo.find_element_by_xpath('.//div[@class="arrival"]/time').get_attribute('datetime')
# flight duration (visible text)
vuelo.find_element_by_xpath('.//span[@class="duration"]/time').text
# same duration via the datetime attribute (as the instructor did it)
vuelo.find_element_by_xpath('.//span[@class="duration"]/time').get_attribute('datetime')
```
### Interactuando con Elementos
```
# button that opens the stopovers modal for this flight
boton_escalas = vuelo.find_element_by_xpath('.//div[@class="flight-summary-stops-description"]/button')
boton_escalas
# simulate a click on the button
boton_escalas.click()
# select the itinerary segments shown in the modal
segmentos = vuelo.find_elements_by_xpath('//div[@class="sc-hZSUBg gfeULV"]/div[@class="sc-cLQEGU hyoued"]')
segmentos
# number of stopovers = number of segments minus one
escalas = len(segmentos) - 1
escalas
```
### Scrapeando escalas y tarifas
```
segmento = segmentos[0]
segmento
# departure and stopover city abbreviations of this segment
ciudades = segmento.find_elements_by_xpath('.//div[@class="sc-bwCtUz iybVbT"]/abbr[@class="sc-hrWEMg hlCkST"]')
print(f'Ciudad de salida: {ciudades[0].text}')
print(f'Ciudad de escala: {ciudades[1].text}')
# departure times at the initial city and at the stopover
horas = segmento.find_elements_by_xpath('.//div[@class="sc-bwCtUz iybVbT"]/time[@class="sc-RefOD libzvk"]')
print(f'Hora de salida ciudad inicial: {horas[0].text}')
print(f'Hora de salida ciudad escala: {horas[1].text}')
# airport names for the departure and stopover cities
aeropuertos = segmento.find_elements_by_xpath('.//span[@class="sc-eTuwsz eumCTU"]/span[@class="sc-hXRMBi gVvErD"]')
print(f'Aeropuerto de salida: {aeropuertos[0].text}')
print(f'Aeropuerto de escala: {aeropuertos[1].text}')
# close the stopovers modal
driver.find_element_by_xpath('//div[@class="modal-content sc-iwsKbI eHVGAN"]//button[@class="close"]').click()
# fares: clicking the flight row expands the fare-options table
vuelo.click()
contenido = driver.find_element_by_xpath('//div[@class="ReactCollapse--content"]')
tarifas = contenido.find_elements_by_xpath('.//table[@class="fare-options-table"]/tfoot/tr/td[contains(@class, "fare-")]')
precios = []
for tarifa in tarifas:
    # the fare name lives in the label's "for" attribute
    nombre = tarifa.find_element_by_xpath('.//label').get_attribute('for')
    moneda = tarifa.find_element_by_xpath('.//label/span[@class="price"]/span[@class="currency-symbol"]').text
    valor = tarifa.find_element_by_xpath('.//label/span[@class="price"]/span[@class="value"]').text
    dict_tarifa = {nombre:{'moneda':moneda, 'valor':valor}}
    precios.append(dict_tarifa)
precios
```
### Construyendo funciones
```
def obtener_precios(vuelo):
    """
    Return a list of dicts, one per fare option, shaped
    {fare_name: {'moneda': currency_symbol, 'valor': price_text}}.
    """
    # BUGFIX: search inside the expanded flight row passed as argument;
    # the original read the module-level `contenido` element and
    # silently ignored its `vuelo` parameter.
    tarifas = vuelo.find_elements_by_xpath('.//table[@class="fare-options-table"]/tfoot/tr/td[contains(@class, "fare-")]')
    precios = []
    for tarifa in tarifas:
        # the fare name lives in the label's "for" attribute
        nombre = tarifa.find_element_by_xpath('.//label').get_attribute('for')
        moneda = tarifa.find_element_by_xpath('.//label/span[@class="price"]/span[@class="currency-symbol"]').text
        valor = tarifa.find_element_by_xpath('.//label/span[@class="price"]/span[@class="value"]').text
        dict_tarifa = {nombre:{'moneda':moneda, 'valor':valor}}
        precios.append(dict_tarifa)
    return precios
def obtener_datos_escalas(vuelo):
    """
    Return a list of dictionaries with the stopover information of a
    flight, one dict per itinerary segment.
    """
    # open the stopovers modal
    vuelo.find_element_by_xpath('//div[@class="flight"]//button[@class="sc-bdVaJa fuucJY"]').click()
    segmentos = vuelo.find_elements_by_xpath('//div[@class="sc-hZSUBg gfeULV"]/div[@class="sc-cLQEGU hyoued"]')
    # BUGFIX: `info_escalas[]` was a SyntaxError; initialise the list
    info_escalas = []
    for segmento in segmentos:
        # departure and stopover city of this segment
        ciudades = segmento.find_elements_by_xpath('.//div[@class="sc-bwCtUz iybVbT"]/abbr[@class="sc-hrWEMg hlCkST"]')
        origen = ciudades[0].text
        escala = ciudades[1].text
        # BUGFIX: record the segment (the original discarded these values)
        info_escalas.append({'origen': origen, 'escala': escala})
    # close the modal
    driver.find_element_by_xpath('//div[@class="modal-component"]//button[@class="close"]').click()
    return info_escalas
# Ad-hoc exploration outside the functions above: grab the segment rows of one
# flight and peek at the second city abbreviation of each segment.
segmentos = vuelo.find_elements_by_xpath('//div[@class="sc-hZSUBg gfeULV"]/div[@class="sc-cLQEGU hyoued"]')
for segmento in segmentos:
    # Obtaining the departure and arrival city abbreviations
    ciudades = segmento.find_elements_by_xpath('.//div[@class="sc-bwCtUz iybVbT"]/abbr[@class="sc-hrWEMg hlCkST"]')
    ciudades[1].text
```
## Paso 4: Cerrar el navegador.
```
# Step 4: shut down the browser window and release WebDriver resources.
driver.close()
```
|
github_jupyter
|
<h1 align="center">TensorFlow Neural Network Lab</h1>
<img src="image/notmnist.png">
In this lab, you'll use all the tools you learned from *Introduction to TensorFlow* to label images of English letters! The data you are using, <a href="http://yaroslavvb.blogspot.com/2011/09/notmnist-dataset.html">notMNIST</a>, consists of images of a letter from A to J in different fonts.
The above images are a few examples of the data you'll be training on. After training the network, you will compare your prediction model against test data. Your goal, by the end of this lab, is to make predictions against that test set with at least an 80% accuracy. Let's jump in!
To start this lab, you first need to import all the necessary modules. Run the code below. If it runs successfully, it will print "`All modules imported`".
```
import hashlib
import os
import pickle
from urllib.request import urlretrieve
import numpy as np
from PIL import Image
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import resample
from tqdm import tqdm
from zipfile import ZipFile
print('All modules imported.')
```
The notMNIST dataset is too large for many computers to handle. It contains 500,000 images for just training. You'll be using a subset of this data, 15,000 images for each label (A-J).
```
def download(url, file):
    """
    Download <url> to the local path <file>, skipping if it already exists.
    :param url: URL to file
    :param file: Local file path
    """
    # Guard clause: nothing to do when the file is already on disk.
    if os.path.isfile(file):
        return
    print('Downloading ' + file + '...')
    urlretrieve(url, file)
    print('Download Finished')
# Download the training and test dataset.
download('https://s3.amazonaws.com/udacity-sdc/notMNIST_train.zip', 'notMNIST_train.zip')
download('https://s3.amazonaws.com/udacity-sdc/notMNIST_test.zip', 'notMNIST_test.zip')
# Make sure the files aren't corrupted
assert hashlib.md5(open('notMNIST_train.zip', 'rb').read()).hexdigest() == 'c8673b3f28f489e9cdf3a3d74e2ac8fa',\
'notMNIST_train.zip file is corrupted. Remove the file and try again.'
assert hashlib.md5(open('notMNIST_test.zip', 'rb').read()).hexdigest() == '5d3c7e653e63471c88df796156a9dfa9',\
'notMNIST_test.zip file is corrupted. Remove the file and try again.'
# Wait until you see that all files have been downloaded.
print('All files downloaded.')
def uncompress_features_labels(file):
    """
    Uncompress features and labels from a zip file.
    :param file: The zip file to extract the data from
    :return: (features, labels) as numpy arrays; one flattened float32 image
             vector per entry, labeled by the first character of its basename.
    """
    features = []
    labels = []
    with ZipFile(file) as zipf:
        # Progress bar over all archive members
        for filename in tqdm(zipf.namelist(), unit='files'):
            # Skip directory entries
            if filename.endswith('/'):
                continue
            with zipf.open(filename) as image_file:
                image = Image.open(image_file)
                image.load()
                # Flatten pixels to a 1-D float32 vector to save memory
                features.append(np.array(image, dtype=np.float32).flatten())
                # The first character of the basename is the image's letter label
                labels.append(os.path.split(filename)[1][0])
    return np.array(features), np.array(labels)
# Get the features and labels from the zip files
train_features, train_labels = uncompress_features_labels('notMNIST_train.zip')
test_features, test_labels = uncompress_features_labels('notMNIST_test.zip')
# Limit the amount of data to work with a docker container.
# NOTE(review): resample has no fixed random_state here, so the subset differs between runs.
docker_size_limit = 150000
train_features, train_labels = resample(train_features, train_labels, n_samples=docker_size_limit)
# Set flags for feature engineering. This will prevent you from skipping an important step.
is_features_normal = False
is_labels_encod = False
# Wait until you see that all features and labels have been uncompressed.
print('All features and labels uncompressed.')
```
<img src="image/Mean_Variance_Image.png" style="height: 75%;width: 75%; position: relative; right: 5%">
## Problem 1
The first problem involves normalizing the features for your training and test data.
Implement Min-Max scaling in the `normalize_grayscale()` function to a range of `a=0.1` and `b=0.9`. After scaling, the values of the pixels in the input data should range from 0.1 to 0.9.
Since the raw notMNIST image data is in [grayscale](https://en.wikipedia.org/wiki/Grayscale), the current values range from a min of 0 to a max of 255.
Min-Max Scaling:
$
X'=a+{\frac {\left(X-X_{\min }\right)\left(b-a\right)}{X_{\max }-X_{\min }}}
$
*If you're having trouble solving problem 1, you can view the solution [here](https://github.com/udacity/deep-learning/blob/master/intro-to-tensorflow/intro_to_tensorflow_solution.ipynb).*
```
# Problem 1 - Implement Min-Max scaling for grayscale image data
def normalize_grayscale(image_data):
    """
    Normalize the image data with Min-Max scaling to a range of [0.1, 0.9]
    :param image_data: The image data to be normalized
    :return: Normalized image data
    """
    a, b = 0.1, 0.9
    lo = np.min(image_data)
    hi = np.max(image_data)
    # X' = a + (X - X_min)(b - a) / (X_max - X_min)
    return a + (image_data - lo) * (b - a) / (hi - lo)
### DON'T MODIFY ANYTHING BELOW ###
# Test Cases: verify normalize_grayscale against precomputed expected values.
np.testing.assert_array_almost_equal(
    normalize_grayscale(np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 255])),
    [0.1, 0.103137254902, 0.106274509804, 0.109411764706, 0.112549019608, 0.11568627451, 0.118823529412, 0.121960784314,
     0.125098039216, 0.128235294118, 0.13137254902, 0.9],
    decimal=3)
np.testing.assert_array_almost_equal(
    normalize_grayscale(np.array([0, 1, 10, 20, 30, 40, 233, 244, 254,255])),
    [0.1, 0.103137254902, 0.13137254902, 0.162745098039, 0.194117647059, 0.225490196078, 0.830980392157, 0.865490196078,
     0.896862745098, 0.9])
# Normalize only once: the flag guards against double-scaling when the cell is re-run.
if not is_features_normal:
    train_features = normalize_grayscale(train_features)
    test_features = normalize_grayscale(test_features)
    is_features_normal = True
print('Tests Passed!')
if not is_labels_encod:
    # Turn labels into numbers and apply One-Hot Encoding
    encoder = LabelBinarizer()
    encoder.fit(train_labels)
    train_labels = encoder.transform(train_labels)
    test_labels = encoder.transform(test_labels)
    # Change to float32, so it can be multiplied against the features in TensorFlow, which are float32
    train_labels = train_labels.astype(np.float32)
    test_labels = test_labels.astype(np.float32)
    is_labels_encod = True
print('Labels One-Hot Encoded')
assert is_features_normal, 'You skipped the step to normalize the features'
assert is_labels_encod, 'You skipped the step to One-Hot Encode the labels'
# Get randomized datasets for training and validation (5% held out, fixed seed)
train_features, valid_features, train_labels, valid_labels = train_test_split(
    train_features,
    train_labels,
    test_size=0.05,
    random_state=832289)
print('Training features and labels randomized and split.')
# Save the data for easy access
pickle_file = 'notMNIST.pickle'
if not os.path.isfile(pickle_file):
    print('Saving data to pickle file...')
    try:
        with open('notMNIST.pickle', 'wb') as pfile:
            pickle.dump(
                {
                    'train_dataset': train_features,
                    'train_labels': train_labels,
                    'valid_dataset': valid_features,
                    'valid_labels': valid_labels,
                    'test_dataset': test_features,
                    'test_labels': test_labels,
                },
                pfile, pickle.HIGHEST_PROTOCOL)
    except Exception as e:
        print('Unable to save data to', pickle_file, ':', e)
        raise
print('Data cached in pickle file.')
# Checkpoint
All your progress is now saved to the pickle file. If you need to leave and comeback to this lab, you no longer have to start from the beginning. Just run the code block below and it will load all the data and modules required to proceed.
```
%matplotlib inline
# Load the modules
import pickle
import math
import numpy as np
import tensorflow as tf
from tqdm import tqdm
import matplotlib.pyplot as plt
# Reload the data cached by the previous section
pickle_file = 'notMNIST.pickle'
with open(pickle_file, 'rb') as f:
    pickle_data = pickle.load(f)
    train_features = pickle_data['train_dataset']
    train_labels = pickle_data['train_labels']
    valid_features = pickle_data['valid_dataset']
    valid_labels = pickle_data['valid_labels']
    test_features = pickle_data['test_dataset']
    test_labels = pickle_data['test_labels']
    del pickle_data  # Free up memory
print('Data and modules loaded.')
```
## Problem 2
Now it's time to build a simple neural network using TensorFlow. Here, your network will be just an input layer and an output layer.
<img src="image/network_diagram.png" style="height: 40%;width: 40%; position: relative; right: 10%">
For the input here the images have been flattened into a vector of $28 \times 28 = 784$ features. Then, we're trying to predict the image digit so there are 10 output units, one for each label. Of course, feel free to add hidden layers if you want, but this notebook is built to guide you through a single layer network.
For the neural network to train on your data, you need the following <a href="https://www.tensorflow.org/resources/dims_types.html#data-types">float32</a> tensors:
- `features`
- Placeholder tensor for feature data (`train_features`/`valid_features`/`test_features`)
- `labels`
- Placeholder tensor for label data (`train_labels`/`valid_labels`/`test_labels`)
- `weights`
- Variable Tensor with random numbers from a truncated normal distribution.
- See <a href="https://www.tensorflow.org/api_docs/python/constant_op.html#truncated_normal">`tf.truncated_normal()` documentation</a> for help.
- `biases`
- Variable Tensor with all zeros.
- See <a href="https://www.tensorflow.org/api_docs/python/constant_op.html#zeros"> `tf.zeros()` documentation</a> for help.
*If you're having trouble solving problem 2, review "TensorFlow Linear Function" section of the class. If that doesn't help, the solution for this problem is available [here](intro_to_tensorflow_solution.ipynb).*
```
# All the pixels in the image (28 * 28 = 784)
features_count = 784
# All the labels
labels_count = 10
# Placeholders for the input batches; shapes are left unspecified so any batch
# size can be fed. NOTE(review): this is TensorFlow 1.x API (placeholders/sessions).
features = tf.placeholder(tf.float32)
labels = tf.placeholder(tf.float32)
# Weights drawn from a truncated normal (fixed seed for reproducibility); biases start at zero.
weights = tf.Variable(tf.truncated_normal([features_count, labels_count], seed=23))
biases = tf.Variable(tf.zeros([10]))
### DON'T MODIFY ANYTHING BELOW ###
# Test Cases (rely on TF1 private attributes such as _op/_shape/_variable)
from tensorflow.python.ops.variables import Variable
assert features._op.name.startswith('Placeholder'), 'features must be a placeholder'
assert labels._op.name.startswith('Placeholder'), 'labels must be a placeholder'
assert isinstance(weights, Variable), 'weights must be a TensorFlow variable'
assert isinstance(biases, Variable), 'biases must be a TensorFlow variable'
assert features._shape == None or (\
    features._shape.dims[0].value is None and\
    features._shape.dims[1].value in [None, 784]), 'The shape of features is incorrect'
assert labels._shape == None or (\
    labels._shape.dims[0].value is None and\
    labels._shape.dims[1].value in [None, 10]), 'The shape of labels is incorrect'
assert weights._variable._shape == (784, 10), 'The shape of weights is incorrect'
assert biases._variable._shape == (10), 'The shape of biases is incorrect'
assert features._dtype == tf.float32, 'features must be type float32'
assert labels._dtype == tf.float32, 'labels must be type float32'
# Feed dicts for training, validation, and test session
train_feed_dict = {features: train_features, labels: train_labels}
valid_feed_dict = {features: valid_features, labels: valid_labels}
test_feed_dict = {features: test_features, labels: test_labels}
# Linear Function WX + b
logits = tf.matmul(features, weights) + biases
prediction = tf.nn.softmax(logits)
# Cross entropy. NOTE(review): manual -sum(y*log(p)) formulation can produce
# NaN if a softmax output underflows to 0; tf's fused softmax-cross-entropy avoids this.
cross_entropy = -tf.reduce_sum(labels * tf.log(prediction), reduction_indices=1)
# Training loss: mean cross entropy over the batch
loss = tf.reduce_mean(cross_entropy)
# Create an operation that initializes all variables
init = tf.global_variables_initializer()
# Test Cases: the loss must evaluate on all three feed dicts, and biases must start at zero
with tf.Session() as session:
    session.run(init)
    session.run(loss, feed_dict=train_feed_dict)
    session.run(loss, feed_dict=valid_feed_dict)
    session.run(loss, feed_dict=test_feed_dict)
    biases_data = session.run(biases)
assert not np.count_nonzero(biases_data), 'biases must be zeros'
print('Tests Passed!')
# Determine if the predictions are correct
is_correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(labels, 1))
# Calculate the accuracy of the predictions
accuracy = tf.reduce_mean(tf.cast(is_correct_prediction, tf.float32))
print('Accuracy function created.')
```
<img src="image/Learn_Rate_Tune_Image.png" style="height: 70%;width: 70%">
## Problem 3
Below are 2 parameter configurations for training the neural network. In each configuration, one of the parameters has multiple options. For each configuration, choose the option that gives the best accuracy.
Parameter configurations:
Configuration 1
* **Epochs:** 1
* **Learning Rate:**
* 0.8 (0.09)
* 0.5 (0.78)
* 0.1 (0.74)
* 0.05 (0.72)
* 0.01 (0.58)
Configuration 2
* **Epochs:**
* 1 (0.76)
* 2 (0.77)
* 3 (0.78)
* 4 (0.78)
* 5 (0.79)
* **Learning Rate:** 0.2
The code will print out a Loss and Accuracy graph, so you can see how well the neural network performed.
*If you're having trouble solving problem 3, you can view the solution [here](intro_to_tensorflow_solution.ipynb).*
```
# Change if you have memory restrictions
batch_size = 128
# Best parameters found for configuration 2 (see the results listed above)
epochs = 5
learning_rate = .2
### DON'T MODIFY ANYTHING BELOW ###
# Gradient Descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
# The accuracy measured against the validation set
validation_accuracy = 0.0
# Measurements used for graphing loss and accuracy
log_batch_step = 50
batches = []
loss_batch = []
train_acc_batch = []
valid_acc_batch = []
with tf.Session() as session:
    session.run(init)
    batch_count = int(math.ceil(len(train_features)/batch_size))
    for epoch_i in range(epochs):
        # Progress bar
        batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')
        # The training cycle
        for batch_i in batches_pbar:
            # Get a batch of training features and labels
            batch_start = batch_i*batch_size
            batch_features = train_features[batch_start:batch_start + batch_size]
            batch_labels = train_labels[batch_start:batch_start + batch_size]
            # Run optimizer and get loss
            _, l = session.run(
                [optimizer, loss],
                feed_dict={features: batch_features, labels: batch_labels})
            # Log every 50 batches
            if not batch_i % log_batch_step:
                # Calculate Training and Validation accuracy
                training_accuracy = session.run(accuracy, feed_dict=train_feed_dict)
                validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)
                # Log batches
                previous_batch = batches[-1] if batches else 0
                batches.append(log_batch_step + previous_batch)
                loss_batch.append(l)
                train_acc_batch.append(training_accuracy)
                valid_acc_batch.append(validation_accuracy)
    # Check accuracy against Validation data
    validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)
# Plot loss (top) and train/validation accuracy (bottom) over logged batches
loss_plot = plt.subplot(211)
loss_plot.set_title('Loss')
loss_plot.plot(batches, loss_batch, 'g')
loss_plot.set_xlim([batches[0], batches[-1]])
acc_plot = plt.subplot(212)
acc_plot.set_title('Accuracy')
acc_plot.plot(batches, train_acc_batch, 'r', label='Training Accuracy')
acc_plot.plot(batches, valid_acc_batch, 'x', label='Validation Accuracy')
acc_plot.set_ylim([0, 1.0])
acc_plot.set_xlim([batches[0], batches[-1]])
acc_plot.legend(loc=4)
plt.tight_layout()
plt.show()
print('Validation accuracy at {}'.format(validation_accuracy))
```
## Test
You're going to test your model against your hold out dataset/testing data. This will give you a good indicator of how well the model will do in the real world. You should have a test accuracy of at least 80%.
```
### DON'T MODIFY ANYTHING BELOW ###
# The accuracy measured against the test set
test_accuracy = 0.0
with tf.Session() as session:
    session.run(init)
    batch_count = int(math.ceil(len(train_features)/batch_size))
    # Retrain from scratch (init re-run), then evaluate once on the test set
    for epoch_i in range(epochs):
        # Progress bar
        batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i+1, epochs), unit='batches')
        # The training cycle
        for batch_i in batches_pbar:
            # Get a batch of training features and labels
            batch_start = batch_i*batch_size
            batch_features = train_features[batch_start:batch_start + batch_size]
            batch_labels = train_labels[batch_start:batch_start + batch_size]
            # Run optimizer
            _ = session.run(optimizer, feed_dict={features: batch_features, labels: batch_labels})
    # Check accuracy against Test data
    test_accuracy = session.run(accuracy, feed_dict=test_feed_dict)
assert test_accuracy >= 0.80, 'Test accuracy at {}, should be equal to or greater than 0.80'.format(test_accuracy)
print('Nice Job! Test Accuracy is {}'.format(test_accuracy))
```
# Multiple layers
Good job! You built a one layer TensorFlow network! However, you might want to build more than one layer. This is deep learning after all! In the next section, you will start to satisfy your need for more layers.
|
github_jupyter
|
# **Assignment 3 (From Scratch)**
## **Penalized Logistic Ridge Regression CV with Batch Gradient Decent**
- **Programmers:**
- Shaun Pritchard
- Ismael A Lopez
- **Date:** 11-15-2021
- **Assignment:** 3
- **Prof:** M.DeGiorgio
<hr>
### **Overview: Assignment 3**
- In this assignment you will still be analyzing human genetic data from 𝑁 = 183 training
observations (individuals) sampled across the world. The goal is to fit a model that can predict
(classify) an individual’s ancestry from their genetic data that has been projected along 𝑝 = 10
top principal components (proportion of variance explained is 0.2416) that we use as features
rather than the raw genetic data
- Using ridge regression, fit a penalized (regularized) logistic (multinomial) regression with model parameters obtained by batch gradient descent. Based on K = 5 continental ancestries (African, European, East Asian, Oceanian, or Native American), predictions will be made. Ridge regression will permit parameter shrinkage (tuning parameter 𝜆 ≥ 0) to mitigate overfitting. In order to infer the bestfit model parameters on the training dataset, the tuning parameter will be selected using five-fold cross validation. After training, the model will be used to predict new test data points.
## **Imports**
> Import libaries and data
```
# Math libs
from math import sqrt
from scipy import stats
from numpy import median
from decimal import *
import os
# Data Science libs
import numpy as np
import pandas as pd
# Graphics libs
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
# Timers (optional, left disabled)
# !pip install pytictoc
# from pytictoc import TicToc
# Import training and test datasets
train_df = pd.read_csv('TrainingData_N183_p10.csv')
test_df = pd.read_csv('TestData_N111_p10.csv')
# Validate training data imported correctly
train_df.head(2)
# Validate testing data imported correctly
test_df.head(2)
```
## **Data Pre-Processing**
- Pre-process test and training datasets
- Impute categorical variables in features
- Validate correct output of test data
```
# Recode the ancestry categories: `data` lists the unique class names, and
# 'Ancestry2' holds each row's integer index into that list.
data = train_df['Ancestry'].unique().tolist()
num_features = len(data)
train_df['Ancestry2'] = train_df['Ancestry'].apply(lambda x: data.index(x))
# Validate training data set
train_df.head(2)
# Shape of training data
train_df.shape
```
## **Separate X Y Predictors and Responses**
- Separate predictors from responses
- Validate correct output
```
# Keep the categorical ancestry labels of both datasets for later use
Y_train_names = train_df['Ancestry'].tolist()
Y_test_names = test_df['Ancestry'].tolist()
# Separate training feature predictors from responses
# (drop the last two columns: 'Ancestry' and the recoded 'Ancestry2')
X_train = np.float32(train_df.to_numpy()[:, :-2])
Y_train = train_df['Ancestry2'].to_numpy()
# Separate test feature predictors from responses (drop the trailing 'Ancestry' column)
X_test = np.float32(test_df.to_numpy()[:, :-1])
X_train.shape
```
## **Set Global Variables**
- λ = tuning parameters
- α = learning rate
- k = number of folds
- n_iters = number of iterations
- X_p = predictor values from training data
- Y_p = response values from training data
```
# Set global variables for the CV / gradient-descent run
# Tuning parameter grid: lambda = 1e-4 ... 1e3, log-spaced
λ = 10 ** np.arange(-4., 4.)
# Learning rate for batch gradient descent
α = 1e-4
# Number of cross-validation folds
k = 5
# Number of gradient-descent iterations per fit
n_iters = 10000
# Set n x m matrix predictor variable
X_p = X_train
# Set n vector response variable
Y_p = Y_train
```
## **Instantiate Data**
- Handle logic and set variables needed for calculating ridge logistic regression
- Handle logic and set variables for batch gradient descent
- Handle logic and set variables for cross-validation
```
# Encode response variable from CV design matrix for cross vlaidation
# One-hot encode the response vector for cross validation
def imputeResponse(response_vector, num_features):
    """Return an (n, num_features) one-hot matrix for integer class labels."""
    labels = np.int64(response_vector)
    n_rows = labels.shape[0]
    one_hot = np.zeros([n_rows, num_features])
    one_hot[np.arange(n_rows), labels] = 1
    return one_hot
# Shuffle training predictors and responses together, preserving row pairing
def randomizeData(X_p, Y_p):
    """Return (X, Y) with rows shuffled in unison (in-place np.random.shuffle)."""
    combined = np.concatenate((X_p, Y_p[:, None]), 1)
    np.random.shuffle(combined)
    return combined[:, :-1], combined[:, -1]
# Randomize predictors and responses into new variables
x, y = randomizeData(X_p, Y_p)
# Number of samples (X1) and number of features (X2)
X1 = x.shape[0]
X2 = x.shape[1]
# Get number of training feature classes = 5
num_features = np.unique(y).size
# One-hot encode the training responses
y = imputeResponse(y, num_features)
# Store the 5-fold cross-validation errors: one row per fold, one column per lambda
CV = np.zeros([k, len(λ)])
# Number of validation samples per fold, and each fold's start index
val_samples = int(np.ceil(X1 / k))
test_i = list(range(0, X1, val_samples))
# Create a 𝛽 zero matrix to store the trained coefficients per (fold, lambda)
𝛽 = np.zeros([k, len(λ), X2 + 1, num_features])
```
## **Implement logic**
- Main functions to handle logic within the preceding algorithms
```
# Standardize X coefficients
# Standardize X coefficients with the supplied column statistics
def standardize(x, mean_x, std_x):
    """Return (x - mean_x) / std_x."""
    centered = x - mean_x
    return centered / std_x
# Concatenate ones column matrix with X coefficiants
def intercept(x):
col = np.ones([x.shape[0], 1])
return np.concatenate((col, x), 1)
# Predict standardize expotential X values from intercepts
def predict(x):
x = standardize(x, mean_x, std_x)
x = intercept(x)
X_p = np.exp(np.matmul(x, 𝛽x))
return X_p / np.sum(X_p, 1)[:, None]
# Splitting the data into k groups resampling method
def cv_folds(i_test):
if i_test + val_samples <= X1:
i_tests = np.arange(i_test, i_test + val_samples)
else:
i_tests = np.arange(i_test, X1)
x_test = x[i_tests]
x_train = np.delete(x, i_tests, axis = 0)
y_test = y[i_tests]
y_train = np.delete(y, i_tests, axis = 0)
return x_train, x_test, y_train, y_test
# Calculate model CV score
def score(x, y, 𝛽x):
# Compute exponent values of X coef and BGD unnormilized probality matrix
U = np.exp(np.matmul(x, 𝛽x))
# Calculate sum unnormilized probality / sum unnormilized matrix by 1
P = U / np.sum(U, 1)[:, None]
# Calulate to cost error score
err = -(1 / x.shape[0]) * np.sum(np.sum(y * np.log10(P), 1))
return err
```
## **Batch Gradient Descent**
> Alorithm 1 used for this computation

```
def BGD(x, y, 𝛽x, lamb):
    """
    One batch-gradient-descent update of the ridge-penalized multinomial
    logistic parameters. Uses the module-level learning rate α.
    """
    # Unnormalized then row-normalized class probability matrices
    unnorm = np.exp(np.dot(x, 𝛽x))
    probs = unnorm / np.sum(unnorm, 1)[:, None]
    # Z keeps only the intercept row of 𝛽x, so the intercept is not penalized
    Z = 𝛽x.copy()
    Z[1:] = 0
    # Gradient step: data term minus the ridge penalty on the non-intercept rows
    return 𝛽x + α * (np.matmul(np.transpose(x), y - probs) - 2 * lamb * (𝛽x - Z))
```
## **CV Ridge Penalized Logistic Regression**
> - Compute ridge-penalized logistic regression with cross validation
- Performing a ridge-penalized logistic regression fit to training data
{(𝑥1, 𝑦1), (𝑥2, 𝑦2), … , (𝑥𝑁, 𝑦𝑁)} is to minimize the cost function

```
# Compute ridge-penalized logistic regression with cross validation
for i_lambda, lamb in enumerate(λ):
    for i_fold, i_test in zip(range(k), test_i):
        # Split into this fold's training and validation sets
        x_train, x_test, y_train, y_test = cv_folds(i_test)
        # Standardization statistics come from the training fold only
        mean_x, std_x = np.mean(x_train, 0), np.std(x_train, 0)
        # Standardize X training and test sets
        x_train = standardize(x_train, mean_x, std_x)
        x_test = standardize(x_test, mean_x, std_x)
        # Add the intercept column to both design matrices
        x_train = intercept(x_train)
        x_test = intercept(x_test)
        # Initialize beta coefficients for this (lambda, fold) pair
        𝛽x = np.zeros([X2 + 1, num_features])
        # Fit with batch gradient descent
        for iter in range(n_iters):
            𝛽x = BGD(x_train, y_train, 𝛽x, lamb)
        # Record the validation cost for this fold/lambda
        CV[i_fold, i_lambda] = score(x_test, y_test, 𝛽x)
        # Save the fitted coefficient matrix 𝛽x
        𝛽[i_fold, i_lambda] = 𝛽x
```
## **Deliverable 1**
> Illustrate the effect of the tuning parameter on the inferred ridge regression
coefficients by generating five plots (one for each of the 𝐾 = 5 ancestry classes) of 10 lines
(one for each of the 𝑝 = 10 features)

```
# Plot the tuning-parameter effect on the inferred ridge regression coefficients:
# one figure per ancestry class, one line per principal component.
𝛽μ = np.mean(𝛽, 0)  # average coefficients across the k folds
sns.set(rc = {'figure.figsize':(15,8)})
for i, c in enumerate(data):
    𝛽μk = 𝛽μ[..., i]
    sns.set_theme(style="whitegrid")
    sns.set_palette("mako")
    for j in range(1, 1 + X2):
        sns.lineplot( x=λ, y=𝛽μk[:, j], palette='mako', label = 'PC{}'.format(j) )
    sns.set()
    plt.xscale('log')
    plt.legend(bbox_to_anchor=(1.09, 1), loc='upper left')
    plt.xlabel('Log Lambda')
    plt.ylabel('Coefficient Values')
    plt.suptitle('Inferred Ridge Regression Coefficient Tuning Parameters of' + ' ' + c + ' ' + 'Class')
    # NOTE(review): `range(i)` re-saves the current figure i times and never
    # saves the first class (i == 0); presumably one savefig per class was intended — confirm.
    for l in range(i):
        # Output Deliverable 1
        plt.savefig("Assignment3_Deliverable1.{}.png".format(l))
    plt.show()
```
## **Deliverable 2**
> Illustrate the effect of the tuning parameter on the cross validation error by generating a plot with the 𝑦-axis as CV(5) error, and the 𝑥-axis the corresponding log-scaled
tuning parameter value log10(𝜆) that generated the particular CV(5) error.
```
# Plot the effect of the tuning parameter on the cross-validation error.
# Standard error of the CV error across folds:
err = np.std(CV, 0) / np.sqrt(CV.shape[0])
sns.set(rc = {'figure.figsize':(15,8)})
sns.set_theme(style="whitegrid")
sns.set_palette("icefire")
# NOTE(review): `yerr` is not a seaborn pointplot parameter — confirm the error
# bars actually render (matplotlib's errorbar accepts yerr; pointplot may ignore it).
sns.pointplot(x=λ, y=np.mean(CV, 0),yerr = err)
sns.set()
plt.xlabel('log10(lambda)')
plt.ylabel('CV(5) error')
plt.xscale('log')
plt.yscale('log')
plt.suptitle('Effect of the uning parameter on the cross validation error log10(lambda)')
plt.savefig("Assignment3_Deliverable2.png")
plt.show()
```
### **Retrain model with best lambda**
```
# Pick the lambda with the lowest mean CV error
best_λ = λ[np.argmin(np.mean(CV, 0))]
# Standardization statistics over the full training set
mean_x, std_x = np.mean(x, 0), np.std(x, 0)
# Standardize predictors and add the intercept column
x = standardize(x, mean_x, std_x)
x = intercept(x)
y = y.copy()
# Reset coefficients and retrain with batch gradient descent at best_λ
𝛽x = np.zeros([X2 + 1, num_features])
for iter in range(n_iters):
    𝛽x = BGD(x, y, 𝛽x, best_λ)
```
## **Deliverable 3**
> Indicate the value of 𝜆 value that generated the smallest CV(5) error
**Optimal lambda**
```
# Plot the lowest (optimal) lambda value
palette = sns.color_palette('mako')
sns.set(rc = {'figure.figsize':(15,8)})
sns.set_theme(style="whitegrid")
# Decimal is used only for an exact log10 of best_λ in the title
ak = Decimal(best_λ)
plt.plot(λ)
sns.set_palette("icefire")
sns.countplot(data=λ)
plt.xscale('log')
plt.yscale('log')
plt.xlabel('log10(lambda)')
plt.ylabel('Best Lambda')
plt.suptitle('Lowest optimal Lamda value:= log_1e{:.1f} = {}'.format(ak.log10(), best_λ))
print('Optimal lambda value:= {}'.format(best_λ))
plt.savefig("Assignment3_Deliverable3-1.png")
```
**Accuracy on training classifier**
```
# Predict class probabilities for the training predictors
ŷ_p = predict(X_train)
# Most probable class per sample (argmax across the class axis)
ŷ0 = np.argmax(ŷ_p, 1)
# Mean training accuracy: fraction of predicted classes matching Y_train
μ = np.mean(ŷ0 == Y_train)
sns.set(rc = {'figure.figsize':(15,8)})
sns.set_theme(style="whitegrid")
plt.plot(ŷ0)
sns.set_palette("icefire")
sns.boxplot(data=ŷ0 -μ**2)
plt.xscale('log')
plt.xlabel('log10(lambda)')
plt.ylabel('Best Lambda')
plt.suptitle('Classifier Training Accuracy:= {}'.format(μ))
plt.savefig("Assignment3_Deliverable3-2.png")
# print('Classifier Training Accuracy: {}'.format(μ))
```
## **Retrain model on the entire dataset for optimal 𝜆**
> - Given the optimal 𝜆, retrain your model on the entire dataset of 𝑁 = 183 observations to obtain an estimate of the (𝑝 + 1) × 𝐾 model parameter matrix as 𝐁̂ and make predictions of the probability for each of the 𝐾 = 5 classes for the 111 test individuals located in TestData_N111_p10.csv.
- Add probability predictions to the test dataframe
```
# Predict class probabilities ŷ for the test predictors
ŷ_test = predict(X_test)
Y_class = np.argmax(ŷ_test, 1)
# Column headers: one probability column per ancestry class, plus the predicted class index
new_colNames = ['{}_Probability'.format(c_name) for c_name in data] + ['ClassPredInd']
# Probabilities column-stacked with the predicted class index
i_prob = np.concatenate((ŷ_test, Y_class[:, None]), 1)
# New dataframe of class probabilities
df2 = pd.DataFrame(i_prob, columns = new_colNames)
# Attach the Ancestry labels from the test set
dep_preds = pd.concat([test_df['Ancestry'], df2], axis = 1)
# Map the predicted class index back to its ancestry name
dep_preds['ClassPredName'] = dep_preds['ClassPredInd'].apply(lambda x: data[int(x)])
# Validate probability predictions dataframe
dep_preds.head()
# Slice the probability columns for plotting
prob_1 = dep_preds.loc[:, 'Ancestry':'NativeAmerican_Probability']
# Unpivot to long format: one row per (sample, predicted class) pair
prob_2 = pd.melt(prob_1, id_vars = ['Ancestry'], var_name = 'Ancestry_Predictions', value_name = 'Probability')
# Strip the '..._Probability' suffix to recover the class name
prob_2['Ancestry_Predictions'] = prob_2['Ancestry_Predictions'].apply(lambda x: x.split('Prob')[0])
# Validate dataframe
prob_2.head(5)
# Validate dataframe features
print('Describe Columns:=', prob_2.columns, '\n')
print('Data Index values:=', prob_2.index, '\n')
print('Describe data:=', prob_2.describe(), '\n')
```
## **Deliverable 4**
> Given the optimal 𝜆, retrain your model on the entire dataset of 𝑁 = 183 observations to obtain an estimate of the (𝑝 + 1) × 𝐾 model parameter matrix as 𝐁̂ and make predictions of the probability for each of the 𝐾 = 5 classes for the 111 test individuals located in TestData_N111_p10.csv.

```
# Plot the probability-prediction matrix for the known-ancestry test samples
sns.set(rc = {'figure.figsize':(15,8)})
sns.set_theme(style="whitegrid")
fig, ax = plt.subplots()
# Grouped bars: mean predicted probability of each class, per true ancestry
sns.barplot(data = prob_2[prob_2['Ancestry'] != 'Unknown'],color = 'r', x = 'Ancestry', y = 'Probability', hue = 'Ancestry_Predictions', palette = 'mako')
plt.xlabel('Ancestory Classes')
plt.ylabel('Probability')
plt.suptitle('Probabilty of Ancestor classes')
plt.savefig("Assignment3_Deliverable4.png")
plt.show()
```
## **Deliverable 5**
**How do the class label probabilities differ for the Mexican and African American samples when compared to the class label probabilities for the unknown samples?**
> In comparison to the class label probabilities for the unknown samples, those with unknown ancestry show a probability close to or equal to one while the other classes show a probability close to zero or less than one. African American samples showed similar results. The model assigned high probabilities to the African ancestry class for each of these samples. However, both Native American and European ancestry contribute high probabilities to the Mexican population on average with Native American slightly higher than European.
|
github_jupyter
|
## Тестирование даунсемплинга, низкочастотных фильтров и параметров синусоидальных волн
```
import numpy as np
from matplotlib import pyplot as plt
from pydub import AudioSegment
from scipy.fft import rfft, rfftfreq, irfft
# Default figure size for every plot in this notebook
plt.rcParams["figure.figsize"] = (20,5)
# Parse a pydub AudioSegment into a numpy array of quantisation levels.
# The array has one column per channel: 1 column for mono, 2 for stereo.
def pydub_to_np(audio: AudioSegment) -> np.ndarray:
    """Return the raw samples of `audio` as a float32 array of shape (n_samples, n_channels)."""
    return np.array(audio.get_array_of_samples(), dtype=np.float32).reshape((-1, audio.channels))
# Collapse a stereo quantisation-level array to mono by averaging the left
# and right channels; dtype=int floors the average to the lower level.
def stereo_to_mono(stereo: np.ndarray) -> np.ndarray:
    """Return the per-sample mean of the two channels as integers."""
    return stereo.mean(axis=1, dtype=int)
# Normalise an array of quantisation levels to the [-1, 1] interval.
def normalize_mono(mono_sound: np.ndarray, sample_width: int) -> np.ndarray:
    """Scale raw samples by the full-scale value 2**(8*sample_width - 1).

    `sample_width` is the sample size in bytes (e.g. 2 for 16-bit audio).
    """
    full_scale = 2 ** (8 * sample_width - 1)
    return mono_sound / full_scale
# Map normalised [-1, 1] samples back to integer quantisation levels.
def denormalize_mono(mono_sound: np.ndarray, sample_width: int) -> np.ndarray:
    """Inverse of normalize_mono: rescale and truncate to integers."""
    full_scale = 2 ** (8 * sample_width - 1)
    rescaled = mono_sound * full_scale
    return rescaled.astype('int')
# Build a 1-second test signal: 2, 3 and 7 Hz sinusoids with decreasing
# amplitudes, sampled at `discr` points per second.
amplitude = 1
discr = 1024
func = lambda x: amplitude * np.sin(2 * np.pi * x * 2) \
+ 0.5 * amplitude * np.sin(2 * np.pi * x * 3) \
+ 0.1 * amplitude * np.sin(2 * np.pi * x * 7)
X = np.arange(0, 1, 1 / discr)
Y = func(X)
plt.plot(X, Y)
plt.xlabel('Время в секундах')
plt.ylabel('Нормализованная амплитуда')
# Real FFT and its frequency grid; the 2/N scaling makes spectral peak
# heights match the sinusoid amplitudes.
fftY = rfft(Y)
x_freq = rfftfreq(len(Y), d = 1 / discr)
y_freq = (2 / len(Y)) * np.abs(fftY)
fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.plot(x_freq, y_freq)
ax2.plot(x_freq[:10], y_freq[:10])
# Invisible full-figure axes, used only to carry shared x/y labels.
fig.add_subplot(111, frameon= False)
plt.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
plt.xlabel("Частота дискретизации в Гц")
plt.ylabel("Амплитуда")
# Round trip: the inverse FFT should reproduce the original signal.
y_from_fft = irfft(fftY)
plt.plot(X, y_from_fft)
# A 3-second piecewise signal: a different sinusoid in each second.
f1 = lambda x: amplitude * np.sin(2 * np.pi * x * 2)
f2 = lambda x: amplitude * 0.5 * np.sin(2 * np.pi * x * 3)
f3 = lambda x: amplitude * 0.3 * np.sin(2 * np.pi * x * 10)
discr = 1024
X = np.arange(0, 3, 1 / discr)
Y = np.asarray([f1(x) for x in X[:1024 * 1]] + \
[f2(x) for x in X[1024 * 1:1024 * 2]] + \
[f3(x) for x in X[1024 * 2:1024 * 3]])
plt.plot(X, Y)
plt.xlabel('Время в секундах')
plt.ylabel('Нормализованная амплитуда')
# Generator that yields consecutive slices of `lst`, each `n` items long
# (the final slice may be shorter).
def chunks(lst, n):
    start = 0
    while start < len(lst):
        yield lst[start:start + n]
        start += n
sample_size = 1024
def plot_example(X, Y, sample_size = 1024):
    """Plot `Y` over `X` chunk-by-chunk in the time domain, then show a
    per-chunk amplitude spectrum (full range plus the first 20 bins).

    NOTE(review): relies on the module-level `discr` for the frequency
    grid — confirm it matches the sampling rate of `Y`.
    """
    for chunkX, chunkY in zip(chunks(X, sample_size), chunks(Y, sample_size)):
        plt.plot(chunkX, chunkY)
    plt.xlabel('Время в секундах')
    plt.ylabel('Нормализованная амплитуда')
    for chunkY in chunks(Y, sample_size):
        fftY = rfft(chunkY)
        x_freq = rfftfreq(len(chunkY), d = 1 / discr)
        # 2/N scaling so peak heights reflect sinusoid amplitudes.
        y_freq = (2 / len(chunkY)) * np.abs(fftY)
        fig, (ax1, ax2) = plt.subplots(1, 2)
        ax1.plot(x_freq, y_freq)
        ax2.plot(x_freq[:20], y_freq[:20])
        # Invisible shared axes used only for common labels.
        fig.add_subplot(111, frameon= False)
        plt.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
        plt.xlabel("Частота дискретизации в Гц")
        plt.ylabel("Амплитуда")
plot_example(X,Y, sample_size)
plot_example(X,Y, 1256)
```
Фильтры
```
# Test signal for the filter demo: equal-amplitude 1, 3 and 5 Hz
# sinusoids over 2 seconds at 1024 samples/second.
func = lambda x: amplitude * np.sin(2 * np.pi * x * 1) \
+ amplitude * np.sin(2 * np.pi * x * 3) \
+ amplitude * np.sin(2 * np.pi * x * 5)
X = np.arange(0, 2, 1 / 1024)
Y = func(X)
plot_example(X,Y, len(Y))
from scipy.signal import filtfilt, butter, freqz
def butter_lowpass(cutoff, discr, order=5):
    """Design a digital Butterworth low-pass filter.

    `cutoff` and `discr` (the sampling rate) are in Hz; the cutoff is
    normalised by the Nyquist frequency as scipy expects.  Returns the
    (b, a) transfer-function coefficient arrays.
    """
    nyquist = discr / 2.0
    return butter(order, cutoff / nyquist, btype='low', analog=False)
def butter_lowpass_filter(data, cutoff, discr, order=5):
    """Zero-phase low-pass filter `data` with filtfilt and cast to int16.

    NOTE(review): astype(np.short) truncates toward zero — for a signal
    normalised to [-1, 1] this collapses nearly every sample to 0.  It only
    makes sense for data still in raw quantisation levels; verify callers.
    """
    b, a = butter_lowpass(cutoff, discr, order=order)
    return filtfilt(b, a, data).astype(np.short)
def plot_butter_filter(discr, cutoff, order, xlim):
    """Plot the frequency response of a Butterworth low-pass filter.

    discr  -- sampling rate in Hz
    cutoff -- cutoff frequency in Hz, marked with a dot and a vertical
              line at the -3 dB point (|H| = sqrt(2)/2)
    order  -- filter order
    xlim   -- upper limit of the frequency axis in Hz
    """
    b, a = butter_lowpass(cutoff, discr, order)
    # worN=8000 gives a dense frequency grid for a smooth curve.
    w, h = freqz(b, a, worN=8000)
    # freqz returns w in rad/sample; convert to Hz.
    plt.plot(0.5 * discr * w / np.pi, np.abs(h), 'b')
    plt.plot(cutoff, 0.5 * np.sqrt(2), 'ko')
    plt.axvline(cutoff, color='k')
    plt.xlim(0, xlim)
    plt.grid()
    # BUG FIX: the axis labels were swapped — the title text was used as
    # the x-label and "Frequency [Hz]" labelled the y-axis.
    plt.title("Lowpass Filter Frequency Response")
    plt.xlabel("Frequency [Hz]")
    plt.ylabel("Gain")
# Frequency responses for two filter orders at a 3 Hz cutoff: the higher
# order gives a steeper roll-off.
order = 3
discr = 1024
cutoff = 3
plot_butter_filter(discr, cutoff, order, 0.01 * discr)
order = 5
discr = 1024
cutoff = 3
plot_butter_filter(discr, cutoff, order, 0.01 * discr)
# Apply the low-pass filter to the 1/3/5 Hz test signal; only components
# below the 3 Hz cutoff should survive.
order = 3
discr = 1024
cutoff = 3
b, a = butter_lowpass(cutoff, discr, order)
y = butter_lowpass_filter(Y, cutoff, discr, order)
plot_example(X, y, len(y))
def downsampling(data, by: int = 4):
    """Naive decimation: keep every `by`-th sample (no anti-alias filter)."""
    keep_every = slice(None, None, by)
    return data[keep_every]
len(Y)
# Downsample by 4 and reduce the sampling rate accordingly so the
# frequency axis of the spectrum plot stays correct.
downsampled = downsampling(Y)
len(downsampled)
discr = discr / 4
plot_example(downsampling(X), downsampled, len(downsampled))
```
|
github_jupyter
|
# Properties of ELGs in DR7 Imaging
The purpose of this notebook is to quantify the observed properties (particularly size and ellipticity) of ELGs using DR7 catalogs of the COSMOS region. We use the HST/ACS imaging of objects in this region as "truth."
J. Moustakas
2018 Aug 15
```
import os, warnings, pdb
import numpy as np
import fitsio
from astropy.table import Table
import matplotlib.pyplot as plt
import seaborn as sns
rc = {'font.family': 'serif'}#, 'text.usetex': True}
sns.set(style='ticks', font_scale=1.5, palette='Set2', rc=rc)
%matplotlib inline
```
#### Read the HST/ACS parent (truth) catalog.
```
acsfile = os.path.join(os.getenv('DESI_ROOT'), 'target', 'analysis', 'truth', 'parent', 'cosmos-acs.fits.gz')
allacs = Table(fitsio.read(acsfile, ext=1, upper=True))
print('Read {} objects from {}'.format(len(allacs), acsfile))
```
#### Assemble all the functions we'll need.
```
def read_tractor(subset='0'):
    """Read the Tractor catalogs for a given COSMOS subset and cross-match
    them with the ACS catalog.

    Parameters
    ----------
    subset : str
        COSMOS subset identifier, used to build the Tractor directory name.

    Returns
    -------
    tuple
        Matched rows of the module-level `allacs` table and the stacked
        DR7 Tractor catalog (1 arcsec nearest-neighbour match).
    """
    from glob import glob
    from astropy.table import vstack
    from astrometry.libkd.spherematch import match_radec
    tractordir = '/global/cscratch1/sd/dstn/cosmos-dr7-7{}/tractor'.format(subset)
    tractorfiles = glob('{}/???/tractor-*.fits'.format(tractordir))
    alldr7 = []
    for ii, tractorfile in enumerate(tractorfiles):
        #if (ii % 10) == 0:
        #    print('Read {:02d} / {:02d} Tractor catalogs from subset {}.'.format(ii, len(tractorfiles), subset))
        alldr7.append(Table(fitsio.read(tractorfile, ext=1, upper=True)))
    alldr7 = vstack(alldr7)
    # Keep only objects that are primary in their brick, avoiding
    # duplicates in brick-overlap regions.
    alldr7 = alldr7[alldr7['BRICK_PRIMARY']]
    # Cross-match against the HST/ACS truth table (1/3600 deg = 1 arcsec).
    m1, m2, d12 = match_radec(allacs['RA'], allacs['DEC'], alldr7['RA'],
                              alldr7['DEC'], 1./3600.0, nearest=True)
    print('Read {} objects with HST/ACS and DR7 photometry'.format(len(m1)))
    return allacs[m1], alldr7[m2]
def select_ELGs(acs, dr7):
    """Apply the southern ELG target-selection cuts to the DR7 catalog and
    return the matching rows of both the ACS truth table and `dr7`."""
    from desitarget.cuts import isELG_south
    def unextinct_fluxes(cat):
        """We need to unextinct the fluxes ourselves rather than using desitarget.cuts.unextinct_fluxes
        because the Tractor catalogs don't have forced WISE photometry.
        """
        res = np.zeros(len(cat), dtype=[('GFLUX', 'f4'), ('RFLUX', 'f4'), ('ZFLUX', 'f4')])
        for band in ('G', 'R', 'Z'):
            res['{}FLUX'.format(band)] = ( cat['FLUX_{}'.format(band)] /
                                          cat['MW_TRANSMISSION_{}'.format(band)] )
        return Table(res)
    fluxes = unextinct_fluxes(dr7)
    gflux, rflux, zflux = fluxes['GFLUX'], fluxes['RFLUX'], fluxes['ZFLUX']
    # The mask-based cuts are commented out because these catalogs lack
    # the ALLMASK columns.
    ielg = isELG_south(gflux=fluxes['GFLUX'], rflux=fluxes['RFLUX'],
                       zflux=fluxes['ZFLUX'])#, gallmask=alltarg['ALLMASK_G'],
                       #rallmask=alltarg['ALLMASK_R'], zallmask=alltarg['ALLMASK_Z'])
    print('Selected {} / {} ELGs'.format(np.sum(ielg), len(acs)))
    return acs[ielg], dr7[ielg]
def get_mag(cat, band='R'):
    """AB magnitude from the nanomaggie flux column FLUX_<band>."""
    flux = cat['FLUX_{}'.format(band)]
    return 22.5 - 2.5 * np.log10(flux)
def get_reff_acs(cat):
    """Half-light radius (log10 arcsec) from SExtractor's FLUX_RADIUS.

    Uses the simulation-derived relation of Griffith et al. 2012 (Sec 4.2).
    Warnings are silenced so log10 of non-positive radii quietly yields
    NaN/-inf instead of printing runtime warnings.
    """
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        return np.log10(0.03 * 0.162 * cat['FLUX_RADIUS']**1.87)
def get_reff_tractor(cat):
    """Half-light radius (log10 arcsec) from a Tractor catalog: the
    FRACDEV-weighted mix of the deVaucouleurs and exponential radii."""
    fdev = cat['FRACDEV']
    combined = fdev * cat['SHAPEDEV_R'] + (1 - fdev) * cat['SHAPEEXP_R']
    return np.log10(combined)
def get_ell_acs(cat):
    """Ellipticity 1 - b/a from SExtractor semi-minor/major axis lengths."""
    axis_ratio = cat['B_IMAGE'] / cat['A_IMAGE']
    return 1 - axis_ratio
def get_ell_tractor(cat):
    """FRACDEV-weighted ellipticity |e| from the Tractor shape components."""
    fdev = cat['FRACDEV']
    e_exp = np.hypot(cat['SHAPEEXP_E1'], cat['SHAPEEXP_E2'])
    e_dev = np.hypot(cat['SHAPEDEV_E1'], cat['SHAPEDEV_E2'])
    return fdev * e_dev + (1 - fdev) * e_exp
def qa_true_properties(acs, dr7, subsetlabel='0', noplots=False,
                       pngsize=None, pngellipticity=None):
    """Use HST to characterize the *true* ELG size and ellipticity
    distributions.

    Prints the Tractor morphological-type fractions for true stars and
    galaxies, then (unless `noplots`) makes magnitude-vs-size and
    magnitude-vs-ellipticity joint plots, optionally saved to the given
    PNG paths.
    """
    # HST CLASS_STAR > 0.9 is taken as the star/galaxy boundary.
    istar = acs['CLASS_STAR'] > 0.9
    igal = ~istar
    nstar, ngal, nobj = np.sum(istar), np.sum(igal), len(acs)
    # Tractor TYPE values are padded to 4 characters, hence 'PSF ' etc.
    print('True galaxies, N={} ({:.2f}%):'.format(ngal, 100*ngal/nobj))
    for tt in ('PSF ', 'REX ', 'EXP ', 'DEV ', 'COMP'):
        nn = np.sum(dr7['TYPE'][igal] == tt)
        frac = 100 * nn / ngal
        print(' {}: {} ({:.2f}%)'.format(tt, nn, frac))
    print('True stars, N={} ({:.2f}%):'.format(nstar, 100*nstar/nobj))
    for tt in ('PSF ', 'REX ', 'EXP ', 'DEV ', 'COMP'):
        nn = np.sum(dr7['TYPE'][istar] == tt)
        frac = 100 * nn / nstar
        print(' {}: {} ({:.2f}%)'.format(tt, nn, frac))
    if noplots:
        return
    rmag = get_mag(dr7)
    reff = get_reff_acs(acs)
    ell = get_ell_acs(acs)
    # Size: hex-binned galaxies, with HST stars overplotted as squares.
    j = sns.jointplot(rmag[igal], reff[igal], kind='hex', space=0, alpha=0.7,
                      stat_func=None, cmap='viridis', mincnt=3)
    j.set_axis_labels('DECaLS $r$ (AB mag)', r'$\log_{10}$ (HST/ACS Half-light radius) (arcsec)')
    j.fig.set_figwidth(10)
    j.fig.set_figheight(7)
    j.ax_joint.axhline(y=np.log10(0.45), color='k', ls='--')
    j.ax_joint.scatter(rmag[istar], reff[istar], marker='s', color='orange', s=10)
    j.ax_joint.text(20.8, np.log10(0.45)+0.1, r'$r_{eff}=0.45$ arcsec', ha='left', va='center',
                    fontsize=14)
    j.ax_joint.text(0.15, 0.2, 'HST Stars', ha='left', va='center',
                    fontsize=14, transform=j.ax_joint.transAxes)
    j.ax_joint.text(0.05, 0.9, '{}'.format(subsetlabel), ha='left', va='center',
                    fontsize=16, transform=j.ax_joint.transAxes)
    if pngsize:
        plt.savefig(pngsize)
    # Ellipticity: same layout as the size plot.
    j = sns.jointplot(rmag[igal], ell[igal], kind='hex', space=0, alpha=0.7,
                      stat_func=None, cmap='viridis', mincnt=3)
    j.set_axis_labels('DECaLS $r$ (AB mag)', 'HST/ACS Ellipticity')
    j.fig.set_figwidth(10)
    j.fig.set_figheight(7)
    j.ax_joint.scatter(rmag[istar], ell[istar], marker='s', color='orange', s=10)
    j.ax_joint.text(0.15, 0.2, 'HST Stars', ha='left', va='center',
                    fontsize=14, transform=j.ax_joint.transAxes)
    j.ax_joint.text(0.05, 0.9, '{}'.format(subsetlabel), ha='left', va='center',
                    fontsize=16, transform=j.ax_joint.transAxes)
    if pngellipticity:
        plt.savefig(pngellipticity)
def qa_compare_radii(acs, dr7, subsetlabel='0', seeing=None, png=None):
    """Compare the HST and Tractor half-light radii for non-PSF sources.

    Hex-binned joint plot of log10 radii with a 1:1 reference line;
    `seeing` (arcsec), if given, is drawn as a horizontal dashed line,
    and `png`, if given, is the output filename.
    """
    # Exclude sources Tractor fit as point sources (TYPE is 4-char padded).
    igal = dr7['TYPE'] != 'PSF '
    reff_acs = get_reff_acs(acs[igal])
    reff_tractor = get_reff_tractor(dr7[igal])
    sizelim = (-1.5, 1)
    j = sns.jointplot(reff_acs, reff_tractor, kind='hex', space=0, alpha=0.7,
                      stat_func=None, cmap='viridis', mincnt=3,
                      xlim=sizelim, ylim=sizelim)
    j.set_axis_labels(r'$\log_{10}$ (HST/ACS Half-light radius) (arcsec)',
                      r'$\log_{10}$ (Tractor/DR7 Half-light radius) (arcsec)')
    j.fig.set_figwidth(10)
    j.fig.set_figheight(7)
    # 1:1 line: points above it are over-sized by Tractor.
    j.ax_joint.plot([-2, 2], [-2, 2], color='k')
    if seeing:
        j.ax_joint.axhline(y=np.log10(seeing), ls='--', color='k')
    j.ax_joint.text(0.05, 0.9, '{}'.format(subsetlabel), ha='left', va='center',
                    fontsize=16, transform=j.ax_joint.transAxes)
    if png:
        plt.savefig(png)
```
### Use subset 0 to characterize the "true" ELG properties.
```
# Subset 0 (best seeing) is used to measure the "true" ELG size and
# ellipticity distributions against HST.
subset = '0'
allacs, alldr7 = read_tractor(subset=subset)
acs, dr7 = select_ELGs(allacs, alldr7)
subsetlabel = 'Subset {}\n{:.3f}" seeing'.format(subset, np.median(alldr7['PSFSIZE_R']))
qa_true_properties(acs, dr7, subsetlabel=subsetlabel, pngsize='truesize.png', pngellipticity='trueell.png')
```
### Compare radii measured in three subsets of increasingly poor seeing (but same nominal depth).
```
# Subsets 0, 4 and 9 have progressively worse seeing at the same nominal
# depth; compare Tractor radii against HST truth in each.
for subset in ('0', '4', '9'):
    allacs, alldr7 = read_tractor(subset=subset)
    acs, dr7 = select_ELGs(allacs, alldr7)
    medseeing = np.median(alldr7['PSFSIZE_R'])
    subsetlabel = 'Subset {}\n{:.3f}" seeing'.format(subset, medseeing)
    qa_compare_radii(acs, dr7, subsetlabel=subsetlabel, png='size_compare_subset{}.png'.format(subset))
# Repeat the type-fraction summary (no plots) for the worst-seeing subset.
subset = '9'
allacs, alldr7 = read_tractor(subset=subset)
acs, dr7 = select_ELGs(allacs, alldr7)
subsetlabel = 'Subset {}\n{:.3f}" seeing'.format(subset, np.median(alldr7['PSFSIZE_R']))
qa_true_properties(acs, dr7, subsetlabel=subsetlabel, noplots=True)
```
|
github_jupyter
|
```
# default_exp core
```
# hmd_newspaper_dl
> Download Heritage made Digital Newspaper from the BL repository
The aim of this code is to make it easier to download all of the [Heritage Made Digital Newspapers](https://bl.iro.bl.uk/collections/353c908d-b495-4413-b047-87236d2573e3?locale=en) from the British Library's [Research Repository](https://bl.iro.bl.uk/).
```
# export
import concurrent
import itertools
import json
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
import random
import sys
import time
from collections import namedtuple
from functools import lru_cache
from operator import itemgetter
# from os import umask
import os
from pathlib import Path
from typing import List, Optional, Union
import requests
from bs4 import BeautifulSoup
from fastcore.script import *
from fastcore.test import *
from fastcore.net import urlvalid
from loguru import logger
from nbdev.showdoc import *
from tqdm import tqdm
```
## Getting newspaper links
The Newspapers are currently organised by newspaper title under a collection:

Under each title you can download a zip file representing a year for that particular newspaper title

If we only want a subset of years or titles we could download these manually, but if we're interested in using computational methods it's a bit slow. What we need to do is grab all of the URLs for each title so we can bulk download them all.
```
# export
def _get_link(x: str):
end = x.split("/")[-1]
return "https://bl.iro.bl.uk/concern/datasets/" + end
```
This is a smaller helper function that will generate the correct url once we have got an ID for a title.
```
# export
@lru_cache(256)
def get_newspaper_links():
    """Returns (title, dataset-url) tuples for every title in the
    Heritage Made Digital newspaper collection."""
    # The collection listing spans two pages on the repository site.
    urls = [
        f"https://bl.iro.bl.uk/collections/9a6a4cdd-2bfe-47bb-8c14-c0a5d100501f?locale=en&page={page}"
        for page in range(1, 3)
    ]
    link_tuples = []
    for url in urls:
        r = requests.get(url)
        r.raise_for_status()
        soup = BeautifulSoup(r.text, "lxml")
        # Anchors whose id contains 'src_copy_link' point at dataset pages.
        links = soup.select(".hyc-container > .hyc-bl-results a[id*=src_copy_link]")
        for link in links:
            url = link["href"]
            if url:
                t = (link.text, _get_link(url))
                link_tuples.append(t)
    return link_tuples
```
This function starts from the Newspaper collection and then uses BeautifulSoup to scrape all of the URLs which link to a newspaper title. We have a hard-coded URL here, which isn't very good practice, but since we're writing this code for a fairly narrow purpose we'll not worry about that here.
If we call this function we get a bunch of links back.
```
links = get_newspaper_links()
links
len(links)
```
Although this code has fairly narrow scope, we might still want some tests to check we're not completely off. `nbdev` makes this super easy. Here we check that we get back what we expect in terms of tuple length and that our urls look like urls.
```
assert len(links[0]) == 2  # test tuple len
assert (
    next(iter(set(map(urlvalid, map(itemgetter(1), links))))) == True
)  # check second item valid url
# NOTE(review): this count reflects the live site at time of writing and
# will break if titles are added to or removed from the collection.
assert len(links) == 10
assert type(links[0]) == tuple
assert (list(map(itemgetter(1), links))[-1]).startswith("https://")
# export
@lru_cache(256)
def get_download_urls(url: str) -> list:
    """Given a dataset page on the IRO repo return all download links for that page.

    Returns a deduplicated list of absolute download URLs, or an empty
    list when the page cannot be fetched.
    """
    try:
        r = requests.get(url, timeout=30)
        r.raise_for_status()
    except requests.exceptions.RequestException as e:
        # BUG FIX: previously only MissingSchema was caught and execution
        # fell through to `r.text` with `r` unbound, raising NameError.
        logger.error(e)
        return []
    soup = BeautifulSoup(r.text, "lxml")
    # Each downloadable file is an anchor with id="file_download".
    link_ends = soup.find_all("a", id="file_download")
    urls = ["https://bl.iro.bl.uk" + link["href"] for link in link_ends]
    return list(set(urls))
```
`get_download_urls` takes a 'title' URL and then grabs all of the URLs for the zip files related to that title.
```
test_link = links[0][1]
test_link
get_download_urls(test_link)
# export
def create_session() -> requests.sessions.Session:
    """Build a requests Session that retries failed requests up to 60 times."""
    session = requests.Session()
    adapter = HTTPAdapter(max_retries=Retry(total=60))
    for scheme in ("https://", "http://"):
        session.mount(scheme, adapter)
    return session
```
`create_session` just adds some extra things to our `Requests` session to try and make it a little more robust. This is probably not necessary here, but it can be useful to bump up the number of retries.
```
# export
def _download(url: str, dir: Union[str, Path]):
    """Download `url` into directory `dir`.

    Returns the saved filename on success, or None when the download
    failed or the response had no Content-Disposition header.
    """
    # Crude rate limit so we don't hammer the repository.
    time.sleep(10)
    fname = None
    s = create_session()
    try:
        r = s.get(url, stream=True, timeout=(30))
        r.raise_for_status()
        # fname = r.headers["Content-Disposition"].split('_')[1]
        # Local filename = first five underscore-separated fields of the
        # quoted Content-Disposition filename.
        fname = "_".join(r.headers["Content-Disposition"].split('"')[1].split("_")[0:5])
        if fname:
            with open(f"{dir}/{fname}", "wb") as f:
                for chunk in r.iter_content(chunk_size=8192):
                    f.write(chunk)
    except KeyError:
        # No Content-Disposition header: treat as a failed download.
        pass
    except requests.exceptions.RequestException as request_exception:
        logger.error(request_exception)
    return fname
# for url in get_download_urls("https://bl.iro.bl.uk/concern/datasets/93ec8ab4-3348-409c-bf6d-a9537156f654"):
# s = create_session()
# r = s.get(url, stream=True, timeout=(30))
# print("_".join(r.headers["Content-Disposition"].split('"')[1].split("_")[0:5]))
# s = create_session()
# r = s.get(test_url, stream=True, timeout=(30))
# "_".join(r.headers["Content-Disposition"].split('"')[1].split("_")[0:5])
```
This downloads a file and logs an exception if something goes wrong. Again we do a little test.
```
# slow
test_url = (
"https://bl.iro.bl.uk/downloads/0ea7aa1f-3b4f-4972-bc12-b7559769471f?locale=en"
)
Path("test_dir").mkdir()
test_dir = Path("test_dir")
_download(test_url, test_dir)
# slow
assert list(test_dir.iterdir())[0].suffix == ".zip"
assert len(list(test_dir.iterdir())) == 1
# tidy up
[f.unlink() for f in test_dir.iterdir()]
test_dir.rmdir()
# basic test to check bad urls won't raise unhandled exceptions
bad_link = "https://bl.oar.bl.uk/fail_uploads/download_file?fileset_id=0ea7aa1-3b4f-4972-bc12-b75597694f"
_download(bad_link, "test_dir")
# export
def download_from_urls(urls: List[str], save_dir: Union[str, Path], n_threads: int = 4):
    """Downloads from an input list of `urls` and saves to `save_dir`; option to set `n_threads`, default = 4.

    Returns the number of files downloaded successfully.
    """
    download_count = 0
    tic = time.perf_counter()
    Path(save_dir).mkdir(exist_ok=True)
    # Route loguru output through tqdm so log lines don't break the bar.
    logger.remove()
    logger.add(lambda msg: tqdm.write(msg, end=""))
    with tqdm(total=len(urls)) as progress:
        with concurrent.futures.ThreadPoolExecutor(max_workers=n_threads) as executor:
            future_to_url = {
                executor.submit(_download, url, save_dir): url for url in urls
            }
            # Advance the progress bar as soon as each future finishes.
            for future in future_to_url:
                future.add_done_callback(lambda p: progress.update(1))
            for future in concurrent.futures.as_completed(future_to_url):
                url = future_to_url[future]
                try:
                    data = future.result()
                except Exception as e:
                    logger.error("%r generated an exception: %s" % (url, e))
                else:
                    # _download returns the filename on success, None otherwise.
                    if data:
                        logger.info(f"{url} downloaded to {data}")
                        download_count += 1
    toc = time.perf_counter()
    logger.remove()
    logger.info(f"Downloads completed in {toc - tic:0.4f} seconds")
    return download_count
```
`download_from_urls` takes a list of urls and downloads it to a specified directory
```
test_links = [
"https://bl.iro.bl.uk/downloads/0ea7aa1f-3b4f-4972-bc12-b7559769471f?locale=en",
"https://bl.iro.bl.uk/downloads/80708825-d96a-4301-9496-9598932520f4?locale=en",
]
download_from_urls(test_links, "test_dir")
# slow
assert len(test_links) == len(os.listdir("test_dir"))
test_dir = Path("test_dir")
[f.unlink() for f in test_dir.iterdir()]
test_dir.rmdir()
# slow
test_some_bad_links = [
"https://bl.oar.bl.uk/fail_uploads/download_file?fileset_id=0ea7aa1f-3b4f-4972-bc12-b7559769471f",
"https://bl.oar.bl.uk/fail_uploads/download_file?fileset_id=7ac7a0cb-29a2-4172-8b79-4952e2c9b",
]
download_from_urls(test_some_bad_links, "test_dir")
# slow
test_dir = Path("test_dir")
[f.unlink() for f in test_dir.iterdir()]
test_dir.rmdir()
# export
@call_parse
def cli(
    save_dir: Param("Output Directory", str),
    n_threads: Param("Number threads to use") = 8,
    subset: Param("Download subset of HMD", int, opt=True) = None,
    url: Param("Download from a specific URL", str, opt=True) = None,
):
    "Download HMD newspaper from iro to `save_dir` using `n_threads`"
    if url is not None:
        # Single-title mode: fetch the zip URLs for one dataset page.
        logger.info(f"Getting zip download file urls for {url}")
        try:
            all_urls = get_download_urls(url)
            print(all_urls)
        except Exception as e:
            # BUG FIX: on failure the old code continued with `zip_urls`
            # (and later `all_urls`) unbound, crashing with NameError.
            logger.error(e)
            return
    else:
        # Full-collection mode: gather zip URLs for every newspaper title.
        logger.info("Getting title urls")
        title_urls = get_newspaper_links()
        logger.info(f"Found {len(title_urls)} title urls")
        all_urls = []
        print(title_urls)
        for title_url in title_urls:
            logger.info(f"Getting zip download file urls for {title_url}")
            try:
                all_urls.append(get_download_urls(title_url[1]))
            except Exception as e:
                logger.error(e)
        all_urls = list(itertools.chain(*all_urls))
        if subset:
            if len(all_urls) < subset:
                # BUG FIX: the message previously interpolated the whole
                # URL list where the count was intended.
                raise ValueError(
                    f"Size of requested sample {subset} is larger than total number of urls: {len(all_urls)}"
                )
            all_urls = random.sample(all_urls, subset)
        print(all_urls)
    # Shared tail: download everything and report any shortfall.
    download_count = download_from_urls(all_urls, save_dir, n_threads=n_threads)
    request_url_count = len(all_urls)
    if request_url_count == download_count:
        logger.info(
            f"\U0001F600 Requested count of urls: {request_url_count} matches number downloaded: {download_count}"
        )
    if request_url_count > download_count:
        logger.warning(
            f"\U0001F622 Requested count of urls: {request_url_count} higher than number downloaded: {download_count}"
        )
    if request_url_count < download_count:
        logger.warning(
            f"\U0001F937 Requested count of urls: {request_url_count} lower than number downloaded: {download_count}"
        )
```
We finally use `fastcore` to make a little CLI that we can use to download all of our files. We even get a little help flag for free 😀. We can either call this as a python function, or when we install the python package it gets registered as a `console_scripts` and can be used like other command line tools.
```
# cli("test_dir", subset=2)
# assert all([f.suffix == '.zip' for f in Path("test_dir").iterdir()])
# assert len(list(Path("test_dir").iterdir())) == 2
from nbdev.export import notebook2script
notebook2script()
# test_dir = Path("test_dir")
# [f.unlink() for f in test_dir.iterdir()]
# test_dir.rmdir()
```
|
github_jupyter
|
<img src="../figures/HeaDS_logo_large_withTitle.png" width="300">
<img src="../figures/tsunami_logo.PNG" width="600">
[](https://colab.research.google.com/github/Center-for-Health-Data-Science/PythonTsunami/blob/fall2021/Conditionals/Conditions.ipynb)
# Boolean and Conditional logic
*prepared by [Katarina Nastou](https://www.cpr.ku.dk/staff/?pure=en/persons/672471) and [Rita Colaço](https://www.cpr.ku.dk/staff/?id=621366&vis=medarbejder)*
## Objectives
- Understand boolean operators and how variables can relate
- Learn about "Truthiness"
- Learn how to write conditional statements and use proper indentation
- Learn how to use comparison operators to make a basic programs
## User Input
There is a built-in function in Python called "input" that will prompt the user and store the result to a variable.
```
name = input("Enter your name here: ")
print(name)
```
## Booleans
```
x = True
print(x)
print(type(x))
```
## Comparison Operators
Comparison operators can tell how two Python values relate, resulting in a boolean. They answer yes/no questions.
In the example `a = 2` and `b = 2`, i.e. we are comparing integers (`int`)
operator | Description | Result | Example (`a, b = 2, 2`)
--- | --- |--- | ---
`==` | **a** equal to **b** | True if **a** has the same value as **b** | `a == b # True`
`!=` | **a** not equal to **b** | True if **a** does NOT have the same value as **b** | `a != b # False`
`>` | **a** greater than **b** | True if **a** is greater than **b** | `a > b # False`
`<` | **a** less than **b** | True if **a** is less than **b** | `a < b # False`
`>=` | **a** greater than or equal to **b** | True if **a** is greater than or equal to **b** | `a >= b # True`
`<=` | **a** less than or equal to **b** | True if **a** is less than or equal to **b** | `a <= b # True`
> Hint: The result of a comparison is defined by the type of **a** and **b**, and the **operator** used
### Numeric comparisons
```
a, b = 2, 2
a >= b
```
### String comparisons
```
"carl" < "chris"
```
### Quiz
**Question 1**: What will be the result of this comparison?
```python
x = 2
y = "Anthony"
x < y
```
1. True
2. False
3. Error
**Question 2**: What about this comparison?
```python
x = 12.99
y = 12
x >= y
```
1. True
2. False
3. Error
**Question 3**: And this comparison?
```python
x = 5
y = "Hanna"
x == y
```
1. True
2. False
3. Error
## Truthiness
In Python, all conditional checks resolve to `True` or `False`.
```python
x = 1
x == 1 # True
x == 0 # False
```
Besides false conditional checks, other things that are naturally "falsy" include: empty lists/tuples/arrays, empty strings, None, and zero (and non-empty things are normally `True`).
> "Although Python has a bool type, it accepts any object in a boolean context, such as the
> expression controlling an **if** or **while** statement, or as operands to **and**, **or**, and **not**.
> To determine whether a value **x** is _truthy_ or _falsy_, Python applies `bool(x)`, which always returns True or False.
>
> (...) Basically, `bool(x)` calls `x.__bool__()` and uses the result.
> If `__bool__` is not implemented, Python tries to invoke `x.__len__()`, and if that returns zero, bool returns `False`.
> Otherwise bool returns `True`." (Ramalho 2016: Fluent Python, p. 12)
```
a = []
bool(a)
a = ''
bool(a)
a = None
bool(a)
a = 0
b = 1
print(bool(a))
print(bool(b))
```
## Logical Operators or "How to combine boolean values"
In Python, the following operators can be used to make Boolean Logic comparisons. The three most common ones are `and`, `or` and `not`.
`and`, True if both **a** AND **b** are true (logical conjunction)
```python
cats_are_cute = True
dogs_are_cute = True
cats_are_cute and dogs_are_cute # True
```
> But `True and False`, `False and True` and `False and False` all evaluate to `False`.
```
x = 134
x > 49 and x < 155
```
`or`, True if either **a** OR **b** are true (logical disjunction)
```python
am_tired = True
is_bedtime = False
am_tired or is_bedtime # True
```
> `True or True`, `False or True` and `True or False` evaluate to `True`.
> Only `False or False` results in `False`.
```
x = 5
x < 7 or x > 11
```
`not`, True if the opposite of **a** is true (logical negation)
```python
is_weekend = True
not is_weekend # False
```
> `not True` -> False
> `not False` -> True
### Order of precedence
Can you guess the result of this expression?
```python
True or True and False
```
1. True
2. False
3. Error
```
# True or True and False
```
Instead of memorizing the [order of precedence](https://docs.python.org/3/reference/expressions.html#operator-precedence), we can use parentheses to define the order in which operations are performed.
- Helps prevent bugs
- Makes your intentions clearer to whomever reads your code
```
# (True or True) and False
```
## Special operators
### Identity operators
Operator | Description |Example (`a, b = 2, 3`)
--- | --- |---
`is` | True if the operands are identical (refer to the same object) | `a is 2 # True`
`is not` | True if the operands are not identical (do not refer to the same object) | `a is not b # False`
In python, `==` and `is` are very similar operators, however they are NOT the same.
`==` compares **equality**, while `is` compares by checking for the **identity**.
Example 1:
```
a = 1
print(a == 1)
print(a is 1)
```
Example 2:
```
a = [1, 2, 3]
b = [1, 2, 3]
print(a == b)
print(a is b)
```
**`is`** comparisons only return `True` if the variables reference the same item *in memory*. It is recommendend to [test Singletons with `is`](https://www.python.org/dev/peps/pep-0008/#programming-recommendations) and not `==`, e.g. `None`, `True`, `False`.
### Membership operators
Operator | Description |Example (`a = [1, 2, 3]`)
--- | --- |---
`in` | True if value/variable is found in the sequence | `2 in a # True`
`not in` | True if value/variable is not found in the sequence | `5 not in a # False`
```
aa = ['alanine', 'glycine', 'tyrosine']
'alanine' in aa
'gly' in aa[0]
```
## Quiz
**Question 1**: What is truthiness?
1. Statements or facts that seem "kind of true" even if they aren't true necessarily
2. Statements or expressions that result to a True value
3. Code that never lies
4. Computers have the tendency to believe things are True until proven False
**Question 2**: Is the following expression True or False?
```python
x = 15
y = 0
bool(x or y) # this expression
```
**Question 3**: Is the following expression True or False?
```python
x = 0
y = None
bool(x or y) # this expression
```
**Question 4**: (Hard) What is the result of the following expression?
```python
x = 233
y = 0
z = None
x or y or z # this expression
```
**Question 5**: Hardest question! Add parentheses to the expression, so that it shows the order of precedence explicitly.
```python
x = 0
y = -1
x or y and x - 1 == y and y + 1 == x
```
> Tip: check the [order of precedence](https://docs.python.org/3/reference/expressions.html#operator-precedence).
## Conditional Statements
[Conditional statements](https://docs.python.org/3/tutorial/controlflow.html#if-statements), use the keywords `if`, `elif` and `else`, and they let you control what pieces of code are run based on the value of some Boolean condition.
```python
if some condition is True:
do something
elif some other condition is True:
do something
else:
do something
```
> Recipe: if condition, execute expression
> If condition always finishes with `:` (colon)
> Expression to be executed if condition succeeds always needs to be indented (spaces or tab, depending on the editor you are using)
```
cats_are_cute = True
dogs_are_cute = True
if cats_are_cute and dogs_are_cute:
print("Pets are cute!")
```
> Here the `if` statement automatically calls `bool` on the expression, e.g. `bool(cats_are_cute and dogs_are_cute)`.
Adding the `else` statement:
```
is_weekend = True
if not is_weekend:
print("It's Monday.")
print("Go to work.")
else:
print("Sleep in and enjoy the beach.")
```
For more customized behavior, use `elif`:
```
am_tired = True
is_bedtime = True
if not am_tired:
print("One more episode.")
elif am_tired and is_bedtime:
print("Go to sleep.")
else:
print("Go to sleep anyways.")
```
### Quiz:
**Question 1**: If you set the name variable to "Gandalf" and run the script below, what will be the output?
```
# Prompt for a name and branch on its value; `==` compares string values.
name = input("Enter your name here: ")
if name == "Gandalf":
    print("Run, you fools!")
elif name == "Aragorn":
    print("There is always hope.")
else:
    print("Move on then!")
```
**Question 2**: Why do we use `==` and not `is` in the code above?
## Group exercises
### Exercise 1
At the next code block there is some code that randomly picks a number from 1 to 10.
Write a conditional statement to check if `choice` is 5 and print `"Five it is!"` and in any other case print `"Well that's not a 5!"`.
```
from random import randint
choice = randint(1,10)
# YOUR CODE GOES HERE vvvvvv
```
### Exercise 2
At the next code block there is some code that randomly picks a number from 1 to 1000. Use a conditional statement to check if the number is odd and print `"odd"`, otherwise print `"even"`.
> *Hint*: Remember the numerical operators we saw before in [Numbers_and_operators.ipynb](https://colab.research.google.com/github/Center-for-Health-Data-Science/PythonTsunami/blob/fall2021/Numbers_and_operators/Numbers_and_operators.ipynb) and think of which one can help you find an odd number.
```
from random import randint
num = randint(1, 1000) #picks random number from 1-1000
# YOUR CODE GOES HERE vvvvvvv
```
### Exercise 3
Create a variable and assign an integer as value, then build a conditional to test it:
- If the value is below 0, print "The value is negative"
- If the value is between 0 and 20 (including 0 and 20), print the value
- Otherwise, print "Out of scope"
Test it by changing the value of the variable
### Exercise 4
Read the file 'data/samples.txt' following the notebook [Importing data](https://colab.research.google.com/github/Center-for-Health-Data-Science/PythonTsunami/blob/fall2021/Importing_data/Importing_data.ipynb) and check if Denmark is among the countries in this file.
## Recap
- Conditional logic can control the flow of a program
- We can use comparison and logical operators to make conditional if statements
- In general, always make sure you make comparisons between objects of the same type (integers and floats are the exceptions)
- Conditional logic evaluates whether statements are true or false
*Note: This notebook's content structure has been adapted from Colt Steele's slides used in [Modern Python 3 Bootcamp Course](https://www.udemy.com/course/the-modern-python3-bootcamp/) on Udemy*
|
github_jupyter
|
# Getting Started with gensim
This section introduces the basic concepts and terms needed to understand and use `gensim` and provides a simple usage example.
## Core Concepts and Simple Example
At a very high-level, `gensim` is a tool for discovering the semantic structure of documents by examining the patterns of words (or higher-level structures such as entire sentences or documents). `gensim` accomplishes this by taking a *corpus*, a collection of text documents, and producing a *vector* representation of the text in the corpus. The vector representation can then be used to train a *model*, which is an algorithms to create different representations of the data, which are usually more semantic. These three concepts are key to understanding how `gensim` works so let's take a moment to explain what each of them means. At the same time, we'll work through a simple example that illustrates each of them.
### Corpus
A *corpus* is a collection of digital documents. This collection is the input to `gensim` from which it will infer the structure of the documents, their topics, etc. The latent structure inferred from the corpus can later be used to assign topics to new documents which were not present in the training corpus. For this reason, we also refer to this collection as the *training corpus*. No human intervention (such as tagging the documents by hand) is required - the topic classification is [unsupervised](https://en.wikipedia.org/wiki/Unsupervised_learning).
For our corpus, we'll use a list of 9 strings, each consisting of only a single sentence.
```
raw_corpus = ["Human machine interface for lab abc computer applications",
"A survey of user opinion of computer system response time",
"The EPS user interface management system",
"System and human system engineering testing of EPS",
"Relation of user perceived response time to error measurement",
"The generation of random binary unordered trees",
"The intersection graph of paths in trees",
"Graph minors IV Widths of trees and well quasi ordering",
"Graph minors A survey"]
```
This is a particularly small example of a corpus for illustration purposes. Another example could be a list of all the plays written by Shakespeare, list of all wikipedia articles, or all tweets by a particular person of interest.
After collecting our corpus, there are typically a number of preprocessing steps we want to undertake. We'll keep it simple and just remove some commonly used English words (such as 'the') and words that occur only once in the corpus. In the process of doing so, we'll [tokenise][1] our data. Tokenization breaks up the documents into words (in this case using space as a delimiter).
[1]: https://en.wikipedia.org/wiki/Tokenization_(lexical_analysis)
```
# Create a set of frequent words (stopwords) to filter out of every document.
stoplist = set('for a of the and to in'.split(' '))
# Lowercase each document, split it by white space and filter out stopwords
texts = [[word for word in document.lower().split() if word not in stoplist]
         for document in raw_corpus]
# Count word frequencies across the whole corpus (token -> number of occurrences).
from collections import defaultdict
frequency = defaultdict(int)
for text in texts:
    for token in text:
        frequency[token] += 1
# Only keep words that appear more than once; singletons carry little signal here.
processed_corpus = [[token for token in text if frequency[token] > 1] for text in texts]
processed_corpus
```
Before proceeding, we want to associate each word in the corpus with a unique integer ID. We can do this using the `gensim.corpora.Dictionary` class. This dictionary defines the vocabulary of all words that our processing knows about.
```
from gensim import corpora
dictionary = corpora.Dictionary(processed_corpus)
print(dictionary)
```
Because our corpus is small, there are only 12 different tokens in this `Dictionary`. For larger corpora, dictionaries that contain hundreds of thousands of tokens are quite common.
### Vector
To infer the latent structure in our corpus we need a way to represent documents that we can manipulate mathematically. One approach is to represent each document as a vector. There are various approaches for creating a vector representation of a document but a simple example is the *bag-of-words model*. Under the bag-of-words model each document is represented by a vector containing the frequency counts of each word in the dictionary. For example, given a dictionary containing the words `['coffee', 'milk', 'sugar', 'spoon']` a document consisting of the string `"coffee milk coffee"` could be represented by the vector `[2, 1, 0, 0]` where the entries of the vector are (in order) the occurrences of "coffee", "milk", "sugar" and "spoon" in the document. The length of the vector is the number of entries in the dictionary. One of the main properties of the bag-of-words model is that it completely ignores the order of the tokens in the document that is encoded, which is where the name bag-of-words comes from.
Our processed corpus has 12 unique words in it, which means that each document will be represented by a 12-dimensional vector under the bag-of-words model. We can use the dictionary to turn tokenized documents into these 12-dimensional vectors. We can see what these IDs correspond to:
```
print(dictionary.token2id)
```
For example, suppose we wanted to vectorize the phrase "Human computer interaction" (note that this phrase was not in our original corpus). We can create the bag-of-word representation for a document using the `doc2bow` method of the dictionary, which returns a sparse representation of the word counts:
```
new_doc = "Human computer interaction"
new_vec = dictionary.doc2bow(new_doc.lower().split())
new_vec
```
The first entry in each tuple corresponds to the ID of the token in the dictionary, the second corresponds to the count of this token.
Note that "interaction" did not occur in the original corpus and so it was not included in the vectorization. Also note that this vector only contains entries for words that actually appeared in the document. Because any given document will only contain a few words out of the many words in the dictionary, words that do not appear in the vectorization are represented as implicitly zero as a space saving measure.
We can convert our entire original corpus to a list of vectors:
```
bow_corpus = [dictionary.doc2bow(text) for text in processed_corpus]
bow_corpus
```
Note that while this list lives entirely in memory, in most applications you will want a more scalable solution. Luckily, `gensim` allows you to use any iterator that returns a single document vector at a time. See the documentation for more details.
### Model
Now that we have vectorized our corpus we can begin to transform it using *models*. We use model as an abstract term referring to a transformation from one document representation to another. In `gensim` documents are represented as vectors so a model can be thought of as a transformation between two vector spaces. The details of this transformation are learned from the training corpus.
One simple example of a model is [tf-idf](https://en.wikipedia.org/wiki/Tf%E2%80%93idf). The tf-idf model transforms vectors from the bag-of-words representation to a vector space where the frequency counts are weighted according to the relative rarity of each word in the corpus.
Here's a simple example. Let's initialize the tf-idf model, training it on our corpus and transforming the string "system minors":
```
from gensim import models
# train the model
tfidf = models.TfidfModel(bow_corpus)
# transform the "system minors" string
tfidf[dictionary.doc2bow("system minors".lower().split())]
```
The `tfidf` model again returns a list of tuples, where the first entry is the token ID and the second entry is the tf-idf weighting. Note that the ID corresponding to "system" (which occurred 4 times in the original corpus) has been weighted lower than the ID corresponding to "minors" (which only occurred twice).
`gensim` offers a number of different models/transformations. See [Transformations and Topics](Topics_and_Transformations.ipynb) for details.
|
github_jupyter
|
# Hello, Clojure
Hello World
```
(println "Hello, world!") ; Say hi
;; Double semicolons are used if the comment is all alone on its own line
(println "Hello, world!") ; A single semicolon is used at the end of a line with some code
```
Basic string manipulation
```
;; Concat strings
(str "Clo" "jure")
;; Concat strings and numbers
(str 3 " " 2 " " 1 " Blast off!")
;; Count the number of characters of a string
(count "Hello, world")
```
Booleans
```
(println true) ; Prints true...
(println false) ; ...and prints false.
```
Nil
```
(println "Nobody's home:" nil) ; Prints Nobody's home: nil
(println "We can print many things:" true false nil)
```
Basic Arithmetic operations
```
;; A simple sum example
(+ 1900 84)
;; A simple product example
(* 16 124)
;; A simple substraction example
(- 2000 16) ; 1984 again.
;; A simple division example
(/ 25792 13)
;; A simple average example
(/ (+ 1984 2010) 2)
;; EVERYTHING in clojure is evaluated as follows
"""
(verb argument argument argument...)
"""
;; The math operators take an arbitrary number of args
(+ 1000 500 500 1) ; Evaluates to 2001.
;; The average of 2 numbers using floating-point numbers
(/ (+ 1984.0 2010.0) 2.0)
;; Adding an integer to a float returns a float
(+ 1984 2010.0)
```
Not Variable Assignment, but Close
```
;; Binding a symbol (first-name) to a value ("Russ")
(def first-name "Russ")
;; 'def' can accept any expression
(def the-average (/ (+ 20 40.0) 2.0))
```
Basic function definitions
```
;; A simple function without args
;; defn = def + fn: binds the symbol to a new function value.
(defn hello-world []
  (println "Hello, world!"))
(hello-world)
;; A function with 1 arg
(defn say-welcome [what]
  (println "Welcome to" what))
(say-welcome "Clojure")
;; A simple average function
(defn average [a b] ; No commas between args
  (/ (+ a b) 2.0))
(average 5.0 10.0)
;; A more verbose average function
;; A defn body may hold several expressions; the value of the last
;; expression is the function's return value.
(defn chatty-average [a b]
  (println "chatty-average function called")
  (println "** first argument:" a)
  (println "** second argument:" b)
  (/ (+ a b) 2.0))
(chatty-average 10 20)
```
Introduction to Leiningen
```
;; Execute the following command to start a new Clojure project skeleton
"""
!lein new app blottsbooks
"""
;; Add the following code to core.clj, located at ./blottsbooks/src/blottsbooks/core.clj
(ns blottsbooks.core ; :gen-class instructs that the namespace should be compiled
(:gen-class))
(defn say-welcome [what]
(println "Welcome to" what "!"))
(defn -main [] ; The main function
(say-welcome "Blotts Books"))
;; Execute the following command to execute the last snippet
"""
!cd ./blottsbooks
!lein run
"""
(ns user)
```
Common Clojure errors
```
;; Division by zero
(/ 100 0)
;; Typo when calling a function
(catty-average)
;; Too many parentheses
(+ (* 2 2) 10))
;; Too few parentheses
(+ (* 2 2) 10
```
|
github_jupyter
|
# Simple Test between NumPy and Numba
$$
\Gamma = \sqrt{\frac{\eta_H}{\eta_V} \kappa^2 + \eta_H \zeta_H}
$$
```
import numba
import cython
import numexpr
import numpy as np
%load_ext cython
# The number of cores used by numba can be shown with (by default all cores are used):
#print(numba.config.NUMBA_DEFAULT_NUM_THREADS)
# This can be changed with the following line
#numba.config.NUMBA_NUM_THREADS = 4
from empymod import filters
from scipy.constants import mu_0 # Magn. permeability of free space [H/m]
from scipy.constants import epsilon_0 # Elec. permittivity of free space [F/m]
# Benchmark model: 5 layers, 200 frequencies, 100 offsets, and a
# 201-point filter base per offset.
res = np.array([2e14, 0.3, 1, 50, 1]) # resistivity per layer (nlay)
freq = np.arange(1, 201)/20. # frequencies (nfre)
off = np.arange(1, 101)*1000 # offsets (noff)
lambd = filters.key_201_2009().base/off[:, None] # wavenumbers, shape (noff, nwav)
aniso = np.array([1, 1, 1.5, 2, 1]) # anisotropy per layer
epermH = np.array([1, 80, 9, 20, 1]) # relative horizontal electric permittivity
epermV = np.array([1, 40, 9, 10, 1]) # relative vertical electric permittivity
mpermH = np.array([1, 1, 3, 5, 1]) # relative horizontal magnetic permeability
# Complex frequency/layer-dependent terms, shape (nfre, nlay).
etaH = 1/res + np.outer(2j*np.pi*freq, epermH*epsilon_0)
etaV = 1/(res*aniso*aniso) + np.outer(2j*np.pi*freq, epermV*epsilon_0)
zetaH = np.outer(2j*np.pi*freq, mpermH*mu_0)
```
## NumPy
Numpy version to check result and compare times
```
def test_numpy(eH, eV, zH, l):
return np.sqrt((eH/eV) * (l*l) + (zH*eH))
```
## Numba @vectorize
This is exactly the same function as with NumPy, just with the @vectorize decorator added.
```
@numba.vectorize('c16(c16, c16, c16, f8)')
def test_numba_vnp(eH, eV, zH, l):
    """Scalar Gamma kernel; @vectorize turns it into a broadcasting ufunc."""
    gamma_squared = (eH / eV) * (l * l) + (zH * eH)
    return np.sqrt(gamma_squared)
@numba.vectorize('c16(c16, c16, c16, f8)', target='parallel')
def test_numba_v(eH, eV, zH, l):
    """Same Gamma kernel as test_numba_vnp, compiled with the parallel target."""
    gamma_squared = (eH / eV) * (l * l) + (zH * eH)
    return np.sqrt(gamma_squared)
```
## Numba @njit
```
@numba.njit
def test_numba_nnp(eH, eV, zH, l):
    """Explicit-loop, njit-compiled Gamma = sqrt(eH/eV * l^2 + zH*eH).

    eH, eV, zH : complex arrays of shape (nfre, nlay).
    l          : float array of shape (noff, nwav).
    Returns a complex128 array of shape (nfre, noff, nlay, nwav).
    """
    o1, o3 = eH.shape  # nfre, nlay
    o2, o4 = l.shape   # noff, nwav
    out = np.empty((o1, o2, o3, o4), dtype=numba.complex128)
    # Without parallel=True on the decorator, prange behaves like range;
    # the loops still run as compiled (nopython) code.
    for nf in numba.prange(o1):
        for nl in numba.prange(o3):
            # Hoist the (nf, nl)-dependent scalars out of the inner loops.
            ieH = eH[nf, nl]
            ieV = eV[nf, nl]
            izH = zH[nf, nl]
            for no in numba.prange(o2):
                for ni in numba.prange(o4):
                    il = l[no, ni]
                    out[nf, no, nl, ni] = np.sqrt(ieH/ieV * il*il + izH*ieH)
    return out
@numba.njit(nogil=True, parallel=True)
def test_numba_n(eH, eV, zH, l):
    """Parallel variant of test_numba_nnp (same math, same shapes).

    With parallel=True, numba may execute the prange loops across
    threads; nogil=True releases the GIL during execution.
    """
    o1, o3 = eH.shape  # nfre, nlay
    o2, o4 = l.shape   # noff, nwav
    out = np.empty((o1, o2, o3, o4), dtype=numba.complex128)
    for nf in numba.prange(o1):
        for nl in numba.prange(o3):
            # Hoist the (nf, nl)-dependent scalars out of the inner loops.
            ieH = eH[nf, nl]
            ieV = eV[nf, nl]
            izH = zH[nf, nl]
            for no in numba.prange(o2):
                for ni in numba.prange(o4):
                    il = l[no, ni]
                    out[nf, no, nl, ni] = np.sqrt(ieH/ieV * il*il + izH*ieH)
    return out
```
## Run comparison for a small and a big matrix
```
eH = etaH[:, None, :, None]
eV = etaV[:, None, :, None]
zH = zetaH[:, None, :, None]
l = lambd[None, :, None, :]
# Output shape
out_shape = (freq.size, off.size, res.size, filters.key_201_2009().base.size)
print(' Shape Test Matrix ::', out_shape, '; total # elements:: '+str(freq.size*off.size*res.size*filters.key_201_2009().base.size))
print('------------------------------------------------------------------------------------------')
print(' NumPy :: ', end='')
# Get NumPy result for comparison
numpy_result = test_numpy(eH, eV, zH, l)
# Get runtime
%timeit test_numpy(eH, eV, zH, l)
print(' Numba @vectorize :: ', end='')
# Ensure it agrees with NumPy
numba_vnp_result = test_numba_vnp(eH, eV, zH, l)
if not np.allclose(numpy_result, numba_vnp_result, atol=0, rtol=1e-10):
print(' * FAIL, DOES NOT AGREE WITH NumPy RESULT!')
# Get runtime
%timeit test_numba_vnp(eH, eV, zH, l)
print(' Numba @vectorize par :: ', end='')
# Ensure it agrees with NumPy
numba_v_result = test_numba_v(eH, eV, zH, l)
if not np.allclose(numpy_result, numba_v_result, atol=0, rtol=1e-10):
print(' * FAIL, DOES NOT AGREE WITH NumPy RESULT!')
# Get runtime
%timeit test_numba_v(eH, eV, zH, l)
print(' Numba @njit :: ', end='')
# Ensure it agrees with NumPy
numba_nnp_result = test_numba_nnp(etaH, etaV, zetaH, lambd)
if not np.allclose(numpy_result, numba_nnp_result, atol=0, rtol=1e-10):
print(' * FAIL, DOES NOT AGREE WITH NumPy RESULT!')
# Get runtime
%timeit test_numba_nnp(etaH, etaV, zetaH, lambd)
print(' Numba @njit par :: ', end='')
# Ensure it agrees with NumPy
numba_n_result = test_numba_n(etaH, etaV, zetaH, lambd)
if not np.allclose(numpy_result, numba_n_result, atol=0, rtol=1e-10):
print(' * FAIL, DOES NOT AGREE WITH NumPy RESULT!')
# Get runtime
%timeit test_numba_n(etaH, etaV, zetaH, lambd)
from empymod import versions
versions('HTML', add_pckg=[cython, numba], ncol=5)
```
|
github_jupyter
|
# **G.G.: Good Game?** by Matthew Tran
## March 14, 2022
## **Introduction**
In the modern age, video games have become a modern past time enjoyed by many people of various ages. A now lucrative industry, video games come in a variety of genres, experiences, and platforms. When asked about successful video games, a handful of titles might come to mind. Ones that are iconic because of their characters, revolutionary because of the way they engage with storytelling, or perhaps nostalgic because of how long they have been around.
This project seeks to define top performing video games and the traits that may have contributed to the success of these titles. Subsequently, I would like to conduct a more qualitative investigation on these titles, mainly examining reviews to paint a clearer picture of what consumers like about top games.
## **The Data**
Initial exploration of defining what makes a good game will be conducted using the Video Games CORGIS dataset which can be accessed [here.](https://corgis-edu.github.io/corgis/python/video_games/) This data was originally collected by Dr. Joe Cox who conducted an empirical investigation of U.S. sales data of video games. Dr. Cox concluded that the major factors that predict for a title's ability to attain "blockbuster" status were threefold: the company that produced the title, the console, and the critic reviews.
I would like to use the data that Dr. Cox collected, which spans thousands of titles that were released between 2004 and 2010, and conduct my own analysis agnostic to his findings.
The categories that I am interested in and their possible effects on the success of a game are:
1. Maximum number of players: how many people can play this game at one time?
2. Online Features: does the game support online play?
3. Genre: what genre does this game belong to?
Within these categories, I would like to measure success of a game using:
1. Review score: the typical review score out of 100
2. Sales: the total sales made on the game measured in millions of dollars
3. Completionist: players reported completing everything in the game
## **Data Exploration**
```
#hide
import pandas as pd
import seaborn as sns
#hide
import video_games
#hide
video_game = video_games.get_video_game()
#hide
df = pd.read_csv('video_games.csv')
#hide-input
df.head()
```
### 1. What are the top games by critic reviews?
```
#hide-input
df[['Title','Metrics.Review Score']].sort_values('Metrics.Review Score', ascending = False )
```
### 2. What are the top games by sales?
```
#hide-input
df[['Title', 'Metrics.Sales']].sort_values('Metrics.Sales', ascending = False)
```
### 3. What games have the most number of people who report completing the game?
* will be skewed based on how many people played the game
```
#hide-input
df[['Title', 'Length.Completionists.Polled']].sort_values ('Length.Completionists.Polled', ascending = False)
```
### 4. What genre of game was popular on the market during this time period (2004-2010)?
```
#collapse-output
df['Metadata.Genres'].value_counts()
```
### I would like to take the "top games" from questions 1-3 and get a closer look at these titles, since they are considered "top performing" in their respective categories.
```
#collapse-output
df.iloc[837]
#collapse-output
df.iloc[156]
#collapse-output
df.iloc[442]
#hide-input
df.iloc[[837,156,442]]
```
Observed similarities and differences:
1. Action as one of the genres, though none fall exclusively into action only.
2. All 3 were a sequel of some kind, and based off of a previously licensed entity.
3. Max players do not go above 2, two of the three games are only single-player.
4. All games came from different publishers.
5. All released for different consoles.
Because I am interested in the intersection of video games and pedagogy, I wanted to see the games that were considered "Educational."
* These were only the titles exclusively listed as 'Educational' as the genre
```
#hide-input
df[df['Metadata.Genres'] == 'Educational']
#collapse-output
df.iloc[549]
#collapse-output
df.iloc[1000]
```
Takeaways from initial data exploration:
1. Because of the saturation of Action games, I would like to take a closer look at the metrics for success in that specific genre, as well as the other genres that are well-represented in the market.
2. Because the games that were successful in these categories were all sequels of some kind, I think it would be interesting to investigate whether there are any titles that were successful without being a sequel, which would speak to the degree to which a factor like nostalgia or investment in a story/ universe contributes to a title's success.
3. Because these three games did not have a max player capacity above 2, are there any titles that support multiplayer that are also finding success?
4. Are there certain publishers or consoles that are finding more general success with their titles than others?
## **Further Exploration**
Based on the preliminary findings from my first data exploration, I would like to take a closer look at the data in certain places.
### Defining Success
Using the metrics I established previously, I would like to examine the top-performing games in the categories of critic reviews, sales, and number of completionists.
### 1. Critic Reviews
```
#hide
df_reviews = df[['Title','Metrics.Review Score']]
#hide
df_reviews_top = df_reviews[df_reviews['Metrics.Review Score'] > 90].sort_values('Metrics.Review Score', ascending = False)
#hide
df_reviews_top.index
#hide
df2 = df.iloc[df_reviews_top.index]
#hide-input
sns.regplot(x = df2['Metrics.Review Score'], y = df2['Metrics.Sales'])
```
Here, a sucessful game by critic review was defined as having a critic review score of over 90, of which there were 29 games. It does not seem to be the case, however, that a high critic score correlates very strongly to commercial success in sales. In fact, the games that received the highest critic scores were not the ones which had the most number of sales, with a handfull of games receiving more commercial sucess, and the highest seller (in this group) having the lowest critics score...
```
#hide-input
sns.regplot(x = df2['Metrics.Review Score'], y = df2['Length.Completionists.Polled'])
```
I observed an even weaker relationship between critic review scores and number of completionists in for the games.
This could however be because the games which received the highest critic review scores, such as Grand Theft Auto IV, are known for being "open-world" games in which the player can freely navigate the world without the story being a main part of interacting with the game.
```
#collapse-output
df2[['Title', 'Metrics.Review Score', 'Metrics.Sales', 'Length.Completionists.Polled', 'Metadata.Genres']].sort_values('Metrics.Sales', ascending = False)
```
Notably, 27 out of the 29 titles that were considered top-performers as described by their critic review scores had Action as one of their genre descriptors. The two games that did not belong to this genre were considered as Role-Playing and Racing/ Driving games.
### 2. Commercial Sales
```
#hide
df_sales = df[['Title', 'Metrics.Sales']]
#hide
df['Metrics.Sales'].mean
#hide
df_sales_top = df_sales[df_sales['Metrics.Sales'] > 4.69]
#hide
len(df_sales_top.index)
#hide
df3 = df.iloc[df_sales_top.index]
#hide-input
sns.regplot(x = df3['Metrics.Sales'], y =df3['Metrics.Review Score'] )
```
Very interestingly, for the top-performing games in terms if sales, being 14 games, there was actually a negative correlation between sales and critic scores. Shockingly, the game with the most sales had the lowest (sub-60) score of the group of games! However, the games with the highest critic scores in this set still had sales that were above the mean of the entire set, so these games were by no means unsuccessful.
```
#hide-input
sns.regplot(x = df3['Metrics.Sales'], y =df3['Length.Completionists.Polled'])
```
A similar negative relationship was observed between sales and number of completionist players. For similar reasons as the to critic scores grouping, the top game, Wii Play, is not a game that is well-known for having a definitive plot that players follow, but rather is a game that is often played socially with family and friends.
```
#hide-input
df3[['Title', 'Metrics.Review Score', 'Metrics.Sales', 'Length.Completionists.Polled', 'Metadata.Genres']].sort_values('Metrics.Sales', ascending = False)
```
The distribution of genres in this group were slightly more diverse than that of the critic scores group. While Action games still held a slight majority at 8 out 14 games being part of the Action genre, Role-Playing, sports, and Driving games made up the remainder of this group.
### 3. Completionists (or not?)
Following my analysis of the top-performing games under critic scores and commercial sales, I have decided not to continue with using number of completionists as a measure of success for a variety of reasons. Firstly, this number would already be skewed because of how the number of players would affect this figure, and completionist data as such would require standardization. While the additional work of standardizing this data is not very much work, I also chose not to use number of completionists in the remainder of my analysis because of how easily this number could be affected by the type of game. There are many games that are made simply to be enjoyed, and do not have the aspect of following a story or plot that other games have. In the former case, players would not be as motivated to "complete" the game, which would skew how the number of completionists reflects a game's success.
### Action Games and Reviews?
Because of the overrepresentation of Action games in the games with high critic reviews, I wanted to explore the idea that critics tend to favor games that are of the Action genre.
```
#hide
df_action = df[df['Metadata.Genres'] == 'Action']
#collapse-output
df_action['Metrics.Review Score'].mean
#hide
df_sports = df[df['Metadata.Genres'] == 'Sports']
#collapse-output
df_sports['Metrics.Review Score'].mean
#hide
df_strategy = df[df['Metadata.Genres'] == 'Strategy']
#collapse-output
df_strategy['Metrics.Review Score'].mean
```
Looking at the 3 most common genres and examining the mean critic review scores, there does not seem to be an inherent bias for Action games amongst critics, since strategy games had a higher mean score, though I think this is one area of analysis that could benefit from more investigation.
## **Who's at the Top?**
From both my own personal perspective, as well as how I assume businesses and consumers would define success, I think commercial sales are the best way to measure the success of a game. However, because I think critic reviews may encapsulate some measure of the quality of a game, I think it would be beneficial to include critic reviews as a measure of success in some way. Therefore, I decided that when choosing the "top games," I would choose those games that were present in both categories of top-performers in critic scores and sales. That is, games that received both above a 90 on critic scores and had sales above 4.69.
To account for any phenomenon that goes beyond any conventional measure of success I would like to include those titles that had extremely high sales, but perhaps were not deemed a "good game" by critics. These three games would be: Wii Play, Mario Kart Wii, and New Super Mario Bros, all titles that had commercial sales greater than 10 million dollars.
```
#hide
top_reviews = df2['Title'].tolist()
top_sales = df3['Title'].tolist()
#collapse-output
top_sales
#collapse-output
top_reviews
#collapse-output
print(set(top_sales).intersection(set(top_reviews)))
#hide
top_games = set(top_sales).intersection(set(top_reviews))
#hide
top_games_dict = {'Grand Theft Auto IV' : 837,
'Mario Kart DS' : 22,
'Halo 3' : 420,
'Call of Duty 4: Modern Warfare' : 421,
'Super Mario Galaxy' : 422,
'Super Smash Bros.: Brawl' : 835
}
#hide
target_indices = [837, 22, 420, 421, 422, 835, 156, 833, 157]
top_games = df.iloc[target_indices]
#hide
top_games = top_games[['Title', 'Metrics.Review Score', 'Metrics.Sales', 'Metadata.Genres', 'Metadata.Sequel?', 'Metadata.Publishers', 'Features.Max Players', 'Release.Console', 'Release.Year']]
#hide-input
top_games.sort_values('Metrics.Sales', ascending = False)
#hide-input
sns.countplot(x = top_games['Metadata.Genres'], palette = 'ch:.25')
#hide-input
sns.countplot(x = top_games['Metadata.Publishers'], palette = 'ch:.25')
#hide-input
sns.countplot(x = top_games['Features.Max Players'], palette = 'ch:.25')
#hide-input
sns.countplot(x = top_games['Release.Console'], palette = 'ch:.25')
```
## **Discussion**
Examining the commonalities among the top performing games, it is clear that Nintendo games have the highest sales. They make up 6 of the 9 games that I identified as top-performing games, and represent the 6 highest-earning games in the entire dataset. This seems to operate independently of critic reviews, as the three highest selling games did not receive scores above 90 from critics.
I think that there are factors, especially metadata about each game beyond the scope of information that was included in this dataset, that contributes to why games from Nintendo, and especially those that came out at the top of this dataset were considered top-performers by sales.
Three of the top four games- Wii Play, Mario Kart Wii, and Mario Kart DS- are titles that do not have a strong storyline for the player to follow. Rather, they are multiplayer games that are centered around gaming as a social aspect. With family or friends, players can compete on teams with or against each other. Because you are constantly playing with real people in a competitive environment, the gaming experience is kept dynamic and engaging, rather then relying on a progressing in a story line.
When considering what kinds of games are successful in the market, it may be helpful to consider whether a game is player-versus-player (PVP) or player-vs-everyone (PVE). Wii Play, Mario Kart Wii, and Mario Kart DS, are examples of PVP games, that is, players do not play by the themselves against computers, but rather against other real players, and these kinds of games inherently carry with them a competitive aspect. In terms of motivation, players are motivated to constantly return to the game in order to hone their skills in the game. In many PVE games, players are instead motivated by the desire to progress in the game itself.
The other game that was represented in the top-performing game, despite not having the same PVP quality as the others, was New Super Mario Bros. I think the reason that this title in particular was so successful is because of its recognisability. Just the name Mario in the gaming sphere is already enough for people, gamer or not, to have a mental image of what the game will entail. As a game that has had many remakes and interations, I think that this game's successful largely comes from its capacity to combine the nostalgia of players with the refreshing nature of a game remake or sequel. A game beloved by many, the Super Mario series of games is one that people are invested in because of their emotional attatchment to the games and characters.
When it comes to learning, motivation is a crucial part of pedagogy. In both the conventional sense and in the realm of possibly gamifying learning, I think that it would be helpful to incoroporate a healthy amount of competition, whether it be against the self or against others. I think it is also important for students to have the ability to engage with other students as well, as this social aspect to learning and gaming is something that motivates students additionally.
## **Nintendo: A Closer Look**
Looking at the top-performing games, it is clear to see that Nintendo has a firm grip on the gaming market when it comes to sales. As such, I would like to examine just what about these games makes them so desirable to players, and as such I would like to look to Nintendo themselves to see how they would market and describe these games.
```
#hide
from wordcloud import WordCloud, ImageColorGenerator
from PIL import Image
import matplotlib.pyplot as plt
#hide
# Stop-word list for the word clouds below.
# NOTE(review): `punctuation` and `stopwords` are not imported in this cell —
# presumably string.punctuation and nltk.corpus.stopwords from an earlier cell; confirm.
myStopWords = list(punctuation) + stopwords.words('english')
#hide
super_mario_describe = '''
Bowser has taken over the Mushroom Kingdom, and it's up to Mario to put an end to his sinister reign! Battle Bowser's vile henchmen through 32 levels in the Original 1985 game mode. Move on to collecting special Red Coins and Yoshi Eggs in Challenge mode. Then, try to unlock a secret mode that's waiting to be found by super players like you! Every mode will give you the chance to beat your own score, and there's a lot more to do than just saving a princess. So get ready for a brick-smashin', pipe-warpin', turtle-stompin' good time!
Mario™ and Luigi™ star in their first ever Mushroom Kingdom adventure! Find out why Super Mario Bros. is instantly recognizable to millions of people across the globe, and what made it the best-selling game in the world for three decades straight. Jump over obstacles, grab coins, kick shells, and throw fireballs through eight action-packed worlds in this iconic NES classic. Only you and the Mario Bros. can rescue Princess Toadstool from the clutches of the evil Bowser.
Pick up items and throw them at your adversaries to clear levels in seven fantastical worlds. Even enemies can be picked up and tossed across the screen. Each character has a unique set of abilities: Luigi can jump higher and farther than any of the other characters, Toad can dig extremely fast and pull items out of the ground quicker than anyone, and the princess is the only one who can jump and hover temporarily. This unique installment in the Mario series will keep you coming back for more!
Relive the classic that brought renowned power-ups such as the Tanooki Suit to the world of Super Mario Bros.!
Bowser™ and the Koopalings are causing chaos yet again, but this time they’re going beyond the Mushroom Kingdom into the seven worlds that neighbor it. Now Mario™ and Luigi™ must battle a variety of enemies, including a Koopaling in each unique and distinctive world, on their way to ultimately taking on Bowser himself. Lucky for the brothers, they have more power-ups available than ever before. Fly above the action using the Super Leaf, swim faster by donning the Frog Suit, or defeat enemies using the Hammer Bros. Suit. Use the brand-new overworld map to take the chance to play a minigame in hopes of gaining extra lives or to find a Toad’s House where you can pick up additional items. All this (and more) combines into one of gaming’s most well-known and beloved titles—are you ready to experience gaming bliss?
'''
#hide-input
# Word cloud built from Nintendo's official Super Mario game descriptions.
wc = WordCloud().generate_from_text(super_mario_describe)
#Use matplotlib.pyplot to display the fitted wordcloud
#Turn axis off to get rid of axis numbers
plt.imshow(wc)
plt.axis('off')
plt.show()
#hide
mario_kart_describe = '''
Select one of eight characters from the Mario™ series—offering a variety of driving styles—and take on three championship cups in three different kart classes. Win enough, and you'll unlock a fourth circuit: the ultra-tough Special Cup. Crossing the finish line in first place isn't an easy task, though, as each track has unique obstacles to conquer and racers can obtain special power-ups that boost them to victory. With more than 15 tracks to master and nearly endless replay value, Super Mario Kart is classic gaming…with some banana peels thrown in for good measure!
The newest installment of the fan-favorite Mario Kart™ franchise brings Mushroom Kingdom racing fun into glorious 3D. For the first time, drivers explore new competitive kart possibilities, such as soaring through the skies or plunging into the depths of the sea. New courses, strategic new abilities and customizable karts bring the racing excitement to new heights.
FEATURES:
The Mario Kart franchise continues to evolve. New kart abilities add to the wild fun that the games are known for. On big jumps, a kart deploys a wing to let it glide over the track shortcut. When underwater, a propeller pops out to help the kart cruise across the sea floor.
Players can show their own style by customizing their vehicles with accessories that give them a competitive advantage. For instance, giant tires help a kart drive off-road, while smaller tires accelerate quickly on paved courses.
People can choose to race as one of their favorite Mushroom Kingdom characters or even as their Mii™ character.
New courses take players on wild rides over mountains, on city streets and through a dusty desert. Nintendo fans will recognize new courses on Wuhu Island and in the jungles from Donkey Kong Country™ Returns.
The game supports both SpotPass™ and StreetPass™ features.
Players can compete in local wireless matches or online over a broadband Internet connection.
The newest installment of the fan-favorite Mario Kart™ franchise brings Mushroom Kingdom racing fun into glorious 3D. For the first time, drivers explore new competitive kart possibilities, such as soaring through the skies or plunging into the depths of the sea. New courses, strategic new abilities and customizable karts bring the racing excitement to new heights.
FEATURES:
The Mario Kart franchise continues to evolve. New kart abilities add to the wild fun that the games are known for. On big jumps, a kart deploys a wing to let it glide over the track shortcut. When underwater, a propeller pops out to help the kart cruise across the sea floor.
Players can show their own style by customizing their vehicles with accessories that give them a competitive advantage. For instance, giant tires help a kart drive off-road, while smaller tires accelerate quickly on paved courses.
People can choose to race as one of their favorite Mushroom Kingdom characters or even as their Mii™ character.
New courses take players on wild rides over mountains, on city streets and through a dusty desert. Nintendo fans will recognize new courses on Wuhu Island and in the jungles from Donkey Kong Country™ Returns.
The game supports both SpotPass™ and StreetPass™ features.
Players can compete in local wireless matches or online over a broadband Internet connection.
'''
#hide-input
# Word cloud built from Nintendo's official Mario Kart game descriptions.
wc2 = WordCloud().generate_from_text(mario_kart_describe)
#Use matplotlib.pyplot to display the fitted wordcloud
#Turn axis off to get rid of axis numbers
plt.imshow(wc2)
plt.axis('off')
plt.show()
#hide
smash_bros_describe = '''
Super Smash Bros. for Nintendo 3DS is the first portable entry in the renowned series, in which game worlds collide. Up to four players battle each other locally or online using some of Nintendo’s most well-known and iconic characters across beautifully designed stages inspired by classic portable Nintendo games. It’s a genuine, massive Super Smash Bros. experience that’s available to play on the go, anytime, anywhere.
FEATURES:
Smash and crash through “Smash Run” mode, a new mode exclusive to the Nintendo 3DS version that gives up to four players five minutes to fight solo through a huge battlefield while taking down recognizable enemies from almost every major Nintendo franchise and multiple third-party partners. Defeated enemies leave behind power-ups to collect. Players who collect more power-ups have an advantage once time runs out and the battle with opponents begins.
Compete with classic characters from the Super Smash Bros. series like Mario, Link, Samus and Pikachu, along with new challengers like Mega Man, Little Mac and newly announced Palutena, the Goddess of Light from the Kid Icarus games. For the first time players can even compete as their own Mii characters.
Customize different aspects of your character when playing locally or online with friends in a variety of multiplayer modes.
View most elements of the high-energy action at silky-smooth 60 frames per second and in eye-popping stereoscopic 3D.
Fight against friends and family locally or online, or battle random challengers all over the world online in “For Fun” or “For Glory” modes.
Gaming icons clash in the ultimate brawl you can play anytime, anywhere! Smash rivals off the stage as new characters Simon Belmont and King K. Rool join Inkling, Ridley, and every fighter in Super Smash Bros. history. Enjoy enhanced speed and combat at new stages based on the Castlevania series, Super Mario Odyssey, and more!
Having trouble choosing a stage? Then select the Stage Morph option to transform one stage into another while battling—a series first! Plus, new echo fighters Dark Samus, Richter Belmont, and Chrom join the battle. Whether you play locally or online, savor the faster combat, new attacks, and new defensive options, like a perfect shield. Jam out to 900 different music compositions and go 1-on-1 with a friend, hold a 4-player free-for-all, kick it up to 8-player battles and more! Feel free to bust out your GameCube controllers—legendary couch competitions await—or play together anytime, anywhere!
'''
#hide-input
# Word cloud built from Nintendo's official Super Smash Bros. descriptions.
wc3 = WordCloud().generate_from_text(smash_bros_describe)
#Use matplotlib.pyplot to display the fitted wordcloud
#Turn axis off to get rid of axis numbers
plt.imshow(wc3)
plt.axis('off')
plt.show()
```
### It's Mario's World and We're Just Playing in It
After creating word clouds from Nintendo's descriptions of its highest selling titles from 2004-2010, there are some recurring themes that we see when Nintendo describes its games to players and potential customers. Words unique to the game, such as "stage," "kart", and "world" are combined with descriptors such as "new," "fun," and "unique," as well as familiar terms such as "Nintendo," "Mario," and "Bowser," to create a sense that the player will be buying into a refreshing, updated, and modernized version of a product that they know and love. I think that much of Nintendo's success in the gaming market comes from the so-called empire that it has created both with its consistency of creating modern versions of its classic titles and capitalizing off of the nostalgia for these titles as well.
For developers that are not Nintendo, I think that it is important to create characters that people will love, and to create a universe around these characters, incorporating them into different games and genres. While Mario is one character that has definitely become a poster-child for Nintendo, I think that other characters such as Link and Zelda, or the Pokemon franchise in general, have also achieved a similar status of recognizability for the company, and would likely be top-performing games in a more modern dataset.
## **Conclusion**
Through conducting this analysis of the video games dataset from CORGIS, I was able to learn a lot about the market in general, and what makes a "successful" game. My findings contrasted with my expectations, but I was able to come to conclusions that I believe would be helpful for both game developers and my own interests in gamifying learning.
In my exploration of both this project, and the course Digital Humanities 140, I learned many Python tools and became more comfortable working with new libraries as well as datasets. Although I used pandas for the majority of my analysis, the two libraries that I found helpful as well were seaborn and wordcloud for data visualization. Seaborn allowed me to combine aesthetic graphical information with statistical information, and wordcloud allowed me to create easy-to-understand visualizations, both of which reminded me of the importance of being able to tell a story with your data.
In the future, it would be fascinating to conduct a similar study with the modern video game market. Nowadays, gaming has expanded to PC and mobile platforms, which were not represented in the CORGIS dataset. Additionally, many games are now free-to-play, so I think the metrics that are used for success may be a bit different than they were in my investigation. With the rise of e-sports and streaming, gaming is consumed in ways outside of simply playing the game, and has become a form of entertainment that is similar to movies, sports, and YouTube.
I would like to acknowledge Professor Winjum for his dedication to instruction this quarter, and his continual understanding. Thank you!
|
github_jupyter
|
```
# Notebook setup: stdlib, analysis/plotting stack, then project-local modules.
import os
import pandas as pd
import numpy as np
import json
import pickle
from collections import defaultdict
from pathlib import Path
from statistics import mean, stdev
from sklearn.metrics import ndcg_score, dcg_score
import matplotlib.pyplot as plt
import seaborn as sns
import torch
import os, sys
# Make the repository root importable so the `src` package resolves when the
# notebook runs from a subdirectory.
parentPath = os.path.abspath("..")
if parentPath not in sys.path:
    sys.path.insert(0, parentPath)
from src.data import load_source
from src.config import Config, get_option_fallback
from src.path import get_best_model_paths, get_exp_paths, get_report_path, load_json, load_rep_cfg, get_exp_names
from src.trainer import Trainer
# projectdir = Path('/code')
projectdir = Path('..')  # repository root, relative to this notebook
assert projectdir.exists()
```
# Common Functions
```
def summarize_test_res(rep, folds=5):
    """Print a one-line, tab-separated summary of one experiment report.

    Reports, per experiment: mean±sd of the best epoch and validation AUC
    across the numbered folds, mean±sd test AUC, the pooled Good:Bad RPsoft
    counts, and the test AUC of the pooled ('all') run.

    rep: nested report dict with 'config', 'best' and 'indicator' sections.
    folds: number of cross-validation folds recorded under string keys '0'..'folds-1'.
    """
    def fold_series(per_fold):
        # Collect the numbered-fold entries into a Series for mean/std.
        return pd.Series([per_fold[str(i)] for i in range(folds)])

    epochs = fold_series(rep['best']['auc_epoch'])
    valid_auc = fold_series(rep['best']['auc'])
    test_auc = pd.Series([rep['indicator']['test_auc'][str(i)][0] for i in range(folds)])
    rp = rep['indicator']['RPsoft']['all']
    pooled_test = rep['indicator']['test_auc']['all'][0]

    segments = [
        f"{rep['config']['exp_name']}:",
        f'Best epoch at {epochs.mean():>6.1f}±{epochs.std():<5.1f}',
        f'Valid AUC: {valid_auc.mean()*100:.4f}±{valid_auc.std()*100:.4f}',
        f'Test AUC: {test_auc.mean()*100:.4f}±{test_auc.std()*100:.4f}',
        f'Good:Bad = {rp["good"]}:{rp["bad"]}',
        f'All Test AUC: {pooled_test*100:.4f}',
    ]
    print('\t'.join(segments))
def show_valid_lc(name, idclist_dic, idc='eval_auc'):
    """Plot one pooled-across-folds validation learning curve on the current axes.

    name: legend label for the curve.
    idclist_dic: indicator dict of {indicator_name: {fold_key: list of values}};
        the 'epoch' entry includes an extra 'all' key alongside the numbered folds.
    idc: which indicator to plot (e.g. 'eval_auc', 'eval_loss').
    """
    # Folds may have trained for different numbers of epochs; truncate every
    # series to the shortest so x and y stay aligned.
    min_len = min([len(_x) for _x in idclist_dic['epoch'].values()])
    # Tile fold-0's epoch axis once per fold (list repetition); the `-1`
    # drops the pooled 'all' entry from the count.
    # NOTE(review): x/y lengths only match if idclist_dic[idc] has no 'all'
    # entry of its own — confirm against the report writer.
    x = idclist_dic['epoch']['0'][:min_len] * (len(idclist_dic['epoch']) -1) # exclude 'all'
    y = []
    for _y in idclist_dic[idc].values():
        y += _y[:min_len]
    # Seaborn aggregates the repeated x positions into a mean curve with a band.
    sns.lineplot(x=x, y=y, label=name)
    plt.title(idc)
def summarize_results(config_name, folds=5):
    """Aggregate test metrics of every experiment under one config into a DataFrame.

    config_name: experiment-config directory name whose reports are loaded.
    folds: number of cross-validation folds recorded in each report; std
        columns are NaN when folds == 1 (stdev needs at least two values).

    Returns a pd.DataFrame with one row per experiment and columns
    dataset/model/auc/auc_std/r1_good/r1_goodbad/r2/r2_std.
    """
    report_paths = [get_report_path(projectdir, config_name, e) for e in get_exp_names(projectdir, config_name)]
    reports = [load_json(r) for r in report_paths]
    columns = ['dataset', 'model', 'auc', 'auc_std', 'r1_good', 'r1_goodbad', 'r2', 'r2_std']
    rows = []
    for r in reports:
        # First entry of each fold's test_auc list is the AUC value.
        fold_aucs = [r['indicator']['test_auc'][str(i)][0] for i in range(folds)]
        rpsoft = r['indicator']['RPsoft']['all']
        rphard = r['indicator']['RPhard']['all']
        rows.append({
            'dataset': r['config']['config_name'],
            'model': r['config']['exp_name'],
            'auc': mean(fold_aucs),
            'auc_std': stdev(fold_aucs) if folds > 1 else np.nan,
            'r1_good': rpsoft['good'],
            'r1_goodbad': rpsoft['good'] + rpsoft['bad'],
            'r2': mean(rphard),
            'r2_std': stdev(rphard)
        })
    # DataFrame.append was removed in pandas 2.0; build the frame in one shot
    # from the accumulated rows instead (also avoids O(n^2) re-allocation).
    return pd.DataFrame(rows, columns=columns)
```
# Summary
## AUC table
```
# AUC summary tables, one per dataset (Simulated-5 has a single fold).
summarize_results('20_0310_edm2020_assist09')
summarize_results('20_0310_edm2020_assist15')
summarize_results('20_0310_edm2020_synthetic', folds=1)
summarize_results('20_0310_edm2020_statics')
# Same tables rendered as LaTeX for the paper.
print(summarize_results('20_0310_edm2020_assist09').to_latex())
print(summarize_results('20_0310_edm2020_assist15').to_latex())
print(summarize_results('20_0310_edm2020_synthetic', folds=1).to_latex())
print(summarize_results('20_0310_edm2020_statics').to_latex())
```
## NDCG distplot
```
def ndcg_distplot(config_name, ax, idx, label_names, bins=20):
    """Draw NDCG-score (RPhard) distributions for one dataset config on `ax`.

    Only experiments pre-trained for 0 or 10 epochs are drawn.  `idx` is the
    subplot position in the shared figure: position 0 gets the y-label,
    position 3 gets the legend.
    """
    report_paths = [get_report_path(projectdir, config_name, e) for e in get_exp_names(projectdir, config_name)]
    reports = [load_json(r) for r in report_paths]
    for rep in reports:
        if rep['config']['pre_dummy_epoch_size'] not in {0, 10}:
            continue
        r = rep['indicator']['RPhard']['all']
        name = rep['config']['exp_name']
        # NOTE(review): sns.distplot is deprecated/removed in newer seaborn
        # releases (histplot/displot replace it) — confirm the pinned version.
        sns.distplot(r, ax=ax,bins=bins, label=label_names[name], kde_kws={'clip': (0.0, 1.0)})
    ax.set_xlabel('NDCG score')
    if idx == 0:
        ax.set_ylabel('frequency')
    if idx == 3:
        ax.legend()
    ax.set_title(label_names[config_name])
    ax.set_xlim([0.59, 1.01])
    ax.title.set_fontsize(18)
    ax.xaxis.label.set_fontsize(14)
    ax.yaxis.label.set_fontsize(14)
# 1x4 panel of NDCG distributions, one dataset per subplot, shared y-axis.
fig, axs = plt.subplots(1, 4, sharey=True, figsize=(4*4,3))
# plt.subplots_adjust(hspace=0.3)
fig.subplots_adjust(hspace=.1, wspace=.16)
# Maps both config names (used as subplot titles) and experiment names
# (used as legend labels) to human-readable text.
label_names = {
    '20_0310_edm2020_assist09' : 'ASSISTment 2009',
    '20_0310_edm2020_assist15' : 'ASSISTment 2015',
    '20_0310_edm2020_synthetic': 'Simulated-5',
    '20_0310_edm2020_statics' : 'Statics 2011',
    'pre_dummy_epoch_size10.auto': 'pre-train 10 epochs',
    'pre_dummy_epoch_size0.auto': 'pre-train 0 epoch',
}
ndcg_distplot('20_0310_edm2020_assist09' , ax=axs[0], idx=0, label_names=label_names)
ndcg_distplot('20_0310_edm2020_assist15' , ax=axs[1], idx=1, label_names=label_names)
ndcg_distplot('20_0310_edm2020_synthetic', ax=axs[2], idx=2, label_names=label_names)
ndcg_distplot('20_0310_edm2020_statics' , ax=axs[3], idx=3, label_names=label_names)
```
## Learning curve
```
def lc_plot(config_name, ax, idx, label_names):
    """Draw validation-AUC learning curves for one dataset config on `ax`.

    Only experiments pre-trained for 0 or 10 epochs are drawn; folds are
    pooled so seaborn plots a mean curve with an sd band.  `idx` 0 gets the
    y-label, `idx` 3 keeps its legend; all other positions have it removed.
    """
    report_paths = [get_report_path(projectdir, config_name, e) for e in get_exp_names(projectdir, config_name)]
    reports = [load_json(r) for r in report_paths]
    for r in reports:
        if r['config']['pre_dummy_epoch_size'] not in {0, 10}:
            continue
        idclist_dic = r['indicator']
        idc = 'eval_auc'
        # Truncate every fold to the shortest run so x and y align.
        min_len = min([len(_x) for _x in idclist_dic['epoch'].values()])
        x = idclist_dic['epoch']['0'][:min_len] * (len(idclist_dic['epoch']) -1) # exclude 'all'
        y = []
        for _y in idclist_dic[idc].values():
            y += _y[:min_len]
        # ci='sd': shaded band is the standard deviation across folds.
        sns.lineplot(x=x, y=y, ax=ax, label=label_names[r['config']['exp_name']], ci='sd')
    ax.set_xlabel('epoch')
    if idx == 0:
        ax.set_ylabel('AUC')
    if idx == 3:
        ax.legend()
    else:
        ax.get_legend().remove()
    ax.set_title(label_names[config_name])
    ax.title.set_fontsize(18)
    ax.xaxis.label.set_fontsize(14)
    ax.yaxis.label.set_fontsize(14)
# 1x4 panel of learning curves, one dataset per subplot (independent y-axes).
fig, axs = plt.subplots(1, 4, sharey=False, figsize=(4*4,3))
# plt.subplots_adjust(hspace=0.3)
fig.subplots_adjust(hspace=.1, wspace=.16)
# Same label map as the NDCG figure: config names -> titles, exp names -> legend.
label_names = {
    '20_0310_edm2020_assist09' : 'ASSISTment 2009',
    '20_0310_edm2020_assist15' : 'ASSISTment 2015',
    '20_0310_edm2020_synthetic': 'Simulated-5',
    '20_0310_edm2020_statics' : 'Statics 2011',
    'pre_dummy_epoch_size10.auto': 'pre-train 10 epochs',
    'pre_dummy_epoch_size0.auto': 'pre-train 0 epoch',
}
lc_plot('20_0310_edm2020_assist09' , ax=axs[0], idx=0, label_names=label_names)
lc_plot('20_0310_edm2020_assist15' , ax=axs[1], idx=1, label_names=label_names)
lc_plot('20_0310_edm2020_synthetic', ax=axs[2], idx=2, label_names=label_names)
lc_plot('20_0310_edm2020_statics' , ax=axs[3], idx=3, label_names=label_names)
plt.show()
```
# `20_0310_edm2020_assist09`
## Simulated curve
```
# Load assist09 reports, keep only pre-train sizes {0, 10}, order them by
# pre-train size, and relabel the experiments for plotting.
config_name = '20_0310_edm2020_assist09'
report_list = []
# NOTE(review): get_report_path is called here with two args and fed exp
# *paths*, unlike the three-arg (projectdir, config_name, exp_name) form used
# above — confirm both call signatures are supported.
for r in sorted([load_json(get_report_path(projectdir, e)) for e in get_exp_paths(projectdir, config_name)], key=lambda x: x['config']['pre_dummy_epoch_size']):
    if r['config']['pre_dummy_epoch_size'] not in {0, 10}:
        continue
    r['config']['exp_name'] = f"DKT pre {r['config']['pre_dummy_epoch_size']}"
    report_list.append(r)
# Echo the resulting experiment labels (notebook cell output).
[r['config']['exp_name'] for r in report_list]
def get_simu_res(report_dic):
    """Return the pooled ('all') simulated-prediction results from a report dict."""
    indicators = report_dic['indicator']
    return indicators['simu_pred']['all']
simures_list = []
for r in report_list:
    simu_res = get_simu_res(r)
    simures_list.append(simu_res)
# Use the first report as baseline; sort its KCs by overall trend
# (last prediction minus first), ascending, so the most-decreasing come first.
base_idx = 0
base_res = {k:v for k, v in sorted(simures_list[base_idx].items(), key=lambda it: it[1][1][-1] - it[1][1][0])}
# Re-key every other report's results in the baseline's KC order.
descres_list = []
for i, simu_res in enumerate(simures_list):
    if i == base_idx:
        continue
    desc_res = {k:simu_res[k] for k in base_res.keys()}
    descres_list.append(desc_res)
# Grid of per-KC simulated-prediction curves, 8 per row.
n_skills = report_list[base_idx]['config']['n_skills']
h, w = (n_skills+7)//8, 8
figscale = 2.5
hspace = 0.35
fig, axs = plt.subplots(h, w, figsize=(w*figscale, h*figscale))
plt.subplots_adjust(hspace=hspace)
for i, (v, (xidx, sanity)) in enumerate(list(base_res.items())[:h*w]):
    ax = axs[i//(w), i%(w)]
    ax.set_ylim([0, 1])
    ax.set_title('KC{}'.format(v))
    sns.lineplot(xidx, sanity, ax=ax, label='base', palette="ch:2.5,.25")
    # NOTE(review): the inner loop reuses `i`, shadowing the subplot index
    # within this iteration (harmless here, but easy to trip over).
    for i, desc_res in enumerate(descres_list):
        sns.lineplot(xidx, desc_res[v][1], ax=ax, label=str(i+1), palette="ch:2.5,.25")
    ax.get_legend().remove()
# One shared legend taken from the last axis drawn.
handles, labels = ax.get_legend_handles_labels()
fig.legend(handles, labels, loc='upper center')
plt.show()
```
## Single ones
```
def plot_single(kc):
    """Plot the baseline simulated curve for a single knowledge component."""
    x, y = base_res[str(kc)]
    sns.lineplot(x=x, y=y)
plot_single(78)
# Three selected KCs side by side for the paper figure.
f, axs = plt.subplots(1, 3, sharey=True, figsize=(12,3))
f.tight_layout()
for i, (kc, ax) in enumerate(zip([30, 83, 98], axs)):
    ax.set_ylim([0, 1])
    x, y = base_res[str(kc)]
    sns.lineplot(x=x, y=y, ax=ax)
    ax.set_title(f'KC{kc}')
    ax.set_ylabel('predicted accuracy')
    ax.set_xlabel('$k$\n({})'.format(['a','b','c'][i]))
plt.show()
```
## NDCG
```
# NDCG (RPhard) distributions plus mean±sd printed per experiment.
for rep in report_list:
    r = rep['indicator']['RPhard']['all']
    name = rep['config']['exp_name']
    sns.distplot(r, bins=10, label=name, kde_kws={'clip': (0.0, 1.0)})
    print(f'{name:<20s}\t{mean(r):.4f}±{stdev(r):.4f}')
plt.legend()
plt.show()
# RPsoft: good vs bad counts per experiment.
for rep in report_list:
    r = rep['indicator']['RPsoft']['all']
    name = rep['config']['exp_name']
    print(f'{name:<20s}\tGood:Bad = {r["good"]}:{r["bad"]}')
```
## Learning curve
```
# Pooled validation AUC and loss curves for every experiment.
for r in report_list:
    show_valid_lc(r['config']['exp_name'], r['indicator'])
plt.show()
for r in report_list:
    show_valid_lc(r['config']['exp_name'], r['indicator'], idc='eval_loss')
plt.show()
```
## Test AUC
```
# One summary line per experiment.
for r in report_list:
    summarize_test_res(r)
```
# `Debug`
## Simulated curve
```
# Debug replay of the simulated-curve preprocessing with base_idx = 0.
def get_simu_res(report_dic):
    """Return the pooled ('all') simulated predictions from a report dict."""
    # NOTE(review): verbatim redefinition of the earlier helper.
    return report_dic['indicator']['simu_pred']['all']
simures_list = []
for r in report_list:
    simu_res = get_simu_res(r)
    simures_list.append(simu_res)
base_idx = 0
# Sort baseline KCs by overall trend (last minus first prediction).
base_res = {k:v for k, v in sorted(simures_list[base_idx].items(), key=lambda it: it[1][1][-1] - it[1][1][0])}
descres_list = []
for i, simu_res in enumerate(simures_list):
    if i == base_idx:
        continue
    desc_res = {k:simu_res[k] for k in base_res.keys()}
    descres_list.append(desc_res)
```
## NDCG
```
# RPsoft counts and validation learning curves.
for rep in report_list:
    r = rep['indicator']['RPsoft']['all']
    name = rep['config']['exp_name']
    print(f'{name:<20s}\tGood:Bad = {r["good"]}:{r["bad"]}')
for r in report_list:
    show_valid_lc(r['config']['exp_name'], r['indicator'])
plt.show()
```
## Test AUC
## Simulated curve
```
# Same preprocessing again, now with the second report as baseline.
def get_simu_res(report_dic):
    """Return the pooled ('all') simulated predictions from a report dict."""
    return report_dic['indicator']['simu_pred']['all']
simures_list = []
for r in report_list:
    simu_res = get_simu_res(r)
    simures_list.append(simu_res)
base_idx = 1
base_res = {k:v for k, v in sorted(simures_list[base_idx].items(), key=lambda it: it[1][1][-1] - it[1][1][0])}
descres_list = []
for i, simu_res in enumerate(simures_list):
    if i == base_idx:
        continue
    desc_res = {k:simu_res[k] for k in base_res.keys()}
    descres_list.append(desc_res)
```
## NDCG
```
# RPsoft good/bad counts per experiment (debug section).
for rep in report_list:
    r = rep['indicator']['RPsoft']['all']
    name = rep['config']['exp_name']
    print(f'{name:<20s}\tGood:Bad = {r["good"]}:{r["bad"]}')
```
## Learning curve
```
# Pooled validation AUC curves per experiment.
for r in report_list:
    show_valid_lc(r['config']['exp_name'], r['indicator'])
plt.show()
```
## Test AUC
```
def summarize_test_res(rep):
    """Print one summary line for a report (debug variant, folds fixed at 5).

    NOTE(review): this shadows the earlier summarize_test_res(rep, folds=5)
    and drops the Good:Bad and per-fold test-AUC columns; later cells that
    call summarize_test_res(r, folds=1) would fail against this definition.
    """
    print(rep['config']['exp_name'], end=':\t')
    s = pd.Series([rep['best']['auc_epoch'][str(i)] for i in range(5)])
    print(f'Best epoch at {s.mean():>6.1f}±{s.std():<5.1f}', end='\t')
    s = pd.Series([rep['best']['auc'][str(i)] for i in range(5)])
    print(f'Valid AUC: {s.mean()*100:.4f}±{s.std()*100:.4f}', end='\t')
    s = rep['indicator']['test_auc']['all'][0]
    print(f'Test AUC: {s*100:.4f}')
for r in report_list:
    summarize_test_res(r)
```
# `20_0310_edm2020_synthetic`
## Simulated curve
```
# Load the synthetic-dataset reports ordered by pre-train size (all kept).
config_name = '20_0310_edm2020_synthetic'
report_list = []
for r in sorted([load_json(get_report_path(projectdir, e)) for e in get_exp_paths(projectdir, config_name)], key=lambda x: x['config']['pre_dummy_epoch_size']):
    report_list.append(r)
[r['config']['exp_name'] for r in report_list]
def get_simu_res(report_dic):
    """Return the pooled ('all') simulated predictions from a report dict."""
    return report_dic['indicator']['simu_pred']['all']
simures_list = []
for r in report_list:
    simu_res = get_simu_res(r)
    simures_list.append(simu_res)
base_idx = 0
# Sort baseline KCs by overall trend (last minus first prediction).
base_res = {k:v for k, v in sorted(simures_list[base_idx].items(), key=lambda it: it[1][1][-1] - it[1][1][0])}
descres_list = []
for i, simu_res in enumerate(simures_list):
    if i == base_idx:
        continue
    desc_res = {k:simu_res[k] for k in base_res.keys()}
    descres_list.append(desc_res)
# Grid of per-KC curves; each title flags whether the curve rose ('>') or fell ('<').
n_skills = report_list[base_idx]['config']['n_skills']
h, w = (n_skills+7)//8, 8
figscale = 2.5
hspace = 0.35
fig, axs = plt.subplots(h, w, figsize=(w*figscale, h*figscale))
plt.subplots_adjust(hspace=hspace)
for i, (v, (xidx, sanity)) in enumerate(list(base_res.items())[:h*w]):
    ax = axs[i//(w), i%(w)]
    ax.set_ylim([0, 1])
    ax.set_title('KC{} s{}0'.format(v, '>' if sanity[-1]>sanity[0] else '<'))
    sns.lineplot(xidx, sanity, ax=ax, label='base', palette="ch:2.5,.25")
    # NOTE(review): inner loop reuses `i`, shadowing the subplot index.
    for i, desc_res in enumerate(descres_list):
        sns.lineplot(xidx, desc_res[v][1], ax=ax, label=str(i+1), palette="ch:2.5,.25")
    ax.get_legend().remove()
handles, labels = ax.get_legend_handles_labels()
fig.legend(handles, labels, loc='upper center')
plt.show()
```
## NDCG
```
# NDCG distribution + mean±sd, then RPsoft counts, per experiment.
for rep in report_list:
    r = rep['indicator']['RPhard']['all']
    name = rep['config']['exp_name']
    sns.distplot(r, bins=10, label=name, kde_kws={'clip': (0.0, 1.0)})
    print(f'{name:<20s}\t{mean(r):.4f}±{stdev(r):.4f}')
plt.legend()
plt.show()
for rep in report_list:
    r = rep['indicator']['RPsoft']['all']
    name = rep['config']['exp_name']
    print(f'{name:<20s}\tGood:Bad = {r["good"]}:{r["bad"]}')
```
## Learning curve
```
# NOTE(review): the same eval_auc curve is plotted twice in a row here.
for r in report_list:
    show_valid_lc(r['config']['exp_name'], r['indicator'])
plt.show()
for r in report_list:
    show_valid_lc(r['config']['exp_name'], r['indicator'])
plt.show()
```
## Test AUC
```
# Loss curves plus per-report test summary (single fold for Simulated-5).
# NOTE(review): by this point summarize_test_res was redefined above without
# a `folds` parameter, so `folds=1` would raise if cells run top-to-bottom —
# confirm which definition is intended here.
for r in report_list:
    show_valid_lc(r['config']['exp_name'], r['indicator'], idc='eval_loss')
    summarize_test_res(r, folds=1)
plt.show()
```
## Test AUC
```
for r in report_list:
    show_valid_lc(r['config']['exp_name'], r['indicator'])
plt.show()
```
## Learning curve
```
# NOTE(review): header says "Learning curve" but this prints RPsoft counts.
for rep in report_list:
    r = rep['indicator']['RPsoft']['all']
    name = rep['config']['exp_name']
    print(f'{name:<20s}\tGood:Bad = {r["good"]}:{r["bad"]}')
```
# `20_0310_edm2020_assist15`
## Simulated curve
```
# NOTE(review): despite the section title, config_name is overridden to assist09.
# config_name = '20_0310_edm2020_assist15'
config_name = '20_0310_edm2020_assist09'
report_paths = [get_report_path(projectdir, config_name, e) for e in get_exp_names(projectdir, config_name)]
reports = [load_json(r) for r in report_paths]
# print([r['config']['exp_name'] for r in reports])
# =>['pre_dummy_epoch_size150.auto', 'pre_dummy_epoch_size10.auto', 'pre_dummy_epoch_size0.auto']
def get_simu_res(report_dic):
    """Return the pooled ('all') simulated predictions from a report dict."""
    return report_dic['indicator']['simu_pred']['all']
simures_list = []
for r in reports:
    if r['config']['pre_dummy_epoch_size'] not in {0, 10}:
        continue
    simu_res = get_simu_res(r)
    simures_list.append(simu_res)
# Given the order shown above, after filtering {0, 10} the list is [10, 0],
# so base_idx = 1 selects the pre-train-0 run as baseline.
base_idx = 1
base_res = {k:v for k, v in sorted(simures_list[base_idx].items(), key=lambda it: it[1][1][-1] - it[1][1][0])}
descres_list = []
for i, simu_res in enumerate(simures_list):
    if i == base_idx:
        continue
    desc_res = {k:simu_res[k] for k in base_res.keys()}
    descres_list.append(desc_res)
n_skills = reports[base_idx]['config']['n_skills']
# Fixed 4x8 grid (first 32 KCs) instead of covering every skill.
# h, w = (n_skills+7)//8, 8
h, w = 4, 8
figscale = 2.5
hspace = 0.35
fig, axs = plt.subplots(h, w, sharex=True, sharey=True, figsize=(w*figscale, h*figscale))
plt.subplots_adjust(hspace=0.20, wspace=0.05)
for i, (v, (xidx, sanity)) in enumerate(list(base_res.items())[:h*w]):
    ax = axs[i//(w), i%(w)]
    ax.set_ylim([0, 1])
    ax.set_title(f'KC{v}')
    sns.lineplot(xidx, sanity, ax=ax, label='pre-train 0', palette="ch:2.5,.25")
    for j, desc_res in enumerate(descres_list):
        sns.lineplot(xidx, desc_res[v][1], ax=ax, label=f'pre-train {[10,150][j]}', palette="ch:2.5,.25")
    # Keep the legend only on the final (32nd) subplot, then stop drawing.
    if i < 31:
        ax.get_legend().remove()
    else:
        ax.legend()
        break
# handles, labels = ax.get_legend_handles_labels()
# fig.legend(handles, labels, loc='upper center')
plt.show()
```
## NDCG
```
# NOTE(review): report_list still holds the synthetic reports from the earlier
# section; the assist09 `reports` loaded just above are not used below — confirm.
for rep in report_list:
    r = rep['indicator']['RPhard']['all']
    name = rep['config']['exp_name']
    sns.distplot(r, bins=20, label=name, kde_kws={'clip': (0.0, 1.0)})
    print(f'{name:<20s}\t{mean(r):.4f}±{stdev(r):.4f}')
plt.legend()
plt.show()
```
## Learning curve AUC
```
# AUC curves with per-report test summaries, then loss curves.
for r in report_list:
    show_valid_lc(r['config']['exp_name'], r['indicator'])
    summarize_test_res(r)
plt.show()
for r in report_list:
    show_valid_lc(r['config']['exp_name'], r['indicator'], idc='eval_loss')
plt.show()
```
|
github_jupyter
|
```
import numpy as np
import torch
from torch import nn, optim
import matplotlib.pyplot as plt
from neurodiffeq import diff
from neurodiffeq.ode import IVP, solve_system, Monitor, ExampleGenerator, Solution, _trial_solution
from neurodiffeq.networks import FCNN, SinActv
from scipy.special import roots_legendre
# Use float64 tensors everywhere for quadrature accuracy.
torch.set_default_tensor_type('torch.DoubleTensor')
# Solution domain and training hyper-parameters.
FROM, TO = 0., 5.
N_NODE = 8
QUADRATURE_DEGREE = 32
TRAIN_SIZE = 10 # the training set is not actually used
VALID_SIZE = 10
MAX_EPOCHS = 10000
# Gauss-Legendre nodes/weights on [-1, 1], reshaped into column vectors;
# requires_grad so the ODE residual can be differentiated at the nodes.
# NOTE(review): the standard affine map to [FROM, TO] is
# (FROM+TO)/2 + x*(TO-FROM)/2; this uses FROM + x*(TO-FROM), which places
# nodes in [-5, 5] for this domain — confirm this is intended.
q_points, q_weights = roots_legendre(QUADRATURE_DEGREE)
global_q_points = FROM + torch.tensor(q_points).reshape(-1, 1) * (TO-FROM)
global_q_points.requires_grad = True
global_q_weights = torch.tensor(q_weights).reshape(-1, 1)
def solve_system_quadrature(
ode_system, conditions, t_min, t_max,
single_net=None, nets=None, train_generator=None, shuffle=True, valid_generator=None,
optimizer=None, criterion=None, additional_loss_term=None, metrics=None, batch_size=16,
max_epochs=1000,
monitor=None, return_internal=False,
return_best=False,
):
########################################### subroutines ###########################################
def train(train_generator, net, nets, ode_system, conditions, criterion, additional_loss_term, shuffle, optimizer):
train_examples_t = train_generator.get_examples()
train_examples_t = train_examples_t.reshape((-1, 1))
n_examples_train = train_generator.size
idx = np.random.permutation(n_examples_train) if shuffle else np.arange(n_examples_train)
batch_start, batch_end = 0, batch_size
while batch_start < n_examples_train:
if batch_end > n_examples_train:
batch_end = n_examples_train
batch_idx = idx[batch_start:batch_end]
ts = train_examples_t[batch_idx]
train_loss_batch = calculate_loss(ts, net, nets, ode_system, conditions, criterion, additional_loss_term)
optimizer.zero_grad()
train_loss_batch.backward()
optimizer.step()
batch_start += batch_size
batch_end += batch_size
train_loss_epoch = calculate_loss(train_examples_t, net, nets, ode_system, conditions, criterion, additional_loss_term)
train_metrics_epoch = calculate_metrics(train_examples_t, net, nets, conditions, metrics)
return train_loss_epoch, train_metrics_epoch
def valid(valid_generator, net, nets, ode_system, conditions, criterion, additional_loss_term):
valid_examples_t = valid_generator.get_examples()
valid_examples_t = valid_examples_t.reshape((-1, 1))
valid_loss_epoch = calculate_loss(valid_examples_t, net, nets, ode_system, conditions, criterion, additional_loss_term)
valid_loss_epoch = valid_loss_epoch.item()
valid_metrics_epoch = calculate_metrics(valid_examples_t, net, nets, conditions, metrics)
return valid_loss_epoch, valid_metrics_epoch
# calculate the loss with Gaussian quadrature
# uses global variables, just for convenience
def calculate_loss(ts, net, nets, ode_system, conditions, criterion, additional_loss_term):
ts = global_q_points
ws = global_q_weights
us = _trial_solution(net, nets, ts, conditions)
Futs = ode_system(*us, ts)
loss = sum(
torch.sum(ws * Fut**2) for Fut in Futs
)
return loss
def calculate_metrics(ts, net, nets, conditions, metrics):
    """Evaluate each metric function on the trial solution at times ``ts``.

    :returns: dict mapping metric name -> float value.
    """
    us = _trial_solution(net, nets, ts, conditions)
    results = {}
    for name, fn in metrics.items():
        results[name] = fn(*us, ts).item()
    return results
###################################################################################################
if single_net and nets:
raise RuntimeError('Only one of net and nets should be specified')
# defaults to use a single neural network
if (not single_net) and (not nets):
single_net = FCNN(n_input_units=1, n_output_units=len(conditions), n_hidden_units=32, n_hidden_layers=1,
actv=nn.Tanh)
if single_net:
# mark the Conditions so that we know which condition correspond to which output unit
for ith, con in enumerate(conditions):
con.set_impose_on(ith)
if not train_generator:
if (t_min is None) or (t_max is None):
raise RuntimeError('Please specify t_min and t_max when train_generator is not specified')
train_generator = ExampleGenerator(32, t_min, t_max, method='equally-spaced-noisy')
if not valid_generator:
if (t_min is None) or (t_max is None):
raise RuntimeError('Please specify t_min and t_max when train_generator is not specified')
valid_generator = ExampleGenerator(32, t_min, t_max, method='equally-spaced')
if (not optimizer) and single_net: # using a single net
optimizer = optim.Adam(single_net.parameters(), lr=0.001)
if (not optimizer) and nets: # using multiple nets
all_parameters = []
for net in nets:
all_parameters += list(net.parameters())
optimizer = optim.Adam(all_parameters, lr=0.001)
if not criterion:
criterion = nn.MSELoss()
if metrics is None:
metrics = {}
history = {}
history['train_loss'] = []
history['valid_loss'] = []
for metric_name, _ in metrics.items():
history['train__' + metric_name] = []
history['valid__' + metric_name] = []
if return_best:
valid_loss_epoch_min = np.inf
solution_min = None
for epoch in range(max_epochs):
train_loss_epoch, train_metrics_epoch = train(train_generator, single_net, nets, ode_system, conditions, criterion, additional_loss_term, shuffle,
optimizer)
history['train_loss'].append(train_loss_epoch)
for metric_name, metric_value in train_metrics_epoch.items():
history['train__'+metric_name].append(metric_value)
valid_loss_epoch, valid_metrics_epoch = valid(valid_generator, single_net, nets, ode_system, conditions, criterion, additional_loss_term,)
history['valid_loss'].append(valid_loss_epoch)
for metric_name, metric_value in valid_metrics_epoch.items():
history['valid__'+metric_name].append(metric_value)
if monitor and epoch % monitor.check_every == 0:
monitor.check(single_net, nets, conditions, history)
if return_best and valid_loss_epoch < valid_loss_epoch_min:
valid_loss_epoch_min = valid_loss_epoch
solution_min = Solution(single_net, nets, conditions)
if return_best:
solution = solution_min
else:
solution = Solution(single_net, nets, conditions)
if return_internal:
internal = {
'single_net': single_net,
'nets': nets,
'conditions': conditions,
'train_generator': train_generator,
'valid_generator': valid_generator,
'optimizer': optimizer,
'criterion': criterion
}
return solution, history, internal
else:
return solution, history
# Solve the coupled system x' = -t*y, y' = t*x with x(0)=1, y(0)=0
# (per the rmse metric below, the exact solution is x = cos(t^2/2),
# y = sin(t^2/2)).
%matplotlib notebook
odes = lambda x, y, t : [diff(x, t) + t*y,
diff(y, t) - t*x]
# one initial-value condition per unknown function
ivps = [
    IVP(t_0=0., x_0=1.),
    IVP(t_0=0., x_0=0.)
]
# one network per unknown; sine activation suits an oscillatory solution
nets = [
    FCNN(n_hidden_units=N_NODE, n_hidden_layers=1, actv=SinActv),
    FCNN(n_hidden_units=N_NODE, n_hidden_layers=1, actv=SinActv)
]
# equally-spaced sample points on [FROM, TO] for training and validation
train_gen = ExampleGenerator(TRAIN_SIZE, t_min=FROM, t_max=TO, method='equally-spaced')
valid_gen = ExampleGenerator(VALID_SIZE, t_min=FROM, t_max=TO, method='equally-spaced')
def rmse(x, y, t):
    """Root-mean-square error of (x, y) against the closed-form solution
    x(t) = cos(t^2/2), y(t) = sin(t^2/2) of the coupled ODE system."""
    err_x = x - torch.cos(t ** 2 / 2)
    err_y = y - torch.sin(t ** 2 / 2)
    total_sq = torch.sum(err_x ** 2) + torch.sum(err_y ** 2)
    return torch.sqrt(total_sq / (len(x) + len(y)))
# Train with the Gaussian-quadrature loss; rmse against the exact solution
# is tracked as a metric and the Monitor plots progress every 100 epochs.
solution, _ = solve_system_quadrature(
    ode_system=odes,
    conditions=ivps,
    t_min=FROM, t_max=TO,
    nets=nets,
    train_generator=train_gen,
    valid_generator=valid_gen,
    batch_size=TRAIN_SIZE,
    max_epochs=MAX_EPOCHS,
    monitor=Monitor(t_min=FROM, t_max=TO, check_every=100),
    metrics={'rmse': rmse}
)
```
|
github_jupyter
|
# imports
```
import sys; sys.path.append(_dh[0].split("knowknow")[0])
from knowknow import *
```
# User settings
```
database_name = "sociology-wos"
# name -> publication-year lookup; only available for the WoS corpus, where
# create_cysum falls back to it for cited works without a parseable year
pubyears = None
if 'wos' in database_name:
    pubyears = load_variable("%s.pubyears" % database_name)
    print("Pubyears loaded for %s entries" % len(pubyears.keys()))
# citation counts after this year are treated as incomplete
RELIABLE_DATA_ENDS_HERE = 2019
if 'jstor' in database_name:
    RELIABLE_DATA_ENDS_HERE = 2010
import re
def create_cysum(cits, typ):
    """Build a per-cited-work summary from citation-year counts.

    :param cits: dict of counters. ``cits['c.fy']`` maps (cited work, year)
        -> citation count; ``cits['fy']`` (or ``cits['y']``) maps year ->
        total citations that year, used to normalize counts into proportions.
    :param typ: ``'wos'`` or ``'jstor'`` -- controls how the publication year
        is parsed out of the cited-work name. NOTE(review): any other value
        leaves ``res['pub']`` unset, which raises KeyError further down.
    :returns: dict mapping cited-work name -> summary dict with first/last
        citation years, peak years ('maxcounty'/'maxpropy'), totals, and
        several "death year" measures (death1/2/3/5).

    Reads module-level globals ``RELIABLE_DATA_ENDS_HERE`` and (for 'wos')
    ``pubyears``.
    """
    meta_counters = defaultdict(int)
    # cy[c][y] = number of citations work c received in year y
    cy = defaultdict(lambda:defaultdict(int))
    for (c,y),count in cits['c.fy'].items():
        cy[c][y] = count
    if 'fy' in cits:
        fyc = cits['fy']
    else:
        fyc = cits['y']
    cysum = {}
    for ci,c in enumerate(cy):
        meta_counters['at least one citation'] += 1
        count = cy[c]
        # share of each year's total citations that went to this work
        prop = {
            y: county / fyc[y]
            for y,county in count.items()
        }
        res = {
            'first': min(count),
            'last': max(count),
            # peak years by raw count / by proportion; ties broken toward the later year
            'maxcounty': max(count, key=lambda y:(count[y],y)),
            'maxpropy': max(count, key=lambda y:(prop[y],y))
        }
        res['maxprop'] = prop[ res['maxpropy'] ]
        res['maxcount'] = count[ res['maxcounty'] ]
        res['total'] = sum(count.values())
        res['totalprop'] = sum(prop.values())
        res['name'] = c
        # gotta do something here...
        res['type'] = 'article'
        if typ == 'wos':
            # WoS names look like "author|year|..."; a non-integer year marks a book
            sp = c.split("|")
            if len(sp) < 2:
                continue
            try:
                res['pub'] = int(sp[1])
                res['type'] = 'article'
            except ValueError:
                res['type'] = 'book'
                res['pub'] = pubyears[c]
        elif typ == 'jstor':
            # JSTOR names carry the year in parentheses, e.g. "Title (1995)"
            inparens = re.findall(r'\(([^)]+)\)', c)[0]
            res['pub'] = int(inparens)
        # DEFINING DEATH1
        # death1 is max, as long as it's before RELIABLE_DATA_ENDS_HERE
        res['death1'] = None
        if res['maxpropy'] <= RELIABLE_DATA_ENDS_HERE:
            res['death1'] = res['maxcounty']
        # DEFINING DEATH2
        # this list has an entry for each year after and including the maximum citations ever received (the last time)
        # look ahead to the next ten years and take the average
        next_year_sums = [
            (ycheck, sum( c for y,c in count.items() if ycheck + 10 >= y > ycheck ))
            for ycheck in range(res['maxcounty'], RELIABLE_DATA_ENDS_HERE - 10)
        ]
        # need to make sure ALL subsequent decade intervals are also less...
        my_death_year = None
        l = len(next_year_sums)
        for i in range(l):
            not_this_one = False
            for j in range(i,l):
                if next_year_sums[j][1] >= res['maxcount']:
                    not_this_one = True
                if not_this_one:
                    break
            if not_this_one:
                continue
            my_death_year = next_year_sums[i][0]
            break
        if not len(next_year_sums):
            res['death2'] = None
        else:
            res['death2'] = my_death_year
        # DEATH3 is last, as long as it's before RELIABLE_DATA_ENDS_HERE
        res['death3'] = None
        if res['last'] <= RELIABLE_DATA_ENDS_HERE:
            res['death3'] = res['last']
        # DEATH5
        # 90% of their citations were received before death5, and it's been at least 30% of their lifespan
        myspan = np.array( [cits['c.fy'][(c,ycheck)] for ycheck in range(1900, 2020)] )
        res['death5'] = None
        Ea = np.sum(myspan)
        csum = np.sum(myspan)
        # FIX: np.where returns a *tuple* of index arrays; the original wrapped
        # it in list(...), so len(nonzeroyears) was always 1, the emptiness
        # guard below never fired, and an all-zero span crashed later with an
        # undefined `firsti`. Index [0] extracts the actual index array.
        nonzeroyears = np.where(myspan > 0)[0]
        if not len(nonzeroyears):
            continue
        firsti = np.min(nonzeroyears)
        first_year = firsti + 1900
        for cci, cc in enumerate(myspan[firsti:]):
            this_year = first_year+cci
            # running residual...
            Ea -= cc
            # don't let them die too soon
            if cc == 0:
                continue
            if Ea/csum < 0.1 and (RELIABLE_DATA_ENDS_HERE - this_year)/(RELIABLE_DATA_ENDS_HERE - first_year) > 0.3:
                res['death5'] = this_year
                break
        if res['death2'] is not None and res['death2'] < res['pub']:
            meta_counters['death2 < pub!? dropped.'] += 1
            # small error catch
            continue
        #small error catch
        if res['maxpropy'] < res['pub']:
            meta_counters['maxpropy < pub!? dropped.'] += 1
            continue
        # don't care about those with only a single citation
        if res['total'] <= 1:
            meta_counters['literally 1 citation. dropped.'] += 1
            continue
        # we really don't care about those that never rise in use
        #if res['first'] == res['maxpropy']:
        #    continue
        meta_counters['passed tests pre-blacklist'] += 1
        cysum[c] = res
    blacklist = []
    for b in blacklist:
        if b in cysum:
            del cysum[b]
    todelete = []
    for c in todelete:
        if c in cysum:
            meta_counters['passed all other tests but was blacklisted'] += 1
            del cysum[c]
    print(dict(meta_counters))
    return cysum
OVERWRITE_EXISTING = True
print("Processing database '%s'"%database_name)
varname = "%s.cysum"%database_name
run = True # run
# skip recomputation if a saved result already exists and we aren't overwriting
if not OVERWRITE_EXISTING:
    try:
        load_variable(varname)
        run = False
    except FileNotFoundError:
        pass
if run:
    # citation counts keyed by (cited work, year) and by year
    cits = get_cnt("%s.doc"%database_name, ['c.fy','fy'])
    # the database name selects which year-parsing algorithm create_cysum uses
    if 'wos' in database_name and 'jstor' in database_name:
        raise Exception("Please put 'wos' or 'jstor' but not both in any database_name.")
    elif 'wos' in database_name:
        cysum = create_cysum(cits, 'wos')
    elif 'jstor' in database_name:
        cysum = create_cysum(cits, 'jstor')
    else:
        raise Exception("Please include either 'wos' or 'jstor' in the name of the variable. This keys which data processing algorithm you used.")
    save_variable(varname, cysum)
    print("%s cysum entries for database '%s'" % (len(cysum), database_name))
```
# only necessary if you plan on filtering based on this set
```
save_variable("%s.included_citations"%database_name, set(cysum.keys()))
```
|
github_jupyter
|
# Classifying Fashion-MNIST
Now it's your turn to build and train a neural network. You'll be using the [Fashion-MNIST dataset](https://github.com/zalandoresearch/fashion-mnist), a drop-in replacement for the MNIST dataset. MNIST is actually quite trivial with neural networks where you can easily achieve better than 97% accuracy. Fashion-MNIST is a set of 28x28 greyscale images of clothes. It's more complex than MNIST, so it's a better representation of the actual performance of your network, and a better representation of datasets you'll use in the real world.
<img src='assets/fashion-mnist-sprite.png' width=500px>
In this notebook, you'll build your own neural network. For the most part, you could just copy and paste the code from Part 3, but you wouldn't be learning. It's important for you to write the code yourself and get it to work. Feel free to consult the previous notebook though as you work through this.
First off, let's load the dataset through torchvision.
```
import torch
from torchvision import datasets, transforms
import helper
# Define a transform to normalize the data.
# FashionMNIST images are single-channel greyscale, so Normalize takes
# one-element mean/std tuples -- the original 3-tuples only match RGB input
# and fail on these 1-channel tensors.
transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize((0.5,), (0.5,))])
# Download and load the training data
trainset = datasets.FashionMNIST('F_MNIST_data/', download=True, train=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)
# Download and load the test data
testset = datasets.FashionMNIST('F_MNIST_data/', download=True, train=False, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=True)
```
Here we can see one of the images.
```
image, label = next(iter(trainloader))
helper.imshow(image[0,:]);
```
With the data loaded, it's time to import the necessary packages.
```
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
import numpy as np
import time
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
from torchvision import datasets, transforms
import helper
```
## Building the network
Here you should define your network. As with MNIST, each image is 28x28 which is a total of 784 pixels, and there are 10 classes. You should include at least one hidden layer. We suggest you use ReLU activations for the layers and to return the logits from the forward pass. It's up to you how many layers you add and the size of those layers.
```
# TODO: Define your network architecture here
```
# Train the network
Now you should create your network and train it. First you'll want to define [the criterion](http://pytorch.org/docs/master/nn.html#loss-functions) ( something like `nn.CrossEntropyLoss`) and [the optimizer](http://pytorch.org/docs/master/optim.html) (typically `optim.SGD` or `optim.Adam`).
Then write the training code. Remember the training pass is a fairly straightforward process:
* Make a forward pass through the network to get the logits
* Use the logits to calculate the loss
* Perform a backward pass through the network with `loss.backward()` to calculate the gradients
* Take a step with the optimizer to update the weights
By adjusting the hyperparameters (hidden units, learning rate, etc), you should be able to get the training loss below 0.4.
```
# TODO: Create the network, define the criterion and optimizer
# TODO: Train the network here
# Test out your network!
dataiter = iter(testloader)
images, labels = dataiter.next()
img = images[0]
# Convert 2D image to 1D vector
img = img.resize_(1, 784)
# TODO: Calculate the class probabilities (softmax) for img
ps =
# Plot the image and probabilities
helper.view_classify(img.resize_(1, 28, 28), ps, version='Fashion')
```
Now that your network is trained, you'll want to save it to disk so you can load it later instead of training it again. Obviously, it's impractical to train a network every time you need one. In practice, you'll train it once, save the model, then reload it for further training or making predictions. In the next part, I'll show you how to save and load trained models.
|
github_jupyter
|
<center><em>Copyright by Pierian Data Inc.</em></center>
<center><em>For more information, visit us at <a href='http://www.pieriandata.com'>www.pieriandata.com</a></em></center>
# KNN Project Exercise
Due to the simplicity of KNN for Classification, let's focus on using a PipeLine and a GridSearchCV tool, since these skills can be generalized for any model.
## The Sonar Data
### Detecting a Rock or a Mine
Sonar (sound navigation ranging) is a technique that uses sound propagation (usually underwater, as in submarine navigation) to navigate, communicate with or detect objects on or under the surface of the water, such as other vessels.
<img src="sonar.jpg" style="max-height: 500px; max-width: 500px;">
The data set contains the response metrics for 60 separate sonar frequencies sent out against a known mine field (and known rocks). These frequencies are then labeled with the known object they were beaming the sound at (either a rock or a mine).
<img src="mine.jpg" style="max-height: 500px; max-width: 500px;">
Our main goal is to create a machine learning model capable of detecting the difference between a rock or a mine based on the response of the 60 separate sonar frequencies.
Data Source: https://archive.ics.uci.edu/ml/datasets/Connectionist+Bench+(Sonar,+Mines+vs.+Rocks)
### Complete the Tasks in bold
**TASK: Run the cells below to load the data.**
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
df = pd.read_csv('../Data/sonar.all-data.csv')
df.head()
```
## Data Exploration
```
df.info()
df.describe()
```
**TASK: Create a heatmap of the correlation between the difference frequency responses.**
```
plt.figure(figsize=(8,6))
sns.heatmap(df.corr(), cmap='coolwarm');
```
-----
**TASK: What are the top 5 correlated frequencies with the target\label?**
*Note: You may need to map the label to 0s and 1s.*
*Additional Note: We're looking for **absolute** correlation values.*
```
df['Label'].value_counts()
# As we can't find the correlation between numbers and label string, we need to map the label (Rock / Mine) to 0s and 1s
df['Target'] = df['Label'].map({'M': 1, 'R': 0})
df.head(1)
df.corr()['Target']
# get the highest 5 ones
np.absolute(df.corr()['Target'].sort_values(ascending=False))[:6]
#option 2
np.absolute(df.corr()['Target'].sort_values()).tail(6)
```
-------
## Train | Test Split
Our approach here will be one of using Cross Validation on 90% of the dataset, and then judging our results on a final test set of 10% to evaluate our model.
**TASK: Split the data into features and labels, and then split into a training set and test set, with 90% for Cross-Validation training, and 10% for a final test set.**
*Note: The solution uses a random_state=42*
```
from sklearn.model_selection import train_test_split
X = df.drop(['Label', 'Target'], axis=1)
y = df['Label']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.1, random_state=42)
```
----
**TASK: Create a PipeLine that contains both a StandardScaler and a KNN model**
```
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
scaler = StandardScaler()
knn = KNeighborsClassifier()
operations = [('scaler', scaler), ('knn', knn)]
from sklearn.pipeline import Pipeline
pipe = Pipeline(operations)
```
-----
**TASK: Perform a grid-search with the pipeline to test various values of k and report back the best performing parameters.**
```
from sklearn.model_selection import GridSearchCV
k_values = list(range(1, 30))
parameters = {'knn__n_neighbors': k_values}
full_cv_classifier = GridSearchCV(pipe, parameters, cv=5, scoring='accuracy')
full_cv_classifier.fit(X_train, y_train)
# check best estimator
full_cv_classifier.best_estimator_.get_params()
```
----
**(HARD) TASK: Using the .cv_results_ dictionary, see if you can create a plot of the mean test scores per K value.**
```
pd.DataFrame(full_cv_classifier.cv_results_).head()
mean_test_scores = full_cv_classifier.cv_results_['mean_test_score']
mean_test_scores
# plt.plot(k_values, mean_test_scores, marker='.', markersize=10)
plt.plot(k_values, mean_test_scores, 'o-')
plt.xlabel('K')
plt.ylabel('Mean Test Score / Accuracy');
```
----
### Final Model Evaluation
**TASK: Using the grid classifier object from the previous step, get a final performance classification report and confusion matrix.**
```
full_pred = full_cv_classifier.predict(X_test)
from sklearn.metrics import confusion_matrix, plot_confusion_matrix, classification_report
confusion_matrix(y_test, full_pred)
plot_confusion_matrix(full_cv_classifier, X_test, y_test);
```
**IMPORTANT:**
- As we can see from the confusion matrix, there are 1 False Positive and 1 False Negative.
- Although a False Positive (mistaking a Rock for a Mine) may not be dangerous, a False Negative (mistaking a Mine for a Rock) is extremely dangerous.
- So we may need to revisit the modelling to make sure there is no False Negative.
```
print(classification_report(y_test, full_pred))
```
### Great Job!
|
github_jupyter
|
# Testing
## Introduction
When programming, it is very important to know that the code we have written does what it was intended. Unfortunately, this step is often skipped in scientific programming, especially when developing code for our own personal work.
Researchers sometimes check that their code behaves correctly by manually running it on some sample data and inspecting the results. However, it is much better and safer to automate this process, so the tests can be run often -- perhaps even after each new commit! This not only reassures us that the code behaves as it should at any given moment, it also gives us more flexibility to change it, because we have a way of knowing when we have broken something by accident.
In this chapter, we will mostly look at how to write **unit tests**, which check the behaviour of small parts of our code. We will work with a particular framework for Python code, but the principles we discuss are general. We will also look at how to use a debugger to locate problems in our code, and services that simplify the automated running of tests.
### A few reasons not to do testing
Sensibility | Sense
------------------------------------ | -------------------------------------
**It's boring** | *Maybe*
**Code is just a one off throwaway** | *As with most research codes*
**No time for it** | *A bit more code, a lot less debugging*
**Tests can be buggy too** | *See above*
**Not a professional programmer** | *See above*
**Will do it later** | *See above*
### A few reasons to do testing
* **laziness**: testing saves time
* **peace of mind**: tests (should) ensure code is correct
* **runnable specification**: best way to let others know what a function should do and
not do
* **reproducible debugging**: debugging that happened and is saved for later reuse
* **code structure / modularity**: since we may have to call parts of the code independently during the tests
* **ease of modification**: since results can be tested
### Not a panacea
> Trying to improve the quality of software by doing more testing is like trying to lose weight by
> weighing yourself more often.
- Steve McConnell
* Testing won't correct buggy code
* Testing will tell you where the bugs are...
* ... if the test cases *cover* the bugs
### Tests at different scales
Level of test |Area covered by test
-------------------------- |----------------------
**Unit testing** |smallest logical block of work (often < 10 lines of code)
**Component testing** |several logical blocks of work together
**Integration testing** |all components together / whole program
<br>
<div class="fragment fade-in">
Always start at the smallest scale!
<div class="fragment grow">
If a unit test is too complicated, go smaller.
</div>
</div>
### Legacy code hardening
* Very difficult to create unit-tests for existing code
* Instead we make a **regression test**
* Run program as a black box:
```
setup input
run program
read output
check output against expected result
```
* Does not test correctness of code
* Checks the code is similarly wrong on day N as on day 0
### Testing vocabulary
* **fixture**: input data
* **action**: function that is being tested
* **expected result**: the output that should be obtained
* **actual result**: the output that is obtained
* **coverage**: proportion of all possible paths in the code that the tests take
### Branch coverage:
```python
if energy > 0:
! Do this
else:
! Do that
```
Is there a test for both `energy > 0` and `energy <= 0`?
|
github_jupyter
|
```
!python --version
# In case issues with installation of tensortrade, Install the version below using that way
# https://github.com/tensortrade-org/tensortrade/issues/229#issuecomment-633164703
# version: https://github.com/tensortrade-org/tensortrade/releases/tag/v1.0.3
!pip install -U tensortrade==1.0.3 ta matplotlib tensorboardX scikit-learn
from tensortrade.data.cdd import CryptoDataDownload
import pandas as pd
import tensortrade.version
print(tensortrade.__version__)
import random
import ta
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
import tensortrade.env.default as default
from tensortrade.feed.core import Stream, DataFeed, NameSpace
from tensortrade.oms.exchanges import Exchange
from tensortrade.oms.services.execution.simulated import execute_order
from tensortrade.oms.instruments import USD, BTC, ETH
from tensortrade.oms.wallets import Wallet, Portfolio
from tensortrade.agents import A2CAgent
import tensortrade.stochastic as sp
from tensortrade.oms.instruments import Instrument
from tensortrade.env.default.actions import SimpleOrders, BSH, ManagedRiskOrders
from collections import OrderedDict
from tensortrade.oms.orders.criteria import Stop, StopDirection
from tensortrade.env.default.actions import ManagedRiskOrders
from tensortrade.env.default.rewards import RiskAdjustedReturns
from scipy.signal import savgol_filter
def fetchTaFeatures(data):
    """Append the full `ta` technical-indicator set to an OHLCV frame and
    lower-case every column name. Returns the augmented frame."""
    enriched = ta.add_all_ta_features(data, 'open', 'high', 'low', 'close', 'volume', fillna=True)
    enriched.columns = [col.lower() for col in enriched.columns]
    return enriched
def createEnv(config):
    """Build a TensorTrade environment from config["data"] / config["window_size"].

    NOTE(review): several oddities below are preserved as-is and should be
    confirmed before changing: (1) the exchange price streams read the
    module-level `data` frame, not config["data"] (so they see un-normalized
    prices even when the feed is normalized); (2) the locally built `streams`
    tuple is never used; (3) the first `portfolio` is immediately replaced by
    the second, so only USD/BTC/ETH wallets are active; (4) "coin1:close"
    feeds both USD-ETH and USD-TTC1 (coin2 looks intended).
    """
    # observation feed: feature columns of the first five synthetic coins only
    coins = ["coin{}".format(x) for x in range(5)]
    bitfinex_streams = []
    with NameSpace("bitfinex"):
        for coin in coins:
            coinColumns = filter(lambda name: name.startswith(coin), config["data"].columns)
            bitfinex_streams += [
                Stream.source(list(config["data"][c]), dtype="float").rename(c) for c in coinColumns
            ]
    feed = DataFeed(bitfinex_streams)
    # NOTE(review): dead code -- `streams` is built but never passed anywhere
    streams = []
    for coin in coins:
        streams.append(Stream.source(list(data[coin+":"+"close"]), dtype="float").rename("USD-"+coin))
    streams = tuple(streams)
    # exchange price streams (read the module-level `data`, see docstring)
    bitstamp = Exchange("bitfinex", service=execute_order)(
        Stream.source(list(data["coin0:close"]), dtype="float").rename("USD-BTC"),
        Stream.source(list(data["coin1:close"]), dtype="float").rename("USD-ETH"),
        Stream.source(list(data["coin1:close"]), dtype="float").rename("USD-TTC1"),
        Stream.source(list(data["coin3:close"]), dtype="float").rename("USD-TTC2"),
        Stream.source(list(data["coin4:close"]), dtype="float").rename("USD-TTC3"),
        Stream.source(list(data["coin5:close"]), dtype="float").rename("USD-TTC4"),
        Stream.source(list(data["coin6:close"]), dtype="float").rename("USD-TTC5"),
        Stream.source(list(data["coin7:close"]), dtype="float").rename("USD-TTC6"),
        Stream.source(list(data["coin8:close"]), dtype="float").rename("USD-TTC7"),
        Stream.source(list(data["coin9:close"]), dtype="float").rename("USD-TTC8"),
    )
    # custom instruments for the synthetic coins beyond BTC/ETH
    TTC1 = Instrument("TTC1", 8, "TensorTrade Coin1")
    TTC2 = Instrument("TTC2", 8, "TensorTrade Coin2")
    TTC3 = Instrument("TTC3", 8, "TensorTrade Coin3")
    TTC4 = Instrument("TTC4", 8, "TensorTrade Coin4")
    TTC5 = Instrument("TTC5", 8, "TensorTrade Coin5")
    TTC6 = Instrument("TTC6", 8, "TensorTrade Coin6")
    TTC7 = Instrument("TTC7", 8, "TensorTrade Coin7")
    TTC8 = Instrument("TTC8", 8, "TensorTrade Coin8")
    # starting balances: 10k USD cash, empty asset wallets
    cash = Wallet(bitstamp, 10000 * USD)
    asset = Wallet(bitstamp, 0 * BTC)
    asset1 = Wallet(bitstamp, 0 * ETH)
    asset2 = Wallet(bitstamp, 0 * TTC1)
    asset3 = Wallet(bitstamp, 0 * TTC2)
    asset4 = Wallet(bitstamp, 0 * TTC3)
    asset5 = Wallet(bitstamp, 0 * TTC4)
    asset6 = Wallet(bitstamp, 0 * TTC5)
    asset7 = Wallet(bitstamp, 0 * TTC6)
    asset8 = Wallet(bitstamp, 0 * TTC7)
    asset9 = Wallet(bitstamp, 0 * TTC8)
    # NOTE(review): this first portfolio is shadowed by the next assignment
    portfolio = Portfolio(USD, [cash, asset, asset1, asset2, asset3, asset4, asset5, asset6, asset7, asset8, asset9
                                ])
    portfolio = Portfolio(USD, [cash, asset, asset1
                                ])
    reward = RiskAdjustedReturns(return_algorithm = "sortino", window_size=300)
    action_scheme = ManagedRiskOrders(stop=[0.1], take=[0.05, 0.1, 0.04], trade_sizes=[5])
    env = default.create(
        feed=feed,
        portfolio=portfolio,
        action_scheme=action_scheme,
        reward_scheme=reward,
        window_size=config["window_size"]
    )
    return env
coins = ["coin{}".format(x) for x in range(10)]
dfs = []
funcs = [sp.gbm, sp.heston]
for coin in coins:
df = funcs[random.randint(0, 1)](
base_price=random.randint(1, 2000),
base_volume=random.randint(10, 5000),
start_date="2010-01-01",
times_to_generate=5000,
time_frame='1H').add_prefix(coin+":")
for column in ["close", "open", "high", "low"]:
df[coin+f":diff_{column}"] = df[coin+f":{column}"].apply(np.log).diff().dropna()
df[coin+f":soft_{column}"] = savgol_filter(df[coin+":"+column], 35, 2)
ta.add_all_ta_features(
df,
colprefix=coin+":",
**{k: coin+":" + k for k in ['open', 'high', 'low', 'close', 'volume']})
dfs.append(df)
data = pd.concat(dfs, axis=1)
scaler = MinMaxScaler()
norm_data = pd.DataFrame(scaler.fit_transform(data), columns=data.columns)
norm_data.to_csv("fake_data_1h_norm.csv", index=False)
config = {
"window_size": 10,
"data": norm_data
}
env = createEnv(config)
!mkdir -p agents/
agent = A2CAgent(env)
reward = agent.train(n_steps=5000, save_path="agents/", n_episodes = 10)
env = createEnv({
"window_size": 10,
"data": norm_data
})
episode_reward = 0
done = False
obs = env.reset()
while not done:
action = agent.get_action(obs)
obs, reward, done, info = env.step(action)
episode_reward += reward
fig, axs = plt.subplots(1, 2, figsize=(15, 10))
fig.suptitle("Performance")
axs[0].plot(np.arange(len(data["coin0:close"])), data["coin0:close"], label="price")
axs[0].set_title("Trading Chart")
performance_df = pd.DataFrame().from_dict(env.action_scheme.portfolio.performance, orient='index')
performance_df.plot(ax=axs[1])
axs[1].set_xlim(0, 5000)
axs[1].set_title("Net Worth")
plt.show()
orDict = OrderedDict()
for k in env.action_scheme.portfolio.performance.keys():
orDict[k] = env.action_scheme.portfolio.performance[k]["net_worth"]
pd.DataFrame().from_dict(orDict, orient='index').plot()
```
|
github_jupyter
|
Copyright 2019 Google LLC.
SPDX-License-Identifier: Apache-2.0
**Notebook Version** - 1.0.0
```
# Install datacommons
!pip install --upgrade --quiet git+https://github.com/datacommonsorg/api-python.git@stable-1.x
```
# Analyzing Income Distribution
The American Community Survey (published by the US Census) annually reports the number of individuals in a given income bracket at the State level. We can use this information, stored in Data Commons, to visualize disparity in income for each State in the US. Our goal for this tutorial will be to generate a plot that visualizes the total number of individuals across a given set of income brackets for a given state.
Before we begin, we'll setup our notebook
```
# Import the Data Commons library
import datacommons as dc
# Import other libraries
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import json
from google.colab import drive
```
We will also need to provide the API with an API key. See the [Analyzing Statistics in Data Commons Using the Python Client API](https://colab.research.google.com/drive/1ZNXTHu3J0W3vo9Mg3kNUpk0hnD6Ce1u6#scrollTo=ijxoBhFHjo3Z) to see how to set this up for a Colab Notebook.
```
# Mount the Drive
drive.mount('/content/drive', force_remount=True)
# REPLACE THIS with the path to your key.
key_path = '/content/drive/My Drive/DataCommons/secret.json'
# Read the key in and provide it to the Data Commons API
with open(key_path, 'r') as f:
secrets = json.load(f)
dc.set_api_key(secrets['dc_api_key'])
```
## Preparing the Data
We'll begin by creating a dataframe with states and their total population. We can use **`get_places_in`** to get all States within the United States. We can then call **`get_populations`** and **`get_observations`** to get the population of all persons in each State.
```
# Initialize a DataFrame holding the USA.
data = pd.DataFrame({'country': ['country/USA']})
# Add a column for states and get their names
data['state'] = dc.get_places_in(data['country'], 'State')
data = dc.flatten_frame(data)
# Get all state names and store it in a column "name"
data['name'] = dc.get_property_values(data['state'], 'name')
data = dc.flatten_frame(data)
# Get StatisticalPopulations representing all persons in each state.
data['all_pop'] = dc.get_populations(data['state'], 'Person')
# Get the total count of all persons in each population
data['all'] = dc.get_observations(data['all_pop'],
'count',
'measuredValue',
'2017',
measurement_method='CenusACS5yrSurvey')
# Display the first five rows of the table.
data.head(5)
```
### Querying for Income Brackets
Next, let's get the population level for each income bracket. The datacommons graph identifies 16 different income brackets. For each bracket and state, we can get the population level. Remember that we first get the StatisticalPopulation, and then a corresponding observation. We'll filter observations to between published in 2017 by the American Community Survey.
```
# A list of income brackets
income_brackets = [
"USDollarUpto10000",
"USDollar10000To14999",
"USDollar15000To19999",
"USDollar20000To24999",
"USDollar25000To29999",
"USDollar30000To34999",
"USDollar35000To39999",
"USDollar40000To44999",
"USDollar45000To49999",
"USDollar50000To59999",
"USDollar60000To74999",
"USDollar75000To99999",
"USDollar100000To124999",
"USDollar125000To149999",
"USDollar150000To199999",
"USDollar200000Onwards",
]
# Add a column containing the population count for each income bracket
for bracket in income_brackets:
# Get the new column names
pop_col = '{}_pop'.format(bracket)
obs_col = bracket
# Create the constraining properties map
pvs = {'income': bracket}
# Get the StatisticalPopulation and Observation
data[pop_col] = dc.get_populations(data['state'], 'Household',
constraining_properties=pvs)
data[obs_col] = dc.get_observations(data[pop_col],
'count',
'measuredValue',
'2017',
measurement_method='CenusACS5yrSurvey')
# Display the table
data.head(5)
```
Let's limit the size of this DataFrame by selecting columns with only the State name and Observations.
```
# Select columns that will be used for plotting
data = data[['name', 'all'] + income_brackets]
# Display the table
data.head(5)
```
## Analyzing the Data
Let's plot our data as a histogram. Notice that the income ranges as tabulated by the US Census are not equal. At the low end, the range is 0-9999, whereas, towards the top, the range 150,000-199,999 is five times as broad! We will make the width of each of the columns correspond to their range, and will give us an idea of the total earnings, not just the number of people in that group.
First we provide code for generating the plot.
```
# Histogram bins
label_to_range = {
"USDollarUpto10000": [0, 9999],
"USDollar10000To14999": [10000, 14999],
"USDollar15000To19999": [15000, 19999],
"USDollar20000To24999": [20000, 24999],
"USDollar25000To29999": [25000, 29999],
"USDollar30000To34999": [30000, 34999],
"USDollar35000To39999": [35000, 39999],
"USDollar40000To44999": [40000, 44999],
"USDollar45000To49999": [45000, 49999],
"USDollar50000To59999": [50000, 59999],
"USDollar60000To74999": [60000, 74999],
"USDollar75000To99999": [75000, 99999],
"USDollar100000To124999": [100000, 124999],
"USDollar125000To149999": [125000, 149999],
"USDollar150000To199999": [150000, 199999],
"USDollar200000Onwards": [250000, 300000],
}
bins = [
0, 10000, 15000, 20000, 25000, 30000, 35000, 40000, 45000, 50000, 60000,
75000, 100000, 125000, 150000, 250000
]
def plot_income(data, state_name):
    """Draw a width-scaled histogram of income-bracket sizes for one state.

    Bar widths are proportional to each bracket's dollar span, so bar
    area reflects total earnings rather than just household counts.
    Returns the matched row (as a Series) on success, or None when the
    state is not present in `data`.
    """
    # Look up the single row for this state; squeeze() turns it into a Series.
    row = data.loc[data['name'] == state_name].squeeze()
    if row.shape[0] == 0:
        print('{} does not have sufficient income data to generate the plot!'.format(state_name))
        return
    # Skip the leading name/total columns, keeping only bracket observations.
    observations = row[2:]
    # Bar width for each bracket, scaled down by a constant factor of 18.
    widths = [int((span[1] - span[0]) / 18)
              for span in (label_to_range[b] for b in income_brackets)]
    # Bar centers: running left edge plus half of each bar's own width.
    centers = []
    running = 0
    for w in widths:
        centers.append(running + (w // 2))
        running += w
    # Render the histogram.
    plt.figure(figsize=(12, 10))
    plt.xticks(centers, income_brackets, rotation=90)
    plt.grid(True)
    plt.bar(centers, observations.values, widths, color='b', alpha=0.3)
    # Hand the raw row back so the caller can inspect the values.
    return row
```
We can then call this code with a state to plot the income bracket sizes.
```
#@title Enter State to plot { run: "auto" }
state_name = "Tennessee" #@param ["Missouri", "Arkansas", "Arizona", "Ohio", "Connecticut", "Vermont", "Illinois", "South Dakota", "Iowa", "Oklahoma", "Kansas", "Washington", "Oregon", "Hawaii", "Minnesota", "Idaho", "Alaska", "Colorado", "Delaware", "Alabama", "North Dakota", "Michigan", "California", "Indiana", "Kentucky", "Nebraska", "Louisiana", "New Jersey", "Rhode Island", "Utah", "Nevada", "South Carolina", "Wisconsin", "New York", "North Carolina", "New Hampshire", "Georgia", "Pennsylvania", "West Virginia", "Maine", "Mississippi", "Montana", "Tennessee", "New Mexico", "Massachusetts", "Wyoming", "Maryland", "Florida", "Texas", "Virginia"]
# Generate the plot and keep the matched row for inspection below
result = plot_income(data, state_name)
# Show the plot
plt.show()
```
and we can display the raw table of values.
```
# Additionally print the table of income bracket sizes
# (the Series returned by plot_income; None if the state was not found)
result
```
This is only the beginning! What else can you analyze? For example, you could try computing a measure of income disparity in each state (see [Gini Coefficient](https://en.wikipedia.org/wiki/Gini_coefficient)).
You could then expand the dataframe to include more information and analyze how attributes like education level, crime, or even weather affect income disparity.
|
github_jupyter
|
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Import the dataset of US federal disaster declarations
us = pd.read_csv('US ND prediction/us_disaster_declarations.csv')
us.head()
# checking for null values
us.isnull().sum()
# shape of dataset
us.shape
# Getting the declaration-date column as a list of strings
li = us['declaration_date'].tolist()
li[:5]
def date_ordering(dates):
year = []
month = []
day = []
for l in li:
l = str(l)
str_date = l.split('-')
str_date = list(str_date)
day.append(int("".join(str_date[2][:2])))
year.append(int("".join(str_date[0])))
month.append(int("".join(str_date[1])))
return(year, month, day)
year, month, day = date_ordering(li)
# Create new columns from the parsed date parts
us['year'] = year
us['month'] = month
us['day'] = day
us.head()
# collecting only the columns needed for modeling.
us = us[['year','month','day','state','designated_area','declaration_title']]
us.head()
# visual display of the most affected states
state_count = us['state'].value_counts()
sns.set(style="darkgrid")
# NOTE(review): positional x/y arguments to sns.barplot are removed in
# seaborn >= 0.12; pass x=/y= keywords when upgrading.
sns.barplot(state_count.index, state_count.values, alpha=0.9)
plt.title('Frequency Distribution of State')
plt.ylabel('Number of Occurrences', fontsize=12)
plt.xlabel('States', fontsize=12)
plt.xticks(rotation = 90)
plt.show() # Texas clearly has the largest number of declarations
# Build code -> label lookup tables for each categorical column, so the
# integer codes created below can be mapped back to their original strings.
state = dict(enumerate(us.state.astype('category').cat.categories))
designated_area = dict(enumerate(us.designated_area.astype('category').cat.categories))
# BUG FIX: this lookup was originally assigned to `state`, silently
# overwriting the state table built two lines above.
declaration_title = dict(enumerate(us.declaration_title.astype('category').cat.categories))
# Replace the string columns with their integer category codes.
us['state_code'] = us.state.astype('category').cat.codes
us['designated_area_code'] = us.designated_area.astype('category').cat.codes
us['declaration_title_code'] = us.declaration_title.astype('category').cat.codes
us.head()
# Drop the original string columns now that the codes exist.
us.drop(['state','designated_area','declaration_title'],inplace = True, axis=1)
us.head()
# Rebuild a single 'day/month/year' string per row and use it as the index.
year = us['year'].tolist()
month = us['month'].tolist()
day =us['day'].tolist()
combo =[]
zipped = zip(day, month, year)
for i,j, k in zipped:
    combo.append(str(i)+'/'+str(j)+'/'+ str(k))
us['fulldate'] = combo
us.set_index("fulldate", inplace = True)
us.head()
# Trying to see if the state codes show any visible trend over time.
us['state_code'].plot()
# Trying to see if the declaration-title codes show any visible trend.
us['declaration_title_code'].plot()
# BUG FIX: a DataFrame has no .tolist(); go through .values first.
us_ = us.values.tolist()
# BUG FIX: positional 2-D slicing needs .iloc; `us[9000:,:]` raises TypeError.
us_test = us.iloc[9000:, :]
from sklearn.preprocessing import MinMaxScaler
# Scale every column into [0, 1] before building the LSTM windows.
sc =MinMaxScaler(feature_range = (0,1))
us_scaled_set = sc.fit_transform(us)
# One year of rows per training window.
time_steps = 365
length = len(us_scaled_set)
col = us.shape[1]
# creating the time steps: each sample holds the previous `time_steps`
# rows, and its target is the row that immediately follows them.
x_train = []
y_train = []
for i in range(time_steps, length):
    x_train.append(us_scaled_set[i-time_steps:i, :])
    y_train.append(us_scaled_set[i,:])
x_train, y_train = np.array(x_train), np.array(y_train)
# Shape into (samples, time_steps, features) for a recurrent model.
x_train = np.reshape(x_train,(x_train.shape[0], x_train.shape[1], col))
#language['level_back'] = language['code'].map(d)
#https://www.kaggle.com/headsortails/us-natural-disaster-declarations
```
### Volcanic eruption data
```
# Load the volcanic-eruption table (tab-separated text file).
volc = pd.read_csv('data/txt/volerup.txt',delimiter = '\t', quoting = 3, encoding='utf-8')
volc.head()
volc.tail()
volc.isnull().sum()
len(volc)
# Frequency of eruptions by country
Country_count = volc['Country'].value_counts()
sns.set(style="darkgrid")
# NOTE(review): positional x/y arguments to sns.barplot are removed in
# seaborn >= 0.12; pass x=/y= keywords when upgrading.
sns.barplot(Country_count.index, Country_count.values, alpha=0.9)
plt.title('Frequency Distribution of Countries')
plt.ylabel('Number of Occurrences', fontsize=12)
plt.xlabel('Countries', fontsize=12)
plt.xticks(rotation = 90)
plt.show() # Indonesia clearly has the most volcanic eruptions
# Same distribution as a pie chart with percentage labels
labels = volc['Country'].astype('category').cat.categories.tolist()
counts = volc['Country'].value_counts()
sizes = [counts[var_cat] for var_cat in labels]
fig1, ax1 = plt.subplots()
ax1.pie(sizes, labels=labels, autopct='%1.1f%%', shadow=True) #autopct shows the % on the plot
ax1.axis('equal')
plt.show()
# Keep only the date and coordinate columns.
volc = volc[['Year','Month','Day','Latitude','Longitude']]
len(volc)
volc.head()
# BUG FIX: the original called volc.fillna(...) on the whole frame, which
# filled NaNs in *every* column with the modal Month value and made the
# second (Day) fillna a no-op. Fill each column with its own mode instead.
volc['Month'] = volc['Month'].fillna(volc['Month'].value_counts().index[0])
volc['Day'] = volc['Day'].fillna(volc['Day'].value_counts().index[0])
# NOTE(review): if Latitude/Longitude also contain NaNs they must be
# filled or dropped before the MinMaxScaler call further below.
volc.head()
# Repeated values show that some locations erupted more than once.
print(volc.Latitude.duplicated().sum())
print(volc.Longitude.duplicated().sum())
volc.set_index("Year", inplace = True)
```
### Trying to see if there are trends in the coordinates.
```
# Plot the coordinate series against the Year index to look for trends.
volc['Latitude'].plot()
volc['Longitude'].plot()
# Abandoned experiments kept for reference: categorical encoding of the
# text columns and a one-hot ColumnTransformer pipeline.
#volc['country_num'] = volc['Country'].astype('category').cat.codes
#volc['Location_num'] = volc['Location'].astype('category').cat.codes
#volc['Name_num'] = volc['Name'].astype('category').cat.codes
#volc.drop(['Country','Location','Name'],inplace = True, axis=1)
#volc.head()
#from sklearn.preprocessing import OneHotEncoder
#from sklearn.compose import ColumnTransformer
#ct = ColumnTransformer(
#    [('one_hot_encoder', OneHotEncoder(categories='auto'), [2,3,4])], # The column numbers to be transformed (here is [0] but can be [0, 1, 3])
#    remainder='passthrough' # Leave the rest of the columns untouched
#)
#X = ct.fit_transform(volc)
#x = X.toarray()
#x
# ------------------------------------------------------------------------------------------------------------------------------------
# Bring Year back as a regular column so it is scaled with the rest.
volc = volc.reset_index()
volc.head()
#hold = volc['Latitude'].astype(str) + "," + volc['Longitude'].astype(str)
#volc['coor'] = hold
#volc.head()
from sklearn.preprocessing import MinMaxScaler
# Scale all five columns into [0, 1].
sc =MinMaxScaler(feature_range = (0,1))
volc_scaled_set = sc.fit_transform(volc)
volc_scaled_set
# 30-row lookback window per sample.
time_steps = 30
check = len(volc_scaled_set)
check
# creating the time steps: each sample holds the previous `time_steps`
# rows and its target is the row that follows them.
x_train = []
y_train = []
for i in range(time_steps, check):
    x_train.append(volc_scaled_set[i-time_steps:i, :])
    y_train.append(volc_scaled_set[i,:])
x_train, y_train = np.array(x_train), np.array(y_train)
len(x_train)
# Shape into (samples, time_steps, 5 features) for a recurrent model.
x_train = np.reshape(x_train,(x_train.shape[0], x_train.shape[1], 5))
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
from keras import backend
# F-beta metric, in case of an imbalanced dataset.
def f_beta(y_true, y_pred, beta =2):
    """Batch-mean F-beta score built from Keras backend ops.

    beta > 1 weights recall more heavily than precision (default beta=2).
    Predictions are clipped to [0, 1] and rounded, so inputs are treated
    as per-class probabilities/indicators summed along axis 1.
    """
    # clip prediction into [0, 1]
    y_pred = backend.clip(y_pred, 0, 1)
    # per-sample confusion-matrix elements (rounded to 0/1 before summing)
    tp = backend.sum(backend.round(backend.clip(y_true * y_pred, 0,1)), axis = 1)
    fp = backend.sum(backend.round(backend.clip(y_pred - y_true, 0,1)), axis = 1)
    fn = backend.sum(backend.round(backend.clip(y_true - y_pred, 0,1)), axis = 1)
    # precision (epsilon guards against division by zero)
    p = tp / (tp + fp + backend.epsilon())
    # recall
    r = tp / (tp +fn + backend.epsilon())
    # combine into the F-beta score, averaged over the batch
    bb = beta ** 2
    fbeta_score = backend.mean((1 + bb) * (p * r) / (bb * p + r + backend.epsilon()))
    return fbeta_score
# NOTE(review): `detector` is never defined anywhere in this notebook, so
# this call raises NameError — the model-definition cell appears missing.
detector.fit(x_train, y_train, epochs = 90, batch_size = 10)
# ---------------------------------------------------------------------------------------------------------
```
### Tsunami data
```
# Load the tsunami run-up table (tab-separated text file).
tsu = pd.read_csv('data/txt/tsrunup.txt',delimiter = '\t', quoting = 3, encoding='latin-1')
tsu.head()
tsu.isnull().sum()
# Keep only date, place, and coordinate columns.
tsu = tsu[['YEAR', 'LOCATION_NAME','COUNTRY','LATITUDE','LONGITUDE']]
tsu.head()
# Repeated values show that some locations were hit more than once.
print(tsu.LATITUDE.duplicated().sum())
print(tsu.LONGITUDE.duplicated().sum())
print(tsu.LOCATION_NAME.duplicated().sum())
### ------------------------------------------------------------------------------------------------------
```
# Earthquake data
```
# Load the significant-earthquake table (tab-separated text file).
earth = pd.read_csv('data/txt/signif.txt',delimiter = '\t', quoting = 3, encoding='latin-1')
earth.head()
earth.isnull().sum()
# Keep only date, place, and coordinate columns.
earth =earth[['YEAR', 'LOCATION_NAME','COUNTRY','LATITUDE','LONGITUDE']]
# Repeated values show that some locations were hit more than once.
print(earth.LATITUDE.duplicated().sum())
print(earth.LONGITUDE.duplicated().sum())
print(earth.LOCATION_NAME.duplicated().sum())
# References used while writing this section:
# https://medium.com/@kasiarachuta/choosing-columns-in-pandas-dataframe-d0677b34a6ca
# https://towardsdatascience.com/time-series-forecasting-with-recurrent-neural-networks-74674e289816
# https://www.datacamp.com/community/tutorials/categorical-data
# https://cmdlinetips.com/2018/11/how-to-join-two-text-columns-into-a-single-column-in-pandas/
```
|
github_jupyter
|
# Self-Driving Car Engineer Nanodegree
## Deep Learning
## Project: Build a Traffic Sign Recognition Classifier
In this notebook, a template is provided for you to implement your functionality in stages, which is required to successfully complete this project. If additional code is required that cannot be included in the notebook, be sure that the Python code is successfully imported and included in your submission if necessary.
> **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the iPython Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to \n",
"**File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.
In addition to implementing code, there is a writeup to complete. The writeup should be completed in a separate file, which can be either a markdown file or a pdf document. There is a [write up template](https://github.com/udacity/CarND-Traffic-Sign-Classifier-Project/blob/master/writeup_template.md) that can be used to guide the writing process. Completing the code template and writeup template will cover all of the [rubric points](https://review.udacity.com/#!/rubrics/481/view) for this project.
The [rubric](https://review.udacity.com/#!/rubrics/481/view) contains "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. The stand out suggestions are optional. If you decide to pursue the "stand out suggestions", you can include the code in this Ipython notebook and also discuss the results in the writeup file.
>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. In addition, Markdown cells can be edited by typically double-clicking the cell to enter edit mode.
---
## Step 0: Load The Data
```
# Load pickled data
import pickle
import numpy as np
import tensorflow as tf
import cv2 as cv
# TODO: Fill this in based on where you saved the training and testing data
training_file = 'traffic-signs-data/train.p'
validation_file='traffic-signs-data/valid.p'
testing_file = 'traffic-signs-data/test.p'
with open(training_file, mode='rb') as f:
    train = pickle.load(f)
with open(validation_file, mode='rb') as f:
    valid = pickle.load(f)
with open(testing_file, mode='rb') as f:
    test = pickle.load(f)
# Each pickle holds a dict with 'features' (images) and 'labels' (class ids).
X_train, y_train = train['features'], train['labels']
X_validation, y_validation = valid['features'], valid['labels']
X_test, y_test = test['features'], test['labels']
```
---
## Step 1: Dataset Summary & Exploration
The pickled data is a dictionary with 4 key/value pairs:
- `'features'` is a 4D array containing raw pixel data of the traffic sign images, (num examples, width, height, channels).
- `'labels'` is a 1D array containing the label/class id of the traffic sign. The file `signnames.csv` contains id -> name mappings for each id.
- `'sizes'` is a list containing tuples, (width, height) representing the original width and height the image.
- `'coords'` is a list containing tuples, (x1, y1, x2, y2) representing coordinates of a bounding box around the sign in the image. **THESE COORDINATES ASSUME THE ORIGINAL IMAGE. THE PICKLED DATA CONTAINS RESIZED VERSIONS (32 by 32) OF THESE IMAGES**
Complete the basic data summary below. Use python, numpy and/or pandas methods to calculate the data summary rather than hard coding the results. For example, the [pandas shape method](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.shape.html) might be useful for calculating some of the summary results.
### Provide a Basic Summary of the Data Set Using Python, Numpy and/or Pandas
```
### Replace each question mark with the appropriate value.
### Use python, pandas or numpy methods rather than hard coding the results
### Feel free to use as many code cells as needed.
import matplotlib.pyplot as plt
# Number of training examples
n_train = len(X_train)
# Number of validation examples
n_validation = len(X_validation)
# Number of testing examples.
n_test = len(X_test)
# Shape of a traffic sign image (height, width, channels)
image_shape = X_train[0].shape
# How many unique classes/labels there are in the dataset.
n_classes = len(np.unique(y_train))
print("Number of training examples =", n_train)
print("Number of validation examples =", n_validation)
print("Number of testing examples =", n_test)
print("Image data shape =", image_shape)
print("Number of classes =", n_classes)
fig, ax = plt.subplots(figsize =(5, 4))
ax.hist(y_train,bins = n_classes)
# Plot showing number of training examples corresponding to each class
# Show plot
# Labeling the X-axis
plt.xlabel('Y-Value - Traffic Sign Indicator')
# Labeling the Y-axis
plt.ylabel('Number of training data samples')
# Give a title to the graph
plt.title('Hysteresis plot of #training samples for each given Y')
plt.xticks(range(n_classes))
# Show a legend on the plot
#Saving the plot as an image
fig.savefig('examples/Hysteresis_plot.jpg', bbox_inches='tight', dpi=75)
plt.show()
import random
from sklearn.utils import shuffle
# Shuffle so the example image below is randomly chosen.
X_train, y_train = shuffle(X_train, y_train)
plt.imshow(X_train[1500])
cv.imwrite('examples/Pre-process_ex.jpg', cv.cvtColor(X_train[1500], cv.COLOR_RGB2BGR))
import cv2 as cv
# Precompute one 2D rotation matrix per augmentation angle, pivoting on
# the image center (16, 16) of the 32x32 inputs.
M = []
angles = [-10,10,-25,25]
for angle in angles:
    M.append(cv.getRotationMatrix2D((16,16), angle, 1))
def augment_data(X,y):
    """Expand (X, y) with rotated variants of every image.

    For each input image, the original plus one warped copy per rotation
    matrix in the module-level list `M` is kept, so the dataset grows by
    a factor of len(M) + 1. Labels are duplicated to stay aligned.
    Returns the augmented images and labels as plain lists.
    """
    augmented_images = []
    augmented_labels = []
    for idx, image in enumerate(X):
        label = y[idx]
        # keep the untouched original
        augmented_images.append(image)
        augmented_labels.append(label)
        # add one rotated copy per precomputed matrix, in order
        for rotation in M:
            warped = cv.warpAffine(image, rotation, (image.shape[1], image.shape[0]))
            augmented_images.append(warped)
            augmented_labels.append(label)
    return augmented_images, augmented_labels
# Augment the training set (originals + 4 rotated copies each).
X_train,y_train = augment_data(X_train,y_train)
# Index 5*1500+4 is the last rotated variant of original image 1500.
plt.imshow(X_train[5*1500+4])
cv.imwrite('examples/Rotated_Image.jpg', cv.cvtColor(X_train[5*1500+4], cv.COLOR_RGB2BGR))
# Recompute the summary statistics after augmentation.
X_train = np.array(X_train)
y_train = np.array(y_train)
n_train = len(y_train)
# Number of validation examples
n_validation = len(X_validation)
# Number of testing examples.
n_test = len(X_test)
# Shape of a traffic sign image
image_shape = X_train[0].shape
# How many unique classes/labels there are in the dataset.
n_classes = len(np.unique(y_train))
print("Number of training examples =", n_train)
print("Number of validation examples =", n_validation)
print("Number of testing examples =", n_test)
print("Image data shape =", image_shape)
print("Number of classes =", n_classes)
fig, ax = plt.subplots(figsize =(5, 4))
ax.hist(y_train,bins = n_classes)
# Plot showing number of training examples corresponding to each class
# Show plot
# Labeling the X-axis
plt.xlabel('Y-Value - Traffic Sign Indicator')
# Labeling the Y-axis
plt.ylabel('Number of training data samples')
# Give a title to the graph
plt.title('Hysteresis plot of #training samples for each given Y')
plt.xticks(range(n_classes))
# Show a legend on the plot
#Saving the plot as an image
fig.savefig('examples/Post_Augmentation_Hysteresis_plot.jpg', bbox_inches='tight', dpi=75)
plt.show()
```
### Include an exploratory visualization of the dataset
Visualize the German Traffic Signs Dataset using the pickled file(s). This is open ended, suggestions include: plotting traffic sign images, plotting the count of each sign, etc.
The [Matplotlib](http://matplotlib.org/) [examples](http://matplotlib.org/examples/index.html) and [gallery](http://matplotlib.org/gallery.html) pages are a great resource for doing visualizations in Python.
**NOTE:** It's recommended you start with something simple first. If you wish to do more, come back to it after you've completed the rest of the sections. It can be interesting to look at the distribution of classes in the training, validation and test set. Is the distribution the same? Are there more examples of some classes than others?
----
## Step 2: Design and Test a Model Architecture
Design and implement a deep learning model that learns to recognize traffic signs. Train and test your model on the [German Traffic Sign Dataset](http://benchmark.ini.rub.de/?section=gtsrb&subsection=dataset).
The LeNet-5 implementation shown in the [classroom](https://classroom.udacity.com/nanodegrees/nd013/parts/fbf77062-5703-404e-b60c-95b78b2f3f9e/modules/6df7ae49-c61c-4bb2-a23e-6527e69209ec/lessons/601ae704-1035-4287-8b11-e2c2716217ad/concepts/d4aca031-508f-4e0b-b493-e7b706120f81) at the end of the CNN lesson is a solid starting point. You'll have to change the number of classes and possibly the preprocessing, but aside from that it's plug and play!
With the LeNet-5 solution from the lecture, you should expect a validation set accuracy of about 0.89. To meet specifications, the validation set accuracy will need to be at least 0.93. It is possible to get an even higher accuracy, but 0.93 is the minimum for a successful project submission.
There are various aspects to consider when thinking about this problem:
- Neural network architecture (is the network over or underfitting?)
- Play around preprocessing techniques (normalization, rgb to grayscale, etc)
- Number of examples per label (some have more than others).
- Generate fake data.
Here is an example of a [published baseline model on this problem](http://yann.lecun.com/exdb/publis/pdf/sermanet-ijcnn-11.pdf). It's not required to be familiar with the approach used in the paper but, it's good practice to try to read papers like these.
### Pre-process the Data Set (normalization, grayscale, etc.)
Minimally, the image data should be normalized so that the data has mean zero and equal variance. For image data, `(pixel - 128)/ 128` is a quick way to approximately normalize the data and can be used in this project.
Other pre-processing steps are optional. You can try different techniques to see if it improves performance.
Use the code cell (or multiple code cells, if necessary) to implement the first step of your project.
```
### Preprocess the data here. It is required to normalize the data. Other preprocessing steps could include
### converting to grayscale, etc.
# Visualizations will be shown in the notebook.
%matplotlib inline
def normalize(images):
    """Scale image intensities from [0, 255] down to [0.0, 1.0].

    Parameters
    ----------
    images : iterable of np.ndarray
        Images with 8-bit-style intensity values.

    Returns
    -------
    list of np.ndarray
        The scaled images, in input order.
    """
    # Cleanup: the original also allocated an unused 800x800 buffer
    # (`norm_img`) left over from a commented-out cv.normalize experiment.
    return [img / 255 for img in images]
def grayscale(images):
    """Convert each image to grayscale, replicated back to 3 channels.

    Stacking the gray channel three times keeps the output shape
    compatible with the 32x32x3 input expected by LeNet below.
    """
    image_out = []
    for img in images:
        # Collapse to a single luminance channel
        graysc = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
        # Earlier color-space experiments kept for reference:
        #img = cv.cvtColor(img, cv.COLOR_BGR2RGB)
        #img = cv.cvtColor(img, cv.COLOR_RGB2HSV)
        #temp = np.dstack((grayscale,grayscale,grayscale))
        # Stack the gray channel three times to restore 3 channels
        temp = np.dstack((graysc,graysc,graysc))
        image_out.append(temp)
    return image_out
# Grayscale conversion was tried but left disabled; only normalization is used.
#X_train = grayscale(X_train)
#X_validation = grayscale(X_validation)
#X_test = grayscale(X_test)
X_train = normalize(X_train)
X_validation = normalize(X_validation)
X_test = normalize(X_test)
print(X_train[5*1500])
cv.imwrite('examples/Normalized_Image.jpg',X_train[5*1500])
X_train, y_train = shuffle(X_train, y_train)
# Find one example of class 28 to display.
for i in range(0,len(X_train)):
    if(y_train[i] == 28):
        img = X_train[i]
        break
plt.imshow(img)
# NOTE(review): this `flatten` import is unused — the code calls
# tf.contrib.layers.flatten directly below.
from tensorflow.contrib.layers import flatten
EPOCHS = 20
BATCH_SIZE = 128
def LeNet(x):
    """Modified LeNet-5: two conv+pool layers, then three fully-connected
    layers producing 43 logits (one per traffic-sign class).

    x: float32 tensor of shape (batch, 32, 32, 3).
    Returns the unscaled logits tensor (softmax is applied by the loss).
    """
    # Arguments used for tf.truncated_normal, randomly defines variables for the weights and biases for each layer
    mu = 0
    sigma = 0.1
    # NOTE(review): dropout is a plain Python constant, so the 0.5 keep
    # probability is also applied during evaluation — consider feeding it
    # via a placeholder set to 1.0 at eval time.
    dropout = 0.5
    w1 = tf.Variable(tf.random_normal([5, 5, 3, 25],mean=mu,stddev = sigma))
    b1 = tf.Variable(tf.random_normal([25],mean=mu, stddev = sigma))
    # Layer 1: Convolutional. Input = 32x32x3. Output = 28x28x25.
    l1 = tf.nn.conv2d(x, w1, strides=[1, 1, 1, 1], padding='VALID')
    l1 = tf.nn.bias_add(l1, b1)
    # Activation.
    l1 = tf.nn.relu(l1)
    # Pooling. Input = 28x28x25. Output = 14x14x25.
    l1 = tf.nn.max_pool(l1,ksize=[1, 2, 2, 1],strides=[1, 2, 2, 1],padding='SAME')
    # Layer 2: Convolutional. Output = 10x10x50.
    w2 = tf.Variable(tf.random_normal([5, 5, 25, 50],mean=mu, stddev = sigma))
    b2 = tf.Variable(tf.random_normal([50],mean=mu, stddev = sigma))
    l2 = tf.nn.conv2d(l1, w2, strides=[1, 1, 1, 1], padding='VALID')
    l2 = tf.nn.bias_add(l2, b2)
    # Activation.
    l2 = tf.nn.relu(l2)
    # Pooling. Input = 10x10x50. Output = 5x5x50.
    l2 = tf.nn.max_pool(l2,ksize=[1, 2, 2, 1],strides=[1, 2, 2, 1],padding='SAME')
    # Flatten. Input = 5x5x50. Output = 1250.
    l2 = tf.contrib.layers.flatten(l2)
    # Layer 3: Fully Connected. Input = 1250. Output = 400.
    w3 = tf.Variable(tf.random_normal([1250, 400],mean=mu, stddev = sigma))
    b3 = tf.Variable(tf.random_normal([400],mean=mu, stddev = sigma))
    fc1 = tf.add(tf.matmul(l2, w3), b3)
    # Activation, with dropout for regularization.
    fc1 = tf.nn.relu(fc1)
    fc1 = tf.nn.dropout(fc1, dropout)
    # Layer 4: Fully Connected. Input = 400. Output = 100.
    w4 = tf.Variable(tf.random_normal([400, 100],mean=mu, stddev = sigma))
    b4 = tf.Variable(tf.random_normal([100],mean=mu, stddev = sigma))
    fc2 = tf.add(tf.matmul(fc1, w4), b4)
    # Activation.
    fc2 = tf.nn.relu(fc2)
    #fc2 = tf.nn.dropout(fc2, dropout)
    # Layer 5: Fully Connected. Input = 100. Output = 43.
    w5 = tf.Variable(tf.random_normal([100, 43],mean=mu, stddev = sigma))
    b5 = tf.Variable(tf.random_normal([43],mean=mu, stddev = sigma))
    logits = tf.add(tf.matmul(fc2, w5), b5)
    #logits = tf.nn.softmax(logits)
    return logits
```
### Train, Validate and Test the Model
A validation set can be used to assess how well the model is performing. A low accuracy on the training and validation
sets imply underfitting. A high accuracy on the training set but low accuracy on the validation set implies overfitting.
```
### Train your model here.
### Calculate and report the accuracy on the training and validation set.
### Once a final model architecture is selected,
### the accuracy on the test set should be calculated and reported as well.
### Feel free to use as many code cells as needed.
### Define your architecture here.
### Feel free to use as many code cells as needed.
# Learning rate for Adam.
rate = 0.0015
# Placeholders for image batches and integer labels; labels are one-hot
# encoded over the 43 classes.
x = tf.placeholder(tf.float32, (None, 32, 32, 3))
y = tf.placeholder(tf.int32, (None))
one_hot_y = tf.one_hot(y, 43)
logits = LeNet(x)
# Cross-entropy loss averaged over the batch.
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=one_hot_y, logits=logits)
loss_operation = tf.reduce_mean(cross_entropy)
optimizer = tf.train.AdamOptimizer(learning_rate = rate)
training_operation = optimizer.minimize(loss_operation)
# Softmax probabilities kept for the top-k analysis later.
softmax = tf.nn.softmax(logits)
#pred = tf.argmax(logits,1)
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))
accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
saver = tf.train.Saver()
def evaluate(X_data, y_data):
    """Return the mean accuracy of the model over (X_data, y_data),
    evaluated in BATCH_SIZE chunks on the default session."""
    num_examples = len(X_data)
    total_accuracy = 0
    sess = tf.get_default_session()
    for offset in range(0, num_examples, BATCH_SIZE):
        batch_x, batch_y = X_data[offset:offset+BATCH_SIZE], y_data[offset:offset+BATCH_SIZE]
        accuracy = sess.run(accuracy_operation, feed_dict={x: batch_x, y: batch_y})
        # Weight each batch by its size so the final mean is exact.
        total_accuracy += (accuracy * len(batch_x))
    return total_accuracy / num_examples
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    num_examples = len(X_train)
    print("Training...")
    print()
    for i in range(EPOCHS):
        # Reshuffle every epoch so batches differ between epochs.
        X_train, y_train = shuffle(X_train, y_train)
        for offset in range(0, num_examples, BATCH_SIZE):
            end = offset + BATCH_SIZE
            batch_x, batch_y = X_train[offset:end], y_train[offset:end]
            sess.run(training_operation, feed_dict={x: batch_x, y: batch_y})
            #print(sess.run(logits, feed_dict = {x:batch_x}))
        validation_accuracy = evaluate(X_validation, y_validation)
        print("EPOCH {} ...".format(i+1))
        print("Validation Accuracy = {:.3f}".format(validation_accuracy))
        print()
    saver.save(sess, './lenet')
    print("Model saved")
```
---
## Step 3: Test a Model on New Images
To give yourself more insight into how your model is working, download at least five pictures of German traffic signs from the web and use your model to predict the traffic sign type.
You may find `signnames.csv` useful as it contains mappings from the class id (integer) to the actual sign name.
### Load and Output the Images
### Predict the Sign Type for Each Image
```
### Run the predictions here and use the model to output the prediction for each image.
### Make sure to pre-process the images with the same pre-processing pipeline used earlier.
### Feel free to use as many code cells as needed.
### Load the images and plot them here.
### Feel free to use as many code cells as needed.
# Restore the trained weights and measure accuracy on the held-out sets.
with tf.Session() as sess:
    saver.restore(sess, tf.train.latest_checkpoint('.'))
    test_accuracy = evaluate(X_test, y_test)
    train_Accuracy = evaluate(X_train,y_train)
    #print(sess.run(pred,feed_dict={x: X_test}))
# http://localhost:8889/notebooks/Documents/Self_Driving_Car/Project_3_Classifying_Traffic_signs/CarND-Traffic-Sign-Classifier-Project-master/Trial_1.ipynb#Analyze-Performance print(sess.run(tf.nn.top_k(softmax, k=3),feed_dict={x: X_test}))
print("Test Accuracy = {:.3f}".format(test_accuracy))
# NOTE(review): the doubled "= =" below is a typo in the output string.
print("Training Accuracy = = {:.3f}".format(train_Accuracy))
import os
import glob
import matplotlib.image as mpimg
import math
### Loading 5 New Images
# Keep both the originals (for display) and 32x32 resized copies (for the model).
new_test_im = []
test = []
for img in os.listdir("new_test_images/"):
    image = mpimg.imread(str(os.path.join("new_test_images/",img)))
    width = int(32)
    height = int(32)
    dim = (width, height)
    # resize image to the 32x32 input size the network expects
    resized = cv.resize(image, dim,interpolation = cv.INTER_AREA)
    resized = np.uint8(resized)
    test.append(image)
    new_test_im.append(resized)
### For example, if the model predicted 1 out of 5 signs correctly, it's 20% accurate on these new images.
plt.imshow(test[0])
# Apply the same preprocessing pipeline used for training.
#X_test_new = grayscale(new_test_im)
X_test_new = normalize(new_test_im)
plt.imshow(X_test_new[0])
# Ground-truth class ids for the downloaded images, in directory order.
y_true_test = [28,19,20,18,36,39,25,25,23,23]
### Feel free to use as many code cells as needed.
with tf.Session() as sess:
    saver.restore(sess, tf.train.latest_checkpoint('.'))
    #print(sess.run(tf.nn.top_k(softmax, k=3),feed_dict={x: X_test_new}))
    #print(sess.run(pred,feed_dict ={x:X_test_new}))
    # Top-3 softmax probabilities and class ids per new image.
    print(sess.run(tf.nn.top_k(softmax, k=3),feed_dict ={x:X_test_new}))
```
For each of the new images, print out the model's softmax probabilities to show the **certainty** of the model's predictions (limit the output to the top 5 probabilities for each image). [`tf.nn.top_k`](https://www.tensorflow.org/versions/r0.12/api_docs/python/nn.html#top_k) could prove helpful here.
The example below demonstrates how tf.nn.top_k can be used to find the top k predictions for each image.
`tf.nn.top_k` will return the values and indices (class ids) of the top k predictions. So if k=3, for each sign, it'll return the 3 largest probabilities (out of a possible 43) and the correspoding class ids.
Take this numpy array as an example. The values in the array represent predictions. The array contains softmax probabilities for five candidate images with six possible classes. `tf.nn.top_k` is used to choose the three classes with the highest probability:
```
# (5, 6) array
a = np.array([[ 0.24879643, 0.07032244, 0.12641572, 0.34763842, 0.07893497,
0.12789202],
[ 0.28086119, 0.27569815, 0.08594638, 0.0178669 , 0.18063401,
0.15899337],
[ 0.26076848, 0.23664738, 0.08020603, 0.07001922, 0.1134371 ,
0.23892179],
[ 0.11943333, 0.29198961, 0.02605103, 0.26234032, 0.1351348 ,
0.16505091],
[ 0.09561176, 0.34396535, 0.0643941 , 0.16240774, 0.24206137,
0.09155967]])
```
Running it through `sess.run(tf.nn.top_k(tf.constant(a), k=3))` produces:
```
TopKV2(values=array([[ 0.34763842, 0.24879643, 0.12789202],
[ 0.28086119, 0.27569815, 0.18063401],
[ 0.26076848, 0.23892179, 0.23664738],
[ 0.29198961, 0.26234032, 0.16505091],
[ 0.34396535, 0.24206137, 0.16240774]]), indices=array([[3, 0, 5],
[0, 1, 4],
[0, 5, 1],
[1, 3, 5],
[1, 4, 3]], dtype=int32))
```
Looking just at the first row we get `[ 0.34763842, 0.24879643, 0.12789202]`, you can confirm these are the 3 largest probabilities in `a`. You'll also notice `[3, 0, 5]` are the corresponding indices.
```
### Print out the top five softmax probabilities for the predictions on the German traffic sign images found on the web.
### Feel free to use as many code cells as needed.
```
### Project Writeup
Once you have completed the code implementation, document your results in a project writeup using this [template](https://github.com/udacity/CarND-Traffic-Sign-Classifier-Project/blob/master/writeup_template.md) as a guide. The writeup can be in a markdown or pdf file.
> **Note**: Once you have completed all of the code implementations and successfully answered each question above, you may finalize your work by exporting the iPython Notebook as an HTML document. You can do this by using the menu above and navigating to \n",
"**File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.
---
## Step 4 (Optional): Visualize the Neural Network's State with Test Images
This section is not required to complete but acts as an additional exercise for understanding the output of a neural network's weights. While neural networks can be a great learning device they are often referred to as a black box. We can understand what the weights of a neural network look like better by plotting their feature maps. After successfully training your neural network you can see what its feature maps look like by plotting the output of the network's weight layers in response to a test stimuli image. From these plotted feature maps, it's possible to see what characteristics of an image the network finds interesting. For a sign, maybe the inner network feature maps react with high activation to the sign's boundary outline or to the contrast in the sign's painted symbol.
Provided for you below is the function code that allows you to get the visualization output of any tensorflow weight layer you want. The inputs to the function should be a stimuli image, one used during training or a new one you provided, and then the tensorflow variable name that represents the layer's state during the training process, for instance if you wanted to see what the [LeNet lab's](https://classroom.udacity.com/nanodegrees/nd013/parts/fbf77062-5703-404e-b60c-95b78b2f3f9e/modules/6df7ae49-c61c-4bb2-a23e-6527e69209ec/lessons/601ae704-1035-4287-8b11-e2c2716217ad/concepts/d4aca031-508f-4e0b-b493-e7b706120f81) feature maps looked like for it's second convolutional layer you could enter conv2 as the tf_activation variable.
For an example of what feature map outputs look like, check out NVIDIA's results in their paper [End-to-End Deep Learning for Self-Driving Cars](https://devblogs.nvidia.com/parallelforall/deep-learning-self-driving-cars/) in the section Visualization of internal CNN State. NVIDIA was able to show that their network's inner weights had high activations to road boundary lines by comparing feature maps from an image with a clear path to one without. Try experimenting with a similar test to show that your trained network's weights are looking for interesting features, whether it's looking at differences in feature maps from images with or without a sign, or even what feature maps look like in a trained network vs a completely untrained one on the same sign image.
<figure>
<img src="visualize_cnn.png" width="380" alt="Combined Image" />
<figcaption>
<p></p>
<p style="text-align: center;"> Your output should look something like this (above)</p>
</figcaption>
</figure>
<p></p>
```
### Visualize your network's feature maps here.
### Feel free to use as many code cells as needed.
# image_input: the test image being fed into the network to produce the feature maps
# tf_activation: should be a tf variable name used during your training procedure that represents the calculated state of a specific weight layer
# activation_min/max: can be used to view the activation contrast in more detail, by default matplot sets min and max to the actual min and max values of the output
# plt_num: used to plot out multiple different weight feature map sets on the same block, just extend the plt number for each new feature map entry
def outputFeatureMap(image_input, tf_activation, activation_min=-1, activation_max=-1, plt_num=1):
    """Plot every feature map of a TF layer's activation for one input image.

    image_input: preprocessed image batch fed to the network's input placeholder `x`
    tf_activation: TF tensor for the layer whose activations are visualized
    activation_min/max: optional fixed color-scale bounds (-1 means "use data min/max")
    plt_num: matplotlib figure number, so several map sets can be shown side by side
    Note: relies on `sess` (TF session) and `x` (input placeholder) from the
    enclosing scope -- they must be defined before calling this.
    """
    # Make sure image_input is preprocessed the way your network expects
    # (size, normalization, etc.) before calling.
    activation = tf_activation.eval(session=sess, feed_dict={x: image_input})
    featuremaps = activation.shape[3]
    plt.figure(plt_num, figsize=(15, 15))
    for featuremap in range(featuremaps):
        plt.subplot(6, 8, featuremap + 1)  # feature maps per row/column
        plt.title('FeatureMap ' + str(featuremap))  # feature map number
        # BUG FIX: the original used bitwise `&`, which binds tighter than `!=`,
        # so the condition parsed as `activation_min != (-1 & activation_max)`.
        # Logical `and` gives the intended "both bounds provided" check.
        if activation_min != -1 and activation_max != -1:
            plt.imshow(activation[0, :, :, featuremap], interpolation="nearest", vmin=activation_min, vmax=activation_max, cmap="gray")
        elif activation_max != -1:
            plt.imshow(activation[0, :, :, featuremap], interpolation="nearest", vmax=activation_max, cmap="gray")
        elif activation_min != -1:
            plt.imshow(activation[0, :, :, featuremap], interpolation="nearest", vmin=activation_min, cmap="gray")
        else:
            plt.imshow(activation[0, :, :, featuremap], interpolation="nearest", cmap="gray")
```
|
github_jupyter
|
```
# coding: utf-8
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.font_manager as fm
import seaborn as sn
from pymongo import MongoClient
from pandas.plotting import scatter_matrix
%matplotlib inline
from pymongo import MongoClient
client = MongoClient("mongodb://analytics:cocorosie-password@gamerec-shard-00-00-nbybv.mongodb.net:27017,gamerec-shard-00-01-nbybv.mongodb.net:27017,gamerec-shard-00-02-nbybv.mongodb.net:27017/test?ssl=true&replicaSet=gamerec-shard-0&authSource=admin&retryWrites=true")
print(client.gamerec)
client.database_names()
db = client.cleaned_full_comments
collection = db.cleaned_full_comments
import pandas as pd
comm_df= pd.DataFrame(list(collection.find({}, {'_id': 0})))
# Setting up pivot table for actual userscores
import numpy as np
df_actual_pivot = pd.pivot_table(comm_df, values = ['Userscore'],
index = ['Title', 'Platform', 'Username'],
aggfunc = np.mean).unstack()
actual_user_means = df_actual_pivot.mean(axis=0)
df_actual_pivot_mean = df_actual_pivot - actual_user_means
df_actual_pivot_mean.fillna(0, inplace=True)
# Setting up pivot table for vader rated sentiment scores
df_vader_pivot = pd.pivot_table(comm_df, values = ['actual_sentiment_score'],
index = ['Title', 'Platform', 'Username'],
aggfunc = np.mean).unstack()
vader_user_means = df_vader_pivot.mean(axis=0)
df_vader_pivot_mean = df_vader_pivot - vader_u
from sklearn.metrics.pairwise import cosine_similarity, euclidean_distances
actual_cosine_dists = cosine_similarity(df_actual_pivot_mean)
vader_cosine_dists = cosine_similarity(df_vader_pivot_mean)
actual_cosine_dists = pd.DataFrame(actual_cosine_dists, columns=df_actual_pivot_mean.index)
actual_cosine_dists.index = actual_cosine_dists.columns
vader_cosine_dists = pd.DataFrame(vader_cosine_dists, columns=df_vader_pivot_mean.index)
vader_cosine_dists.index = vader_cosine_dists.columns
vader_cosine_dists.iloc[0:5,0:5]
def get_similar_games_actual(games_list, n=100):
    """Rank games by summed cosine similarity to the given liked games.

    games_list: list of (Title, Platform) tuples the user likes
    n: number of recommendations to return (None = the full ranking)
    Returns a list of (Title, Platform) tuples, most similar first, with the
    liked games themselves excluded.
    """
    # BUG FIX: the original filtered unknown games into `games` but then kept
    # indexing with the raw `games_list`, which raises KeyError for any game
    # missing from the similarity matrix; `ranked_games.remove(g)` likewise
    # raised ValueError for those games. Use the filtered list throughout.
    games = [game for game in games_list if game in actual_cosine_dists.columns]
    # Sum each candidate's similarity to all of the liked games.
    # (.sum(axis=1) replaces the slower apply(lambda row: np.sum(row), axis=1).)
    games_summed = actual_cosine_dists[games].sum(axis=1)
    games_summed = games_summed.sort_values(ascending=False)
    # Drop the liked games from the ranking by index membership (the original
    # tested the summed *values* against games_list, which never matched).
    ranked_games = games_summed.index[~games_summed.index.isin(games)].tolist()
    if n is None:
        return ranked_games
    return ranked_games[:n]
#games_i_like = [("Baldur's Gate II: Shadows of Amn", 'PC'), ("BioShock",'PlayStation3')]
games_i_like = [("The Legend of Zelda: Breath of the Wild", 'Switch')]
# Print the top-50 recommendations as a numbered "rank. Title on Platform" list.
for i, game in enumerate(get_similar_games_actual(games_i_like, 50)):
    print("%d. %s on %s" % (i+1, game[0], game[1]))
```
|
github_jupyter
|
# Keyboard shortcuts
In this notebook, you'll get some practice using keyboard shortcuts. These are key to becoming proficient at using notebooks and will greatly increase your work speed.
First up, switching between edit mode and command mode. Edit mode allows you to type into cells while command mode will use key presses to execute commands such as creating new cells and openning the command palette. When you select a cell, you can tell which mode you're currently working in by the color of the box around the cell. In edit mode, the box and thick left border are colored green. In command mode, they are colored blue. Also in edit mode, you should see a cursor in the cell itself.
By default, when you create a new cell or move to the next one, you'll be in command mode. To enter edit mode, press Enter/Return. To go back from edit mode to command mode, press Escape.
> **Exercise:** Click on this cell, then press Shift + Enter to get to the next cell. Switch between edit and command mode a few times.
```
# mode practice
```
## Help with commands
If you ever need to look up a command, you can bring up the list of shortcuts by pressing `H` in command mode. The keyboard shortcuts are also available above in the Help menu. Go ahead and try it now.
## Creating new cells
One of the most common commands is creating new cells. You can create a cell above the current cell by pressing `A` in command mode. Pressing `B` will create a cell below the currently selected cell.
Above!
> **Exercise:** Create a cell above this cell using the keyboard command.
> **Exercise:** Create a cell below this cell using the keyboard command.
And below!
## Switching between Markdown and code
With keyboard shortcuts, it is quick and simple to switch between Markdown and code cells. To change from Markdown to a code cell, press `Y`. To switch from code to Markdown, press `M`.
> **Exercise:** Switch the cell below between Markdown and code cells.
```
## Practice here
def fibo(n):  # Fibonacci sequence, computed iteratively
    """Return the n-th Fibonacci number (F(0)=0, F(1)=1)."""
    prev, curr = 0, 1
    for _ in range(n):
        prev, curr = curr, prev + curr
    return prev
```
## Line numbers
A lot of times it is helpful to number the lines in your code for debugging purposes. You can turn on numbers by pressing `L` (in command mode of course) on a code cell.
> **Exercise:** Turn line numbers on and off in the above code cell.
## Deleting cells
Deleting cells is done by pressing `D` twice in a row, so `D`, `D`. This is to prevent accidental deletions: you have to press the button twice!
> **Exercise:** Delete the cell below.
## Saving the notebook
Notebooks are autosaved every once in a while, but you'll often want to save your work between those times. To save the book, press `S`. So easy!
## The Command Palette
You can easily access the command palette by pressing Shift + Control/Command + `P`.
> **Note:** This won't work in Firefox and Internet Explorer unfortunately. There is already a keyboard shortcut assigned to those keys in those browsers. However, it does work in Chrome and Safari.
This will bring up the command palette where you can search for commands that aren't available through the keyboard shortcuts. For instance, there are buttons on the toolbar that move cells up and down (the up and down arrows), but there aren't corresponding keyboard shortcuts. To move a cell down, you can open up the command palette and type in "move" which will bring up the move commands.
> **Exercise:** Use the command palette to move the cell below down one position.
```
# below this cell
# Move this cell down
```
## Finishing up
There is plenty more you can do such as copying, cutting, and pasting cells. I suggest getting used to using the keyboard shortcuts, you’ll be much quicker at working in notebooks. When you become proficient with them, you'll rarely need to move your hands away from the keyboard, greatly speeding up your work.
Remember, if you ever need to see the shortcuts, just press `H` in command mode.
|
github_jupyter
|
<a href="https://colab.research.google.com/github/SR2090/Image-Classification-MNIST/blob/main/ImageClassificationUsingCNN.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten
```
## 2. Importing and Loading the data
```
# Download/load the Fashion-MNIST dataset (28x28 grayscale images, 10 classes).
from tensorflow.keras.datasets import fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
# Human-readable class names, indexed by the integer labels 0-9.
labelMap = ["T-shirt/top","Trouser","Pullover","Dress","Coat","Sandal","Shirt","Sneaker","Bag","Ankle boot"]
```
## 3. Explore the data
```
from tensorflow.keras.utils import to_categorical
# Report the raw array shapes for the train and test splits.
print('Training data shape : ', train_images.shape, train_labels.shape)
print('Testing data shape : ', test_images.shape, test_labels.shape)
# Derive the number of classes from the labels rather than hard-coding 10.
classes = np.unique(train_labels)
nClasses = len(classes)
print('Total number of outputs : ', nClasses)
print('Output Classes : ', classes)
plt.figure(figsize=[10,5])
# Display the first image in the training data
plt.subplot(121)
plt.imshow(train_images[0,:,:])
plt.title("Ground Truth : {}".format(train_labels[0]))
# Display the first image in testing data
plt.subplot(122)
plt.imshow(test_images[0,:,:])
plt.title("Ground Truth : {}".format(test_labels[0]))
```
## 4. Preprocess the data
Perform normalization of data (i.e. convert the images to float and normalize the intensity values to lie between 0-1) and convert the labels to categorical variables to be used in Keras.
```
# Fashion-MNIST is grayscale, so there is a single channel dimension.
nDims = 1
nRows, nCols = train_images.shape[1:]
# Add an explicit channel axis: (N, 28, 28) -> (N, 28, 28, 1) for Conv2D.
train_data = train_images.reshape(train_images.shape[0], nRows, nCols, nDims)
# Input shape to feed it to the neural network
test_data = test_images.reshape(test_images.shape[0], nRows, nCols, nDims)
input_shape = (nRows,nCols,nDims)
# Normalize the value between 0 and 1
# 1. Convert to float32
train_data = train_data.astype('float32')
test_data = test_data.astype('float32')
# 2. Scale the data to lie between 0 to 1
train_data /= 255
test_data /= 255
```
- Category is changed from integer to boolean representation using the to_categorical in keras.
```
# One-hot encode the integer labels for use with categorical_crossentropy.
train_labels_one_hot = to_categorical(train_labels)
# NOTE(review): "lables" is a typo, but later cells reference this exact name,
# so it is deliberately kept as-is here.
test_lables_one_hot = to_categorical(test_labels)
print('Original Label 0 :', train_labels[0])
print('After conversion to categorical (one-hot): ', train_labels_one_hot[0])
# Notebook echo of the raw integer labels for comparison.
train_labels
```
## 5. Model Architecture
- This is obtained through hit and trial
- Find an existing problem model and try to reconfigure it for the problem you are trying to solve
- Both of the aforementioned should be similar
"" For implementing a CNN, we will stack up Convolutional Layers, followed by Max Pooling layers. We will also include Dropout to avoid overfitting.
Finally, we will add a fully connected ( Dense ) layer followed by a softmax layer. Given below is the model structure.
We use 6 convolutional layers and 1 fully-connected layer.
The first 2 convolutional layers have 32 filters / kernels with a window size of 3×3.
The remaining conv layers have 64 filters.
We also add a max pooling layer with window size 2×2 after each pair of conv layer.
We add a dropout layer with a dropout ratio of 0.25 after every pooling layer.
In the final line, we add the dense layer which performs the classification among 10 classes using a softmax layer.""
```
# def createModel():
# model = Sequential()
# # THe first 2 layers have 32 filters of window size 3x3 bigger kernels are not efficient and hardly produce better result
# # Sometimes they even may produce worse result
# model.add(Conv2D(32, (3,3), padding='same', activation='relu', input_shape=input_shape))
# # second conv layer to obtain hierarcial features
# model.add(Conv2D(32, (3, 3), activation='relu'))
# # Redue the length and width to improve efficiency
# model.add(MaxPooling2D(pool_size(2,2)))
# # Dropout to prevent overfitting
# model.add(Dropout(0.25))
# model.add(Conv2D(64,(3,3), padding='same', activation='relu'))
# model.add(Conv2D(64,(3,3),activation='relu')
# model.add(MaxPooling2D(pool_size=(2, 2)))
# model.add(Dropout(0.25))
# model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
# model.add(Conv2D(64, (3, 3), activation='relu'))
# model.add(MaxPooling2D(pool_size=(2, 2)))
# model.add(Dropout(0.25))
# model.add(Flatten())
# model.add(Dense(512, activation='relu'))
# model.add(Dropout(0.5))
# model.add(Dense(nClasses, activation='softmax'))
# return model
def createModel():
    """Build the 6-conv + 1-dense CNN used for Fashion-MNIST classification.

    Three conv stages of (32, 32), (64, 64), (64, 64) 3x3 filters, each stage
    followed by 2x2 max pooling and 25% dropout, then a 512-unit dense head
    with 50% dropout and a softmax over the nClasses outputs.
    """
    layers = [
        Conv2D(32, (3, 3), padding='same', activation='relu', input_shape=input_shape),
        Conv2D(32, (3, 3), activation='relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.25),
        Conv2D(64, (3, 3), padding='same', activation='relu'),
        Conv2D(64, (3, 3), activation='relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.25),
        Conv2D(64, (3, 3), padding='same', activation='relu'),
        Conv2D(64, (3, 3), activation='relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.25),
        Flatten(),
        Dense(512, activation='relu'),
        Dropout(0.5),
        Dense(nClasses, activation='softmax'),
    ]
    model = Sequential()
    for layer in layers:
        model.add(layer)
    return model
# Build and compile the model: RMSprop optimizer, categorical cross-entropy
# for the one-hot 10-class labels.
model1 = createModel()
batch_size = 256
epochs = 20
model1.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
model1.summary()
```
## 6. Training the model
```
# Train for `epochs` epochs, evaluating on the test split after every epoch.
# NOTE(review): passing the test set as validation_data means the "val" curves
# below are test-set metrics, not a separate held-out validation set.
history = model1.fit(train_data, train_labels_one_hot, batch_size=batch_size, epochs=epochs, verbose=1,
                     validation_data=(test_data, test_lables_one_hot))
```
## 7. Checking loss and accuracy curves
### 7.1 Training Loss vs Validation Loss
- Training and validation loss both going down
```
# Training vs validation loss per epoch (red = train, blue = validation).
plt.figure(figsize=[8,6])
plt.plot(history.history['loss'],'r',linewidth=3.0)
plt.plot(history.history['val_loss'],'b',linewidth=3.0)
plt.legend(['Training loss', 'Validation Loss'],fontsize=18)
plt.xlabel('Epochs ',fontsize=16)
plt.ylabel('Loss',fontsize=16)
plt.title('Loss Curves',fontsize=16)
```
### 7.2 Training Accuracy vs Validation Accuracy
- Training and validation accuracy are both increasing
```
# Training vs validation accuracy per epoch (red = train, blue = validation).
plt.figure(figsize=[8,6])
plt.plot(history.history['accuracy'],'r',linewidth=3.0)
plt.plot(history.history['val_accuracy'],'b',linewidth=3.0)
plt.legend(['Training Accuracy', 'Validation Accuracy'],fontsize=18)
plt.xlabel('Epochs ',fontsize=16)
plt.ylabel('Accuracy',fontsize=16)
plt.title('Accuracy Curves',fontsize=16)
```
## 8. Draw Inference
### 8.1 On a centered Image
```
# Classify the first (well-centered) test image.
testSample = test_data[0,:]
plt.imshow(testSample.reshape(28,28));plt.show()
# Sequential.predict_classes() was deprecated and removed in TF >= 2.6;
# argmax over predict() probabilities yields the same integer label.
label = int(np.argmax(model1.predict(testSample.reshape(1,28,28,nDims)), axis=-1)[0])
print("Label = {}, Item = {}".format(label,labelMap[label]))
```
### 8.2 On a Shifted Up image
```
# Shift the sample up by copying rows 6:25 into rows 1:20 of a blank canvas.
shiftUp = np.zeros(testSample.shape)
shiftUp[1:20,:] = testSample[6:25,:]
plt.imshow(shiftUp.reshape(28,28));plt.show()
# predict_classes() is gone in modern TF; use argmax of predict() instead.
label = int(np.argmax(model1.predict(shiftUp.reshape(1,28,28,nDims)), axis=-1)[0])
print("Label = {}, Item = {}".format(label,labelMap[label]))
```
### 8.3 On a shifted down image
```
# Shift the sample down by copying rows 6:23 into rows 10:27 of a blank canvas.
shiftDown = np.zeros(testSample.shape)
shiftDown[10:27,:] = testSample[6:23,:]
plt.imshow(shiftDown.reshape(28,28));plt.show()
# predict_classes() is gone in modern TF; use argmax of predict() instead.
label = int(np.argmax(model1.predict(shiftDown.reshape(1,28,28,nDims)), axis=-1)[0])
print("Label = {}, Item = {}".format(label,labelMap[label]))
```
### 8.4 On a left-shifted image
```
# testSample.shape
# shiftDown = np.zeros((56,56,1))
# shiftDown[5:22,0:28] = testSample[6:23,0:28]
# plt.imshow(shiftDown);plt.show()
# label = model1.predict_classes(shiftDown.reshape(1,28,28,nDims))[0]
# print("Label = {}, Item = {}".format(label,labelMap[label]))
```
|
github_jupyter
|
# Evaluation of a QA System
EXECUTABLE VERSION: [colab](https://colab.research.google.com/github/deepset-ai/haystack/blob/master/tutorials/Tutorial5_Evaluation.ipynb)
To be able to make a statement about the performance of a question-answering system, it is important to evaluate it. Furthermore, evaluation allows one to determine which parts of the system can be improved.
### Prepare environment
#### Colab: Enable the GPU runtime
Make sure you enable the GPU runtime to experience decent speed in this tutorial.
**Runtime -> Change Runtime type -> Hardware accelerator -> GPU**
<img src="https://raw.githubusercontent.com/deepset-ai/haystack/master/docs/_src/img/colab_gpu_runtime.jpg">
```
# Make sure you have a GPU running
# (shell escape: lists the driver version and CUDA devices visible to this runtime)
!nvidia-smi
```
## Start an Elasticsearch server
You can start Elasticsearch on your local machine instance using Docker. If Docker is not readily available in your environment (eg., in Colab notebooks), then you can manually download and execute Elasticsearch from source.
```
# Install the latest release of Haystack in your own environment
#! pip install farm-haystack
# Install the latest master of Haystack
!pip install git+https://github.com/deepset-ai/haystack.git
!pip install urllib3==1.25.4
!pip install torch==1.6.0+cu101 torchvision==0.6.1+cu101 -f https://download.pytorch.org/whl/torch_stable.html
# In Colab / No Docker environments: Start Elasticsearch from source
! wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.9.2-linux-x86_64.tar.gz -q
! tar -xzf elasticsearch-7.9.2-linux-x86_64.tar.gz
! chown -R daemon:daemon elasticsearch-7.9.2
import os
from subprocess import Popen, PIPE, STDOUT
# Launch Elasticsearch as a background child process.
# NOTE(review): preexec_fn switches to uid 1 before exec -- presumably the
# "daemon" user chown'ed above; confirm uid 1 maps to it on this image.
es_server = Popen(['elasticsearch-7.9.2/bin/elasticsearch'],
                  stdout=PIPE, stderr=STDOUT,
                  preexec_fn=lambda: os.setuid(1)  # as daemon
                  )
# wait until ES has started
! sleep 30
from farm.utils import initialize_device_settings
device, n_gpu = initialize_device_settings(use_cuda=True)
from haystack.preprocessor.utils import fetch_archive_from_http
# Download evaluation data, which is a subset of Natural Questions development set containing 50 documents
doc_dir = "../data/nq"
s3_url = "https://s3.eu-central-1.amazonaws.com/deepset.ai-farm-qa/datasets/nq_dev_subset_v2.json.zip"
fetch_archive_from_http(url=s3_url, output_dir=doc_dir)
# make sure these indices do not collide with existing ones, the indices will be wiped clean before data is inserted
doc_index = "tutorial5_docs"
label_index = "tutorial5_labels"
# Connect to Elasticsearch
from haystack.document_store.elasticsearch import ElasticsearchDocumentStore
document_store = ElasticsearchDocumentStore(host="localhost", username="", password="", index="document",
                                            create_index=False, embedding_field="emb",
                                            embedding_dim=768, excluded_meta_data=["emb"])
# Add evaluation data to Elasticsearch Document Store
# We first delete the custom tutorial indices to not have duplicate elements
document_store.delete_all_documents(index=doc_index)
document_store.delete_all_documents(index=label_index)
document_store.add_eval_data(filename="../data/nq/nq_dev_subset_v2.json", doc_index=doc_index, label_index=label_index)
```
## Initialize components of QA-System
```
# Initialize Retriever: sparse (BM25-style) retrieval straight from Elasticsearch.
from haystack.retriever.sparse import ElasticsearchRetriever
retriever = ElasticsearchRetriever(document_store=document_store)
# Alternative: Evaluate DensePassageRetriever
# Note, that DPR works best when you index short passages < 512 tokens as only those tokens will be used for the embedding.
# Here, for nq_dev_subset_v2.json we have avg. num of tokens = 5220(!).
# DPR still outperforms Elastic's BM25 by a small margin here.
# from haystack.retriever.dense import DensePassageRetriever
# retriever = DensePassageRetriever(document_store=document_store,
#                                   query_embedding_model="facebook/dpr-question_encoder-single-nq-base",
#                                   passage_embedding_model="facebook/dpr-ctx_encoder-single-nq-base",
#                                   use_gpu=True,
#                                   embed_title=True,
#                                   max_seq_len=256,
#                                   batch_size=16,
#                                   remove_sep_tok_from_untitled_passages=True)
#document_store.update_embeddings(retriever, index=doc_index)
# Initialize Reader: extractive QA model, keeping up to 4 answer candidates
# per retrieved passage.
from haystack.reader.farm import FARMReader
reader = FARMReader("deepset/roberta-base-squad2", top_k_per_candidate=4)
# Initialize Finder which sticks together Reader and Retriever
from haystack.finder import Finder
finder = Finder(reader, retriever)
```
## Evaluation of Retriever
```
## Evaluate Retriever on its own
retriever_eval_results = retriever.eval(top_k=20, label_index=label_index, doc_index=doc_index)
## Retriever Recall is the proportion of questions for which the correct document containing the answer is
## among the top-k retrieved documents
print("Retriever Recall:", retriever_eval_results["recall"])
## Retriever Mean Avg Precision rewards retrievers that give relevant documents a higher rank
print("Retriever Mean Avg Precision:", retriever_eval_results["map"])
```
## Evaluation of Reader
```
# Evaluate Reader on its own (answers are extracted from the labeled documents)
reader_eval_results = reader.eval(document_store=document_store, device=device, label_index=label_index, doc_index=doc_index)
# Evaluation of Reader can also be done directly on a SQuAD-formatted file without passing the data to Elasticsearch
#reader_eval_results = reader.eval_on_file("../data/nq", "nq_dev_subset_v2.json", device=device)
## Reader Top-N-Accuracy is the proportion of predicted answers that match with their corresponding correct answer
print("Reader Top-N-Accuracy:", reader_eval_results["top_n_accuracy"])
## Reader Exact Match is the proportion of questions where the predicted answer is exactly the same as the correct answer
print("Reader Exact Match:", reader_eval_results["EM"])
## Reader F1-Score is the average overlap between the predicted answers and the correct answers
print("Reader F1-Score:", reader_eval_results["f1"])
```
## Evaluation of Finder
```
# Evaluate the combination of Reader and Retriever end-to-end through the Finder
finder_eval_results = finder.eval(top_k_retriever=1, top_k_reader=10, label_index=label_index, doc_index=doc_index)
finder.print_eval_results(finder_eval_results)
```
|
github_jupyter
|
## Importing dependencies and loading the data
```
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
# NOTE(review): load_boston is deprecated and removed in scikit-learn >= 1.2;
# this cell requires an older sklearn -- confirm the installed version.
from sklearn.datasets import load_boston
dataset=load_boston()
# Bunch object exposing .data, .feature_names and .target (used below).
dataset
```
### So in the given data there are certain features and target prices of houses in boston. So let's first transform the given data into dataframe
```
# Wrap the raw feature matrix in a DataFrame with named columns.
df=pd.DataFrame(dataset.data,columns=dataset.feature_names)
df.head()
```
Let's put the target variable to the dataframe
```
# Append the house-price target as its own column.
df['Target']=dataset.target
df.head()
```
## Since we have transformed the data into the dataframe now let's do the Exploratory Data Analysis i.e. EDA
```
# Summary statistics (count/mean/std/quartiles) for every column.
df.describe()
```
Let's see if there is any missing data or not
```
# Count missing values per column.
df.isnull().sum()
```
### Since there is not any missing data let's see the correlation between the features
```
# Pairwise correlation between all columns.
df.corr()
```
### Let's visualize the data on the heatmap
```
plt.figure(figsize=(10,10))  # enlarge the figure so the annotations stay readable
sns.heatmap(df.corr(),annot=True)  # correlation matrix as an annotated heatmap
```
### let's see the distribution of the data since all the features are continuous
```
# Every column (features + Target) is continuous; plot each distribution.
# list() replaces the redundant copy-comprehension over df.columns.
cont = list(df.columns)
cont
for feature in cont:
    # NOTE(review): sns.distplot is deprecated in seaborn >= 0.11
    # (histplot/displot is the replacement) -- confirm installed version.
    sns.distplot(df[feature])
    plt.show()
```
#### Let's draw the regplot between the features and target
```
# Regression plot of each feature against the target to eyeball linearity.
for feature in cont:
    if feature!='Target':
        sns.regplot(x=feature,y='Target',data=df)
        plt.show()
cont
plt.figure(figsize=(10,10)) #this increase the dimension of the figure
sns.heatmap(df.corr(),annot=True)
```
### Let's do some feature engineering and drop some features which have low correlation with the target
```
'''Now let's take some of the features and test a model and after
seeing the result we can again take some more features to see if the model is working fine or not.'''
# Feature subset chosen from the correlation heatmap above.
x=df.loc[:,[
    'ZN',
    'INDUS',
    'NOX',
    'RM',
    'AGE',
    'DIS',
    'TAX',
    'PTRATIO',
    'B',
    'LSTAT']]
y=df.Target
x.head()
# Now let's split the data into train and test data using train_test_split
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.2,random_state=5)
# fitting the model
from sklearn.linear_model import LinearRegression
model=LinearRegression()
model.fit(x_train,y_train)
# Predicting the values
y_pre=model.predict(x_test)
y_pre
# Let's see how our model is performing (R^2 on the held-out test split)
from sklearn.metrics import r2_score
score=r2_score(y_test,y_pre)
score
# Predicted vs actual scatter; points near the diagonal indicate a good fit.
sns.scatterplot(y_test,y_pre)
# Residual distribution; roughly centered on zero is what we want.
sns.distplot(y_test-y_pre)
```
|
github_jupyter
|
# Move Files
```
import numpy as np
import pandas as pd
import os
from datetime import datetime
import shutil
import random

# BUG FIX: 'max_colwidth' = -1 was deprecated and later removed in pandas;
# None is the supported way to say "do not truncate column text".
pd.set_option('max_colwidth', None)
```
# Create list of current files
```
# Root of the repo on the SageMaker instance; original crops live under images/.
SAGEMAKER_REPO_PATH = r'/home/ec2-user/SageMaker/classify-streetview'
ORIGINAL_IMAGE_PATH = os.path.join(SAGEMAKER_REPO_PATH, 'images')
ORIGINAL_TRAIN_PATH = os.path.join(ORIGINAL_IMAGE_PATH, 'train')
os.listdir(ORIGINAL_TRAIN_PATH)
subset_list = ['train', 'valid', 'test']
# Include obstacle and surface prob in case we need to move those images
class_list = ['3_present', '0_missing', '1_null', '2_obstacle', '4_surface_prob']
original_df_list = []
# Get all existing jpgs with their detailed info (one row per file, tagged
# with the split and label folder it currently sits in).
for split in subset_list:
    for class_name in class_list:
        full_folder_path = os.path.join(ORIGINAL_IMAGE_PATH, split, class_name)
        jpg_names = os.listdir(full_folder_path)
        df_part = pd.DataFrame({'jpg_name' : jpg_names, 'original_folder_path' : full_folder_path, 'original_group' : split, 'original_label' : class_name})
        original_df_list.append(df_part)
# Create a full list all files
df_original = pd.concat(original_df_list)
print(df_original.shape)
df_original.head()
# Snapshot the inventory to CSV for reference.
df_original.to_csv('March-SmartCrop-ImageList.csv', index = False)
df_original['original_label'].value_counts()
```
## Get the New ROI Image Details
```
# Load the new ROI label files and tag each with its train/valid/test split.
df_train = pd.read_csv('train_labels.csv')
df_train['new_group'] = 'train'
df_val = pd.read_csv('validation_labels.csv')
df_val['new_group'] = 'valid'
df_test = pd.read_csv('test_labels.csv')
df_test['new_group'] = 'test'
df_new_roi = pd.concat([df_train, df_val, df_test])
print(df_new_roi.shape)
df_new_roi.head()
# Rebuild the jpg filename as "<img_id>_<heading>_<crop_number>.jpg" so it can
# be joined against the folder inventory above.
df_new_roi['jpg_name'] = df_new_roi['img_id'].astype(str) + '_' + df_new_roi['heading'].astype(str) + '_' +df_new_roi['crop_number'].astype(str) + '.jpg'
df_new_roi.head()
```
# Combine ROI Images with Original Image details
```
# Outer-join so images present on only one side are still visible.
df_combine = df_new_roi.merge(df_original, how = 'outer', left_on = 'jpg_name', right_on = 'jpg_name')
print(df_combine.shape)
df_combine.head()
df_combine['crop_number'].value_counts(dropna = False)
# Rows with no crop_number came only from the folder scan (unmatched files).
df_combine.loc[df_combine['crop_number'].isna()]
df_combine['original_folder_path'].value_counts(dropna = False).head()
# Cross-tab of the new ground-truth label vs the original folder label.
df_group_label = df_combine.groupby(['ground_truth', 'original_label'])['jpg_name'].count()
df_group_label
df_combine['jpg_name'].value_counts().describe()
```
# Observations
* There's exactly 1 row per jpg_name
* There's a row with ipynb_checkpoints, which is fine
* There are some lost images (mainly null)
* The grouping by label showing how images move around into the new "ground_truth"
# Create the list of files before and after locations
```
# Keep only rows that matched on both sides (dropna removes the outer-join gaps).
df_move = df_combine.dropna().copy()
df_move.shape
df_move['ground_truth'].value_counts()
df_move['new_group'].value_counts()
df_move.head()
# Destination: roi-images/<split>/<ground_truth>/ under the repo root.
df_move['new_folder_path'] = SAGEMAKER_REPO_PATH + '/roi-images/' + df_move['new_group'] + '/' + df_move['ground_truth']
df_move.head()
df_move.to_csv('roi-images-sagemaker-paths.csv', index = False)
```
# Actually Copy the Images
```
# Make sure folders exist for all new destination paths.
unique_new_folders = list(df_move['new_folder_path'].unique())
print(len(unique_new_folders))
for new_folder in unique_new_folders:
    if not os.path.exists(new_folder):
        os.makedirs(new_folder)
        print(new_folder)  # echo only the folders actually created
# Copy each image into its new split/label folder.
for index, row in df_move.iterrows():
    original = os.path.join(row['original_folder_path'], row['jpg_name'])
    target = os.path.join(row['new_folder_path'], row['jpg_name'])
    try:
        shutil.copyfile(original, target)
    except OSError as err:
        # BUG FIX: the original bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit; catch file-system errors only and
        # report the cause so failures are diagnosable.
        print(f"could not copy: {row['jpg_name']} ({err})")
```
# Make an alphabetical list of the test images
```
# Restrict to the test split and keep only the columns useful for review.
df_test = df_move.loc[df_move['new_group'] == 'test']
print(df_test.shape)
df_test.columns
keep_cols = ['img_id', 'heading', 'crop_number', '0_missing', '1_null', '2_present', 'count_all', 'ground_truth', 'jpg_name', 'new_folder_path']
df_test_keep = df_test[keep_cols].copy()
# Sort by destination folder then filename for an alphabetical listing.
df_test_keep = df_test_keep.sort_values(['new_folder_path', 'jpg_name'])
df_test_keep.head()
df_test_keep.to_csv('test_roi_image_locations_sorted.csv', index = False)
```
|
github_jupyter
|
## Main Driver Notebook for Training Graph NNs on TSP for Edge Classification
### MODELS
- GatedGCN
- GCN
- GAT
- GraphSage
- GIN
- MoNet
- MLP
### DATASET
- TSP
### TASK
- Edge Classification, i.e. Classifying each edge as belonging/not belonging to the optimal TSP solution set.
```
"""
IMPORTING LIBS
"""
import dgl
import numpy as np
import os
import socket
import time
import random
import glob
import argparse, json
import pickle
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
from tqdm import tqdm
class DotDict(dict):
    """A dict whose entries are also readable/writable as attributes."""

    def __init__(self, **entries):
        super().__init__()
        self.update(entries)
        # Aliasing __dict__ to the dict itself makes attribute access and
        # item access share the same storage.
        self.__dict__ = self
# """
# AUTORELOAD IPYTHON EXTENSION FOR RELOADING IMPORTED MODULES
# """
def in_ipynb():
    """Return True when running inside an IPython/Jupyter kernel, else False."""
    try:
        # get_ipython is injected into the namespace by IPython; outside a
        # kernel the name does not exist at all, raising NameError.
        # (The original bound the unused local `cfg = get_ipython().config`.)
        get_ipython()
        return True
    except NameError:
        return False
# Detect the execution context once; the %autoreload magics are only valid
# inside an IPython/Jupyter session.
notebook_mode = in_ipynb()
print(notebook_mode)
if notebook_mode == True:
    %load_ext autoreload
    %autoreload 2
"""
IMPORTING CUSTOM MODULES/METHODS
"""
from nets.TSP_edge_classification.load_net import gnn_model # import all GNNS
from data.data import LoadData # import dataset
from train.train_TSP_edge_classification import train_epoch, evaluate_network # import train functions
"""
GPU Setup
"""
def gpu_setup(use_gpu, gpu_id):
    """Pin the process to a single GPU and return the torch device to run on."""
    # Restrict CUDA to the requested device before any CUDA context is created.
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    cuda_ok = torch.cuda.is_available() and use_gpu
    if cuda_ok:
        print('cuda available with GPU:', torch.cuda.get_device_name(0))
        return torch.device("cuda")
    print('cuda not available')
    return torch.device("cpu")
# Hardware defaults; `device` is assigned later via gpu_setup().
# gpu_id of -1 sets CUDA_VISIBLE_DEVICES="-1", which exposes no GPUs.
use_gpu = True
gpu_id = -1
device = None
# """
# USER CONTROLS
# """
# Notebook-mode defaults: pick a model and load the TSP dataset once up front.
if notebook_mode == True:
    MODEL_NAME = 'MLP'
    # MODEL_NAME = 'GCN'
    MODEL_NAME = 'GatedGCN'
    # MODEL_NAME = 'GAT'
    # MODEL_NAME = 'GraphSage'
    # MODEL_NAME = 'DiffPool'
    # MODEL_NAME = 'GIN'
    DATASET_NAME = 'TSP'
    out_dir = 'out/TSP_edge_classification/'
    # Timestamped log/checkpoint directories so repeated runs do not collide.
    root_log_dir = out_dir + 'logs/' + MODEL_NAME + "_" + DATASET_NAME + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
    root_ckpt_dir = out_dir + 'checkpoints/' + MODEL_NAME + "_" + DATASET_NAME + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
    print("[I] Loading data (notebook) ...")
    dataset = LoadData(DATASET_NAME)
    trainset, valset, testset = dataset.train, dataset.val, dataset.test
    print("[I] Finished loading.")
# The last uncommented assignment wins: 'GAT' is the value MODEL_NAME holds
# after this cell runs.
MODEL_NAME = 'GatedGCN'
MODEL_NAME = 'GCN'
MODEL_NAME = 'GAT'
#MODEL_NAME = 'GraphSage'
#MODEL_NAME = 'MLP'
#MODEL_NAME = 'GIN'
#MODEL_NAME = 'MoNet'
# """
# PARAMETERS
# """
if notebook_mode == True:
#MODEL_NAME = 'GCN'
n_heads = -1
edge_feat = False
pseudo_dim_MoNet = -1
kernel = -1
gnn_per_block = -1
embedding_dim = -1
pool_ratio = -1
n_mlp_GIN = -1
gated = False
self_loop = False
max_time = 48
if MODEL_NAME == 'MLP':
seed=41; epochs=500; batch_size=64; init_lr=0.001; lr_reduce_factor=0.5; lr_schedule_patience=10; min_lr = 1e-5; weight_decay=0
L=3; hidden_dim=144; out_dim=hidden_dim; dropout=0.0; readout='mean'; gated = False # Change gated = True for Gated MLP model
if MODEL_NAME == 'GCN':
seed=41; epochs=500; batch_size=64; init_lr=0.001; lr_reduce_factor=0.5; lr_schedule_patience=10; min_lr = 1e-5; weight_decay=0
L=4; hidden_dim=128; out_dim=hidden_dim; dropout=0.0; readout='mean';
if MODEL_NAME == 'GraphSage':
seed=41; epochs=500; batch_size=64; init_lr=0.001; lr_reduce_factor=0.5; lr_schedule_patience=10; min_lr = 1e-5; weight_decay=0
L=4; hidden_dim=96; out_dim=hidden_dim; dropout=0.0; readout='mean';
if MODEL_NAME == 'GAT':
seed=41; epochs=500; batch_size=64; init_lr=0.001; lr_reduce_factor=0.5; lr_schedule_patience=10; min_lr = 1e-5; weight_decay=0
L=4; n_heads=8; hidden_dim=16; out_dim=128; dropout=0.0; readout='mean';
if MODEL_NAME == 'GIN':
seed=41; epochs=500; batch_size=64; init_lr=0.001; lr_reduce_factor=0.5; lr_schedule_patience=10; min_lr = 1e-5; weight_decay=0
L=4; hidden_dim=112; out_dim=hidden_dim; dropout=0.0; readout='mean';
if MODEL_NAME == 'MoNet':
seed=41; epochs=500; batch_size=64; init_lr=0.001; lr_reduce_factor=0.5; lr_schedule_patience=10; min_lr = 1e-5; weight_decay=0
L=4; hidden_dim=80; out_dim=hidden_dim; dropout=0.0; readout='mean';
if MODEL_NAME == 'GatedGCN':
seed=41; epochs=500; batch_size=64; init_lr=0.001; lr_reduce_factor=0.5; lr_schedule_patience=10; min_lr = 1e-5; weight_decay=0
L=4; hidden_dim=64; out_dim=hidden_dim; dropout=0.0; readout='mean'; edge_feat = True
# generic new_params
net_params = {}
net_params['device'] = device
net_params['in_dim'] = trainset[0][0].ndata['feat'][0].size(0)
net_params['in_dim_edge'] = trainset[0][0].edata['feat'][0].size(0)
net_params['residual'] = True
net_params['hidden_dim'] = hidden_dim
net_params['out_dim'] = out_dim
num_classes = len(np.unique(np.concatenate(trainset[:][1])))
net_params['n_classes'] = num_classes
net_params['n_heads'] = n_heads
net_params['L'] = L # min L should be 2
net_params['readout'] = "mean"
net_params['graph_norm'] = True
net_params['batch_norm'] = True
net_params['in_feat_dropout'] = 0.0
net_params['dropout'] = 0.0
net_params['edge_feat'] = edge_feat
net_params['self_loop'] = self_loop
# for MLPNet
net_params['gated'] = gated
# specific for MoNet
net_params['pseudo_dim_MoNet'] = 2
net_params['kernel'] = 3
# specific for GIN
net_params['n_mlp_GIN'] = 2
net_params['learn_eps_GIN'] = True
net_params['neighbor_aggr_GIN'] = 'sum'
# specific for graphsage
net_params['sage_aggregator'] = 'meanpool'
# setting seeds
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if device == 'cuda':
torch.cuda.manual_seed(seed)
"""
VIEWING MODEL CONFIG AND PARAMS
"""
def view_model_param(MODEL_NAME, net_params):
    """Instantiate the model and print/return its total parameter count."""
    model = gnn_model(MODEL_NAME, net_params)
    total_param = 0
    print("MODEL DETAILS:\n")
    #print(model)
    # Element count of a parameter tensor is the product of its dimensions.
    for weights in model.parameters():
        total_param += np.prod(list(weights.data.size()))
    print('MODEL/Total parameters:', MODEL_NAME, total_param)
    return total_param
if notebook_mode == True:
    # Report the parameter budget of the configured model before training.
    view_model_param(MODEL_NAME, net_params)
"""
TRAINING CODE
"""
def train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs):
    """Train MODEL_NAME on `dataset`, logging to TensorBoard and writing
    per-epoch checkpoints plus a final results/config text report.

    Args:
        MODEL_NAME: key understood by gnn_model() (e.g. 'GAT', 'GatedGCN').
        dataset: object exposing .name, .train/.val/.test splits and .collate.
        params: optimisation hyper-parameters (seed, epochs, lr schedule, ...).
        net_params: architecture hyper-parameters; must already contain
            'device', 'n_classes' and 'total_param'.
        dirs: (root_log_dir, root_ckpt_dir, write_file_name, write_config_file).
    """
    t0 = time.time()
    per_epoch_time = []
    DATASET_NAME = dataset.name
    #assert net_params['self_loop'] == False, "No self-loop support for %s dataset" % DATASET_NAME
    trainset, valset, testset = dataset.train, dataset.val, dataset.test
    root_log_dir, root_ckpt_dir, write_file_name, write_config_file = dirs
    device = net_params['device']
    # Write the network and optimization hyper-parameters in folder config/
    with open(write_config_file + '.txt', 'w') as f:
        f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n\nTotal Parameters: {}\n\n"""\
                .format(DATASET_NAME, MODEL_NAME, params, net_params, net_params['total_param']))
    log_dir = os.path.join(root_log_dir, "RUN_" + str(0))
    writer = SummaryWriter(log_dir=log_dir)
    # setting seeds
    random.seed(params['seed'])
    np.random.seed(params['seed'])
    torch.manual_seed(params['seed'])
    if device == 'cuda':
        # NOTE(review): `device` is a torch.device, so this string comparison
        # looks always-False; confirm device.type == 'cuda' was intended.
        torch.cuda.manual_seed(params['seed'])
    print("Training Graphs: ", len(trainset))
    print("Validation Graphs: ", len(valset))
    print("Test Graphs: ", len(testset))
    print("Number of Classes: ", net_params['n_classes'])
    model = gnn_model(MODEL_NAME, net_params)
    model = model.to(device)
    optimizer = optim.Adam(model.parameters(), lr=params['init_lr'], weight_decay=params['weight_decay'])
    # Reduce the LR by lr_reduce_factor when the validation loss plateaus.
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',
                                                     factor=params['lr_reduce_factor'],
                                                     patience=params['lr_schedule_patience'],
                                                     verbose=True)
    epoch_train_losses, epoch_val_losses = [], []
    epoch_train_f1s, epoch_val_f1s = [], []
    train_loader = DataLoader(trainset, batch_size=params['batch_size'], shuffle=True, collate_fn=dataset.collate)
    val_loader = DataLoader(valset, batch_size=params['batch_size'], shuffle=False, collate_fn=dataset.collate)
    test_loader = DataLoader(testset, batch_size=params['batch_size'], shuffle=False, collate_fn=dataset.collate)
    # At any point you can hit Ctrl + C to break out of training early.
    try:
        with tqdm(range(params['epochs'])) as t:
            for epoch in t:
                t.set_description('Epoch %d' % epoch)
                start = time.time()
                epoch_train_loss, epoch_train_f1, optimizer = train_epoch(model, optimizer, device, train_loader, epoch)
                epoch_val_loss, epoch_val_f1 = evaluate_network(model, device, val_loader, epoch)
                epoch_train_losses.append(epoch_train_loss)
                epoch_val_losses.append(epoch_val_loss)
                epoch_train_f1s.append(epoch_train_f1)
                epoch_val_f1s.append(epoch_val_f1)
                writer.add_scalar('train/_loss', epoch_train_loss, epoch)
                writer.add_scalar('val/_loss', epoch_val_loss, epoch)
                writer.add_scalar('train/_f1', epoch_train_f1, epoch)
                writer.add_scalar('val/_f1', epoch_val_f1, epoch)
                writer.add_scalar('learning_rate', optimizer.param_groups[0]['lr'], epoch)
                # Test F1 every epoch is for progress display only; the final
                # evaluation happens after the loop.
                _, epoch_test_f1 = evaluate_network(model, device, test_loader, epoch)
                t.set_postfix(time=time.time()-start, lr=optimizer.param_groups[0]['lr'],
                              train_loss=epoch_train_loss, val_loss=epoch_val_loss,
                              train_f1=epoch_train_f1, val_f1=epoch_val_f1,
                              test_f1=epoch_test_f1)
                per_epoch_time.append(time.time()-start)
                # Saving checkpoint
                ckpt_dir = os.path.join(root_ckpt_dir, "RUN_")
                if not os.path.exists(ckpt_dir):
                    os.makedirs(ckpt_dir)
                torch.save(model.state_dict(), '{}.pkl'.format(ckpt_dir + "/epoch_" + str(epoch)))
                # Prune old checkpoints: keep only the latest two epochs.
                files = glob.glob(ckpt_dir + '/*.pkl')
                for file in files:
                    epoch_nb = file.split('_')[-1]
                    epoch_nb = int(epoch_nb.split('.')[0])
                    if epoch_nb < epoch-1:
                        os.remove(file)
                scheduler.step(epoch_val_loss)
                if optimizer.param_groups[0]['lr'] < params['min_lr']:
                    print("\n!! LR EQUAL TO MIN LR SET.")
                    break
                # Stop training after params['max_time'] hours
                if time.time()-t0 > params['max_time']*3600:
                    print('-' * 89)
                    print("Max_time for training elapsed {:.2f} hours, so stopping".format(params['max_time']))
                    break
    except KeyboardInterrupt:
        print('-' * 89)
        print('Exiting from training early because of KeyboardInterrupt')
    # NOTE(review): `epoch` is only bound inside the loop above — an interrupt
    # before the first epoch finishes would make the next lines raise
    # NameError; confirm whether that corner case matters here.
    _, test_f1 = evaluate_network(model, device, test_loader, epoch)
    _, train_f1 = evaluate_network(model, device, train_loader, epoch)
    print("Test F1: {:.4f}".format(test_f1))
    print("Train F1: {:.4f}".format(train_f1))
    print("TOTAL TIME TAKEN: {:.4f}s".format(time.time()-t0))
    print("AVG TIME PER EPOCH: {:.4f}s".format(np.mean(per_epoch_time)))
    writer.close()
    """
        Write the results in out_dir/results folder
    """
    with open(write_file_name + '.txt', 'w') as f:
        f.write("""Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n{}\n\nTotal Parameters: {}\n\n
    FINAL RESULTS\nTEST F1: {:.4f}\nTRAIN F1: {:.4f}\n\n
    Total Time Taken: {:.4f}hrs\nAverage Time Per Epoch: {:.4f}s\n\n\n"""\
          .format(DATASET_NAME, MODEL_NAME, params, net_params, model, net_params['total_param'],
                  np.mean(np.array(test_f1)), np.mean(np.array(train_f1)), (time.time()-t0)/3600, np.mean(per_epoch_time)))
    # send results to gmail
    try:
        from gmail import send
        subject = 'Result for Dataset: {}, Model: {}'.format(DATASET_NAME, MODEL_NAME)
        body = """Dataset: {},\nModel: {}\n\nparams={}\n\nnet_params={}\n\n{}\n\nTotal Parameters: {}\n\n
    FINAL RESULTS\nTEST F1: {:.4f}\nTRAIN F1: {:.4f}\n\n
    Total Time Taken: {:.4f}hrs\nAverage Time Per Epoch: {:.4f}s\n\n\n"""\
          .format(DATASET_NAME, MODEL_NAME, params, net_params, model, net_params['total_param'],
                  np.mean(np.array(test_f1)), np.mean(np.array(train_f1)), (time.time()-t0)/3600, np.mean(per_epoch_time))
        send(subject, body)
    except:
        # Best-effort e-mail notification; a missing `gmail` module is the
        # expected (and silently ignored) case.
        pass
def main(notebook_mode=False,config=None):
    """Entry point: assemble the run configuration and launch training.

    Terminal mode (notebook_mode=False): reads a JSON config file given via
    --config and applies optional command-line overrides on top of it.
    Notebook mode: the caller supplies a ready-made `config` dict.
    In both modes this creates the output folders and hands everything to
    train_val_pipeline().
    """
    # terminal mode
    if notebook_mode==False:
        parser = argparse.ArgumentParser()
        parser.add_argument('--config', help="Please give a config.json file with training/model/data/param details")
        parser.add_argument('--gpu_id', help="Please give a value for gpu id")
        parser.add_argument('--model', help="Please give a value for model name")
        parser.add_argument('--dataset', help="Please give a value for dataset name")
        parser.add_argument('--out_dir', help="Please give a value for out_dir")
        parser.add_argument('--seed', help="Please give a value for seed")
        parser.add_argument('--epochs', help="Please give a value for epochs")
        parser.add_argument('--batch_size', help="Please give a value for batch_size")
        parser.add_argument('--init_lr', help="Please give a value for init_lr")
        parser.add_argument('--lr_reduce_factor', help="Please give a value for lr_reduce_factor")
        parser.add_argument('--lr_schedule_patience', help="Please give a value for lr_schedule_patience")
        parser.add_argument('--min_lr', help="Please give a value for min_lr")
        parser.add_argument('--weight_decay', help="Please give a value for weight_decay")
        parser.add_argument('--print_epoch_interval', help="Please give a value for print_epoch_interval")
        parser.add_argument('--L', help="Please give a value for L")
        parser.add_argument('--hidden_dim', help="Please give a value for hidden_dim")
        parser.add_argument('--out_dim', help="Please give a value for out_dim")
        parser.add_argument('--residual', help="Please give a value for residual")
        parser.add_argument('--edge_feat', help="Please give a value for edge_feat")
        parser.add_argument('--readout', help="Please give a value for readout")
        parser.add_argument('--kernel', help="Please give a value for kernel")
        parser.add_argument('--n_heads', help="Please give a value for n_heads")
        parser.add_argument('--gated', help="Please give a value for gated")
        parser.add_argument('--in_feat_dropout', help="Please give a value for in_feat_dropout")
        parser.add_argument('--dropout', help="Please give a value for dropout")
        parser.add_argument('--graph_norm', help="Please give a value for graph_norm")
        parser.add_argument('--batch_norm', help="Please give a value for batch_norm")
        parser.add_argument('--sage_aggregator', help="Please give a value for sage_aggregator")
        parser.add_argument('--data_mode', help="Please give a value for data_mode")
        parser.add_argument('--num_pool', help="Please give a value for num_pool")
        parser.add_argument('--gnn_per_block', help="Please give a value for gnn_per_block")
        parser.add_argument('--embedding_dim', help="Please give a value for embedding_dim")
        parser.add_argument('--pool_ratio', help="Please give a value for pool_ratio")
        parser.add_argument('--linkpred', help="Please give a value for linkpred")
        parser.add_argument('--cat', help="Please give a value for cat")
        parser.add_argument('--self_loop', help="Please give a value for self_loop")
        parser.add_argument('--max_time', help="Please give a value for max_time")
        args = parser.parse_args()
        with open(args.config) as f:
            config = json.load(f)
        # device
        if args.gpu_id is not None:
            config['gpu']['id'] = int(args.gpu_id)
            config['gpu']['use'] = True
        device = gpu_setup(config['gpu']['use'], config['gpu']['id'])
        # model, dataset, out_dir
        if args.model is not None:
            MODEL_NAME = args.model
        else:
            MODEL_NAME = config['model']
        if args.dataset is not None:
            DATASET_NAME = args.dataset
        else:
            DATASET_NAME = config['dataset']
        dataset = LoadData(DATASET_NAME)
        if args.out_dir is not None:
            out_dir = args.out_dir
        else:
            out_dir = config['out_dir']
        # parameters: every CLI flag, when present, overrides the JSON value
        params = config['params']
        if args.seed is not None:
            params['seed'] = int(args.seed)
        if args.epochs is not None:
            params['epochs'] = int(args.epochs)
        if args.batch_size is not None:
            params['batch_size'] = int(args.batch_size)
        if args.init_lr is not None:
            params['init_lr'] = float(args.init_lr)
        if args.lr_reduce_factor is not None:
            params['lr_reduce_factor'] = float(args.lr_reduce_factor)
        if args.lr_schedule_patience is not None:
            params['lr_schedule_patience'] = int(args.lr_schedule_patience)
        if args.min_lr is not None:
            params['min_lr'] = float(args.min_lr)
        if args.weight_decay is not None:
            params['weight_decay'] = float(args.weight_decay)
        if args.print_epoch_interval is not None:
            params['print_epoch_interval'] = int(args.print_epoch_interval)
        if args.max_time is not None:
            params['max_time'] = float(args.max_time)
        # network parameters
        net_params = config['net_params']
        net_params['device'] = device
        net_params['gpu_id'] = config['gpu']['id']
        net_params['batch_size'] = params['batch_size']
        if args.L is not None:
            net_params['L'] = int(args.L)
        if args.hidden_dim is not None:
            net_params['hidden_dim'] = int(args.hidden_dim)
        if args.out_dim is not None:
            net_params['out_dim'] = int(args.out_dim)
        if args.residual is not None:
            net_params['residual'] = True if args.residual=='True' else False
        if args.edge_feat is not None:
            net_params['edge_feat'] = True if args.edge_feat=='True' else False
        if args.readout is not None:
            net_params['readout'] = args.readout
        if args.kernel is not None:
            net_params['kernel'] = int(args.kernel)
        if args.n_heads is not None:
            net_params['n_heads'] = int(args.n_heads)
        if args.gated is not None:
            net_params['gated'] = True if args.gated=='True' else False
        if args.in_feat_dropout is not None:
            net_params['in_feat_dropout'] = float(args.in_feat_dropout)
        if args.dropout is not None:
            net_params['dropout'] = float(args.dropout)
        if args.graph_norm is not None:
            net_params['graph_norm'] = True if args.graph_norm=='True' else False
        if args.batch_norm is not None:
            net_params['batch_norm'] = True if args.batch_norm=='True' else False
        if args.sage_aggregator is not None:
            net_params['sage_aggregator'] = args.sage_aggregator
        if args.data_mode is not None:
            net_params['data_mode'] = args.data_mode
        if args.num_pool is not None:
            net_params['num_pool'] = int(args.num_pool)
        if args.gnn_per_block is not None:
            net_params['gnn_per_block'] = int(args.gnn_per_block)
        if args.embedding_dim is not None:
            net_params['embedding_dim'] = int(args.embedding_dim)
        if args.pool_ratio is not None:
            net_params['pool_ratio'] = float(args.pool_ratio)
        if args.linkpred is not None:
            net_params['linkpred'] = True if args.linkpred=='True' else False
        if args.cat is not None:
            net_params['cat'] = True if args.cat=='True' else False
        if args.self_loop is not None:
            net_params['self_loop'] = True if args.self_loop=='True' else False
    # notebook mode
    if notebook_mode:
        # parameters
        params = config['params']
        # dataset
        DATASET_NAME = config['dataset']
        dataset = LoadData(DATASET_NAME)
        # device
        device = gpu_setup(config['gpu']['use'], config['gpu']['id'])
        out_dir = config['out_dir']
        # GNN model
        MODEL_NAME = config['model']
        # network parameters
        net_params = config['net_params']
        net_params['device'] = device
        net_params['gpu_id'] = config['gpu']['id']
        net_params['batch_size'] = params['batch_size']
    # TSP: input dims and class count are derived from the loaded data itself
    net_params['in_dim'] = dataset.train[0][0].ndata['feat'][0].shape[0]
    net_params['in_dim_edge'] = dataset.train[0][0].edata['feat'][0].size(0)
    num_classes = len(np.unique(np.concatenate(dataset.train[:][1])))
    net_params['n_classes'] = num_classes
    # Timestamped, GPU-tagged output locations for this run.
    root_log_dir = out_dir + 'logs/' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
    root_ckpt_dir = out_dir + 'checkpoints/' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
    write_file_name = out_dir + 'results/result_' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
    write_config_file = out_dir + 'configs/config_' + MODEL_NAME + "_" + DATASET_NAME + "_GPU" + str(config['gpu']['id']) + "_" + time.strftime('%Hh%Mm%Ss_on_%b_%d_%Y')
    dirs = root_log_dir, root_ckpt_dir, write_file_name, write_config_file
    if not os.path.exists(out_dir + 'results'):
        os.makedirs(out_dir + 'results')
    if not os.path.exists(out_dir + 'configs'):
        os.makedirs(out_dir + 'configs')
    net_params['total_param'] = view_model_param(MODEL_NAME, net_params)
    train_val_pipeline(MODEL_NAME, dataset, params, net_params, dirs)
if notebook_mode==True:
    # Build the same config dict that terminal mode would read from JSON,
    # using the globals defined in the cells above.
    config = {}
    # gpu config
    gpu = {}
    gpu['use'] = use_gpu
    gpu['id'] = gpu_id
    config['gpu'] = gpu
    # GNN model, dataset, out_dir
    config['model'] = MODEL_NAME
    config['dataset'] = DATASET_NAME
    config['out_dir'] = out_dir
    # parameters
    params = {}
    params['seed'] = seed
    params['epochs'] = epochs
    params['batch_size'] = batch_size
    params['init_lr'] = init_lr
    params['lr_reduce_factor'] = lr_reduce_factor
    params['lr_schedule_patience'] = lr_schedule_patience
    params['min_lr'] = min_lr
    params['weight_decay'] = weight_decay
    params['print_epoch_interval'] = 5
    params['max_time'] = max_time
    config['params'] = params
    # network parameters
    config['net_params'] = net_params
    # convert to .py format
    from utils.cleaner_main import *
    cleaner_main('main_TSP_edge_classification')
    main(True,config)
else:
    # Terminal execution: configuration comes from --config plus CLI flags.
    main()
```
|
github_jupyter
|
# k-Nearest Neighbor (kNN) exercise
#### This assignment was adapted from Stanford's CS231n course: http://cs231n.stanford.edu/
The kNN classifier consists of two stages:
- During training, the classifier takes the training data and simply remembers it
- During testing, kNN classifies every test image by comparing to all training images and transferring the labels of the k most similar training examples
- The value of k is cross-validated
In this exercise you will implement these steps and understand the basic Image Classification pipeline, cross-validation, and gain proficiency in writing efficient, vectorized code.
### YOUR NAME: YOUR_NAME
### List of collaborators (optional): N/A
```
# Run some setup code for this notebook.
import random
import numpy as np
from psyc272cava.data_utils import load_CIFAR10
import matplotlib.pyplot as plt
# This is a bit of magic to make matplotlib figures appear inline in the notebook
# rather than in a new window.
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# Some more magic so that the notebook will reload external python modules;
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
# Information about the CIFAR-10 dataset: https://www.cs.toronto.edu/~kriz/cifar.html
# Load the raw CIFAR-10 data.
cifar10_dir = 'psyc272cava/datasets/cifar-10-batches-py'
# Cleaning up variables to prevent loading data multiple times (which may cause memory issue)
try:
    del X_train, y_train
    del X_test, y_test
    print('Clear previously loaded data.')
except:
    # On the first run there is nothing to delete (NameError) — that's fine.
    pass
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
# As a sanity check, we print out the size of the training and test data.
print('Training data shape: ', X_train.shape)
print('Training labels shape: ', y_train.shape)
print('Test data shape: ', X_test.shape)
print('Test labels shape: ', y_test.shape)
# Visualize some examples from the dataset.
# We show a few examples of training images from each class.
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
num_classes = len(classes)
samples_per_class = 10
for y, cls in enumerate(classes):
    # Pick samples_per_class random training images labelled with class y.
    idxs = np.flatnonzero(y_train == y)
    idxs = np.random.choice(idxs, samples_per_class, replace=False)
    for i, idx in enumerate(idxs):
        # The subplot grid is filled so that each class occupies one column.
        plt_idx = i * num_classes + y + 1
        plt.subplot(samples_per_class, num_classes, plt_idx)
        plt.imshow(X_train[idx].astype('uint8'))
        plt.axis('off')
        if i == 0:
            plt.title(cls)
plt.show()
# Subsample the data for more efficient code execution in this exercise
num_training = 5000
mask = list(range(num_training))
X_train = X_train[mask]
y_train = y_train[mask]
num_test = 500
mask = list(range(num_test))
X_test = X_test[mask]
y_test = y_test[mask]
# Reshape the image data into rows
X_train = np.reshape(X_train, (X_train.shape[0], -1))
X_test = np.reshape(X_test, (X_test.shape[0], -1))
print(X_train.shape, X_test.shape)
from psyc272cava.classifiers import KNearestNeighbor
# Create a kNN classifier instance.
# Remember that training a kNN classifier is a noop:
# the Classifier simply remembers the data and does no further processing
classifier = KNearestNeighbor()
classifier.train(X_train, y_train)
```
We would now like to classify the test data with the kNN classifier. Recall that we can break down this process into two steps:
1. First we must compute the distances between all test examples and all train examples.
2. Given these distances, for each test example we find the k nearest examples and have them vote for the label
Let's begin with computing the distance matrix between all training and test examples. For example, if there are **Ntr** training examples and **Nte** test examples, this stage should result in a **Nte x Ntr** matrix where each element (i,j) is the distance between the i-th test and j-th train example.
**Note: For the three distance computations that we require you to implement in this notebook, you may not use the np.linalg.norm() function that numpy provides.**
First, open `psyc272cava/classifiers/k_nearest_neighbor.py` and implement the function `compute_distances_two_loops` that uses a (very inefficient) double loop over all pairs of (test, train) examples and computes the distance matrix one element at a time.
```
# Open psyc272cava/classifiers/k_nearest_neighbor.py and implement
# compute_distances_two_loops.
# Test your implementation:
dists = classifier.compute_distances_two_loops(X_test)
print(dists.shape)
# We can visualize the distance matrix: each row is a single test example and
# its distances to training examples
plt.imshow(dists, interpolation='none')
plt.show()
# Now implement the function predict_labels and run the code below:
# We use k = 1 (which is Nearest Neighbor).
y_test_pred = classifier.predict_labels(dists, k=1)
# Compute and print the fraction of correctly predicted examples
num_correct = np.sum(y_test_pred == y_test)
accuracy = float(num_correct) / num_test
print('Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy))
```
You should expect to see approximately `27%` accuracy. Now lets try out a larger `k`, say `k = 5`:
```
# Re-evaluate on the same distance matrix with k = 5 neighbours.
y_test_pred = classifier.predict_labels(dists, k=5)
num_correct = np.sum(y_test_pred == y_test)
accuracy = float(num_correct) / num_test
print('Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy))
```
You should expect to see a slightly better performance than with `k = 1`.
```
# Now lets speed up distance matrix computation by using partial vectorization
# with one loop. Implement the function compute_distances_one_loop and run the
# code below:
dists_one = classifier.compute_distances_one_loop(X_test)
# To ensure that our vectorized implementation is correct, we make sure that it
# agrees with the naive implementation. There are many ways to decide whether
# two matrices are similar; one of the simplest is the Frobenius norm. In case
# you haven't seen it before, the Frobenius norm of two matrices is the square
# root of the squared sum of differences of all elements; in other words, reshape
# the matrices into vectors and compute the Euclidean distance between them.
difference = np.linalg.norm(dists - dists_one, ord='fro')
print('One loop difference was: %f' % (difference, ))
if difference < 0.001:
    print('Good! The distance matrices are the same')
else:
    print('Uh-oh! The distance matrices are different')
# Now implement the fully vectorized version inside compute_distances_no_loops
# and run the code
dists_two = classifier.compute_distances_no_loops(X_test)
# check that the distance matrix agrees with the one we computed before:
difference = np.linalg.norm(dists - dists_two, ord='fro')
print('No loop difference was: %f' % (difference, ))
if difference < 0.001:
    print('Good! The distance matrices are the same')
else:
    print('Uh-oh! The distance matrices are different')
def time_function(f, *args):
    """
    Call a function f with args and return the time (in seconds) that it took to execute.
    """
    import time
    # perf_counter() is the documented monotonic, highest-resolution timer for
    # benchmarking; time.time() is wall-clock, can jump backwards on clock
    # adjustment and has coarser resolution on some platforms.
    tic = time.perf_counter()
    f(*args)
    toc = time.perf_counter()
    return toc - tic
# Time all three implementations on identical input for a fair comparison.
two_loop_time = time_function(classifier.compute_distances_two_loops, X_test)
print('Two loop version took %f seconds' % two_loop_time)
one_loop_time = time_function(classifier.compute_distances_one_loop, X_test)
print('One loop version took %f seconds' % one_loop_time)
no_loop_time = time_function(classifier.compute_distances_no_loops, X_test)
print('No loop version took %f seconds' % no_loop_time)
# You should see significantly faster performance with the fully vectorized implementation!
# NOTE: depending on what machine you're using,
# you might not see a speedup when you go from two loops to one loop,
# and might even see a slow-down.
```
### Cross-validation
We have implemented the k-Nearest Neighbor classifier but we set the value k = 5 arbitrarily. We will now determine the best value of this hyperparameter with cross-validation.
```
num_folds = 5
k_choices = [1, 3, 5, 8, 10, 12, 15, 20, 50, 100]
X_train_folds = []
y_train_folds = []
################################################################################
# TODO:                                                                        #
# Split up the training data into folds. After splitting, X_train_folds and   #
# y_train_folds should each be lists of length num_folds, where               #
# y_train_folds[i] is the label vector for the points in X_train_folds[i].    #
# Hint: Look up the numpy array_split function.                               #
################################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
pass
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
# A dictionary holding the accuracies for different values of k that we find
# when running cross-validation. After running cross-validation,
# k_to_accuracies[k] should be a list of length num_folds giving the different
# accuracy values that we found when using that value of k.
k_to_accuracies = {}
# Fold size is a count of examples, so use integer (floor) division.
num_split = X_train.shape[0] // num_folds
# np.float was deprecated in NumPy 1.20 and removed in 1.24; the builtin
# `float` is the equivalent (float64) dtype.
acc_k = np.zeros((len(k_choices), num_folds), dtype=float)
################################################################################
# TODO:                                                                        #
# Perform k-fold cross validation to find the best value of k. For each       #
# possible value of k, run the k-nearest-neighbor algorithm num_folds times,  #
# where in each case you use all but one of the folds as training data and the#
# last fold as a validation set. Store the accuracies for all fold and all    #
# values of k in the k_to_accuracies dictionary.                              #
################################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
pass
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
# Print out the computed accuracies
for k in sorted(k_to_accuracies):
    for accuracy in k_to_accuracies[k]:
        print('k = %d, accuracy = %f' % (k, accuracy))
# plot the raw observations
for k in k_choices:
    accuracies = k_to_accuracies[k]
    plt.scatter([k] * len(accuracies), accuracies)
# plot the trend line with error bars that correspond to standard deviation
accuracies_mean = np.array([np.mean(v) for k,v in sorted(k_to_accuracies.items())])
accuracies_std = np.array([np.std(v) for k,v in sorted(k_to_accuracies.items())])
plt.errorbar(k_choices, accuracies_mean, yerr=accuracies_std)
plt.title('Cross-validation on k')
plt.xlabel('k')
plt.ylabel('Cross-validation accuracy')
plt.show()
# Based on the cross-validation results above, choose the best value for k,
# retrain the classifier using all the training data, and test it on the test
# data. You should be able to get above 28% accuracy on the test data.
best_k = 9
classifier = KNearestNeighbor()
classifier.train(X_train, y_train)
y_test_pred = classifier.predict(X_test, k=best_k)
# Compute and display the accuracy
num_correct = np.sum(y_test_pred == y_test)
accuracy = float(num_correct) / num_test
print('Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy))
```
|
github_jupyter
|
<img src="images/usm.jpg" width="480" height="240" align="left"/>
# MAT281 - Laboratorio N°03
## Objetivos del laboratorio
* Reforzar conceptos básicos de análisis no supervisado.
## Contenidos
* [Problema 01](#p1)
<a id='p1'></a>
## I.- Problema 01
<img src="https://freedesignfile.com/upload/2013/06/Car-logos-1.jpg" width="360" height="360" align="center"/>
El conjunto de datos se denomina `vehiculos_procesado_con_grupos.csv`, el cual contiene algunas de las características más importantes de un vehículo.
En este ejercicio el objetivo es poder clasificar los distintos vehículos basándose en las características que se presentan a continuación. La dificultad de este ejercicio radica en que ahora tenemos variables numéricas y variables categóricas.
Lo primero será cargar el conjunto de datos:
```
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import MinMaxScaler
from sklearn.dummy import DummyClassifier
from sklearn.cluster import KMeans
%matplotlib inline
sns.set_palette("deep", desat=.6)
sns.set(rc={'figure.figsize':(11.7,8.27)})
# Load the processed vehicle dataset and discard the descriptive columns
# that are not used for clustering.
_cols_to_drop = [
    "fabricante",
    "modelo",
    "transmision",
    "traccion",
    "clase",
    "combustible",
    "consumo",
]
df = pd.read_csv(os.path.join("data", "vehiculos_procesado_con_grupos.csv"),
                 sep=",").drop(_cols_to_drop, axis=1)
df.head()
```
En este caso, no solo se tienen datos numéricos, sino que también categóricos. Además, tenemos problemas de datos **vacíos (Nan)**. Así que para resolver este problema, seguiremos varios pasos:
## 1.- Normalizar datos
1. Cree un conjunto de datos con las variables numéricas; además, para cada dato vacío, rellene con el promedio asociado a esa columna. Finalmente, normalice los datos mediante el procesamiento **MinMaxScaler** de **sklearn**.
2.- Cree un conjunto de datos con las variables categóricas, además, transforme de variables numéricas a categóricas ocupando el comando **get_dummies** de pandas ([referencia](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.get_dummies.html)). Explique a grandes rasgos cómo se realiza la codificación de variables numéricas a categóricas.
3.- Junte ambos dataset en uno, llamado **df_procesado**.
1.-
```
df.dtypes
# Numeric subset of the dataset; .copy() so the scaling below writes to an
# independent frame instead of a view of `df`.
df_numerica = df[['year','desplazamiento', 'cilindros', 'co2','consumo_litros_milla']].copy()
df_numerica.isnull().sum()
# Fill missing values with the mean of each numeric column.
# Fixed: use df_numerica.mean() rather than df.mean() -- taking the mean of
# the full frame fails on the non-numeric columns in modern pandas, and only
# the numeric means are needed here.
df_numerica = df_numerica.fillna(df_numerica.mean())
df_numerica.isnull().sum()
# Rescale every numeric column to the [0, 1] range.
scaler = MinMaxScaler()
columns = ['year','desplazamiento', 'cilindros', 'co2','consumo_litros_milla']
df_numerica[columns] = scaler.fit_transform(df_numerica[columns])
df_numerica.head()
```
2.-
```
# One-hot encode the categorical columns; dummy_na=True adds an indicator
# column for missing values as well.
categorical_cols = ['clase_tipo', 'traccion_tipo', 'transmision_tipo',
                    'combustible_tipo', 'tamano_motor_tipo', 'consumo_tipo',
                    'co2_tipo']
df_categoricas = pd.get_dummies(df[categorical_cols], dummy_na=True)
df_categoricas.head()
```
3.-
```
# Combine the scaled numeric features and the one-hot encoded categorical
# features into a single design matrix for clustering.
df_procesado = pd.concat([df_numerica, df_categoricas ], axis=1)
df_procesado
```
## 2.- Realizar ajuste mediante kmeans
Una vez depurado el conjunto de datos, es momento de aplicar el algoritmo de **kmeans**.
1. Ajuste el modelo de **kmeans** sobre el conjunto de datos, con un total de 8 clusters.
2. Calcular los cluster y el valor de los centroides.
3. Realizar que resumas las principales cualidades de cada cluster. Para cada cluster calcule:
a. Valor promedio de las variables numéricas.\
b. Moda para las variables categóricas
1.-
```
# Fit K-means with 8 clusters; the fixed random_state makes the run reproducible.
kmeans = KMeans(n_clusters=8, random_state=2).fit(df_procesado)
```
2.-
```
# Centroid coordinates: one row per cluster, in the processed feature space.
centroids = kmeans.cluster_centers_
# Cluster label assigned to each row of the dataset.
clusters = kmeans.labels_
df_procesado["cluster"] = clusters
# Treat the cluster id as categorical for the group-by summaries below.
df_procesado["cluster"] = df_procesado["cluster"].astype('category')
```
3.-
```
# Per-cluster mean of every column (informative for the numeric features).
df_procesado.groupby(['cluster']).mean()
# Per-cluster mode of every column (informative for the one-hot categoricals).
df_procesado.groupby(['cluster']).agg(pd.Series.mode)
```
## 3.- Elegir Número de cluster
Estime mediante la **regla del codo**, el número de cluster apropiados para el caso.
Para efectos prácticos, elija la siguiente secuencia como número de clusters a comparar:
$$[5, 10, 20, 30, 50, 75, 100, 200, 300]$$
Una vez realizado el gráfico, saque sus propias conclusiones del caso.
```
# Elbow rule: fit K-means for each candidate cluster count and record the
# resulting inertia (within-cluster sum of squared distances).
candidate_ks = [5, 10, 20, 30, 50, 75, 100, 200, 300]
inertia = [KMeans(n_clusters=n).fit(df_procesado).inertia_ for n in candidate_ks]

fig, (ax1) = plt.subplots(1, figsize=(16,6))
xx = np.arange(len(candidate_ks))
ax1.plot(xx, inertia)
ax1.set_xticks(xx)
ax1.set_xticklabels(candidate_ks, rotation = 'vertical')
plt.xlabel('Número de Clusters')
plt.ylabel('Valor Inertia')
plt.title('Elbow Curve')
plt.grid()
plt.show()
```
De la regla del codo se puede notar que los primeros valores críticos a ser candidatos para el número de clusters son $10$ o $20$, ya que aquí la gráfica tiene un quiebre considerable.
|
github_jupyter
|
# Introduction to Qiskit
Welcome to the Quantum Challenge! Here you will be using Qiskit, the open source quantum software development kit developed by IBM Quantum and community members around the globe. The following exercises will familiarize you with the basic elements of Qiskit and quantum circuits.
To begin, let us define what a quantum circuit is:
> **"A quantum circuit is a computational routine consisting of coherent quantum operations on quantum data, such as qubits. It is an ordered sequence of quantum gates, measurements, and resets, which may be conditioned on real-time classical computation."** (https://qiskit.org/textbook/ch-algorithms/defining-quantum-circuits.html)
While this might be clear to a quantum physicist, don't worry if it is not self-explanatory to you. During this exercise you will learn what a qubit is, how to apply quantum gates to it, and how to measure its final state. You will then be able to create your own quantum circuits! By the end, you should be able to explain the fundamentals of quantum circuits to your colleagues.
Before starting with the exercises, please run cell *Cell 1* below by clicking on it and pressing 'shift' + 'enter'. This is the general way to execute a code cell in the Jupyter notebook environment that you are using now. While it is running, you will see `In [*]:` in the top left of that cell. Once it finishes running, you will see a number instead of the star, which indicates how many cells you've run. You can find more information about Jupyter notebooks here: https://qiskit.org/textbook/ch-prerequisites/python-and-jupyter-notebooks.html.
---
For useful tips to complete this exercise as well as pointers for communicating with other participants and asking questions, please take a look at the following [repository](https://github.com/qiskit-community/may4_challenge_exercises). You will also find a copy of these exercises, so feel free to edit and experiment with these notebooks.
---
```
# Cell 1
import numpy as np
from qiskit import Aer, QuantumCircuit, execute
from qiskit.visualization import plot_histogram
from IPython.display import display, Math, Latex
from may4_challenge import plot_state_qsphere
from may4_challenge.ex1 import minicomposer
from may4_challenge.ex1 import check1, check2, check3, check4, check5, check6, check7, check8
from may4_challenge.ex1 import return_state, vec_in_braket, statevec
```
## Exercise I: Basic Operations on Qubits and Measurements
### Writing down single-qubit states
Let us start by looking at a single qubit. The main difference between a classical bit, which can take the values 0 and 1 only, is that a quantum bit, or **qubit**, can be in the states $\vert0\rangle$, $\vert1\rangle$, as well as a linear combination of these two states. This feature is known as superposition, and allows us to write the most general state of a qubit as:
$$\vert\psi\rangle = \sqrt{1-p}\vert0\rangle + e^{i \phi} \sqrt{p} \vert1\rangle$$
If we were to measure the state of this qubit, we would find the result $1$ with probability $p$, and the result $0$ with probability $1-p$. As you can see, the total probability is $1$, meaning that we will indeed measure either $0$ or $1$, and no other outcome exists.
In addition to $p$, you might have noticed another parameter above. The variable $\phi$ indicates the relative quantum phase between the two states $\vert0\rangle$ and $\vert1\rangle$. As we will discover later, this relative phase is quite important. For now, it suffices to note that the quantum phase is what enables interference between quantum states, resulting in our ability to write quantum algorithms for solving specific tasks.
If you are interested in learning more, we refer you to [the section in the Qiskit textbook on representations of single-qubit states](https://qiskit.org/textbook/ch-states/representing-qubit-states.html).
### Visualizing quantum states
We visualize quantum states throughout this exercise using what is known as a `qsphere`. Here is how the `qsphere` looks for the states $\vert0\rangle$ and $\vert1\rangle$, respectively. Note that the top-most part of the sphere represents the state $\vert0\rangle$, while the bottom represents $\vert1\rangle$.
<img src="qsphere01.png" alt="qsphere with states 0 and 1" style="width: 400px;"/>
It should be no surprise that the superposition state with quantum phase $\phi = 0$ and probability $p = 1/2$ (meaning an equal likelihood of measuring both 0 and 1) is shown on the `qsphere` with two points. However, note also that the size of the circles at the two points is smaller than when we had simply $\vert0\rangle$ and $\vert1\rangle$ above. This is because the size of the circles is proportional to the probability of measuring each one, which is now reduced by half.
<img src="qsphereplus.png" alt="qsphere with superposition 1" style="width: 200px;"/>
In the case of superposition states, where the quantum phase is non-zero, the qsphere allows us to visualize that phase by changing the color of the respective blob. For example, the state with $\phi = 90^\circ$ (degrees) and probability $p = 1/2$ is shown in the `qsphere` below.
<img src="qspherey.png" alt="qsphere with superposition 2" style="width: 200px;"/>
### Manipulating qubits
Qubits are manipulated by applying quantum gates. Let's go through an overview of the different gates that we will consider in the following exercises.
First, let's describe how we can change the value of $p$ for our general quantum state. To do this, we will use two gates:
1. **$X$-gate**: This gate flips between the two states $\vert0\rangle$ and $\vert1\rangle$. This operation is the same as the classical NOT gate. As a result, the $X$-gate is sometimes referred to as a bit flip or NOT gate. Mathematically, the $X$ gate changes $p$ to $1-p$, so in particular from 0 to 1, and vice versa.
2. **$H$-gate**: This gate allows us to go from the state $\vert0\rangle$ to the state $\frac{1}{\sqrt{2}}\left(\vert0\rangle + \vert1\rangle\right)$. This state is also known as the $\vert+\rangle$. Mathematically, this means going from $p=0, \phi=0$ to $p=1/2, \phi=0$. As the final state of the qubit is a superposition of $\vert0\rangle$ and $\vert1\rangle$, the Hadamard gate represents a true quantum operation.
Notice that both gates changed the value of $p$, but not $\phi$. Fortunately for us, it's quite easy to visualize the action of these gates by looking at the figure below.
<img src="quantumgates.png" alt="quantum gates" style="width: 400px;"/>
Once we have the state $\vert+\rangle$, we can then change the quantum phase by applying several other gates. For example, an $S$ gate adds a phase of $90$ degrees to $\phi$, while the $Z$ gate adds a phase of $180$ degrees to $\phi$. To subtract a phase of $90$ degrees, we can apply the $S^\dagger$ gate, which is read as S-dagger, and commonly written as `sdg`. Finally, there is a $Y$ gate which applies a sequence of $Z$ and $X$ gates.
You can experiment with the gates $X$, $Y$, $Z$, $H$, $S$ and $S^\dagger$ to become accustomed to the different operations and how they affect the state of a qubit. To do so, you can run *Cell 2* which starts our circuit widget. After running the cell, choose a gate to apply to a qubit, and then choose the qubit (in the first examples, the only qubit to choose is qubit 0). Watch how the corresponding state changes with each gate, as well as the description of that state. It will also provide you with the code that creates the corresponding quantum circuit in Qiskit below the qsphere.
If you want to learn more about describing quantum states, Pauli operators, and other single-qubit gates, see chapter 1 of our textbook: https://qiskit.org/textbook/ch-states/introduction.html.
```
# Cell 2
# press shift + return to run this code cell
# then, click on the gate that you want to apply to your qubit
# next, you have to choose the qubit that you want to apply it to (choose '0' here)
# click on clear to restart
# Launches the interactive one-qubit composer widget; it shows the state in
# Dirac notation and on the qsphere after every gate you apply.
minicomposer(1, dirac=True, qsphere=True)
```
Here are four small exercises to attain different states on the qsphere. You can either solve them with the widget above and copy paste the code it provides into the respective cells to create the quantum circuits, or you can directly insert a combination of the following code lines into the program to apply the different gates:
qc.x(0) # bit flip
qc.y(0) # bit and phase flip
qc.z(0) # phase flip
    qc.h(0)    # superposition
qc.s(0) # quantum phase rotation by pi/2 (90 degrees)
qc.sdg(0) # quantum phase rotation by -pi/2 (90 degrees)
The '(0)' indicates that we apply this gate to qubit 'q0', which is the first (and in this case only) qubit.
Try to attain the given state on the qsphere in each of the following exercises.
### I.i) Let us start by performing a bit flip. The goal is to reach the state $\vert1\rangle$ starting from state $\vert0\rangle$. <img src="state1.png" width="300">
If you have reached the desired state with the widget, copy and paste the code from *Cell 2* into *Cell 3* (where it says "FILL YOUR CODE IN HERE") and run it to check your solution.
```
# Cell 3
def create_circuit():
    """Build a one-qubit circuit that flips |0> into |1>."""
    circuit = QuantumCircuit(1)
    circuit.x(0)  # bit flip (NOT gate): |0> -> |1>
    return circuit

# check solution
qc = create_circuit()
state = statevec(qc)
check1(state)
plot_state_qsphere(state.data, show_state_labels=True, show_state_angles=True)
```
### I.ii) Next, let's create a superposition. The goal is to reach the state $|+\rangle = \frac{1}{\sqrt{2}}\left(|0\rangle + |1\rangle\right)$. <img src="stateplus.png" width="300">
Fill in the code in the lines indicated in *Cell 4*. If you prefer the widget, you can still copy the code that the widget gives in *Cell 2* and paste it into *Cell 4*.
```
# Cell 4
def create_circuit2():
    """Build a one-qubit circuit preparing the |+> superposition state."""
    circuit = QuantumCircuit(1)
    circuit.h(0)  # Hadamard: |0> -> (|0> + |1>)/sqrt(2)
    return circuit

qc = create_circuit2()
state = statevec(qc)
check2(state)
plot_state_qsphere(state.data, show_state_labels=True, show_state_angles=True)
```
### I.iii) Let's combine those two. The goal is to reach the state $|-\rangle = \frac{1}{\sqrt{2}}\left(|0\rangle - |1\rangle\right)$. <img src="stateminus.png" width="300">
Can you combine the above two tasks to come up with the solution?
```
# Cell 5
def create_circuit3():
    """Build a one-qubit circuit preparing the |-> state."""
    circuit = QuantumCircuit(1)
    # X then H: |0> -> |1> -> (|0> - |1>)/sqrt(2)
    circuit.x(0)
    circuit.h(0)
    return circuit

qc = create_circuit3()
state = statevec(qc)
check3(state)
plot_state_qsphere(state.data, show_state_labels=True, show_state_angles=True)
```
### I.iv) Finally, we move on to the complex numbers. The goal is to reach the state $|\circlearrowleft\rangle = \frac{1}{\sqrt{2}}\left(|0\rangle - i|1\rangle\right)$. <img src="stateleft.png" width="300">
```
# Cell 6
def create_circuit4():
    """Build a one-qubit circuit preparing (|0> - i|1>)/sqrt(2)."""
    circuit = QuantumCircuit(1)
    # H creates |+>; S-dagger then subtracts a 90-degree phase from |1>
    circuit.h(0)
    circuit.sdg(0)
    return circuit

qc = create_circuit4()
state = statevec(qc)
check4(state)
plot_state_qsphere(state.data, show_state_labels=True, show_state_angles=True)
```
## Exercise II: Quantum Circuits Using Multi-Qubit Gates
Great job! Now that you've understood the single-qubit gates, let us look at gates operating on multiple qubits. The basic gates on two qubits are given by
qc.cx(c,t) # controlled-X (= CNOT) gate with control qubit c and target qubit t
qc.cz(c,t) # controlled-Z gate with control qubit c and target qubit t
qc.swap(a,b) # SWAP gate that swaps the states of qubit a and qubit b
If you'd like to read more about the different multi-qubit gates and their relations, visit chapter 2 of our textbook: https://qiskit.org/textbook/ch-gates/introduction.html.
As before, you can use the two-qubit circuit widget below to see how the combined two qubit state evolves when applying different gates (run *Cell 7*) and get the corresponding code that you can copy and paste into the program. Note that for two qubits a general state is of the form $a|00\rangle + b |01\rangle + c |10\rangle + d|11\rangle$, where $a$, $b$, $c$, and $d$ are complex numbers whose absolute values squared give the probability to measure the respective state; e.g., $|a|^2$ would be the probability to end in state '0' on both qubits. This means we can now have up to four points on the qsphere.
```
# Cell 7
# press shift + return to run this code cell
# then, click on the gate that you want to apply followed by the qubit(s) that you want it to apply to
# for controlled gates, the first qubit you choose is the control qubit and the second one the target qubit
# click on clear to restart
# Launches the interactive two-qubit composer widget.
minicomposer(2, dirac = True, qsphere = True)
```
We start with the canonical two qubit gate, the controlled-NOT (also CNOT or CX) gate. Here, as with all controlled two qubit gates, one qubit is labelled as the "control", while the other is called the "target". If the control qubit is in state $|0\rangle$, it applies the identity $I$ gate to the target, i.e., no operation is performed. Instead, if the control qubit is in state $|1\rangle$, an X-gate is performed on the target qubit. Therefore, with both qubits in one of the two classical states, $|0\rangle$ or $|1\rangle$, the CNOT gate is limited to classical operations.
This situation changes dramatically when we first apply a Hadamard gate to the control qubit, bringing it into the superposition state $|+\rangle$. The action of a CNOT gate on this non-classical input can produce highly entangled states between control and target qubits. If the target qubit is initially in the $|0\rangle$ state, the resulting state is denoted by $|\Phi^+\rangle$, and is one of the so-called Bell states.
### II.i) Construct the Bell state $|\Phi^+\rangle = \frac{1}{\sqrt{2}}\left(|00\rangle + |11\rangle\right)$. <img src="phi+.png" width="300">
For this state we would have probability $\frac{1}{2}$ to measure "00" and probability $\frac{1}{2}$ to measure "11". Thus, the outcomes of both qubits are perfectly correlated.
```
# Cell 8
def create_circuit():
    """Build a two-qubit circuit preparing the Bell state (|00> + |11>)/sqrt(2)."""
    circuit = QuantumCircuit(2)
    circuit.h(0)      # put the control qubit into |+>
    circuit.cx(0, 1)  # entangle control and target
    return circuit

qc = create_circuit()
state = statevec(qc) # determine final state after running the circuit
display(Math(vec_in_braket(state.data)))
check5(state)
qc.draw(output='mpl') # we draw the circuit
```
Next, try to create the state of perfectly anti-correlated qubits. Note the minus sign here, which indicates the relative phase between the two states.
### II.ii) Construct the Bell state $\vert\Psi^-\rangle = \frac{1}{\sqrt{2}}\left(\vert01\rangle - \vert10\rangle\right)$. <img src="psi-.png" width="300">
```
# Cell 9
def create_circuit6():
    """Build a two-qubit circuit preparing the Bell state (|01> - |10>)/sqrt(2)."""
    # Two qubits plus two classical bits to hold the measurement results.
    circuit = QuantumCircuit(2, 2)
    circuit.h(0)
    circuit.x(1)
    circuit.cx(0, 1)
    circuit.z(1)  # introduces the relative minus sign between |01> and |10>
    return circuit

qc = create_circuit6()
state = statevec(qc) # determine final state after running the circuit
display(Math(vec_in_braket(state.data)))
check6(state)
qc.measure(0, 0) # store the outcome of qubit q_0 in classical bit c_0
qc.measure(1, 1) # store the outcome of qubit q_1 in classical bit c_1
qc.draw(output='mpl') # we draw the circuit
```
As you can tell from the circuit (and the code) we have added measurement operators to the circuit. Note that in order to store the measurement results, we also need two classical bits, which we have added when creating the quantum circuit: `qc = QuantumCircuit(num_qubits, num_classicalbits)`.
In *Cell 10* we have defined a function `run_circuit()` that will run a circuit on the simulator. If the right state is prepared, we have probability $\frac{1}{2}$ to measure each of the two outcomes, "01" and "10". However, performing the measurement with 1000 shots does not imply that we will measure exactly 500 times "01" and 500 times "10". Just like flipping a coin multiple times, it is unlikely that one will get exactly a 50/50 split between the two possible output values. Instead, there are fluctuations about this ideal distribution. You can call `run_circuit` multiple times to see the variance in the output.
```
# Cell 10
def run_circuit(qc):
    """Simulate the circuit with 1000 shots and return the measurement counts."""
    simulator = Aer.get_backend('qasm_simulator')
    job_result = execute(qc, simulator, shots=1000).result()
    return job_result.get_counts()

counts = run_circuit(qc)
print(counts)
plot_histogram(counts) # let us plot a histogram to see the possible outcomes and corresponding probabilities
```
### II.iii) You are given the quantum circuit described in the function below. Swap the states of the first and the second qubit.
This should be your final state: <img src="stateIIiii.png" width="300">
```
# Cell 11
def create_circuit7():
    """Build the given two-qubit circuit: Rx(pi/3) on qubit 0, X on qubit 1."""
    circuit = QuantumCircuit(2)
    circuit.rx(np.pi / 3, 0)
    circuit.x(1)
    return circuit

qc = create_circuit7()
# Exchange the states of the two qubits.
qc.swap(0, 1)
state = statevec(qc) # determine final state after running the circuit
display(Math(vec_in_braket(state.data)))
check7(state)
plot_state_qsphere(state.data, show_state_labels=True, show_state_angles=True)
```
### II.iv) Write a program from scratch that creates the GHZ state (on three qubits), $\vert \text{GHZ}\rangle = \frac{1}{\sqrt{2}} \left(|000\rangle + |111 \rangle \right)$, performs a measurement with 2000 shots, and returns the counts. <img src="ghz.png" width="300">
If you want to track the state as it is evolving, you could use the circuit widget from above for three qubits, i.e., `minicomposer(3, dirac=True, qsphere=True)`. For how to get the counts of a measurement, look at the code in *Cell 9* and *Cell 10*.
```
# Cell 12
def run_circuit(qc, shots):
    """Simulate the circuit with the given number of shots; return the counts."""
    simulator = Aer.get_backend('qasm_simulator')
    job_result = execute(qc, simulator, shots=shots).result()
    return job_result.get_counts()

# Build the 3-qubit GHZ state (|000> + |111>)/sqrt(2): H puts qubit 0 into
# superposition, then the CNOT chain propagates it onto qubits 1 and 2.
qc = QuantumCircuit(3)
qc.h(0)
qc.cx(0, 1)
qc.cx(1, 2)
qc.measure_all()

counts = run_circuit(qc, 2000)
print(counts)
check8(counts)
plot_histogram(counts)
```
Congratulations for finishing this introduction to Qiskit! Once you've reached all 8 points, the solution string will be displayed. You need to copy and paste that string on the IBM Quantum Challenge page to complete the exercise and track your progress.
Now that you have created and run your first quantum circuits, you are ready for the next exercise, where we will make use of the actual hardware and learn how to reduce the noise in the outputs.
|
github_jupyter
|
```
from pythonosc import dispatcher, osc_server
from pythonosc.udp_client import SimpleUDPClient
import time

# True while the actuator is being deflated after the pressure threshold was hit.
deflating = False

def print_volume_handler(unused_addr, args, volume):
    """OSC handler for pressure readings: inflate until the reading exceeds
    1090, then deflate for at least 5 seconds before inflating again.

    Relies on the module-level `client` (created in the next cell) and on the
    globals `deflating` / `deflateStartTime` for the hysteresis state.
    """
    global deflating, deflateStartTime
    print("[{0}] ~ {1}".format(args, volume))
    if (volume > 1090 or deflating):
        # Pressure too high, or still inside the deflate window: drive down.
        client.send_message("/actuator/1/inflate", -80.0) # Send float message between -100 and 100
        if deflating == False:
            deflating = True
            deflateStartTime = time.time()
        elif time.time() - deflateStartTime > 5:
            # Deflate window (5 s) elapsed; allow inflating on the next reading.
            deflating = False
    elif deflating == False:
        client.send_message("/actuator/1/inflate", 80.0) # Send float message between -100 and 100
        deflateStartTime = time.time()
ip = "192.168.0.106"
port = 32000
client = SimpleUDPClient(ip, port) # Create client
dispatcher = dispatcher.Dispatcher()  # NOTE(review): rebinds the imported `dispatcher` module to an instance
# Pressure messages invoke the handler above with "Pressure" passed as `args`.
dispatcher.map("/sensor/pressure", print_volume_handler, "Pressure")
server = osc_server.ThreadingOSCUDPServer(("192.168.0.106", 31000), dispatcher)
print("Serving on {}".format(server.server_address))
server.serve_forever()  # blocking; the cell runs until interrupted
from pythonosc.udp_client import SimpleUDPClient
ip = "192.168.0.106"
port = 32000
client = SimpleUDPClient(ip, port) # Create client
# Send zero inflation speed -- presumably stops the actuator; confirm against the actuator docs.
client.send_message("/actuator/1/inflate", -0.0)

from pythonosc import dispatcher, osc_server
from pythonosc.udp_client import SimpleUDPClient

def print_volume_handler(unused_addr, args, volume):
    """Log each pressure reading and command a constant full-speed deflate."""
    print("[{0}] ~ {1}".format(args, volume))
    client.send_message("/actuator/1/inflate", -100.0) # Send float message between -100 and 100

ip = "192.168.0.106"
port = 32000
client = SimpleUDPClient(ip, port) # Create client
dispatcher = dispatcher.Dispatcher()  # NOTE(review): rebinds the imported `dispatcher` module
dispatcher.map("/sensor/pressure", print_volume_handler, "Pressure")
server = osc_server.ThreadingOSCUDPServer(("192.168.0.106", 31000), dispatcher)
print("Serving on {}".format(server.server_address))
server.serve_forever()

import time
time.time()  # show the current epoch timestamp
```
# Respiration Real-Time Analysis
```
import biosppy.signals.resp as resp
import numpy as np
import csv as csv
import json
import matplotlib.pyplot as plt
def calc_resp_intervals(data, last_breath=False, sampling_rate=200):
    """Segment a raw respiration signal into inhale/exhale intervals.

    Parameters
    ----------
    data : array-like
        Raw respiration samples.
    last_breath : bool
        If False, return intervals for the whole signal; if True, return
        only the most recent interval.
    sampling_rate : float
        Sampling rate of `data` in Hz.  Default 200, matching the sensor
        stream used elsewhere in this notebook; made a parameter so the
        function also works with other devices.

    Returns
    -------
    tuple
        When ``last_breath`` is False: ``(interval_lengths,
        interval_breathe_in)`` -- lengths in samples, and a direction flag
        per interval (True while the filtered signal is rising, i.e.
        breathing in).  When True: ``(last_interval_length,
        last_interval_is_inhale)``.
    """
    processed_data = resp.resp(signal=data, sampling_rate=sampling_rate, show=False)
    filtered_signal = processed_data[1]  # band-filtered respiration signal
    # Sign of the first derivative: True where the signal is rising.
    signal_diff = np.diff(filtered_signal)
    signal_signum = signal_diff > 0
    # Indices where the rising/falling direction flips, plus the final sample.
    resp_changes = np.append(np.where(signal_signum[:-1] != signal_signum[1:])[0],
                             [len(signal_signum) - 1])
    if not last_breath:
        resp_intervals = np.append([0], resp_changes)
        interval_lengths = np.diff(resp_intervals)
        interval_breathe_in = [signal_signum[i] for i in resp_changes]
        return interval_lengths, interval_breathe_in
    else:
        if len(resp_changes) > 1:
            last_interval = resp_changes[-1] - resp_changes[-2]
        else:
            last_interval = resp_changes[-1]
        return last_interval, signal_signum[resp_changes[-1]]
import time
from pythonosc.udp_client import SimpleUDPClient

# Rolling buffer of raw respiration samples (200 Hz stream -- see the
# 200-samples-per-second constants below).
resp_data = []
last_update = time.time()
update_freq = 0.5  # minimum seconds between actuator updates
riot_ip = '192.168.1.2'
riot_port = 8888
actuator_port = 12000
actuator_ip = '192.168.0.103'
client = SimpleUDPClient(actuator_ip, actuator_port)

def process_riot_data(unused_addr, *values):
    """OSC handler for raw sensor frames: track respiration, drive the actuator.

    Appends one sample per frame to the global buffer; once at least 10 s of
    samples (200*10) have accumulated, every `update_freq` seconds it
    classifies the latest breath interval and inflates or deflates accordingly.
    """
    global resp_data, last_update, client
    # values[12] -- presumably the respiration channel of the raw frame; TODO confirm
    new_data = values[12]
    resp_data.append(new_data)
    if len(resp_data) > 200*10 and time.time() - last_update > update_freq:
        last_int, breathe_in = calc_resp_intervals(resp_data, last_breath = True)
        if breathe_in:
            print("Breathing in")
            client.send_message("/actuator/inflate", 100.0)
        else:
            print("Breathing out")
            client.send_message("/actuator/inflate", -100.0)
        last_update = time.time()
    # only save the last 5 min of data
    if len(resp_data) > 200 * 60 * 5:
        resp_data = resp_data[-200*60*5:]
from pythonosc import dispatcher
from pythonosc import osc_server

# Route every "/<device>/raw" OSC message to the handler above.
riot_dispatcher = dispatcher.Dispatcher()
riot_dispatcher.map("/*/raw", process_riot_data)
server = osc_server.ThreadingOSCUDPServer((riot_ip, riot_port), riot_dispatcher)
print("Serving on {}".format(server.server_address))
server.serve_forever()  # blocking; runs until the kernel is interrupted
from pythonosc.udp_client import SimpleUDPClient
riot_ip = '192.168.0.103'
riot_port = 8888
actuator_port = 12000
# NOTE(review): the actuator client is built from `riot_ip` here -- confirm
# the actuator really lives at this address.
client = SimpleUDPClient(riot_ip, actuator_port)
client.send_message("/actuator/inflate", -0.0)

def test(lst):
    # Mutates the list in place; visible to the caller through the shared reference.
    lst[0] = "test"

lst = [1,2,3,4]
lst2 = lst.copy()  # shallow copy: lst2 is a distinct list object
test(lst2)
print(lst2)  # the copy was mutated: ['test', 2, 3, 4]
print(lst)   # the original is untouched: [1, 2, 3, 4]
```
|
github_jupyter
|
##### Copyright 2019 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Оценщики
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/guide/estimator"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />Смотрите на TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ru/guide/estimator.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Запустите в Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ru/guide/estimator.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />Изучайте код на GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ru/guide/estimator.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Скачайте ноутбук</a>
</td>
</table>
Note: Данный раздел переведён с помощью русскоязычного сообщества Tensorflow на общественных началах. Поскольку перевод не является официальным, мы не гарантируем, что он на 100% точен и соответствует [официальной документации на английском языке](https://www.tensorflow.org/?hl=en). Если у вас есть предложения по исправлению перевода, мы будем очень рады увидеть pull request в репозиторий [tensorflow/docs-l10n](https://github.com/tensorflow/docs-l10n) на GitHub. Если вы хотите помочь сделать документацию по Tensorflow лучше (выполнить перевод или проверить перевод, подготовленный кем-то другим), напишите нам на [docs-ru@tensorflow.org list](https://groups.google.com/a/tensorflow.org/forum/#!forum/docs-ru).
Этот документ знакомит с `tf.estimator` — высокоуровневым TensorFlow
API. Оценщики включают следующие действия:
* обучение
* оценка
* предсказание
* экспорт для serving
Вы можете использовать созданные нами оценщики или
собственные кастомные оценщики. Все оценщики - как созданные нами, так и пользовательские - являются
классами, основанными на классе `tf.estimator.Estimator`.
Быстрый пример можно посмотреть в [Учебниках по оценщикам](../../tutorials/estimator/linear.ipynb). Обзор структуры API приведен в [статье](https://arxiv.org/abs/1708.02637).
## Преимущества
Аналогично `tf.keras.Model`, `estimator` это абстракция на уровне модели. `tf.estimator` предоставляет некоторые возможности, которые все еще находятся в стадии разработки для `tf.keras`. Это:
* Обучение на сервере параметров
* Полная [TFX](http://tensorflow.org/tfx) интеграция.
## Возможности оценщиков
Оценщики предоставляют следующие преимущества:
* Вы можете запускать модели на основе оценщиков на локальном компьютере или в распределенной многосерверной среде без изменения вашей модели. Кроме того, вы можете запускать модели на основе оценщиков на CPU, GPU, или TPU без изменения исходного кода вашей модели.
* Оценщики обеспечивают безопасный распределенный цикл обучения, который контролирует каким образом и когда:
* загружаются данные
* обрабатываются исключения
* создаются файлы чекпоинтов и восстанавливаются после сбоев
* сохраняются сводные данные для TensorBoard
При написании приложения с оценщиками, вы должны отделить конвейер входных данных
от модели. Это разделение упрощает эксперименты с различными наборами данных.
## Готовые оценщики
Предварительно созданные оценщики позволяют вам работать на гораздо более высоком концептуальном уровне, чем базовые API TensorFlow. Вам больше не нужно беспокоиться о создании вычислительного графа или сессий, поскольку оценщики делают всю "грязную работу" за вас. Кроме того, оценщики, позволяют вам экспериментировать с различными архитектурами моделей, внося только минимальные изменения в код. `tf.estimator.DNNClassifier`, например, представляет собой заранее подготовленный класс Estimator, который обучает модели классификации на основе полносвязных нейронных сетей прямого распространения.
### Структура программы на готовом оценщике
Создание программы TensorFlow на готовом оценщике обычно состоит из четырех этапов:
#### 1. Напишите одну или несколько функций импорта данных.
Например, вы можете создать одну функцию для импорта тренировочных данных и другую функцию для импорта тестовых данных. Каждая функция импорта данных должна возвращать два объекта:
* словарь, в котором ключами являются имена признаков, а значениями - тензоры (Tensor или SparseTensor), содержащие соответствующие признаку данные
* Tensor, содержащий одну или более меток
Например, следующий код иллюстрирует базовый каркас для входной функции:
```
def input_fn(dataset):
    """Skeleton of an Estimator input function: must return (feature_dict, label)."""
    ... # manipulates the dataset, extracting the feature dict and the label
    return feature_dict, label
```
Подробности см. в [руководстве по данным](../../guide/data.md).
#### 2. Определите столбцы признаков.
Каждый `tf.feature_column` определяет имя признака, его тип и любую предобработку. Например, следующий фрагмент кода создает три столбца признаков содержащих целые числа или значения с плавающей точкой. Первые два столбца признаков просто идентифицируют имя признака и тип. Третий столбец признаков определяет лямбду - программу которая будет вызвана для масштабирования сырых данных:
```
# Define three numeric feature columns.
population = tf.feature_column.numeric_column('population')
crime_rate = tf.feature_column.numeric_column('crime_rate')
# The third column attaches a normalizer_fn: a lambda invoked to scale the
# raw values (here, centering on a precomputed global mean).
median_education = tf.feature_column.numeric_column(
    'median_education',
    normalizer_fn=lambda x: x - global_education_mean)
```
Для дополнительной информации см. [учебник по столбцам признаков](https://www.tensorflow.org/tutorials/keras/feature_columns).
#### 3. Создание экземпляра готового оценщика.
Например, вот так можно просто создать экземпляр оценщика `LinearClassifier`:
```
# Instantiate the estimator, passing in the feature columns.
estimator = tf.estimator.LinearClassifier(
    feature_columns=[population, crime_rate, median_education])
```
Для дополнительной информации см. [учебник линейной классификации](https://www.tensorflow.org/tutorials/estimator/linear).
#### 4. Вызов метода обучения, оценки или вывода.
Например, все оценщики предоставляют метод `train` который обучает модель.
```
# `input_fn` is the function created in step 1.
estimator.train(input_fn=my_training_set, steps=2000)
```
Вы можете видеть пример этого ниже.
### Преимущества готовых оценщиков
Готовые оценщики включают в себя лучшие практики, предоставляя следующие преимущества:
* Лучшие практики для определения того, где должны выполняться различные части вычислительного графа, реализуя стратегии на одной машине или на кластере.
* Лучшие практики для написания событий (сводок) и универсально полезные сводки.
Если вы не используете готовые оценщики, вам нужно реализовать предыдущие функции самостоятельно.
## Кастомные оценщики
Сердцем каждого оценщика, будь он готовый или кастомный, является его *функция модели*, которая представляет собой метод построения графов для обучения, оценки и прогнозирования. Когда вы используете готовый оценщик, кто-то за вас уже реализовал функцию модели. Если вы полагаетесь на кастомного оценщика, вам нужно написать функцию модели самостоятельно.
## Рекомендуемый процесс работы
1. Предполагая, что существует готовый оценщик, используйте его для построения своей первой модели и используйте его результаты для определения бейзлайна.
2. Создайте и протестируйте весь конвейер, включая целостность и надежность ваших данных с этим готовым оценщиком.
3. Если имеются другие подходящие готовые оценщики, проведите эксперименты какой из готовых оценщиков дает лучшие результаты.
4. Возможно еще улучшить вашу модель созданием кастомного оценщика.
```
import tensorflow as tf
import tensorflow_datasets as tfds
tfds.disable_progress_bar()
```
## Создание оценщика из модели Keras
Вы можете конвертировать существующие модели Keras в оценщики с помощью `tf.keras.estimator.model_to_estimator`. Это позволяет вашей модели Keras
получить сильные стороны оценщиков, такие как, например, распределенное обучение.
Создайте экземпляр модели Keras MobileNet V2 и скомпилируйте модель с оптимизатором, потерями и метриками необходимыми для обучения:
```
# MobileNetV2 backbone without its classification head, frozen so only the
# new head below is trained.
keras_mobilenet_v2 = tf.keras.applications.MobileNetV2(
    input_shape=(160, 160, 3), include_top=False)
keras_mobilenet_v2.trainable = False
estimator_model = tf.keras.Sequential([
    keras_mobilenet_v2,
    tf.keras.layers.Flatten(),
    # BUG FIX: the original used activation='softmax' on a single unit.
    # Softmax over one logit is identically 1.0, so binary_crossentropy
    # training could never learn anything; a one-unit binary head must
    # use a sigmoid activation.
    tf.keras.layers.Dense(1, activation='sigmoid')
])
# Compile the model for binary classification (cats vs. dogs).
estimator_model.compile(
    optimizer='adam',
    loss='binary_crossentropy',
    metrics=['accuracy'])
```
Создайте `Estimator` из скомпилированной модели Keras. Начальное состояние модели Keras сохранено в созданном `Estimator`:
```
est_mobilenet_v2 = tf.keras.estimator.model_to_estimator(keras_model=estimator_model)
```
Относитесь к полученному `Estimator` так же как к любому другому `Estimator`.
```
IMG_SIZE = 160 # All images will be resized to 160x160
def preprocess(image, label):
  """Cast an image to float32, scale it to [-1, 1], and resize to IMG_SIZE."""
  image = tf.cast(image, tf.float32)
  image = (image/127.5) - 1  # uint8 [0, 255] -> float [-1, 1]
  image = tf.image.resize(image, (IMG_SIZE, IMG_SIZE))
  return image, label
def train_input_fn(batch_size):
  """Build the cats_vs_dogs training pipeline: preprocess, shuffle, batch."""
  data = tfds.load('cats_vs_dogs', as_supervised=True)
  train_data = data['train']
  train_data = train_data.map(preprocess).shuffle(500).batch(batch_size)
  return train_data
```
Для обучения вызовите функцию обучения оценщика:
```
est_mobilenet_v2.train(input_fn=lambda: train_input_fn(32), steps=500)
```
Аналогично, для оценки вызовите функцию оценки оценщика:
```
est_mobilenet_v2.evaluate(input_fn=lambda: train_input_fn(32), steps=10)
```
Дополнительную информацию можно получить в документации по `tf.keras.estimator.model_to_estimator`.
|
github_jupyter
|
# Using Variational Autoencoder to Generate Faces
In this example, we are going to use VAE to generate faces. The dataset we are going to use is [CelebA](http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html). The dataset consists of more than 200K celebrity face images. You have to download the Align&Cropped Images from the above website to run this example.
```
from bigdl.nn.layer import *
from bigdl.nn.criterion import *
from bigdl.optim.optimizer import *
from bigdl.dataset import mnist
import datetime as dt
from glob import glob
import os
import numpy as np
from utils import *
import imageio
image_size = 148         # size passed to utils.get_image (presumably the crop size — see utils.py)
Z_DIM = 128              # dimensionality of the VAE latent space
ENCODER_FILTER_NUM = 32  # base conv filter count; doubled at each encoder stage
# Download the CelebA data first; you may replace this with your own data path.
DATA_PATH = os.getenv("ANALYTICS_ZOO_HOME") + "/apps/variational-autoencoder/img_align_celeba"
from zoo.common.nncontext import *
# Spark/BigDL context; ship utils.py so the executors can import it too.
sc = init_nncontext("Variational Autoencoder Example")
sc.addFile(os.getenv("ANALYTICS_ZOO_HOME")+"/apps/variational-autoencoder/utils.py")
```
## Define the Model
Here, we define a slightly more complicated CNN network using convolution, batchnorm, and leakyRelu.
```
def conv_bn_lrelu(in_channels, out_channles, kw=4, kh=4, sw=2, sh=2, pw=-1, ph=-1):
    """Convolution -> batch-norm -> LeakyReLU(0.2) encoder building block."""
    block = Sequential()
    layers = (
        SpatialConvolution(in_channels, out_channles, kw, kh, sw, sh, pw, ph),
        SpatialBatchNormalization(out_channles),
        LeakyReLU(0.2),
    )
    for layer in layers:
        block.add(layer)
    return block
def upsample_conv_bn_lrelu(in_channels, out_channles, out_width, out_height, kw=3, kh=3, sw=1, sh=1, pw=-1, ph=-1):
    """Bilinear upsample to (out_width, out_height), then conv -> batch-norm -> LeakyReLU(0.2)."""
    block = Sequential()
    for layer in (
        ResizeBilinear(out_width, out_height),
        SpatialConvolution(in_channels, out_channles, kw, kh, sw, sh, pw, ph),
        SpatialBatchNormalization(out_channles),
        LeakyReLU(0.2),
    ):
        block.add(layer)
    return block
def get_encoder_cnn():
    """Build the VAE encoder: 4 strided conv blocks, then two linear heads
    producing the latent mean and log-variance (each of size Z_DIM).

    The trailing comments give the output spatial size per stage
    (width * height * channels).
    """
    input0 = Input()
    #CONV
    conv1 = conv_bn_lrelu(3, ENCODER_FILTER_NUM)(input0) # 32 * 32 * 32
    conv2 = conv_bn_lrelu(ENCODER_FILTER_NUM, ENCODER_FILTER_NUM * 2)(conv1) # 16 * 16 * 64
    conv3 = conv_bn_lrelu(ENCODER_FILTER_NUM * 2, ENCODER_FILTER_NUM * 4)(conv2) # 8 * 8 * 128
    conv4 = conv_bn_lrelu(ENCODER_FILTER_NUM * 4, ENCODER_FILTER_NUM * 8)(conv3) # 4 * 4 * 256
    # Flatten the 4x4x256 feature map and project to an intermediate vector.
    view = View([4*4*ENCODER_FILTER_NUM*8])(conv4)
    inter = Linear(4*4*ENCODER_FILTER_NUM*8, 2048)(view)
    inter = BatchNormalization(2048)(inter)
    inter = ReLU()(inter)
    # fully connected to generate mean and log-variance
    mean = Linear(2048, Z_DIM)(inter)
    log_variance = Linear(2048, Z_DIM)(inter)
    model = Model([input0], [mean, log_variance])
    return model
def get_decoder_cnn():
    """Build the VAE decoder: project the Z_DIM latent vector back to a
    4x4x256 feature map, then upsample through 4 stages to a 64x64x3 image
    squashed to [0, 1] by a final sigmoid.
    """
    input0 = Input()
    linear = Linear(Z_DIM, 2048)(input0)
    linear = Linear(2048, 4*4*ENCODER_FILTER_NUM * 8)(linear)
    reshape = Reshape([ENCODER_FILTER_NUM * 8, 4, 4])(linear)
    bn = SpatialBatchNormalization(ENCODER_FILTER_NUM * 8)(reshape)
    # upsampling (trailing comments: output width * height * channels)
    up1 = upsample_conv_bn_lrelu(ENCODER_FILTER_NUM*8, ENCODER_FILTER_NUM*4, 8, 8)(bn) # 8 * 8 * 128
    up2 = upsample_conv_bn_lrelu(ENCODER_FILTER_NUM*4, ENCODER_FILTER_NUM*2, 16, 16)(up1) # 16 * 16 * 64
    up3 = upsample_conv_bn_lrelu(ENCODER_FILTER_NUM*2, ENCODER_FILTER_NUM, 32, 32)(up2) # 32 * 32 * 32
    up4 = upsample_conv_bn_lrelu(ENCODER_FILTER_NUM, 3, 64, 64)(up3) # 64 * 64 * 3
    output = Sigmoid()(up4)
    model = Model([input0], [output])
    return model
def get_autoencoder_cnn():
    """Assemble the full VAE: encoder -> Gaussian sampler -> decoder.

    Returns the end-to-end model (outputting both the encoder statistics and
    the reconstruction) together with the stand-alone decoder sub-model, so
    the decoder can later be used on its own to generate images.
    """
    img_input = Input()
    enc_stats = get_encoder_cnn()(img_input)
    latent = GaussianSampler()(enc_stats)
    decoder_net = get_decoder_cnn()
    reconstruction = decoder_net(latent)
    vae = Model([img_input], [enc_stats, reconstruction])
    return vae, decoder_net
model, decoder = get_autoencoder_cnn()
```
## Load the Dataset
```
def get_data():
    """Build the training RDD: read up to 100k CelebA JPEGs, normalize them
    via utils.inverse_transform, move channels first (C, H, W), and wrap each
    image as a BigDL Sample whose target is [0.0, the image itself]
    (the dummy 0.0 feeds the KL term, the image feeds the reconstruction term).
    """
    data_files = glob(os.path.join(DATA_PATH, "*.jpg"))
    rdd_train_images = sc.parallelize(data_files[:100000]) \
        .map(lambda path: inverse_transform(get_image(path, image_size)).transpose(2, 0, 1))
    rdd_train_sample = rdd_train_images.map(lambda img: Sample.from_ndarray(img, [np.array(0.0), img]))
    return rdd_train_sample
train_data = get_data()
```
## Define the Training Objective
```
# Combined VAE loss: KL divergence on the (mean, log-variance) output plus
# binary cross-entropy reconstruction loss on the decoded image.
criterion = ParallelCriterion()
criterion.add(KLDCriterion(), 1.0) # You may want to tweak this weight on the KL term
criterion.add(BCECriterion(size_average=False), 1.0 / 64) # 1/64 scaling — presumably a per-batch normalization; confirm
```
## Define the Optimizer
```
batch_size = 100
# Create an Optimizer: distributed Adam training of the VAE for one epoch.
optimizer = Optimizer(
    model=model,
    training_rdd=train_data,
    criterion=criterion,
    optim_method=Adam(0.001, beta1=0.5),
    end_trigger=MaxEpoch(1),
    batch_size=batch_size)
# NOTE(review): 'vea-' looks like a typo for 'vae-'; the string is part of the
# TensorBoard log path, so it is left unchanged here.
app_name='vea-'+dt.datetime.now().strftime("%Y%m%d-%H%M%S")
train_summary = TrainSummary(log_dir='/tmp/vae',
                                app_name=app_name)
# Record the learning rate every 10 iterations and parameters once per epoch.
train_summary.set_summary_trigger("LearningRate", SeveralIteration(10))
train_summary.set_summary_trigger("Parameters", EveryEpoch())
optimizer.set_train_summary(train_summary)
print ("saving logs to ",app_name)
```
## Spin Up the Training
This could take a while. It took about 2 hours on a desktop with an Intel i7-6700 CPU and 40GB Java heap memory. You can reduce the training time by using less data (some changes in the "Load the Dataset" section), but the performance may not be as good.
```
redire_spark_logs()
show_bigdl_info_logs()
def gen_image_row():
    """Decode 8 random latent vectors into one row of 64x64 RGB images (H, W, C)."""
    decoder.evaluate()  # presumably switches the decoder to inference mode — confirm in BigDL docs
    return np.column_stack([decoder.forward(np.random.randn(1, Z_DIM)).reshape(3, 64,64).transpose(1, 2, 0) for s in range(8)])
def gen_image():
    """Stack 8 generated rows into an 8x8 grid of sampled faces."""
    return np.row_stack([gen_image_row() for i in range(8)])
# Train one epoch at a time so a sample grid and decoder snapshot can be
# saved after every epoch.
for i in range(1, 6):
    optimizer.set_end_when(MaxEpoch(i))
    trained_model = optimizer.optimize()
    image = gen_image()
    if not os.path.exists("./images"):
        os.makedirs("./images")
    if not os.path.exists("./models"):
        os.makedirs("./models")
    # you may change the following directory accordingly and make sure the directory
    # you are writing to exists
    imageio.imwrite("./images/image_%s.png" % i , image)
    decoder.saveModel("./models/decoder_%s.model" % i, over_write = True)
import matplotlib
matplotlib.use('Agg')
%pylab inline
import numpy as np
import datetime as dt
import matplotlib.pyplot as plt
# Plot the recorded training loss; loss[:, 0] is used as the x-axis
# (presumably the iteration index — confirm with TrainSummary.read_scalar).
loss = np.array(train_summary.read_scalar("Loss"))
plt.figure(figsize = (12,12))
plt.plot(loss[:,0],loss[:,1],label='loss')
plt.xlim(0,loss.shape[0]+10)
plt.grid(True)
plt.title("loss")
```
## Random Sample Some Images
```
from matplotlib.pyplot import imshow
img = gen_image()
imshow(img)
```
|
github_jupyter
|
<img src="../Pics/MLSb-T.png" width="160">
<br><br>
<center><u><H1>LSTM and GRU on Sentiment Analysis</H1></u></center>
```
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
# Configure the TF1 session: grow GPU memory on demand instead of grabbing it
# all, and log which device each op is placed on; register it with Keras.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.log_device_placement = True
sess = tf.Session(config=config)
set_session(sess)
import numpy as np
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from keras.models import Sequential
from keras.layers import Dense, Embedding, GRU, LSTM, CuDNNLSTM, CuDNNGRU, Dropout
from keras.datasets import imdb
from keras.callbacks import EarlyStopping
from keras.optimizers import Adam
num_words = 20000  # keep only the 20k most frequent words; rarer ones are dropped
(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=num_words)
print(len(X_train), 'train_data')
print(len(X_test), 'test_data')
# Each review is a list of integer word ids of varying length.
print(X_train[0])
len(X_train[0])
```
## Hyperparameters:
```
max_len = 256        # pad/truncate every review to 256 tokens
embedding_size = 10  # dimensionality of the learned word embeddings
batch_size = 128
n_epochs = 10
```
## Creating Sequences
```
pad = 'pre' # pad/truncate at the start of each review; the alternative is 'post'
X_train_pad = pad_sequences(X_train, maxlen=max_len, padding=pad, truncating=pad)
X_test_pad = pad_sequences(X_test, maxlen=max_len, padding=pad, truncating=pad)
X_train_pad[0]  # inspect one padded sequence
```
## Creating the model:
```
model = Sequential()
#The input is a 2D tensor: (samples, sequence_length)
# this layer will return 3D tensor: (samples, sequence_length, embedding_dim)
model.add(Embedding(input_dim=num_words,
                    output_dim=embedding_size,
                    input_length=max_len,
                    name='layer_embedding'))
model.add(Dropout(0.2))
# CuDNNLSTM requires a CUDA GPU; the commented LSTM line is the CPU fallback.
#model.add(LSTM(128,dropout=0.2, recurrent_dropout=0.2))
model.add(CuDNNLSTM(128, return_sequences=False))
model.add(Dropout(0.2))
# Single sigmoid unit for binary (positive/negative) sentiment.
model.add(Dense(1, activation='sigmoid', name='classification'))
model.summary()
```
## Compiling the model:
```
#optimizer = Adam(lr=0.001, decay=1e-6)
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
```
## Callbacks:
```
callback_early_stopping = EarlyStopping(monitor='val_loss', patience=5, verbose=1)
```
## Training the model:
```
%%time
model.fit(X_train_pad, y_train,
epochs=n_epochs,
batch_size=batch_size,
validation_split=0.05,
callbacks=[callback_early_stopping]
)
```
## Testing the model:
```
%%time
eval_ = model.evaluate(X_test_pad, y_test)
print("Loss: {0:.5}".format(eval_[0]))
print("Accuracy: {0:.2%}".format(eval_[1]))
```
## Saving the model:
```
# BUG FIX: the original path "..\data\models\{}" relied on backslashes in a
# non-raw string ("\d" and "\m" are not valid escape sequences) and only
# resolved correctly on Windows. Forward slashes are portable on every OS,
# including Windows.
model.save("../data/models/{}".format('Sentiment-LSTM-GRU'))
```
## GRU model:
```
# Stacked-GRU variant: three CuDNN GRU layers of shrinking width (16 -> 8 -> 4)
# feeding a single sigmoid unit. CuDNN layers require a CUDA GPU.
model_GRU = Sequential()
model_GRU.add(Embedding(input_dim=num_words,
                    output_dim=embedding_size,
                    input_length=max_len,
                    name='layer_embedding'))
model_GRU.add(CuDNNGRU(units=16, return_sequences=True))
model_GRU.add(CuDNNGRU(units=8, return_sequences=True))
# Last recurrent layer returns only the final state for classification.
model_GRU.add(CuDNNGRU(units=4, return_sequences=False))
model_GRU.add(Dense(1, activation='sigmoid'))
model_GRU.summary()
model_GRU.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
%%time
model_GRU.fit(X_train_pad, y_train, validation_split=0.05, epochs=n_epochs, batch_size=batch_size)
%%time
eval_GRU = model_GRU.evaluate(X_test_pad, y_test)
print("Loss: {0:.5}".format(eval_GRU[0]))
print("Accuracy: {0:.2%}".format(eval_GRU[1]))
```
## Examples of Mis-Classified Text
```
#making predictions for the first 1000 test samples
y_pred = model.predict(X_test_pad[:1000])
y_pred = y_pred.T[0]  # flatten the (1000, 1) output to a 1-D probability vector
# Threshold the sigmoid probabilities at 0.5 to get hard 0/1 labels.
labels_pred = np.array([1.0 if p > 0.5 else 0.0 for p in y_pred])
true_labels = np.array(y_test[:1000])
# Indices where the prediction disagrees with the ground truth.
incorrect = np.where(labels_pred != true_labels)
incorrect = incorrect[0]
print(incorrect)
len(incorrect)
# Inspect the second misclassified example.
idx = incorrect[1]
idx
text = X_test[idx]  # still a sequence of integer word ids
print(text)
y_pred[idx]
true_labels[idx]
```
## Converting integers in Text
```
# A dictionary mapping words to an integer index
word_index = imdb.get_word_index()
word_index.items()
# Invert it: integer index -> word.
# NOTE(review): imdb.load_data offsets word ids by index_from=3 by default,
# so this reverse map may be shifted relative to the ids in X_train — verify.
reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])
print(reverse_word_index)
def decode_index(text):
    """Convert a sequence of imdb token ids back into a space-separated string.

    BUG FIX: ``reverse_word_index.get(i)`` returns None for ids missing from
    the vocabulary (e.g. padding/special ids), and ``str.join`` raises
    TypeError on None — substitute a '?' placeholder instead.
    """
    return ' '.join(reverse_word_index.get(i, '?') for i in text)
decode_index(X_train[0])
# Decode every training sequence back to text. A comprehension replaces the
# original index-based append loop (`for i in range(len(X_train))`), which is
# both slower and less idiomatic.
text_data = [decode_index(seq) for seq in X_train]
text_data[0]
```
## Embeddings
```
layer_embedding = model.get_layer('layer_embedding')
weights_embedding = layer_embedding.get_weights()[0]
weights_embedding.shape
weights_embedding[word_index.get('good')]
```
## Similar Words
```
from scipy.spatial.distance import cdist
def print_similar_words(word, metric='cosine'):
    """Print the 10 nearest and 10 farthest vocabulary words to *word* in
    embedding space, with their distances.

    BUG FIX: the original filtered token 0 out of the word list but not out
    of the distance list, so printed words and distances drifted out of
    alignment past the padding token's sorted position. Words and distances
    are now kept together as pairs while filtering.
    """
    token = word_index.get(word)
    if token is None:
        # Fail with a clear message instead of an opaque indexing error.
        raise KeyError("word {0!r} is not in the tokenizer vocabulary".format(word))
    embedding = weights_embedding[token]
    # Distance from every vocabulary embedding to the query embedding.
    distances = cdist(weights_embedding, [embedding],
                      metric=metric).T[0]
    sorted_index = np.argsort(distances)
    # Keep (word, distance) pairs aligned while dropping the padding id 0.
    pairs = [(reverse_word_index[t], distances[t])
             for t in sorted_index if t != 0]
    def print_words(word_dist_pairs):
        for w, d in word_dist_pairs:
            print("{0:.3f} - {1}".format(d, w))
    N = 10
    print("Distance from '{0}':".format(word))
    print_words(pairs[0:N])
    print("-------")
    print_words(pairs[-N:])
print_similar_words('good', metric='cosine')
```
## Reference:
https://keras.io/layers/recurrent/
|
github_jupyter
|
The PyData ecosystem has a number of core Python data containers that allow users to work with a wide array of datatypes, including:
* [Pandas](http://pandas.pydata.org): DataFrame, Series (columnar/tabular data)
* [XArray](http://xarray.pydata.org): Dataset, DataArray (multidimensional arrays)
* [Dask](http://dask.pydata.org): DataFrame, Series, Array (distributed/out of core arrays and columnar data)
* [Streamz](http://streamz.readthedocs.io): DataFrame(s), Series(s) (streaming columnar data)
* [Intake](http://github.com/ContinuumIO/intake): DataSource (remote data)
Many of these libraries have the concept of a high-level plotting API that lets a user generate common plot types very easily. The native plotting APIs are generally built on [Matplotlib](http://matplotlib.org), which provides a solid foundation, but means that users miss out the benefits of modern, interactive plotting libraries for the web like [Bokeh](http://bokeh.pydata.org) and [HoloViews](http://holoviews.org).
hvPlot provides a high-level plotting API built on HoloViews and Bokeh that provides a general and consistent API for plotting data in all the abovementioned formats.
As a first simple illustration of using hvPlot, let's create a small set of random data in Pandas to explore:
```
import numpy as np
import pandas as pd
index = pd.date_range('1/1/2000', periods=1000)  # 1000 consecutive daily timestamps
# Four independent random walks (cumulative sums of standard normal draws).
df = pd.DataFrame(np.random.randn(1000, 4), index=index, columns=list('ABCD')).cumsum()
df.head()
```
## Pandas default .plot()
Pandas provides Matplotlib-based plotting by default, using the `.plot()` method:
```
%matplotlib inline
df.plot();
```
The result is a PNG image that displays easily, but is otherwise static.
## .hvplot()
If we instead change `%matplotlib inline` to `import hvplot.pandas` and use the ``df.hvplot`` method, it will now display an interactively explorable [Bokeh](http://bokeh.pydata.org) plot with panning, zooming, hovering, and clickable/selectable legends:
```
import hvplot.pandas
df.hvplot()
```
This interactive plot makes it much easier to explore the properties of the data, without having to write code to select ranges, columns, or data values manually. Note that while pandas, dask and xarray all use the ``.hvplot`` method, ``intake`` uses hvPlot as its main plotting API, which means that is available using ``.plot()``.
## hvPlot native API
For the plot above, hvPlot dynamically added the Pandas `.hvplot()` method, so that you can use the same syntax as with the Pandas default plotting. If you prefer to be more explicit, you can instead work directly with hvPlot objects:
```
from hvplot import hvPlot
import holoviews as hv
hv.extension('bokeh')  # render HoloViews objects with the Bokeh backend
# Explicit hvPlot wrapper around the DataFrame; calling it plots column 'A'.
plot = hvPlot(df)
plot(y='A')
```
Here we've imported the HoloViews and hvPlot libraries, loaded the HoloViews extension that initializes it to create Bokeh plots inside Jupyter notebooks, created a hvPlot object `plot` for our dataframe, generated a viewable HoloViews object from it by calling `plot()` (here with some additional options to select one column for plotting), and then had Juypter display the resulting HoloViews object using Bokeh in the notebook.
In most cases we'll assume you are using the simpler `import hvplot.pandas` approach that takes care of all these steps for you, but if you prefer you are welcome to break them down explicitly in this way.
## Getting help
When working inside IPython or the Jupyter notebook hvplot methods will automatically complete valid keywords, e.g. pressing tab after declaring the plot type will provide all valid keywords and the docstring:
```python
df.hvplot.line(<TAB>
```
Outside an interactive environment ``hvplot.help`` will bring up information providing the ``kind`` of plot, e.g.:
```python
hvplot.help('line')
```
For more detail on the available options see the [Customization](Customization.ipynb) user guide.
## Next steps
Now that you can see how hvPlot is used, let's jump straight in and discover some of the more powerful things we can do with it in the [Plotting](Plotting.ipynb) section.
|
github_jupyter
|
```
import pickle
# NOTE(review): pickle.load executes arbitrary code if the file is untrusted;
# only load pickles you created yourself.
with open('cleaned_texts.pickle', 'rb') as handle:
    texts = pickle.load(handle)
with open('labels.pickle', 'rb') as handle:
    labels = pickle.load(handle)
MAX_NB_WORDS = 100000 # max no. of words for tokenizer
MAX_SEQUENCE_LENGTH = 400 # max length of each entry (sentence), including padding
VALIDATION_SPLIT = 0.2
EMBEDDING_DIM = 50 # embedding dimensions for word vectors (word2vec/GloVe)
GLOVE_DIR = "glove/glove.6B/glove.6B."+str(EMBEDDING_DIM)+"d.txt"
# Truncate each document to the model's padded input length. A comprehension
# replaces the original append loop (idiomatic and faster; ruff PERF401).
shortened_texts = [text[:MAX_SEQUENCE_LENGTH] for text in texts]
classes = ["fake", "satire", "bias", "conspiracy", "state", "junksci", "hate", "clickbait", "unreliable", "political", "reliable"]
# Encode each string label as its integer index into `classes`.
cat_labels = [classes.index(label) for label in labels]
import numpy as np
import pandas as pd
import re, sys, os, csv, keras, pickle
from keras import regularizers, initializers, optimizers, callbacks
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from keras.utils.np_utils import to_categorical
from keras.layers import *
from keras.models import Model
from keras import backend as K
from keras.engine.topology import Layer, InputSpec
print("[i] Using Keras version",keras.__version__)
""" #uncomment this chunk to create a new Tokenizer
tokenizer = Tokenizer(num_words=MAX_NB_WORDS/2)
tokenizer.fit_on_texts(shortened_texts)
with open('tokenizer.pickle', 'wb') as handle:
pickle.dump(tokenizer, handle, protocol=pickle.HIGHEST_PROTOCOL)
print("[i] Saved word tokenizer to file: tokenizer.pickle")
"""
with open('tokenizer.pickle', 'rb') as handle:
    tokenizer = pickle.load(handle) # load a previously generated Tokenizer
word_index = tokenizer.word_index
print('[i] Found %s unique tokens.' % len(word_index))
# Convert each document to a sequence of integer ids, padded at the end.
sequences = tokenizer.texts_to_sequences(shortened_texts)
data = pad_sequences(sequences, padding='post', maxlen=(MAX_SEQUENCE_LENGTH))
labels = to_categorical(np.asarray(cat_labels)) # convert the category label to one-hot encoding
print('[i] Shape of data tensor:', data.shape)
print('[i] Shape of label tensor:', labels.shape)
# Shuffle data and labels together before splitting.
indices = np.arange(data.shape[0])
np.random.shuffle(indices)
data = data[indices]
labels = labels[indices]
# Hold out the last VALIDATION_SPLIT fraction as the validation set.
nb_validation_samples = int(VALIDATION_SPLIT * data.shape[0])
x_train = data[:-nb_validation_samples]
y_train = labels[:-nb_validation_samples]
x_val = data[-nb_validation_samples:]
y_val = labels[-nb_validation_samples:]
print('[i] Number of entries in each category:')
print("[+] Training:",y_train.sum(axis=0))
print("[+] Validation:",y_val.sum(axis=0))
# Parse GloVe text vectors into {word: float32 vector}.
embeddings_index = {}
print("[i] (long) Loading GloVe from:",GLOVE_DIR,"...",end="")
# FIX: use a context manager so the file is closed even if parsing fails
# (the original leaked the handle on any exception), and read it as UTF-8
# explicitly — GloVe distribution files are UTF-8 encoded.
with open(GLOVE_DIR, encoding="utf-8") as f:
    for line in f:
        values = line.split()
        word = values[0]
        embeddings_index[word] = np.asarray(values[1:], dtype='float32')
print("Done.\n[+] Proceeding with Embedding Matrix...", end="")
# Rows default to random values; words found in GloVe get their pretrained
# vector. (The original comment claimed missing words were all-zeros, which
# contradicted the np.random.random initialization.)
embedding_matrix = np.random.random((len(word_index) + 1, EMBEDDING_DIM))
for word, i in word_index.items():
    embedding_vector = embeddings_index.get(word)
    if embedding_vector is not None:
        embedding_matrix[i] = embedding_vector
print(" Completed!")
sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32') # input to the model
# Embedding initialized from the GloVe matrix and fine-tuned during training.
embedding_layer = Embedding(len(word_index) + 1,
                            EMBEDDING_DIM,
                            weights=[embedding_matrix],
                            input_length=MAX_SEQUENCE_LENGTH,
                            trainable=True)
embedded_sequences = embedding_layer(sequence_input)
# Three parallel 1-D convolutions with different kernel sizes (3/5/7 tokens),
# concatenated along the sequence axis.
l_conv_3 = Conv1D(filters=256,kernel_size=3,activation='relu')(embedded_sequences)
l_conv_4 = Conv1D(filters=256,kernel_size=5,activation='relu')(embedded_sequences)
l_conv_5 = Conv1D(filters=256,kernel_size=7,activation='relu',)(embedded_sequences)
l_conv = Concatenate(axis=1)([l_conv_3, l_conv_4, l_conv_5])
l_pool = MaxPooling1D(4)(l_conv)
l_drop = Dropout(0.3)(l_pool)
l_flat = GlobalAveragePooling1D()(l_drop)
l_dense = Dense(128, activation='relu')(l_flat)
preds = Dense(11, activation='softmax')(l_dense) #follows the number of classes
from keras.utils import multi_gpu_model
import tensorflow as tf
# Build the template model on the CPU, then replicate it across 2 GPUs.
with tf.device('/cpu:0'):
    model = Model(sequence_input, preds)
adadelta = optimizers.Adadelta(lr=2.0, rho=0.95, epsilon=None, decay=0.1)
parallel_model = multi_gpu_model(model, gpus=2)
parallel_model.compile(loss='categorical_crossentropy',
                        optimizer=adadelta,
                        metrics=['acc'])
model.summary()
# Single-GPU fallback, kept for reference:
#adadelta = optimizers.Adadelta(lr=2.0, rho=0.95, epsilon=None, decay=0.1) # let's use a hipster optimizer because we can
#model.compile(loss='categorical_crossentropy',
#              optimizer=adadelta,
#              metrics=['acc'])
#model.summary()
print("Training Progress:")
model_log = parallel_model.fit(x_train, y_train, validation_data=(x_val, y_val),
                                epochs=30, batch_size=512)
```
|
github_jupyter
|
```
import os
import tarfile
from six.moves import urllib
DOWNLOAD_ROOT = "https://raw.githubusercontent.com/ageron/handson-ml/master/"
HOUSING_PATH = "datasets/housing"
HOUSING_URL = DOWNLOAD_ROOT + HOUSING_PATH + "/housing.tgz"
import matplotlib.pyplot as plt
%matplotlib inline
import pandas as pd
def load_housing_data(housing_path=HOUSING_PATH):
    """Read <housing_path>/housing.csv into a pandas DataFrame."""
    return pd.read_csv(os.path.join(housing_path, "housing.csv"))
housing = load_housing_data()
housing.info()
# Separate numeric columns from the single categorical one.
housing_num = housing.drop(['ocean_proximity'], axis=1)
housing_cat = housing['ocean_proximity'].values.reshape(-1, 1)  # 2-D (n, 1) array
housing_num.shape
housing_cat.shape
type(housing_num)  # DataFrame
type(housing_cat)  # numpy array
```
Histogram
```
housing_num.hist(bins=50, figsize=(10, 10))
```
Splitting data
```
from sklearn.model_selection import train_test_split
# Plain 80/20 random split with a fixed seed for reproducibility.
train_set, test_set = train_test_split(housing, test_size=0.2, random_state=42)
train_set.hist(bins=50, figsize=(10, 10))
```
Stratified Shuffle
```
import numpy as np
# Bucket median income into discrete categories for stratified sampling;
# everything above category 5 is clamped to 5.
housing['income_cat'] = np.ceil(housing['median_income'] / 1.5)
# NOTE(review): inplace `where` on a column is deprecated in recent pandas —
# works here but should eventually be rewritten as an assignment.
housing["income_cat"].where(housing["income_cat"] < 5, 5.0, inplace=True)
housing['income_cat'].value_counts()/len(housing)
from sklearn.model_selection import StratifiedShuffleSplit
# Single stratified 80/20 split preserving the income-category proportions.
split = StratifiedShuffleSplit(n_splits=1,test_size=0.2, random_state=42)
for train_idx, test_idx in split.split(housing, housing['income_cat']):
    strat_train = housing.loc[train_idx]
    strat_test = housing.loc[test_idx]
# Drop the helper column now that the split is done.
strat_train.drop(['income_cat'], axis=1, inplace=True)
strat_test.drop(['income_cat'], axis=1, inplace=True)
strat_train.hist(bins=50, figsize=(10, 10))
strat_test.hist(bins=50, figsize=(10, 10))
strat_test.plot(kind="scatter", x="longitude", y="latitude", alpha=0.05)
strat_train.plot(kind="scatter", x="longitude", y="latitude", alpha=0.05)
import matplotlib.cm as cm
# Geographic scatter: color = house value, marker size = district population.
housing.plot(kind='scatter', x="longitude", y="latitude", alpha=0.4, figsize=(15, 10),
        c='median_house_value', s=housing["population"]/100, cmap=plt.get_cmap("jet"), colorbar=True)
corr = housing.corr()
corr["median_house_value"].sort_values(ascending=False)
# NOTE(review): pandas.tools.plotting was removed in pandas 0.25+;
# modern code imports scatter_matrix from pandas.plotting.
from pandas.tools.plotting import scatter_matrix
attr = ["median_house_value", "median_income", "total_rooms", "housing_median_age"]
scatter_matrix(housing[attr], figsize=(10, 10))
housing.plot(kind='scatter', x='median_income', y='median_house_value', alpha=0.05, figsize=(10, 10))
# NOTE(review): sklearn.preprocessing.Imputer was removed in scikit-learn 0.22;
# the modern equivalent is sklearn.impute.SimpleImputer.
from sklearn.preprocessing import Imputer
imp = Imputer(strategy='median')
housing = train_set.copy()
housing.info()
housing_num = housing.drop(['ocean_proximity'], axis=1)
housing_cat = pd.DataFrame(housing['ocean_proximity'].values.reshape(-1, 1))
# Fill missing numeric values with per-column medians.
imp.fit(housing_num)
X = imp.transform(housing_num)
# Integer-encode the categorical column...
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
housing_cat_encoded = le.fit_transform(housing_cat.values.reshape(-1, ))
housing_cat_encoded
# ...then one-hot encode the integer codes (sparse matrix result).
from sklearn.preprocessing import OneHotEncoder
ohe = OneHotEncoder()
housing_cat_encoded = ohe.fit_transform(housing_cat_encoded.reshape(-1, 1))
housing_cat_encoded[0].toarray()
# LabelBinarizer does both steps (text -> one-hot) in one shot.
from sklearn.preprocessing import LabelBinarizer
lb = LabelBinarizer()
housing_cat_encoded = lb.fit_transform(housing_cat).astype(np.float32)
housing_cat_encoded[0]
# Standardize a small random sample: zero mean, unit variance per column.
from sklearn.preprocessing import StandardScaler
n = np.random.randint(0, 10, size=(10,1)).astype(np.float64)
ss = StandardScaler()
ss.fit_transform(n)
n
```
|
github_jupyter
|
```
import numpy as np
import random
import time
import torch
from x_transformers.x_transformers import XTransformer
import torch
from run_experiment import *
from generate_data import *
```
## Variables
```
from sklearn.model_selection import ParameterGrid
TAG = 'improve_score_2paper_55len'  # experiment tag used in log/model file names
TASK_NAME = 'reverse'               # sequence-reversal toy task
TRAIN_SIZE = 100_000
VAL_SIZE = 2_000
TEST_SIZE = 10_000
NUM_INITS = 3                       # independent random initializations per config
NUM_BATCHES = int(2.3e5)            # training batches per run
BATCH_SIZE = 128
LEARNING_RATE = 3e-4
GENERATE_EVERY = 3000               # validation/generation interval in batches
NUM_TOKENS = 16 + 2                 # 16 symbols + 2 extra ids (presumably pad/start — confirm)
ENC_SEQ_LEN = 55
DEC_SEQ_LEN = 55
INPUT_LEN = 55
```
#### Generate data
```
# class reverse_generator:
# def __init__(self):
# self.src_mask = torch.ones(BATCH_SIZE, ENC_SEQ_LEN).bool().cuda()
# self.tgt_mask = torch.ones(BATCH_SIZE, DEC_SEQ_LEN+1).bool().cuda()
# def __next__(self):
# X = np.zeros([BATCH_SIZE, ENC_SEQ_LEN]).astype(int)
# y = np.zeros([BATCH_SIZE, DEC_SEQ_LEN+1]).astype(int)
# y[:, 0] = 1
# for i in range(BATCH_SIZE):
# sequence_length = np.random.randint(1, ENC_SEQ_LEN)
# random_sequence = np.random.randint(2, NUM_TOKENS, sequence_length)
# X[i, :sequence_length] = random_sequence
# y[i, 1:sequence_length + 1] = random_sequence[::-1]
# return torch.tensor(X), torch.tensor(y), self.src_mask, self.tgt_mask
# generator = reverse_generator()
# generate_data(generator, task_name=TASK_NAME, train_size=TRAIN_SIZE, test_size=TEST_SIZE, val_size=VAL_SIZE)
```
#### Gridsearch params
```
optimizer = torch.optim.Adam
# Grid of optimizer settings to sweep (learning rate only).
optim_params = list(ParameterGrid({
    'lr': [0.001, 0.0008, 0.0012, 0.0003]
}))
print(len(optim_params))
optim_params
gen_train = data_loader(task_name=f'{TASK_NAME}_train', batch_size=BATCH_SIZE, enc_seq_len=INPUT_LEN, dec_seq_len=DEC_SEQ_LEN)
gen_val = data_loader(task_name=f'{TASK_NAME}_val', batch_size=VAL_SIZE, enc_seq_len=INPUT_LEN, dec_seq_len=DEC_SEQ_LEN)
gen_test = data_loader(task_name=f'{TASK_NAME}_test', batch_size=TEST_SIZE, enc_seq_len=INPUT_LEN, dec_seq_len=DEC_SEQ_LEN)
print_file = f'logs/{TASK_NAME}_{TAG}_cout_logs.txt'
t = time.time()
# NOTE(review): `model_parameters` is only defined in a later cell (the "Run"
# section below), so this cell depends on out-of-order notebook execution —
# confirm the intended run order.
param = list(model_parameters)[0]
# Expand the combined 'depth,heads' tuple into separate enc/dec settings.
param['enc_depth'], param['enc_heads'] = param['depth,heads']
param['dec_depth'], param['dec_heads'] = param['depth,heads']
param.pop('depth,heads')
with torch.cuda.device(1):
    for i, optim_param in enumerate(list(optim_params)):
        with open(print_file, 'a') as f:
            f.write('\n\n' + str(optim_param)+'\n')
        for init_num in range(1):
            model = XTransformer(**param).cuda()
            model_name = f"{TASK_NAME}{INPUT_LEN}_dim{param['dim']}d{param['enc_depth']}h{param['enc_heads']}M{param['enc_num_memory_tokens']}l{param['enc_max_seq_len']}_v{init_num}_{optim_param}"
            optim = optimizer(model.parameters(), **optim_param)
            train_validate_model(model,
                                 train_generator=gen_train,
                                 val_generator=gen_val,
                                 optim=optim,
                                 model_name=model_name,
                                 dec_seq_len=DEC_SEQ_LEN,
                                 num_batches=NUM_BATCHES,
                                 generate_every=GENERATE_EVERY,
                                 print_file=print_file)
            test_model(model, gen_test, model_name, param, TASK_NAME, tag=str(optim_param), dec_seq_len=param['dec_max_seq_len'])
with open(print_file, 'a') as f:
    f.write(f'\nTotal time: {time.time() - t}\n')
t = time.time()
```
### Run
```
LEARNING_RATE = 0.001
# Architecture grid: vary encoder sequence length and memory-token count.
model_parameters = ParameterGrid({'dim': [128],
                          'tie_token_embeds': [True],
                          'return_tgt_loss': [True],
                          'enc_num_tokens': [NUM_TOKENS],
                          'depth,heads': [(2,4)],
                          'enc_max_seq_len': [55, 28],
                          'dec_num_tokens': [NUM_TOKENS],
                          'dec_max_seq_len': [DEC_SEQ_LEN],
                          'enc_num_memory_tokens': [0, 4, 16]})
print('Total runs: ', NUM_INITS * len(model_parameters))
gen_train = data_loader(task_name=f'{TASK_NAME}_train', batch_size=BATCH_SIZE, enc_seq_len=INPUT_LEN, dec_seq_len=DEC_SEQ_LEN)
gen_val = data_loader(task_name=f'{TASK_NAME}_val', batch_size=VAL_SIZE, enc_seq_len=INPUT_LEN, dec_seq_len=DEC_SEQ_LEN)
gen_test = data_loader(task_name=f'{TASK_NAME}_test', batch_size=TEST_SIZE, enc_seq_len=INPUT_LEN, dec_seq_len=DEC_SEQ_LEN)
print_file = f'logs/{TASK_NAME}_{TAG}_memory_logs.txt'
t = time.time()
# Full sweep: NUM_INITS initializations x every architecture in the grid.
with torch.cuda.device(1):
    for init_num in range(NUM_INITS):
        with open(print_file, 'a') as f:
            f.write('\n\nInit number ' + str(init_num)+'\n')
        for i, param in enumerate(list(model_parameters)):
            with open(print_file, 'a') as f:
                f.write('\n\n' + str(param)+'\n')
            # Expand the combined 'depth,heads' tuple into enc/dec settings.
            param['enc_depth'], param['enc_heads'] = param['depth,heads']
            param['dec_depth'], param['dec_heads'] = param['depth,heads']
            param.pop('depth,heads')
            with open(print_file, 'a') as f:
                f.write(f'{i / len(model_parameters) * 100}%')
            model = XTransformer(**param).cuda()
            model_name = f"{TASK_NAME}{INPUT_LEN}_dim{param['dim']}d{param['enc_depth']}h{param['enc_heads']}M{param['enc_num_memory_tokens']}l{param['enc_max_seq_len']}_v{init_num}"
            optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
            train_validate_model(model,
                                 train_generator=gen_train,
                                 val_generator=gen_val,
                                 optim=optim,
                                 model_name=model_name,
                                 dec_seq_len=DEC_SEQ_LEN,
                                 num_batches=NUM_BATCHES,
                                 generate_every=GENERATE_EVERY,
                                 print_file=print_file)
            test_model(model, gen_test, model_name, param, TASK_NAME, tag=TAG, dec_seq_len=param['dec_max_seq_len'])
with open(print_file, 'a') as f:
    f.write(f'\nTotal time: {time.time() - t}\n')
t = time.time()
```
stopped on:
{'dec_max_seq_len': 16, 'dec_num_tokens': 18, 'depth,heads': (2, 4), 'dim': 64, 'enc_max_seq_len': 8, 'enc_num_memory_tokens': 16, 'enc_num_tokens': 18, 'return_tgt_loss': True, 'tie_token_embeds': True}
### Test!
```
# Smoke-test a single grid configuration: build the model, pull one batch,
# and run the encoder standalone to inspect the embedding shape.
init_num = 0
gen_train = data_loader(task_name=f'{TASK_NAME}_train', batch_size=BATCH_SIZE, enc_seq_len=ENC_SEQ_LEN, dec_seq_len=DEC_SEQ_LEN)
gen_val = data_loader(task_name=f'{TASK_NAME}_val', batch_size=VAL_SIZE, enc_seq_len=ENC_SEQ_LEN, dec_seq_len=DEC_SEQ_LEN)
gen_test = data_loader(task_name=f'{TASK_NAME}_test', batch_size=TEST_SIZE, enc_seq_len=ENC_SEQ_LEN, dec_seq_len=DEC_SEQ_LEN)
param = list(model_parameters)[5]  # arbitrary fixed pick from the grid
print(param)
# Split the combined 'depth,heads' grid key into XTransformer kwargs.
param['enc_depth'], param['enc_heads'] = param['depth,heads']
param['dec_depth'], param['dec_heads'] = param['depth,heads']
param.pop('depth,heads')
model = XTransformer(**param).cuda()
model_name = f"{TASK_NAME}_dim{param['dim']}d{param['enc_depth']}h{param['enc_heads']}M{param['enc_num_memory_tokens']}l{param['enc_max_seq_len']}_v{init_num}"
optim = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
src, tgt, _, _ = next(gen_train)  # one training batch
print(model.encoder.max_seq_len, model.encoder.num_memory_tokens)
# Run a doubled batch through the encoder alone; return_embeddings=True
# presumably yields raw embeddings rather than logits -- confirm against
# the x-transformers API.
model.encoder(torch.cat((src, src)), return_embeddings=True).shape
```
|
github_jupyter
|
# Automated Machine Learning
**Continuous retraining using Pipelines and Time-Series TabularDataset**
## Contents
1. [Introduction](#Introduction)
2. [Setup](#Setup)
3. [Compute](#Compute)
4. [Run Configuration](#Run-Configuration)
5. [Data Ingestion Pipeline](#Data-Ingestion-Pipeline)
6. [Training Pipeline](#Training-Pipeline)
7. [Publish Retraining Pipeline and Schedule](#Publish-Retraining-Pipeline-and-Schedule)
8. [Test Retraining](#Test-Retraining)
## Introduction
In this example we use AutoML and Pipelines to enable continuous retraining of a model based on updates to the training dataset. We will create two pipelines, the first one to demonstrate a training dataset that gets updated over time. We leverage time-series capabilities of `TabularDataset` to achieve this. The second pipeline utilizes pipeline `Schedule` to trigger continuous retraining.
Make sure you have executed the [configuration notebook](../../../configuration.ipynb) before running this notebook.
In this notebook you will learn how to:
* Create an Experiment in an existing Workspace.
* Configure AutoML using AutoMLConfig.
* Create data ingestion pipeline to update a time-series based TabularDataset
* Create training pipeline to prepare data, run AutoML, register the model and setup pipeline triggers.
## Setup
As part of the setup you have already created an Azure ML `Workspace` object. For AutoML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments.
```
import logging
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from sklearn import datasets
import azureml.core
from azureml.core.experiment import Experiment
from azureml.core.workspace import Workspace
from azureml.train.automl import AutoMLConfig
```
This sample notebook may use features that are not available in previous versions of the Azure ML SDK.
Accessing the Azure ML workspace requires authentication with Azure.
The default authentication is interactive authentication using the default tenant. Executing the ws = Workspace.from_config() line in the cell below will prompt for authentication the first time that it is run.
If you have multiple Azure tenants, you can specify the tenant by replacing the ws = Workspace.from_config() line in the cell below with the following:
```
from azureml.core.authentication import InteractiveLoginAuthentication
auth = InteractiveLoginAuthentication(tenant_id = 'mytenantid')
ws = Workspace.from_config(auth = auth)
```
If you need to run in an environment where interactive login is not possible, you can use Service Principal authentication by replacing the ws = Workspace.from_config() line in the cell below with the following:
```
from azureml.core.authentication import ServicePrincipalAuthentication
# Non-interactive authentication via a service principal.
# (Fixed: the original read `auth = auth = ServicePrincipalAuthentication(...)`
# -- a duplicated assignment; one binding is sufficient.)
auth = ServicePrincipalAuthentication('mytenantid', 'myappid', 'mypassword')
ws = Workspace.from_config(auth=auth)
```
For more details, see aka.ms/aml-notebook-auth
```
# Connect to the Azure ML workspace (uses the cached config; may prompt
# for interactive login on first run).
ws = Workspace.from_config()
dstor = ws.get_default_datastore()
# Choose a name for the run history container in the workspace.
experiment_name = "retrain-noaaweather"
experiment = Experiment(ws, experiment_name)
# Summarize workspace / SDK details in a one-row DataFrame for display.
output = {}
output["Subscription ID"] = ws.subscription_id
output["Workspace"] = ws.name
output["Resource Group"] = ws.resource_group
output["Location"] = ws.location
output["Run History Name"] = experiment_name
output["SDK Version"] = azureml.core.VERSION
pd.set_option("display.max_colwidth", None)  # don't truncate long values
outputDf = pd.DataFrame(data=output, index=[""])
outputDf.T
```
## Compute
#### Create or Attach existing AmlCompute
You will need to create a compute target for your AutoML run. In this tutorial, you create AmlCompute as your training compute resource.
> Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.
#### Creation of AmlCompute takes approximately 5 minutes.
If the AmlCompute with that name is already in your workspace this code will skip the creation process.
As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota.
```
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException
# Choose a name for your CPU cluster
amlcompute_cluster_name = "cont-cluster"
# Verify that cluster does not exist already
try:
    compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)
    print("Found existing cluster, use it.")
except ComputeTargetException:
    # No cluster with that name yet -- provision a new one.
    compute_config = AmlCompute.provisioning_configuration(
        vm_size="STANDARD_DS12_V2", max_nodes=4
    )
    compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)
# Block until provisioning finishes (no-op if the cluster already existed).
compute_target.wait_for_completion(show_output=True)
```
## Run Configuration
```
from azureml.core.runconfig import CondaDependencies, RunConfiguration
# create a new RunConfig object
conda_run_config = RunConfiguration(framework="python")
# Set compute target to AmlCompute
conda_run_config.target = compute_target
conda_run_config.environment.docker.enabled = True
# Runtime dependencies for every pipeline step; numpy is pinned explicitly.
cd = CondaDependencies.create(
    pip_packages=[
        "azureml-sdk[automl]",
        "applicationinsights",
        "azureml-opendatasets",
        "azureml-defaults",
    ],
    conda_packages=["numpy==1.19.5"],
    pin_sdk_version=False,
)
conda_run_config.environment.python.conda_dependencies = cd
print("run config is ready")
```
## Data Ingestion Pipeline
For this demo, we will use NOAA weather data from [Azure Open Datasets](https://azure.microsoft.com/services/open-datasets/). You can replace this with your own dataset, or you can skip this pipeline if you already have a time-series based `TabularDataset`.
```
# The name and target column of the Dataset to create
dataset = "NOAA-Weather-DS4"  # registered TabularDataset name
target_column_name = "temperature"  # regression label column
```
### Upload Data Step
The data ingestion pipeline has a single step with a script to query the latest weather data and upload it to the blob store. During the first run, the script will create and register a time-series based `TabularDataset` with the past one week of weather data. For each subsequent run, the script will create a partition in the blob store by querying NOAA for new weather data since the last modified time of the dataset (`dataset.data_changed_time`) and creating a data.csv file.
```
from azureml.pipeline.core import Pipeline, PipelineParameter
from azureml.pipeline.steps import PythonScriptStep
# Pipeline parameter so the dataset name can be overridden at submission time.
ds_name = PipelineParameter(name="ds_name", default_value=dataset)
# Single-step ingestion: the script queries NOAA for new weather data and
# uploads it to the blob store (see the markdown cell above).
upload_data_step = PythonScriptStep(
    script_name="upload_weather_data.py",
    allow_reuse=False,  # must re-run on every submission to fetch fresh data
    name="upload_weather_data",
    arguments=["--ds_name", ds_name],
    compute_target=compute_target,
    runconfig=conda_run_config,
)
```
### Submit Pipeline Run
```
# Build the one-step ingestion pipeline and run it synchronously.
data_pipeline = Pipeline(
    description="pipeline_with_uploaddata", workspace=ws, steps=[upload_data_step]
)
data_pipeline_run = experiment.submit(
    data_pipeline, pipeline_parameters={"ds_name": dataset}
)
data_pipeline_run.wait_for_completion(show_output=False)
```
## Training Pipeline
### Prepare Training Data Step
Script to check if new data is available since the model was last trained. If no new data is available, we cancel the remaining pipeline steps. We need to set allow_reuse flag to False to allow the pipeline to run even when inputs don't change. We also need the name of the model to check the time the model was last trained.
```
from azureml.pipeline.core import PipelineData
# The model name with which to register the trained model in the workspace.
model_name = PipelineParameter("model_name", default_value="noaaweatherds")
# Gate step: check_data.py cancels the remaining steps when no new data has
# arrived since the model was last trained (see markdown above);
# allow_reuse=False forces it to run on every submission.
data_prep_step = PythonScriptStep(
    script_name="check_data.py",
    allow_reuse=False,
    name="check_data",
    arguments=["--ds_name", ds_name, "--model_name", model_name],
    compute_target=compute_target,
    runconfig=conda_run_config,
)
from azureml.core import Dataset
# Training data: the registered dataset minus its partition column.
train_ds = Dataset.get_by_name(ws, dataset)
train_ds = train_ds.drop_columns(["partition_date"])
```
### AutoMLStep
Create an AutoMLConfig and a training step.
```
from azureml.train.automl import AutoMLConfig
from azureml.pipeline.steps import AutoMLStep
# AutoML experiment settings: short timeouts suitable for a demo run.
automl_settings = {
    "iteration_timeout_minutes": 10,
    "experiment_timeout_hours": 0.25,
    "n_cross_validations": 3,
    "primary_metric": "r2_score",
    "max_concurrent_iterations": 3,
    "max_cores_per_iteration": -1,  # use all cores on each node
    "verbosity": logging.INFO,
    "enable_early_stopping": True,
}
# Regression over the weather dataset, predicting `target_column_name`.
automl_config = AutoMLConfig(
    task="regression",
    debug_log="automl_errors.log",
    path=".",
    compute_target=compute_target,
    training_data=train_ds,
    label_column_name=target_column_name,
    **automl_settings,
)
from azureml.pipeline.core import PipelineData, TrainingOutput
# Named pipeline outputs capturing the AutoML step's metrics and best model.
metrics_output_name = "metrics_output"
best_model_output_name = "best_model_output"
metrics_data = PipelineData(
    name="metrics_data",
    datastore=dstor,
    pipeline_output_name=metrics_output_name,
    training_output=TrainingOutput(type="Metrics"),
)
model_data = PipelineData(
    name="model_data",
    datastore=dstor,
    pipeline_output_name=best_model_output_name,
    training_output=TrainingOutput(type="Model"),
)
automl_step = AutoMLStep(
    name="automl_module",
    automl_config=automl_config,
    outputs=[metrics_data, model_data],
    allow_reuse=False,  # always retrain, even when inputs look unchanged
)
```
### Register Model Step
Script to register the model to the workspace.
```
# Final step: register_model.py registers the best model (the AutoML step's
# model output) in the workspace under `model_name`.
register_model_step = PythonScriptStep(
    script_name="register_model.py",
    name="register_model",
    allow_reuse=False,
    arguments=[
        "--model_name",
        model_name,
        "--model_path",
        model_data,
        "--ds_name",
        ds_name,
    ],
    inputs=[model_data],  # wires the AutoML model output into this step
    compute_target=compute_target,
    runconfig=conda_run_config,
)
```
### Submit Pipeline Run
```
# Three-step retraining pipeline: gate on new data, run AutoML, register model.
training_pipeline = Pipeline(
    description="training_pipeline",
    workspace=ws,
    steps=[data_prep_step, automl_step, register_model_step],
)
training_pipeline_run = experiment.submit(
    training_pipeline,
    pipeline_parameters={"ds_name": dataset, "model_name": "noaaweatherds"},
)
training_pipeline_run.wait_for_completion(show_output=False)
```
### Publish Retraining Pipeline and Schedule
Once we are happy with the pipeline, we can publish the training pipeline to the workspace and create a schedule to trigger on blob change. The schedule polls the blob store where the data is being uploaded and runs the retraining pipeline if there is a data change. A new version of the model will be registered to the workspace once the run is complete.
```
pipeline_name = "Retraining-Pipeline-NOAAWeather"
# Publish the training pipeline so a schedule can trigger it.
published_pipeline = training_pipeline.publish(
    name=pipeline_name, description="Pipeline that retrains AutoML model"
)
published_pipeline
from azureml.pipeline.core import Schedule
# Datastore-polling schedule: re-run the retraining pipeline when the data
# on `dstor` changes; polling_interval is 1440 (minutes, per the markdown
# above, i.e. daily -- confirm units against the SDK docs).
schedule = Schedule.create(
    workspace=ws,
    name="RetrainingSchedule",
    pipeline_parameters={"ds_name": dataset, "model_name": "noaaweatherds"},
    pipeline_id=published_pipeline.id,
    experiment_name=experiment_name,
    datastore=dstor,
    wait_for_provisioning=True,
    polling_interval=1440,
)
```
## Test Retraining
Here we setup the data ingestion pipeline to run on a schedule, to verify that the retraining pipeline runs as expected.
Note:
* Azure NOAA Weather data is updated daily and retraining will not trigger if there is no new data available.
* Depending on the polling interval set in the schedule, the retraining may take some time to trigger after the data ingestion pipeline completes.
```
# Publish the DATA-INGESTION pipeline and put it on a schedule, so new
# weather data keeps landing on the datastore (which in turn triggers the
# retraining schedule created above).
pipeline_name = "DataIngestion-Pipeline-NOAAWeather"
# Fixed: the original published `training_pipeline` again. The pipeline
# name, the description, and the surrounding markdown all state that this
# cell publishes the data-ingestion pipeline built earlier (`data_pipeline`).
published_pipeline = data_pipeline.publish(
    name=pipeline_name, description="Pipeline that updates NOAAWeather Dataset"
)
published_pipeline
from azureml.pipeline.core import Schedule
schedule = Schedule.create(
    workspace=ws,
    name="RetrainingSchedule-DataIngestion",
    pipeline_parameters={"ds_name": dataset},
    pipeline_id=published_pipeline.id,
    experiment_name=experiment_name,
    datastore=dstor,
    wait_for_provisioning=True,
    polling_interval=1440,
)
```
|
github_jupyter
|
```
# Python Libraries
%matplotlib inline
import pickle
import numpy as np
import pandas as pd
import matplotlib
from keras.datasets import cifar10
from keras import backend as K
# Custom Networks
from networks.lenet import LeNet
from networks.pure_cnn import PureCnn
from networks.network_in_network import NetworkInNetwork
from networks.resnet import ResNet
from networks.densenet import DenseNet
from networks.wide_resnet import WideResNet
from networks.capsnet import CapsNet
import cv2 as cv
# Helper functions
from differential_evolution import differential_evolution
import helper
#from scipy.misc import imsave
import scipy.misc
matplotlib.style.use('ggplot')
np.random.seed(100)  # reproducible randomness across reruns
# CIFAR-10 train/test splits; class_names maps label index -> readable name.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
def perturb_image(xs, img):
    """Apply pixel perturbations to copies of `img`.

    Each row of `xs` is a flat run of 5-tuples (x, y, r, g, b); a 1-D `xs`
    is treated as a single perturbation. Returns an array of shape
    (n_perturbations, *img.shape); `img` itself is never modified.
    """
    # Promote a lone perturbation vector to a batch of one so both call
    # forms take the same path below.
    if xs.ndim < 2:
        xs = np.array([xs])
    # Replicate the image once per perturbation (np.tile copies the data,
    # prepending a batch axis).
    reps = [len(xs)] + [1] * (xs.ndim + 1)
    imgs = np.tile(img, reps)
    # Coordinates and colour channels must be integral before indexing.
    xs = xs.astype(int)
    for vec, canvas in zip(xs, imgs):
        # Break the flat vector back into per-pixel 5-tuples and paint each.
        for pixel in np.split(vec, len(vec) // 5):
            row, col, *colour = pixel
            canvas[row, col] = colour
    return imgs
K.tensorflow_backend._get_available_gpus()  # sanity-check GPU visibility
#nin = NetworkInNetwork()
#resnet = ResNet()
# Only DenseNet is attacked in this run; other models are left commented out.
densenet = DenseNet()
models = [densenet]
x_test.shape
def predict_classes(xs, img, target_class, model, minimize=True):
    """Score perturbations by the model's confidence in `target_class`.

    Each row of `xs` is one perturbation applied to `img`. The caller's
    optimizer always minimizes, so when a high confidence is the goal the
    complement of the score is returned instead.
    """
    scores = model.predict(perturb_image(xs, img))[:, target_class]
    return scores if minimize else 1 - scores
def attack_success(x, img, target_class, model, targeted_attack=False, verbose=False):
    """Differential-evolution callback: has the attack on test image `img`
    (an index into the global x_test) already succeeded?

    Returns True on success and falls through to an implicit None otherwise.
    The None is deliberate (not False): SciPy stops the search on any truthy
    callback result.
    """
    candidate = perturb_image(x, x_test[img])
    scores = model.predict(candidate)[0]
    top = np.argmax(scores)
    if verbose:
        print('Confidence:', scores[target_class])
    # Targeted: must land ON the target class; untargeted: anywhere else.
    hit = (top == target_class) if targeted_attack else (top != target_class)
    if hit:
        return True
# def save_success(img, name):
# scipy.misc.imsave('data/'+name + tail, img)
count = 0
import os
def attack(img, model,cls_id, case_path, target=None, pixel_count=1,
           maxiter=75, popsize=400,verbose=False):
    """Run a differential-evolution pixel attack on one CIFAR-10 test image.

    img: index into the global x_test / y_test arrays.
    target: class index for a targeted attack, or None for untargeted.
    Saves result images under `case_path` and returns a stats list.
    NOTE(review): reads globals x_test, y_test, class_names, os, cv, helper
    and differential_evolution; indentation reconstructed from a flattened
    notebook dump -- confirm against the original cell.
    """
    # Change the target class based on whether this is a targeted attack or not
    targeted_attack = target is not None
    target_class = target if targeted_attack else y_test[img,0]
    # Define bounds for a flat vector of x,y,r,g,b values
    # For more pixels, repeat this layout
    bounds = [(0,32), (0,32), (0,256), (0,256), (0,256)] * pixel_count
    # Population multiplier, in terms of the size of the perturbation vector x
    popmul = max(1, popsize // len(bounds))
    # Format the predict/callback functions for the differential evolution algorithm
    predict_fn = lambda xs: predict_classes(
        xs, x_test[img], target_class, model, target is None)
    callback_fn = lambda x, convergence: attack_success(
        x, img, target_class, model, targeted_attack, verbose)
    # Call Scipy's Implementation of Differential Evolution
    attack_result = differential_evolution(
        predict_fn, bounds, maxiter=maxiter, popsize=popmul,
        recombination=1, atol=-1, callback=callback_fn, polish=False)
    # Calculate some useful statistics to return from this function
    attack_image = perturb_image(attack_result.x, x_test[img])[0]
    prior_probs = model.predict_one(x_test[img])
    predicted_probs = model.predict_one(attack_image)
    predicted_class = np.argmax(predicted_probs)
    actual_class = y_test[img,0]
    success = predicted_class != actual_class
    # if(success):
    #     #count += 1
    #     name = 'horrse_attacked_'+str(img)+'_'+str(actual_class) +'_'+str(predicted_class)+'.png'
    #     save_success(attack_image,name)
    # Confidence drop in the true class caused by the perturbation.
    cdiff = prior_probs[actual_class] - predicted_probs[actual_class]
    import scipy.misc
    # Confidence in the true class below 0.5 is treated as a (near-)success:
    # save both the original and the attacked image for inspection.
    if(predicted_probs[actual_class] < 0.5):
        # Show the best attempt at a solution (successful or not)
        helper.plot_image(attack_image, actual_class, class_names, predicted_class)
        #saved
        cls_name = case_path + str(cls_id)+'_'+class_names[cls_id]
        ori_name = cls_name + '/original/'+str(img) + '_' + str(actual_class) + '.png'
        ori_path = cls_name + '/original/'
        if not os.path.exists(ori_path):
            #os.makedirs(Annotations_path)
            os.system('mkdir -p %s' % (ori_path))
        scipy.misc.imsave(ori_name, x_test[img])
        at_name = cls_name + '/attacked/'+str(img) +'_'+str(actual_class) +'_'+str(predicted_class)+'.png'
        at_path = cls_name + '/attacked/'
        if not os.path.exists(at_path):
            #os.makedirs(Annotations_path)
            os.system('mkdir -p %s' %(at_path))
        #scipy.misc.imsave(at_name, attack_image)
        cv.imwrite(at_name, attack_image)
        #np.savetxt('horse_cor_'+str(img)+'.txt', attack_result.x,delimiter=',')
        #np.savetxt('test.out', x, delimiter=',')
        print("success:", prior_probs[actual_class], predicted_probs[actual_class])
    else:
        # Attack failed: archive the unmodified image under OK/.
        ok_cls_name = case_path+str(cls_id)+'_'+class_names[cls_id]
        ok_name = ok_cls_name + '/OK/'+str(img) +'_' + str(actual_class)+ '.png'
        ok_path = ok_cls_name + '/OK/'
        if not os.path.exists(ok_path):
            #os.makedirs(Annotations_path)
            os.system('mkdir -p %s' %(ok_path))
        cv.imwrite(ok_name, x_test[img])
        #scipy.misc.imsave(ok_name, x_test[img])
        # Show the best attempt at a solution (successful or not)
        helper.plot_image(attack_image, actual_class, class_names, predicted_class)
    return [model.name, pixel_count, img, actual_class, predicted_class, success, cdiff, prior_probs, predicted_probs, attack_result.x]
# pixels = 1 # Number of pixels to attack
# model = resnet
# for i in range(10000):
# if(y_test[i] == 0):
# image = i
# cls = 0
# _ = attack(image, model, cls, pixel_count=pixels,verbose=True)
# Attack every CIFAR-10 test image with a one-pixel perturbation against the
# DenseNet model, saving originals/attacked/OK images under case_path.
pixels = 1  # Number of pixels to attack
model = densenet
case_path = 'densenet_data_p1/'
for i in range(10000):
    print(i)
    cls = y_test[i][0]
    image = i
    _ = attack(image, model, cls, case_path, pixel_count=pixels, verbose=True)
# Scratch arithmetic left in the notebook (presumably tallying successes
# per class -- confirm; the value is only echoed by the notebook).
15+52+6+40+28+30+29+27+22+29+33+30+13+57+16+41+10+54+8+63
# Fixed: the original read `print(y_test.shap)` -- a typo for `.shape`
# that raises AttributeError.
print(y_test.shape)
```
|
github_jupyter
|
```
import matplotlib.pyplot as plt
%matplotlib inline
# Basic scatter plot of two paired samples.
plt.scatter([1700, 2100, 1900, 1300, 1600, 2200], [53000, 65000, 59000, 41000, 50000, 68000])
plt.show()
x = [1300, 1400, 1600, 1900, 2100, 2300]
y = [88000, 72000, 94000, 86000, 112000, 98000]
# Scatter with explicit marker size, colour and transparency.
plt.scatter(x, y, s=32, c='cyan', alpha=0.5)
plt.show()
# Bar chart over the same x/y data.
plt.bar(x, y, width=20, alpha=0.5)
plt.show()
# Histogram of a small sample (default binning).
plt.hist([100, 400, 200, 100, 400, 100, 300, 200, 100], alpha=0.5)
plt.show()
colors = ['yellowgreen', 'gold', 'lightskyblue', 'lightcoral']
# NOTE(review): 5 wedges but only 4 colours -- matplotlib reuses the list;
# confirm that is intended.
plt.pie([11, 13, 56, 67, 23], colors=colors, shadow=True, startangle=90)
plt.show()
height = [65.0, 59.8, 63.3, 63.2, 65.0, 63.3, 65.8, 62.8, 61.1, 64.3, 63.0, 64.2, 65.4, 64.1, 64.7, 64.0, 66.1, 64.6, 67.0, 64.0, 59.0, 65.2, 62.9, 65.4, 63.7, 65.7, 64.1, 65.4, 64.7, 65.3, 65.2, 64.8, 66.4, 65.0, 65.6, 65.5, 67.4, 65.1, 66.8, 65.5, 67.8, 65.1, 69.5, 65.5, 62.5, 66.6, 63.8, 66.4, 64.5, 66.1, 65.0, 66.0, 64.7, 66.0, 65.7, 66.5, 65.5, 65.7, 65.6, 66.0, 66.9, 65.9, 66.6, 65.9, 66.5, 66.5, 67.9, 65.8, 68.3, 66.3, 67.7, 66.1, 68.5, 66.3, 69.4, 66.3, 71.8, 66.4, 62.4, 67.2, 64.5, 67.5, 64.5, 67.0, 63.9, 66.8, 65.4, 67.0, 65.0, 66.8, 65.7, 69.3, 68.7, 69.1, 66.5, 61.7, 64.9, 65.7, 69.6, 69.0, 64.8, 67.4, 65.3, 67.2, 65.8, 67.1, 65.8, 67.3, 65.6, 67.6, 65.9, 67.5, 65.8, 66.9, 67.1, 67.6, 66.6, 67.2, 67.4, 66.8, 67.3, 67.2, 66.6, 67.5, 68.2, 67.6, 67.8, 67.2, 68.3, 67.5, 68.1, 67.4, 69.0, 67.6, 68.9, 67.3, 69.6, 66.8, 70.4, 66.7, 70.0, 66.9, 72.8, 67.6, 62.8, 68.0, 62.9, 68.5, 63.9, 68.0, 64.5, 68.3, 64.5, 68.3, 66.0, 68.3, 65.8, 68.2, 66.0, 68.5, 65.5, 68.1, 65.7, 68.3, 66.8, 68.0, 66.7, 68.6, 67.0, 67.9, 66.9, 68.1, 66.8, 68.4, 67.1, 67.9, 67.7, 68.2, 68.3, 68.0, 67.6, 68.2, 68.4, 67.9, 67.7, 68.6, 68.7, 68.0, 69.3, 68.3, 68.7, 67.9, 69.1, 68.6, 69.3, 68.2, 68.6, 68.6, 69.6, 68.1, 70.4, 68.4, 71.2, 67.8, 70.8, 68.6, 71.7, 67.9, 73.3, 67.8, 63.0, 68.8, 63.7, 69.6, 65.4, 69.7, 64.6, 69.4, 66.4, 69.7, 65.8, 69.2, 65.7, 69.5, 66.1, 69.6, 66.5, 69.3, 66.6, 69.5, 66.6, 68.7, 67.7, 69.3, 68.5, 69.2, 67.8, 69.2, 67.6, 69.5, 68.1, 69.1, 69.2, 68.9, 68.7, 69.5, 68.6, 69.3, 68.6, 69.2, 68.6, 68.7, 70.4, 69.3, 70.0, 68.9, 70.1, 69.3, 70.2, 69.2, 71.3, 69.6, 70.9, 69.1, 72.2, 69.1, 75.0, 69.0, 64.9, 69.9, 65.6, 70.1, 65.7, 69.9, 65.9, 70.3, 65.9, 70.5, 67.4, 70.5, 67.5, 69.8, 67.6, 70.4, 68.5, 70.0, 68.5, 69.8, 68.1, 70.7, 69.5, 70.2, 69.1, 70.1, 69.4, 70.0, 69.4, 70.3, 69.5, 69.8, 70.2, 70.0, 69.9, 69.9, 70.4, 69.7, 70.9, 70.1, 71.3, 70.0, 72.1, 70.7, 72.2, 70.0, 75.4, 70.1, 64.5, 71.3, 66.4, 70.8, 65.6, 71.4, 66.8, 71.2, 66.9, 71.7, 68.2, 71.4, 67.5, 70.7, 67.8, 
71.3, 69.0, 71.0, 69.3, 71.3, 68.7, 70.9, 69.7, 71.3, 70.3, 71.6, 70.0, 71.2, 70.2, 71.0, 70.9, 71.4, 71.2, 71.6, 72.4, 71.1, 73.0, 70.9, 74.8, 71.7, 67.4, 72.4, 67.3, 71.9, 67.8, 72.3, 69.3, 72.2, 68.7, 72.5, 70.0, 72.0, 69.8, 72.3, 70.7, 72.5, 71.1, 72.3, 72.5, 72.0, 72.5, 72.2, 67.5, 72.8, 68.2, 73.0, 68.8, 72.9, 69.9, 73.2, 71.5, 73.6, 70.8, 72.9, 71.9, 73.2, 63.1, 74.3, 68.2, 74.4, 70.1, 73.8, 70.8, 73.9, 72.6, 73.8, 67.9, 75.6, 67.5, 75.7, 72.8, 77.2, 62.7, 61.3, 68.2, 74.3, 65.1, 70.9, 73.4, 75.3, 62.9, 61.8, 62.5, 64.0, 69.9, 62.5, 71.1, 73.7, 71.1, 66.3, 69.5, 62.2, 70.2, 65.4, 65.5, 64.0, 62.0, 62.8, 63.6, 63.5, 65.6, 63.5, 68.0, 62.9, 61.8, 63.7, 63.8, 63.7, 64.9, 64.4, 65.8, 63.7, 66.4, 64.4, 68.8, 64.3, 61.8, 65.2, 64.3, 65.1, 63.7, 65.6, 65.0, 64.9, 65.3, 65.1, 64.8, 65.2, 65.7, 65.6, 66.0, 65.6, 67.0, 64.9, 67.8, 65.4, 69.0, 64.7, 62.2, 65.8, 62.8, 65.8, 63.9, 66.7, 65.4, 66.5, 64.6, 66.4, 65.6, 66.3, 66.2, 66.2, 66.0, 66.4, 65.8, 66.7, 67.4, 66.2, 67.1, 66.4, 67.3, 66.7, 67.9, 65.8, 68.3, 66.2, 68.0, 66.3, 68.7, 65.8, 71.2, 66.3, 62.4, 66.9, 62.9, 66.8, 64.1, 67.4, 63.9, 67.7, 64.8, 67.2, 65.4, 67.3, 64.8, 67.5, 68.7, 69.0, 65.2, 62.0, 64.3, 64.1, 66.1, 66.0, 69.0, 65.8, 64.5, 66.9, 66.1, 66.8, 65.7, 67.0, 66.5, 67.4, 65.6, 66.8, 66.4, 67.3, 67.3, 67.5, 66.8, 67.2, 66.7, 67.6, 67.3, 66.9, 67.4, 66.7, 67.9, 67.2, 67.8, 67.2, 68.1, 66.8, 68.3, 66.8, 68.8, 67.5, 69.4, 67.1, 69.3, 67.1, 70.5, 66.9, 70.1, 67.5, 70.6, 66.9, 62.4, 67.7, 63.2, 67.9, 63.5, 68.7, 63.9, 68.6, 64.6, 68.4, 64.9, 68.4, 65.9, 68.4, 66.2, 68.4, 66.5, 67.9, 65.5, 68.3, 66.9, 68.0, 67.1, 68.2, 66.8, 68.6, 67.2, 68.6, 66.5, 67.8, 67.0, 67.9, 66.6, 68.2, 68.2, 68.0, 67.6, 67.9, 68.3, 67.8, 68.0, 68.6, 69.0, 68.1, 69.3, 67.9, 68.9, 68.5, 68.9, 68.6, 69.4, 68.1, 69.5, 68.1, 70.3, 67.7, 69.9, 68.4, 70.7, 68.6, 70.6, 68.3, 72.4, 68.1, 72.5, 68.4, 62.7, 69.4, 63.9, 69.0, 64.5, 69.4, 64.8, 68.9, 65.4, 69.4, 65.8, 69.3, 66.3, 69.0, 65.8, 69.6, 66.8, 69.2, 67.2, 69.7, 67.3, 68.9, 67.5, 69.1, 
67.9, 69.0, 68.4, 69.2, 67.6, 69.6, 68.5, 68.8, 68.6, 69.2, 69.1, 69.1, 68.6, 69.0, 69.2, 69.3, 68.6, 69.2, 68.5, 68.9, 69.7, 69.4, 70.4, 68.7, 70.0, 68.8, 70.3, 69.4, 71.3, 69.4, 71.3, 69.5, 72.6, 69.2, 64.4, 70.0, 64.9, 69.9, 66.3, 70.0, 66.0, 69.7, 65.7, 69.8, 66.7, 69.9, 66.6, 70.3, 68.3, 69.8, 67.9, 69.9, 68.0, 70.3, 68.3, 70.7, 68.8, 70.4, 69.1, 70.4, 69.0, 70.6, 69.4, 70.2, 69.8, 69.9, 69.6, 70.2, 70.5, 70.3, 69.9, 70.3, 71.0, 69.8, 71.2, 70.5, 71.5, 70.0, 71.6, 69.7, 73.2, 69.9, 63.9, 70.9, 66.0, 71.4, 66.0, 71.2, 67.5, 70.8, 66.7, 70.7, 68.3, 71.3, 67.7, 70.9, 68.3, 70.9, 68.4, 70.9, 69.1, 71.2, 69.1, 71.3, 69.7, 71.2, 70.0, 71.4, 69.6, 71.6, 70.0, 71.0, 70.9, 71.2, 70.6, 71.4, 72.3, 71.4, 71.5, 70.9, 73.0, 71.3, 66.2, 72.1, 67.3, 72.0, 67.8, 72.0, 69.1, 71.7, 69.4, 71.9, 69.6, 72.2, 70.1, 72.3, 70.2, 72.6, 71.3, 72.0, 72.1, 71.8, 72.3, 71.8, 67.1, 73.1, 67.9, 73.4, 69.1, 73.2, 69.6, 73.4, 69.7, 73.2, 70.5, 72.9, 72.4, 72.8, 72.8, 73.3, 68.1, 74.0, 68.6, 74.6, 71.3, 73.9, 72.1, 74.6, 74.7, 74.3, 71.2, 75.1, 68.3, 77.2, 60.4, 60.8, 63.9, 62.4, 63.1, 66.4, 64.0, 58.5, 73.9, 70.0, 72.0, 71.0, 61.0, 65.0, 65.4, 70.5, 72.0, 69.2, 71.3, 64.9, 65.2, 68.8, 68.9, 69.9, 64.5, 62.5, 64.0, 63.3, 66.5, 63.5, 67.1, 62.9, 62.3, 63.9, 63.8, 64.3, 65.4, 64.2, 65.6, 64.6, 66.2, 64.3, 67.6, 63.8, 60.2, 65.7, 63.0, 64.8, 63.6, 65.6, 65.2, 64.7, 65.1, 64.8, 64.8, 65.1, 66.2, 65.6, 66.2, 65.3, 66.6, 65.1, 68.0, 65.1, 69.0, 64.8, 62.1, 66.0, 63.2, 66.4, 64.5, 66.4, 63.8, 66.3, 64.6, 65.8, 64.6, 66.1, 66.1, 66.5, 66.0, 66.3, 65.7, 66.6, 67.1, 66.0, 67.3, 66.5, 67.2, 66.0, 68.4, 66.5, 67.6, 66.4, 67.6, 66.3, 68.5, 66.3, 70.0, 66.6, 61.1, 66.8, 62.7, 67.5, 64.3, 67.2, 64.1, 66.8, 63.7, 67.3, 64.7, 66.8, 64.7, 66.9, 68.0, 68.1, 64.6, 63.6, 66.1, 63.2, 65.3, 65.7, 69.7, 67.2, 65.3, 67.4, 65.5, 67.6, 66.1, 67.2, 66.2, 67.1, 66.0, 66.8, 66.0, 67.1, 65.9, 66.9, 67.1, 66.8, 67.0, 67.0, 67.4, 67.7, 67.4, 67.6, 67.3, 67.7, 68.3, 67.3, 68.3, 67.0, 67.6, 67.0, 68.2, 67.1, 69.1, 67.2, 68.8, 
67.5, 70.5, 67.7, 70.0, 66.9, 69.5, 67.0, 61.5, 67.7, 62.9, 68.5, 64.1, 67.9, 63.9, 67.8, 65.1, 68.3, 64.6, 68.2, 65.9, 68.6, 66.2, 67.8, 65.7, 67.9, 66.1, 67.7, 66.3, 68.0, 67.2, 68.1, 66.9, 67.8, 66.9, 68.1, 66.6, 68.2, 67.2, 68.6, 67.1, 68.3, 67.9, 68.4, 67.9, 68.6, 67.8, 68.3, 67.9, 67.9, 68.6, 67.7, 68.6, 68.4, 69.0, 68.1, 69.0, 68.0, 69.1, 68.2, 68.5, 68.1, 70.2, 68.5, 69.8, 68.6, 69.9, 67.7, 71.5, 68.7, 72.4, 68.6, 71.9, 68.4, 61.0, 69.1, 63.0, 69.4, 64.6, 69.5, 65.4, 69.1, 64.8, 69.6, 65.5, 69.4, 65.6, 69.2, 66.1, 68.8, 67.4, 69.1, 66.6, 69.0, 67.4, 69.1, 67.2, 69.1, 68.2, 68.7, 67.9, 69.0, 68.0, 69.0, 67.7, 69.2, 68.9, 68.8, 68.7, 68.7, 69.1, 69.0, 68.8, 69.3, 68.8, 69.4, 69.3, 68.9, 69.6, 69.3, 69.8, 69.6, 70.2, 69.4, 70.1, 69.0, 71.1, 69.5, 71.4, 68.7, 71.8, 69.2, 64.4, 70.4, 65.2, 70.1, 66.1, 70.4, 65.8, 70.0, 65.5, 69.8, 66.7, 70.4, 67.2, 70.0, 67.2, 70.3, 68.1, 69.9, 67.9, 70.7, 68.2, 70.3, 69.3, 69.7, 68.8, 70.3, 69.2, 70.0, 68.7, 69.7, 69.5, 70.2, 70.0, 70.0, 69.7, 70.3, 70.2, 70.4, 70.8, 69.8, 70.9, 69.8, 71.3, 70.5, 72.3, 70.0, 72.8, 70.6, 64.4, 71.1, 64.9, 71.2, 65.8, 71.0, 67.4, 70.9, 67.4, 70.8, 67.9, 71.5, 67.9, 71.6, 68.5, 70.8, 67.6, 71.0, 69.4, 71.6, 69.3, 71.1, 69.5, 71.5, 70.2, 71.4, 70.0, 71.5, 69.8, 71.0, 69.6, 71.6, 71.5, 71.4, 72.2, 71.2, 72.4, 70.9, 72.5, 71.5, 64.7, 72.4, 66.8, 72.6, 67.8, 71.8, 68.2, 72.0, 69.2, 72.0, 68.9, 72.4, 70.1, 71.9, 70.1, 72.1, 71.0, 72.6, 71.4, 72.5, 72.0, 71.8, 72.7, 72.6, 68.0, 73.1, 69.0, 73.2, 68.9, 73.4, 69.6, 73.4, 71.2, 73.7, 72.0, 72.8, 72.9, 73.0, 65.9, 74.7, 68.5, 73.9, 70.7, 74.4, 72.3, 74.0, 72.6, 73.9, 68.8, 75.7, 73.5, 76.1, 70.1, 78.2, 67.9, 61.9, 64.7, 69.9, 60.8, 62.3, 74.9, 71.4, 70.6, 71.6, 60.9, 65.5, 65.3, 71.9, 71.4, 71.2, 71.7, 64.5, 62.7, 65.3, 71.4, 69.6, 66.6, 65.1, 67.2, 61.0, 62.5, 63.1, 64.9, 63.6, 66.9, 63.5, 62.4, 63.8, 63.6, 64.2, 65.4, 64.7, 65.0, 64.1, 66.4, 64.4, 66.7, 64.6, 59.5, 64.8, 63.0, 65.2, 64.1, 65.6, 64.1, 65.6, 64.5, 64.9, 65.2, 65.6, 66.3, 65.7, 66.0, 65.6, 
66.8, 65.1, 68.2, 64.9, 68.7, 65.7, 61.3, 66.5, 63.3, 66.2, 63.9, 65.9, 64.1, 66.3, 64.8, 66.3, 64.6, 66.2, 66.2, 66.1, 66.5, 66.2, 66.4, 65.8, 67.3, 66.6, 67.4, 66.5, 67.3, 66.3, 67.7, 66.5, 68.4, 66.3, 67.8, 65.9, 69.3, 66.6, 69.7, 66.6, 60.0, 67.3, 62.1, 67.4, 63.6, 67.5, 63.8, 66.9, 64.5, 67.1, 64.7, 67.0, 65.4, 66.9, 65.1, 68.1, 69.3, 67.5, 67.7, 63.0, 64.9, 67.4, 69.5, 68.8, 65.1, 66.9, 65.0, 67.3, 65.6, 67.2, 65.9, 67.1, 65.7, 67.1, 65.9, 67.2, 65.9, 66.7, 67.4, 66.8, 66.5, 67.4, 67.4, 67.4, 67.2, 67.0, 67.3, 67.4, 68.1, 66.9, 68.0, 66.9, 68.0, 66.8, 68.4, 66.8, 68.8, 67.2, 69.5, 67.5, 70.2, 67.7, 69.7, 67.1, 70.2, 67.6, 61.1, 68.0, 62.7, 68.7, 64.3, 68.4, 64.3, 68.0, 65.0, 68.6, 64.8, 68.5, 66.2, 67.8, 66.1, 68.1, 66.4, 68.2, 66.2, 68.5, 65.9, 68.6, 67.0, 68.7, 67.5, 68.4, 66.7, 68.5, 66.7, 68.6, 67.3, 67.9, 67.1, 68.0, 67.8, 68.0, 68.4, 68.6, 67.7, 68.1, 68.1, 67.9, 67.7, 68.6, 69.0, 67.9, 69.2, 68.6, 69.3, 68.7, 69.3, 68.4, 68.8, 68.2, 69.6, 68.3, 69.6, 68.1, 69.9, 67.8, 70.6, 68.2, 72.3, 68.0, 71.6, 68.0, 72.9, 68.1, 63.3, 69.2, 64.3, 69.2, 65.1, 68.9, 65.2, 69.3, 65.9, 69.5, 66.0, 69.0, 65.6, 69.3, 65.8, 69.6, 66.9, 69.4, 67.0, 69.0, 67.4, 69.3, 67.9, 69.1, 67.8, 69.3, 68.4, 69.3, 68.3, 68.7, 68.4, 69.5, 68.7, 69.5, 69.0, 68.9, 69.3, 68.8, 68.8, 69.4, 68.9, 68.8, 69.8, 69.1, 69.7, 69.2, 70.4, 69.3, 70.3, 68.8, 71.0, 69.1, 71.3, 69.4, 72.0, 68.7, 63.3, 70.4, 64.9, 70.5, 65.7, 70.1, 66.1, 69.8, 66.5, 70.3, 66.1, 70.3, 66.7, 69.7, 67.1, 70.1, 67.6, 70.4, 68.2, 69.9, 68.3, 69.8, 68.1, 69.9, 69.2, 70.3, 69.2, 70.2, 68.5, 70.4, 68.8, 70.4, 69.7, 70.7, 69.9, 69.7, 70.5, 70.5, 71.2, 70.5, 70.6, 70.5, 70.5, 70.5, 72.4, 70.3, 73.2, 70.3, 64.1, 71.4, 64.6, 71.0, 65.7, 71.6, 67.1, 70.9, 66.8, 71.4, 68.4, 71.5, 68.3, 71.2, 68.3, 71.6, 68.4, 71.6, 68.7, 71.3, 68.7, 71.1, 69.0, 71.5, 70.2, 71.0, 69.9, 71.5, 70.2, 70.9, 70.2, 71.4, 71.4, 71.3, 70.7, 71.2, 72.4, 71.5, 73.0, 70.8, 64.7, 72.7, 67.1, 72.0, 67.8, 72.3, 68.4, 72.2, 69.2, 72.4, 68.6, 72.0, 69.9, 71.8, 70.2, 
72.5, 70.5, 72.6, 71.0, 71.9, 71.8, 72.6, 72.8, 72.4, 68.0, 73.1, 67.8, 73.6, 69.3, 73.3, 70.5, 73.1, 71.4, 73.7, 72.3, 73.6, 71.9, 72.9, 64.6, 73.9, 67.8, 74.3, 69.9, 73.9, 70.9, 74.2, 72.7, 74.5, 69.0, 75.1, 72.4, 76.4, 69.1, 78.4, 70.2, 61.2, 72.4, 72.6, 59.6, 64.9, 73.3, 73.0, 68.1, 71.8, 63.2, 65.3, 66.0, 60.9, 71.5, 72.1, 68.1, 71.0, 65.3, 61.7, 70.4, 67.5, 68.4, 64.4, 61.9, 63.3, 65.0, 63.5, 66.2, 63.7, 59.5, 63.9, 62.8, 63.9, 63.9, 63.9, 64.6, 64.1, 65.6, 64.7, 66.3, 64.4, 70.6, 63.9, 62.1, 64.8, 64.4, 65.3, 64.4, 65.2, 64.9, 65.5, 65.3, 65.2, 65.2, 65.4, 66.1, 65.6, 66.1, 64.7, 67.4, 64.8, 67.8, 65.6, 70.3, 65.4, 63.2, 66.5, 63.7, 65.7, 64.1, 66.1, 64.7, 66.3, 65.1, 66.6, 65.9, 66.4, 65.7, 66.5, 66.0, 66.3, 67.3, 66.4, 66.7, 66.3, 66.6, 66.1, 66.8, 66.5, 68.2, 66.3, 67.5, 66.1, 68.4, 65.9, 69.1, 65.7, 70.8, 66.1, 61.7, 67.6, 63.0, 67.4, 64.3, 67.0, 63.9, 67.2, 65.5, 67.2, 64.7, 67.4, 64.7, 67.4, 67.5, 68.2, 66.8, 62.7, 65.0, 66.3, 69.4, 65.7, 63.7, 68.4, 64.6, 67.5, 66.0, 67.2, 66.0, 67.2, 66.5, 67.5, 66.3, 66.9, 65.7, 67.7, 66.9, 67.0, 66.7, 66.7, 66.5, 66.8, 67.1, 67.4, 67.0, 66.9, 67.6, 67.5, 67.6, 66.9, 67.5, 67.0, 68.1, 66.9, 68.6, 67.0, 68.5, 67.1, 69.2, 67.5, 69.8, 67.5, 69.9, 67.3, 70.5, 67.6, 62.8, 67.8, 63.2, 68.1, 64.4, 68.5, 64.3, 68.6, 64.7, 67.9, 64.7, 68.6, 66.0, 68.2, 65.6, 68.3, 65.8, 68.2, 65.9, 68.2, 66.6, 68.2, 66.8, 68.6, 66.8, 68.3, 67.2, 68.7, 67.1, 68.5, 66.9, 67.7, 68.0, 68.0, 68.0, 67.8, 68.3, 68.1, 68.3, 68.6, 68.2, 68.2, 68.5, 68.2, 68.5, 68.2, 69.1, 68.1, 69.2, 68.1, 69.3, 68.5, 69.4, 68.4, 70.3, 68.2, 69.7, 67.7, 70.5, 67.9, 71.3, 68.3, 72.2, 68.3, 72.3, 68.3, 62.9, 69.6, 63.9, 68.7, 64.6, 68.9, 65.5, 68.8, 65.9, 69.1, 66.3, 69.0, 65.8, 69.0, 66.5, 68.7, 67.3, 69.5, 67.4, 69.1, 67.1, 69.1, 68.5, 69.6, 67.7, 69.4, 68.1, 69.2, 67.6, 68.8, 67.8, 69.0, 69.3, 69.5, 69.1, 69.6, 68.7, 69.4, 69.0, 69.6, 68.9, 69.4, 68.8, 68.9, 69.9, 68.7, 70.5, 69.3, 70.0, 68.7, 69.8, 68.8, 71.4, 69.5, 71.1, 69.7, 72.7, 69.7, 65.3, 70.5, 66.0, 70.0, 
65.5, 70.6, 65.6, 70.5, 65.9, 70.0, 67.1, 69.9, 66.9, 70.5, 67.9, 69.9, 67.7, 69.7, 67.5, 70.6, 68.0, 70.2, 68.8, 70.3, 69.4, 70.2, 68.6, 70.5, 69.1, 70.5, 70.5, 70.2, 69.9, 70.0, 70.0, 70.2, 69.6, 70.2, 71.0, 70.0, 70.6, 70.4, 71.5, 70.4, 71.6, 70.2, 73.9, 70.2, 65.0, 70.7, 66.3, 71.5, 65.9, 71.4, 66.9, 71.2, 67.2, 70.9, 68.1, 71.3, 67.6, 71.4, 67.6, 71.2, 69.4, 71.2, 68.9, 71.2, 69.1, 71.0, 69.8, 71.4, 70.0, 71.2, 69.6, 71.6, 70.3, 71.3, 70.7, 71.5, 70.9, 70.9, 72.5, 71.5, 73.0, 71.1, 74.4, 70.9, 67.4, 72.7, 66.5, 71.8, 68.0, 72.6, 68.8, 72.5, 69.3, 71.9, 70.3, 72.2, 70.2, 72.6, 70.8, 72.3, 70.7, 72.1, 72.4, 72.3, 72.4, 72.1, 67.2, 72.8, 67.8, 72.8, 68.9, 73.5, 70.4, 73.7, 71.2, 72.8, 71.4, 73.4, 71.7, 73.0, 72.6, 73.2, 67.6, 74.5, 68.6, 73.8, 71.0, 73.8, 72.0, 73.8, 75.2, 73.8, 73.1, 75.6, 69.9, 77.2, 65.5, 60.1, 72.6, 76.8, 72.2, 66.7, 63.2, 58.8, 73.3, 67.9, 65.8, 61.0, 67.7, 59.8, 67.0, 70.8, 71.3, 68.3, 71.8, 69.3, 70.7, 69.3, 70.3, 67.0, ]
# Distribution of the `height` samples defined in the literal above.
plt.hist(height, alpha=0.5)
plt.show()
```
|
github_jupyter
|
```
from __future__ import division
import pandas as pd
import numpy as np
import os
import re
import copy
from pprint import pprint
from glob import glob
import cPickle as pkl
```
### Functions to generate blocks of trials
Localizer, cognitive, and limbic
```
def create_localizer_block_single_effector(n_trials, response_modality='eye',
                                           block_number=None, pseudorandomize=True,
                                           add_timing=True, null_trials=0, TR=2):
    """
    Generates a single localizer block for one response modality.

    Only two trial types exist: 'LEFT' is correct or 'RIGHT' is correct,
    in equal proportions (n_trials should therefore be even).

    Parameters
    ----------
    n_trials : int
        Number of trials in the block, split 50/50 over LEFT/RIGHT cues.
    response_modality : str
        'eye' or 'hand'; stored lower-cased in the 'response_modality' column.
    block_number : int or None
        If given, a 'block' column is added and block_type is 'localizer'.
    pseudorandomize : bool
        Pseudorandomize trial order (at most 3 identical cues/answers in a row).
    add_timing : bool
        Whether to add the phase-timing columns via get_localizer_timing.
    null_trials : int
        Number of null trials to intersperse.
    TR : float
        Repetition time (s), forwarded to the timing generator.

    Returns
    -------
    pd.DataFrame with one row per trial.
    """
    # Integer division: with `from __future__ import division` in this file,
    # `/` yields a float, and modern numpy rejects a float `repeats` argument.
    trial_types = np.repeat([0, 1], repeats=n_trials // 2)  # 0 = left, 1 = right
    # Initialize arrays
    cue_by_trial = np.zeros(n_trials, dtype='<U5')
    correct_answers = np.zeros(n_trials, dtype=np.int8)
    # Define the cues for every trial
    cue_by_trial[(trial_types == 0)] = 'LEFT'
    cue_by_trial[(trial_types == 1)] = 'RIGHT'
    # Define the responses ('correct answers') for every trial
    correct_answers[trial_types == 0] = 0  # 0 = respond LEFT
    correct_answers[trial_types == 1] = 1  # 1 = respond RIGHT
    # Create dataframe for easier handling
    trial_data = pd.DataFrame({'correct_answer': correct_answers,
                               'cue': cue_by_trial,
                               'trial_type': trial_types})
    # Should we pseudorandomize?
    if pseudorandomize:
        trial_data = Pseudorandomizer(trial_data, max_identical_iters={'correct_answer': 3,
                                                                      'cue': 3}).run()
    trial_data['null_trial'] = False
    if null_trials > 0:
        trial_data = add_pseudorandom_null_trials(trial_data, n_null_trials=null_trials)
    # Add block number for completeness
    if block_number is not None:
        trial_data['block'] = block_number
        trial_data['block_type'] = 'localizer'
    # Usually, we also want to add the duration of all the 'trial phases'
    if add_timing:
        trial_data = get_localizer_timing(trial_data, TR=TR)
    trial_data['response_modality'] = response_modality.lower()
    return trial_data
def create_cognitive_block(n_trials, block_number=None, response_modality=None,
                           add_timing=True, pseudorandomize=True, n_null_trials=0, TR=2):
    """
    Creates a block of SAT-trials; mixing speed and accuracy trials.

    Trial types (n_trials // 4 of each):
    0/1 = SPEED cue with left/right correct; 2/3 = ACCURACY cue with left/right correct.

    Parameters
    ----------
    n_trials : int
        Number of trials; must be divisible by 4.
    block_number : int or None
        If given, stored in a 'block' column.
    response_modality : str or None
        e.g. 'hand'/'eye'; stored and used in the 'block_type' label.
    add_timing : bool
        Add phase-timing columns and validate the ITI/jitter combination.
    pseudorandomize : bool
        Pseudorandomize trial order (at most 5 identical cues/answers in a row).
    n_null_trials : int
        Number of null trials to intersperse.
    TR : float
        Repetition time (s), forwarded to the timing generator.

    Raises
    ------
    ValueError if n_trials cannot be split evenly into the 4 trial types.
    """
    # Integer division: `/` is true division here (__future__ import) and
    # modern numpy rejects a float `repeats` argument.
    trial_types = np.hstack((np.repeat([0, 1], repeats=n_trials // 4),   # SPEED cue, left/right corr
                             np.repeat([2, 3], repeats=n_trials // 4)))  # ACCURACY cue, left/right corr
    if trial_types.shape[0] != n_trials:
        raise(ValueError('The provided n_trials (%d) could not be split into the correct number of trial types. '
                         'Closest option is %d trials' % (n_trials, trial_types.shape[0])))
    cue_by_trial = np.zeros(n_trials, dtype='<U5')
    correct_answers = np.zeros(n_trials, dtype=np.int8)
    cue_by_trial[(trial_types == 0) | (trial_types == 1)] = 'SPD'
    cue_by_trial[(trial_types == 2) | (trial_types == 3)] = 'ACC'
    correct_answers[(trial_types == 0) | (trial_types == 2)] = 0  # 0 = left is correct
    correct_answers[(trial_types == 1) | (trial_types == 3)] = 1  # 1 = right is correct
    # Create dataframe for easier handling
    trial_data = pd.DataFrame({'correct_answer': correct_answers,
                               'cue': cue_by_trial,
                               'trial_type': trial_types})
    if pseudorandomize:
        trial_data = Pseudorandomizer(trial_data,
                                      max_identical_iters={'cue': 5, 'correct_answer': 5}).run()
    # Always create the null_trial column: check_good_ITI_phase0 (run when
    # add_timing=True) expects it even when no null trials are interspersed.
    trial_data['null_trial'] = False
    if n_null_trials > 0:
        trial_data = add_pseudorandom_null_trials(trial_data,
                                                  n_null_trials=n_null_trials,
                                                  null_column_name='null_trial')
    if block_number is not None:
        trial_data['block'] = block_number
    if response_modality is not None:
        trial_data['response_modality'] = response_modality
        trial_data['block_type'] = 'cognitive_%s' % response_modality
    if add_timing:
        # Re-draw jitters until no 0-ITI trial is directly followed by a
        # 0-jitter pre-cue fixation (see check_good_ITI_phase0).
        while True:
            trial_data = get_block_timing(trial_data, TR=TR)  # Add default timing
            if check_good_ITI_phase0(trial_data):
                break
    return trial_data
def create_limbic_block(n_trials, subject_number=1, block_number=None,
                        response_modality=None, add_timing=True, pseudorandomize=True,
                        n_null_trials=0, TR=2):
    """
    Creates a block of limbic (bias) trials: half neutral cues, a quarter LEFT
    cues and a quarter RIGHT cues, each split over left/right correct answers.

    Trial types:
    0/1 = NEU cue, left/right correct (n_trials // 4 each)
    2/3 = LEFT cue, left/right correct (n_trials // 8 each)
    4/5 = RIGHT cue, left/right correct (n_trials // 8 each)

    Parameters
    ----------
    n_trials : int
        Number of trials; must be divisible by 8.
    subject_number : int
        Unused here; kept for interface compatibility.
    block_number : int or None
        If given, stored in a 'block' column.
    response_modality : str or None
        e.g. 'hand'/'eye'; stored and used in the 'block_type' label.
    add_timing : bool
        Add phase-timing columns and validate the ITI/jitter combination.
    pseudorandomize : bool
        Pseudorandomize trial order (at most 4 identical cues/answers in a row).
    n_null_trials : int
        Number of null trials to intersperse.
    TR : float
        Repetition time (s), forwarded to the timing generator.

    Raises
    ------
    ValueError if n_trials cannot be split evenly into the 6 trial types.
    """
    # Integer division: `/` is true division here (__future__ import) and
    # modern numpy rejects a float `repeats` argument.
    trial_types = np.hstack((np.repeat([0, 1], repeats=n_trials // 4),   # Neutral cue, left/right corr
                             np.repeat([2, 3], repeats=n_trials // 8),   # Left cue, left/right corr
                             np.repeat([4, 5], repeats=n_trials // 8)))  # Right cue, left/right corr
    if trial_types.shape[0] != n_trials:
        raise(ValueError('The provided n_trials (%d) could not be split into the correct number of trial types. '
                         'Closest option is %d trials' % (n_trials, trial_types.shape[0])))
    cue_by_trial = np.zeros(n_trials, dtype='<U5')
    correct_answers = np.zeros(n_trials, dtype=np.int8)
    cue_by_trial[(trial_types == 0) | (trial_types == 1)] = 'NEU'
    cue_by_trial[(trial_types == 2) | (trial_types == 3)] = 'LEFT'
    cue_by_trial[(trial_types == 4) | (trial_types == 5)] = 'RIGHT'
    correct_answers[(trial_types == 0) |
                    (trial_types == 2) |
                    (trial_types == 4)] = 0  # 0 = left is correct
    correct_answers[(trial_types == 1) |
                    (trial_types == 3) |
                    (trial_types == 5)] = 1  # 1 = right is correct
    # Create dataframe for easier handling
    trial_data = pd.DataFrame({'correct_answer': correct_answers,
                               'cue': cue_by_trial,
                               'trial_type': trial_types})
    if pseudorandomize:
        trial_data = Pseudorandomizer(trial_data,
                                      max_identical_iters={'cue': 4, 'correct_answer': 4}).run()
    # Always create the null_trial column: check_good_ITI_phase0 (run when
    # add_timing=True) expects it even when no null trials are interspersed.
    trial_data['null_trial'] = False
    if n_null_trials > 0:
        trial_data = add_pseudorandom_null_trials(trial_data,
                                                  n_null_trials=n_null_trials,
                                                  null_column_name='null_trial')
    if block_number is not None:
        trial_data['block'] = block_number
    if response_modality is not None:
        trial_data['response_modality'] = response_modality
        trial_data['block_type'] = 'limbic_%s' % response_modality
    if add_timing:
        # Re-draw jitters until no 0-ITI trial is directly followed by a
        # 0-jitter pre-cue fixation (see check_good_ITI_phase0).
        while True:
            trial_data = get_block_timing(trial_data, TR=TR)  # Add default timing
            if check_good_ITI_phase0(trial_data):
                break
    return trial_data
```
### Function that creates timing columns for a block of trials
```
def get_localizer_timing(trial_data, phase_0=None, phase_1=None, phase_2=None, phase_3=None, phase_4=None, phase_5=None, phase_6=None, TR=2):
    """
    Adds phase durations, trial durations and onset times for localizer trials.

    Each localizer trial consists of 8 phases:
    - phase_0: wait for the scanner pulse. Note that phase_0 of trial n is the ITI
      after trial n-1; always 0 by default — the `minimum` time to wait for the pulse.
    - phase_1: pre-cue fixation cross. Jittered by default
      (0.2/0.7/1.2/1.7 s if TR=2; 0/0.75/1.5/2.25 s if TR=3).
    - phase_2: cue. 0.8 s if TR=2; 0.8 s plus an exponential jitter if TR=3.
    - phase_3: post-cue fixation cross. Defaults to 0 s.
    - phase_4: response window (0.6 s if TR=2; 0.8 s if TR=3).
    - phase_5, phase_6: unused here (0 s); exist for compatibility with the
      experimental blocks.
    - phase_7: ITI, filled so that every trial lasts 6 s in total.

    Any phase_* argument overrides the default duration(s) for that phase.
    Onset columns ('trial_start_time_block', 'cue_onset_time_block',
    'stimulus_onset_time_block') are relative to the start of the block.

    Raises
    ------
    ValueError
        If TR is not 2 or 3. (Previously an unsupported TR silently skipped
        the phase columns and caused a cryptic KeyError further down.)
    """
    if TR == 2:
        trial_data['phase_0'] = 0 if phase_0 is None else phase_0
        trial_data['phase_1'] = np.random.choice([0.2, .7, 1.2, 1.7], size=trial_data.shape[0]) if phase_1 is None else phase_1
        trial_data['phase_2'] = 0.8 if phase_2 is None else phase_2
        trial_data['phase_3'] = 0 if phase_3 is None else phase_3
        trial_data['phase_4'] = 0.6 if phase_4 is None else phase_4
        trial_data['phase_5'] = 0 if phase_5 is None else phase_5
        trial_data['phase_6'] = 0 if phase_6 is None else phase_6
    elif TR == 3:
        trial_data['phase_0'] = 0 if phase_0 is None else phase_0
        trial_data['phase_1'] = np.random.choice([0, .750, 1.500, 2.250], size=trial_data.shape[0]) if phase_1 is None else phase_1
        trial_data['phase_2'] = np.round(np.random.exponential(scale=1/6, size=trial_data.shape[0])+.8, 3) if phase_2 is None else phase_2
        trial_data['phase_3'] = 0 if phase_3 is None else phase_3
        trial_data['phase_4'] = 0.8 if phase_4 is None else phase_4
        trial_data['phase_5'] = 0 if phase_5 is None else phase_5
        trial_data['phase_6'] = 0 if phase_6 is None else phase_6
    else:
        raise ValueError('Unsupported TR (%s): only TR=2 and TR=3 are implemented' % str(TR))
    # Calculate duration of phases 0-6 (depends on random, jittered durations of the fix cross)
    trial_data['trial_duration'] = trial_data[['phase_' + str(x) for x in range(7)]].sum(axis=1)
    # The ITI fills every trial to a fixed total of 6 seconds.
    # NOTE(review): for TR=3 the exponential cue jitter could, in rare draws,
    # push the pre-ITI duration past 6 s and make phase_7 negative — confirm upstream.
    trial_data['phase_7'] = 6 - trial_data['trial_duration'].values
    # Recalculate trial duration so it includes the ITI
    trial_data['trial_duration'] = trial_data[['phase_' + str(x) for x in range(8)]].sum(axis=1)
    # Add trial start times relative to start of block
    trial_data['trial_start_time_block'] = trial_data['trial_duration'].shift(1).cumsum()
    trial_data.loc[0, 'trial_start_time_block'] = 0
    # Add cue onset times relative to start of block
    trial_data['cue_onset_time_block'] = trial_data['trial_start_time_block'] + \
        trial_data['phase_1']
    # Add stimulus onset times relative to start of block
    trial_data['stimulus_onset_time_block'] = trial_data['trial_start_time_block'] + \
        trial_data['phase_1'] + \
        trial_data['phase_2'] + \
        trial_data['phase_3']
    return trial_data
def get_block_timing(trial_data, phase_0=None, phase_1=None, phase_2=None, phase_3=None, phase_4=None, phase_5=None, phase_6=None, TR=2):
    """
    Adds phase durations, trial durations and onset times for experimental trials.

    Each trial consists of 8 phases (phase_0 ... phase_7):
    In phase_0, we wait for the scanner pulse. Note that phase_0 of trial n is the ITI after trial n-1. Set this timing always to 0: it is the `minimum` time to wait for the pulse
    In phase_1, we show the pre-cue fixation cross. By default, timing is jittered (0/0.5/1/1.5s if TR=2; 0/0.75/1.5/2.25s if TR=3)
    In phase_2, we show the cue (4.8s if TR=2; 1s if TR=3)
    In phase_3, we show the post-cue fixation cross. Timing is jittered (0/0.5/1/1.5s if TR=2; 0.75/1.5/2.25/3s if TR=3)
    In phase_4, we show the stimulus. Default is 2s.
    Phase 5 is defined as the period of stimulus presentation, after the participant made a response. The duration is determined by the participant RT, so not set here.
    In phase_6, we show feedback (0.35s if TR=2; 0.5s if TR=3).
    Phase_7 is the ITI, computed below so every trial fills a whole number of TRs (TR=2) or exactly 9s (TR=3).

    Any phase_* argument overrides the default duration(s) for that phase.
    NOTE(review): for a TR other than 2 or 3 the phase columns are never
    created and the sums below raise a KeyError — confirm only TR=2/3 are used.
    """
    if TR == 2:
        trial_data['phase_0'] = 0 if phase_0 is None else phase_0
        trial_data['phase_1'] = np.random.choice([0, .5, 1, 1.5], size=trial_data.shape[0]) if phase_1 is None else phase_1
        trial_data['phase_2'] = 4.8 if phase_2 is None else phase_2
        trial_data['phase_3'] = np.random.choice([0, .5, 1, 1.5], size=trial_data.shape[0]) if phase_3 is None else phase_3
        trial_data['phase_4'] = 2 if phase_4 is None else phase_4
        trial_data['phase_5'] = 0 if phase_5 is None else phase_5
        trial_data['phase_6'] = 0.35 if phase_6 is None else phase_6
    elif TR == 3:
        trial_data['phase_0'] = 0 if phase_0 is None else phase_0
        trial_data['phase_1'] = np.random.choice([0, .750, 1.500, 2.250], size=trial_data.shape[0]) if phase_1 is None else phase_1
        trial_data['phase_2'] = 1 if phase_2 is None else phase_2
        trial_data['phase_3'] = np.random.choice([0.750, 1.500, 2.250, 3.000], size=trial_data.shape[0]) if phase_3 is None else phase_3
        trial_data['phase_4'] = 2 if phase_4 is None else phase_4
        trial_data['phase_5'] = 0 if phase_5 is None else phase_5
        trial_data['phase_6'] = 0.5 if phase_6 is None else phase_6
    # Calculate duration of trial (depends on random, jittered durations of the fix cross)
    trial_data['trial_duration'] = trial_data[['phase_' + str(x) for x in range(7)]].sum(axis=1)
    if TR == 2:
        # Because of TR = 2s, some trials can last 8 seconds, but most will last 10. Find trials with total time < 8 seconds
        # We calculate the ITI as the difference between the minimum number of pulses necessary for all phases to show.
        min_TRs = np.ceil(trial_data['trial_duration'].values / TR)
        trial_data['phase_7'] = min_TRs*TR - trial_data['trial_duration'].values
    elif TR == 3:
        # In this case, fill all trials until 9s have passed.
        trial_data['phase_7'] = 9 - trial_data['trial_duration'].values
    # Recalculate trial duration so it includes the ITI
    trial_data['trial_duration'] = trial_data[['phase_' + str(x) for x in range(8)]].sum(axis=1)
    # Add trial start times relative to start of block
    trial_data['trial_start_time_block'] = trial_data['trial_duration'].shift(1).cumsum()
    trial_data.loc[0, 'trial_start_time_block'] = 0
    # Add cue onset times relative to start of block
    trial_data['cue_onset_time_block'] = trial_data['trial_start_time_block'] + \
                                         trial_data['phase_1']
    # Add stimulus onset times relative to start of block
    trial_data['stimulus_onset_time_block'] = trial_data['trial_start_time_block'] + \
                                              trial_data['phase_1'] + \
                                              trial_data['phase_2'] + \
                                              trial_data['phase_3']
    return trial_data
```
### Function to check timing
If ITI after trial n is 0, it is not allowed to have trial n+1 phase1 = 0 (otherwise, a new cue can be shown immediately after feedback)
```
def check_good_ITI_phase0(data):
    """
    Validates the ITI/jitter combination of a timed block.

    Returns False when some trial has an ITI (phase_7) of 0 *and* the trial
    directly after it has a pre-cue fixation (phase_1) of 0 — that combination
    would show a new cue immediately after the previous trial's feedback.
    Null trials break the sequence, so each stretch between them is checked
    separately. Returns True when the timing is acceptable.
    """
    # Segment boundaries: indices of the null trials, plus the final row.
    boundaries = data[data['null_trial'] == True].index.values
    boundaries = np.hstack((boundaries, data.shape[0] - 1))
    segment_start = 0
    for segment_end in boundaries:
        segment = data.iloc[np.arange(segment_start, segment_end)].copy()
        # Align each trial with the *next* trial's phase_1
        segment['phase_1'] = segment['phase_1'].shift(-1)
        next_phase1 = segment['phase_1'].values
        # Bad: next trial's phase_1 equals this trial's phase_7 AND both are 0
        violations = (next_phase1 == segment['phase_7'].values) & (next_phase1 == 0.00)
        if violations.any():
            return False
        segment_start = segment_end + 1
    return True
# dat = create_cognitive_block(n_trials=100,
# block_number=1,
# response_modality='hand',
# n_null_trials=7,
# TR=3)
# print(dat[['phase_1', 'phase_7']].head(6))
# check_good_ITI_phase0(dat)
```
### Class for pseudorandomization
```
class Pseudorandomizer(object):
    """
    Pseudorandomizes the rows of a trial DataFrame so that no column listed in
    `max_identical_iters` contains more than the allowed number of identical
    consecutive values.

    Parameters
    ----------
    data : pd.DataFrame
        Trial data to shuffle.
    max_identical_iters : dict
        Maps column name -> maximum allowed run length of identical values
        (e.g. {'cue': 4} allows at most 4 identical cues in a row).
    """
    def __init__(self, data, max_identical_iters={'cue': 4, 'correct_answer': 4}):
        self.data = data
        # add 1: if 4 rows is allowed, only give an error after 5 identical rows
        self.max_identical_iters = {x: y+1 for x, y in max_identical_iters.items()}
    def check_trial_rows(self, data, row_n):
        """
        Returns True if any of the conditions for pseudorandomization are violated for the given rows,
        False if they are fine.
        """
        # First, check for the maximum iterations
        for column, max_iter in self.max_identical_iters.items():
            if row_n - max_iter < 0:
                # Not enough preceding rows to form a violating run
                continue
            # Select rows [row_n .. row_n-max_iter+1] we're going to check
            row_selection = [x for x in np.arange(row_n, row_n-max_iter, -1)]
            # If the selected rows contain only *1* distinct value, we have
            # max_iter identical rows in a row and something must change.
            if data.iloc[row_selection][column].nunique() == 1:
                return True
        return False
    def run(self, debug=False):
        """
        Pseudorandomizes: makes sure that it is not possible to have more than x iterations for every type of column, specified in columns.
        """
        # Start by copying from original data, and shuffle
        self.data = self.data.sample(frac=1,
                                     random_state=np.random.randint(0, 1e7, dtype='int')).reset_index(drop=True)
        if debug:
            outer_while_i = 0
            debug_print_after_i = 100
        good_set = False
        while not good_set:
            if debug:
                outer_while_i += 1
            reshuffle = False  # Assume the dataset does not need reshuffling.
            for row_n in range(0, self.data.shape[0]):
                # Check whether the current row plus the preceding (max_iters-1)
                # rows all share one value. If so, move the current row to the
                # bottom of the dataframe; the same position must then be
                # re-checked, hence the inner while loop.
                checked_row = False
                n_attempts_at_moving = 0
                if debug:
                    inner_while_i = 0
                while not checked_row:
                    if debug:
                        inner_while_i += 1
                        if inner_while_i > debug_print_after_i:
                            print('New inner loop started for current row')
                    if self.check_trial_rows(self.data, row_n):
                        if debug and inner_while_i > debug_print_after_i:
                            print('Found too many consecutively identical rows.')
                        # If there are too many consecutively identical rows at the bottom
                        # of the dataframe, break and start over/shuffle.
                        # Python 3 fix: dict.keys() is no longer indexable, so take the
                        # first value via next(iter(...)) (was .keys()[0] lookup).
                        if row_n >= (self.data.shape[0] - next(iter(self.max_identical_iters.values()))):
                            if debug and inner_while_i > debug_print_after_i:
                                print('These occurred at row_n %d, which is at the bottom of the DF.' % row_n)
                            checked_row = True
                            reshuffle = True
                        # Too many consecutive identical rows? Move row_n to the bottom, and check again with the new row_n.
                        else:
                            if debug and inner_while_i > debug_print_after_i:
                                print('These occurred at row_n %d. Checking the remainder of the DF.' % row_n)
                            # Check if moving to the bottom even makes sense: if all remaining values are identical, it doesn't.
                            # (list(...) — pandas indexing requires a list-like, not a dict view.)
                            if (self.data.iloc[row_n:][list(self.max_identical_iters.keys())].nunique().values < 2).any():
                                if debug and inner_while_i > debug_print_after_i:
                                    print('All remaining values are identical. I should stop the for-loop, and start over.')
                                checked_row = True
                                reshuffle = True
                            else:
                                if n_attempts_at_moving < 50:
                                    n_attempts_at_moving += 1
                                    if debug and inner_while_i > debug_print_after_i:
                                        print('Not all remaining values are identical. I should move the final part to the bottom.')
                                    # Move the current row to the bottom
                                    row_to_move = self.data.iloc[row_n,:]
                                    # Delete row from df
                                    self.data.drop(row_n, axis=0, inplace=True)
                                    # Append original row to end (pd.concat replaces the
                                    # DataFrame.append API removed in pandas 2.0); reset index.
                                    self.data = pd.concat([self.data, row_to_move.to_frame().T]).reset_index(drop=True)
                                # If we already tried moving the current row to the bottom for 50 times, let's forget about it and restart
                                else:
                                    checked_row = True
                                    reshuffle = True
                    else:
                        if debug and inner_while_i > debug_print_after_i:
                            print('Checked row, but the row is fine. Next row.')
                        checked_row = True
                if reshuffle:
                    good_set = False
                    break  # out of the for loop
                # Reached the bottom of the dataframe, but no reshuffle call? Then we're set.
                if row_n == self.data.shape[0]-1:
                    good_set = True
            if reshuffle:
                # Shuffle; reset index so iloc positions and labels agree again
                self.data = self.data.sample(frac=1, random_state=np.random.randint(0, 1e7, dtype='int')).reset_index(drop=True)
        return self.data
def add_pseudorandom_null_trials(data, min_row=4, max_row=4, min_n_rows_separate=7,
                                 n_null_trials=10, null_column_name=''):
    """
    Adds null trials interspersed at pseudorandom locations, plus one final
    null trial at the very end.

    Parameters
    ----------
    data : pd.DataFrame
        Trial data; should already contain the `null_column_name` column.
    min_row : int
        Minimum number of trials at the start before the first null trial.
    max_row : int
        Number of trials at the end in which no (interspersed) null is placed.
    min_n_rows_separate : int
        Minimum number of rows separating any two null trials.
    n_null_trials : int
        Number of interspersed null trials (one extra is always appended at the end).
    null_column_name : str
        Name of the boolean column marking null trials.

    Returns
    -------
    pd.DataFrame with n_null_trials + 1 extra rows; non-null columns of the
    inserted rows are NaN.
    """
    # Draw candidate positions until all of them are far enough apart
    good_idx = False
    while not good_idx:
        indx = np.random.choice(np.arange(min_row, data.shape[0]-max_row),
                                replace=False, size=n_null_trials)
        diffs = np.diff(np.sort(indx))
        if (diffs >= min_n_rows_separate).all():
            good_idx = True
    # Renumber the existing rows so the chosen indices are free for the nulls
    data.index = np.setdiff1d(np.arange(data.shape[0] + n_null_trials), indx)
    new_rows = pd.DataFrame({null_column_name: [True]*n_null_trials}, columns=data.columns, index=indx)
    # pd.concat replaces the DataFrame.append API removed in pandas 2.0
    data = pd.concat([data, new_rows]).sort_index()
    # Always end with a null trial
    last_row = pd.DataFrame({null_column_name: [True]*1}, columns=data.columns, index=[data.shape[0]])
    data = pd.concat([data, last_row]).sort_index()
    return data
```
### Functions for brute-force optimizing
```
def create_localizer(n_localizer_blocks, n_trials_per_localizer_block, localizer_order, block_number=0, pseudorandomize=True, TR=3):
    """
    Builds the full localizer run: n_localizer_blocks sub-blocks, alternating
    between the two response modalities given in `localizer_order`.

    Parameters
    ----------
    n_localizer_blocks : int
        Total number of sub-blocks; should be even (each loop iteration adds
        one sub-block per modality).
    n_trials_per_localizer_block : int
        Trials per sub-block.
    localizer_order : sequence of str
        Two modalities, e.g. ['hand', 'eye']; order alternates accordingly.
    block_number : int
        Block number stamped on every sub-block.
    pseudorandomize : bool
        Forwarded to create_localizer_block_single_effector.
    TR : float
        Repetition time (s), forwarded to the timing generator.
    """
    # Initiate empty dataframe, generate block
    block_data = pd.DataFrame()
    for localizer_block in range(int(n_localizer_blocks/2)):
        loc_block = create_localizer_block_single_effector(n_trials=n_trials_per_localizer_block,
                                                           response_modality=localizer_order[0],
                                                           block_number=block_number,
                                                           pseudorandomize=pseudorandomize, TR=TR)
        # pd.concat replaces the DataFrame.append API removed in pandas 2.0
        block_data = pd.concat([block_data, loc_block])
        loc_block = create_localizer_block_single_effector(n_trials=n_trials_per_localizer_block,
                                                           response_modality=localizer_order[1],
                                                           block_number=block_number,
                                                           pseudorandomize=pseudorandomize, TR=TR)
        block_data = pd.concat([block_data, loc_block])
    return block_data
# Function to convolve design
from nipy.modalities.fmri import hrf, utils
def stim_to_design(pp_design, block=None, silent=False):
    """
    Converts a trial design DataFrame into a convolved fMRI design matrix.

    Builds boxcar regressors at 0.1 s resolution (all onset/duration values
    are multiplied by 10) for cue and stimulus events, convolves each with
    the Glover HRF, and returns the design matrix plus the EV names.

    Parameters
    ----------
    pp_design : pd.DataFrame
        Design with 'cue_onset_time', 'stimulus_onset_time', 'phase_2',
        'phase_4', 'cue', 'correct_answer', 'block', 'null_trial' columns.
    block : int or None
        If given, restrict the design to this block number.
    silent : bool
        If False, print progress while convolving.

    Returns
    -------
    (X, ev_names) : (np.ndarray, list of str)
        X has one row per 0.1 s time point and one column per EV.
    """
    # Check if we only need to do a subset of the design
    if block is not None:
        pp_design = pp_design.loc[pp_design['block'] == block]
    # Get rid of null trials
    pp_design = pp_design.loc[pp_design['null_trial'] == False,:]
    # hrf.glover is a symbolic function; get a function of time to work on arrays
    hrf_func = utils.lambdify_t(hrf.glover(utils.T))
    # Length of the time axis in 0.1s samples; +25s of padding for the HRF tail
    max_time = np.ceil((pp_design['stimulus_onset_time'].max()+25)*10)
    # Block 0 is the localizer: build separate cue/response EVs per effector
    if 0 in pp_design['block'].unique():
        block0_trials = pp_design.loc[pp_design['block'] == 0]
        # Get cue-types and response types for the first block
        loc_cue_vec = np.zeros(shape=(int(max_time), 4))
        response_vec = np.zeros(shape=(int(max_time), 4))
        loc_cue_names = []
        response_names = []
        i = -1
        for effector_type in block0_trials['response_modality'].unique():
            for cue_type in block0_trials['cue'].unique():
                subset = pp_design.loc[(pp_design['block'] == 0 ) &
                                       (pp_design['response_modality'] == effector_type) &
                                       (pp_design['cue'] == cue_type)]
                i += 1
                response_names.append('resp_%s_%s' % (effector_type, cue_type))
                loc_cue_names.append('cue_%s_%s' % (effector_type, cue_type))
                # Get cue onsets & durations
                onsets = np.round(subset['cue_onset_time'].values*10)
                durations = np.round(subset['phase_2'].values*10)
                for onset, duration in zip(onsets, durations):
                    loc_cue_vec[np.arange(onset, onset+duration, dtype='int'), i] = 1
                # Get response onsets & durations
                onsets = np.round(subset['stimulus_onset_time'].values*10)
                durations = np.round(subset['phase_4'].values*10)
                for onset, duration in zip(onsets, durations):
                    response_vec[np.arange(onset, onset+duration, dtype='int'), i] = 1
        # For all further EVs, make sure not to include the localizer trials.
        pp_design = pp_design.loc[pp_design['block'] > 0]
    # Stimulus EVs: one per (cue type x correct answer) combination
    stim_vec = np.zeros(shape=(int(max_time), pp_design['correct_answer'].nunique()*pp_design['cue'].nunique()))
    stim_names = []
    # Get stimulus onsets and durations
    i = -1
    for stim_type in pp_design['correct_answer'].unique():
        for cue_type in pp_design['cue'].unique():
            i += 1
            stim_names.append('stimulus_' + str(int(stim_type)) + '_' + cue_type)
            subset = pp_design.loc[(pp_design['correct_answer'] == stim_type) &
                                   (pp_design['cue'] == cue_type)]
            stim_onsets = np.round(subset['stimulus_onset_time'].values*10)
            stim_durations = np.round(subset['phase_4'].values*10)
            for onset, duration in zip(stim_onsets, stim_durations):
                stim_vec[np.arange(onset, onset+duration, dtype='int'), i] = 1
    # Get cue onsets by cue type
    cue_names = []
    n_conditions = len(np.unique(pp_design['cue']))
    cue_vec = np.zeros(shape=(int(max_time), n_conditions)) # A column per cue type condition
    i = -1
    for condition in pp_design['cue'].unique():
        i += 1
        cue_names.append('cue_' + condition)
        # Find cue onsets
        onsets = np.round(pp_design.loc[pp_design['cue'] == condition, 'cue_onset_time'].values*10)
        durations = np.round(pp_design.loc[pp_design['cue'] == condition, 'phase_2'].values*10)
        for onset, duration in zip(onsets, durations):
            cue_vec[np.arange(onset, onset+duration, dtype='int'), i] = 1
    # Combine everything in a single array. loc_cue_vec only exists if the
    # localizer (block 0) was part of the requested design.
    if 'loc_cue_vec' in locals():
        ev_vec = np.hstack((loc_cue_vec, response_vec, cue_vec, stim_vec))
        ev_names = loc_cue_names + response_names + cue_names + stim_names
    else:
        ev_vec = np.hstack((cue_vec, stim_vec))
        ev_names = cue_names + stim_names
    # Create hrf to convolve with (sampled at the same 0.1s resolution)
    hrf_full = hrf_func(np.linspace(0, stop=int(max_time/10), num=int(max_time)))
    # Pre-allocate output. This will be an n_timepoints x n_conditions matrix.
    X = np.empty(shape=(int(max_time), ev_vec.shape[1]))
    # Convolve every EV with the HRF, truncated to the time axis length
    for i, ev_name in enumerate(ev_names):
        if not silent:
            print('Convolving %s...' % ev_name)
        X[:, i] = np.convolve(hrf_full, ev_vec[:, i])[:int(max_time)]
    return X, ev_names
def optimize_brute_force(n_trials,
                         c,  # contrasts to be optimized
                         run_type='localizer',
                         block_number=0,
                         pseudorandomize=True,
                         TR=3,
                         n_attempts=1e4,
                         # For non-localizer:
                         n_null_trials=0,
                         response_modality='hand',
                         # for localizer:
                         n_localizer_blocks=None, localizer_order=None):
    """
    Performs a brute force search of the best possible trial order & jitter
    times for a single run.

    For each of n_attempts random seeds a candidate run is generated, its
    design matrix is built via stim_to_design, and the design efficiency of
    the contrast matrix `c` is computed; the most efficient candidate wins.

    Returns
    -------
    (best_block, out_dict)
        best_block : the winning trial DataFrame (None if no attempt improved
        on the initial efficiency of 0); out_dict : all seeds, efficiencies,
        the best seed/block and the contrasts.

    Raises
    ------
    ValueError if run_type is not 'localizer', 'cognitive' or 'limbic'.
    """
    if run_type not in ('localizer', 'cognitive', 'limbic'):
        # Fail fast: an unknown run_type previously left block_data unbound
        # and raised a confusing NameError inside the loop.
        raise ValueError("Unknown run_type '%s'" % run_type)
    n_attempts = int(n_attempts)
    # Generate n_attempt seeds to check
    seeds = np.round(np.random.uniform(low=int(0), high=int(2**32 - 1), size=n_attempts)).astype(int)
    effs = np.zeros(shape=seeds.shape)
    best_eff = 0
    # Initialized up-front: previously best_seed was unbound (NameError when
    # building out_dict) if no attempt ever beat best_eff.
    best_seed = None
    best_block = None
    for i in range(n_attempts):
        # Set seed so the winning design can be regenerated later
        np.random.seed(seed=seeds[i])
        # Generate run
        if run_type == 'localizer':
            block_data = create_localizer(n_localizer_blocks=n_localizer_blocks,
                                          n_trials_per_localizer_block=n_trials,
                                          localizer_order=localizer_order,
                                          block_number=block_number,
                                          pseudorandomize=pseudorandomize, TR=TR)
        elif run_type == 'cognitive':
            block_data = create_cognitive_block(n_trials=n_trials,
                                                block_number=block_number,
                                                response_modality=response_modality,
                                                n_null_trials=n_null_trials,
                                                TR=TR)
        elif run_type == 'limbic':
            block_data = create_limbic_block(n_trials=n_trials,
                                             block_number=block_number,
                                             response_modality=response_modality,
                                             n_null_trials=n_null_trials,
                                             TR=TR)
        # Add trial start times (relative to start of experiment)
        block_data['trial_start_time'] = block_data['trial_duration'].shift(1).cumsum()
        block_data.loc[0, 'trial_start_time'] = 0
        # Add cue onset times (relative to start of experiment)
        block_data['cue_onset_time'] = block_data['trial_start_time'] + \
            block_data['phase_1']
        # Add stimulus onset times (relative to start of experiment)
        block_data['stimulus_onset_time'] = block_data['trial_start_time'] + \
            block_data['phase_1'] + \
            block_data['phase_2'] + \
            block_data['phase_3']
        # Calculate efficiency: n_contrasts over the summed contrast variances
        X, ev_names = stim_to_design(block_data, block=block_number, silent=True)
        # Loop over the contrasts
        dvars = [(c[ii, :].dot(np.linalg.pinv(X.T.dot(X))).dot(c[ii, :].T))
                 for ii in range(c.shape[0])]
        eff = c.shape[0] / np.sum(dvars)
        effs[i] = eff
        # Found a better block than anything earlier? Save this
        if eff > best_eff:
            best_eff = eff
            best_seed = seeds[i]
            best_block = block_data
    # Save everything
    out_dict = {'seeds': seeds,
                'efficiencies': effs,
                'best_eff': best_eff,
                'best_seed': best_seed,
                'best_block': best_block,
                'contrasts': c,
                'contrast_ev_names': ev_names}
    print('Done optimizing, tried %d seeds. Best efficiency: %.4f (mean eff: %.3f (SD %.3f))' %
          (n_attempts, best_eff, np.mean(effs), np.std(effs)))
    return best_block, out_dict
```
## Order of blocks by participant
- X = localizer hand
- Y = localizer eye
- A = cognitive, hand
- B = cognitive, eye
- C = limbic, hand
- D = limbic, eye
4 blocks, 4\*3\*2\*1 = 4! = 24 block orders
2 localizer 'blocks', 2\*1 = 2 possible orders.
In total, 48 possible block orders
```
import itertools
from pprint import pprint
# Enumerate all orderings of the four experimental blocks (4! = 24) and of
# the two localizer blocks (2! = 2): 48 candidate session orders in total.
experimental_orders = list(itertools.permutations("ABCD"))
localizer_orders = list(itertools.permutations("XY"))
# Repeat each localizer order once per experimental order so that both lists
# end up with 48 entries and can be zipped together.
localizer_orders = [loc for loc in localizer_orders for _ in experimental_orders]
# Concatenate each localizer pair with each experimental quadruple
block_order = [loc + exp for loc, exp in zip(localizer_orders, experimental_orders * 2)]
pprint(block_order)
len(block_order)  # 48 possible conditions!
```
## Loop over participant numbers to generate the correct blocks in order, and save
```
# Multiples of 8 below 120. The original condition `x % 4 == 0 and x % 8 == 0`
# was redundant: divisibility by 8 already implies divisibility by 4.
a = [x for x in range(120) if x % 8 == 0]
print(a)
```
## For which participants do you want to create designs?
```
# Design-generation settings: half-open range of participant numbers
participant_range = [25, 26]
n_trials_per_localizer_block = 8
n_localizer_blocks = 6
TR = 3  # repetition time in seconds, used for all blocks
# NOTE(review): machine-specific output directory — adjust before running elsewhere
os.chdir('/Users/steven/Sync/PhDprojects/subcortex/flashtask/designs')
n_optim_attempts = 1e3  # random seeds tried per block by the brute-force optimizer
n_trials_limbic = 80
n_trials_cognitive = 72
n_null_trials_limbic = 8
n_null_trials_cognitive = 7
## Contrasts to optim for
# Localizer: 8 EVs, 4 cue EVs followed by 4 response EVs (see stim_to_design)
c_localizer = np.array([
    [0, 0, 0, 0, 1, 1, 0, 0],   # hand vs baseline
    [0, 0, 0, 0, 0, 0, 1, 1],   # eye vs baseline
    [0, 0, 0, 0, 1, 1, -1, -1]  # hand - eye
])
# Cognitive: [u'cue_ACC', u'cue_SPD', u'stimulus_0_ACC', u'stimulus_0_SPD', u'stimulus_1_ACC', u'stimulus_1_SPD']
# Of interest: ACC vs SPD cue
#              ACC vs SPD stimulus
#              ACC vs SPD trial (cue + stim)
c_cognitive = np.array([
    [1, -1, 0, 0, 0, 0],  # ACC vs SPD cue
    [0, 0, 1, -1, 1, -1], # ACC vs SPD stimulus
    [1, -1, 1, -1, 1, -1] # ACC vs SPD cue and stimulus
])
# Limbic:
# [u'cue_RIGHT', u'cue_NEU', u'cue_LEFT',
#  u'stimulus_1_RIGHT', u'stimulus_1_NEU', u'stimulus_1_LEFT',
#  u'stimulus_0_RIGHT', u'stimulus_0_NEU', u'stimulus_0_LEFT']
# Of interest: direction vs neutral cue; direction vs neutral stim; direction vs neutral cue+stim
c_limbic = np.array([
    [1, -2, 1, 0, 0, 0, 0, 0, 0], # direction vs neutral cue
    [0, 0, 0, 1, -2, 1, 1, -2, 1], # direction vs neutral stim
    [1, -2, 1, 1, -2, 1, 1, -2, 1] # direction vs neutral cue+stim
])
# Loop & Run
# Generate, optimize and save the full design for every requested participant
for pp in range(participant_range[0], participant_range[1]):
    pp_str = str(pp).zfill(3)
    print('Processing pp %s...' % pp_str)
    # Counterbalance: pick this participant's block order from the 48 options
    block_order_this_pp = block_order[pp % len(block_order)]
    # Empty DataFrame
    design_this_pp = pd.DataFrame()
    # Get localizer block; the first entry ('X' or 'Y') decides which
    # modality comes first
    if block_order_this_pp[0] == 'X':
        localizer_order = ['hand', 'eye']
    else:
        localizer_order = ['eye', 'hand']
    design_this_pp, optim_res = optimize_brute_force(n_trials=n_trials_per_localizer_block,
                                                     c=c_localizer,
                                                     run_type='localizer',
                                                     block_number=0,
                                                     pseudorandomize=True,
                                                     TR=3,
                                                     n_attempts=n_optim_attempts,
                                                     n_localizer_blocks=6,
                                                     localizer_order=localizer_order)
    # Persist optimizer results (seeds, efficiencies) for the localizer block
    with open('pp_' + pp_str + '_block_0_optim.pkl', 'wb') as f:
        pkl.dump(optim_res, f)
    # Get blocks. A/B = cognitive (hand/eye), C/D = limbic (hand/eye);
    # block_number-1 compensates for the two localizer entries at positions
    # 0-1, so the experimental blocks are numbered 1-4.
    for block_number, block_char in enumerate(block_order_this_pp):
        if block_char in ['X', 'Y']:
            continue
        if block_char == 'A':
            block_data, optim_res = optimize_brute_force(n_trials=n_trials_cognitive,
                                                         c=c_cognitive,
                                                         run_type='cognitive',
                                                         block_number=block_number-1,
                                                         response_modality='hand',
                                                         n_null_trials=n_null_trials_cognitive,
                                                         TR=TR,
                                                         pseudorandomize=True,
                                                         n_attempts=n_optim_attempts)
        elif block_char == 'B':
            block_data, optim_res = optimize_brute_force(n_trials=n_trials_cognitive,
                                                         c=c_cognitive,
                                                         run_type='cognitive',
                                                         block_number=block_number-1,
                                                         response_modality='eye',
                                                         n_null_trials=n_null_trials_cognitive,
                                                         TR=TR,
                                                         pseudorandomize=True,
                                                         n_attempts=n_optim_attempts)
        elif block_char == 'C':
            block_data, optim_res = optimize_brute_force(n_trials=n_trials_limbic,
                                                         c=c_limbic,
                                                         run_type='limbic',
                                                         block_number=block_number-1,
                                                         response_modality='hand',
                                                         n_null_trials=n_null_trials_limbic,
                                                         TR=TR,
                                                         pseudorandomize=True,
                                                         n_attempts=n_optim_attempts)
        elif block_char == 'D':
            block_data, optim_res = optimize_brute_force(n_trials=n_trials_limbic,
                                                         c=c_limbic,
                                                         run_type='limbic',
                                                         block_number=block_number-1,
                                                         response_modality='eye',
                                                         n_null_trials=n_null_trials_limbic,
                                                         TR=TR,
                                                         pseudorandomize=True,
                                                         n_attempts=n_optim_attempts)
        # Persist optimizer results for this block
        with open('pp_' + pp_str + '_block_' + str(block_number-1) + '_optim.pkl', 'wb') as f:
            pkl.dump(optim_res, f)
        # NOTE(review): DataFrame.append was removed in pandas 2.0 — this
        # needs pd.concat to run on current pandas
        design_this_pp = design_this_pp.append(block_data)
    # Set indices: keep the within-block trial number, add a global trial ID
    design_this_pp.index.name = 'block_trial_ID'
    design_this_pp.reset_index(inplace=True)
    design_this_pp.index.name = 'trial_ID'
    # Add trial start times (relative to start of experiment)
    design_this_pp['trial_start_time'] = design_this_pp['trial_duration'].shift(1).cumsum()
    design_this_pp.loc[0, 'trial_start_time'] = 0
    # Add cue onset times (relative to start of experiment)
    design_this_pp['cue_onset_time'] = design_this_pp['trial_start_time'] + \
        design_this_pp['phase_1']
    # Add stimulus onset times (relative to start of experiment)
    design_this_pp['stimulus_onset_time'] = design_this_pp['trial_start_time'] + \
        design_this_pp['phase_1'] + \
        design_this_pp['phase_2'] + \
        design_this_pp['phase_3']
    # Re-order column order for nicety
    design_this_pp = design_this_pp[['block_trial_ID', 'block', 'block_type', 'null_trial', 'correct_answer', 'cue', 'response_modality', 'trial_type',
                                     'phase_0', 'phase_1', 'phase_2', 'phase_3', 'phase_4', 'phase_5', 'phase_6', 'phase_7', 'trial_duration',
                                     'trial_start_time', 'cue_onset_time', 'stimulus_onset_time',
                                     'trial_start_time_block', 'cue_onset_time_block', 'stimulus_onset_time_block']]
    # Save full data
    if not os.path.exists(os.path.join('pp_%s' % pp_str, 'all_blocks')):
        os.makedirs(os.path.join('pp_%s' % pp_str, 'all_blocks'))
    design_this_pp.to_csv(os.path.join('pp_%s' % pp_str, 'all_blocks', 'trials.csv'), index=True)
    # Save individual blocks
    for block_num, block_type in zip(design_this_pp['block'].unique(), design_this_pp['block_type'].unique()):
        block = design_this_pp.loc[design_this_pp['block'] == block_num]
        if not os.path.exists(os.path.join('pp_%s' % pp_str, 'block_%d_type_%s' % (block_num, block_type))):
            os.makedirs(os.path.join('pp_%s' % pp_str, 'block_%d_type_%s' % (block_num, block_type)))
        block.to_csv(os.path.join('pp_%s' % pp_str, 'block_%d_type_%s' % (block_num, block_type), 'trials.csv'), index=True)
```
# Creating EVs, .fsf-files, and running FSL
All following code is used to transform the created designs into FSL-accepted formats. First, we create 3-column .txt-files for every regressor. Then, we *manually* and painfully make the .fsf file for a single subject (for all designs) in the FSL gui (if someone can point me to a CLI for this, I'd be very grateful).
Finally, we copy the created .fsf-file and substitute all references to the first pp for each other pp.
The current set-up is to model the following EVs:
- Localizer (8 EVs):
1. cue: direction x response modality (left/right x eye/hand) = 4 EVs
2. response: direction x response modality (left/right x eye/hand) = 4 EVs.
- Limbic blocks (9 EVs):
1. cue: direction (left/right/neutral) = 3 EVs
2. stimulus: cued direction (left/right/neutral) x stimulus direction (left/right) = 6 EVs
- Cognitive blocks (6 EVs):
1. cue: instruction (spd/acc) = 2 EVs
2. stimulus: cued instruction (spd/acc) x stimulus direction (left/right) = 4 EVs
In total, the first-level design has 23 EVs (currently not including responses). In the following code, we extract the EV timing from the design files.
### Create EV .txt files that can be read by FSL
```
for pp_num in range(participant_range[0], participant_range[1]):
    print('Processing pp %d...' % (pp_num))
    pp_str = str(pp_num).zfill(3)

    # Helper: write one FSL "3-column" EV file, one row per event:
    # onset <tab> duration <tab> weight.
    # The original opened files in 'wb' and wrote str (TypeError under
    # Python 3) and put every value on its own line, which is not the
    # 3-column format FSL expects.
    def _write_ev(path, rows):
        with open(path, 'w') as f:
            for onset, duration, weight in rows:
                f.write('%s\t%s\t%s\n' % (onset, duration, weight))

    # Get all block directories of this subject, ignoring existing .feat-dirs
    pp_block_dirs = glob('pp_%s/*' % pp_str)
    pp_block_dirs = [x for x in pp_block_dirs if not x.endswith('.feat')]

    # Loop over blocks
    for pp_block_dir in pp_block_dirs:
        # For 'all_blocks' or the localizer block, use the "global"
        # (experiment-wide) timing to create EVs; otherwise use the
        # within-block timing.
        if 'all_blocks' in pp_block_dir or 'localizer' in pp_block_dir:
            stim_onset_col = 'stimulus_onset_time'
            cue_onset_col = 'cue_onset_time'
        else:
            stim_onset_col = 'stimulus_onset_time_block'
            cue_onset_col = 'cue_onset_time_block'

        # Define output directory, create if it doesn't exist
        output_dir = os.path.join(pp_block_dir, 'evs')
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)

        # Read design from csv, keep only the columns needed here
        design = pd.read_csv(os.path.join(pp_block_dir, 'trials.csv'))
        design = design[['correct_answer', 'trial_type', 'block', 'phase_2', 'phase_4',
                         'cue', cue_onset_col, stim_onset_col,
                         'response_modality', 'null_trial']]

        # Null trials are not modelled
        design = design.loc[design['null_trial'] != True]

        # Weights of all EVs are 1
        design['weight'] = 1

        # Localizer block (block 0): 4 cue EVs and 4 response EVs
        # (direction left/right x response modality eye/hand)
        if 0 in design['block'].unique():
            subs = design.loc[design['block'] == 0]
            for effector in subs['response_modality'].unique():
                for cue in subs['cue'].unique():
                    sel = (subs['response_modality'] == effector) & (subs['cue'] == cue)
                    # response EV: stimulus onset, phase_4 duration
                    ev = subs.loc[sel, [stim_onset_col, 'phase_4', 'weight']].values.tolist()
                    _write_ev(os.path.join(output_dir, 'ev_resp_%s_%s.txt' % (effector, cue)), ev)
                    # cue EV: cue onset, phase_2 duration
                    # (this second EV was mislabelled "responses" before)
                    ev = subs.loc[sel, [cue_onset_col, 'phase_2', 'weight']].values.tolist()
                    _write_ev(os.path.join(output_dir, 'ev_cue_%s_%s.txt' % (effector, cue)), ev)

        # Get rid of the localizer block here
        design = design.loc[design['block'] > 0]

        # All decision-making blocks: model the cue types (spd/acc or left/neu/right)
        for cue_type in design['cue'].unique():
            # Cue onset time, cue duration, and cue weight
            ev = design.loc[(design['cue'] == cue_type), [cue_onset_col, 'phase_2', 'weight']].values.tolist()
            _write_ev(os.path.join(output_dir, 'ev_cue_%s.txt' % cue_type), ev)

        # Stimulus EVs: stimulus direction (left/right) x cue type
        for stim_type in design['correct_answer'].unique():
            if np.isnan(stim_type):  # a nan stimtype corresponds to a null trial, so skip
                continue
            for cue_type in design['cue'].unique():
                # Stimulus onset time, stimulus duration, and weight
                ev = design.loc[(design['correct_answer'] == stim_type) &
                                (design['cue'] == cue_type),
                                [stim_onset_col, 'phase_4', 'weight']].values.tolist()
                _write_ev(os.path.join(output_dir, 'ev_stimulus_%d_%s.txt' % (stim_type, cue_type)), ev)
```
### Load FSL design text file for pp 1, and create for all other pps
Before running this, the .fsf-design files for pp1 (each block) should be created manually!
```
# Copy pp001's manually created design.fsf files to all other participants,
# substituting directory names. ('_type_limbic_hand' had a stray '*' prefix
# before, inconsistent with the other entries; the glob below already
# prepends the wildcard.)
for block_name in ['all_blocks', '_type_localizer', '_type_cognitive_hand',
                   '_type_cognitive_eye', '_type_limbic_eye', '_type_limbic_hand']:
    # Get the template .fsf-file from pp001
    pp_001_fn = glob(os.path.join('pp_001', '*' + block_name, 'design.fsf'))[0]
    pp_001_block_name = pp_001_fn.split('/')[1]
    # Text mode: .fsf is plain text and re.sub below operates on str
    # (the previous 'rb'/'wb' modes yielded bytes, breaking re.sub on py3)
    with open(pp_001_fn, 'r') as f:
        fsf_templ = f.readlines()
    for pp in range(participant_range[0], participant_range[1]):
        fsf_thispp = copy.copy(fsf_templ)
        # Path where this pp's design.fsf will be saved
        this_pp_fn = os.path.join(glob(os.path.join('pp_' + str(pp).zfill(3), '*' + block_name))[0], 'design.fsf')
        this_pp_block_name = this_pp_fn.split('/')[1]
        for i, line in enumerate(fsf_thispp):
            # Block directory names differ per pp (block order is randomized),
            # so replace the full directory name except for 'all_blocks'
            if not block_name == 'all_blocks':
                fsf_thispp[i] = re.sub(pp_001_block_name,
                                       this_pp_block_name, line)
            fsf_thispp[i] = re.sub('pp_001', 'pp_' + str(pp).zfill(3), fsf_thispp[i])
        with open(this_pp_fn, 'w') as f:
            f.writelines(fsf_thispp)
```
## Loop over subject and block directories, calling command line feat every time
```
# Run FSL's feat on every block design of every participant. feat is
# executed from inside each block directory so relative paths inside
# design.fsf resolve correctly; the working directory is restored at the end.
wd = os.getcwd()
for sub in range(participant_range[0], participant_range[1]):
    os.chdir(wd)
    sub_dir = 'pp_' + str(sub).zfill(3)
    block_dirs = [
        d for d in os.listdir(sub_dir)
        if os.path.isdir(os.path.join(sub_dir, d)) and '.feat' not in d
    ]
    for block_dir in block_dirs:
        os.chdir(os.path.join(wd, sub_dir, block_dir))
        os.system('feat design.fsf')
os.chdir(wd)
```
## The following is old, do not run
design creation without optimizing:
|
github_jupyter
|
```
from pathlib import Path
import pandas as pd
import numpy as np
import xarray as xr
import gcsfs
from typing import List
import io
import hashlib
import os
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import torch
from torch import nn
import torch.nn.functional as F
import pytorch_lightning as pl
import nowcasting_dataset.time as nd_time
from nowcasting_dataset.dataset import worker_init_fn, NetCDFDataset
from nowcasting_dataset.geospatial import osgb_to_lat_lon
import tilemapbase
from neptune.new.integrations.pytorch_lightning import NeptuneLogger
from neptune.new.types import File
import logging
logging.basicConfig()
logger = logging.getLogger('nowcasting_dataset')
logger.setLevel(logging.DEBUG)
%%time
# Pre-prepared training batches stored as NetCDF on GCS: 12,500 batches,
# cached locally under /home/jack/temp/train.
train_dataset = NetCDFDataset(12_500, 'gs://solar-pv-nowcasting-data/prepared_ML_training_data/v2/train/', '/home/jack/temp/train')
#validation_dataset = NetCDFDataset(1_000, 'gs://solar-pv-nowcasting-data/prepared_ML_training_data/v2/validation/', '/home/jack/temp/validation')
def get_batch():
    """Fetch one batch from ``train_dataset`` — handy for interactive testing."""
    # Initialise as worker 0, just as the DataLoader's worker_init_fn would.
    train_dataset.per_worker_init(0)
    return train_dataset[1]
# DataLoader over pre-batched data. The dataset already returns complete
# batches, hence batch_size=None (automatic batching disabled).
train_dataloader = torch.utils.data.DataLoader(
    train_dataset,
    pin_memory=True,
    num_workers=24,
    prefetch_factor=8,
    worker_init_fn=worker_init_fn,
    persistent_workers=True,
    # Disable automatic batching because dataset
    # returns complete batches.
    batch_size=None,
)
```
## Define simple ML model
```
# Model / data hyper-parameters used throughout this notebook.
params = dict(
    batch_size=32,
    history_len=6,  #: Number of timesteps of history, not including t0.
    forecast_len=12,  #: Number of timesteps of forecast.
    image_size_pixels=32,
    # Numerical-weather-prediction input channels.
    nwp_channels=('t', 'dswrf', 'prate', 'r', 'sde', 'si10', 'vis', 'lcc', 'mcc', 'hcc'),
    # Satellite imagery channels.
    sat_channels=(
        'HRV', 'IR_016', 'IR_039', 'IR_087', 'IR_097', 'IR_108', 'IR_120',
        'IR_134', 'VIS006', 'VIS008', 'WV_062', 'WV_073')
)

# Initialise tilemapbase's tile cache (used for the OSM map in plot_example).
tilemapbase.init(create=True)
def plot_example(batch, model_output, example_i: int=0, border: int=0):
    """Plot one example from a batch: three satellite frames, an OSM map of
    the location, NWP timeseries, datetime features, and actual vs predicted
    PV yield.

    Args:
        batch: dict of tensors as produced by the dataloader.
        model_output: model predictions for the whole batch.
        example_i: index of the example within the batch to plot.
        border: unused (kept for interface compatibility).

    Returns:
        The matplotlib Figure.
    """
    fig = plt.figure(figsize=(20, 20))
    ncols = 4
    nrows = 2

    # Satellite data: image extent in OSGB coordinates.
    extent = (
        float(batch['sat_x_coords'][example_i, 0].cpu().numpy()),
        float(batch['sat_x_coords'][example_i, -1].cpu().numpy()),
        float(batch['sat_y_coords'][example_i, -1].cpu().numpy()),
        float(batch['sat_y_coords'][example_i, 0].cpu().numpy()))  # left, right, bottom, top

    def _format_ax(ax):
        # Mark the PV system's location with a white cross.
        ax.scatter(
            batch['x_meters_center'][example_i].cpu(),
            batch['y_meters_center'][example_i].cpu(),
            s=500, color='white', marker='x')

    # First satellite frame (start of history).
    ax = fig.add_subplot(nrows, ncols, 1)  #, projection=ccrs.OSGB(approx=False))
    sat_data = batch['sat_data'][example_i, :, :, :, 0].cpu().numpy()
    sat_min = np.min(sat_data)
    sat_max = np.max(sat_data)
    ax.imshow(sat_data[0], extent=extent, interpolation='none', vmin=sat_min, vmax=sat_max)
    ax.set_title('t = -{}'.format(params['history_len']))
    _format_ax(ax)

    # Satellite frame at t = 0.
    ax = fig.add_subplot(nrows, ncols, 2)
    ax.imshow(sat_data[params['history_len']+1], extent=extent, interpolation='none', vmin=sat_min, vmax=sat_max)
    ax.set_title('t = 0')
    _format_ax(ax)

    # Last satellite frame (end of forecast).
    ax = fig.add_subplot(nrows, ncols, 3)
    ax.imshow(sat_data[-1], extent=extent, interpolation='none', vmin=sat_min, vmax=sat_max)
    ax.set_title('t = {}'.format(params['forecast_len']))
    _format_ax(ax)

    # OpenStreetMap tile covering the same geographic extent.
    ax = fig.add_subplot(nrows, ncols, 4)
    lat_lon_bottom_left = osgb_to_lat_lon(extent[0], extent[2])
    lat_lon_top_right = osgb_to_lat_lon(extent[1], extent[3])
    tiles = tilemapbase.tiles.build_OSM()
    lat_lon_extent = tilemapbase.Extent.from_lonlat(
        longitude_min=lat_lon_bottom_left[1],
        longitude_max=lat_lon_top_right[1],
        latitude_min=lat_lon_bottom_left[0],
        latitude_max=lat_lon_top_right[0])
    plotter = tilemapbase.Plotter(lat_lon_extent, tile_provider=tiles, zoom=6)
    plotter.plot(ax, tiles)

    ############## TIMESERIES ##################
    # NWP channels over the target times.
    ax = fig.add_subplot(nrows, ncols, 5)
    nwp_dt_index = pd.to_datetime(batch['nwp_target_time'][example_i].cpu().numpy(), unit='s')
    pd.DataFrame(
        batch['nwp'][example_i, :, :, 0, 0].T.cpu().numpy(),
        index=nwp_dt_index,
        columns=params['nwp_channels']).plot(ax=ax)
    ax.set_title('NWP')

    # Cyclical datetime features.
    ax = fig.add_subplot(nrows, ncols, 6)
    ax.set_title('datetime features')
    datetime_feature_cols = ['hour_of_day_sin', 'hour_of_day_cos', 'day_of_year_sin', 'day_of_year_cos']
    datetime_features_df = pd.DataFrame(index=nwp_dt_index, columns=datetime_feature_cols)
    for key in datetime_feature_cols:
        datetime_features_df[key] = batch[key][example_i].cpu().numpy()
    datetime_features_df.plot(ax=ax)
    ax.legend()
    ax.set_xlabel(nwp_dt_index[0].date())

    # Actual vs predicted PV yield.
    ax = fig.add_subplot(nrows, ncols, 7)
    ax.set_title('PV yield for PV ID {:,d}'.format(batch['pv_system_id'][example_i].cpu()))
    pv_actual = pd.Series(
        batch['pv_yield'][example_i].cpu().numpy(),
        index=nwp_dt_index,
        name='actual')
    pv_pred = pd.Series(
        model_output[example_i].detach().cpu().numpy(),
        index=nwp_dt_index[params['history_len']+1:],
        name='prediction')
    pd.concat([pv_actual, pv_pred], axis='columns').plot(ax=ax)
    ax.legend()
    # fig.tight_layout()
    return fig
# Normalisation constants for the OSGB satellite coordinates.
SAT_X_MEAN = np.float32(309000)
SAT_X_STD = np.float32(316387.42073603)
SAT_Y_MEAN = np.float32(519000)
SAT_Y_STD = np.float32(406454.17945938)

# Total sequence length: history + t0 + forecast.
TOTAL_SEQ_LEN = params['history_len'] + params['forecast_len'] + 1
CHANNELS = 32  # conv channels in the first two satellite conv layers
N_CHANNELS_LAST_CONV = 4  # conv channels of the final satellite conv layer
KERNEL = 3  # conv kernel size
EMBEDDING_DIM = 16  # PV-system-ID embedding size (a falsy value disables it)
NWP_SIZE = 10 * 2 * 2  # channels x width x height
N_DATETIME_FEATURES = 4  # sin/cos of hour-of-day and day-of-year
# Flattened CNN output size: three 3x3 valid convs shrink each spatial dim by 6.
CNN_OUTPUT_SIZE = N_CHANNELS_LAST_CONV * ((params['image_size_pixels'] - 6) ** 2)
FC_OUTPUT_SIZE = 8  # size of the per-timestep fully-connected feature vector
RNN_HIDDEN_SIZE = 16
class LitAutoEncoder(pl.LightningModule):
    """CNN + GRU encoder/decoder that predicts future PV yield.

    Per timestep, satellite imagery (plus extra location channels) passes
    through a small CNN and is concatenated with a PV-system embedding. The
    per-timestep features, together with NWP and datetime features, feed a
    GRU encoder (which also sees the recent PV yield history); its hidden
    state initialises a GRU decoder over the forecast timesteps.
    """

    def __init__(
        self,
        history_len = params['history_len'],
        forecast_len = params['forecast_len'],
    ):
        super().__init__()
        self.history_len = history_len
        self.forecast_len = forecast_len

        # Satellite CNN. Input channels: satellite bands + 5 extra channels
        # (center marker, geo x, geo y, pixel x, pixel y) added in forward().
        self.sat_conv1 = nn.Conv2d(in_channels=len(params['sat_channels'])+5, out_channels=CHANNELS, kernel_size=KERNEL)#, groups=history_len+1)
        self.sat_conv2 = nn.Conv2d(in_channels=CHANNELS, out_channels=CHANNELS, kernel_size=KERNEL) #, groups=CHANNELS//2)
        self.sat_conv3 = nn.Conv2d(in_channels=CHANNELS, out_channels=N_CHANNELS_LAST_CONV, kernel_size=KERNEL) #, groups=CHANNELS)
        #self.maxpool = nn.MaxPool2d(kernel_size=KERNEL)

        # Fully connected head over the flattened CNN output.
        self.fc1 = nn.Linear(
            in_features=CNN_OUTPUT_SIZE,
            out_features=256)
        self.fc2 = nn.Linear(
            in_features=256 + EMBEDDING_DIM,
            out_features=128)
        #self.fc2 = nn.Linear(in_features=EMBEDDING_DIM + N_DATETIME_FEATURES, out_features=128)
        self.fc3 = nn.Linear(in_features=128, out_features=64)
        self.fc4 = nn.Linear(in_features=64, out_features=32)
        self.fc5 = nn.Linear(in_features=32, out_features=FC_OUTPUT_SIZE)

        # Embedding of the PV system ID (enabled when EMBEDDING_DIM is truthy).
        if EMBEDDING_DIM:
            self.pv_system_id_embedding = nn.Embedding(
                num_embeddings=940,
                embedding_dim=EMBEDDING_DIM)

        # GRU encoder and decoder. The encoder's extra "+ 1" input is the
        # recent PV yield history appended in forward().
        self.encoder_rnn = nn.GRU(
            input_size=FC_OUTPUT_SIZE + N_DATETIME_FEATURES + 1 + NWP_SIZE,  # plus 1 for history
            hidden_size=RNN_HIDDEN_SIZE,
            num_layers=2,
            batch_first=True)
        self.decoder_rnn = nn.GRU(
            input_size=FC_OUTPUT_SIZE + N_DATETIME_FEATURES + NWP_SIZE,
            hidden_size=RNN_HIDDEN_SIZE,
            num_layers=2,
            batch_first=True)
        self.decoder_fc1 = nn.Linear(
            in_features=RNN_HIDDEN_SIZE,
            out_features=8)
        self.decoder_fc2 = nn.Linear(
            in_features=8,
            out_features=1)

        ### EXTRA CHANNELS
        # Center marker: small square marking the image center.
        # NOTE(review): these are plain tensors rather than registered
        # buffers; they are moved to the data's device manually in forward().
        new_batch_size = params['batch_size'] * TOTAL_SEQ_LEN
        self.center_marker = torch.zeros(
            (
                new_batch_size,
                1,
                params['image_size_pixels'],
                params['image_size_pixels']
            ),
            dtype=torch.float32, device=self.device)
        half_width = params['image_size_pixels'] // 2
        self.center_marker[..., half_width-2:half_width+2, half_width-2:half_width+2] = 1

        # Pixel x & y coordinate channels.
        pixel_range = (torch.arange(params['image_size_pixels'], device=self.device) - 64) / 37
        pixel_range = pixel_range.unsqueeze(0).unsqueeze(0)
        self.pixel_x = pixel_range.unsqueeze(-2).expand(new_batch_size, 1, params['image_size_pixels'], -1)
        self.pixel_y = pixel_range.unsqueeze(-1).expand(new_batch_size, 1, -1, params['image_size_pixels'])

    def forward(self, x):
        """Forward pass over a batch dict; returns predicted PV yield for
        the forecast timesteps (squeezed)."""
        # ******************* Satellite imagery *************************
        # Shape: batch_size, seq_length, width, height, channel
        # TODO: Use optical flow, not actual sat images of the future!
        sat_data = x['sat_data']
        batch_size, seq_len, width, height, n_chans = sat_data.shape

        # Stack timesteps as extra examples
        new_batch_size = batch_size * seq_len
        # 0 1 2 3
        sat_data = sat_data.reshape(new_batch_size, width, height, n_chans)

        # Conv2d expects channels to be the 2nd dim!
        sat_data = sat_data.permute(0, 3, 1, 2)
        # Now shape: new_batch_size, n_chans, width, height

        ### EXTRA CHANNELS
        # geo-spatial x, normalised, broadcast over rows, repeated per timestep
        x_coords = x['sat_x_coords']  # shape: batch_size, image_size_pixels
        x_coords = x_coords - SAT_X_MEAN
        x_coords = x_coords / SAT_X_STD
        x_coords = x_coords.unsqueeze(1).expand(-1, width, -1).unsqueeze(1).repeat_interleave(repeats=TOTAL_SEQ_LEN, dim=0)

        # geo-spatial y
        y_coords = x['sat_y_coords']  # shape: batch_size, image_size_pixels
        y_coords = y_coords - SAT_Y_MEAN
        y_coords = y_coords / SAT_Y_STD
        y_coords = y_coords.unsqueeze(-1).expand(-1, -1, height).unsqueeze(1).repeat_interleave(repeats=TOTAL_SEQ_LEN, dim=0)

        # Concat all extra channels onto the satellite data, moving the
        # constant channels to the data's device first if needed.
        if sat_data.device != self.center_marker.device:
            self.center_marker = self.center_marker.to(sat_data.device)
            self.pixel_x = self.pixel_x.to(sat_data.device)
            self.pixel_y = self.pixel_y.to(sat_data.device)
        sat_data = torch.cat((sat_data, self.center_marker, x_coords, y_coords, self.pixel_x, self.pixel_y), dim=1)
        del x_coords, y_coords

        # Pass data through the network :)
        out = F.relu(self.sat_conv1(sat_data))
        #out = self.maxpool(out)
        out = F.relu(self.sat_conv2(out))
        #out = self.maxpool(out)
        out = F.relu(self.sat_conv3(out))

        out = out.reshape(new_batch_size, CNN_OUTPUT_SIZE)
        out = F.relu(self.fc1(out))

        # ********************** Embedding of PV system ID *********************
        if EMBEDDING_DIM:
            pv_embedding = self.pv_system_id_embedding(x['pv_system_row_number'].repeat_interleave(TOTAL_SEQ_LEN))
            out = torch.cat(
                (
                    out,
                    pv_embedding
                ),
                dim=1)

        # Fully connected layers.
        out = F.relu(self.fc2(out))
        out = F.relu(self.fc3(out))
        out = F.relu(self.fc4(out))
        out = F.relu(self.fc5(out))

        # ******************* PREP DATA FOR RNN *****************************************
        out = out.reshape(batch_size, TOTAL_SEQ_LEN, FC_OUTPUT_SIZE)  # TODO: Double-check this does what we expect!

        # The RNN encoder gets recent history: satellite, NWP, datetime features, and recent PV history.
        # The RNN decoder gets what we know about the future: satellite, NWP, and datetime features.

        # *********************** NWP Data **************************************
        nwp_data = x['nwp'].float()  # Shape: batch_size, channel, seq_length, width, height
        nwp_data = nwp_data.permute(0, 2, 1, 3, 4)  # RNN expects seq_len to be dim 1.
        batch_size, nwp_seq_len, n_nwp_chans, nwp_width, nwp_height = nwp_data.shape
        nwp_data = nwp_data.reshape(batch_size, nwp_seq_len, n_nwp_chans * nwp_width * nwp_height)

        # Concat per-timestep CNN features with NWP and datetime features.
        rnn_input = torch.cat(
            (
                out,
                nwp_data,
                x['hour_of_day_sin'].unsqueeze(-1),
                x['hour_of_day_cos'].unsqueeze(-1),
                x['day_of_year_sin'].unsqueeze(-1),
                x['day_of_year_cos'].unsqueeze(-1),
            ),
            dim=2)

        # The encoder additionally sees the recent PV yield history.
        pv_yield_history = x['pv_yield'][:, :self.history_len+1].unsqueeze(-1)
        encoder_input = torch.cat(
            (
                rnn_input[:, :self.history_len+1],
                pv_yield_history
            ),
            dim=2)

        encoder_output, encoder_hidden = self.encoder_rnn(encoder_input)
        decoder_output, _ = self.decoder_rnn(rnn_input[:, -self.forecast_len:], encoder_hidden)
        # decoder_output is shape batch_size, seq_len, rnn_hidden_size

        decoder_output = F.relu(self.decoder_fc1(decoder_output))
        decoder_output = self.decoder_fc2(decoder_output)

        return decoder_output.squeeze()

    def _training_or_validation_step(self, batch, is_train_step):
        """Shared step: forward pass, log MSE and MAE, return the MAE loss."""
        y_hat = self(batch)
        y = batch['pv_yield'][:, -self.forecast_len:]
        #y = torch.rand((32, 1), device=self.device)
        mse_loss = F.mse_loss(y_hat, y)
        nmae_loss = (y_hat - y).abs().mean()
        # TODO: Compute correlation coef using np.corrcoef(tensor with shape (2, num_timesteps))[0, 1]
        # on each example, and taking the mean across the batch?
        tag = "Train" if is_train_step else "Validation"
        self.log_dict({f'MSE/{tag}': mse_loss}, on_step=is_train_step, on_epoch=True)
        self.log_dict({f'NMAE/{tag}': nmae_loss}, on_step=is_train_step, on_epoch=True)
        return nmae_loss

    def training_step(self, batch, batch_idx):
        return self._training_or_validation_step(batch, is_train_step=True)

    def validation_step(self, batch, batch_idx):
        # Log an example plot on the first validation batch of each epoch.
        if batch_idx == 0:
            # Plot example
            model_output = self(batch)
            fig = plot_example(batch, model_output)
            self.logger.experiment['validation/plot'].log(File.as_image(fig))
        return self._training_or_validation_step(batch, is_train_step=False)

    def configure_optimizers(self):
        optimizer = torch.optim.Adam(self.parameters(), lr=0.001)
        return optimizer
# Train on a single GPU with Neptune experiment tracking.
model = LitAutoEncoder()
logger = NeptuneLogger(project='OpenClimateFix/predict-pv-yield')
logger.log_hyperparams(params)
print('logger.version =', logger.version)
trainer = pl.Trainer(gpus=1, max_epochs=10_000, logger=logger)
trainer.fit(model, train_dataloader)
```
|
github_jupyter
|
<a class="anchor" id="2nd-bullet">
### 1.1. Import the needed libraries
</a>
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# data partition
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import PowerTransformer
import matplotlib.pyplot as plt
import seaborn as sns
from math import ceil
from sklearn.metrics import confusion_matrix
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.metrics import precision_recall_curve
from regressors import stats
#filter methods
# spearman
# chi-square
import scipy.stats as stats
from scipy.stats import chi2_contingency
#wrapper methods
from sklearn.linear_model import LogisticRegression
from sklearn.feature_selection import RFE
# embedded methods
from sklearn.linear_model import LassoCV
import warnings
warnings.filterwarnings('ignore')
```
<a class="anchor" id="3rd-bullet">
### Import the dataset
</a>
```
# Load the training data, using Access_ID as the row index, and peek at it.
df = pd.read_csv('train.csv', index_col='Access_ID')
df.head(3)
```
<a class="anchor" id="4th-bullet">
## 3. Data Understanding
</a>
<a class="anchor" id="4th-bullet">
### 3.1 Logical Checks
</a>
```
# Check the info of the dataset
df.info()
# no missing values

# Fix data types - dummy variables will be created later.
# NOTE: Type_of_Traffic and Browser become *strings* here, so any later
# comparison against their values must compare strings, not ints.
df.Type_of_Traffic = df.Type_of_Traffic.astype("str")
df.Browser = df.Browser.astype("str")
df["Date"]=pd.to_datetime(df["Date"])

# Check distribution of the (binary, imbalanced) target variable
print(df["Buy"].value_counts())
print("Percent of positive labels: " + str(round(df["Buy"].value_counts()[1]/len(df),2)))
#MISSING: checking the page values and duration variables
```
<a class="anchor" id="4th-bullet">
### 3.2 Data exploration
</a>
```
# Summary statistics of the numeric features (transposed for readability)
df.describe().T
```
#### Observations:
- the dataset doesn't have null values
- it has outliers in some features - need to explore and solve them
- it has 9,999 observations and 15 features (9 numerical and 6 categorical)
- the dependent variable is 'Buy'
```
# Split the dataset into train (75%) and validation (25%); stratify on the
# imbalanced target so both sets keep the same positive-label proportion.
X = df.drop('Buy', axis=1)
y = df['Buy']
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.25, random_state=5, stratify=y)

# Define metric (numeric) and non-metric (categorical/date) features
non_metric_features = ['Date', 'OS', 'Browser', 'Country', 'Type_of_Traffic', 'Type_of_Visitor']
metric_features = X_train.columns.drop(non_metric_features).to_list()
# All Numeric Variables' Box Plots in one figure
sns.set()
# Prepare figure. Create individual axes where each box plot will be placed
fig, axes = plt.subplots(3, ceil(len(metric_features) / 3), figsize=(22, 15))
# Plot data
# Iterate across axes objects and associate each box plot (hint: use the ax argument):
for ax, feat in zip(axes.flatten(), metric_features):  # Notice the zip() function and flatten() method
    sns.boxplot(y=X_train[feat], ax=ax)
    #ax.set_title(feat, y=-0.16)
# Layout
# Add a centered title to the figure:
title = "Numeric Variables' Box Plots"
plt.suptitle(title, y=0.91)
# plt.savefig(os.path.join('..', 'figures', 'numeric_variables_boxplots.png'), dpi=200)
plt.show()

# All Numeric Variables' Histograms in one figure
sns.set()
# Prepare figure. Create individual axes where each histogram will be placed
fig, axes = plt.subplots(3, ceil(len(metric_features) / 3), figsize=(22, 15))
# Plot data
# Iterate across axes objects and associate each histogram (hint: use the ax.hist() instead of plt.hist()):
for ax, feat in zip(axes.flatten(), metric_features):  # Notice the zip() function and flatten() method
    ax.hist(X_train[feat], bins=30)
    ax.set_title(feat, y=-0.15)
# Layout
# Add a centered title to the figure:
title = "Numeric Variables' Histograms"
plt.suptitle(title, y=0.91)
# plt.savefig(os.path.join('..', 'figures', 'numeric_variables_histograms.png'), dpi=200)
plt.show()

# All Numeric Variables' Box Plots in one figure - split by the dependent variable
sns.set()
# Prepare figure. Create individual axes where each box plot will be placed
fig, axes = plt.subplots(3, ceil(len(metric_features) / 3), figsize=(22, 15))
# Plot data
# Iterate across axes objects and associate each box plot (hint: use the ax argument):
for ax, feat in zip(axes.flatten(), metric_features):  # Notice the zip() function and flatten() method
    sns.boxplot(y=X_train[feat], x=y_train, ax=ax)
# Layout
# Add a centered title to the figure:
title = "Numeric Variables' Box Plots"
plt.suptitle(title, y=0.91)
# plt.savefig(os.path.join('..', 'figures', 'numeric_variables_boxplots.png'), dpi=200)
plt.show()

# All Numeric Variables' Histograms in one figure - coloured by the dependent variable
sns.set()
# Prepare figure. Create individual axes where each histogram will be placed
fig, axes = plt.subplots(3, ceil(len(metric_features) / 3), figsize=(22, 15))
# Plot data
# Iterate across axes objects and associate each histogram (hint: use the ax.hist() instead of plt.hist()):
for ax, feat in zip(axes.flatten(), metric_features):  # Notice the zip() function and flatten() method
    sns.histplot(data=X_train, x=feat, hue=y_train, ax=ax, bins=30)
# Layout
# Add a centered title to the figure:
title = "Numeric Variables' Histograms"
plt.suptitle(title, y=0.91)
# plt.savefig(os.path.join('..', 'figures', 'numeric_variables_histograms.png'), dpi=200)
plt.show()
```
Observations:
- the dataset has outliers in all of the numeric features
- the data is right-skewed in all of the numeric features; there are many zero values in every feature except 'GoogleAnalytics_ExitRate'
- the distribution of the observations that didn't buy the product is very similar to that of those that did. This means it can be difficult for the model to learn the differences between these two groups
<a class="anchor" id="4th-bullet">
### 3.3 Outliers
</a>
```
# Baseline model performance (numeric features only, no outlier treatment)
X_train_num = X_train[metric_features]
X_val_num = X_val[metric_features]

#define model
model1 = LogisticRegression().fit(X_train_num, y_train)
y_pred_train = model1.predict(X_train_num)
y_pred_val = model1.predict(X_val_num)

# Results: train f1, plus validation confusion matrix / precision / f1
print('f1_train:', f1_score(y_train, y_pred_train))
print(confusion_matrix(y_val, y_pred_val))
print('precision:', precision_score(y_val, y_pred_val))
print('f1_val:', f1_score(y_val, y_pred_val))

# Keep a backup of the full training frame so outlier filters can be re-tried
df_train = pd.concat([X_train, y_train], axis=1)
df_train_backup = df_train.copy()
# Automatic outlier removal: for each numeric feature (in order) drop rows
# farther than 5 standard deviations from that feature's mean. Note that the
# statistics are recomputed on the progressively filtered frame. Not
# currently in use. (NB: despite the original note, this is mean +/- 5*std
# based, not IQR based.)
def outliers(df_train, metric_features):
    for variable in metric_features:
        centre = df_train[variable].mean()
        spread = df_train[variable].std()
        lower, upper = centre - 5 * spread, centre + 5 * spread
        keep = (df_train[variable] > lower) & (df_train[variable] < upper)
        df_train = df_train.loc[keep]
    return df_train
# Apply the automatic outlier removal and report how much data is kept
df_train = outliers(df_train,metric_features)
print('Percentage of data kept after removing outliers:', np.round(df_train.shape[0] / df_train_backup.shape[0], 4))

#testing model performance after removing outliers
X_train_num = df_train.drop(['Buy'], axis=1)[metric_features]
y_train = df_train['Buy']

#define model
model2 = LogisticRegression().fit(X_train_num, y_train)
y_pred_train = model2.predict(X_train_num)
y_pred_val = model2.predict(X_val_num)

#results
print('f1_train:', f1_score(y_train, y_pred_train))
print(confusion_matrix(y_val, y_pred_val))
print('precision:', precision_score(y_val, y_pred_val))
print('f1_val:', f1_score(y_val, y_pred_val))

# Reset the dataset to re-try with manually chosen thresholds
df_train = df_train_backup.copy()
# Manually defined thresholds for outliers, chosen from the box plots
filters1 = (
    (df_train['AccountMng_Duration']<=2000)
    &(df_train['AccountMng_Pages']<=20)
    &(df_train['GoogleAnalytics_BounceRate']<=.17)
    &(df_train['FAQ_Duration']<=1500)
    &(df_train['FAQ_Pages']<=13)
    &(df_train['Product_Pages']<=500)
    &(df_train['Product_Duration']<=25000)
    &(df_train['GoogleAnalytics_PageValue']<=300)
)
# Looser thresholds found after feature selection (applied further below)
filters2 = (
    (df_train['AccountMng_Duration']<=2000)
    &
    (df_train['FAQ_Duration']<=2000)
    &
    (df_train['Product_Pages']<=650)
    &
    (df_train['Product_Duration']<=50000)
    &
    (df_train['GoogleAnalytics_PageValue']<=350)
)
df_train = df_train[filters1]
print('Percentage of data kept after removing outliers:', np.round(df_train.shape[0] / df_train_backup.shape[0], 4))
# Testing model performance after removing outliers using manual thresholds
X_train_num = df_train.drop(['Buy'], axis=1)[metric_features]
y_train = df_train['Buy']

#define model
model3 = LogisticRegression().fit(X_train_num, y_train)
y_pred_train = model3.predict(X_train_num)
y_pred_val = model3.predict(X_val_num)

#results
print('f1_train:', f1_score(y_train, y_pred_train))
print(confusion_matrix(y_val, y_pred_val))
print('precision:', precision_score(y_val, y_pred_val))
print('f1_val:', f1_score(y_val, y_pred_val))
```
Observations:
- at this stage, using automated outlier removal was the better option, but this was partially due to the inclusion of all numeric variables, such as FAQ_Duration, which has many outliers but little relevance. As we refined our variable selection, manual outlier selection produced a more accurate model.
- filters2 is the result of finding the best outlier filters after feature selection, which we will use here to keep the results accurate to our report.
```
# Restore the backup and apply the final (post-feature-selection) filters
df_train = df_train_backup.copy()
df_train = df_train[filters2]
print('Percentage of data kept after removing outliers:', np.round(df_train.shape[0] / df_train_backup.shape[0], 4))
```
<a class="anchor" id="4th-bullet">
## 4. Data Preparation
</a>
<a class="anchor" id="4th-bullet">
### 4.1 Feature Engineering and Transformation
</a>
```
X_train = df_train.drop(['Buy'], axis=1)
y_train = df_train['Buy']

# ---- Create dummy / engineered variables in the train data ----

# Type of visitor: two binary indicators.
# NOTE: dict_visitor is deliberately re-assigned below; the validation cell
# further down re-uses whatever it last held, so keep the assignment order.
dict_visitor = {'Returner': 0, 'New_Access': 1, 'Other': 0}
X_train['Type_of_Visitor_new'] = X_train['Type_of_Visitor'].map(dict_visitor)
dict_visitor = {'Returner': 1, 'New_Access': 0, 'Other': 0}
X_train['Type_of_Visitor_return'] = X_train['Type_of_Visitor'].map(dict_visitor)

# Type of traffic: high/medium groups. Type_of_Traffic was cast to str
# during data understanding, so compare as strings — the original compared
# against ints ([7, 8, 15]), which never matched and left the columns all 0.
X_train["Type_of_Traffic_high"] = X_train["Type_of_Traffic"].map(lambda x: 1 if str(x) in {'7', '8', '15'} else 0)
X_train["Type_of_Traffic_med"] = X_train["Type_of_Traffic"].map(lambda x: 1 if str(x) in {'10', '11', '2', '5'} else 0)
X_train = pd.get_dummies(X_train, columns=["Type_of_Traffic"], drop_first=True)

# Month of the visit; months 8-11 are flagged as the high season.
# (The original `1 if x>7 & x<12` was an operator-precedence bug: `&` binds
# tighter than the comparisons, so it parsed as `x > (7 & x) < 12` and
# wrongly flagged month 12 as well.)
X_train["Month"] = X_train["Date"].map(lambda x: x.month)
X_train["Months_high"] = X_train["Month"].map(lambda x: 1 if 7 < x < 12 else 0)

# Days since the visit, relative to a fixed reference date
today = pd.to_datetime("2021-01-01")
X_train["Time_not_visited"] = X_train["Date"].map(lambda x: (today - x).days)

# OS: Apple indicator plus one dummy per OS (the catch-all 'Other' is dropped)
X_train["is_apple"] = X_train["OS"].map(lambda x: 1 if (x in ['iOS', 'MacOSX']) else 0)
X_train = pd.get_dummies(X_train, columns=['OS'], drop_first=True)
X_train.drop('OS_Other', inplace=True, axis=1)
# ---- Same engineered variables for the validation data ----

# Type of visitor: use explicit dicts here — the original re-used the
# mutable `dict_visitor` name, so BOTH validation columns were built from
# the 'return' mapping and Type_of_Visitor_new was wrong on X_val.
_dict_visitor_new = {'Returner': 0, 'New_Access': 1, 'Other': 0}
_dict_visitor_return = {'Returner': 1, 'New_Access': 0, 'Other': 0}
X_val['Type_of_Visitor_new'] = X_val['Type_of_Visitor'].map(_dict_visitor_new)
X_val['Type_of_Visitor_return'] = X_val['Type_of_Visitor'].map(_dict_visitor_return)

# Type of traffic (compare as strings: the column was cast to str earlier)
X_val["Type_of_Traffic_high"] = X_val["Type_of_Traffic"].map(lambda x: 1 if str(x) in {'7', '8', '15'} else 0)
X_val["Type_of_Traffic_med"] = X_val["Type_of_Traffic"].map(lambda x: 1 if str(x) in {'10', '11', '2', '5'} else 0)
X_val = pd.get_dummies(X_val, columns=["Type_of_Traffic"], drop_first=True)

# Month / high-season flag (same operator-precedence fix as for the train set)
X_val["Month"] = X_val["Date"].map(lambda x: x.month)
X_val["Months_high"] = X_val["Month"].map(lambda x: 1 if 7 < x < 12 else 0)
X_val["Time_not_visited"] = X_val["Date"].map(lambda x: (today - x).days)

# OS
X_val["is_apple"] = X_val["OS"].map(lambda x: 1 if (x in ['iOS', 'MacOSX']) else 0)
X_val = pd.get_dummies(X_val, columns=['OS'], drop_first=True)
# Engineering: average time spent per page, per site section.
# NOTE(review): pandas yields inf for Duration/0 when Duration > 0; the
# fillna(0) below only repairs the 0/0 (NaN) case. Confirm that Pages == 0
# always implies Duration == 0 in this data.
X_train["Mng"] = X_train.AccountMng_Duration/X_train.AccountMng_Pages
X_train["FAQ"] = X_train.FAQ_Duration/X_train.FAQ_Pages
X_train["Product"] = X_train.Product_Duration/X_train.Product_Pages
X_train.fillna(0, inplace=True)

# Same for the validation data
X_val["Mng"] = X_val.AccountMng_Duration/X_val.AccountMng_Pages
X_val["FAQ"] = X_val.FAQ_Duration/X_val.FAQ_Pages
X_val["Product"] = X_val.Product_Duration/X_val.Product_Pages
X_val.fillna(0, inplace=True)
#engineering: Country data
# 2019 GDP index per country; the 'Other' bucket is given a neutral 100.
country_gdp_2019 = {
"Portugal": 79,
"Spain": 91,
"Brazil": 100,
"France": 106,
"Other": 100,
"Italy": 96,
"United Kingdom": 104,
"Germany": 120,
"Switzerland": 157
}
# 2019 digital-adoption score per country (same key set as above).
country_digital_2019 = {
"Portugal": 20.71,
"Spain": 32.48,
"Brazil": 62.03,
"France": 52.84,
"Other": 57.80,
"Italy": 39.79,
"United Kingdom": 72.77,
"Germany": 58.69,
"Switzerland": 67.49
}
#engineering: Country data train
# Map each row's country to the two external indicators above.
# (A country missing from the dicts would raise KeyError here.)
X_train["country_gdp_2019"] = X_train["Country"].apply(lambda x: country_gdp_2019[x])
X_train["country_digital_2019"] = X_train["Country"].apply(lambda x: country_digital_2019[x])
#engineering: Country data val
X_val["country_gdp_2019"] = X_val["Country"].apply(lambda x: country_gdp_2019[x])
X_val["country_digital_2019"] = X_val["Country"].apply(lambda x: country_digital_2019[x])
#creating log transformations of numeric variables
# Clamp values <= 1 up to 1 before taking the natural log, so the transform
# is defined everywhere and zero/one counts map to log(1) = 0.
# Series.clip(lower=1) is element-for-element identical to the previous
# per-value map, just vectorized, and the loop removes the train/val
# copy-paste duplication.
for frame in (X_train, X_val):
    #AccountMng_Pages
    frame["logAccountMng_Pages"] = np.log(frame["AccountMng_Pages"].clip(lower=1))
    #AccountMng_Duration
    frame["logAccountMng_Duration"] = np.log(frame["AccountMng_Duration"].clip(lower=1))
    #FAQ_Pages
    frame["logFAQ_Pages"] = np.log(frame["FAQ_Pages"].clip(lower=1))
#confirming the same variables were created for both sets
X_train.shape[1]==X_val.shape[1]
#MISSING: scaling numeric variables
#scaler = MinMaxScaler().fit(X_train_num)
#X_train_num_scaled = scaler.transform(X_train_num) # this will return an array
# Convert the array to a pandas dataframe
#X_train_num_scaled = pd.DataFrame(X_train_num_scaled, columns = X_train_num.columns).set_index(X_train.index)
#X_val_num_scaled = scaler.transform(X_val_num)
#X_val_num_scaled = pd.DataFrame(X_val_num_scaled, columns = X_val_num.columns).set_index(X_val.index)
#Power transforming variables
# everything not in metric_features is treated as categorical
non_metric_features = X_train.columns.drop(metric_features).to_list()
#separate numeric and non-numeric
X_train_num = X_train[metric_features]
X_train_cat = X_train[non_metric_features]
# DO IT for validation
X_val_num = X_val[metric_features]
X_val_cat = X_val[non_metric_features]
#use train to power transform train
# (fitting on the training split only avoids leaking validation statistics)
power = PowerTransformer().fit(X_train_num)
X_train_num_power = power.transform(X_train_num)
X_train_num_power = pd.DataFrame(X_train_num_power, columns = X_train_num.columns).set_index(X_train_num.index)
#and for validation (using train data)
X_val_num_power = power.transform(X_val_num)
# Convert the array to a pandas dataframe
X_val_num_power = pd.DataFrame(X_val_num_power, columns = X_val_num.columns).set_index(X_val_num.index)
X_val_num_power.head(3)
# re-attach the categorical columns to the transformed numeric ones
X_train_power = pd.concat([X_train_num_power, X_train_cat], axis=1)
X_val_power = pd.concat([X_val_num_power, X_val_cat], axis=1)
```
<a class="anchor" id="4th-bullet">
### 4.2 Feature Selection
</a>
```
#none of the features are univariate
# variance of each numeric feature — a zero-variance column would carry no signal
X_train_num.var()
# append the target so the correlation heatmap includes feature-target links
all_train_num = X_train_num.join(y_train)
def cor_heatmap(cor):
    """Render a correlation matrix as an annotated red-scale heatmap."""
    plt.figure(figsize=(12, 10))
    # annotate each cell with the coefficient, one significant digit
    sns.heatmap(cor, annot=True, fmt='.1', cmap=plt.cm.Reds)
    plt.show()
#build correlation matrix
# Spearman rank correlation captures monotonic (not only linear) association
cor_spearman = all_train_num.corr(method ='spearman')
cor_heatmap(cor_spearman)
```
Observations:
- Features highly correlated (keep only one):
- __'AccountMng_Pages'__ and __'AccountMng_Duration'__ (Number of pages visited and total amount of time spent by the user - account management related pages)
- __'FAQ_Pages'__ and __'FAQ_Duration'__ (Number of pages visited and total amount of time spent by the user - FAQ related pages)
- __'Product_Pages'__ and __'Product_Duration'__ (Number of pages visited and total amount of time spent by the user - Product related pages)
    - __'GoogleAnalytics_BounceRate'__ and __'GoogleAnalytics_ExitRate'__ (Bounce and exit rate; both explain the exit rate of the pages visited by the user)
```
#lasso part 1: correlated features only
# Fit an L1-penalised logistic regression on just the correlated pairs so the
# penalty reveals which member of each pair carries more signal.
X_train_num_sub = X_train_num[['AccountMng_Pages', 'AccountMng_Duration', 'FAQ_Pages', 'FAQ_Duration',
'Product_Pages', 'Product_Duration', 'GoogleAnalytics_BounceRate',
'GoogleAnalytics_ExitRate']]
# liblinear is one of the solvers that supports the l1 penalty
lasso1 = LogisticRegression(penalty='l1', solver='liblinear')
lasso1.fit(X_train_num_sub, y_train)
coef = pd.Series(lasso1.coef_[0], index = X_train_num_sub.columns)
print("Lasso picked " + str(sum(coef != 0)) + " variables and eliminated the other " + str(sum(coef == 0)) + " variables")
coef.sort_values()
```
Observations:
- the number of pages visited variables were more valuable for the first three pairs, and ‘GoogleAnalytics_ExitRate’ was more important than ‘GoogleAnalytics_BounceRate’, so the latter variable was dropped for each of the above pairs.
```
# Drop the weaker member of each highly correlated pair (see the lasso above),
# identically from the train and validation frames.
X_train_power.drop(["GoogleAnalytics_BounceRate","AccountMng_Duration","Product_Duration","FAQ_Duration"], inplace=True, axis=1)
X_val_power.drop(["GoogleAnalytics_BounceRate","AccountMng_Duration","Product_Duration","FAQ_Duration"], inplace=True, axis=1)
#chi-squared test for categorical variables
def TestIndependence(X, y, var, alpha=0.05):
    """Print whether categorical feature `var` is associated with the target.

    Runs a chi-squared test of independence on the crosstab of y and X and
    reports, at significance level `alpha`, whether the feature looks useful.

    X : pd.Series of categorical feature values
    y : pd.Series of target values
    var : str, feature name used in the printed message
    alpha : float, significance threshold (default 0.05)
    Returns None; the verdict is printed.
    """
    dfObserved = pd.crosstab(y, X)
    # only the p-value is used for the verdict; the expected-frequency table
    # returned by chi2_contingency was previously wrapped in an unused
    # DataFrame (dead code, removed)
    chi2, p, dof, expected = stats.chi2_contingency(dfObserved.values)
    if p < alpha:
        result = "{0} is IMPORTANT for Prediction".format(var)
    else:
        result = "{0} is NOT an important predictor. (Discard {0} from model)".format(var)
    print(result)
df_sub = df_train[['Date', 'OS', 'Browser', 'Country', 'Type_of_Traffic', 'Type_of_Visitor']]
# run the chi-squared screen over each categorical candidate column
for var in df_sub:
TestIndependence(df_train[var],df_train["Buy"], var)
# drop 'Country' from both splits (presumably flagged as unimportant by the
# test above — confirm against the printed output)
X_train_power.drop("Country",inplace=True,axis=1)
X_val_power.drop("Country",inplace=True,axis=1)
# keep only the numeric columns for the modelling steps below
X_train_num_power = X_train_power.select_dtypes(include=np.number).set_index(X_train_power.index)
X_val_num_power = X_val_power.select_dtypes(include=np.number).set_index(X_val_power.index)
#lasso regression part 2
def plot_importance(coef, name):
    """Plot model coefficients as a horizontal bar chart, sorted ascending."""
    ordered = coef.sort_values()
    plt.figure(figsize=(8, 10))
    ordered.plot(kind="barh")
    plt.title("Feature importance using " + name + " Model")
    plt.show()
# L1 logistic regression over all remaining numeric features;
# C=.4 strengthens the penalty so more coefficients are driven to zero
lasso2 = LogisticRegression(penalty='l1', solver='liblinear', C=.4)
lasso2.fit(X_train_num_power, y_train)
coef = pd.Series(lasso2.coef_[0], index = X_train_num_power.columns)
print("Lasso picked " + str(sum(coef != 0)) + " variables and eliminated the other " + str(sum(coef == 0)) + " variables")
coef.sort_values()
plot_importance(coef,'Lasso')
# drop the weak features (per the lasso result above), identically from the
# train and validation frames
X_train_num_power.drop(['OS_Ubuntu','Type_of_Traffic_5','OS_Fedora',
'OS_Chrome OS','Type_of_Traffic_9','Type_of_Traffic_7',
'Type_of_Traffic_6','OS_Windows','Type_of_Traffic_3',
'Type_of_Traffic_14','Type_of_Traffic_12','Type_of_Traffic_med',
'Type_of_Traffic_high','Type_of_Visitor_new','Type_of_Traffic_4'], inplace=True, axis=1)
X_val_num_power.drop(['OS_Ubuntu','Type_of_Traffic_5','OS_Fedora',
'OS_Chrome OS','Type_of_Traffic_9','Type_of_Traffic_7',
'Type_of_Traffic_6','OS_Windows','Type_of_Traffic_3',
'Type_of_Traffic_14','Type_of_Traffic_12','Type_of_Traffic_med',
'Type_of_Traffic_high','Type_of_Visitor_new','Type_of_Traffic_4'], inplace=True, axis=1)
#RFE loop test with remaining variables
# Try every feature-subset size from 1 to 26 and keep the size whose
# RFE-selected feature set scores best on the validation split.
#no of features
nof_list = np.arange(1, 27)
high_score = 0
#Variable to store the optimum features
nof = 0
score_list = []
for n in nof_list:
    model = LogisticRegression()
    # CONSISTENCY/BUGFIX: pass the count as a keyword, matching the RFE calls
    # below — the positional second argument is removed in recent scikit-learn.
    rfe = RFE(model, n_features_to_select=n)
    X_train_rfe = rfe.fit_transform(X_train_num_power, y_train)
    X_val_rfe = rfe.transform(X_val_num_power)
    model.fit(X_train_rfe, y_train)
    score = model.score(X_val_rfe, y_val)
    score_list.append(score)
    if score > high_score:
        high_score = score
        nof = n
print("Optimum number of features: %d" %nof)
print("Score with %d features: %f" % (nof, high_score))
# inspect which features survive at two fixed subset sizes
model = LogisticRegression()
rfe = RFE(estimator = model, n_features_to_select = 10)
X_rfe = rfe.fit_transform(X = X_train_num_power, y = y_train)
selected_features = pd.Series(rfe.support_, index = X_train_num_power.columns)
selected_features
model = LogisticRegression()
rfe = RFE(estimator = model, n_features_to_select = 4)
X_rfe = rfe.fit_transform(X = X_train_num_power, y = y_train)
selected_features = pd.Series(rfe.support_, index = X_train_num_power.columns)
selected_features
```
Observations:
- Important variables include 'GoogleAnalytics_PageValue', 'Type_of_Visitor_return', 'Type_of_Traffic_11' and 'Type_of_Traffic_8'
<a class="anchor" id="4th-bullet">
### 4.3 Data Balancing
</a>
|
github_jupyter
|
# Exercise Set 5: Python plotting
*Morning, August 15, 2018*
In this Exercise set we will work with visualizations in python, using two powerful plotting libraries. We will also quickly touch upon using pandas for exploratory plotting.
```
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
# render figures inline in the notebook
%matplotlib inline
# seaborn's bundled example datasets
iris = sns.load_dataset('iris')
titanic = sns.load_dataset('titanic')
```
## Exercise Section 5.1: Exploring the data with plots
We will work with the two datasets `iris` and `titanic` both of which you should already have loaded. The goal with the plots you produce in this section is to give yourself and your group members an improved understanding of the datasets.
> **Ex. 5.1.1:**: Show the first five rows of the titanic dataset. What information is in the dataset? Use a barplot to show the probability of survival for men and women within each passenger class. Can you make a boxplot showing the same information (why/why not?). _Bonus:_ show a boxplot for the fare-prices within each passenger class.
>
> Spend five minutes discussing what you can learn about the survival-selection aboard titanic from the figure(s).
>
> > _Hint:_ https://seaborn.pydata.org/generated/seaborn.barplot.html, specifically the `hue` option.
```
# [Answer to Ex. 5.1.1]
# Will be in assignment 1
```
> **Ex. 5.1.2:** Using the iris flower dataset, draw a scatterplot of sepal length and petal length. Include a second order polynomial fitted to the data. Add a title to the plot and rename the axis labels.
> _Discuss:_ Is this a meaningful way to display the data? What could we do differently?
>
> For a better understanding of the dataset this image might be useful:
> <img src="iris_pic.png" alt="Drawing" style="width: 200px;"/>
>
>> _Hint:_ use the `.regplot` method from seaborn.
```
# [Answer to Ex. 5.1.2]
# Will be in assignment 1
```
> **Ex. 5.1.3:** Combine the two of the figures you created above into a two-panel figure similar to the one shown here:
> <img src="Example.png" alt="Drawing" style="width: 600px;"/>
>
> Save the figure as a png file on your computer.
>> _Hint:_ See [this question](https://stackoverflow.com/questions/41384040/subplot-for-seaborn-boxplot) on stackoverflow for inspiration.
```
# [Answer to Ex. 5.1.3]
# Will be in assignment 1
```
> **Ex. 5.1.4:** Use [pairplot with hue](https://seaborn.pydata.org/generated/seaborn.pairplot.html) to create a figure that clearly shows how the different species vary across measurements. Change the color palette and remove the shading from the density plots. _Bonus:_ Try to explain how the `diag_kws` argument works (_hint:_ [read here](https://stackoverflow.com/questions/1769403/understanding-kwargs-in-python))
```
# [Answer to Ex. 5.1.4]
# Will be in assignment 1
```
## Exercise Section 5.2: Explanatory plotting
In this section we will only work with the titanic dataset. We will create a simple figure from the bottom using the [_grammar of graphics_](http://vita.had.co.nz/papers/layered-grammar.pdf) framework.
<br>
**_NOTE:_** Because of the way the jupyter notebooks are made, you will have to complete this exercise in a single code cell.
> **Ex. 5.2.1:** Create an empty coordinate system with the *x* axis spanning from 0 to 100 and the *y* axis spanning 0 to 0.05.
<br><br>
> **Ex. 5.2.2:** Add three KDE-curves to the existing axis. The KDEs should estimate the density of passenger age within each passenger class. Add a figure title and axis labels. Make sure the legend entries makes sense. *If* you have time, change the colors.
>
> > _Hint:_ a `for` loop might be useful here.
<br><br>
The following exercises highlight some of the advanced uses of matplotlib and seaborn. These techniques allow you to create customized plots with a lot of versatility. These are **_BONUS_** questions.
> **Ex. 5.2.3:** Add a new subplot that sits within the outer one. Use `[0.55, 0.6, 0.3, 0.2]` the subplots size. At this point your figure should look something like this:
>
> <img src="exampleq3.png" alt="Drawing" style="width: 400px;"/>
>
>> _Hint:_ This [link](https://jakevdp.github.io/PythonDataScienceHandbook/04.08-multiple-subplots.html) has some tips for plotting subplots.
<br><br>
> **Ex. 5.2.4:** Move the legend outside the graph window, and add a barplot of survival probabilities split by class to the small subplot.
>
>> _Hint:_ [Look here](https://stackoverflow.com/questions/4700614/how-to-put-the-legend-out-of-the-plot) for examples of how to move the legend box around.
>
> In the end your figure should look similar to this one:
> <img src="final4.png" alt="Drawing" style="width: 400px;"/>
```
# [Answer to Ex. 5.1.5]
# Question 1
# empty coordinate system: x in [0, 100], y in [0, 0.05]
fig, ax1 = plt.subplots(1,1)
ax1.set_xlim(0, 100)
ax1.set_ylim(0, 0.05)
# Question 2
# one age-density KDE curve per passenger class, all on the same axes
for c in set(titanic['class']):
sub_data = titanic.loc[titanic['class'] == c]
sns.kdeplot(sub_data.age, ax = ax1, label = c + ' class')
ax1.set_xlabel("Age")
ax1.set_ylabel("Density")
ax1.set_title("Age densities")
# BONUS QUESTIONS ----------------------------------------
# Question 3
# inner subplot at [left, bottom, width, height] in figure coordinates
ax2 = fig.add_axes([0.55, 0.6, 0.3, 0.2])
plt.savefig('exampleq3.png')
# Question 4
# shrink the outer axes and relocate its legend below the plot area
box = ax1.get_position()
ax1.set_position([box.x0, box.y0 + box.height * 0.1,
box.width, box.height * 0.9])
ax1.legend(loc='upper center', bbox_to_anchor=(0.5, -0.2),
fancybox=True, shadow=True, ncol=5)
# Question 5
# survival probability per class rendered into the inner subplot
sns.barplot(x='class', y='survived', data=titanic, ax = ax2)
plt.savefig('final4.png')
```
|
github_jupyter
|

[](https://colab.research.google.com/github/JohnSnowLabs/spark-nlp-workshop/blob/master/tutorials/Certification_Trainings/Healthcare/6.Clinical_Context_Spell_Checker.ipynb)
<H1> Context Spell Checker - Medical </H1>
```
import json
from google.colab import files
# upload the John Snow Labs license JSON from the local machine
license_keys = files.upload()
with open(list(license_keys.keys())[0]) as f:
license_keys = json.load(f)
license_keys.keys()
license_keys['JSL_VERSION']
import os
# Install java
! apt-get update -qq
! apt-get install -y openjdk-8-jdk-headless -qq > /dev/null
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
os.environ["PATH"] = os.environ["JAVA_HOME"] + "/bin:" + os.environ["PATH"]
! java -version
# export the license credentials so spark-nlp-jsl can authenticate
secret = license_keys['SECRET']
os.environ['SPARK_NLP_LICENSE'] = license_keys['SPARK_NLP_LICENSE']
os.environ['AWS_ACCESS_KEY_ID']= license_keys['AWS_ACCESS_KEY_ID']
os.environ['AWS_SECRET_ACCESS_KEY'] = license_keys['AWS_SECRET_ACCESS_KEY']
jsl_version = license_keys['JSL_VERSION']
version = license_keys['PUBLIC_VERSION']
# install pyspark plus the licensed and public spark-nlp packages
! pip install --ignore-installed -q pyspark==2.4.4
! python -m pip install --upgrade spark-nlp-jsl==$jsl_version --extra-index-url https://pypi.johnsnowlabs.com/$secret
! pip install --ignore-installed -q spark-nlp==$version
import sparknlp
print (sparknlp.version())
import json
import os
from pyspark.ml import Pipeline
from pyspark.sql import SparkSession
from sparknlp.annotator import *
from sparknlp_jsl.annotator import *
from sparknlp.base import *
import sparknlp_jsl
# start a Spark session authorised by the license secret
spark = sparknlp_jsl.start(secret)
# stage 1: raw text column -> 'document' annotations
documentAssembler = DocumentAssembler()\
.setInputCol("text")\
.setOutputCol("document")
# stage 2: tokenizer that splits the listed prefix/suffix characters off tokens
tokenizer = RecursiveTokenizer()\
.setInputCols(["document"])\
.setOutputCol("token")\
.setPrefixes(["\"", "(", "[", "\n"])\
.setSuffixes([".", ",", "?", ")","!", "'s"])
# stage 3: pretrained clinical spell checker; corrected tokens go to 'checked'
spellModel = ContextSpellCheckerModel\
.pretrained('spellcheck_clinical', 'en', 'clinical/models')\
.setInputCols("token")\
.setOutputCol("checked")
pipeline = Pipeline(
stages = [
documentAssembler,
tokenizer,
spellModel
])
# fit on an empty frame and wrap as a LightPipeline for fast local annotation
empty_ds = spark.createDataFrame([[""]]).toDF("text")
lp = LightPipeline(pipeline.fit(empty_ds))
```
OK — at this point we have our spell-checking pipeline as expected. Let's see what we can do with it; consider these errors:
_
__Witth__ the __hell__ of __phisical__ __terapy__ the patient was __imbulated__ and on posoperative, the __impatient__ tolerating a post __curgical__ soft diet._
_With __paint__ __wel__ controlled on __orall__ pain medications, she was discharged __too__ __reihabilitation__ __facilitay__._
_She is to also call the __ofice__ if she has any __ever__ greater than 101, or __leeding__ __form__ the surgical wounds._
_Abdomen is __sort__, nontender, and __nonintended__._
_Patient not showing pain or any __wealth__ problems._
_No __cute__ distress_
Check that some of the errors are valid English words, only by considering the context the right choice can be made.
```
# sentences containing deliberate non-word and real-word spelling errors
# (the misspellings are intentional — do not "fix" them)
example = ["Witth the hell of phisical terapy the patient was imbulated and on posoperative, the impatient tolerating a post curgical soft diet.",
"With paint wel controlled on orall pain medications, she was discharged too reihabilitation facilitay.",
"She is to also call the ofice if she has any ever greater than 101, or leeding form the surgical wounds.",
"Abdomen is sort, nontender, and nonintended.",
"Patient not showing pain or any wealth problems.",
"No cute distress"
]
# print (original token, corrected token) pairs for each sentence
for pairs in lp.annotate(example):
print (list(zip(pairs['token'],pairs['checked'])))
```
|
github_jupyter
|
### Hipótese 1 (MLPRegressor)
`Matheus Raz (mrol@cin.ufpe.br)`
`João Paulo Lins (jplo@cin.ufpe.br)`
#### É possível prever o número de vendas globais de um game baseado no seu gênero, rating, publisher e plataforma?
```
from IPython.display import display
import numpy as np
import pandas as pd
from sklearn.neural_network import MLPRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import explained_variance_score
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import cross_val_score, cross_val_predict
from sklearn import metrics
df = pd.read_csv('vgsalesPP2.csv')
# 'Unnamed: 0' is the index column a previous to_csv wrote out
df.drop(['Unnamed: 0'], axis=1, inplace=True)
df
# one-hot encoded column ranges: genre, rating, platform and publisher
# (the redundant second .copy() on all_genres was removed)
all_genres = df.loc[:, 'Action':'Strategy'].copy()
all_ratings = df.loc[:, 'AO':'T'].copy()
all_platforms = df.loc[:, '2600':'XOne'].copy()
all_publishers = df.loc[:, '10TACLE Studios':'responDESIGN'].copy()
genres_and_ratings = all_genres.join(all_ratings).copy()
platforms_and_publishers = all_platforms.join(all_publishers).copy()
X = genres_and_ratings.join(platforms_and_publishers).values.copy()
y = df["Global_Sales"].values.copy()
X
y
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30)
# Sweep the width of the single hidden layer from 5 to 99 neurons and record
# each network's R^2 score on the held-out split.
scores = []
for i in range(5, 100):
    mlp = MLPRegressor(
        hidden_layer_sizes=(i,), activation='relu', solver='adam', alpha=0.001, batch_size='auto',
        learning_rate='constant', learning_rate_init=0.01, power_t=0.5, max_iter=1000, shuffle=True,
        random_state=9, tol=0.0001, verbose=False, warm_start=False, momentum=0.9, nesterovs_momentum=True,
        early_stopping=False, validation_fraction=0.1, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    model = mlp.fit(X_train, y_train)
    pred_i = model.predict(X_test)
    scores.append(model.score(X_test, y_test))
best_result = 0
for i in scores:
    if (i > best_result):
        best_result = i
# BUGFIX: scores[0] corresponds to hidden_layer_sizes=(5,), not (1,), because
# the sweep starts at 5 — so the index offset is +5, not the previous +1.
best_hidden_layer_sizes = scores.index(best_result) + 5
print("Melhor resultado:",best_result,"para",best_hidden_layer_sizes,"camadas escondidas")
```
### Conclusão
Variando de 5 a 100 o número de neurônios da camada escondida da Rede Neural MLP Regressor, o melhor resultado obtido foi de apenas 15% de taxa de acerto. Sendo assim, a hipótese de que seria possível prever o número de vendas globais de um game baseado no seu gênero, rating, publisher e plataforma foi refutada.
```
# refit a single network with the best hidden-layer size found above
mlp = MLPRegressor(
hidden_layer_sizes=(best_hidden_layer_sizes,), activation='relu', solver='adam', alpha=0.001, batch_size='auto',
learning_rate='constant', learning_rate_init=0.01, power_t=0.5, max_iter=1000, shuffle=True,
random_state=9, tol=0.0001, verbose=False, warm_start=False, momentum=0.9, nesterovs_momentum=True,
early_stopping=False, validation_fraction=0.1, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
mlp
model = mlp.fit(X_train, y_train)
predictions = mlp.predict(X_test)
# predicted vs. true global sales on the held-out split
plt.scatter(y_test, predictions)
plt.xlabel("True Values")
plt.ylabel("Predictions")
print("Score:", model.score(X_test, y_test))
# 6-fold cross-validation over the full dataset
scores = cross_val_score(model, X, y, cv=6)
print ("Cross-validated scores:", scores)
predictions = cross_val_predict(model, X, y, cv=6)
plt.scatter(y, predictions)
# NOTE(review): r2_score is a regression metric — the value labelled
# "Accuracy" below is really the cross-predicted R^2
accuracy = metrics.r2_score(y, predictions)
print ("Cross-Predicted Accuracy:", accuracy)
```
|
github_jupyter
|
```
from datetime import datetime
import numpy as np
import pandas as pd
import sklearn
from sklearn.linear_model import LinearRegression
#parse data
from sklearn import preprocessing
from sklearn.preprocessing import LabelEncoder
#label encoding on categorical data
#FAMA 49CRSP Common Stocks
# public_date is kept as a string so it can be sliced into Y/M/D later
df = pd.read_csv('FAMA_49CRSP.csv', dtype={'public_date' : str})
import sklearn.preprocessing
from sklearn.preprocessing import KBinsDiscretizer
from sklearn.model_selection import train_test_split
#preprocessing here
#sort by date
df = df.sort_values(by = 'public_date', ascending = True)
df = df.dropna()
#encode integer categories into numbers
label_encoder = LabelEncoder()
integer_encoded = label_encoder.fit_transform(df.FFI49_desc)
df.FFI49_desc = integer_encoded
#df = df.dropna()
# targets: equally-weighted and value-weighted industry returns,
# removed from the feature frame below
ewlabels = df.indret_ew
vwlabels = df.indret_vw
df = df.drop(labels=['indret_ew', 'indret_vw'], axis=1)
#3year on year change as a prediction feature, raw pct change
yoythree = ewlabels.diff(periods = 3)
#3 years rolling percent change, averaged ie. (y1-y2 + (y3-y2)change)/2
rollavgpct = ewlabels.rolling(3).mean()
#drop first 3 years
# (the diff/rolling windows above leave NaNs in the first 3 rows)
df = df.iloc[3:]
ewlabels = ewlabels.iloc[3:]
yoythree = yoythree.iloc[3:]
#yoypctthree = yoypctthree.iloc[3:]
rollavgpct = rollavgpct.iloc[3:]
#add -1 and 1 so the bins will take on bins to be equal and set to max -1 and 1
#extrema = pd.Series([-1,1])
#ewnlabels = ewlabels.append(extrema)
#make a new output (bucket by percentage?)
# enc = KBinsDiscretizer(n_bins=8, encode='ordinal',strategy = 'uniform')
# ewnlabels = np.asarray(ewnlabels)
# ewnlabels = ewnlabels.reshape((-1,1))
# labels_binned = enc.fit_transform(ewnlabels)
# labels_binned = labels_binned[:-2]
#1 Split-Timer series data, 0.64 Train, 0.16 dev, 0.2 Test
#x_train, x_test, y_train, y_test = train_test_split(df, labels_binned, test_size = 0.2, shuffle = False)
# shuffle=False preserves chronological order for this time-series split
x_train, x_test, y_train, y_test = train_test_split(df, ewlabels, test_size = 0.2, shuffle = False)
x_train, x_dev, y_train, y_dev = train_test_split(x_train, y_train, test_size = 0.2, shuffle = False)
def get_dates(x_train, x_dev, x_test):
    """Parse each split's 'public_date' (YYYYMMDD strings) into datetime lists
    and return the splits with that column dropped."""
    def _to_dates(frame):
        # slice the string into year / month / day components
        return [datetime(year=int(s[0:4]), month=int(s[4:6]), day=int(s[6:8]))
                for s in frame['public_date']]
    train_dates = _to_dates(x_train)
    dev_dates = _to_dates(x_dev)
    test_dates = _to_dates(x_test)
    x_train = x_train.drop('public_date', axis=1)
    x_dev = x_dev.drop('public_date', axis=1)
    x_test = x_test.drop('public_date', axis=1)
    return train_dates, dev_dates, test_dates, x_train, x_dev, x_test
train_dates, dev_dates, test_dates, x_train, x_dev, x_test = get_dates(x_train, x_dev, x_test)
print(x_train.shape)
print(x_test.shape)
from matplotlib import pyplot
# visualise the chronological train/dev/test split of the target series
fig1 = pyplot.figure(1, figsize = (6,6))
pyplot.plot(train_dates, y_train, color = 'green', label = 'industry_ew_train')
pyplot.plot(dev_dates, y_dev, color = 'yellow', label = 'industry_ew_dev')
pyplot.plot(test_dates, y_test, color = 'red', label = 'industry_ew_test')
pyplot.xlabel('Date')
pyplot.ylabel('Industry Return (Equally Weighted)')
pyplot.legend()
pyplot.show()
#tutorial keras practice
#https://machinelearningmastery.com/tutorial-first-neural-network-python-keras/
#####IGNORE THIS!!!!!!!!
# NOTE(review): this scratch cell references x_tra / y_tra, which are never
# defined in this notebook — running it raises NameError (the author already
# marked it IGNORE).
from keras.models import Sequential
from keras.layers import Dense, Activation,Softmax
from keras.optimizers import SGD
from sklearn.metrics import mean_squared_error
import numpy
model = Sequential()
#parameters = number of neurons, initialization method, activation function
model.add(Dense(32, input_dim=76, init = 'uniform', activation = 'relu'))
model.add(Dense(16, init = 'uniform', activation = 'relu'))
model.add(Dense(1, init = 'uniform', activation = 'sigmoid'))
# For a binary classification problem
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(x_tra, y_tra, epochs=25, batch_size=32)
print("----------------------------------------------------------")
scores = model.evaluate(x_tra,y_tra)
print("\n%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
print("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
y_devpred = model.predict(x_dev)
print("--------------------------------------------")
print(mean_squared_error(y_dev,y_devpred))
# experiment log: layer widths tried -> train% and dev% accuracy
#Regression Model:
#1 Layer: 76 -> 12.78% and 15.28%
#2 Layer: 76,1 -> 0.40% and 0.48%
#3 Layers: 76, 32, 1 -> 0% and 65%
#4 layers: 76,48,32,1 + adam + -> 60.52% and 56.70%
#4 Layers: 76,32,16,1 -> 61.33% and 57.18%
#4 Layers: 76,32,8,1 -> 0%
#4 layers: 76,48,8,1 -> 0%
#6 layers: 76,48,32,16,8,1 -> 20% and 0%
import pandas
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasRegressor
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from matplotlib import pyplot
import seaborn as sns
# regression MLP: 72 -> 32 -> 8 -> 1 with a linear output unit
model = Sequential()
#parameters = number of neurons, initialization method, activation function
model.add(Dense(72, input_dim=x_train.shape[1], kernel_initializer='normal', activation='relu'))
#model.add(Dense(48, kernel_initializer='normal',activation = 'relu'))
model.add(Dense(32, kernel_initializer='normal',activation = 'relu'))
#model.add(Dense(16, kernel_initializer='normal',activation = 'relu'))
model.add(Dense(8, kernel_initializer='normal',activation = 'relu'))
model.add(Dense(1, kernel_initializer='normal', activation = 'linear'))
# Compile model
#opt = Adam(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
#model.compile(loss='mean_squared_error', optimizer=opt, metrics=['accuracy'])
#model.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])
model.compile(loss='mse', optimizer='adam', metrics=['mse', 'mae', 'mape', 'cosine'])
history = model.fit(np.asarray(x_train), y_train, epochs=50)
# training-history curves, one colour per tracked metric
fig2 = pyplot.figure(2,figsize = (10,10))
pyplot.plot(history.history['mean_squared_error'], color = 'blue')
pyplot.plot(history.history['mean_absolute_error'], color = 'green')
pyplot.plot(history.history['mean_absolute_percentage_error'], color = 'orange')
pyplot.plot(history.history['cosine_proximity'], color = 'red')
pyplot.ylim(-10,10)
pyplot.show()
#train set
print("----------------------------------------------------------")
train_predictions = model.predict(x_train)
scores = model.evaluate(np.asarray(x_train), y_train)
for i in range(len(scores)):
print("\n%s: %.2f%%" % (model.metrics_names[i], scores[i]))
#dev set
print("----------------------------------------------------------")
dev_predictions = model.predict(x_dev)
scores = model.evaluate(np.asarray(x_dev),y_dev)
for i in range(len(scores)):
print("\n%s: %.2f%%" % (model.metrics_names[i], scores[i]))
# dev-set actual (green) vs predicted (red) over time
fig3 = pyplot.figure(3, figsize = (10,10))
pyplot.plot(dev_dates, y_dev, color = 'green')
pyplot.plot(dev_dates, dev_predictions, color = 'red')
pyplot.show()
#test set
print("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
test_predictions = model.predict(x_test).reshape((-1))
scores = model.evaluate(np.asarray(x_test),y_test)
for i in range(len(scores)):
print("\n%s: %.2f%%" % (model.metrics_names[i], scores[i]))
fig4 = pyplot.figure(4, figsize = (10,10))
pyplot.plot(test_dates, y_test, color = 'green')
pyplot.plot(test_dates, test_predictions, color = 'red')
pyplot.show()
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.optimizers import SGD
from sklearn.metrics import mean_squared_error
import numpy
# classification-style MLP with an 8-way softmax output
model = Sequential()
model.add(Dense(32, input_dim=x_train.shape[1], init = 'uniform', activation = 'relu'))
model.add(Dense(16, init = 'uniform', activation = 'relu'))
model.add(Dense(8, init = 'uniform', activation = 'softmax'))
# For a multi-class classification problem
# NOTE(review): categorical_crossentropy expects one-hot class targets, but
# y_train here is the continuous return series — confirm this cell was meant
# for the binned labels that are commented out earlier in the notebook.
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
model.fit(x_train, y_train, epochs=25, batch_size=32)
print("----------------------------------------------------------")
scores = model.evaluate(x_train,y_train)
print("\n%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
print("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
y_devpred = model.predict(x_dev)
print("--------------------------------------------")
print(mean_squared_error(y_dev,y_devpred))
from keras.models import Sequential
from keras.layers import Dense, Activation,Softmax
from keras.optimizers import SGD
model = Sequential()
# NOTE(review): input_shape is given the full 2-D (samples, features) tuple;
# Keras expects the per-sample shape, i.e. (x_train.shape[1],) — verify.
model.add(Dense(32, input_shape = (x_train.shape)))
model.add(Dense(64, activation='relu'))
model.add(Dense(10, activation='softmax'))
# For a multi-class classification problem
model.compile(optimizer='rmsprop',
loss='categorical_crossentropy',
metrics=['accuracy'])
# For a binary classification problem
model.compile(optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['accuracy'])
# For a mean squared error regression problem
# (each compile() call replaces the previous configuration; only this final
# mse setup is in effect when fit() runs)
model.compile(optimizer='rmsprop',
loss='mse')
model.fit(x_train, y_train, epochs=5, batch_size=32)
y_devpred = model.predict(x_dev)
print("--------------------------------------------")
print(mean_squared_error(y_dev,y_devpred))
```
|
github_jupyter
|
<font face=楷体 size=6><b>黑人抬棺人脸检测:</b>
<font face=楷体 size=5><b>背景:</b>
<font face=楷体 size=3>黑人抬棺这么火,怎么能不用paddlehub试一试呢?
<br>
<font face=楷体 size=3>临近期末,准备考试,还要准备考研,555,明明有好点子,但是没时间做,先出一个黑人抬棺的视频8
<font face=楷体 size=5><b>结果:</b>
<font face=楷体 size=3>在我的B站上: <a href=https://www.bilibili.com/video/BV1Sk4y1r7Zz>https://www.bilibili.com/video/BV1Sk4y1r7Zz</a>
<font face=楷体 size=5><b>思路和步骤:</b>
<font face=楷体 size=3>思路嘛,再简单不过,一帧一帧拆分,一帧一帧人脸检测
<font face=楷体 size=3>步骤嘛,人脸检测 + ffmpeg拆分合并
<font face=楷体 size=5><b>总结:</b><br>
<font face=楷体 size=3>paddlehub蛮好用的,改日有时间定要搞一番事业<br>
<font face=楷体 size=3>时间太少了,考研党伤不起啊啊啊
```
from IPython.display import HTML
# embed the resulting Bilibili video in the notebook
HTML('<iframe style="width:98%;height: 450px;" src="//player.bilibili.com/player.html?bvid=BV1Sk4y1r7Zz" scrolling="no" border="0" frameborder="no" framespacing="0" allowfullscreen="true"> </iframe>')
# ---------------------------------------------------------------------------
# Set the environment variable to use the `GPU` (this still errored at the
# time; an issue was filed on GitHub — since resolved)
# ---------------------------------------------------------------------------
%set_env CUDA_VISIBLE_DEVICES = 0
# ---------------------------------------------------------------------------
# Install the video-processing environment
# ---------------------------------------------------------------------------
!pip install moviepy -i https://pypi.tuna.tsinghua.edu.cn/simple
!pip install ffmpeg
# ---------------------------------------------------------------------------
# Install the PaddleHub environment and download the face-detection model
# ---------------------------------------------------------------------------
try:
import paddlehub as hub
except ImportError:
!pip install paddlehub==1.6.0 -i https://pypi.tuna.tsinghua.edu.cn/simple
import paddlehub as hub
try:
module = hub.Module(name="ultra_light_fast_generic_face_detector_1mb_640")
# module = hub.Moudle(name="ultra_light_fast_generic_face_detector_1mb_320")
except FileNotFoundError:
!hub install ultra_light_fast_generic_face_detector_1mb_640==1.1.2
module = hub.Module(name="ultra_light_fast_generic_face_detector_1mb_640")
# module = hub.Moudle(name="ultra_light_fast_generic_face_detector_1mb_320")
```
注:
Ultra-Light-Fast-Generic-Face-Detector-1MB提供了两种预训练模型,ultra_light_fast_generic_face_detector_1mb_320和ultra_light_fast_generic_face_detector_1mb_640。
- ultra_light_fast_generic_face_detector_1mb_320,在预测时会将图片输入缩放为320 * 240,预测速度更快。关于该模型更多介绍, 查看[PaddleHub官网介绍](https://www.paddlepaddle.org.cn/hubdetail?name=ultra_light_fast_generic_face_detector_1mb_320&en_category=ObjectDetection)
- ultra_light_fast_generic_face_detector_1mb_640,在预测时会将图片输入缩放为640 * 480,预测精度更高。关于该模型更多介绍, 查看[PaddleHub官网介绍](https://www.paddlepaddle.org.cn/hubdetail?name=ultra_light_fast_generic_face_detector_1mb_640&en_category=ObjectDetection)
用户根据需要,选择具体模型。利用PaddleHub使用该模型时,只需更改指定name,即可实现无缝切换。
```
# ---------------------------------------------------------------------------
# 查看黑人抬棺视频的基本信息
# ---------------------------------------------------------------------------
import os
import cv2
import json
import numpy as np
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
from tqdm import tqdm
video = cv2.VideoCapture("video.flv")
fps = video.get(cv2.CAP_PROP_FPS) # 视频帧率
frameCount = video.get(cv2.CAP_PROP_FRAME_COUNT) # 获得视频的总帧数
width = video.get(cv2.CAP_PROP_FRAME_WIDTH) # 获得视频的宽度
height = video.get(cv2.CAP_PROP_FRAME_HEIGHT) # 获得视频的高度
print('视频的宽度:{}'.format(width))
print('视频的高度:{}'.format(height))
print('视频帧率:{}'.format(fps))
print('视频的总帧数:{}'.format(frameCount))
cv2.__version__
# ---------------------------------------------------------------------------
# 将视频数据变为帧数据, 并且保存
# ---------------------------------------------------------------------------
if not os.path.exists('frame'):
os.mkdir('frame')
all_img = []
all_img_path_dict = {'image':[]}
success, frame = video.read()
i = 0
while success:
all_img.append(frame)
i += 1
# if not i % 10:print(i)
success, frame = video.read()
path = os.path.join('frame', str(i)+'.jpg')
all_img_path_dict['image'].append(path)
cv2.imwrite(path, frame)
all_img_path_dict['image'].pop()
print('完毕')
# ---------------------------------------------------------------------------
# 预测并打印输出(或者找到已经保存的文件)
# ---------------------------------------------------------------------------
# 读取视频所保存的信息文件
info_path = 'info.json'
# hub版本更新后, 其检测精度浮点数太飘了, 这里赶时间, 就暂时不写了(准备期末考试ing...)
# if os.path.exists(info_path):
# # 读取已经保存的`json`数据
# with open(info_path, 'r') as f:
# json_dict = json.load(f)
# results = json_dict['data']
if False:
pass
else: # 若没有找到`json`数据
# PaddleHub对于支持一键预测的module,可以调用module的相应预测API,完成预测功能。
results = module.face_detection(data=all_img_path_dict,
use_gpu=True,
visualization=True)
# save_json = {'data':results}
# with open(info_path, 'w') as f:
# f.write(json.dumps(save_json))
# ---------------------------------------------------------------------------
# 输出制作视频文件的备用文件
# ---------------------------------------------------------------------------
# 输出视频的size
size = (int(width), int(height))
size = (int(height), int(width))
# 创建写视频对象(不好用)
# videoWriter = cv2.VideoWriter("a.avi", cv2.VideoWriter_fourcc('M','J','P','G'), fps, size)
for i, info in tqdm(enumerate(results)):
num_info = info['data']
if not len(num_info):
# 如果该画面没有人, 则 `frame`变量赋值为原来的图片
frame = all_img[i].copy()[:,:, ::-1]
else:
# frame = mpimg.imread(info['save_path']) # 之前的save_path现在没了......
frame = mpimg.imread(info['path'].replace('frame', 'face_detector_640_predict_output'))
cv2.putText(frame, 'fps: {:.2f}'.format(fps), (20, 370), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 0 ,255), 2)
cv2.putText(frame, 'count: ' + str(len(num_info)), (20, 400), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 0 ,255), 2)
cv2.putText(frame, 'frame: ' + str(i), (20, 430), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 0 ,255), 2)
# cv2.putText(frame, 'time: {:.2f}s'.format(i / fps), (20,460), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255,0,255), 2)
plt.imsave('./img_out/{}.jpg'.format(i), frame)
# ---------------------------------------------------------------------------
# 输出视频文件(没加配乐, 没有灵魂)(找了一堆python工具, 不如ffmpeg好用)
# ---------------------------------------------------------------------------
if os.path.exists('temp.mp4'):
!rm -f temp.mp4
!ffmpeg -f image2 -i img_out/%d.jpg -vcodec libx264 -r 60.0 temp.mp4
# ---------------------------------------------------------------------------
# 抽离源文件配乐
# ---------------------------------------------------------------------------
if os.path.exists('nb.mp3'):
!rm -f nb.mp3
!ffmpeg -i video.flv -f mp3 nb.mp3
# ---------------------------------------------------------------------------
# 音乐视频合成(由于需要调整视频速度, 使音频和视频时间一样, 命令行不太好调整, 我将合成放在了本地端)
# ---------------------------------------------------------------------------
# # 去掉temp视频音轨
# !ffmpeg -i temp.mp4 -c:v copy -an temp_new.mp4
# # 给视频加背景音乐
# !ffmpeg -i temp_new.mp4 -i nb.mp3 -t 52 -y last.mp4
```
|
github_jupyter
|
# Monte Carlo Methods
In this notebook, you will write your own implementations of many Monte Carlo (MC) algorithms.
While we have provided some starter code, you are welcome to erase these hints and write your code from scratch.
### Part 0: Explore BlackjackEnv
We begin by importing the necessary packages.
```
import sys
import gym
import numpy as np
from collections import defaultdict
from plot_utils import plot_blackjack_values, plot_policy
```
Use the code cell below to create an instance of the [Blackjack](https://github.com/openai/gym/blob/master/gym/envs/toy_text/blackjack.py) environment.
```
env = gym.make('Blackjack-v1')
```
Each state is a 3-tuple of:
- the player's current sum $\in \{0, 1, \ldots, 31\}$,
- the dealer's face up card $\in \{1, \ldots, 10\}$, and
- whether or not the player has a usable ace (`no` $=0$, `yes` $=1$).
The agent has two potential actions:
```
STICK = 0
HIT = 1
```
Verify this by running the code cell below.
```
print(env.observation_space)
print(env.action_space)
```
Execute the code cell below to play Blackjack with a random policy.
(_The code currently plays Blackjack three times - feel free to change this number, or to run the cell multiple times. The cell is designed for you to get some experience with the output that is returned as the agent interacts with the environment._)
```
for i_episode in range(3):
state = env.reset()
while True:
print(state)
action = env.action_space.sample()
state, reward, done, info = env.step(action)
if done:
print('End game! Reward: ', reward)
print('You won :)\n') if reward > 0 else print('You lost :(\n')
break
```
### Part 1: MC Prediction
In this section, you will write your own implementation of MC prediction (for estimating the action-value function).
We will begin by investigating a policy where the player _almost_ always sticks if the sum of her cards exceeds 18. In particular, she selects action `STICK` with 80% probability if the sum is greater than 18; and, if the sum is 18 or below, she selects action `HIT` with 80% probability. The function `generate_episode_from_limit_stochastic` samples an episode using this policy.
The function accepts as **input**:
- `bj_env`: This is an instance of OpenAI Gym's Blackjack environment.
It returns as **output**:
- `episode`: This is a list of (state, action, reward) tuples (of tuples) and corresponds to $(S_0, A_0, R_1, \ldots, S_{T-1}, A_{T-1}, R_{T})$, where $T$ is the final time step. In particular, `episode[i]` returns $(S_i, A_i, R_{i+1})$, and `episode[i][0]`, `episode[i][1]`, and `episode[i][2]` return $S_i$, $A_i$, and $R_{i+1}$, respectively.
```
def generate_episode_from_limit_stochastic(bj_env):
    """Sample one full Blackjack episode under a fixed stochastic policy.

    Policy: if the player's current sum (state[0]) exceeds 18, choose STICK
    (action 0) with probability 0.8; otherwise choose HIT (action 1) with
    probability 0.8.

    bj_env -- an instance of OpenAI Gym's Blackjack environment.

    Returns a list of (state, action, reward) tuples, one per time step,
    i.e. episode[i] is (S_i, A_i, R_{i+1}).
    """
    episode = []
    state = bj_env.reset()
    done = False
    while not done:
        # Bias the action distribution depending on the player's sum.
        probs = [0.8, 0.2] if state[0] > 18 else [0.2, 0.8]
        action = np.random.choice(2, p=probs)
        next_state, reward, done, _ = bj_env.step(action)
        episode.append((state, action, reward))
        state = next_state
    return episode
```
Execute the code cell below to play Blackjack with the policy.
(*The code currently plays Blackjack three times - feel free to change this number, or to run the cell multiple times. The cell is designed for you to gain some familiarity with the output of the `generate_episode_from_limit_stochastic` function.*)
```
for i in range(5):
print(generate_episode_from_limit_stochastic(env))
```
Now, you are ready to write your own implementation of MC prediction. Feel free to implement either first-visit or every-visit MC prediction; in the case of the Blackjack environment, the techniques are equivalent.
Your algorithm has three arguments:
- `env`: This is an instance of an OpenAI Gym environment.
- `num_episodes`: This is the number of episodes that are generated through agent-environment interaction.
- `generate_episode`: This is a function that returns an episode of interaction.
- `gamma`: This is the discount rate. It must be a value between 0 and 1, inclusive (default value: `1`).
The algorithm returns as output:
- `Q`: This is a dictionary (of one-dimensional arrays) where `Q[s][a]` is the estimated action value corresponding to state `s` and action `a`.
```
def mc_prediction_q(env, num_episodes, generate_episode, gamma=1.0):
    """Every-visit MC prediction of the action-value function Q.

    env              -- OpenAI Gym environment (only action_space.n is used here).
    num_episodes     -- number of episodes to sample.
    generate_episode -- callable returning a list of (state, action, reward).
    gamma            -- discount rate in [0, 1] (default 1.0).

    Returns Q: a defaultdict mapping state -> numpy array of length nA with
    the average discounted return observed for each action.
    """
    nA = env.action_space.n
    # Running sums of returns, visit counts, and the resulting estimates.
    returns_sum = defaultdict(lambda: np.zeros(nA))
    N = defaultdict(lambda: np.zeros(nA))
    Q = defaultdict(lambda: np.zeros(nA))
    for i_episode in range(1, num_episodes+1):
        # Lightweight progress indicator every 1000 episodes.
        if i_episode % 1000 == 0:
            print("\rEpisode {}/{}.".format(i_episode, num_episodes), end="")
            sys.stdout.flush()
        episode = generate_episode(env)
        states, actions, rewards = zip(*episode)
        # discounts[k] == gamma**k; one extra entry so slicing below lines up.
        discounts = np.array([gamma**k for k in range(len(rewards)+1)])
        for t, (s, a) in enumerate(zip(states, actions)):
            # Discounted return from time t onward.
            G = sum(rewards[t:] * discounts[:-(1+t)])
            returns_sum[s][a] += G
            N[s][a] += 1.0
            Q[s][a] = returns_sum[s][a] / N[s][a]
    return Q
```
Use the cell below to obtain the action-value function estimate $Q$. We have also plotted the corresponding state-value function.
To check the accuracy of your implementation, compare the plot below to the corresponding plot in the solutions notebook **Monte_Carlo_Solution.ipynb**.
```
# obtain the action-value function
Q = mc_prediction_q(env, 500000, generate_episode_from_limit_stochastic)
# obtain the corresponding state-value function
V_to_plot = dict((k,(k[0]>18)*(np.dot([0.8, 0.2],v)) + (k[0]<=18)*(np.dot([0.2, 0.8],v))) \
for k, v in Q.items())
# plot the state-value function
plot_blackjack_values(V_to_plot)
```
### Part 2: MC Control
In this section, you will write your own implementation of constant-$\alpha$ MC control.
Your algorithm has four arguments:
- `env`: This is an instance of an OpenAI Gym environment.
- `num_episodes`: This is the number of episodes that are generated through agent-environment interaction.
- `alpha`: This is the step-size parameter for the update step.
- `gamma`: This is the discount rate. It must be a value between 0 and 1, inclusive (default value: `1`).
The algorithm returns as output:
- `Q`: This is a dictionary (of one-dimensional arrays) where `Q[s][a]` is the estimated action value corresponding to state `s` and action `a`.
- `policy`: This is a dictionary where `policy[s]` returns the action that the agent chooses after observing state `s`.
(_Feel free to define additional functions to help you to organize your code._)
```
def generate_episode_from_Q(env, Q, epsilon, nA):
    """Sample one episode while following an epsilon-greedy policy w.r.t. Q.

    env     -- OpenAI Gym environment (reset/step API).
    Q       -- mapping from state to an array of nA action-value estimates.
    epsilon -- exploration rate used to build the action probabilities.
    nA      -- number of available actions.

    Returns a list of (state, action, reward) tuples for the whole episode.
    """
    episode = []
    state = env.reset()
    while True:
        # For states with no Q estimate yet, fall back to a uniformly random
        # action; otherwise sample from the epsilon-greedy distribution.
        action = np.random.choice(np.arange(nA), p=get_probs(Q[state], epsilon, nA)) \
                                    if state in Q else env.action_space.sample()
        next_state, reward, done, info = env.step(action)
        episode.append((state, action, reward))
        state = next_state
        if done:
            break
    return episode
def get_probs(Q_s, epsilon, nA):
    """Return the epsilon-greedy action probabilities for one state.

    Q_s     -- array of nA action-value estimates for the state.
    epsilon -- exploration rate.
    nA      -- number of actions.

    Every action gets epsilon/nA probability; the greedy action additionally
    receives the remaining 1 - epsilon mass.
    """
    explore_share = epsilon / nA
    probs = np.full(nA, explore_share)
    probs[np.argmax(Q_s)] = 1 - epsilon + explore_share
    return probs
def update_Q(env, episode, Q, alpha, gamma):
    """Constant-alpha MC update of Q from one sampled episode.

    env     -- unused; kept for a uniform call signature.
    episode -- list of (state, action, reward) tuples.
    Q       -- mapping from state to an array of action-value estimates
               (mutated in place and also returned).
    alpha   -- constant step size.
    gamma   -- discount rate in [0, 1].
    """
    states, actions, rewards = zip(*episode)
    # discounts[k] == gamma**k; one extra entry so the slices below align.
    discounts = np.array([gamma**k for k in range(len(rewards)+1)])
    for t, (s, a) in enumerate(zip(states, actions)):
        # Discounted return from time t, then move the estimate toward it.
        G = sum(rewards[t:] * discounts[:-(1+t)])
        Q[s][a] += alpha * (G - Q[s][a])
    return Q
def mc_control(env, num_episodes, alpha, gamma=1.0, eps_start=1.0, eps_decay=.99999, eps_min=0.05):
    """Constant-alpha MC control with a decaying epsilon-greedy policy.

    env          -- OpenAI Gym environment.
    num_episodes -- number of episodes to sample.
    alpha        -- constant step size for the Q update.
    gamma        -- discount rate in [0, 1] (default 1.0).
    eps_start, eps_decay, eps_min -- epsilon schedule: starting value,
        per-episode multiplicative decay, and floor.

    Returns (policy, Q): policy maps state -> greedy action index; Q maps
    state -> array of action-value estimates.
    """
    nA = env.action_space.n
    # initialize empty dictionary of arrays
    Q = defaultdict(lambda: np.zeros(nA))
    epsilon = eps_start
    # loop over episodes
    for i_episode in range(1, num_episodes+1):
        # monitor progress
        if i_episode % 1000 == 0:
            print("\rEpisode {}/{}.".format(i_episode, num_episodes), end="")
            sys.stdout.flush()
        # decay epsilon but never below the floor
        epsilon = max(epsilon*eps_decay, eps_min)
        # generate an episode by following epsilon-greedy policy
        episode = generate_episode_from_Q(env, Q, epsilon, nA)
        # update the action-value function estimate using the episode
        Q = update_Q(env, episode, Q, alpha, gamma)
    # determine the policy corresponding to the final action-value function estimate
    policy = dict((k,np.argmax(v)) for k, v in Q.items())
    return policy, Q
```
Use the cell below to obtain the estimated optimal policy and action-value function. Note that you should fill in your own values for the `num_episodes` and `alpha` parameters.
```
# obtain the estimated optimal policy and action-value function
policy, Q = mc_control(env, 500000, 0.02)
```
Next, we plot the corresponding state-value function.
```
# obtain the corresponding state-value function
V = dict((k,np.max(v)) for k, v in Q.items())
# plot the state-value function
plot_blackjack_values(V)
```
Finally, we visualize the policy that is estimated to be optimal.
```
# plot the policy
plot_policy(policy)
```
The **true** optimal policy $\pi_*$ can be found in Figure 5.2 of the [textbook](http://go.udacity.com/rl-textbook) (and appears below). Compare your final estimate to the optimal policy - how close are you able to get? If you are not happy with the performance of your algorithm, take the time to tweak the decay rate of $\epsilon$, change the value of $\alpha$, and/or run the algorithm for more episodes to attain better results.

|
github_jupyter
|
**Aims**:
- extract the omics mentioned in multi-omics articles
**NOTE**: the articles not in PMC/with no full text need to be analysed separately, or at least highlighted.
```
%run notebook_setup.ipynb
import pandas
pandas.set_option('display.max_colwidth', 100)
%vault from pubmed_derived_data import literature, literature_subjects
literature['title_abstract_text_subjects'] = (
literature['title']
+ ' ' + literature['abstract_clean'].fillna('')
+ ' ' + literature_subjects.apply(lambda x: ' '.join(x[x == True].index), axis=1)
+ ' ' + literature['full_text'].fillna('')
)
omics_features = literature.index.to_frame().drop(columns='uid').copy()
from functools import partial
from helpers.text_processing import check_usage
from pandas import Series
check_usage_in_input = partial(
check_usage,
data=literature,
column='title_abstract_text_subjects',
limit=5 # show only first 5 results
)
TERM_IN_AT_LEAST_N_ARTICLES = 5
```
# Omics
## 1. Lookup by words which end with -ome
```
cellular_structures = {
# organelles
'peroxisome',
'proteasome',
'ribosome',
'exosome',
'nucleosome',
'polysome',
'autosome',
'autophagosome',
'endosome',
'lysosome',
# proteins and molecular complexes
'spliceosome',
'cryptochrome',
# chromosmes
'autosome',
'chromosome',
'x-chromosome',
'y-chromosome',
}
species = {
'trichome'
}
tools_and_methods = {
# dry lab
'dphenome',
'dgenome',
'reactome',
'rexposome',
'phytozome',
'rgenome',
'igenome', # iGenomes
# wet lab
'microtome'
}
not_an_ome = {
'outcome',
'middle-income',
'welcome',
'wellcome', # :)
'chrome',
'some',
'cumbersome',
'become',
'home',
'come',
'overcome',
'cytochrome',
'syndrome',
'ubiome',
'biome', # this IS an ome, but more into envrionmental studies, rather than molecular biology!
'fluorochrome',
'post-genome',
'ubiquitin-proteasome', # UPS
*tools_and_methods,
*cellular_structures,
*species
}
from omics import get_ome_regexp
ome_re = get_ome_regexp()
get_ome_regexp??
ome_occurrences = (
literature['title_abstract_text_subjects'].str.lower()
.str.extractall(ome_re)[0]
.to_frame('term').reset_index()
)
ome_occurrences = ome_occurrences[~ome_occurrences.term.isin(not_an_ome)]
ome_occurrences.head(3)
```
### 1.1 Harmonise hyphenation
```
from helpers.text_processing import report_hyphenation_trends, harmonise_hyphenation
hyphenation_rules = report_hyphenation_trends(ome_occurrences.term)
hyphenation_rules
ome_occurrences.term = harmonise_hyphenation(ome_occurrences.term, hyphenation_rules)
```
### 1.2 Fix typos
```
from helpers.text_processing import find_term_typos, create_typos_map
ome_counts = ome_occurrences.drop_duplicates(['uid', 'term']).term.sorted_value_counts()
potential_ome_typos = find_term_typos(ome_counts, TERM_IN_AT_LEAST_N_ARTICLES - 1)
potential_ome_typos
check_usage_in_input('1-metabolome')
check_usage_in_input('miRNAome')
check_usage_in_input('miRome')
check_usage_in_input('rexposome')
check_usage_in_input('glycol-proteome')
check_usage_in_input('rgenome')
check_usage_in_input('iGenomes')
check_usage_in_input('cancergenome')
is_typo_subset_or_variant = {
('transcritome', 'transcriptome'): True,
('transciptome', 'transcriptome'): True,
('tanscriptome', 'transcriptome'): True,
('trascriptome', 'transcriptome'): True,
('microbome', 'microbiome'): True,
('protenome', 'proteome'): True,
# (neither n- nor o- is frequent enough on its own)
('o-glycoproteome', 'glycoproteome'): True,
('n-glycoproteome', 'glycoproteome'): True,
('glycol-proteome', 'glycoproteome'): True, # note "glycol" instead of "glyco"
('mirome', 'mirnome'): True,
('1-metabolome', 'metabolome'): True
}
ome_typos_map = create_typos_map(potential_ome_typos, is_typo_subset_or_variant)
replaced = ome_occurrences.term[ome_occurrences.term.isin(ome_typos_map)]
replaced.value_counts()
len(replaced)
ome_occurrences.term = ome_occurrences.term.replace(ome_typos_map)
```
### 1.3 Replace synonymous and narrow terms
```
ome_replacements = {}
```
#### miRNAomics → miRNomics
miRNAome is more popular name for -ome, while miRNomics is more popular for -omics.
```
ome_occurrences.term.value_counts().loc[['mirnome', 'mirnaome']]
```
As I use -omics later on, for consistency I will change miRNAome → miRNome
```
ome_replacements['miRNAome'] = 'miRNome'
```
#### Cancer genome → genome
```
ome_occurrences.term.value_counts().loc[['genome', 'cancer-genome']]
ome_replacements['cancer-genome'] = 'genome'
```
#### Host microbiome → microbiome
```
ome_occurrences.term.value_counts().loc[['microbiome', 'host-microbiome']]
ome_replacements['host-microbiome'] = 'microbiome'
```
#### Replace the values
```
ome_occurrences.term = ome_occurrences.term.replace(
{k.lower(): v.lower() for k, v in ome_replacements.items()}
)
```
### 1.4 Summarise popular \*ome terms
```
ome_counts = ome_occurrences.drop_duplicates(['uid', 'term']).term.sorted_value_counts()
ome_common_counts = ome_counts[ome_counts >= TERM_IN_AT_LEAST_N_ARTICLES]
ome_common_counts
ome_common_terms = Series(ome_common_counts.index)
ome_common_terms[ome_common_terms.str.endswith('some')]
```
### 2. Lookup by omics and adjectives
```
from omics import get_omics_regexp
omics_re = get_omics_regexp()
get_omics_regexp??
check_usage_in_input('integromics')
check_usage_in_input('meta-omics')
check_usage_in_input('post-genomic')
check_usage_in_input('3-omics')
multi_omic = {
'multi-omic',
'muti-omic',
'mutli-omic',
'multiomic',
'cross-omic',
'panomic',
'pan-omic',
'trans-omic',
'transomic',
'four-omic',
'multiple-omic',
'inter-omic',
'poly-omic',
'polyomic',
'integromic',
'integrated-omic',
'integrative-omic',
'3-omic'
}
tools = {
# MixOmics
'mixomic',
# MetaRbolomics
'metarbolomic',
# MinOmics
'minomic',
# LinkedOmics - TCGA portal
'linkedomic',
# Mergeomics - https://doi.org/10.1186/s12864-016-3198-9
'mergeomic'
}
vague = {
'single-omic'
}
adjectives = {
'economic',
'socio-economic',
'socioeconomic',
'taxonomic',
'syndromic',
'non-syndromic',
'agronomic',
'anatomic',
'autonomic',
'atomic',
'palindromic',
# temporal
'postgenomic',
'post-genomic'
}
not_an_omic = {
'non-omic', # this on was straightforward :)
*adjectives,
*multi_omic,
*tools,
*vague
}
omic_occurrences = (
literature['title_abstract_text_subjects'].str.lower()
.str.extractall(omics_re)[0]
.to_frame('term').reset_index()
)
omic_occurrences = omic_occurrences[~omic_occurrences.term.isin(not_an_omic)]
omic_occurrences.head(2)
```
### 2.1 Harmonise hyphenation
```
hyphenation_rules = report_hyphenation_trends(omic_occurrences.term)
hyphenation_rules
omic_occurrences.term = harmonise_hyphenation(omic_occurrences.term, hyphenation_rules)
```
### 2.2 Fix typos
```
omic_counts = omic_occurrences.drop_duplicates(['uid', 'term']).term.sorted_value_counts()
potential_omic_typos = find_term_typos(omic_counts, TERM_IN_AT_LEAST_N_ARTICLES - 1)
potential_omic_typos
check_usage_in_input('non-omic')
check_usage_in_input('C-metabolomics')
```
Not captured in the abstract, but the full text contains 13C (carbon-13), so this is a type of metabolomics.
```
check_usage_in_input('miRNAomics')
check_usage_in_input('miRomics')
check_usage_in_input('MinOmics')
check_usage_in_input('onomic', words=True)
literature.loc[omic_occurrences[omic_occurrences.term == 'onomic'].uid].title_abstract_text_subjects
check_usage_in_input(r'\bonomic', words=False, highlight=' onomic')
check_usage_in_input(' ionomic', words=False)
check_usage_in_input('integratomic', words=False)
```
Note: integratomics has literally three hits in PubMed, two because of http://www.integratomics-time.com/
```
is_typo_subset_or_variant = {
('phoshphoproteomic', 'phosphoproteomic'): True,
('transriptomic', 'transcriptomic'): True,
('transcripomic', 'transcriptomic'): True,
('transciptomic', 'transcriptomic'): True,
('trancriptomic', 'transcriptomic'): True,
('trascriptomic', 'transcriptomic'): True,
('metageonomic', 'metagenomic'): True,
('metaobolomic', 'metabolomic'): True,
('metabotranscriptomic', 'metatranscriptomic'): False,
('mirnaomic', 'mirnomic'): True,
('metranscriptomic', 'metatranscriptomic'): True,
('metranscriptomic', 'transcriptomic'): False,
('miromic', 'mirnomic'): True,
('n-glycoproteomic', 'glycoproteomic'): True,
('onomic', 'ionomic'): False,
('c-metabolomic', 'metabolomic'): True,
('integratomic', 'interactomic'): False,
('pharmacoepigenomic', 'pharmacogenomic'): False,
('metobolomic', 'metabolomic'): True,
# how to treat single-cell?
('scepigenomic', 'epigenomic'): True,
#('epitranscriptomic', 'transcriptomic'): False
('epigenomomic', 'epigenomic'): True,
}
omic_typos_map = create_typos_map(potential_omic_typos, is_typo_subset_or_variant)
replaced = omic_occurrences.term[omic_occurrences.term.isin(omic_typos_map)]
replaced.value_counts()
len(replaced)
omic_occurrences.term = omic_occurrences.term.replace(omic_typos_map)
```
### 2.3 Popular *omic(s) terms:
```
omic_counts = omic_occurrences.drop_duplicates(['uid', 'term']).term.sorted_value_counts()
omic_counts[omic_counts >= TERM_IN_AT_LEAST_N_ARTICLES].add_suffix('s')
```
### Crude overview
```
ome_terms = Series(ome_counts[ome_counts >= TERM_IN_AT_LEAST_N_ARTICLES].index)
omic_terms = Series(omic_counts[omic_counts >= TERM_IN_AT_LEAST_N_ARTICLES].index)
assert omics_features.index.name == 'uid'
for term in ome_terms:
mentioned_by_uid = set(ome_occurrences[ome_occurrences.term == term].uid)
omics_features['mentions_' + term] = omics_features.index.isin(mentioned_by_uid)
for term in omic_terms:
mentioned_by_uid = set(omic_occurrences[omic_occurrences.term == term].uid)
omics_features['mentions_' + term] = omics_features.index.isin(mentioned_by_uid)
from helpers.text_processing import prefix_remover
ome_terms_mentioned = omics_features['mentions_' + ome_terms].rename(columns=prefix_remover('mentions_'))
omic_terms_mentioned = omics_features['mentions_' + omic_terms].rename(columns=prefix_remover('mentions_'))
%R library(ComplexUpset);
%%R -i ome_terms_mentioned -w 800 -r 100
upset(ome_terms_mentioned, colnames(ome_terms_mentioned), min_size=10, width_ratio=0.1)
```
## Merge -ome and -omic terms
```
from warnings import warn
terms_associated_with_omic = {
omic + 's': [omic]
for omic in omic_terms
}
for ome in ome_terms:
assert ome.endswith('ome')
auto_generate_omic_term = ome[:-3] + 'omics'
omic = auto_generate_omic_term
if omic not in terms_associated_with_omic:
if omic in omic_counts.index:
warn(f'{omic} was removed at thresholding, but it is a frequent -ome!')
else:
print(f'Creating omic {omic}')
terms_associated_with_omic[omic] = []
terms_associated_with_omic[omic].append(ome)
from omics import add_entities_to_features
add_entities_to_omic_features = partial(
add_entities_to_features,
features=omics_features,
omics_terms=terms_associated_with_omic
)
omics = {k: [k] for k in terms_associated_with_omic}
add_entities_to_omic_features(omics, entity_type='ome_or_omic')
from omics import omics_by_entity, omics_by_entity_group
```
interactomics is a proper "omics", but it is difficult to assign to a single entity - by definition
```
check_usage_in_input('interactomics')
```
phylogenomics is not an omic on its own, but if used in context of metagenomics it can refer to actual omics data
```
check_usage_in_input('phylogenomics')
```
regulomics is both a name of a tool, group (@MIM UW), and omics:
```
check_usage_in_input('regulomics')
from functools import reduce
omics_mapped_to_entities = reduce(set.union, omics_by_entity.values())
set(terms_associated_with_omic) - omics_mapped_to_entities
assert omics_mapped_to_entities - set(terms_associated_with_omic) == set()
omics_mapped_to_entities_groups = reduce(set.union, omics_by_entity_group.values())
set(terms_associated_with_omic) - omics_mapped_to_entities_groups
add_entities_to_omic_features(omics_by_entity, entity_type='entity')
add_entities_to_omic_features(omics_by_entity_group, entity_type='entity_group')
```
### Visualize the entities & entities groups
```
omic_entities = omics_features['entity_' + Series(list(omics_by_entity.keys()))].rename(columns=prefix_remover('entity_'))
omic_entities_groups = omics_features['entity_group_' + Series(list(omics_by_entity_group.keys()))].rename(columns=prefix_remover('entity_group_'))
%%R -i omic_entities -w 800 -r 100
upset(omic_entities, colnames(omic_entities), min_size=10, width_ratio=0.1)
%%R -i omic_entities_groups -w 800 -r 100
upset(omic_entities_groups, colnames(omic_entities_groups), min_size=10, width_ratio=0.1)
```
### Number of omics mentioned in abstract vs the multi-omic term used
```
omes_or_omics_df = omics_features['ome_or_omic_' + Series(list(omics.keys()))].rename(columns=prefix_remover('ome_or_omic_'))
literature['omic_terms_detected'] = omes_or_omics_df.sum(axis=1)
lt = literature[['term', 'omic_terms_detected']]
literature.sort_values('omic_terms_detected', ascending=False)[['title', 'omic_terms_detected']].head(10)
%%R -i lt -w 800
(
ggplot(lt, aes(x=term, y=omic_terms_detected))
+ geom_violin(adjust=2)
+ geom_point()
+ theme_bw()
)
%vault store omics_features in pubmed_derived_data
```
# Current limitations
## Patchy coverage
Currently, omic-describing terms were detected in fewer than 70% of abstracts:
```
omic_entities.any(axis=1).mean()
```
Potential solution: select a random sample of 50 articles, annotate manually, calculate sensitivity and specificity.
If any omic is consistently omitted, reconsider how search terms are created.
## Apostrophes
Are we missing out on \*'omic terms, such as meta'omic as used [here](https://doi.org/10.1053/j.gastro.2014.01.049)?
```
check_usage_in_input(
r'\w+\'omic',
words=False,
highlight='\'omic'
)
```
unlikely (but would be nice to get it in!)
## Fields of study
```
'genetics', 'epigenetics'
```
Some authors may prefer to say "we integrated genetic and proteomic data" rather than "genomic and proteomic"
|
github_jupyter
|
# [Advent of Code 2019: Day 4](https://adventofcode.com/2019/day/4)
<h2>--- Day 4: Secure Container ---</h2><p>You arrive at the Venus fuel depot only to discover it's protected by a password. The Elves had written the password on a sticky note, but someone <span title="Look on the bright side - isn't it more secure if nobody knows the password?">threw it out</span>.</p>
<p>However, they do remember a few key facts about the password:</p>
<ul>
<li>It is a six-digit number.</li>
<li>The value is within the range given in your puzzle input.</li>
<li>Two adjacent digits are the same (like <code>22</code> in <code>1<em>22</em>345</code>).</li>
<li>Going from left to right, the digits <em>never decrease</em>; they only ever increase or stay the same (like <code>111123</code> or <code>135679</code>).</li>
</ul>
<p>Other than the range rule, the following are true:</p>
<ul>
<li><code>111111</code> meets these criteria (double <code>11</code>, never decreases).</li>
<li><code>2234<em>50</em></code> does not meet these criteria (decreasing pair of digits <code>50</code>).</li>
<li><code>123789</code> does not meet these criteria (no double).</li>
</ul>
<p><em>How many different passwords</em> within the range given in your puzzle input meet these criteria?</p>
```
pass_min = 256310
pass_max = 732736
all_possible_pass = range(pass_min+1, pass_max)
possible_pass = []
def password_criteria1(password):
    """Return True if `password` meets the part-1 criteria.

    Criteria:
    - digits never decrease from left to right, and
    - at least one pair of adjacent digits is identical.
    """
    # Empty-string sentinel: it compares less than every digit and can never
    # equal one.  The previous '0' sentinel wrongly counted a leading zero as
    # a repeated digit (e.g. password_criteria1(0) returned True).
    previous_digit = ''
    has_repeat_digit = False
    for digit in str(password):
        if digit < previous_digit:
            return False
        if digit == previous_digit:
            has_repeat_digit = True
        else:
            previous_digit = digit
    return has_repeat_digit
for password in all_possible_pass:
if password_criteria1(password):
possible_pass.append(password)
print(f'Part one answer: {len(possible_pass)}')
```
<h2 id="part2">--- Part Two ---</h2><p>An Elf just remembered one more important detail: the two adjacent matching digits <em>are not part of a larger group of matching digits</em>.</p>
<p>Given this additional criterion, but still ignoring the range rule, the following are now true:</p>
<ul>
<li><code>112233</code> meets these criteria because the digits never decrease and all repeated digits are exactly two digits long.</li>
<li><code>123<em>444</em></code> no longer meets the criteria (the repeated <code>44</code> is part of a larger group of <code>444</code>).</li>
<li><code>111122</code> meets the criteria (even though <code>1</code> is repeated more than twice, it still contains a double <code>22</code>).</li>
</ul>
<p><em>How many different passwords</em> within the range given in your puzzle input meet all of the criteria?</p>
```
possible_pass = []
def password_criteria2(password):
    """Return True if `password` meets the part-1 AND part-2 criteria.

    Criteria:
    - digits never decrease from left to right, and
    - at least one run of identical adjacent digits has length exactly two
      (a double that is not part of a larger group: 111122 passes via '22',
      123444 fails because '444' is longer than two).
    """
    digits = str(password)
    # Monotonicity: any adjacent decreasing pair disqualifies immediately.
    if any(a > b for a, b in zip(digits, digits[1:])):
        return False
    # groupby collapses runs of equal digits; part 2 requires some run of
    # length exactly 2.  This also covers the run that ends the string (the
    # original hand-rolled loop once forgot that) and avoids the old '0'
    # sentinel, which wrongly treated a leading zero as part of a repeat.
    return any(sum(1 for _ in group) == 2 for _, group in groupby(digits))
for password in all_possible_pass:
if password_criteria2(password):
possible_pass.append(password)
print(f'Part two answer: {len(possible_pass)}')
```
|
github_jupyter
|
```
from django.template import Context
from django.template.base import Token
from django.template.base import Parser
from django.template.base import Template
from django.template.base import TokenType
from django.core.management import call_command
from wagtail_srcset.templatetags.wagtail_srcset_tags import srcset_image
from django.core.files.uploadedfile import SimpleUploadedFile
from wagtail.images.models import Image as WagtailImage
```
# setup db
```
call_command("migrate")  # apply Django migrations so the image tables exist
```
# create image
```
import io
from PIL import Image
def create_small_rgb():
    """Build a tiny solid-red in-memory test image (no file needed)."""
    # NOTE(review): the fill tuple has 4 components for a 3-band RGB image;
    # presumably the trailing 0 (alpha) is ignored -- confirm with Pillow.
    return Image.new('RGB', (200, 200), (255, 0, 0, 0))
def small_jpeg_io():
    """Encode the small RGB test image as an in-memory JPEG stream."""
    buffer = io.BytesIO()
    create_small_rgb().save(
        buffer, format="JPEG", quality=60, optimize=True, progressive=True
    )
    buffer.seek(0)  # rewind so consumers read from the start
    buffer.name = "testimage.jpg"  # some consumers expect a file-like .name
    return buffer
def small_uploaded_file(small_jpeg_io):
    """Wrap the JPEG byte stream in a Django SimpleUploadedFile."""
    # NOTE(review): the payload is JPEG but name/content_type say PNG --
    # looks like deliberate fixture mislabeling, but worth confirming.
    upload = SimpleUploadedFile(
        name="test.png", content=small_jpeg_io.read(), content_type="image/png"
    )
    small_jpeg_io.seek(0)  # rewind the source stream for potential reuse
    return upload
# Wrap the in-memory JPEG as an upload and store it as a Wagtail image.
simple_png = small_uploaded_file(small_jpeg_io())
from django.conf import settings
print(settings.DATABASES)  # sanity check: confirm which database is configured
image = WagtailImage(file=simple_png)
image.save()  # persist the uploaded file through Wagtail's image model
```
# render template
```
# Render the stock {% image %} tag and the srcset variant side by side.
template_text = """
{% load wagtailimages_tags %}
{% load wagtail_srcset_tags %}
{% image img width-300 %}
{% srcset_image img width-300 jpegquality-90 %}
"""
t = Template(template_text)
print(t.render(Context({"img": image})))
# Render the plain wagtail image tag on its own for comparison.
template_text = """
{% load wagtailimages_tags %}
{% image img width-300 %}
"""
t = Template(template_text)
t.render(Context({"img": image}))
# Experiment: build a template token/parser pair by hand.
image_tag = "{% image block.value width-300 %}"
image_tag = "block.value width-300}"  # NOTE(review): overwrites the line above -- leftover experimentation?
token = Token(TokenType.BLOCK, image_tag)
parser = Parser(token.split_contents())
t = Template(template_text)
t.render(Context({}))  # NOTE(review): renders template_text, not the hand-built token -- confirm intent
```
# Get image size in tag
```
from django import template
from django.conf import settings
from wagtail.images.templatetags.wagtailimages_tags import image
register = template.Library()


@register.tag(name="srcset_image2")
def srcset_image(parser, token):
    """Template tag: reuse wagtail's image tag parsing, then attach a
    lazily-resolved srcset attribute to the resulting node."""
    image_node = image(parser, token)
    print(image_node)       # debug: inspect the node while prototyping
    print(dir(image_node))  # debug: list its attributes
    image_node.attrs["srcset"] = SrcSet(image_node)
    return image_node
class SrcSet:
    """Lazily builds a srcset attribute string for a wagtail image node."""

    def __init__(self, image_node):
        self.image_node = image_node
        srcset_attr = image_node.attrs.get("srcset", None)
        # Debug output while experimenting with the tag internals.
        print("image node attrs: ", image_node.attrs)
        print("image node width: ", image_node.attrs.get("width"))
        print("image node filter: ", image_node.filter.operations)
        if srcset_attr is None:
            self.renditions = self.default_renditions
        else:
            self.renditions = self.renditions_from_srcset(srcset_attr.token)

    @property
    def default_renditions(self):
        """Rendition specs used when the template supplies none."""
        if hasattr(settings, "DEFAULT_SRCSET_RENDITIONS"):
            return settings.DEFAULT_SRCSET_RENDITIONS
        return [
            "width-2200|jpegquality-60",
            "width-1100|jpegquality-60",
            "width-768|jpegquality-60",
            "width-500|jpegquality-60",
            "width-300|jpegquality-60",
        ]

    def renditions_from_srcset(self, srcset):
        # Drop surrounding quotes, then split on spaces into rendition specs.
        return srcset.strip('"').strip("'").split(" ")

    def resolve(self, context):
        """Render each rendition and join them into a srcset value."""
        wagtail_image = self.image_node.image_expr.resolve(context)
        entries = []
        for spec in self.renditions:
            rendition = wagtail_image.get_rendition(spec)
            entries.append(f"{rendition.url} {rendition.width}w")
        return ", ".join(entries)
# Try the experimental srcset_image2 tag next to the stock image tag.
template_text = """
{% load wagtailimages_tags %}
{% load wagtail_srcset_tags %}
{% image img width-300 %}
{% srcset_image2 img width-300 %}
"""
t = Template(template_text)
t.render(Context({"img": image}))
```
|
github_jupyter
|
<a href="https://colab.research.google.com/github/kylehounslow/gdg_workshop/blob/master/notebooks/hello_tensorflow.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Hello TensorFlow!
This notebook is a gentle introduction to TensorFlow.
Mostly taken from [here](https://github.com/aymericdamien/TensorFlow-Examples/tree/master/examples)
___
In this notebook we will learn about:
* How to run jupyter notebook cells
* How to build and execute a computational graph in Tensorflow
* How to visualize the computational graph in a notebook cell
```
import numpy as np
import tensorflow as tf
from IPython.display import HTML
# Build a constant op; it is registered as a node on the default graph.
hello = tf.constant('Hello, TensorFlow!')

# In TF1, evaluating an op requires running it inside a session.
with tf.Session() as sess:
    print(sess.run(hello))

# Arithmetic on constant ops, evaluated the same way.
a = tf.constant(7)
b = tf.constant(6)
with tf.Session() as sess:
    addition = sess.run(a + b)
    product = sess.run(a * b)
    print("Addition with constants: %i" % addition)
    print("Multiplication with constants: %i" % product)
```
## Define some helper functions to render the computational graph in a notebook cell
```
def strip_consts(graph_def, max_const_size=32):
    """Return a copy of *graph_def* with oversized Const payloads replaced."""
    stripped = tf.GraphDef()
    for original_node in graph_def.node:
        node = stripped.node.add()
        node.MergeFrom(original_node)
        if node.op != 'Const':
            continue
        tensor = node.attr['value'].tensor
        byte_count = len(tensor.tensor_content)
        if byte_count > max_const_size:
            # Swap the raw bytes for a short placeholder string.
            tensor.tensor_content = tf.compat.as_bytes("<stripped %d bytes>" % byte_count)
    return stripped
def rename_nodes(graph_def, rename_func):
    """Return a copy of *graph_def* with every node name (and each node's
    input names) rewritten through *rename_func*."""
    renamed = tf.GraphDef()
    for original_node in graph_def.node:
        node = renamed.node.add()
        node.MergeFrom(original_node)
        node.name = rename_func(node.name)
        for idx, input_name in enumerate(node.input):
            if input_name[0] == '^':
                # Control-dependency inputs keep their leading caret.
                node.input[idx] = '^' + rename_func(input_name[1:])
            else:
                node.input[idx] = rename_func(input_name)
    return renamed
def show_graph(graph_def, max_const_size=32):
    """Visualize a TensorFlow graph inside a notebook cell via an iframe."""
    if hasattr(graph_def, 'as_graph_def'):
        # Accept a tf.Graph directly as well as a GraphDef proto.
        graph_def = graph_def.as_graph_def()
    strip_def = strip_consts(graph_def, max_const_size=max_const_size)
    code = """
<script>
function load() {{
document.getElementById("{id}").pbtxt = {data};
}}
</script>
<link rel="import" href="https://tensorboard.appspot.com/tf-graph-basic.build.html" onload=load()>
<div style="height:600px">
<tf-graph-basic id="{id}"></tf-graph-basic>
</div>
""".format(data=repr(str(strip_def)), id='graph'+str(np.random.rand()))
    # srcdoc is a double-quoted HTML attribute, so inner double quotes must
    # be entity-escaped. The original replace('"', '"') was a no-op (the
    # &quot; entity was lost in transcription), which breaks the embedded HTML.
    iframe = """
<iframe seamless style="width:800px;height:620px;border:0" srcdoc="{}"></iframe>
""".format(code.replace('"', '&quot;'))
    display(HTML(iframe))
# Render the current default graph (constants and ops defined above).
show_graph(tf.get_default_graph())

# Basic operations with placeholders as graph input; concrete values are
# supplied at run time through feed_dict.
a = tf.placeholder(tf.int16)
b = tf.placeholder(tf.int16)
add = tf.add(a, b)
mul = tf.multiply(a, b)
with tf.Session() as sess:
    print("Addition with variables: %i" % sess.run(add, feed_dict={a: 2, b: 3}))
    print("Multiplication with variables: %i" % sess.run(mul, feed_dict={a: 2, b: 3}))
show_graph(tf.get_default_graph())
# ----------------
# More detail: matrix multiplication, from the official TensorFlow tutorial.
# Constant ops producing a 1x2 and a 2x1 matrix are added to the default
# graph; 'product' represents the output of their matmul op.
matrix1 = tf.constant([[3., 3.]])
matrix2 = tf.constant([[2.],[2.]])
product = tf.matmul(matrix1, matrix2)
# run(product) executes all three ops (two constants plus the matmul);
# inputs run automatically, typically in parallel. The result is returned
# as a numpy ndarray.
with tf.Session() as sess:
    result = sess.run(product)
    print(result)
```
## To reset the graph, use `tf.reset_default_graph()`
```
# Wipe the default graph so the visualization shows only the ops below.
tf.reset_default_graph()
a, b = tf.constant(7), tf.constant(6)
op = tf.add(a, b)
show_graph(tf.get_default_graph())
```
|
github_jupyter
|
# Example PV curve
The purpose of this document is to showcase how a Q-V hysteresis loop can be transformed to a P-E hysteresis loop, as shown in the paper
```
import pair_conformal as pair_conformal
import infinite_fourier as infinite_fourier
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import tqdm
import scipy.optimize
# Interdigitated-electrode (IDE) geometry
t = 0.230 # film thickness, µm
b = 3.5526128590971275 # electrode width, µm
a = 10-b # electrode spacing, µm (electrode period a+b is 10 µm)
N = 100 # number of fingers
L = 900 # finger overlap length, µm
eps_air = 1 # relative dielectric constant of air [-]
eps_sub = 317 # relative dielectric constant of substrate [-]
eps_0 = 8.8541878128*10**-12 # dielectric permittivity of vacuum [F/m]
eta = b/(a+b) # cover fraction: electrode width over period
tau = t/(a+b) # film thickness normalized by the period
# charge in [µC]
Q = [-0.002387185, -0.002369244, -0.002349509, -0.002327839, -0.00230595, -0.002283729, -0.002259291, -0.00223318, -0.002207021, -0.00217887, -0.002148065, -0.002116512, -0.002082226, -0.002045068, -0.002007333, -0.001967078, -0.001924073, -0.00187983, -0.001831545, -0.001776912, -0.001717819, -0.001652946, -0.00158003, -0.001499078, -0.001407161, -0.001296693, -0.001157247, -0.0009688821, -0.0007027968, -0.0003493804, 6.637685E-05, 0.0005045007, 0.0009297, 0.001327835, 0.001694989, 0.002024981, 0.002312988, 0.002544892, 0.002708135, 0.002813413, 0.002884158, 0.002935813, 0.002977405, 0.003014872, 0.003051124, 0.003085805, 0.003118195, 0.003150051, 0.003181595, 0.003211494, 0.003240727, 0.003271036, 0.003300532, 0.003329036, 0.003358051, 0.003386047, 0.003412343, 0.00343873, 0.003465813, 0.003491773, 0.003516818, 0.003542413, 0.003568233, 0.003592727, 0.003616624, 0.003641341, 0.003665666, 0.003689383, 0.00371402, 0.003738411, 0.003761646, 0.00378567, 0.00380854, 0.003830412, 0.003853858, 0.003876645, 0.00389871, 0.00392127, 0.0039428, 0.003964474, 0.003987001, 0.00400799, 0.004029234, 0.004051416, 0.004072585, 0.004094301, 0.004115343, 0.004136069, 0.004156884, 0.004176097, 0.004197029, 0.004218405, 0.004238687, 0.004260382, 0.004282224, 0.004302702, 0.004322752, 0.004343851, 0.004364848, 0.004384621, 0.004389469, 0.00437785, 0.00436301, 0.004347459, 0.004332854, 0.004317561, 0.004301789, 0.004286926, 0.004271545, 0.004255491, 0.004240051, 0.004224759, 0.00420843, 0.004191772, 0.004176366, 0.00416125, 0.004145099, 0.004128415, 0.004112476, 0.004096543, 0.004079259, 0.004062261, 0.004046589, 0.004030942, 0.004014314, 0.003997051, 0.003980191, 0.003963542, 0.003946654, 0.00392912, 0.003911167, 0.003893725, 0.00387713, 0.003860145, 0.003841765, 0.003823862, 0.003806447, 0.003788375, 0.003769961, 0.003752647, 0.003735448, 0.003717227, 0.003700021, 0.003682116, 0.003662905, 0.003644769, 0.003627122, 0.00360835, 0.003589583, 0.00357172, 0.003552353, 0.003532552, 
0.003514034, 0.003494644, 0.003474555, 0.003455356, 0.003436633, 0.003416994, 0.003396764, 0.0033774, 0.003356971, 0.003335312, 0.003315078, 0.00329518, 0.003274194, 0.003253128, 0.003232777, 0.003211828, 0.003189375, 0.003167216, 0.003146184, 0.00312443, 0.00310179, 0.003080117, 0.00305787, 0.003034205, 0.003011555, 0.002989346, 0.002965535, 0.002941398, 0.002918004, 0.00289363, 0.002868321, 0.002843333, 0.002818347, 0.002792202, 0.002766602, 0.002741391, 0.002714635, 0.002687873, 0.002660486, 0.002632803, 0.00260405, 0.002574675, 0.002545929, 0.002515959, 0.002486143, 0.002455284, 0.00242318, 0.002390967, 0.002356995, 0.002322765, 0.002287607, 0.002251305, 0.002215339, 0.002176916, 0.002137335, 0.002096899, 0.002053197, 0.002008466, 0.001963086, 0.001914314, 0.001864452, 0.001813566, 0.001757648, 0.001698819, 0.001635419, 0.001566245, 0.001493, 0.00141098, 0.001318391, 0.001211927, 0.00108212, 0.0009163766, 0.000690813, 0.0003883601, 1.692951E-05, -0.000392243, -0.0007981218, -0.001181059, -0.001535184, -0.001858449, -0.002146879, -0.002386551, -0.002571249, -0.002703419, -0.002794477, -0.002861993, -0.00291466, -0.002959006, -0.002999532, -0.003036268, -0.003071271, -0.0031041, -0.003136221, -0.00316746, -0.003198189, -0.003228038, -0.003256713, -0.003284778, -0.003312415, -0.003340448, -0.003366892, -0.003393282, -0.003418866, -0.003444512, -0.003470579, -0.003495327, -0.003520311, -0.003543997, -0.003567151, -0.003590962, -0.003613797, -0.003637049, -0.003659737, -0.003682343, -0.003704853, -0.003727164, -0.003749261, -0.00377138, -0.003793217, -0.003815038, -0.003836964, -0.003857836, -0.003879352, -0.003900119, -0.003921316, -0.003942682, -0.003962902, -0.003984258, -0.004005409, -0.004026261, -0.004047064, -0.004066859, -0.004086888, -0.004106357, -0.004126163, -0.004146, -0.004166117, -0.004186207, -0.004205392, -0.004225425, -0.004244706, -0.004263909, -0.004284097, -0.004303301, -0.004322754, -0.004342262, -0.004361021, -0.004380661, -0.004384621, 
-0.004372234, -0.004358755, -0.004344455, -0.00433072, -0.004316052, -0.004300078, -0.004284856, -0.00426906, -0.004253508, -0.00423859, -0.004222432, -0.004207311, -0.004192688, -0.004176455, -0.004160648, -0.004145491, -0.004128887, -0.004113142, -0.00409762, -0.004080968, -0.004065461, -0.004049503, -0.004032505, -0.004016477, -0.004000217, -0.003983392, -0.003967661, -0.003951622, -0.003934831, -0.003918328, -0.003901679, -0.003884285, -0.003867609, -0.003851268, -0.003833688, -0.003816898, -0.00380087, -0.003783329, -0.003765523, -0.003748579, -0.003730843, -0.003713503, -0.003696942, -0.003679079, -0.003661704, -0.003644798, -0.003626271, -0.003607254, -0.003589577, -0.003571913, -0.003553253, -0.003535882, -0.003518591, -0.003499268, -0.003479955, -0.003461624, -0.003442215, -0.003422801, -0.003404754, -0.003385643, -0.003366159, -0.003347591, -0.003328024, -0.003307475, -0.003288023, -0.003269004, -0.003248631, -0.003228407, -0.003209213, -0.003188924, -0.003167785, -0.003147349, -0.003126381, -0.003104725, -0.003084507, -0.003064405, -0.003042441, -0.003020605, -0.002998852, -0.002975501, -0.00295198, -0.002929463, -0.00290585, -0.002882403, -0.002859337, -0.002834672, -0.00281071, -0.002785981, -0.002760758, -0.002736262, -0.002710309, -0.002684624, -0.002659622, -0.002632805, -0.002605139, -0.002578091, -0.002550153, -0.002521417, -0.002493659, -0.002476145]
# potential in [V]
V = [0.04437793, 0.1739239, 0.2942994, 0.4361776, 0.5788627, 0.7110827, 0.8366968, 0.9845453, 1.117674, 1.243658, 1.387886, 1.532312, 1.646195, 1.802599, 1.932895, 2.052276, 2.199362, 2.343338, 2.461306, 2.60569, 2.752447, 2.871715, 3.013873, 3.157966, 3.280472, 3.412226, 3.563019, 3.692115, 3.812608, 3.970434, 4.090213, 4.22052, 4.3725, 4.489566, 4.643403, 4.778002, 4.898654, 5.04967, 5.196358, 5.317157, 5.449754, 5.593945, 5.732201, 5.865779, 5.994799, 6.144366, 6.277221, 6.401462, 6.539872, 6.688802, 6.810711, 6.945728, 7.098175, 7.219552, 7.351885, 7.499124, 7.632737, 7.759171, 7.896377, 8.041049, 8.173279, 8.299081, 8.447179, 8.586698, 8.708143, 8.841791, 8.989992, 9.126638, 9.250328, 9.403575, 9.52848, 9.654918, 9.808635, 9.931167, 10.06538, 10.21923, 10.33743, 10.47862, 10.62457, 10.7402, 10.8833, 11.03156, 11.14934, 11.29342, 11.43369, 11.55816, 11.71303, 11.82484, 11.97619, 12.10787, 12.22994, 12.38982, 12.51661, 12.64386, 12.78661, 12.93372, 13.05551, 13.18304, 13.33218, 13.47126, 13.47057, 13.34165, 13.21203, 13.0657, 12.93242, 12.81065, 12.65633, 12.52214, 12.40226, 12.25602, 12.11738, 11.98418, 11.8587, 11.71189, 11.56643, 11.44239, 11.31596, 11.1702, 11.02897, 10.9072, 10.77088, 10.62157, 10.48739, 10.35589, 10.22745, 10.08808, 9.944459, 9.810282, 9.676345, 9.547437, 9.407411, 9.264067, 9.128718, 9.006959, 8.871855, 8.722055, 8.585446, 8.458357, 8.321081, 8.176833, 8.058867, 7.916413, 7.772325, 7.65478, 7.506218, 7.362387, 7.234265, 7.107719, 6.960752, 6.828286, 6.703134, 6.548166, 6.416759, 6.294292, 6.149382, 6.007489, 5.880206, 5.751837, 5.606753, 5.463816, 5.343571, 5.197211, 5.054191, 4.93077, 4.800648, 4.653707, 4.517845, 4.393469, 4.256721, 4.105326, 3.978059, 3.85098, 3.705955, 3.565578, 3.445307, 3.299403, 3.153569, 3.03095, 2.90015, 2.7503, 2.617978, 2.494177, 2.356169, 2.20776, 2.080731, 1.946691, 1.796866, 1.67866, 1.538039, 1.400089, 1.275009, 1.120759, 1.007585, 0.8491522, 0.7261425, 0.5891596, 0.4411228, 0.3306482, 0.172344, 0.04901001, 
-0.09255664, -0.2328214, -0.3526073, -0.5034429, -0.6367069, -0.7543792, -0.9117505, -1.035949, -1.170603, -1.326794, -1.441486, -1.573293, -1.729042, -1.848447, -1.980201, -2.137717, -2.246692, -2.402132, -2.534279, -2.65746, -2.81039, -2.930528, -3.068533, -3.212284, -3.334133, -3.485053, -3.603587, -3.74382, -3.889321, -4.005676, -4.159727, -4.286889, -4.427423, -4.575235, -4.691462, -4.843907, -4.97285, -5.10098, -5.254734, -5.36598, -5.526317, -5.650281, -5.787211, -5.934189, -6.050872, -6.204697, -6.323915, -6.477433, -6.594819, -6.752144, -6.865386, -7.01591, -7.147833, -7.274442, -7.425538, -7.542306, -7.697025, -7.829558, -7.955273, -8.108235, -8.217963, -8.370495, -8.502801, -8.631045, -8.784983, -8.899929, -9.058136, -9.170603, -9.326056, -9.439473, -9.599152, -9.713706, -9.874289, -9.988198, -10.13291, -10.2771, -10.3928, -10.54458, -10.67271, -10.79995, -10.95884, -11.07297, -11.2276, -11.34653, -11.4879, -11.62453, -11.75633, -11.89593, -12.02081, -12.17913, -12.297, -12.43557, -12.58356, -12.69842, -12.84797, -12.99229, -13.10664, -13.26098, -13.38555, -13.52003, -13.54577, -13.39104, -13.27562, -13.12593, -12.99292, -12.86961, -12.71928, -12.57959, -12.46102, -12.30327, -12.18371, -12.05147, -11.89865, -11.78261, -11.64637, -11.49334, -11.36798, -11.23715, -11.08353, -10.97168, -10.82847, -10.68037, -10.56139, -10.41533, -10.27377, -10.15177, -10.00852, -9.867024, -9.747101, -9.605703, -9.460669, -9.33259, -9.19675, -9.043613, -8.926188, -8.793416, -8.643863, -8.51504, -8.393796, -8.240279, -8.103074, -7.981934, -7.82956, -7.704981, -7.576974, -7.419369, -7.29804, -7.167288, -7.022652, -6.875559, -6.758023, -6.619517, -6.473402, -6.350123, -6.221588, -6.066411, -5.931273, -5.812472, -5.659986, -5.526576, -5.405522, -5.256872, -5.117758, -4.996664, -4.854961, -4.706422, -4.588032, -4.455454, -4.308749, -4.169603, -4.045834, -3.900617, -3.759233, -3.637866, -3.498157, -3.352002, -3.232586, -3.096836, -2.94648, -2.819279, -2.686219, -2.541309, 
-2.404865, -2.279498, -2.126449, -2.010972, -1.869708, -1.72437, -1.607475, -1.450987, -1.32675, -1.196558, -1.045765, -0.9183388, -0.7919357, -0.6417174, -0.504509, -0.3828973, -0.237842, -0.09474963, -0.04021149]
# Plot the measured charge-voltage (Q-V) hysteresis loop in black.
fig,ax=plt.subplots()
ax.plot(V,Q,'k')
ax.set_ylabel(r'$Q$ [µC]')
ax.set_xlabel(r'$V$ [V]')
```
In order to subtract the dielectric contribution from the substrate and air, the dielectric constant of the film should be known as a function of the field applied to the electrodes.
We follow the approach of the 'Example_CV_curve' notebook, and make the function V_to_eps()
```
# capacitance in [F]
C=[1.428721E-10, 1.429671E-10, 1.456579E-10, 1.42468E-10, 1.460786E-10, 1.446451E-10, 1.461049E-10, 1.431395E-10, 1.463592E-10, 1.439421E-10, 1.471552E-10, 1.459837E-10, 1.470516E-10, 1.472527E-10, 1.468963E-10, 1.45112E-10, 1.470788E-10, 1.498311E-10, 1.475891E-10, 1.488992E-10, 1.482823E-10, 1.490999E-10, 1.490423E-10, 1.493171E-10, 1.487169E-10, 1.52244E-10, 1.49938E-10, 1.524042E-10, 1.503689E-10, 1.525724E-10, 1.507887E-10, 1.517274E-10, 1.51396E-10, 1.545277E-10, 1.523037E-10, 1.552043E-10, 1.537927E-10, 1.559966E-10, 1.534998E-10, 1.546678E-10, 1.56135E-10, 1.5839E-10, 1.553621E-10, 1.590284E-10, 1.577874E-10, 1.585799E-10, 1.58952E-10, 1.578791E-10, 1.60478E-10, 1.590341E-10, 1.621604E-10, 1.607127E-10, 1.619988E-10, 1.625879E-10, 1.632023E-10, 1.629058E-10, 1.670639E-10, 1.648712E-10, 1.680846E-10, 1.668316E-10, 1.669878E-10, 1.679075E-10, 1.66629E-10, 1.678708E-10, 1.665938E-10, 1.667221E-10, 1.665966E-10, 1.67136E-10, 1.644409E-10, 1.634124E-10, 1.62773E-10, 1.629638E-10, 1.604434E-10, 1.603101E-10, 1.580713E-10, 1.570457E-10, 1.551462E-10, 1.565918E-10, 1.540866E-10, 1.544135E-10, 1.516288E-10, 1.519453E-10, 1.486024E-10, 1.483967E-10, 1.459251E-10, 1.467474E-10, 1.446417E-10, 1.447932E-10, 1.442392E-10, 1.427798E-10, 1.407578E-10, 1.409972E-10, 1.405182E-10, 1.408034E-10, 1.384185E-10, 1.397462E-10, 1.385293E-10, 1.374531E-10, 1.368658E-10, 1.38535E-10, 1.364199E-10, 1.394704E-10, 1.34885E-10, 1.403326E-10, 1.374839E-10, 1.383247E-10, 1.346173E-10, 1.370586E-10, 1.359852E-10, 1.361504E-10, 1.349626E-10, 1.36789E-10, 1.346052E-10, 1.349358E-10, 1.332896E-10, 1.354997E-10, 1.344141E-10, 1.347519E-10, 1.342153E-10, 1.35133E-10, 1.321496E-10, 1.326097E-10, 1.321919E-10, 1.324373E-10, 1.321308E-10, 1.318906E-10, 1.306188E-10, 1.328086E-10, 1.298423E-10, 1.308546E-10, 1.304036E-10, 1.320288E-10, 1.307834E-10, 1.321233E-10, 1.299351E-10, 1.323872E-10, 1.282756E-10, 1.291338E-10, 1.288475E-10, 1.303299E-10, 1.293892E-10, 1.306845E-10, 1.272201E-10, 
1.299654E-10, 1.28993E-10, 1.282188E-10, 1.301725E-10, 1.280745E-10, 1.300307E-10, 1.281592E-10, 1.276642E-10, 1.270934E-10, 1.277682E-10, 1.260331E-10, 1.283642E-10, 1.266525E-10, 1.274558E-10, 1.270772E-10, 1.269208E-10, 1.257932E-10, 1.251995E-10, 1.263143E-10, 1.258134E-10, 1.259102E-10, 1.282777E-10, 1.257423E-10, 1.265298E-10, 1.252837E-10, 1.248545E-10, 1.263677E-10, 1.232039E-10, 1.26069E-10, 1.251772E-10, 1.261557E-10, 1.233324E-10, 1.245177E-10, 1.23786E-10, 1.242947E-10, 1.242804E-10, 1.265923E-10, 1.244359E-10, 1.248464E-10, 1.234826E-10, 1.227252E-10, 1.242627E-10, 1.219141E-10, 1.232067E-10, 1.223098E-10, 1.237167E-10, 1.220581E-10, 1.22659E-10, 1.238581E-10, 1.239247E-10, 1.231712E-10, 1.234949E-10, 1.24836E-10, 1.22424E-10, 1.224423E-10, 1.215715E-10, 1.233904E-10, 1.207226E-10, 1.231654E-10, 1.217444E-10, 1.231121E-10, 1.211624E-10, 1.216655E-10, 1.223138E-10, 1.21632E-10, 1.217327E-10, 1.221195E-10, 1.215047E-10, 1.203513E-10, 1.219082E-10, 1.198786E-10, 1.22356E-10, 1.195481E-10, 1.214618E-10, 1.201914E-10, 1.211654E-10, 1.194529E-10, 1.208156E-10, 1.20112E-10, 1.206554E-10, 1.19269E-10, 1.204647E-10, 1.201762E-10, 1.210913E-10, 1.191995E-10, 1.191091E-10, 1.208559E-10, 1.192752E-10, 1.194189E-10, 1.189132E-10, 1.201548E-10, 1.174888E-10, 1.194773E-10, 1.17665E-10, 1.188119E-10, 1.184596E-10, 1.194681E-10, 1.195105E-10, 1.199525E-10, 1.190634E-10, 1.170919E-10, 1.197311E-10, 1.173843E-10, 1.194264E-10, 1.179738E-10, 1.186101E-10, 1.1739E-10, 1.17134E-10, 1.172322E-10, 1.178816E-10, 1.185782E-10, 1.179067E-10, 1.175537E-10, 1.171477E-10, 1.184548E-10, 1.176971E-10, 1.19847E-10, 1.181765E-10, 1.186303E-10, 1.190044E-10, 1.181325E-10, 1.180539E-10, 1.180162E-10, 1.194967E-10, 1.178069E-10, 1.190385E-10, 1.183011E-10, 1.190183E-10, 1.187815E-10, 1.179626E-10, 1.194848E-10, 1.184054E-10, 1.201075E-10, 1.19958E-10, 1.194483E-10, 1.194521E-10, 1.183017E-10, 1.196058E-10, 1.189984E-10, 1.202177E-10, 1.184128E-10, 1.216556E-10, 1.194658E-10, 1.191103E-10, 
1.204886E-10, 1.187929E-10, 1.199223E-10, 1.188856E-10, 1.220987E-10, 1.195576E-10, 1.192179E-10, 1.193729E-10, 1.197234E-10, 1.214019E-10, 1.202983E-10, 1.219464E-10, 1.201709E-10, 1.205171E-10, 1.202237E-10, 1.222552E-10, 1.203094E-10, 1.204452E-10, 1.222237E-10, 1.208789E-10, 1.222743E-10, 1.222312E-10, 1.214719E-10, 1.212069E-10, 1.196694E-10, 1.21599E-10, 1.221965E-10, 1.196936E-10, 1.223418E-10, 1.209638E-10, 1.21349E-10, 1.221708E-10, 1.21822E-10, 1.224692E-10, 1.216677E-10, 1.227133E-10, 1.215861E-10, 1.220879E-10, 1.231638E-10, 1.214378E-10, 1.222153E-10, 1.225601E-10, 1.23204E-10, 1.233956E-10, 1.219813E-10, 1.248303E-10, 1.217187E-10, 1.235352E-10, 1.223645E-10, 1.243875E-10, 1.236328E-10, 1.237825E-10, 1.236018E-10, 1.229477E-10, 1.241237E-10, 1.232171E-10, 1.231176E-10, 1.24429E-10, 1.228869E-10, 1.239139E-10, 1.237866E-10, 1.237431E-10, 1.229345E-10, 1.241784E-10, 1.247035E-10, 1.239413E-10, 1.260249E-10, 1.242357E-10, 1.255621E-10, 1.238913E-10, 1.250542E-10, 1.263224E-10, 1.246612E-10, 1.258194E-10, 1.252038E-10, 1.258805E-10, 1.244981E-10, 1.256105E-10, 1.273458E-10, 1.25373E-10, 1.270745E-10, 1.26058E-10, 1.260408E-10, 1.254987E-10, 1.253545E-10, 1.272783E-10, 1.259068E-10, 1.26717E-10, 1.26604E-10, 1.273636E-10, 1.24815E-10, 1.269945E-10, 1.265434E-10, 1.267167E-10, 1.272067E-10, 1.270353E-10, 1.278764E-10, 1.267783E-10, 1.264522E-10, 1.267218E-10, 1.288877E-10, 1.270128E-10, 1.290969E-10, 1.276366E-10, 1.277471E-10, 1.270148E-10, 1.27719E-10, 1.278782E-10, 1.291366E-10, 1.287927E-10, 1.294219E-10, 1.298759E-10, 1.282135E-10, 1.286388E-10, 1.281722E-10, 1.297523E-10, 1.290929E-10, 1.279392E-10, 1.293432E-10, 1.290771E-10, 1.302782E-10, 1.293932E-10, 1.291463E-10, 1.282145E-10, 1.29826E-10, 1.316245E-10, 1.301372E-10, 1.312541E-10, 1.302919E-10, 1.314303E-10, 1.297458E-10, 1.313611E-10, 1.322797E-10, 1.322087E-10, 1.312691E-10, 1.309969E-10, 1.318371E-10, 1.300058E-10, 1.319758E-10, 1.318336E-10, 1.343787E-10, 1.32808E-10, 1.337327E-10, 
1.319999E-10, 1.319136E-10, 1.30856E-10, 1.323935E-10, 1.323159E-10, 1.324899E-10, 1.331841E-10, 1.328379E-10, 1.336677E-10, 1.328461E-10, 1.345795E-10, 1.330804E-10, 1.354537E-10, 1.340898E-10, 1.345113E-10, 1.34861E-10, 1.349088E-10, 1.351589E-10, 1.349915E-10, 1.36734E-10, 1.350067E-10, 1.349123E-10, 1.343106E-10, 1.369865E-10, 1.357753E-10, 1.362019E-10, 1.368768E-10, 1.364561E-10, 1.36423E-10, 1.352735E-10, 1.374813E-10, 1.352082E-10, 1.369885E-10, 1.37419E-10, 1.373838E-10, 1.389009E-10, 1.369029E-10, 1.385697E-10, 1.375682E-10, 1.385671E-10, 1.375384E-10, 1.398098E-10, 1.390579E-10, 1.39294E-10, 1.39684E-10, 1.402992E-10, 1.402546E-10, 1.382871E-10, 1.404137E-10, 1.4E-10, 1.404329E-10, 1.402902E-10, 1.41171E-10, 1.416159E-10, 1.399021E-10, 1.421314E-10, 1.411536E-10, 1.424295E-10, 1.418465E-10, 1.445525E-10, 1.431039E-10, 1.408782E-10, 1.44329E-10, 1.434428E-10, 1.455031E-10, 1.434753E-10, 1.452175E-10, 1.447142E-10, 1.435076E-10, 1.442E-10, 1.449591E-10, 1.469557E-10, 1.450768E-10, 1.475219E-10, 1.461938E-10, 1.462057E-10, 1.456248E-10, 1.462486E-10, 1.479291E-10, 1.468286E-10, 1.489587E-10, 1.474222E-10, 1.486653E-10, 1.471242E-10, 1.486498E-10, 1.481523E-10, 1.482389E-10, 1.50632E-10, 1.498335E-10, 1.505008E-10, 1.488144E-10, 1.519302E-10, 1.508203E-10, 1.511956E-10, 1.513241E-10, 1.512755E-10, 1.534453E-10, 1.535899E-10, 1.529881E-10, 1.526742E-10, 1.548066E-10, 1.533232E-10, 1.552791E-10, 1.545258E-10, 1.579156E-10, 1.547743E-10, 1.572102E-10, 1.566178E-10, 1.571229E-10, 1.570229E-10, 1.582999E-10, 1.591891E-10, 1.578437E-10, 1.5837E-10, 1.591771E-10, 1.607237E-10, 1.579443E-10, 1.604031E-10, 1.634649E-10, 1.606308E-10, 1.63667E-10, 1.631409E-10, 1.637429E-10, 1.616804E-10, 1.638019E-10, 1.662795E-10, 1.633521E-10, 1.664587E-10, 1.665746E-10, 1.688786E-10, 1.659572E-10, 1.667493E-10, 1.69185E-10, 1.692778E-10, 1.700613E-10, 1.684758E-10, 1.691753E-10, 1.660604E-10, 1.64958E-10, 1.649965E-10, 1.643371E-10, 1.635227E-10, 1.607512E-10, 1.631345E-10, 
1.587088E-10, 1.582055E-10, 1.576575E-10, 1.584195E-10, 1.550761E-10, 1.545665E-10, 1.548717E-10, 1.524997E-10, 1.513253E-10, 1.489942E-10, 1.491857E-10, 1.459719E-10, 1.466107E-10, 1.45585E-10, 1.450864E-10, 1.424654E-10, 1.424399E-10, 1.417381E-10, 1.39913E-10, 1.406248E-10, 1.397817E-10, 1.408002E-10, 1.393248E-10, 1.398172E-10, 1.384365E-10, 1.375876E-10, 1.379492E-10, 1.367683E-10, 1.375969E-10, 1.3668E-10, 1.373079E-10, 1.356807E-10, 1.366002E-10, 1.344991E-10, 1.357231E-10, 1.337806E-10, 1.372409E-10, 1.334823E-10, 1.345474E-10, 1.341379E-10, 1.328704E-10, 1.328098E-10, 1.31878E-10, 1.331996E-10, 1.324442E-10, 1.31565E-10, 1.319125E-10, 1.32202E-10, 1.326845E-10, 1.307521E-10, 1.326938E-10, 1.300787E-10, 1.309132E-10, 1.295855E-10, 1.304956E-10, 1.300937E-10, 1.292458E-10, 1.307986E-10, 1.289652E-10, 1.286655E-10, 1.290608E-10, 1.296629E-10, 1.287674E-10, 1.274861E-10, 1.299446E-10, 1.296159E-10, 1.279053E-10, 1.297014E-10, 1.278301E-10, 1.281703E-10, 1.266053E-10, 1.287248E-10, 1.264719E-10, 1.273514E-10, 1.274536E-10, 1.269692E-10, 1.273148E-10, 1.255489E-10, 1.279162E-10, 1.256964E-10, 1.261561E-10, 1.269629E-10, 1.250016E-10, 1.259307E-10, 1.241788E-10, 1.257906E-10, 1.250511E-10, 1.247705E-10, 1.266612E-10, 1.25039E-10, 1.244062E-10, 1.248842E-10, 1.247148E-10, 1.238486E-10, 1.228222E-10, 1.255463E-10, 1.247814E-10, 1.243626E-10, 1.237665E-10, 1.246724E-10, 1.238904E-10, 1.230388E-10, 1.23642E-10, 1.224003E-10, 1.222853E-10, 1.229146E-10, 1.219639E-10, 1.228983E-10, 1.213694E-10, 1.226758E-10, 1.232989E-10, 1.225779E-10, 1.224417E-10, 1.226791E-10, 1.226576E-10, 1.206459E-10, 1.219063E-10, 1.211855E-10, 1.228589E-10, 1.218584E-10, 1.216945E-10, 1.223193E-10, 1.190148E-10, 1.214944E-10, 1.21628E-10, 1.2092E-10, 1.214292E-10, 1.215809E-10, 1.214492E-10, 1.208093E-10, 1.201614E-10, 1.198732E-10, 1.190848E-10, 1.215263E-10, 1.206319E-10, 1.194802E-10, 1.19245E-10, 1.199772E-10, 1.199073E-10, 1.196559E-10, 1.191041E-10, 1.209232E-10, 1.18883E-10, 
1.200479E-10, 1.20238E-10, 1.20357E-10, 1.178351E-10, 1.188417E-10, 1.197694E-10, 1.174261E-10, 1.191596E-10, 1.19546E-10, 1.18227E-10, 1.177878E-10, 1.178595E-10, 1.187553E-10, 1.182249E-10, 1.182545E-10, 1.196385E-10, 1.182275E-10, 1.173036E-10, 1.171271E-10, 1.176164E-10, 1.16336E-10, 1.172684E-10, 1.181993E-10, 1.158593E-10, 1.184176E-10, 1.167167E-10, 1.175387E-10, 1.166047E-10, 1.179134E-10, 1.192259E-10, 1.157749E-10, 1.176932E-10, 1.175949E-10, 1.160609E-10, 1.178535E-10, 1.169975E-10, 1.186814E-10, 1.176641E-10, 1.17394E-10, 1.181469E-10, 1.17172E-10, 1.173988E-10, 1.176118E-10, 1.175405E-10, 1.17021E-10, 1.169642E-10, 1.177946E-10, 1.168386E-10, 1.17849E-10, 1.187442E-10, 1.181597E-10, 1.19315E-10, 1.178069E-10, 1.188536E-10, 1.173716E-10, 1.171542E-10, 1.183029E-10, 1.175457E-10, 1.177983E-10, 1.181589E-10, 1.183508E-10, 1.180423E-10, 1.178909E-10, 1.191793E-10, 1.190843E-10, 1.194647E-10, 1.192538E-10, 1.196658E-10, 1.190537E-10, 1.185755E-10, 1.19343E-10, 1.193129E-10, 1.19667E-10, 1.206632E-10, 1.191684E-10, 1.19486E-10, 1.184836E-10, 1.197159E-10, 1.180492E-10, 1.193817E-10, 1.197055E-10, 1.201422E-10, 1.199055E-10, 1.201559E-10, 1.190369E-10, 1.211554E-10, 1.181058E-10, 1.213385E-10, 1.209126E-10, 1.201931E-10, 1.215587E-10, 1.218929E-10, 1.199184E-10, 1.206235E-10, 1.20644E-10, 1.206732E-10, 1.204866E-10, 1.217333E-10, 1.20814E-10, 1.205608E-10, 1.205133E-10, 1.203666E-10, 1.213848E-10, 1.203342E-10, 1.208187E-10, 1.21343E-10, 1.203883E-10, 1.210015E-10, 1.216253E-10, 1.219758E-10, 1.216929E-10, 1.227504E-10, 1.232504E-10, 1.218303E-10, 1.220115E-10, 1.214376E-10, 1.220161E-10, 1.2192E-10, 1.230869E-10, 1.236932E-10, 1.220605E-10, 1.226611E-10, 1.229358E-10, 1.209405E-10, 1.230221E-10, 1.234145E-10, 1.232747E-10, 1.244998E-10, 1.229632E-10, 1.228223E-10, 1.22294E-10, 1.239279E-10, 1.226526E-10, 1.239677E-10, 1.234594E-10, 1.241096E-10, 1.243929E-10, 1.214138E-10, 1.249787E-10, 1.236943E-10, 1.223724E-10, 1.240914E-10, 1.247876E-10, 1.247921E-10, 
1.241966E-10, 1.248835E-10, 1.241835E-10, 1.237608E-10, 1.251001E-10, 1.24361E-10, 1.255462E-10, 1.24335E-10, 1.2568E-10, 1.254916E-10, 1.241557E-10, 1.250553E-10, 1.254905E-10, 1.262875E-10, 1.262949E-10, 1.264549E-10, 1.264461E-10, 1.254883E-10, 1.270468E-10, 1.255024E-10, 1.267051E-10, 1.253347E-10, 1.268411E-10, 1.269329E-10, 1.256907E-10, 1.26421E-10, 1.267391E-10, 1.268496E-10, 1.256197E-10, 1.281011E-10, 1.269234E-10, 1.279229E-10, 1.265155E-10, 1.281218E-10, 1.2799E-10, 1.283278E-10, 1.283215E-10, 1.283213E-10, 1.289431E-10, 1.288066E-10, 1.278198E-10, 1.286094E-10, 1.297161E-10, 1.282801E-10, 1.274884E-10, 1.284096E-10, 1.302183E-10, 1.277342E-10, 1.271422E-10, 1.302213E-10, 1.284466E-10, 1.290159E-10, 1.299743E-10, 1.301856E-10, 1.28916E-10, 1.293691E-10, 1.307307E-10, 1.303285E-10, 1.300997E-10, 1.310647E-10, 1.321416E-10, 1.317851E-10, 1.309396E-10, 1.31486E-10, 1.312269E-10, 1.310693E-10, 1.321261E-10, 1.311203E-10, 1.304322E-10, 1.314821E-10, 1.322425E-10, 1.316547E-10, 1.324183E-10, 1.31749E-10, 1.336512E-10, 1.324706E-10, 1.336776E-10, 1.340733E-10, 1.325697E-10, 1.330598E-10, 1.332061E-10, 1.346693E-10, 1.33342E-10, 1.328168E-10, 1.338396E-10, 1.333693E-10, 1.349823E-10, 1.334637E-10, 1.363382E-10, 1.353431E-10, 1.347797E-10, 1.355181E-10, 1.351888E-10, 1.343639E-10, 1.349301E-10, 1.367382E-10, 1.356508E-10, 1.358091E-10, 1.372926E-10, 1.364726E-10, 1.355097E-10, 1.343606E-10, 1.385406E-10, 1.36499E-10, 1.357648E-10, 1.383952E-10, 1.364459E-10, 1.369492E-10, 1.381099E-10, 1.379858E-10, 1.388637E-10, 1.389333E-10, 1.411425E-10, 1.388296E-10, 1.38931E-10, 1.387482E-10, 1.39394E-10, 1.388384E-10, 1.395211E-10, 1.409691E-10, 1.408148E-10, 1.396364E-10, 1.415756E-10, 1.395477E-10, 1.392756E-10, 1.400201E-10, 1.426077E-10, 1.415948E-10, 1.414231E-10, 1.431624E-10, 1.417033E-10, 1.419081E-10, 1.423782E-10, 1.423188E-10, 1.438651E-10, 1.421889E-10, 1.457114E-10, 1.451269E-10, 1.430276E-10]
# potential in [V]
V_for_C=[0.01191011, 0.06739906, 0.1201201, 0.1759969, 0.2295195, 0.2840591, 0.3374494, 0.3918903, 0.4462339, 0.5007762, 0.5535439, 0.6091848, 0.6625938, 0.7165433, 0.7704356, 0.8254675, 0.8797089, 0.9330992, 0.9881527, 1.042617, 1.096388, 1.150839, 1.204423, 1.260016, 1.313489, 1.368334, 1.421432, 1.475078, 1.530656, 1.584063, 1.638988, 1.69438, 1.747316, 1.8024, 1.85547, 1.910495, 1.964437, 2.018809, 2.073711, 2.127366, 2.180579, 2.235825, 2.28984, 2.344729, 2.398085, 2.453089, 2.507114, 2.560865, 2.615914, 2.669687, 2.724157, 2.77851, 2.832239, 2.887692, 2.939321, 2.995628, 3.049465, 3.103488, 3.158154, 3.212128, 3.266889, 3.320622, 3.374575, 3.42943, 3.48325, 3.53935, 3.59272, 3.646377, 3.70198, 3.755396, 3.80976, 3.864622, 3.919071, 3.973298, 4.026449, 4.082187, 4.135396, 4.190305, 4.244122, 4.297615, 4.353129, 4.404792, 4.460375, 4.514986, 4.569502, 4.623884, 4.677803, 4.733296, 4.786708, 4.840779, 4.894762, 4.94942, 5.0046, 5.057308, 5.110567, 5.166449, 5.220076, 5.274992, 5.328375, 5.383769, 5.437879, 5.491142, 5.545815, 5.599965, 5.654662, 5.708647, 5.762293, 5.817238, 5.870321, 5.925716, 5.980515, 6.034531, 6.088582, 6.142398, 6.197326, 6.251199, 6.30502, 6.359588, 6.414106, 6.468644, 6.522204, 6.575385, 6.631247, 6.684447, 6.740004, 6.793278, 6.848399, 6.902913, 6.95617, 7.0119, 7.065376, 7.119857, 7.174293, 7.228476, 7.282898, 7.334863, 7.390632, 7.444824, 7.498833, 7.553909, 7.607858, 7.661393, 7.716085, 7.77018, 7.825095, 7.878773, 7.933629, 7.987752, 8.039755, 8.095821, 8.149455, 8.204766, 8.258869, 8.312849, 8.367912, 8.421227, 8.475548, 8.529559, 8.583943, 8.639063, 8.692272, 8.747465, 8.799543, 8.855269, 8.910067, 8.962831, 9.017801, 9.072355, 9.126358, 9.180974, 9.234551, 9.289795, 9.343569, 9.398029, 9.452306, 9.50483, 9.56109, 9.614802, 9.668894, 9.723388, 9.777187, 9.831928, 9.885862, 9.939739, 9.994928, 10.04863, 10.10417, 10.15775, 10.21225, 10.26612, 10.32052, 10.3758, 10.42966, 10.48455, 10.53756, 10.59178, 10.6467, 10.69998, 10.75492, 
10.80882, 10.8634, 10.91807, 10.97017, 11.02613, 11.08098, 11.13471, 11.18958, 11.24336, 11.29839, 11.35147, 11.40594, 11.46018, 11.51404, 11.56901, 11.62311, 11.67725, 11.73093, 11.78518, 11.83981, 11.89379, 11.9481, 12.0022, 12.05692, 12.11177, 12.16554, 12.21942, 12.27424, 12.3273, 12.38287, 12.43502, 12.49147, 12.5451, 12.59938, 12.65407, 12.70835, 12.76269, 12.81708, 12.87082, 12.92595, 12.97889, 13.03439, 13.0882, 13.141, 13.19476, 13.25067, 13.30513, 13.35884, 13.413, 13.46876, 13.52237, 13.52231, 13.468, 13.41405, 13.35868, 13.30473, 13.25048, 13.20038, 13.1414, 13.08778, 13.03315, 12.98025, 12.92488, 12.87143, 12.81651, 12.76204, 12.70817, 12.65345, 12.59898, 12.54486, 12.49072, 12.44081, 12.38174, 12.32798, 12.27312, 12.21972, 12.16557, 12.11056, 12.057, 12.00273, 11.94757, 11.89427, 11.83925, 11.7863, 11.73558, 11.67737, 11.62334, 11.56801, 11.51407, 11.46015, 11.40599, 11.35149, 11.29647, 11.24337, 11.18907, 11.13477, 11.08011, 11.02584, 10.97636, 10.91769, 10.86329, 10.80905, 10.75433, 10.70058, 10.64594, 10.59229, 10.53778, 10.48349, 10.43022, 10.37578, 10.32224, 10.27123, 10.21204, 10.15841, 10.10332, 10.0487, 9.994202, 9.940793, 9.886208, 9.831479, 9.777735, 9.723001, 9.66848, 9.615495, 9.560504, 9.51065, 9.451662, 9.397901, 9.343657, 9.288941, 9.235573, 9.180358, 9.126715, 9.07256, 9.017631, 8.964513, 8.909222, 8.855954, 8.805246, 8.746452, 8.692693, 8.637917, 8.584547, 8.529857, 8.475002, 8.421703, 8.365972, 8.313353, 8.258323, 8.203183, 8.149745, 8.095141, 8.045647, 7.98688, 7.932303, 7.879701, 7.824194, 7.771094, 7.715513, 7.661452, 7.607806, 7.552743, 7.499367, 7.444696, 7.390461, 7.340446, 7.282408, 7.229108, 7.173337, 7.119409, 7.065888, 7.010467, 6.95742, 6.902437, 6.848364, 6.793663, 6.739047, 6.685345, 6.630303, 6.580764, 6.521909, 6.467827, 6.414277, 6.359096, 6.305518, 6.251251, 6.196578, 6.142967, 6.088358, 6.034603, 5.979641, 5.925657, 5.875819, 5.816795, 5.762847, 5.708324, 5.65356, 5.600811, 5.544879, 5.492324, 5.437263, 5.382861, 
5.329098, 5.273652, 5.220762, 5.166106, 5.115547, 5.058087, 5.002563, 4.94906, 4.894761, 4.841048, 4.786169, 4.731363, 4.678175, 4.623295, 4.569555, 4.515054, 4.459955, 4.410748, 4.352152, 4.298113, 4.243972, 4.188936, 4.135714, 4.081049, 4.02706, 3.97232, 3.918532, 3.86402, 3.809766, 3.756411, 3.701824, 3.65126, 3.592932, 3.538192, 3.484104, 3.428653, 3.375171, 3.320953, 3.266038, 3.212028, 3.157657, 3.103818, 3.049781, 2.994385, 2.945776, 2.886736, 2.832584, 2.777942, 2.723931, 2.669719, 2.615535, 2.561099, 2.507411, 2.452189, 2.398422, 2.343653, 2.290297, 2.235892, 2.186057, 2.127272, 2.072822, 2.019279, 1.964853, 1.909774, 1.856421, 1.801484, 1.748288, 1.693451, 1.638645, 1.584336, 1.530479, 1.480425, 1.421528, 1.368038, 1.314231, 1.258353, 1.206551, 1.150405, 1.096059, 1.041813, 0.9876209, 0.9335759, 0.8793103, 0.8252564, 0.7711366, 0.7208143, 0.6634691, 0.60781, 0.5541749, 0.5001592, 0.4462102, 0.3922565, 0.3370365, 0.2835259, 0.2299835, 0.1748018, 0.1212871, 0.06608507, 0.01706269, -0.04278337, -0.09608474, -0.1501069, -0.2049531, -0.2591379, -0.3128958, -0.3676249, -0.4207624, -0.4761039, -0.529731, -0.5839917, -0.6380167, -0.6931398, -0.7432034, -0.8008682, -0.8557751, -0.9104832, -0.9638286, -1.018817, -1.072178, -1.127392, -1.180659, -1.23547, -1.289606, -1.342982, -1.398579, -1.452242, -1.502133, -1.561502, -1.614794, -1.669877, -1.723134, -1.778362, -1.832346, -1.886201, -1.94017, -1.995302, -2.048746, -2.103518, -2.156412, -2.208521, -2.265952, -2.320601, -2.374912, -2.428579, -2.482548, -2.536872, -2.59143, -2.645834, -2.699263, -2.754478, -2.807458, -2.86316, -2.916692, -2.967284, -3.025887, -3.080168, -3.134965, -3.189729, -3.242755, -3.298487, -3.35196, -3.407055, -3.46049, -3.515166, -3.568669, -3.622741, -3.672946, -3.731996, -3.785404, -3.840461, -3.893523, -3.94929, -4.002339, -4.057425, -4.112042, -4.166074, -4.220563, -4.273969, -4.328477, -4.383264, -4.432277, -4.491757, -4.544897, -4.60043, -4.65337, -4.708736, -4.762434, -4.816083, 
-4.870473, -4.926074, -4.978502, -5.034609, -5.087198, -5.138796, -5.196416, -5.25117, -5.305133, -5.359031, -5.41343, -5.468124, -5.521652, -5.577043, -5.630259, -5.685618, -5.738328, -5.793639, -5.847428, -5.897664, -5.956308, -6.010489, -6.063376, -6.119191, -6.171927, -6.227531, -6.28102, -6.33601, -6.390467, -6.445347, -6.498667, -6.553333, -6.603573, -6.662948, -6.714832, -6.771336, -6.822889, -6.879321, -6.931473, -6.98693, -7.041861, -7.096051, -7.148703, -7.204553, -7.257607, -7.31354, -7.361538, -7.421769, -7.474513, -7.529566, -7.583166, -7.638109, -7.691708, -7.747105, -7.79931, -7.855292, -7.90823, -7.964141, -8.016598, -8.067803, -8.12613, -8.180237, -8.234504, -8.289588, -8.34299, -8.398144, -8.451004, -8.506999, -8.559003, -8.614864, -8.668035, -8.723178, -8.776966, -8.827826, -8.885082, -8.940835, -8.993632, -9.04905, -9.101123, -9.157326, -9.210323, -9.266196, -9.319288, -9.373612, -9.427997, -9.482795, -9.531478, -9.591477, -9.645019, -9.701389, -9.753842, -9.809556, -9.862202, -9.917953, -9.971086, -10.02607, -10.0791, -10.13497, -10.18719, -10.24339, -10.29171, -10.35205, -10.40512, -10.46004, -10.51418, -10.56813, -10.62231, -10.67765, -10.72971, -10.78638, -10.83821, -10.8944, -10.94731, -10.99842, -11.056, -11.11083, -11.16441, -11.22007, -11.27263, -11.32843, -11.38045, -11.43691, -11.48973, -11.5456, -11.59879, -11.65404, -11.7072, -11.75799, -11.81588, -11.87118, -11.9232, -11.97988, -12.03242, -12.08778, -12.14132, -12.19609, -12.2494, -12.30484, -12.35747, -12.41355, -12.46147, -12.52225, -12.57434, -12.63016, -12.68405, -12.73881, -12.79273, -12.84797, -12.90115, -12.95661, -13.00959, -13.06567, -13.11733, -13.17368, -13.22221, -13.28209, -13.33549, -13.39013, -13.44395, -13.49904, -13.55129, -13.55324, -13.4979, -13.44533, -13.38949, -13.33604, -13.28165, -13.22841, -13.17224, -13.11943, -13.06367, -13.01129, -12.95458, -12.90261, -12.84679, -12.79346, -12.73795, -12.68386, -12.62943, -12.57543, -12.52081, -12.46986, -12.4119, 
-12.35925, -12.30331, -12.25058, -12.19547, -12.14141, -12.08664, -12.03287, -11.9784, -11.92449, -11.86913, -11.81673, -11.7612, -11.70816, -11.65281, -11.59945, -11.54431, -11.49011, -11.43538, -11.38229, -11.32663, -11.27402, -11.2183, -11.16587, -11.10984, -11.05647, -11.0028, -10.94824, -10.89295, -10.83986, -10.78483, -10.73164, -10.67554, -10.62317, -10.56761, -10.51436, -10.4589, -10.40613, -10.35052, -10.29787, -10.24177, -10.18928, -10.13295, -10.08132, -10.02464, -9.972691, -9.917088, -9.863487, -9.808712, -9.753768, -9.698977, -9.64609, -9.589997, -9.538164, -9.481107, -9.428586, -9.372593, -9.319863, -9.264706, -9.211174, -9.156712, -9.103079, -9.047662, -8.99439, -8.938172, -8.886223, -8.831287, -8.777548, -8.722864, -8.668149, -8.613771, -8.560018, -8.505346, -8.452108, -8.396232, -8.343747, -8.288088, -8.234856, -8.179289, -8.126564, -8.072516, -8.018468, -7.962571, -7.909718, -7.853374, -7.800959, -7.745126, -7.692129, -7.637188, -7.583608, -7.529106, -7.475137, -7.420242, -7.367808, -7.311427, -7.259215, -7.202894, -7.149843, -7.094233, -7.041565, -6.985809, -6.932612, -6.878032, -6.823979, -6.76939, -6.716991, -6.660836, -6.609323, -6.552857, -6.499479, -6.444643, -6.390263, -6.33493, -6.281358, -6.226657, -6.173764, -6.118687, -6.066292, -6.009394, -5.957428, -5.903566, -5.848761, -5.794306, -5.740449, -5.685512, -5.631788, -5.576566, -5.523378, -5.468136, -5.415892, -5.358974, -5.305958, -5.2508, -5.197234, -5.14308, -5.088435, -5.034035, -4.980464, -4.924985, -4.872462, -4.816075, -4.763245, -4.708172, -4.654146, -4.600249, -4.547088, -4.491176, -4.439456, -4.382683, -4.330057, -4.27449, -4.221737, -4.166123, -4.11238, -4.058031, -4.002587, -3.948179, -3.895012, -3.839002, -3.786421, -3.730602, -3.678894, -3.622616, -3.569306, -3.515458, -3.460159, -3.406107, -3.352739, -3.297656, -3.244501, -3.18857, -3.135115, -3.079687, -3.025592, -2.971762, -2.917168, -2.862685, -2.808732, -2.75387, -2.700411, -2.645385, -2.592348, -2.537074, -2.483268, 
-2.429199, -2.374538, -2.32028, -2.26636, -2.21259, -2.158089, -2.101698, -2.049733, -1.993665, -1.940466, -1.886327, -1.832216, -1.77829, -1.723934, -1.668955, -1.615565, -1.560224, -1.508332, -1.45194, -1.398924, -1.344095, -1.289378, -1.235677, -1.180901, -1.127134, -1.072838, -1.017269, -0.9652575, -0.9090891, -0.8562682, -0.8006728, -0.7484967, -0.6933431, -0.6383141, -0.5842198, -0.5306764, -0.4755589, -0.422482, -0.3669971, -0.3141344, -0.2585589, -0.2057641, -0.1508175, -0.096172, -0.04309347]
# function to get capacitance from model
def capacitance_of_IDEs(tau, eta, N, L, epss):
    """Total capacitance of an interdigitated electrode (IDE) structure.

    Combines the per-unit-length capacitance of an interior finger
    (infinite-array Fourier model) with that of the two outermost fingers
    (electrode-pair conformal model), then scales by the finger length.

    Parameters: tau, eta - geometry parameters of the model; N - number of
    fingers; L - finger length in µm; epss - layer permittivities.
    Returns the capacitance C in [F].
    """
    # Interior fingers: periodic (infinite) array solution.
    interior = infinite_fourier.multiple_recursive_images(
        [eta, 0], [tau], epss, epss, 8, 180, accuracy_limit=10**-15, hybrid=True)
    c_interior = interior.get_C()  # [F/m]
    # Edge fingers: isolated electrode-pair solution.
    edge_pair = pair_conformal.multiple_recursive_images(
        [eta, 0], [tau], epss, epss, 8, 20, accuracy_limit=10**-15)
    c_pair = edge_pair.get_C()  # [F/m]
    # Combination of the interior and pair capacitances for an edge finger.
    c_edge = 2 * c_interior * c_pair / (c_interior + c_pair)  # [F/m]
    # L is in µm; N fingers contribute 2 edge gaps and (N-3) interior gaps.
    return L * 10**-6 * (2 * c_edge + (N - 3) * c_interior)
# refactor of the input of the above function to work with scipy.optimize.curve_fit
def get_C_for_lsqfit(x, xi):
    """Adapter matching scipy.optimize.curve_fit's f(xdata, *params) signature.

    `x` packs the fixed quantities as [tau, eta, N, L, eps_air, eps_sub];
    `xi` is the single fit parameter (film permittivity), inserted as the
    middle layer of the permittivity stack.
    """
    tau, eta, N, L, eps_top, eps_bottom = x
    return capacitance_of_IDEs(tau, eta, N, L, [eps_top, xi, eps_bottom])
# function to get dielectric constant of film from geometry of capacitance using scipy.optimize.curve_fit
def get_eps(tau, eta, N, L, eps_air, guess_eps_film, eps_sub, C):
    """Invert the capacitance model via least squares.

    Fits the film permittivity that makes capacitance_of_IDEs reproduce
    the measured capacitance C, starting from guess_eps_film.
    """
    fixed = [tau, eta, N, L, eps_air, eps_sub]
    fit_params, _covariance = scipy.optimize.curve_fit(
        get_C_for_lsqfit, fixed, C, p0=guess_eps_film)  # ,ftol=0.01,xtol=1,gtol=1)
    return fit_params[0]
# in this approach we only calculate the dielectric constant of the film at 11 points, and use these to create a polynomial
# the polynomial describes the relationship capacitance->dielectric constant with high accuracy,
# because this relationship is almost linear
# Build a cheap C -> eps_film mapping: run the expensive least-squares
# inversion at only 11 capacitance values, then fit a degree-4 polynomial
# through them (the relationship is nearly linear, so this is accurate).
eps_film_guess=1500
trail_C_step=(np.max(C)-np.min(C))/10
# 11 evenly spaced capacitance samples spanning the measured range.
trail_C = np.arange(np.min(C),np.max(C)+0.5*trail_C_step,trail_C_step)
trail_eps = []
for c in tqdm.tqdm(trail_C):
    eps=get_eps(tau,eta,N,L,eps_air,eps_film_guess,eps_sub,c)
    # Warm-start the next inversion from the previous solution.
    eps_film_guess=eps
    trail_eps.append(eps)
C_to_eps = np.poly1d(np.polyfit(trail_C, trail_eps, 4)) # <- function that gets the dielectric constant of the film given C as input
eps_film = C_to_eps(C)
# Fit eps(V) on the second quarter of the sweep only (the lower branch;
# see the note below the cell about excluding domain-wall contributions).
l=len(V_for_C)//4
V_to_eps = np.poly1d(np.polyfit(V_for_C[l:2*l+1], C_to_eps(C[l:2*l+1]), 4))
# plot eps-V curve
fig,ax=plt.subplots()
ax.plot(V_for_C,eps_film,'k')
ax.plot(V_for_C[l:2*l+1], V_to_eps(V_for_C[l:2*l+1]),lw=4,color=[1,0.4,0])
ax.set_ylabel(r'$\varepsilon_{film}$ [-]')
ax.set_xlabel(r'$V$ [V]')
```
The orange line above is the function V_to_eps(), which we will use to describe the dielectric constant of the film.
We choose the lower curve in the above plot since the upper curve includes contributions from motion of domain walls, etc.
We here ignore these contributions to the dielectric constant
## Calculate capacitive contribution from substrate
```
#function to get capacitive contribution of film
def getCintEx_film(case):
g,error=scipy.integrate.quad(lambda y: case.get_Ex(0,y), 0,infinite.t[0])
eps_0 = 8.8541878128*10**-12 # dielectric permitivity of vacuum [F/m]
c=g*case.single[0].layers[1].eps_x*eps_0
return c
#function to get capacitive contribution from substrate and air
def capacitive_contribution_of_sub_and_air(tau,eta,N,L,epss):
# calculate total capacitance of structure
inf_case = infinite_fourier.multiple_recursive_images([eta,0],[tau],epss,epss,8,180,accuracy_limit=10**-15,hybrid=True)
c_I = inf_case.get_C() # [F/m]
pair_case = pair_conformal.multiple_recursive_images([eta,0],[tau],epss,epss,8,20,accuracy_limit=10**-15)
c_2 = pair_case.get_C() # [F/m]
c_E=2*c_I*c_2/(c_I+c_2) # [F/m]
C=L*10**-6*(2*c_E+(N-3)*c_I)
# calculate film contribution
g,error=scipy.integrate.quad(lambda y: inf_case.get_Ex(0,y), 0,tau)
eps_0 = 8.8541878128*10**-12 # dielectric permitivity of vacuum [F/m]
film_contribution_to_C=(N-1)*L*10**-6*g*epss[1]*eps_0
return C-film_contribution_to_C # charge in [C]
# Sample the substrate+air capacitance at 11 voltages and fit a degree-4
# polynomial, because evaluating the field model directly at every V is
# expensive.
trail_V_step = (np.max(V) - np.min(V)) / 10
trail_V = np.arange(np.min(V), np.max(V) + 0.5 * trail_V_step, trail_V_step)
trail_air_sub_cont = []
for v in tqdm.tqdm(trail_V):
    epss = [eps_air, V_to_eps(v), eps_sub]
    contrib = capacitive_contribution_of_sub_and_air(tau, eta, N, L, epss)
    trail_air_sub_cont.append(contrib)
V_to_capcitive_contrib = np.poly1d(np.polyfit(trail_V, trail_air_sub_cont, 4))
# Integrate C(V)·dV along the measured sweep to accumulate the parasitic
# charge from substrate and air.
charge_contrib_from_sub_and_air = [0]
for i in range(len(V) - 1):
    # BUG FIX: evaluate the capacitance at the midpoint of the current
    # segment, 0.5*(V[i]+V[i+1]); the original used V[1] (index typo),
    # pinning the first term to a constant voltage.
    additional_charge = V_to_capcitive_contrib(np.abs(0.5 * (V[i] + V[i + 1]))) * (V[i + 1] - V[i])
    charge_contrib_from_sub_and_air.append(additional_charge + charge_contrib_from_sub_and_air[-1])
charge_contrib_from_sub_and_air = np.array(charge_contrib_from_sub_and_air)
# plot Q-V curve
fig, ax = plt.subplots()
ax.plot(V, Q, 'k', label='total charge')
ax.plot(V, charge_contrib_from_sub_and_air * 10**6, label='contribution from substrate and air')
Q_corrected = Q - charge_contrib_from_sub_and_air * 10**6
ax.plot(V, Q_corrected, label='charge minus contributions')
ax.legend()
ax.set_ylabel(r'$Q$ [µC]')
ax.set_xlabel(r'$V$ [V]')
```
## Transform to polarization VS electric field
```
# plot P-E curve
P = Q_corrected / ((N - 1) * L * t * 10**-8)  # polarization, µC/cm2
# Effective electrode-gap correction derived from the film thickness t.
delta_a = 4 * np.log(2) * t / np.pi
E = V / (a + delta_a) * 10  # kV/cm
fig, ax = plt.subplots()
ax.plot(E, P, 'k')
ax.set_ylabel(r'$P$ [µC/cm$^2$]')
# BUG FIX: the x axis plots the electric field E in kV/cm, not the voltage;
# the original label r'$V$ [V]' was wrong.
ax.set_xlabel(r'$E$ [kV/cm]')
|
github_jupyter
|
## Experiment
```
experiment_label = 'rforest01'
```
### Aim:
* compare basic random forest to best logreg
### Findings:
* ROC on training hugs the top left; overfitting.
* Next: increase min samples per leaf.
## Set up
```
import pandas as pd
import numpy as np
from joblib import dump, load # simpler than pickle!
import matplotlib.pyplot as plt
import seaborn as sns
```
## Data
```
# Load the Kaggle NBA career-prediction train/test splits from the raw
# data folder and take a first look at shapes, dtypes and summary stats.
data_path = '../data/raw/uts-advdsi-nba-career-prediction'
train_raw = pd.read_csv(data_path + '/train.csv')
test_raw = pd.read_csv(data_path + '/test.csv')
#shapes & head
print(train_raw.shape)
print(test_raw.shape)
train_raw.head()
test_raw.head()
# info (column dtypes and non-null counts)
train_raw.info()
#variable descriptions
train_raw.describe()
test_raw.describe()
```
## Cleaning
```
# Work on copies so the raw frames stay untouched.
train, test = train_raw.copy(), test_raw.copy()
# Identifier columns carry no predictive signal; drop them from both splits.
# (Other candidates once considered: 'MIN', 'FGM', 'FGA', 'TOV', '3PA',
# 'FTM', 'FTA', 'REB'.)
cols_drop = ['Id_old', 'Id']
for frame in (train, test):
    frame.drop(cols_drop, axis=1, inplace=True)
train.head()
test.head()
# Separate the label from the training features.
train_target = train.pop('TARGET_5Yrs')
```
# Modelling
```
#transformations
# Fit the scaler on the training data only, then apply the same transform
# to the test data so no test-set statistics leak into training.
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
train = scaler.fit_transform(train)
# Persist the fitted scaler so the identical transform can be reused at
# inference / submission time.
dump(scaler, '../models/aj_' + experiment_label + '_scaler.joblib')
# transform test data
test = scaler.transform(test)
#examine shapes
print('train:' + str(train.shape))
print('test:' + str(test.shape))
# split training into train & validation (80/20, fixed seed for reproducibility)
from sklearn.model_selection import train_test_split
X_train, X_val, y_train, y_val = train_test_split(train, train_target, test_size=0.2, random_state=8)
# in this case we will use the Kaggle submission as our test
#X_train, y_train = train, train_target
#import models
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn import svm
# Define model; class_weight='balanced' reweights classes inversely to
# their frequencies to counter label imbalance.
model = RandomForestClassifier(class_weight='balanced',random_state=8)
#fit model to training data
model.fit(X_train, y_train)
#save model to file
dump(model, '../models/aj_' + experiment_label + '.joblib')
#predictions for test and validation sets
y_train_preds = model.predict(X_train)
y_val_preds = model.predict(X_val)
```
## Evaluation
```
import sys
import os
# Make the project root importable so the src.* package resolves.
sys.path.append(os.path.abspath('..'))
from src.models.aj_metrics import confusion_matrix
# Confusion matrices on train vs validation to gauge overfitting.
print("Training:")
print(confusion_matrix(y_train, y_train_preds))
print('')
print("Validation:")
print(confusion_matrix(y_val, y_val_preds))
from sklearn import metrics
# Per-class precision / recall / F1.
print("Training:")
print(metrics.classification_report(y_train, y_train_preds))
print('')
print("Validation:")
print(metrics.classification_report(y_val, y_val_preds))
print("Training:")
print(metrics.roc_auc_score(y_train, model.decision_function(X_train)))
print('')
print("Validation:")
print(metrics.roc_auc_score(y_val, model.decision_function(X_val)))
import matplotlib.pyplot as plt
from sklearn import metrics
metrics.plot_roc_curve(model, X_train, y_train)
plt.show()
metrics.plot_roc_curve(model, X_val, y_val)
plt.show()
```
# Apply to test data for submission
```
y_test_preds = model.predict(test)
y_test_preds
# Kaggle wants the probability of the positive class (second column of
# predict_proba's [P(0), P(1)] output).
y_test_probs = model.predict_proba(test)
y_test_probs
len(y_test_probs)
test_raw.shape
test_raw['Id'].shape
# FIX: use the actual Id column from the raw test set instead of assuming
# the ids are the contiguous range 0..3798 — if the ids are not contiguous
# the hard-coded range silently mislabels every row.
submission = pd.DataFrame({'Id': test_raw['Id'],
                           'TARGET_5Yrs': [p[1] for p in y_test_probs]})
submission.head()
submission.to_csv('../reports/aj_' + experiment_label + 'submission.csv',
                  index=False,
                  )
```
|
github_jupyter
|
# 训练你的物体检测器
```
!pip install gluoncv
import gluoncv as gcv
import mxnet as mx
```
# 准备训练集
```
import os
class DetectionDataset(gcv.data.VOCDetection):
    """Pascal-VOC-style detection dataset rooted at a flat folder of
    paired ``<name>.jpg`` / ``<name>.xml`` files.

    Overrides VOCDetection's constructor so the official VOC directory
    layout is not required; every ``.jpg`` in ``root`` is an item whose
    annotation lives next to it with the same stem.
    """

    CLASSES = ['cocacola', 'noodles', 'hand']

    def __init__(self, root):
        self._im_shapes = {}
        self._root = os.path.expanduser(root)
        self._transform = None
        # BUG FIX: the original used x.strip('.jpg'), but str.strip removes
        # any of the characters '.', 'j', 'p', 'g' from BOTH ends (e.g.
        # 'pig.jpg' -> 'i'), corrupting stems; split off the extension instead.
        self._items = [(self._root, os.path.splitext(x)[0])
                       for x in os.listdir(self._root) if x.endswith('.jpg')]
        # Templates filled with (folder, stem) by the base class.
        self._anno_path = os.path.join('{}', '{}.xml')
        self._image_path = os.path.join('{}', '{}.jpg')
        self.index_map = dict(zip(self.classes, range(self.num_class)))
        self._label_cache = self._preload_labels()

    def __str__(self):
        detail = self._root
        return self.__class__.__name__ + '(' + detail + ')'

    @property
    def classes(self):
        return self.CLASSES

    @property
    def num_class(self):
        return len(self.classes)
# Instantiate the dataset and sanity-check what it discovered on disk.
train_dataset = DetectionDataset('../images/shenzhen_v1')
print('class_names:', train_dataset.classes)
print('num_images:', len(train_dataset))
```
# 可视化数据
```
from matplotlib import pyplot as plt
from gluoncv.utils import viz
# Pull one (image, label) sample; each label row holds
# [xmin, ymin, xmax, ymax, class_id].
sample = train_dataset[0]
train_image = sample[0]
train_label = sample[1]
ax = viz.plot_bbox(
    train_image.asnumpy(),
    train_label[:, :4],          # bounding boxes
    labels=train_label[:, 4:5],  # class ids
    class_names=train_dataset.classes)
plt.show()
# Uncomment to step through every sample in the dataset:
# for i in range(len(train_dataset)):
#     sample = train_dataset[i]
#     train_image = sample[0]
#     train_label = sample[1]
#     ax = viz.plot_bbox(
#         train_image.asnumpy(),
#         train_label[:, :4],
#         labels=train_label[:, 4:5],
#         class_names=train_dataset.classes)
#     plt.show()
```
# 定义训练过程
```
import time
from datetime import datetime
from mxnet import autograd
from gluoncv.data.batchify import Tuple, Stack, Pad
def train_model(train_dataset, epochs=50):
    """Fine-tune an SSD-512 / ResNet-50 detector (COCO-pretrained) on
    `train_dataset` and return the trained network.

    Only the prediction heads (parameters whose name contains
    'convpredictor') receive gradients; everything else is frozen.
    Runs on GPU 0.
    """
    ctx = mx.gpu(0)
    # ctx = mx.cpu(0)
    # Custom-class SSD with weights transferred from the COCO model.
    net = gcv.model_zoo.get_model('ssd_512_resnet50_v1_custom', classes=train_dataset.classes, transfer='coco')
    # net.load_parameters('object_detector_epoch200_10_22_2019_20_28_41.params') # TODO continue training
    net.collect_params().reset_ctx(ctx)
    width, height = 512, 512  # suppose we use 512 as base training size
    train_transform = gcv.data.transforms.presets.ssd.SSDDefaultTrainTransform(width, height)
    gcv.utils.random.seed(233)  # fixed seed for reproducible augmentation
    # batch_size = 4
    batch_size = 32  # 32 for p3.2xlarge, 16 for p2.2xlarge
    # you can make it larger(if your CPU has more cores) to accelerate data loading
    num_workers = 4
    # Dummy forward pass to obtain the anchor boxes, which the training
    # transform needs to generate classification/box targets.
    with autograd.train_mode():
        _, _, anchors = net(mx.nd.zeros((1, 3, height, width), ctx))
    anchors = anchors.as_in_context(mx.cpu())
    train_transform = gcv.data.transforms.presets.ssd.SSDDefaultTrainTransform(width, height, anchors)
    # Batchify as (stacked images, stacked class targets, stacked box targets).
    batchify_fn = Tuple(Stack(), Stack(), Stack())
    train_loader = mx.gluon.data.DataLoader(
        train_dataset.transform(train_transform),
        batch_size,
        shuffle=True,
        batchify_fn=batchify_fn,
        last_batch='rollover',
        num_workers=num_workers)
    mbox_loss = gcv.loss.SSDMultiBoxLoss()
    ce_metric = mx.metric.Loss('CrossEntropy')
    smoothl1_metric = mx.metric.Loss('SmoothL1')
    for k, v in net.collect_params().items():
        if 'convpredictor' not in k:
            # freeze upper layers
            v.grad_req = 'null'
    trainer = mx.gluon.Trainer(
        net.collect_params(), 'sgd',
        {'learning_rate': 0.001, 'wd': 0.0005, 'momentum': 0.9})
    # Hybridize with static shapes for speed.
    net.hybridize(static_alloc=True, static_shape=True)
    for epoch in range(epochs):
        tic = time.time()
        btic = time.time()
        for i, batch in enumerate(train_loader):
            data = mx.gluon.utils.split_and_load(batch[0], ctx_list=[ctx], batch_axis=0)
            cls_targets = mx.gluon.utils.split_and_load(batch[1], ctx_list=[ctx], batch_axis=0)
            box_targets = mx.gluon.utils.split_and_load(batch[2], ctx_list=[ctx], batch_axis=0)
            with autograd.record():
                cls_preds = []
                box_preds = []
                for x in data:
                    cls_pred, box_pred, _ = net(x)
                    cls_preds.append(cls_pred)
                    box_preds.append(box_pred)
                sum_loss, cls_loss, box_loss = mbox_loss(
                    cls_preds, box_preds, cls_targets, box_targets)
                autograd.backward(sum_loss)
            # since we have already normalized the loss, we don't want to normalize
            # by batch-size anymore
            trainer.step(1)
            ce_metric.update(0, [l * batch_size for l in cls_loss])
            smoothl1_metric.update(0, [l * batch_size for l in box_loss])
            name1, loss1 = ce_metric.get()
            name2, loss2 = smoothl1_metric.get()
            print('[Epoch {}][Batch {}], Speed: {:.3f} samples/sec, {}={:.3f}, {}={:.3f}'.format(
                epoch, i, batch_size/(time.time()-btic), name1, loss1, name2, loss2))
            btic = time.time()
    return net
```
# 开始训练
```
# Train for 300 epochs and save the weights under a timestamped filename.
epochs = 300
net = train_model(train_dataset, epochs=epochs)
save_file = 'object_detector_epoch{}_{}.params'.format(epochs, datetime.now().strftime("%m_%d_%Y_%H_%M_%S"))
net.save_parameters(save_file)
print('Saved model to disk: ' + save_file)
```
|
github_jupyter
|
# Simple Evolutionary Exploration Walkthrough
This notebook contains instructions on how to use the SEE module, along with several examples. These instructions will cover the following parts:
* [Import Image Files](#Import_Image_Files)
* [Manual Search](#Manual_Search)
* [Genetic Algorithm Search](#Genetic_Algorithm_Search)
* [Reading the Results](#Reading_the_Results)
These examples use the Jupyter widgets to make it interactive and easier to use and learn.
----
<a name="Import_Image_Files"></a>
## Import Image Files
First import the following packages:
```
%matplotlib inline
import matplotlib.pylab as plt
import imageio
from see import Segmentors
from see import JupyterGUI
```
Pick an image from the example folder.
```
data = JupyterGUI.pickimage('Image_data/Examples/')
```
# Select ColorSpace
```
colorspace = JupyterGUI.colorwidget(data.img, paramlist=None)
```
# Select ColorSpace
```
colorspace = JupyterGUI.colorwidget(data.img, paramlist=None)
```
----
<a name="Manual_Search"></a>
## Manual Search
Manual searching of parameters can easily be done using the provided GUI. Pre-established parameters can be put into the widget, or the parameter values can be changed using the sliders. To change the algorithm, simply change the `alg` input. For a list of available inputs print `Segmentors.algorithmspace`
```
from see.Segmentors import segmentor
alg = JupyterGUI.picksegment(list(segmentor.algorithmspace.keys()))
### Example of input for params
params = JupyterGUI.segmentwidget(data.img, params = None, alg = alg.value)
```
----
<a name="Genetic_Algorithm_Search"></a>
## Genetic Algorithm Search
First import image files, as well as the following packages:
```
from see.Segmentors import segmentor
from see.ColorSpace import colorspace
from see.Workflow import workflow
from see.Segment_Fitness import segment_fitness
from see import base_classes, GeneticSearch
workflow.addalgos([colorspace, segmentor, segment_fitness])
```
To run the genetic algorithm, we need to initialize an instance of an evolver. The original image and ground truth segmentation image are inputs to it, along with an integer value for population size. This value sets how many individuals are in our population. For this example, we'll set this number to be equal to 10.
```
mydata = base_classes.pipedata()
mydata.img = data.img
mydata.gmask = data.gmask
my_evolver = GeneticSearch.Evolver(workflow, mydata, pop_size=10)
```
Now that the evolver has been initialized, we can run the genetic algorithm for a specified number of generations (or iterations). Here we will set this number equal to 5.
```
# warnings may appear when this runs
population = my_evolver.run(ngen=5)
```
----
<a name="Reading_the_Results"></a>
## Reading the Results
After the genetic algorithm is complete, we can retrieve the individuals that resulted in the lowest (best) fitness values by printing `my_evolver.hof`. These individuals are sorted according to fitness value, so to get the overall best individual, we can simply look at the first individual in the list.
```
params = my_evolver.hof[0]
print('Best Individual:\n', params)
```
We can see the mask this individual generates by evaluating it, then plotting the result:
```
# Rebuild the segmentor from the best individual's parameters and apply it.
seg = Segmentors.algoFromParams(params)
mask = seg.evaluate(data.img)
# Show the original image and the produced segmentation side by side.
plt.figure(figsize=(20, 10))
plt.subplot(121)
plt.imshow(data.img)
plt.title("Original Image")
plt.axis('off')
plt.subplot(122)
plt.imshow(mask)
plt.title("Segmentation")
plt.axis('off')
# BUG FIX: the original wrote `plt.tight_layout` without parentheses,
# which merely references the function and never adjusts the layout.
plt.tight_layout()
plt.show()
```
We can also use `FitnessFunction` to calculate the final fitness value for this algorithm:
```
print('Fitness Value: ', Segmentors.FitnessFunction(mask, data.mask)[0])
```
If this value is satisfactory, we can then get usable code to run this algorithm anywhere, including outside this notebook. The `print_best_algorithm_code` function does this using the given individual:
```
ex = Segmentors.print_best_algorithm_code(my_evolver.hof[0])
```
With this code, make sure to import skimage, along with any input images this algorithm will be applied to.
|
github_jupyter
|
# Demonstrating sparkmagic
## This notebook will demonstrate how we can use the spark magic to intersperse our Python code with code that is running against a Spark cluster
Let’s say we’re working in an IPython notebook and we want to use Spark to analyze some data. So, we'll load `sparkmagic` in order to be able to talk to Spark from our Python notebook.
```
%load_ext sparkmagic.magics
```
With it, the `%manage_spark` line magic and the `%%spark` magic are available.
The %%manage_spark line magic lets you manage Livy endpoints and Spark sessions.
Let's start by adding an Endpoint.
An Endpoint is a [Livy](https://github.com/cloudera/livy) installation running on a Spark cluster.
`sparkmagic` allows us to specify the Livy endpoint along with a username and password to authenticate to it. If the Livy endpoint is on your local machine or has no password, simply leave the text fields for username and password blank.
```
%manage_spark
```

Now, add a session to the endpoint you added. The name you give to the session will be used with the `%%spark` magic to run Spark code. You can also specify the configuration you want to start the session with. You can create either Python (PySpark) or Scala (Spark) sessions.
Creating a session will create a `SparkContext` with the name `sc` and a `HiveContext` with the name `sqlContext`.
We'll start by adding a PySpark session.

You can now run Spark code against your Livy session. For information on the available commands, run %spark?
```
%spark?
```
## Pyspark
You can run code against your Spark session by adding `%%spark` at the beginning of the cell. Since we’ve only created a single session, we don’t need to specify the session name.
In the following cell, I'll create a Resilient Distributed Dataset (RDD) called `numbers`, and print its first element along with its debug description.
```
%%spark
numbers = sc.parallelize([1, 2, 3, 4])
print('First element of numbers is {} and its description is:\n{}'.format(numbers.first(), numbers.toDebugString()))
```
Now, you've created your session and executed some statements. If you want to look at the Livy logs for this session, simply run a cell like so:
```
%spark logs
```
## SparkSQL
You can run SQL queries by passing the arguments `-c sql` to the %%spark magic
First, let's create a table:
```
%%spark
df = spark.read.json("/apps/spark-2.3.3/examples/src/main/resources/people.json")
df.createOrReplaceTempView("people")
```
Now we can see what tables we have:
```
%%spark -c sql
SHOW TABLES
```
Now, let's query one of the available tables.
Notice that we are passing the `--output` or `-o` parameter with a value of `df_people` so that the output of our SQL query is saved in the `df_people` variable in the IPython kernel context as a [Pandas](http://pandas.pydata.org/) DataFrame.
```
%%spark -c sql -o df_people --maxrows 10
SELECT * FROM people
```
>SQL queries also have other parameters you can pass in, like `--samplemethod`, `--maxrows`, `--samplefraction`, and `--quiet`.
We can now simply use the Pandas dataframe from the IPython notebook.
```
df_people.head()
```
If you want to visualize the data in the Pandas dataframe, you can write your own code to do so, or you can use our autovisualization widget:
```
from autovizwidget.widget.utils import display_dataframe
display_dataframe(df_people)
```
>You could also choose to have this widget display by default for *all* Pandas dataframes from here on by running this piece of code:
```
ip = get_ipython()
ip.display_formatter.ipython_display_formatter.for_type_by_name('pandas.core.frame', 'DataFrame', display_dataframe)
```
### Server-side rendering
You can also have images rendered on the server, and then display them locally. This prevents the need to ship large amounts of data locally to do visualizations. First, we render a PNG, in this case using matplotlib:
```
%%spark
import matplotlib.pyplot as plt
ax = df.toPandas().plot.bar(x='name',y='age')
```
And now we can view the resulting image using the `%matplot` magic:
```
%%spark
%matplot plt
```
## Scala support
If you want to write your Spark code in Scala, you can easily do that.
Let's add a Scala session:
```
%manage_spark
```

And just run some Spark code. Notice that we now specify the session we want to use, `-s my_spark`.
```
%%spark -s my_spark
val hvacText = sc.parallelize(Array(1, 2, 3, 4))
hvacText.first()
```
Now, we can query the table with **SparkSQL** too:
```
%%spark -s my_spark -c sql -o my_df_from_scala --maxrows 10
SELECT * FROM hivesampletable
```
And we can still access the result of the Spark query from Scala as a Pandas dataframe!
```
my_df_from_scala.head()
```
# Cleaning up
Now that you’re done with your Livy sessions, you should clean them up.
Simply click on the `Delete` buttons!
```
%manage_spark
```

|
github_jupyter
|
<img width="10%" alt="Naas" src="https://landen.imgix.net/jtci2pxwjczr/assets/5ice39g4.png?w=160"/>
# Hugging Face - Ask boolean question to T5
<a href="https://app.naas.ai/user-redirect/naas/downloader?url=https://raw.githubusercontent.com/jupyter-naas/awesome-notebooks/master/Hugging%20Face/Hugging_Face_Ask_boolean_question_to_T5.ipynb" target="_parent"><img src="https://img.shields.io/badge/-Open%20in%20Naas-success?labelColor=000000&logo=data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iMTAyNHB4IiBoZWlnaHQ9IjEwMjRweCIgdmlld0JveD0iMCAwIDEwMjQgMTAyNCIgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIiB4bWxuczp4bGluaz0iaHR0cDovL3d3dy53My5vcmcvMTk5OS94bGluayIgdmVyc2lvbj0iMS4xIj4KIDwhLS0gR2VuZXJhdGVkIGJ5IFBpeGVsbWF0b3IgUHJvIDIuMC41IC0tPgogPGRlZnM+CiAgPHRleHQgaWQ9InN0cmluZyIgdHJhbnNmb3JtPSJtYXRyaXgoMS4wIDAuMCAwLjAgMS4wIDIyOC4wIDU0LjUpIiBmb250LWZhbWlseT0iQ29tZm9ydGFhLVJlZ3VsYXIsIENvbWZvcnRhYSIgZm9udC1zaXplPSI4MDAiIHRleHQtZGVjb3JhdGlvbj0ibm9uZSIgZmlsbD0iI2ZmZmZmZiIgeD0iMS4xOTk5OTk5OTk5OTk5ODg2IiB5PSI3MDUuMCI+bjwvdGV4dD4KIDwvZGVmcz4KIDx1c2UgaWQ9Im4iIHhsaW5rOmhyZWY9IiNzdHJpbmciLz4KPC9zdmc+Cg=="/></a>
## T5-base finetuned on BoolQ (superglue task)
This notebook is for demonstrating the training and use of the text-to-text-transfer-transformer (better known as T5) on boolean questions (BoolQ). The example use case is a validator indicating if an idea is environmentally friendly. Nearly any question can be passed into the `query` function (see below) as long as a context to a question is given.
Author: Maximilian Frank ([script4all.com](//script4all.com)) - Copyleft license
Notes:
- The model from [huggingface.co/mrm8488/t5-base-finetuned-boolq](//huggingface.co/mrm8488/t5-base-finetuned-boolq) is used in this example as it is an already trained t5-base model on boolean questions (BoolQ task of superglue).
- Documentation references on [huggingface.co/transformers/model_doc/t5.html#training](//huggingface.co/transformers/model_doc/t5.html#training), template script on [programming-review.com/machine-learning/t5](//programming-review.com/machine-learning/t5)
- The greater the model, the higher the accuracy on BoolQ (see [arxiv.org/pdf/1910.10683.pdf](//arxiv.org/pdf/1910.10683.pdf)):
t5-small|t5-base|t5-large|t5-3B|t5-11B
-|-|-|-|-
76.4%|81.4%|85.4%|89.9%|91.2%
## Loading the model
If here comes an error, install the packages via `python3 -m pip install … --user`.
You can also load a T5 plain model (not finetuned). Just replace the following code
```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
tokenizer = AutoTokenizer.from_pretrained('mrm8488/t5-base-finetuned-boolq')
model = AutoModelForSeq2SeqLM.from_pretrained('mrm8488/t5-base-finetuned-boolq')…
```
with
```python
from transformers import T5Tokenizer, T5ForConditionalGeneration
tokenizer = T5Tokenizer.from_pretrained('t5-small')
model = T5ForConditionalGeneration.from_pretrained('t5-small')
```
where `t5-small` is one of the names in the table above.
```
!pip install transformers
!pip install sentencepiece
import json
import torch
from operator import itemgetter
# NOTE(review): distutils is removed in Python 3.12; strtobool may need a
# local replacement on newer interpreters.
from distutils.util import strtobool
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
# load model
# Download the tokenizer and the BoolQ-finetuned T5 weights, and place the
# model on GPU when available, otherwise CPU.
tokenizer = AutoTokenizer.from_pretrained('mrm8488/t5-base-finetuned-boolq')
model = AutoModelForSeq2SeqLM.from_pretrained('mrm8488/t5-base-finetuned-boolq').to(torch.device('cuda' if torch.cuda.is_available() else 'cpu'))
# Best-effort: spread layers over multiple GPUs; parallelize() raises on
# single-GPU/CPU setups, so the failure is deliberately swallowed.
try:model.parallelize()
except:pass
```
## Training
> **Optional:** You can leave the following out, if you don't have custom datasets. By default the number of training epochs equals 0, so nothing is trained.
> **Warning:** This option consumes a lot of runtime and thus *naas.ai* credits. Make sure to have enough credits on your account.
For each dataset a stream-opener has to be provided which is readable line by line (e.g. file, database). The array under the key `keys` lists the dictionary keys that exist in each jsonl line. So in this example the first training dataset has the keys `question` for the questions (string), `passage` for the contexts (string) and `answer` for the answers (boolean). Adjust these keys to your dataset.
At last you have to adjust the number of epochs to be trained (see comment `# epochs`).
```
# Training datasets: each entry supplies a stream-opener (anything readable
# line by line, e.g. a file) plus the three jsonl keys holding the question,
# the context passage, and the boolean answer.
srcs = [
    { 'stream': lambda:open('boolq/train.jsonl', 'r'),
      'keys': ['question', 'passage', 'answer'] },
    { 'stream': lambda:open('boolq/dev.jsonl', 'r'),
      'keys': ['question', 'passage', 'answer'] },
    { 'stream': lambda:open('boolq-nat-perturb/train.jsonl', 'r'),
      'keys': ['question', 'passage', 'roberta_hard'] }
]
model.train()
# FIX: the original loop called loss.backward() but never created or stepped
# an optimizer, so gradients only accumulated and no weight was ever updated.
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
for _ in range(0): # epochs (0 by default: nothing is trained)
    for src in srcs:
        with src['stream']() as s:
            for d in s:
                q, p, a = itemgetter(src['keys'][0], src['keys'][1], src['keys'][2])(json.loads(d))
                tokens = tokenizer('question:'+q+'\ncontext:'+p, return_tensors='pt')
                # Skip examples that exceed the model's maximum input length.
                if len(tokens.input_ids[0]) > model.config.n_positions:
                    continue
                optimizer.zero_grad()
                loss = model(input_ids=tokens.input_ids,
                             labels=tokenizer(str(a), return_tensors='pt').input_ids,
                             attention_mask=tokens.attention_mask,
                             use_cache=True
                             ).loss
                loss.backward()
                optimizer.step()
model.eval(); # ; suppresses long output on jupyter
```
## Define query function
As the model is ready, define the querying function.
```
def query(q='question', c='context'):
    """Ask the BoolQ-finetuned model a yes/no question about a context.

    q: the question string; c: the context the answer must be drawn from.
    Returns a truthy int (1/0) via strtobool of the decoded model output.
    """
    # Encode and move inputs to the model's device (the model may be on GPU).
    input_ids = tokenizer.encode('question:'+q+'\ncontext:'+c,
                                 return_tensors='pt').to(model.device)
    # FIX: max_length belongs to generate(), where it caps the number of
    # generated tokens; the original passed it to decode(), which silently
    # ignores it.
    output_ids = model.generate(input_ids=input_ids, max_length=3)[0]
    answer = tokenizer.decode(token_ids=output_ids, skip_special_tokens=True)
    return strtobool(answer)
```
## Querying on the task
Now the actual task begins: Query the model with your ideas (see list `ideas`).
```
# Query the model with each idea. The inline "# should be ..." notes record
# the author's expected answers. NOTE(review): a few expectations look
# inconsistent (e.g. 'forbid opensource vaccines' marked true while
# 'opensource vaccines' is marked false) — confirm against the intent.
if __name__ == '__main__':
    ideas = [ 'The idea is to pollute the air instead of riding the bike.', # should be false
        'The idea is to go cycling instead of driving the car.', # should be true
        'The idea is to put your trash everywhere.', # should be false
        'The idea is to reduce transport distances.', # should be true
        'The idea is to put plants on all the roofs.', # should be true
        'The idea is to forbid opensource vaccines.', # should be true
        'The idea is to go buy an Iphone every five years.', # should be false
        'The idea is to walk once every week in the nature.', # should be true
        'The idea is to go buy Green bonds.', # should be true
        'The idea is to go buy fast fashion.', # should be false
        'The idea is to buy single-use items.', # should be false
        'The idea is to drink plastic bottled water.', # should be false
        'The idea is to use import goods.', # should be false
        'The idea is to use buy more food than you need.', # should be false
        'The idea is to eat a lot of meat.', # should be false
        'The idea is to eat less meat.', # should be false
        'The idea is to always travel by plane.', # should be false
        'The idea is to opensource vaccines.' # should be false
    ]
    for idea in ideas:
        print('🌏 Idea:', idea)
        print('\t✅ Good idea' if query('Is the idea environmentally friendly?', idea) else '\t❌ Bad idea' )
```
|
github_jupyter
|
## Goes over modeling, starting from modeling tables.
### We're using modeling tables which were prepared based on 12 hours worth of vital sign data from each patient, as well as medication history during the stay, and patient characteristics.
### The model predicts the probability of having a rapid response team event in 1 hour's time from the time of prediction. A RRT event is called after personnel identify that a patient has an urgent need for medical service.
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import scipy as sp
# import datetime as datetime
import cPickle as pickle
%matplotlib inline
plt.style.use('ggplot')
from sklearn.preprocessing import StandardScaler
from sklearn.cross_validation import train_test_split, KFold
from sklearn.metrics import confusion_matrix, roc_auc_score, precision_score, recall_score, classification_report
from sklearn.ensemble import GradientBoostingClassifier #, RandomForestClassifier,
from sklearn.ensemble.partial_dependence import plot_partial_dependence, partial_dependence
from sklearn.grid_search import GridSearchCV
```
### function definitions
```
def score_printout(X_test, y_test, fittedModel):
    # Print AUC-ROC (from predicted probabilities of the positive class),
    # precision, and recall (from hard predictions) for a fitted classifier.
    # NOTE: Python 2 print statements — this notebook targets Python 2.
    print "AUC-ROC Score of model: ", roc_auc_score(y_test, fittedModel.predict_proba(X_test)[:,1])
    print "Precision Score of model: ", precision_score(y_test, fittedModel.predict(X_test))
    print "Recall Score of model: ", recall_score(y_test, fittedModel.predict(X_test))
def make_feature_importance_plot(featuresAndImportances, numFeatures):
    """Horizontal bar chart of the first numFeatures (feature, importance) pairs.

    featuresAndImportances: list of (name, importance) tuples, in the order
        produced by the caller's sort.
    numFeatures: how many pairs from the front of the list to plot.
    """
    topN = featuresAndImportances[:numFeatures]
    labels = [pair[0] for pair in topN]
    values = [pair[1] for pair in topN]
    # FIX: ind previously had len(values)+2 entries, so set_yticks() received
    # two more ticks than there are labels, misaligning the tick labels; and
    # barh(range(numFeatures), ...) broke when fewer pairs than numFeatures
    # were supplied. Derive everything from the actual number of bars.
    ind = np.arange(len(values))
    width = 0.35
    plt.barh(ind, values)
    ax = plt.subplot(111)
    ax.set_yticks(ind + width)
    ax.set_yticklabels(labels, rotation=0, size=12)
    plt.ylabel('Feature', size=20)
    plt.xlabel('Importance', size=20)
    plt.show()
```
### Read in data
We did not share our modeling data, so you will have to create your own. The pipeline tool can help you do this. If you save the results to a csv, `masterdf_rrt` and `masterdf_nonrrt` are dataframes with the modeling data for each of the positive and negative classes, respectively.
```
# Load the precomputed 13-hour modeling tables for the positive (RRT) and
# negative (non-RRT) classes; the pipeline tool builds these CSVs.
masterdf_rrt = pd.read_csv('RRT_modeling_table_13hr_raw.csv')
masterdf_nonrrt = pd.read_csv('NonRRT_modeling_table_13hr_raw.csv')
```
### Look at summary statistics for numeric columns for rrt & non-rrt tables (35 cols)
```
# Inspect column names and per-column summary statistics (transposed for
# readability) for both classes.
masterdf_rrt.columns
masterdf_rrt.describe().T
masterdf_nonrrt.describe().T
```
### We have a good amount of nan values in some columns. Lets plot the nan values to get a sense of how many there are
```
def show_df_nans(masterdf, collist=None):
    '''
    Visualize missing data in masterdf as a seaborn heatmap.

    Builds an indicator frame over collist (or a default list of vital-sign
    and obesity columns) where 1 marks a NaN and 0 marks an observed value.
    Dark squares/lines in the heatmap show where data is missing.
    '''
    if not collist:
        # Default: every column known to potentially contain NaNs.
        plot_cols = ['obese','DBP_mean', 'DBP_recent', 'SBP_mean', 'SBP_recent', 'HR_mean', 'HR_recent',
                     'MAP_mean', 'MAP_recent', 'temp_mean', 'temp_recent', 'SPO2_mean',
                     'SPO2_recent', 'RR_mean', 'RR_recent', 'pulse_mean', 'pulse_recent',
                     'CO2_mean', 'CO2_recent', 'GCS_mean', 'GCS_recent']
    else:
        plot_cols = collist
    # Start with all cells = 1 ("missing"), then zero out observed cells.
    df_viznan = pd.DataFrame(data = 1,index=masterdf.index,columns=plot_cols)
    df_viznan[~pd.isnull(masterdf[plot_cols])] = 0
    plt.figure(figsize=(10,8))
    plt.title('Dark values are nans')
    return sns.heatmap(df_viznan.astype(float))
# subset of numeric columns we'll use in modeling (sufficient data available)
plot_cols_good = ['obese','DBP_mean', 'DBP_recent', 'SBP_mean', 'SBP_recent',
                  'MAP_mean', 'MAP_recent', 'temp_mean', 'temp_recent', 'SPO2_mean',
                  'SPO2_recent', 'RR_mean', 'RR_recent', 'pulse_mean', 'pulse_recent']
# Visualize missingness for each class before any row filtering.
show_df_nans(masterdf_nonrrt) # show all columns that may have nans
# show_df_nans(masterdf_nonrrt, plot_cols_good) # show the columns which we plan to use for modeling
show_df_nans(masterdf_rrt)
# show_df_nans(masterdf_rrt, plot_cols_good)
```
### Let's not use those columns where there are significant nans: drop HR (heart rate; we have pulse rate instead), CO2, and GCS, which leaves us with 28 features.
```
# Final feature list: demographics, history/medication flags, and the
# vital-sign summary columns with enough data (HR, CO2, GCS dropped above).
col_use = ['age', 'sex', 'obese', 'smoker', 'prev_rrt', 'on_iv', 'bu-nal', 'DBP_mean',
           'DBP_recent', 'SBP_mean', 'SBP_recent',
           'MAP_mean', 'MAP_recent', 'temp_mean', 'temp_recent', 'SPO2_mean',
           'SPO2_recent', 'RR_mean', 'RR_recent', 'pulse_mean', 'pulse_recent',
           'anticoagulants', 'narcotics', 'narc-ans', 'antipsychotics',
           'chemo', 'dialysis', 'race']
X_rrt = masterdf_rrt[col_use]
X_notrrt = masterdf_nonrrt[col_use]
```
### We need to deal with these nans before we can start modeling. (There should not be any nans in the modeling table)
```
# let's look at getting rid of the data rows where vitals signs are all nans
vitals_cols = ['DBP_mean', 'DBP_recent', # take the mean of all the measurements & the most recently observed point
               'SBP_mean', 'SBP_recent',
               'MAP_mean', 'MAP_recent', # mean arterial pressure
               'temp_mean', 'temp_recent',# temperature
               'SPO2_mean', 'SPO2_recent',
               'RR_mean', 'RR_recent', # respiratory rate
               'pulse_mean', 'pulse_recent']
# Write out rows that are not all 0/NaNs across. (if all nans, remove this sample)
# NOTE(review): DataFrame.ix is deprecated/removed in modern pandas; this
# notebook targets an older pandas where .ix was still available.
X_rrt = X_rrt.loc[np.where(X_rrt.ix[:, vitals_cols].sum(axis=1, skipna=True)!=0)[0]]
X_rrt = X_rrt.reset_index(drop=True)
X_notrrt = X_notrrt.loc[np.where(X_notrrt.ix[:, vitals_cols].sum(axis=1, skipna=True)!=0)[0]]
X_notrrt = X_notrrt.reset_index(drop=True)
# if 'obese' is Nan, then set the patient to be not obese.
X_rrt.loc[np.where(pd.isnull(X_rrt['obese']))[0], 'obese'] = 0
X_notrrt.loc[np.where(pd.isnull(X_notrrt['obese']))[0], 'obese'] = 0
```
### Let's see how X_rrt & X_notrrt look
```
# Re-check missingness in the vital-sign columns after the row filtering.
show_df_nans(X_rrt, vitals_cols)
show_df_nans(X_notrrt, vitals_cols)
```
### Some columns have significant missing values.
```
# Compare pulse statistics and sample counts between the two classes
# (Python 2 print statements).
print X_rrt[['pulse_mean', 'pulse_recent']].describe().T
print "size of X_rrt: "+str(len(X_rrt))
print
print X_notrrt[['pulse_mean', 'pulse_recent']].describe().T
print "size of X_notrrt: " + str(len(X_notrrt))
```
### We have plenty of samples for the non-RRT case. We can delete rows with missing values without concern that we'll lose negative examples for RRT events for modeling.
```
# DROP THE ROWS WHERE PULSE IS NAN
# The negative class has plenty of samples, so rows with missing vitals are
# simply dropped (one column at a time, resetting the index after each drop).
# NOTE(review): .ix is deprecated in modern pandas — see note above.
X_notrrt = X_notrrt.ix[np.where(pd.isnull(X_notrrt['pulse_mean'])!=True)[0]]
X_notrrt = X_notrrt.reset_index(drop=True)
# And similarly for all rows with significant nans:
X_notrrt = X_notrrt.ix[np.where(pd.isnull(X_notrrt['RR_mean'])!=True)[0]]
X_notrrt = X_notrrt.reset_index(drop=True)
X_notrrt = X_notrrt.ix[np.where(pd.isnull(X_notrrt['MAP_mean'])!=True)[0]]
X_notrrt = X_notrrt.reset_index(drop=True)
X_notrrt = X_notrrt.ix[np.where(pd.isnull(X_notrrt['temp_mean'])!=True)[0]]
X_notrrt = X_notrrt.reset_index(drop=True)
X_notrrt = X_notrrt.ix[np.where(pd.isnull(X_notrrt['SPO2_mean'])!=True)[0]]
X_notrrt = X_notrrt.reset_index(drop=True)
all_cols = ['age', 'sex', 'obese', 'smoker', 'prev_rrt', 'on_iv', 'bu-nal',
            'DBP_mean', 'DBP_recent', 'SBP_mean', 'SBP_recent', 'MAP_mean',
            'MAP_recent', 'temp_mean', 'temp_recent', 'SPO2_mean',
            'SPO2_recent', 'RR_mean', 'RR_recent', 'pulse_mean', 'pulse_recent',
            'anticoagulants', 'narcotics', 'narc-ans', 'antipsychotics',
            'chemo', 'dialysis', 'race']
show_df_nans(X_notrrt, all_cols)
```
### Still need to deal with nans in X_rrt. Temp & pulse are the most of concern
```
X_rrt[['temp_mean', 'pulse_mean']].describe().T
```
### We'll impute missing values in X_rrt after combining that data with X_notrrt, and use the mean from each column after merging to fill the values.
```
# add labels to indicate positive or negative class
X_rrt['label'] = 1
X_notrrt['label'] = 0
# Combine the tables
XY = pd.concat([X_rrt, X_notrrt])
XY = XY.reset_index(drop=True)
# Separate the target vector from the features.
y = XY.pop('label')
X = XY
# Fill nans with mean of columns (imputation over the combined table)
X = X.fillna(X.mean())
# map genders to 1/0
X['is_male'] = X['sex'].map({'M': 1, 'F': 0})
X.pop('sex')
X.race.value_counts()
# we won't use race in modeling
X.pop('race')
# Confirm no NaNs remain in the vital-sign columns, then inspect the result.
show_df_nans(X, vitals_cols)
X.columns
X.describe().T
```
# Modeling
```
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
print len(y_train)
print len(y_train[y_train]==1)
len(y_test[y_test==1])
Xscaled = StandardScaler().fit_transform(X)
Xs_train, Xs_test, ys_train, ys_test = train_test_split(Xscaled, y, test_size=0.3)
```
## Gradient Boosting Classifier - Unscaled (with partial dependence plots below)
```
# Hyper-parameter grid for the unscaled-data GBC, optimized for AUC-ROC with
# 5-fold CV across all cores (n_jobs=-1).
paramGrid = {'n_estimators': [100, 200, 300],
             'learning_rate': [0.1, 0.05, 0.01, 0.2],
             'max_depth': [3, 4, 5, 6],
             'min_samples_leaf': [1, 2],
             'subsample': [0.75, 1.0, 0.85],
             'loss': ['deviance'],
             'max_features': [None, 'auto']
             }
gs = GridSearchCV(GradientBoostingClassifier(),
                  param_grid=paramGrid,
                  scoring='roc_auc',
                  n_jobs=-1,
                  cv=5,
                  verbose=10)
gs.fit(X_train, y_train)
# Result: (best estimator found by this search, kept for reference)
# GradientBoostingClassifier(init=None, learning_rate=0.05, loss='deviance',
#   max_depth=3, max_features=None, max_leaf_nodes=None,
#   min_samples_leaf=2, min_samples_split=2,
#   min_weight_fraction_leaf=0.0, n_estimators=300,
#   presort='auto', random_state=None, subsample=0.75, verbose=0,
#   warm_start=False)
```
## Grid search for best GBC - Scaled (with partial dependence plots below)
```
# Same hyper-parameter grid as above, run against the scaled data to compare
# best estimators between scaled and unscaled inputs.
paramGrid = {'n_estimators': [100, 200, 300],
             'learning_rate': [0.1, 0.05, 0.01, 0.2],
             'max_depth': [3, 4, 5, 6],
             'min_samples_leaf': [1, 2],
             'subsample': [0.75, 1.0, 0.85],
             'loss': ['deviance'],
             'max_features': [None, 'auto']
             }
gss = GridSearchCV(GradientBoostingClassifier(),
                   param_grid=paramGrid,
                   scoring='roc_auc',
                   n_jobs=-1,
                   cv=5,
                   verbose=10)
gss.fit(Xs_train, ys_train)
# Result: (best estimator found by this search, kept for reference)
# GradientBoostingClassifier(init=None, learning_rate=0.05, loss='deviance',
#   max_depth=3, max_features='auto', max_leaf_nodes=None,
#   min_samples_leaf=1, min_samples_split=2,
#   min_weight_fraction_leaf=0.0, n_estimators=300,
#   presort='auto', random_state=None, subsample=0.75, verbose=0,
#   warm_start=False)
```
## How different are best estimators for scaled & unscaled data?
```
# Refit the best unscaled-data estimator found by the grid search above and
# evaluate it on the held-out test set.
gbc = GradientBoostingClassifier(init=None, learning_rate=0.05, loss='deviance',
                                 max_depth=3, max_features=None, max_leaf_nodes=None,
                                 min_samples_leaf=2, min_samples_split=2,
                                 min_weight_fraction_leaf=0.0, n_estimators=300,
                                 presort='auto', random_state=None, subsample=0.75, verbose=0,
                                 warm_start=False)
gbc.fit(X_train, y_train)
score_printout(X_test, y_test, gbc)
print classification_report(y_test, gbc.predict(X_test))
confusion_matrix(y_test, gbc.predict(X_test))
# Scaled-data variant, kept commented for comparison:
# gbcs = gss.best_estimator_
# gbcs.fit(Xs_train, ys_train)
# score_printout(Xs_test, ys_test, gbc)
# print classification_report(ys_test, gbcs.predict(Xs_test))
# confusion_matrix(ys_test, gbcs.predict(Xs_test))
```
### Use unscaled data -- better results & easier interpretability
```
# Let's plot the confusion matrix so it's a little clearer
# (annot=True writes the raw counts into each cell).
plt.figure()
sns.set(font_scale=1.5)
sns.heatmap(confusion_matrix(y_test, gbc.predict(X_test)), annot=True, fmt='d')
```
## Let's look at the most important features in this model
```
# Pair each column with its importance and sort by importance (ascending,
# since reverse=False), then plot all 27 features.
gbcRankedFeatures = sorted(zip(X.columns, gbc.feature_importances_),
                           key=lambda pair: pair[1],
                           reverse=False)
plt.figure()
make_feature_importance_plot(gbcRankedFeatures, 27) # note - we have 27 features currently
```
### Let's look at partial dependence plots
#### If the partial dependence is high, then the model for that given value of that given feature is more likely to predict an rrt result.
#### Will not show more complex interactions -- if importance is high but partial dependence is marginal, this may be due to interactions
```
# Partial dependence plots for all 27 features, six at a time (the final call
# covers the remaining three).
fig, axs = plot_partial_dependence(gbc, X_train, range(0, 6, 1), feature_names=X.columns.get_values(), n_jobs=-1, grid_resolution=50)
plt.subplots_adjust(top=0.9)
fig, axs = plot_partial_dependence(gbc, X_train, range(6, 12, 1), feature_names=X.columns.get_values(), n_jobs=-1, grid_resolution=50)
plt.subplots_adjust(top=0.9)
fig, axs = plot_partial_dependence(gbc, X_train, range(12, 18, 1), feature_names=X.columns.get_values(), n_jobs=-1, grid_resolution=50)
plt.subplots_adjust(top=0.9)
fig, axs = plot_partial_dependence(gbc, X_train, range(18, 24, 1), feature_names=X.columns.get_values(), n_jobs=-1, grid_resolution=50)
plt.subplots_adjust(top=0.9)
fig, axs = plot_partial_dependence(gbc, X_train, range(24, 27, 1), feature_names=X.columns.get_values(), n_jobs=-1, grid_resolution=50)
plt.subplots_adjust(top=0.9)
```
## Use 3-D plot to investigate feature interactions for weak partial dependence plots... (weak effect may be masked by stronger interaction with other features)
```
# 3-D partial dependence surface for a chosen pair of features; list the
# (index, name) pairs first so target_feature can be picked by index.
names = X_train.columns
zip(range(len(names)), names)
from mpl_toolkits.mplot3d import Axes3D
# not all features may work for this viz
fig = plt.figure(figsize=(10,8))
target_feature = (16, 18) # <-- change the two numbers here to determine what to plot up
pdp, (x_axis, y_axis) = partial_dependence(gbc, target_feature, X=X_train, grid_resolution=50)
XX, YY = np.meshgrid(x_axis, y_axis)
Z = pdp.T.reshape(XX.shape).T
ax = Axes3D(fig)
surf = ax.plot_surface(XX, YY, Z, rstride=1, cstride=1, cmap=plt.cm.BuPu)
ax.set_xlabel(names[target_feature[0]])
ax.set_ylabel(names[target_feature[1]])
ax.set_zlabel('Partial dependence')
# pretty init view
ax.view_init(elev=22, azim=122)
plt.colorbar(surf)
plt.suptitle('')
plt.subplots_adjust(top=0.9)
plt.show()
```
## From Model to Risk Score
```
# Return probabilities from the model, rather than predictions
y_proba = gbc.predict_proba(X_test)
# note - y_proba contains probabilities for class 0 in column 0 & probabilities for class 1 in column 1.
# we're only interested in the probability for class 1
y_proba
# Keep the class-1 probabilities aligned with the test-set row index.
pred_probs = pd.DataFrame(data=y_proba[:,1], columns =["model_probability_of_rrt"], index = X_test.index)
X_test.head()
y_test.head()
pred_probs['model_probability_of_rrt'] = pd.to_numeric(pred_probs.model_probability_of_rrt)
# Histogram of the output probabilities to see how they spread over [0, 1].
pred_probs.hist(bins = 20, xlabelsize = 16, ylabelsize=16)
plt.tick_params(labelsize=14)
plt.title("Model output probabilities")
plt.ylabel('Count', fontsize=14)
```
### Although many values are close to 0 and 1, the model outputs a full range of probabilities, which would translate well into risk scores.
### Patient Risk Score = model probability * 10
The score should be rounded to whole values to give the sense that this is not an exact measure.
```
# Risk score = model probability scaled to 0-10 and rounded to a whole number.
pred_probs['score'] = pred_probs['model_probability_of_rrt'].apply(lambda x: int(round(x*10.0, 0)))
pred_probs.head()
pred_probs.score.value_counts()
```
### Save model
```
# NOTE(review): sklearn.externals.joblib is removed in modern scikit-learn;
# newer code imports joblib directly.
from sklearn.externals import joblib
# joblib.dump(gbc, 'gbc_base.pkl') # note - if left uncompressed, this writes a whole lot of supporting numpy files.
joblib.dump(gbc, 'my_trained_model.compressed', compress=True)
# to unpack: joblib.load(filename)
```
### Save modeling table
```
# Create combined data frame including modeling table, rrt label, and proability associated with result
# NOTE(review): pd.concat's join_axes argument was removed in pandas 1.0;
# this targets an older pandas.
df = pd.concat([X_test, pred_probs, y_test],axis=1, join_axes=[X_test.index])
df.head()
# May need to rename columns to get rid of dash in name...
df.rename(columns={'bu-nal': 'bu_nal', 'narc-ans': 'narc_ans'}, inplace=True)
df.to_csv('ModelingTable_with_results.csv')
```
|
github_jupyter
|
```
import numpy as np
import pickle
import scipy
import combo
import os
import urllib
import ssl
import matplotlib.pyplot as plt
%matplotlib inline
ssl._create_default_https_context = ssl._create_unverified_context
def download():
    """Fetch the s5-210 dataset into data/s5-210.csv if not already present."""
    # FIX: the top-level cell imports only `urllib`; `urllib.request` is a
    # submodule that is not guaranteed to be loaded by that alone, so import
    # it explicitly here.
    import urllib.request
    if not os.path.exists('data/s5-210.csv'):
        if not os.path.exists('data'):
            os.mkdir('data')
        print('Downloading...')
        with urllib.request.urlopen("http://www.tsudalab.org/files/s5-210.csv") as response, open('data/s5-210.csv', 'wb') as out_file:
            out_file.write(response.read())
        print('Done')
def load_data():
    """Load the s5-210 dataset, downloading it first if necessary.

    Returns (X, t): the N x 3 feature matrix and the N-vector of negated
    target values (so larger t means lower energy).
    """
    download()
    raw = np.asarray(np.loadtxt('data/s5-210.csv', skiprows=1, delimiter=','))
    features = raw[:, 0:3]
    targets = -raw[:, 3]
    return features, targets
# Load the data.
# X is the N x d dimensional matrix. Each row of X denotes the d-dimensional feature vector of a search candidate.
# t is the N-dimensional vector that represents the corresponding negative energy of search candidates.
# ( It is of course unknown in practice. )
X, t = load_data()
# Normalize the mean and standard deviation along each column of X to 0 and 1, respectively
X = combo.misc.centering( X )
# Declare the class for calling the simulator.
# In this tutorial, we simply refer to the value of t.
# If you want to apply combo to other problems, you have to customize this class.
class simulator:
    """Oracle that looks up the precomputed negative energy for an action.

    COMBO calls the instance with a candidate index and expects the
    corresponding objective value back.
    """

    def __init__(self):
        # Cache only the target vector; the feature matrix is not needed here.
        self.t = load_data()[1]

    def __call__(self, action):
        # Return the stored negative energy for the requested candidate index.
        return self.t[action]
# Design of policy
# Declaring the policy by
policy = combo.search.discrete.policy(test_X=X)
# test_X is the set of candidates which is represented by numpy.array.
# Each row vector represents the feature vector of the corresponding candidate
# set the seed parameter (for reproducible searches)
policy.set_seed( 0 )
# If you want to perform the initial random search before starting the Bayesian optimization,
# the random sampling is performed by
res = policy.random_search(max_num_probes=20, simulator=simulator())
# Input:
# max_num_probes: number of random search
# simulator = simulator
# output: combo.search.discrete.results (class)
# single query Bayesian search
# The single query version of COMBO is performed by
res = policy.bayes_search(max_num_probes=80, simulator=simulator(), score='TS',
                          interval=20, num_rand_basis=5000)
# Input
# max_num_probes: number of searching by Bayesian optimization
# simulator: the class of simulator which is defined above
# score: the type of acquisition function. TS, EI and PI are available
# interval: the timing for learning the hyper parameter.
# In this case, the hyper parameter is learned at each 20 steps
# If you set the negative value to interval, the hyper parameter learning is not performed
# If you set zero to interval, the hyper parameter learning is performed only at the first step
# num_rand_basis: the number of basis function. If you choose 0, ordinary Gaussian process runs
# The result of searching is summarized in the class combo.search.discrete.results.history()
# res.fx: observed negative energy at each step
# res.chosed_actions: history of chosen actions
# fbest, best_action= res.export_all_sequence_best_fx(): current best fx and current best action
# that has been observed until each step
# res.total_num_search: total number of search
print('f(x)=')
print(res.fx[0:res.total_num_search])
best_fx, best_action = res.export_all_sequence_best_fx()
print('current best')
print (best_fx)
print ('current best action=')
print (best_action)
print ('history of chosed actions=')
print (res.chosed_actions[0:res.total_num_search])
# save the results
res.save('test.npz')
del res
# load the results
res = combo.search.discrete.results.history()
res.load('test.npz')
```
|
github_jupyter
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.