code
stringlengths 38
801k
| repo_path
stringlengths 6
263
|
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# https://discourse.pymc.io/t/mixture-model-with-boxcox-transformation/988
# %pylab inline
import pymc3 as pm
from pymc3.theanof import gradient, floatX
from scipy import special
import numpy.testing as npt
import theano.tensor as tt
import theano
# # Box-Cox transformation
# +
mu0, sd0 = 3., .05
y = np.random.normal(mu0, sd0, size=200)
lam = -.2
y_tr = special.inv_boxcox(y, lam)
_, ax = plt.subplots(1, 2, figsize=(15, 4))
ax[0].hist(y, bins=50, alpha=.5)
ax[0].hist(special.boxcox(y_tr, lam), bins=50, alpha=.5)
ax[1].hist(y_tr, bins=50, alpha=.5)
plt.tight_layout();
# +
from pymc3.distributions.transforms import ElemwiseTransform
class BoxCox(ElemwiseTransform):
    """Box-Cox transform used as a pymc3 variable transform.

    NOTE(review): following pymc3's transform convention, ``forward`` maps
    from the constrained space to the sampling space — here it is the
    *inverse* Box-Cox map — and ``backward`` is the Box-Cox map proper.
    """
    name = "boxcox"

    def __init__(self, lmbda):
        # lmbda may be a plain float or a theano tensor (e.g. a free RV)
        self.lmbda = tt.as_tensor_variable(lmbda)

    def forward(self, x):
        # inverse Box-Cox transformation: (1 + lmbda * x) ** (1 / lmbda)
        return tt.exp(tt.log1p(self.lmbda * x) / self.lmbda)

    def backward(self, y):
        # Box-Cox transformation: (y ** lmbda - 1) / lmbda
        return tt.expm1(self.lmbda * tt.log(y)) / self.lmbda

    def forward_val(self, y, point=None):
        # Local import keeps this notebook cell self-contained; pymc3 is
        # already imported at the top of the file.
        from pymc3.distributions.distribution import draw_values
        # BUG FIX: draw_values returns a *list* of drawn values; take its
        # single element instead of broadcasting a list against y.
        lmbda = draw_values([self.lmbda], point=point)[0]
        return np.exp(np.log1p(lmbda * y) / lmbda)

    def jacobian_det(self, x):
        # log|d backward(x) / dx|, via theano autodiff so it stays correct
        # if the transform definition changes.
        x = tt.as_tensor_variable(x)
        grad = tt.reshape(gradient(tt.sum(self.backward(x)), [x]), x.shape)
        return tt.log(tt.abs_(grad))
boxcox = BoxCox(lam)
# +
with pm.Model() as m:
mu = pm.Normal('mu', 0., 100.)
sd = pm.HalfNormal('sd', 5.)
obs = pm.Normal('y', mu, sd, observed=boxcox.backward(y_tr))
trace = pm.sample(1000, tune=1000)
pm.traceplot(trace, lines=dict(mu=mu0, sd=sd0));
# +
with pm.Model() as m:
x = pm.Normal('x', mu0, sd0, transform=boxcox)
trace = pm.sample(5000, tune=1000)
x_tr = m.free_RVs[0]
pm.traceplot(trace, varnames=[x_tr.name], priors=[x_tr.distribution]);
# -
x_tr = m.free_RVs[0]
jac = x_tr.distribution.transform_used.jacobian_det(theano.shared(np.array([-1, 2])))
print(x.ndim, x_tr.logp_elemwiset.ndim, jac.ndim)
# +
def check_elementwise_logp_transform(model):
    """Assert a transformed RV's elementwise logp equals base logp + log|Jacobian|.

    Draws a random point in the unconstrained space, evaluates the base
    distribution's logp at the back-transformed value plus the transform's
    log-Jacobian determinant, and compares against pymc3's own
    ``logp_elemwise``.
    """
    base_rv = model.deterministics[0]
    free_rv = model.free_RVs[0]
    npt.assert_equal(free_rv.ndim, free_rv.logp_elemwiset.ndim)
    start = model.test_point
    flat = np.random.randn(*model.bijection.map(start).shape)
    point = model.bijection.rmap(flat)
    transform = free_rv.distribution.transform_used
    logp_without_jac = base_rv.distribution.logp(
        transform.backward(point[free_rv.name]))
    log_jac_det = transform.jacobian_det(theano.shared(point[free_rv.name]))
    npt.assert_equal(free_rv.logp_elemwiset.ndim, log_jac_det.ndim)
    elementwise_logp = logp_without_jac + log_jac_det
    npt.assert_array_almost_equal(free_rv.logp_elemwise(point),
                                  elementwise_logp.eval())
check_elementwise_logp_transform(m)
# -
with pm.Model() as m:
mu = pm.Normal('mu', 0., 10.)
sd = pm.HalfNormal('sd', 5.)
y_latent = pm.Normal('y', mu, sd, transform=boxcox, testval=2.)
m.free_RVs
# +
y_boxcox = m.free_RVs[2]
with m:
obs = pm.Potential('obs', y_boxcox.distribution.logp(theano.shared(y_tr)))
# -
m.check_test_point()
# +
with m:
trace = pm.sample(5000, tune=1000)
pm.traceplot(trace, lines=dict(mu=mu0, sd=sd0));
# -
# # Explicitly write down the distribution
# +
from pymc3.distributions.continuous import Continuous
from pymc3.theanof import gradient
class BoxCoxNormal(Continuous):
    """Normal distribution evaluated on the Box-Cox-transformed scale.

    ``logp(value)`` scores ``boxcox(value, lmbda)`` under Normal(mu, sd)
    and adds the log-Jacobian of the Box-Cox map, so the density is
    properly normalised on the original (untransformed) scale.
    """

    def __init__(self, mu=0., sd=1., lmbda=1., **kwargs):
        self.sd = tt.as_tensor_variable(sd)
        self.mu = tt.as_tensor_variable(mu)
        self.lmbda = tt.as_tensor_variable(lmbda)
        super(BoxCoxNormal, self).__init__(**kwargs)

    def inv_boxcox_func(self, x):
        # inverse Box-Cox: (1 + lmbda * x) ** (1 / lmbda)
        return tt.exp(tt.log1p(self.lmbda * x) / self.lmbda)

    def boxcox_func(self, y):
        # Box-Cox: (y ** lmbda - 1) / lmbda
        return tt.expm1(self.lmbda * tt.log(y)) / self.lmbda

    def jacobian_det(self, x):
        # log|d boxcox(x) / dx| via theano autodiff
        x = tt.as_tensor_variable(x)
        slope = tt.reshape(
            gradient(tt.sum(self.boxcox_func(x)), [x]), x.shape)
        return tt.log(tt.abs_(slope))

    def logp(self, value):
        transformed = self.boxcox_func(value)
        return (pm.Normal.dist(self.mu, self.sd).logp(transformed)
                + self.jacobian_det(value))
# +
with pm.Model() as m:
mu = pm.Normal('mu', 0., 10.)
sd = pm.HalfNormal('sd', 5.)
y_latent = BoxCoxNormal('y', mu, sd, lmbda=lam, observed=y_tr)
trace = pm.sample(5000, tune=1000)
pm.traceplot(trace, lines=dict(mu=mu0, sd=sd0));
# -
# # mixture
# +
np.random.seed(12345) # set random seed for reproduciblity
k = 2
ndata = 4500
ndata2 = 500
mu0, mu1 = 3.1, 2.9
# mu0, mu1 = 3.1, 2.5
sd0, sd1 = np.sqrt(0.014), np.sqrt(0.022)
# simulate data from mixture distribution
data_ = np.random.normal(loc=mu0, scale=sd0, size=ndata)
data2_ = np.random.normal(loc=mu1, scale=sd1, size=ndata2)
# lambdas for the boxcox transformation
ld_1 = -0.18
ld_2 = -0.26
# Convert the Gaussians back to the original data scale
data = special.inv_boxcox(data_, ld_1)
data2 = special.inv_boxcox(data2_, ld_2)
combi_data_ = np.concatenate((data_, data2_), axis=0)
# Make the final array
combi_data = np.concatenate((data, data2), axis=0)
np.random.shuffle(combi_data)
n = ndata + ndata2
# +
_, ax = plt.subplots(1, 2, figsize=(15, 4))
bin0 = np.linspace(combi_data_.min(), combi_data_.max(), 500)
ax[0].hist(combi_data_, bin0, alpha=.3)
ax[0].hist(data_, bin0, alpha=.3)
ax[0].hist(data2_, bin0, alpha=.3)
bin1 = np.linspace(combi_data.min(), combi_data.max(), 200)
ax[1].hist(combi_data, bin1, alpha=.3)
ax[1].hist(data, bin1, alpha=.3)
ax[1].hist(data2, bin1, alpha=.3)
plt.tight_layout();
# -
with pm.Model() as m:
w = pm.Dirichlet('w', a=np.ones(2))
mus = pm.Normal('mus', 2.8, 5., shape=2)
sds = pm.HalfNormal('sds', .5, shape=2)
mix_logp = [BoxCoxNormal.dist(mus[0], sds[0], lmbda=ld_1),
BoxCoxNormal.dist(mus[1], sds[1], lmbda=ld_2),]
obs = pm.Mixture('y', w, mix_logp, observed=combi_data)
m.check_test_point()
# +
with m:
map1 = pm.find_MAP()
map1
# -
print(mu0, mu1)
print(sd0, sd1)
# +
with m:
trace = pm.sample(1000, tune=1000, start=map1)
pm.traceplot(trace);
# -
# # with informative prior
import scipy.stats as st
plt.style.use('seaborn-darkgrid')
x = np.linspace(0, 1, 100)
alphas = [8.]
betas = [2.]
for a, b in zip(alphas, betas):
pdf = st.invgamma.pdf(x, a, scale=b)
plt.plot(x, pdf)
plt.vlines(-ld_1, 0, 10)
plt.vlines(-ld_2, 0, 10)
plt.xlabel('x', fontsize=12)
plt.ylabel('f(x)', fontsize=12)
plt.legend(loc=1)
plt.show()
with pm.Model() as m:
w = pm.Dirichlet('w', a=np.ones(2))
mus = pm.Normal('mus', 2.8, 5., shape=2)
sds = pm.HalfNormal('sds', .5, shape=2)
lmbdas = pm.InverseGamma('lambdas', 8., 2., shape=2,
testval=np.asarray([-ld_1, -ld_2]))
mix_logp = [BoxCoxNormal.dist(mus[0], sds[0], lmbda=-lmbdas[0]),
BoxCoxNormal.dist(mus[1], sds[1], lmbda=-lmbdas[1]),]
obs = pm.Mixture('y', w, mix_logp, observed=combi_data)
m.check_test_point()
# +
with m:
# using the MAP from the above model to help convergence
map2 = pm.find_MAP(start=map1)
map2
# -
print(mu0, mu1)
print(sd0, sd1)
print(-ld_1, -ld_2)
# +
with m:
trace = pm.sample(1000, tune=1000, start=map2, init='adapt_diag')
pm.traceplot(trace);
# -
|
PyMC3QnA/Box-Cox transformation.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" papermill={"duration": 15.287961, "end_time": "2021-10-28T10:19:04.503522", "exception": false, "start_time": "2021-10-28T10:18:49.215561", "status": "completed"} tags=[]
# %pip install scikit-learn -U
# + papermill={"duration": 0.979723, "end_time": "2021-10-28T10:19:05.501434", "exception": false, "start_time": "2021-10-28T10:19:04.521711", "status": "completed"} tags=[]
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
X, y = make_classification(n_samples=300, n_features=50,
n_informative=10,
n_redundant=25, n_repeated=15,
n_clusters_per_class=5,
flip_y=0.05, class_sep=0.5,
random_state=0)
# + papermill={"duration": 0.045382, "end_time": "2021-10-28T10:19:05.563986", "exception": false, "start_time": "2021-10-28T10:19:05.518604", "status": "completed"} tags=[]
from sklearn import svm
svc = svm.SVC()
svc = svm.SVC(probability=True, random_state=1)
from sklearn import model_selection
search_grid = [
{'C': [1, 10, 100, 1000], 'kernel': ['linear']},
{'C': [1, 10, 100, 1000], 'gamma': [0.001, 0.0001],
'kernel': ['rbf']}
]
scorer = 'accuracy'
# + papermill={"duration": 943.048701, "end_time": "2021-10-28T10:34:48.629569", "exception": false, "start_time": "2021-10-28T10:19:05.580868", "status": "completed"} tags=[]
search_func = model_selection.GridSearchCV(estimator=svc,
param_grid=search_grid,
scoring=scorer,
n_jobs=-1,
cv=5)
search_func.fit(X, y)
print (search_func.best_params_)
print (search_func.best_score_)
# + papermill={"duration": 436.289573, "end_time": "2021-10-28T10:42:04.936849", "exception": false, "start_time": "2021-10-28T10:34:48.647276", "status": "completed"} tags=[]
import scipy.stats as stats
from sklearn.utils.fixes import loguniform
search_dict = {'kernel': ['linear', 'rbf'],
'C': loguniform(1, 1000),
'gamma': loguniform(0.0001, 0.1)
}
scorer = 'accuracy'
search_func = model_selection.RandomizedSearchCV(estimator=svc,
param_distributions=search_dict,
n_iter=6,
scoring=scorer,
n_jobs=-1,
cv=5
)
search_func.fit(X, y)
print (search_func.best_params_)
print (search_func.best_score_)
# + papermill={"duration": 65.594403, "end_time": "2021-10-28T10:43:10.549795", "exception": false, "start_time": "2021-10-28T10:42:04.955392", "status": "completed"} tags=[]
from sklearn.experimental import enable_halving_search_cv
from sklearn.model_selection import HalvingRandomSearchCV
search_func = HalvingRandomSearchCV(estimator=svc,
param_distributions=search_dict,
resource='n_samples',
max_resources=100,
aggressive_elimination=True,
scoring=scorer,
n_jobs=-1,
cv=5,
random_state=0)
search_func.fit(X, y)
print (search_func.best_params_)
print (search_func.best_score_)
|
chapter_08/basic-optimization-practices.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# + [markdown] deletable=true editable=true
# # Odczyt i zapis danych, formaty plików
# + deletable=true editable=true
import numpy as np
import pandas as pd
np.random.seed(12345)
import matplotlib.pyplot as plt
# %matplotlib inline
plt.rc('figure', figsize=(10, 6))
np.set_printoptions(precision=4, suppress=True)
# + [markdown] deletable=true editable=true
# ## Odczyt i zapis danych w formacie tekstowym
# + deletable=true editable=true
# !cat examples/ex1.csv
# + deletable=true editable=true
df = pd.read_csv('przyklady/ex1.csv')
df
# + deletable=true editable=true
pd.read_table('przyklady/ex1.csv', sep=',')
# + deletable=true editable=true
# !cat examples/ex2.csv
# + deletable=true editable=true
pd.read_csv('przyklady/ex2.csv', header=None)
pd.read_csv('przyklady/ex2.csv', names=['a', 'b', 'c', 'd', 'message'])
# + deletable=true editable=true
names = ['a', 'b', 'c', 'd', 'message']
pd.read_csv('przyklady/ex2.csv', names=names, index_col='message')
# + deletable=true editable=true
# !cat examples/csv_mindex.csv
parsed = pd.read_csv('przyklady/csv_mindex.csv',
index_col=['key1', 'key2'])
parsed
# + deletable=true editable=true
list(open('przyklady/ex3.txt'))
# + deletable=true editable=true
result = pd.read_table('przyklady/ex3.txt', sep='\s+')
result
# + deletable=true editable=true
# !cat examples/ex4.csv
pd.read_csv('przyklady/ex4.csv', skiprows=[0, 2, 3])
# + deletable=true editable=true
# !cat examples/ex5.csv
result = pd.read_csv('przyklady/ex5.csv')
result
pd.isnull(result)
# + deletable=true editable=true
result = pd.read_csv('przyklady/ex5.csv', na_values=['NULL'])
result
# + deletable=true editable=true
sentinels = {'message': ['foo', 'NA'], 'something': ['two']}
pd.read_csv('examples/ex5.csv', na_values=sentinels)
# + [markdown] deletable=true editable=true
# ### Wczytywanie części pliku tekstowego
# + deletable=true editable=true
pd.options.display.max_rows = 10
# + deletable=true editable=true
result = pd.read_csv('przyklady/ex6.csv')
result
# + deletable=true editable=true
pd.read_csv('przyklady/ex6.csv', nrows=5)
# + deletable=true editable=true
chunker = pd.read_csv('przyklady/ex6.csv', chunksize=1000)
chunker
# + deletable=true editable=true
chunker = pd.read_csv('przyklady/ex6.csv', chunksize=1000)
tot = pd.Series([])
for piece in chunker:
tot = tot.add(piece['key'].value_counts(), fill_value=0)
tot = tot.sort_values(ascending=False)
# + deletable=true editable=true
tot[:10]
# + [markdown] deletable=true editable=true
# ### Zapis danych w formacie tekstowym
# + deletable=true editable=true
data = pd.read_csv('przyklady/ex5.csv')
data
# + deletable=true editable=true
data.to_csv('przyklady/out.csv')
# !cat przyklady/out.csv
# + deletable=true editable=true
import sys
data.to_csv(sys.stdout, sep='|')
# + deletable=true editable=true
data.to_csv(sys.stdout, na_rep='NULL')
# + deletable=true editable=true
data.to_csv(sys.stdout, index=False, header=False)
# + deletable=true editable=true
data.to_csv(sys.stdout, index=False, columns=['a', 'b', 'c'])
# + deletable=true editable=true
dates = pd.date_range('1/1/2000', periods=7)
ts = pd.Series(np.arange(7), index=dates)
ts.to_csv('przyklady/tseries.csv')
# !cat przyklady/tseries.csv
# + [markdown] deletable=true editable=true
# ### Praca z plikami danych rozgraniczonych
# + deletable=true editable=true
# !cat przyklady/ex7.csv
# + deletable=true editable=true
import csv
f = open('przyklady/ex7.csv')
reader = csv.reader(f)
# + deletable=true editable=true
for line in reader:
print(line)
# + deletable=true editable=true
with open('przyklady/ex7.csv') as f:
lines = list(csv.reader(f))
# + deletable=true editable=true
header, values = lines[0], lines[1:]
# + deletable=true editable=true
data_dict = {h: v for h, v in zip(header, zip(*values))}
data_dict
# + [markdown] deletable=true editable=true
# class my_dialect(csv.Dialect):
# lineterminator = '\n'
# delimiter = ';'
# quotechar = '"'
# quoting = csv.QUOTE_MINIMAL
# + [markdown] deletable=true editable=true
# reader = csv.reader(f, dialect=my_dialect)
# + [markdown] deletable=true editable=true
# reader = csv.reader(f, delimiter='|')
# + [markdown] deletable=true editable=true
# with open('mydata.csv', 'w') as f:
# writer = csv.writer(f, dialect=my_dialect)
# writer.writerow(('one', 'two', 'three'))
# writer.writerow(('1', '2', '3'))
# writer.writerow(('4', '5', '6'))
# writer.writerow(('7', '8', '9'))
# + [markdown] deletable=true editable=true
# ### Dane w formacie JSON
# + deletable=true editable=true
obj = """
{"name": "Wes",
"places_lived": ["United States", "Spain", "Germany"],
"pet": null,
"siblings": [{"name": "Scott", "age": 30, "pets": ["Zeus", "Zuko"]},
{"name": "Katie", "age": 38,
"pets": ["Sixes", "Stache", "Cisco"]}]
}
"""
# + deletable=true editable=true
import json
result = json.loads(obj)
result
# + deletable=true editable=true
asjson = json.dumps(result)
# + deletable=true editable=true
siblings = pd.DataFrame(result['siblings'], columns=['name', 'age'])
siblings
# + deletable=true editable=true
# !cat przyklady/example.json
# + deletable=true editable=true
data = pd.read_json('przyklady/example.json')
data
# + deletable=true editable=true
print(data.to_json())
print(data.to_json(orient='records'))
# + [markdown] deletable=true editable=true
# ### XML i HTML: web scraping
# + [markdown] deletable=true editable=true
# conda install lxml
# pip install beautifulsoup4 html5lib
# + deletable=true editable=true
tables = pd.read_html('przyklady/fdic_failed_bank_list.html')
len(tables)
failures = tables[0]
failures.head()
# + deletable=true editable=true
close_timestamps = pd.to_datetime(failures['Closing Date'])
close_timestamps.dt.year.value_counts()
# + [markdown] deletable=true editable=true
# #### Parsowanie formatu XML za pomocą modułu lxml.objectify
# + [markdown] deletable=true editable=true
# <INDICATOR>
# <INDICATOR_SEQ>373889</INDICATOR_SEQ>
# <PARENT_SEQ></PARENT_SEQ>
# <AGENCY_NAME>Metro-North Railroad</AGENCY_NAME>
# <INDICATOR_NAME>Escalator Availability</INDICATOR_NAME>
# <DESCRIPTION>Percent of the time that escalators are operational
# systemwide. The availability rate is based on physical observations performed
# the morning of regular business days only. This is a new indicator the agency
# began reporting in 2009.</DESCRIPTION>
# <PERIOD_YEAR>2011</PERIOD_YEAR>
# <PERIOD_MONTH>12</PERIOD_MONTH>
# <CATEGORY>Service Indicators</CATEGORY>
# <FREQUENCY>M</FREQUENCY>
# <DESIRED_CHANGE>U</DESIRED_CHANGE>
# <INDICATOR_UNIT>%</INDICATOR_UNIT>
# <DECIMAL_PLACES>1</DECIMAL_PLACES>
# <YTD_TARGET>97.00</YTD_TARGET>
# <YTD_ACTUAL></YTD_ACTUAL>
# <MONTHLY_TARGET>97.00</MONTHLY_TARGET>
# <MONTHLY_ACTUAL></MONTHLY_ACTUAL>
# </INDICATOR>
# + deletable=true editable=true
from lxml import objectify
path = 'przyklady/mta_perf/Performance_MNR.xml'
parsed = objectify.parse(open(path))
root = parsed.getroot()
# + deletable=true editable=true
data = []
skip_fields = ['PARENT_SEQ', 'INDICATOR_SEQ',
'DESIRED_CHANGE', 'DECIMAL_PLACES']
for elt in root.INDICATOR:
el_data = {}
for child in elt.getchildren():
if child.tag in skip_fields:
continue
el_data[child.tag] = child.pyval
data.append(el_data)
# + deletable=true editable=true
perf = pd.DataFrame(data)
perf.head()
# + deletable=true editable=true
from io import StringIO
tag = '<a href="http://www.google.com">Google</a>'
root = objectify.parse(StringIO(tag)).getroot()
# + deletable=true editable=true
root
root.get('href')
root.text
# + [markdown] deletable=true editable=true
# ## Formaty danych binarnych
# + deletable=true editable=true
frame = pd.read_csv('przyklady/ex1.csv')
frame
frame.to_pickle('przyklady/frame_pickle')
# + deletable=true editable=true
pd.read_pickle('przyklady/frame_pickle')
# + deletable=true editable=true
# !rm przyklady/frame_pickle
# + [markdown] deletable=true editable=true
# ### Obsługa formatu HDF5
# + deletable=true editable=true
frame = pd.DataFrame({'a': np.random.randn(100)})
store = pd.HDFStore('mydata.h5')
store['obj1'] = frame
store['obj1_col'] = frame['a']
store
# + deletable=true editable=true
store['obj1']
# + deletable=true editable=true
store.put('obj2', frame, format='table')
store.select('obj2', where=['index >= 10 and index <= 15'])
store.close()
# + deletable=true editable=true
frame.to_hdf('mydata.h5', 'obj3', format='table')
pd.read_hdf('mydata.h5', 'obj3', where=['index < 5'])
# + deletable=true editable=true
os.remove('mydata.h5')
# + [markdown] deletable=true editable=true
# ### Wczytywanie plików programu Microsoft Excel
# + deletable=true editable=true
xlsx = pd.ExcelFile('przyklady/ex1.xlsx')
# + deletable=true editable=true
pd.read_excel(xlsx, 'Sheet1')
# + deletable=true editable=true
frame = pd.read_excel('przyklady/ex1.xlsx', 'Sheet1')
frame
# + deletable=true editable=true
writer = pd.ExcelWriter('przyklady/ex2.xlsx')
frame.to_excel(writer, 'Sheet1')
writer.save()
# + deletable=true editable=true
frame.to_excel('przyklady/ex2.xlsx')
# + deletable=true editable=true
# !rm przyklady/ex2.xlsx
# + [markdown] deletable=true editable=true
# ## Obsługa interfejsów sieciowych
# + deletable=true editable=true
import requests
url = 'https://api.github.com/repos/pandas-dev/pandas/issues'
resp = requests.get(url)
resp
# + deletable=true editable=true
data = resp.json()
data[0]['title']
# + deletable=true editable=true
issues = pd.DataFrame(data, columns=['number', 'title',
'labels', 'state'])
issues
# + [markdown] deletable=true editable=true
# ## Obsługa baz danych
# + deletable=true editable=true
import sqlite3
query = """
CREATE TABLE test
(a VARCHAR(20), b VARCHAR(20),
c REAL, d INTEGER
);"""
con = sqlite3.connect('mydata.sqlite')
con.execute(query)
con.commit()
# + deletable=true editable=true
data = [('Atlanta', 'Georgia', 1.25, 6),
('Tallahassee', 'Florida', 2.6, 3),
('Sacramento', 'California', 1.7, 5)]
stmt = "INSERT INTO test VALUES(?, ?, ?, ?)"
con.executemany(stmt, data)
con.commit()
# + deletable=true editable=true
cursor = con.execute('select * from test')
rows = cursor.fetchall()
rows
# + deletable=true editable=true
cursor.description
pd.DataFrame(rows, columns=[x[0] for x in cursor.description])
# + deletable=true editable=true
import sqlalchemy as sqla
db = sqla.create_engine('sqlite:///mydata.sqlite')
pd.read_sql('select * from test', db)
# + deletable=true editable=true
# !rm mydata.sqlite
# + [markdown] deletable=true editable=true
# ## Podsumowanie
|
pytand/r06.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
bankdataCSV = os.path.join("..", "data", "budget_data.csv")
bankdataCSV
### Old Monolithic Code
import os
import csv
# Include the path for the csv file
bankdataCSV = os.path.join("..", "data", "budget_data.csv")
# Open the csv file in read mode
with open(bankdataCSV, 'r') as csvfile:
# set the delimiter as commas
data = csv.reader(csvfile, delimiter=',')
# Skip the first line with header info
next(data)
# Create an empty set to count the number of unique months (check for duplicate months)
unique_months = set()
# set variable for net total results as counter
net_total = 0
# set variable for average change results
chg_sum = 0
row_count = 0
# Set the previous amount to 'none' to account for no comparison against row 1
previous_amount = None
# Set the current maximum increase and decrease to be zero as a starting point
current_max_inc = 0
current_max_dec = 0
# include variable for current maximum inc/dec dates
current_max_inc_date = ""
current_max_dec_date = ""
# Loop through each row in the dataset
for row in data:
row_count += 1
# Get a unique list of months and add to a set
unique_months.add(row[0])
# Sum the profits and losses column
net_total = net_total + int(row[1])
# Set the current amount to be the row we are iterating over and in index 1 position
current_amount = (int(row[1]))
# If previous amount is none, set previous amount and continue
if previous_amount is None:
previous_amount = (int(row[1]))
# go to the next row if previous amount is none
continue
chg_sum = chg_sum + (current_amount - previous_amount)
# If the current amount - previous amount result is greater than current max increase
if (current_amount) - (previous_amount) > current_max_inc:
current_max_inc = (current_amount - previous_amount)
current_max_inc_date = row[0]
elif (current_amount - previous_amount) < current_max_inc:
pass
if (current_amount - previous_amount) < current_max_dec:
current_max_dec = (current_amount - previous_amount)
current_max_dec_date = row[0]
elif (current_amount - previous_amount) > current_max_dec:
pass
previous_amount = current_amount
print("=============================================================")
print(f"Unique months : {len(unique_months)}")
print(f"Total : ${net_total}")
print(f"Average change : ${round(chg_sum/(row_count-1),2)}")
print(f"Greatest increase in profits: {current_max_inc_date} ($ {str(current_max_inc)} )")
print(f"Greatest decrease in profits: {current_max_dec_date} ($ {str(current_max_dec)} )")
print("=============================================================")
# ### The entire code block above essentially performs five tasks:
# 1.) finds the number of unique months <br>
# 2.) finds the net total <br>
# 3.) finds the average change <br>
# 4.) finds the greatest increase in profits <br>
# 5.) finds the greatest decrease in profits <br>
#
# #### These tasks form a natural grouping. Tasks 1-3 can be grouped together, while 4-5 can be grouped together as well
# #### Grouping the code above into separate functions has the following benefits:
# - It improves readability
# - It makes debugging easier
# - It speeds up development
# - Writing functions is fun when everything comes together!
#
# # Pseudo code time
#
# #### First function
# - This function will calculate the number of unique months, the net total, and then using those two, calculate the average change.
# - The first question to ask yourself is what does this function require as input? The answer is 'the csv file' (budget_data.csv in this case)
# - The next question is what do I expect as output. The answer is the number of unique months, net total, and average change.
#
# So tying it all together, I expect the first function to do something like this:
#
# ```python
# def calculate_unique_months_net_total_and_average_change(csv_file):
# # necessary calculations done here
# return unique_month_count, net_total, average_change
# ```
#
# #### Second function
# - This function calculates the greatest profit and greatest loss
# - for this calculation, depending on which is greater between the previous amount and the current amount, we determine whether we compute the greatest profit or the greatest loss
# - Again we ask ourselves, what does this function require as input? The answer is 'the csv file'
# - What outputs do we expect this function to give us? The greatest profit, greatest loss, greatest_profit_date and greatest_loss_date.
#
# So tying it all together, I expect the second function to do something like this:
#
# ```python
# def calculate_greatest_profit_and_greatest_loss(csv_file):
# # necessary calculations done here
# return greatest_profit, greatest_loss, greatest_profit_date, greatest_loss_date
# ```
# +
### Here we will define function one
import csv
def calculate_unique_months_net_total_and_average_change(csv_file):
    """Summarise the budget CSV.

    Parameters
    ----------
    csv_file : str
        Path to a CSV with a header row and rows of (month, amount).

    Returns
    -------
    tuple
        (unique_month_count, net_total, average_change) where
        average_change is the mean month-over-month difference.
    """
    with open(csv_file, 'r') as csvfile:
        data = csv.reader(csvfile, delimiter=',')
        next(data)  # skip the header row
        first_row = next(data)
        # BUG FIX: the first data row only seeded previous_amount before,
        # so its month was never counted — add it to the set up front.
        unique_months = {first_row[0]}
        previous_amount = int(first_row[1])
        net_total = previous_amount
        chg_sum = 0
        row_count = 0  # number of month-over-month changes (rows - 1)
        for row in data:
            row_count += 1
            unique_months.add(row[0])
            current_amount = int(row[1])
            net_total += current_amount
            chg_sum += current_amount - previous_amount
            previous_amount = current_amount
    unique_month_count = len(unique_months)
    average_change = chg_sum / row_count
    return unique_month_count, net_total, average_change
# -
### Validation to see if the function works
calculate_unique_months_net_total_and_average_change(bankdataCSV)
# +
### Here we will define function two
import csv
def calculate_greatest_profit_and_greatest_loss(csv_file):
    """Find the largest month-over-month gain and loss in the budget CSV.

    Returns (greatest_profit, greatest_loss, greatest_profit_date,
    greatest_loss_date); if no positive (or negative) change occurs the
    corresponding amount stays 0 with an empty date string.
    """
    best_gain, worst_drop = 0, 0
    best_gain_date, worst_drop_date = "", ""
    with open(csv_file, 'r') as handle:
        rows = csv.reader(handle, delimiter=',')
        next(rows)  # header row
        # Seed the comparison with the first data row's amount.
        prior = int(next(rows)[1])
        for row in rows:
            value = int(row[1])
            delta = value - prior
            if value > prior and delta > best_gain:
                best_gain = delta
                best_gain_date = row[0]
            if value < prior and delta < worst_drop:
                worst_drop = delta
                worst_drop_date = row[0]
            prior = value
    return best_gain, worst_drop, best_gain_date, worst_drop_date
# -
### Validation to see if the function works
calculate_greatest_profit_and_greatest_loss(bankdataCSV)
# +
### So now that we see both functions give us what we need, lets bring the two of them together to solve the homework
### We will define one last function whose job is just to print the results produced by the other two functions
### We can call this function whatever, I will call it pyBank to match the file name
### the function pyBank will take the csv_file as input. Why? you might ask. The reason is the pyBank acts as an entry
### point into the other functions. Since the other functions require the csv_file, then we need to pass that into
### pyBank function and it will in turn pass the to the other function.... So here we go
def pyBank(csv_file):
    """Entry point: run both analysis functions on csv_file and print a summary."""
    months, total, avg_change = \
        calculate_unique_months_net_total_and_average_change(csv_file)
    gain, loss, gain_date, loss_date = \
        calculate_greatest_profit_and_greatest_loss(csv_file)
    divider = "============================================================="
    print(divider)
    print(f"Unique months : {months}")
    print(f"Total : ${total}")
    print(f"Average change : ${round(avg_change, 2)}")
    print(f"Greatest increase in profits: {gain_date} ($ {str(gain)} )")
    print(f"Greatest decrease in profits: {loss_date} ($ {str(loss)} )")
    print(divider)
# -
#### Fingers crossed, this should work.....
pyBank(bankdataCSV)
|
notebooks/pyBank.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="CrFyHS6Qv0bM" colab_type="text"
# # **Basics**
# + id="QpkHA2jbvtzV" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 33} outputId="797b4004-95cb-42de-a2cf-9d1484308c4b" executionInfo={"status": "ok", "timestamp": 1576970220133, "user_tz": 420, "elapsed": 1276, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}}
import numpy as np
a = np.array([1,2,3])
a
# + id="DwoBITEwv_lM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 50} outputId="d8f6cafb-3435-4910-d7d8-e3f3b94b3a07" executionInfo={"status": "ok", "timestamp": 1576970244031, "user_tz": 420, "elapsed": 1059, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}}
a = np.ones((2,5))
a
# + id="hm2SQQTHwF73" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 33} outputId="fa3e9a6f-0293-4f30-e2bf-271408e30df9" executionInfo={"status": "ok", "timestamp": 1576970260853, "user_tz": 420, "elapsed": 885, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}}
a = np.zeros((1,5))
a
# + id="uwQ9SjOSwJaD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 33} outputId="6bd5b427-c002-46ec-8d79-93d8e1ee3dca" executionInfo={"status": "ok", "timestamp": 1576970374866, "user_tz": 420, "elapsed": 886, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}}
a = np.random.random(2) #only 1D array can be returned
a
# + id="xN7A2iX2wJzo" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 50} outputId="6b086820-a8aa-4ee7-e11b-9fb62074b48d" executionInfo={"status": "ok", "timestamp": 1576970355659, "user_tz": 420, "elapsed": 890, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}}
a = np.random.rand(2,4) #2D or any dimensional array can be returned
a
# + id="QYH2X128wJ2h" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 33} outputId="57e96f81-c597-4ca5-fac6-75400a8277ed" executionInfo={"status": "ok", "timestamp": 1576970451716, "user_tz": 420, "elapsed": 879, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}}
a = np.random.randn(2) #returns n samples from standard normal fcuntion
a
# + id="gYDPEv24wJ4n" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 33} outputId="5a577807-8b0d-47e5-d601-55ca93738287" executionInfo={"status": "ok", "timestamp": 1576970527179, "user_tz": 420, "elapsed": 259, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}}
a = np.random.randint(5,10,4) #4 random integers from the interval 5 to 10
a
# + id="mfct2OfVwJ7E" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 50} outputId="b029cf90-f9f9-4bca-c5ca-03c14c87ca2f" executionInfo={"status": "ok", "timestamp": 1576970565233, "user_tz": 420, "elapsed": 880, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}}
a = np.empty((2,4))
a
# + id="1lf5i1g-wJ9o" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 50} outputId="b463df77-2eaa-4471-dc5d-560a75d1d962" executionInfo={"status": "ok", "timestamp": 1576970606067, "user_tz": 420, "elapsed": 879, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}}
a = np.full((2,3),55)
a
# + id="Uvi65sSgwJ_0" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 33} outputId="39dde9da-501b-41ce-e72b-627f3a0b8fef" executionInfo={"status": "ok", "timestamp": 1576970683789, "user_tz": 420, "elapsed": 867, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}}
a = np.arange(10,25,3) #evenly spaced values in the interval 10 to 25 and increments 3
a
# + id="PUBWLlajwKCY" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 33} outputId="ad462cbf-a271-4f7e-f474-e0d4a6270e8b" executionInfo={"status": "ok", "timestamp": 1576970744704, "user_tz": 420, "elapsed": 889, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}}
a = np.linspace(2,10,3) # returns 3 evenly spaced values in interval 2 to 10
a
# + id="POF2siydwKE-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 84} outputId="a729d07b-be32-47a9-a525-c8db6281fc28" executionInfo={"status": "ok", "timestamp": 1576970780278, "user_tz": 420, "elapsed": 931, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}}
a = np.eye(4) #identity matrices are alwaus square matrices so one argument is enough
a
# + id="KM39IiKKwKHX" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 134} outputId="644b023c-ecbf-4f06-de5b-45ea974209ce" executionInfo={"status": "ok", "timestamp": 1576970825290, "user_tz": 420, "elapsed": 856, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}}
a = np.identity(7)
a
# + id="J7krLqZWwKJv" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 50} outputId="ed7cf509-0fe9-4683-96f1-765c22c91baa" executionInfo={"status": "ok", "timestamp": 1576971235064, "user_tz": 420, "elapsed": 461, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}}
# Mount Google Drive into the Colab VM so the notebook can read/write files there.
# NOTE(review): Colab-only; triggers an interactive authentication prompt.
from google.colab import drive
drive.mount('/gdrive')
# %cd /gdrive
# + id="7wzSjCEDyVdk" colab_type="code" colab={}
# When there are missing values and mixed data types, use np.genfromtxt.
# It is the lower-level, more flexible loader: skip_header drops the first
# line, and filling_values substitutes -999 for any missing entries.
my_array2 = np.genfromtxt('data2.txt',
skip_header=1,
filling_values=-999)
# + id="HoEXOVsGyVf8" colab_type="code" colab={}
# Higher-level than genfromtxt - np.loadtxt cannot handle missing values.
# unpack=True transposes the result so each column unpacks into its own variable.
x, y, z = np.loadtxt('data.txt',
skiprows=1,
unpack=True)
# + id="qjyxb4nuyVnY" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 234} outputId="f570f227-45a2-4a50-c7bd-04957e7684b7" executionInfo={"status": "ok", "timestamp": 1576972560073, "user_tz": 420, "elapsed": 897, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}}
# 5x5 array of uniform random floats in [0, 1); values differ on every run.
my_array = np.random.rand(5,5)
my_array
# Print the number of `my_array`'s dimensions #2d,3d or higher
print(my_array.ndim)
# Print the number of `my_array`'s elements
print(my_array.size)
# Print information about `my_array`'s memory layout
print(my_array.flags)
# Print the length of one array element in bytes
print(my_array.itemsize)
# Print the total consumed bytes by `my_array`'s elements
print(my_array.nbytes)
# Print the length of `my_array` (len() only counts the first dimension, here 5)
print(len(my_array))
# + id="5MLCsIykyVqD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 167} outputId="451365ff-6c17-48b5-b2a1-34b129b7aa35" executionInfo={"status": "ok", "timestamp": 1576972474331, "user_tz": 420, "elapsed": 877, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}}
my_2d_array = np.random.rand(5,5)
print(my_2d_array)
# Print out memory address (.data is a memoryview of the buffer, not an integer address)
print(my_2d_array.data)
# Print out the shape of `my_array`
print(my_2d_array.shape)
# Print out the data type of `my_array`
print(my_2d_array.dtype)
# Print out the stride of `my_array` #(x,y): x bytes to step to the next row, y bytes to the next column element
print(my_2d_array.strides)
# + id="fZb-5bBoyVsP" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 100} outputId="1a39a82c-d831-40b9-8490-df5c4466c48b" executionInfo={"status": "ok", "timestamp": 1576972605300, "user_tz": 420, "elapsed": 872, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}}
my_array.astype(float) #change the datatype of the array elements
# + [markdown] id="TKqA9QvM5kVz" colab_type="text"
# # **Elementwise Operations**
# + id="jduhXg5GyVwF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 167} outputId="aa948b29-566f-4137-bc70-8b2136f6e7fe" executionInfo={"status": "ok", "timestamp": 1576972835023, "user_tz": 420, "elapsed": 877, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}}
# Broadcasting demo: (3,4) + (1,4) stretches the single row of y across x.
x =np.ones((3,4))
y = np.ones((1,4))
print(x, "\n")
print(y)
print("\n", x+y) #small array is added to the large array multiple times
#if they are of same size, simple elementwise addition
# + id="e6X-N24-6L9m" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 569} outputId="d88e54ff-06e9-4ba8-ed5a-22d8435f4274" executionInfo={"status": "ok", "timestamp": 1576973343457, "user_tz": 420, "elapsed": 699, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}}
x = np.ones((3,4))
y = np.random.random((5,1,4))
print(x, "\n")
print(y)
#print("\n", x+y)
# Broadcasting (3,4) against (5,1,4) yields a (5,3,4) result; np.add is the
# functional equivalent of the + operator above.
print("\n", np.add(x,y))
# + id="MkWDsTxi6MAB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 33} outputId="26a3160d-51ff-4d31-8458-8b49f5efa1d0" executionInfo={"status": "ok", "timestamp": 1576973376338, "user_tz": 420, "elapsed": 863, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08629147319952024849"}}
np.array_equal(x,y)
# + id="9oMOHUF16MHc" colab_type="code" colab={}
# + id="AVDHeO006MKD" colab_type="code" colab={}
# + id="MY4-1K6w6MMM" colab_type="code" colab={}
# + id="r4Saf8j56MOn" colab_type="code" colab={}
# + id="sHdnZXyv6FZd" colab_type="code" colab={}
|
NumPy.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Sorting Sequences
# Just like with the concatenation and in-place concatenation we saw previously, we have two different ways of sorting a mutable sequence:
#
# * returning a new sorted sequence
# * in-place sorting (mutating sequence) - obviously this works for mutable sequence types only!
#
# For any iterable, the built-in `sorted` function will return a **list** containing the sorted elements of the iterable.
#
# So a few things here:
# * any iterable can be sorted (as long as it is finite)
# * the elements must be pair-wise comparable (possibly indirectly via a sort key)
# * the returned result is always a list
# * the original iterable is not mutated
#
# In addition:
# * optionally specify a `key` - a function that extracts a comparison key for each element. If that key is not specified, Python will use the natural ordering of the elements (such as __gt__, etc, so that fails if they do not!)
# * optionally specify the `reverse` argument which will return the reversed sort
# Numbers have a natural ordering for example, so sorting an iterable of numbers is easy:
# `sorted` works on any finite iterable and always returns a new list.
t = 10, 3, 5, 8, 9, 6, 1
sorted(t)
# As you can see we sorted a `tuple` and got a `list` back.
# We can sort non-sequence iterables too:
s = {10, 3, 5, 8, 9, 6, 1}
sorted(s)
# For things like dictionaries, this works slightly differently. Remember what happens when we iterate a dictionary?
d = {3: 100, 2: 200, 1: 10}
for item in d:
    print(item)
# We actually are iterating the keys.
#
# Same thing happens with sorting - we'll end up just sorting the keys:
d = {3: 100, 2: 200, 1: 10}
sorted(d)
# But what if we wanted to sort the dictionary keys based on the values instead?
# This is where the `key` argument of `sorted` will come in handy.
#
# We are going to specify to the `sorted` function that it should use the value of each item to use as a sort key:
d = {'a': 100, 'b': 50, 'c': 10}
sorted(d, key=lambda k: d[k])
# Basically the `key` argument was called on every item being sorted - these items were the keys of the dictionary: `a`, `b`, `c`.
# For every key it used the result of the lambda as the sorting key:
#
# dictionary keys --> sorting key:
# * `a --> 100`
# * `b --> 50`
# * `c --> 10`
#
# Hence the sort order was 10, 50, 100, which means `c, b, a`
# Here's a different example, where we want to sort strings, not based on the lexicographic ordering, but based on the length of the string.
#
# We can easily do this as follows:
t = 'this', 'parrot', 'is', 'a', 'late', 'bird'
sorted(t)
# As you can see the natural ordering for strings was used here, but we can change the behavior by specifying the sort key:
# Remember that the `key` is a function that receives the item being sorted, and should return something (else usually!) that we want to use as the sort key. We use lambdas, but you can also use a straight `def` function too:
def sort_key(s):
    """Sort key: order strings by their length instead of lexicographically."""
    return len(s)
sorted(t, key=sort_key)
# or, using a lambda:
sorted(t, key=lambda s: len(s))
# #### Stable Sorting
# You might have noticed that the words `this`, `late` and `bird` all have four characters - so how did Python determine which one should come first? Randomly? No!
#
# The sort algorithm that Python uses, called *TimSort* (named after Python core developer Tim Peters - yes, the same Tim Peters that wrote the Zen of Python!!), is what is called a **stable** sort algorithm.
#
# This means that items with equal sort keys maintain their relative position.
# but first:
import this
# If you haven't read this in a while, take a few minutes now to do so again!
# Now back to stable sorting:
# All five strings share the same sort key (length 4), so a stable sort must
# return them in their original order:
t = 'aaaa', 'bbbb', 'cccc', 'dddd', 'eeee'
sorted(t, key = lambda s: len(s))
# Now let's change our tuple a bit:
t = 'bbbb', 'cccc', 'aaaa', 'eeee', 'dddd'
sorted(t, key = lambda s: len(s))
# As you can see, when the sort keys are equal (they are all equal to 4), the original ordering of the iterable is preserved.
#
# So in our original example:
t = 'this', 'parrot', 'is', 'a', 'late', 'bird'
sorted(t, key = lambda s: len(s))
# So, `this`, will come before `late` which will come before `bird`.
#
# If we change it up a bit:
t = 'this', 'bird', 'is', 'a', 'late', 'parrot'
sorted(t, key = lambda s: len(s))
# you'll notice that now `bird` ends up before `late`.
# So this `key` argument makes the `sorted` function extremely flexible. We can now even sort objects that are not even comparable!
# As you can we do not have an ordering defined for complex numbers.
#
# But we may want to sort a sequence of complex numbers based on their distance from the origin:
# Complex numbers have no natural ordering, but a key function lets us sort them anyway.
t = 0, 10+10j, 3-3j, 4+4j, 5-2j
# We can easily calculate the distance from the origin by using the `abs` function:
abs(3+4j)
# So now we can use that as a sort key:
sorted(t, key=abs)
# Of course, you could decide to sort based on the imaginary component instead:
sorted(t, key=lambda c: c.imag)
# #### Reversed Sort
# We also have the `reverse` keyword-only argument that we can use - basically it sorts the iterable, but returns it reversed:
t = 'this', 'bird', 'is', 'a', 'late', 'parrot'
sorted(t, key=lambda s: len(s))
sorted(t, key=lambda s: len(s), reverse=True)
# Of course in this case we could have done it this way too (negating the numeric key):
sorted(t, key=lambda s: -len(s))
# #### In-Place Sorting
# So far we have seen the `sorted` function - it returns a new (list) containing the sorted elements, and the original iterable remains the same.
#
# But mutable sequence types, such as lists, also implement in-place sorting - where the original list is sorted (the memory address does not change, the object is actually mutated).
#
# The syntax for calling the sorted method is identical to the `sorted` function, and is implemented using the same TimSort algorithm.
#
# Of course, this will not work with tuples, which are immutable.
l = ['this', 'bird', 'is', 'a', 'late', 'parrot']
id(l)
sorted(l, key=lambda s: len(s))
l, id(l)
# As you can see, the list `l` was not mutated and is still the same object.
#
# But this way is different:
result = l.sort(key=lambda s: len(s))
# First, the `sort` **method** does not return anything (it returns None, per
# the stdlib convention for in-place mutators):
type(result)
# and the original list is still the same object:
id(l)
# but it has mutated:
l
# That's really the only fundamental difference between the two sorts - one is in-place, while the other is not.
# You might be wondering if one is more efficient than the other.
#
# As far as algorithms go, they are the same, so no difference there (one sort is not more efficient than the other).
#
# But `list.sort()` will be faster than `sorted()` because it does not have to create a copy of the sequence.
#
# Of course, for iterables other than lists, you don't have much of a choice, and need to use `sorted` anyways.
# Let's try timing this a bit to see if we can see the difference:
from timeit import timeit
import random
# Fixed seed so the same "random" list is generated on every run.
random.seed(0)
n = 10_000_000
# NOTE(review): the comprehension variable `n` shadows the outer `n`; it works
# because range(n) is evaluated before the loop starts, but a different loop
# variable name would be clearer.
l = [random.randint(0, 100) for n in range(n)]
# This produces a list of `n` random integers between 0 and 100.
#
# If you're wondering about what the seed does, look at my video on random seeds in Part 1|Extras of this course - basically it makes sure I will generate the same random sequence every time.
#
# If you're unsure about the `timeit` module, again I have a video on that in Part 1|Extras of this course.
# Now, I'm only going to run the tests once, because when using in-place sorting of `l` we'll end up sorting an already sorted list - and that may very well affect the timing...
timeit(stmt='sorted(l)', globals=globals(), number=1)
timeit(stmt='l.sort()', globals=globals(), number=1)
# As you can see, the time difference between the two methods, even for `n=10_000_000` is quite small.
# I also just want to point out that sorting a list that is already sorted results in much better performance!
random.seed(0)
n = 10_000_000
l = [random.randint(0, 100) for n in range(n)]
timeit(stmt='l.sort()', globals=globals(), number=1)
# So now `l` is sorted, and if re-run the sort on it (either method), here's what we get:
timeit(stmt='sorted(l)', globals=globals(), number=1)
timeit(stmt='l.sort()', globals=globals(), number=1)
# Substantially faster!!
#
# Hence why I only timed using a single iteration...
# #### Natural Ordering for Custom Classes
# I just want to quickly show you that in order to have a "natural ordering" for our custom classes, we just need to implement the `<` or `>` operators. (I discuss these operators in Part 1 of this course)
class MyClass:
    """Toy class whose natural ordering is defined by its `val` attribute."""

    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __repr__(self):
        # Same display format as before, built with str.format instead of an f-string.
        return 'MyClass({}, {})'.format(self.name, self.val)

    def __lt__(self, other):
        # Natural ordering: compare the numeric payloads.
        return other.val > self.val
c1 = MyClass('c1', 20)
c2 = MyClass('c2', 10)
c3 = MyClass('c3', 20)
c4 = MyClass('c4', 10)
# Now we can sort those objects, without specifying a key, since that class has a natural ordering (`<` in this case). Moreover, notice that the sort is stable.
# Expected order: c2, c4 (val 10, original order kept), then c1, c3 (val 20).
sorted([c1, c2, c3, c4])
# In fact, we can modify our class slightly so we can see that `sorted` is calling our `__lt__` method repeatedly to perform the sort:
class MyClass:
    """Same toy class, but `__lt__` logs each comparison so we can watch the sort work."""

    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __repr__(self):
        return 'MyClass({}, {})'.format(self.name, self.val)

    def __lt__(self, other):
        # Trace every comparison the sort performs (message identical to before).
        print(f'called {self.name} < {other.name}')
        return other.val > self.val
c1 = MyClass('c1', 20)
c2 = MyClass('c2', 10)
c3 = MyClass('c3', 20)
c4 = MyClass('c4', 10)
# Each "called x < y" line printed below is one comparison made by TimSort.
sorted([c1, c2, c3, c4])
|
Sequences/code/Sorting Sequences.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # The CAMEOS algorithm
# The CAMEOS algorithm considers first-order and second-order aspects of protein sequence. First-order sequence likelihoods can be derived from observations in a single column of a multiple sequence alignment. Second-order terms define dependencies between amino acids, which can arise from long-range contacts between amino acids in a protein’s folded 3-D state.
# ## Prelude
#
# The first stage of the CAMEOS algorithm utilizes [dynamic programming](https://en.wikipedia.org/wiki/Dynamic_programming) to find a globally optimal encoding solution based on first-order protein sequence information. The solution depends on designing the overlapping sequence through a combination of optimal solutions to tractable sub-problems and can be demonstrated without recursion in approximately ~150 lines of python code which we provide below.
#
# We consider encoding two protein sequences, "alpha" and "beta", on a single piece of DNA. For this example, we consider a +1 encoding, meaning that the first nucleotide of each of beta’s codons immediately follow the first nucleotide of each of alpha’s codons.
#
# ~~~~
# +1:
# H K T S T E <- "alpha"
# CATAACACAAGCACCGAA
# I T Q A P <- "beta"
# ~~~~
#
# Completely synonymous double-encoding solutions are unlikely to occur, and for simplicity of exposition we consider a [BLOSUM62 penalty](https://en.wikipedia.org/wiki/BLOSUM) for mutations to amino acids. Later, we will describe how the algorithm can be further generalized to work in all frames and extend our substitution score to one that uses a HMM statistical parameterization.
# ## Codon tables and substitution scoring
#
# To start, we introduce some basic code to keep track of codons and their translations. We can use Biopython to import the BLOSUM62 matrix. We also define codons that begin and end with specific nucleotides, for later convenience. We further define our targets to be the E. coli proteins infA and ribF.
# +
#BLOSUM62 matrix. Accessed through Biopython: from Bio.SubsMat import MatrixInfo; blosum62 = MatrixInfo.blosum62
blosum62 = {('B', 'N'): 3, ('W', 'L'): -2, ('G', 'G'): 6, ('X', 'S'): 0, ('X', 'D'): -1, ('K', 'G'): -2, ('S', 'E'): 0, ('X', 'M'): -1, ('Y', 'E'): -2, ('W', 'R'): -3, ('I', 'R'): -3, ('X', 'Z'): -1, ('H', 'E'): 0, ('V', 'M'): 1, ('N', 'R'): 0, ('I', 'D'): -3, ('F', 'D'): -3, ('W', 'C'): -2, ('N', 'A'): -2, ('W', 'Q'): -2, ('L', 'Q'): -2, ('S', 'N'): 1, ('Z', 'K'): 1, ('V', 'N'): -3, ('Q', 'N'): 0, ('M', 'K'): -1, ('V', 'H'): -3, ('G', 'E'): -2, ('S', 'L'): -2, ('P', 'R'): -2, ('D', 'A'): -2, ('S', 'C'): -1, ('E', 'D'): 2, ('Y', 'G'): -3, ('W', 'P'): -4, ('X', 'X'): -1, ('Z', 'L'): -3, ('Q', 'A'): -1, ('V', 'Y'): -1, ('W', 'A'): -3, ('G', 'D'): -1, ('X', 'P'): -2, ('K', 'D'): -1, ('T', 'N'): 0, ('Y', 'F'): 3, ('W', 'W'): 11, ('Z', 'M'): -1, ('L', 'D'): -4, ('M', 'R'): -1, ('Y', 'K'): -2, ('F', 'E'): -3, ('M', 'E'): -2, ('S', 'S'): 4, ('X', 'C'): -2, ('Y', 'L'): -1, ('H', 'R'): 0, ('P', 'P'): 7, ('K', 'C'): -3, ('S', 'A'): 1, ('P', 'I'): -3, ('Q', 'Q'): 5, ('L', 'I'): 2, ('P', 'F'): -4, ('B', 'A'): -2, ('Z', 'N'): 0, ('M', 'Q'): 0, ('V', 'I'): 3, ('Q', 'C'): -3, ('I', 'H'): -3, ('Z', 'D'): 1, ('Z', 'P'): -1, ('Y', 'W'): 2, ('T', 'G'): -2, ('B', 'P'): -2, ('P', 'A'): -1, ('C', 'D'): -3, ('Y', 'H'): 2, ('X', 'V'): -1, ('B', 'B'): 4, ('Z', 'F'): -3, ('M', 'L'): 2, ('F', 'G'): -3, ('S', 'M'): -1, ('M', 'G'): -3, ('Z', 'Q'): 3, ('S', 'Q'): 0, ('X', 'A'): 0, ('V', 'T'): 0, ('W', 'F'): 1, ('S', 'H'): -1, ('X', 'N'): -1, ('B', 'Q'): 0, ('K', 'A'): -1, ('I', 'Q'): -3, ('X', 'W'): -2, ('N', 'N'): 6, ('W', 'T'): -2, ('P', 'D'): -1, ('B', 'C'): -3, ('I', 'C'): -1, ('V', 'K'): -2, ('X', 'Y'): -1, ('K', 'R'): 2, ('Z', 'R'): 0, ('W', 'E'): -3, ('T', 'E'): -1, ('B', 'R'): -1, ('L', 'R'): -2, ('Q', 'R'): 1, ('X', 'F'): -1, ('T', 'S'): 1, ('B', 'D'): 4, ('Z', 'A'): -1, ('M', 'N'): -2, ('V', 'D'): -3, ('F', 'A'): -2, ('X', 'E'): -1, ('F', 'H'): -1, ('M', 'A'): -1, ('K', 'Q'): 1, ('Z', 'S'): 0, ('X', 'G'): -1, ('V', 'V'): 4, ('W', 'D'): -4, ('X', 'H'): -1, ('S', 'F'): -2, ('X', 'L'): 
-1, ('B', 'S'): 0, ('S', 'G'): 0, ('P', 'M'): -2, ('Y', 'M'): -1, ('H', 'D'): -1, ('B', 'E'): 1, ('Z', 'B'): 1, ('I', 'E'): -3, ('V', 'E'): -2, ('X', 'T'): 0, ('X', 'R'): -1, ('R', 'R'): 5, ('Z', 'T'): -1, ('Y', 'D'): -3, ('V', 'W'): -3, ('F', 'L'): 0, ('T', 'C'): -1, ('X', 'Q'): -1, ('B', 'T'): -1, ('K', 'N'): 0, ('T', 'H'): -2, ('Y', 'I'): -1, ('F', 'Q'): -3, ('T', 'I'): -1, ('T', 'Q'): -1, ('P', 'L'): -3, ('R', 'A'): -1, ('B', 'F'): -3, ('Z', 'C'): -3, ('M', 'H'): -2, ('V', 'F'): -1, ('F', 'C'): -2, ('L', 'L'): 4, ('M', 'C'): -1, ('C', 'R'): -3, ('D', 'D'): 6, ('E', 'R'): 0, ('V', 'P'): -2, ('S', 'D'): 0, ('E', 'E'): 5, ('W', 'G'): -2, ('P', 'C'): -3, ('F', 'R'): -3, ('B', 'G'): -1, ('C', 'C'): 9, ('I', 'G'): -4, ('V', 'G'): -3, ('W', 'K'): -3, ('G', 'N'): 0, ('I', 'N'): -3, ('Z', 'V'): -2, ('A', 'A'): 4, ('V', 'Q'): -2, ('F', 'K'): -3, ('T', 'A'): 0, ('B', 'V'): -3, ('K', 'L'): -2, ('L', 'N'): -3, ('Y', 'N'): -2, ('F', 'F'): 6, ('L', 'G'): -4, ('B', 'H'): 0, ('Z', 'E'): 4, ('Q', 'D'): 0, ('X', 'B'): -1, ('Z', 'W'): -3, ('S', 'K'): 0, ('X', 'K'): -1, ('V', 'R'): -3, ('K', 'E'): 1, ('I', 'A'): -1, ('P', 'H'): -2, ('B', 'W'): -4, ('K', 'K'): 5, ('H', 'C'): -3, ('E', 'N'): 0, ('Y', 'Q'): -1, ('H', 'H'): 8, ('B', 'I'): -3, ('C', 'A'): 0, ('I', 'I'): 4, ('V', 'A'): 0, ('W', 'I'): -3, ('T', 'F'): -2, ('V', 'S'): -2, ('T', 'T'): 5, ('F', 'M'): 0, ('L', 'E'): -3, ('M', 'M'): 5, ('Z', 'G'): -2, ('D', 'R'): -2, ('M', 'D'): -3, ('W', 'H'): -2, ('G', 'C'): -3, ('S', 'R'): -1, ('S', 'I'): -2, ('P', 'Q'): -1, ('Y', 'A'): -2, ('X', 'I'): -1, ('E', 'A'): -1, ('B', 'Y'): -3, ('K', 'I'): -3, ('H', 'A'): -2, ('P', 'G'): -2, ('F', 'N'): -3, ('H', 'N'): 1, ('B', 'K'): 0, ('V', 'C'): -1, ('T', 'L'): -1, ('P', 'K'): -1, ('W', 'S'): -3, ('T', 'D'): -1, ('T', 'M'): -1, ('P', 'N'): -2, ('K', 'H'): -1, ('T', 'R'): -1, ('Y', 'R'): -2, ('L', 'C'): -1, ('B', 'L'): -4, ('Z', 'Y'): -2, ('W', 'N'): -4, ('G', 'A'): 0, ('S', 'P'): -1, ('E', 'Q'): 2, ('C', 'N'): -3, ('H', 'Q'): 0, ('D', 'N'): 1, 
('Y', 'C'): -2, ('L', 'H'): -3, ('E', 'C'): -4, ('Z', 'H'): 0, ('H', 'G'): -2, ('P', 'E'): -1, ('Y', 'S'): -2, ('G', 'R'): -2, ('B', 'M'): -3, ('Z', 'Z'): 4, ('W', 'M'): -1, ('Y', 'T'): -2, ('Y', 'P'): -3, ('Y', 'Y'): 7, ('T', 'K'): -1, ('Z', 'I'): -3, ('T', 'P'): -1, ('V', 'L'): 1, ('F', 'I'): 0, ('G', 'Q'): -2, ('L', 'A'): -1, ('M', 'I'): 1}
#Define constants for bases, codons, and amino acid translations.
#Basic genetic-code tables. Bases are listed in the classic T/C/A/G order used
#by standard codon tables, so `codons` enumerates all 64 codons in that order.
bases = list('TCAG')
codons = [b1 + b2 + b3 for b1 in bases for b2 in bases for b3 in bases]
amino_acids = 'FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG'
codon_table = dict(zip(codons, amino_acids))
#Non-stop codons grouped by their first (respectively last) nucleotide.
#Derived from the codon table rather than written out by hand; stop codons
#(translation '*') are excluded so they are never proposed for a coding slot.
first_letter_codons = {b: [c for c in codons
                           if c[0] == b and codon_table[c] != '*']
                       for b in bases}
last_letter_codons = {b: [c for c in codons
                          if c[2] == b and codon_table[c] != '*']
                      for b in bases}
def translate(seq):
    """Translate a DNA string into amino acids, dropping any trailing partial codon."""
    seq = seq.upper()
    #Walk the sequence three bases at a time; a 1- or 2-base remainder at the
    #end is ignored, matching in-frame translation.
    usable = len(seq) - len(seq) % 3
    return "".join(codon_table[seq[i:i + 3]] for i in range(0, usable, 3))
def score_substitution(pair, matrix):
    """Score an amino-acid substitution `pair` with a half-stored matrix.

    Any pair involving a stop ('*') gets a large fixed penalty. BLOSUM-style
    matrices store each unordered pair only once, so if the tuple is missing
    we look up its reverse instead.
    """
    if "*" in pair:
        return -1000 #stop codons must never be substituted in
    try:
        return matrix[pair]
    except KeyError:
        #Only one orientation of the pair is stored; try the other.
        return matrix[tuple(reversed(pair))]
# -
# ## Double-encoding, step-by-step
#
# We consider constructing a double-encoding sequence. Given that we expect the protein to begin with a start codon, we can stipulate that the first letter of the double-encoding solution will begin with the nucleotide "A".
#
# The sequence we are constructing can therefore be represented as:
#
# ~~~~
# A????????????? ...
# ~~~~
#
# where "?" represent bases that are unknown.
#
# We note that if we were to fix the fourth base in this sequence (to, for example, the nucleotide "C"), the optimal dinucleotides for the first amino acid in alpha and the first amino acid in beta could be set directly by scoring all dinucleotides with the BLOSUM matrix.
#
# Using the proposed fourth base "C", we have a sequence:
#
# ~~~~
# A??C?????????? ...
# ~~~~
#
# Given the base C, we can complete the first codon for both alpha and beta with one of 16 dinucleotides. We must now find the maximum out of the following options:
#
# ~~~~
# max(["A" + dinucleotide + "C" for dinucleotide in ["AA", "AC", "AG", "AT", "CA", "CC", ...]])
# ~~~~
#
# Concretely scoring the possibilities:
#
# ~~~~
# max(blosum_score{
# (A aa C) -> alpha[0] = "K"; beta[0] = "N", (A ga C) -> alpha[0] = "R"; beta[0] = "D",
# (A ac C) -> alpha[0] = "N"; beta[0] = "T", (A gc C) -> alpha[0] = "S"; beta[0] = "A",
# (A ag C) -> alpha[0] = "K"; beta[0] = "S", (A gg C) -> alpha[0] = "R"; beta[0] = "G",
# (A at C) -> alpha[0] = "N"; beta[0] = "I", (A gt C) -> alpha[0] = "S"; beta[0] = "V",
# (A ca C) -> alpha[0] = "T"; beta[0] = "H", (A ta C) -> alpha[0] = "I"; beta[0] = "Y",
# (A cc C) -> alpha[0] = "T"; beta[0] = "P", (A tc C) -> alpha[0] = "I"; beta[0] = "S",
# (A cg C) -> alpha[0] = "T"; beta[0] = "R", (A tg C) -> alpha[0] = "M"; beta[0] = "C",
# (A ct C) -> alpha[0] = "T"; beta[0] = "L", (A tt C) -> alpha[0] = "I"; beta[0] = "F",
# }
# )
# ~~~~
#
# This work is performed by the function, `optimal_choice_p1()` which takes a fixed base ("A"), a proposed base ("C"), and the amino acids in alpha and beta that are being optimized. Optionally we can also flag whether we are at the beginning or end of the sequence to deal with special cases of start/stop codons.
def optimal_choice_p1(fixed_base, proposed_base, alpha_aa, beta_aa, is_start = False, is_end = False):
    """Pick the best dinucleotide completing one overlapped +1 codon pair.

    ``fixed_base`` is the (already decided) first base of alpha's codon;
    ``proposed_base`` is the candidate next base, which doubles as the last
    base of beta's codon. All viable middle dinucleotides are scored against
    the target amino acids ``alpha_aa``/``beta_aa`` with BLOSUM62, and the
    best ``(dinucleotide, combined_score)`` pair is returned.

    ``is_start`` forces an ATG start codon in the alpha frame (heavily
    penalized if ``fixed_base`` is not "A"); ``is_end`` forces a stop codon
    (heavily penalized if ``fixed_base`` is not "T"). In the typical case,
    dinucleotides that would introduce a stop codon in either frame are
    excluded.
    """
    if not is_start and not is_end: #typical case
        alpha_scores = {}
        beta_scores = {}
        #Score all codons not leading to a stop codon, recording score in terms of the non-fixed dinucleotides.
        for codon in first_letter_codons[fixed_base]:
            alpha_scores[codon[1:]] = score_substitution((alpha_aa, codon_table[codon]), blosum62)
        for codon in last_letter_codons[proposed_base]:
            beta_scores[codon[:2]] = score_substitution((beta_aa, codon_table[codon]), blosum62)
        #get the set of dinucleotides that don't introduce a stop codon in either frame.
        no_stop_codons = set(alpha_scores.keys()).intersection(set(beta_scores.keys()))
        #Sum per-frame scores for each viable dinucleotide.
        dinuc_scores = {dinuc: alpha_scores[dinuc] + beta_scores[dinuc] for dinuc in no_stop_codons}
        #Return the maximum-scoring dinucleotide and its score.
        #FIX: the original key was "lambda (key, val): -val", Python-2-only
        #tuple-parameter syntax (removed by PEP 3113; SyntaxError on Python 3).
        #This form is equivalent on both versions; sorted() is stable, so tie
        #behavior is unchanged.
        return sorted(dinuc_scores.items(), key = lambda kv: -kv[1])[0]
    elif is_start: #we have no choice for the start codon, we just return the score of the ATG at the position.
        if fixed_base == "A":
            alpha_score = score_substitution(('M', 'M'), blosum62)
            beta_score = score_substitution((beta_aa, codon_table["TG" + proposed_base]), blosum62)
            return ('TG', alpha_score + beta_score) #whatever the score for TG, it's optimal (else no protein product).
        elif fixed_base != "A": #we need A for a start codon so not having one is penalized.
            return ("TG", -10000.0)
    elif is_end:
        if fixed_base == "T":
            #We can get stop codons from TAG / TAA / TGA. We score each dinucleotide.
            scores = {"AG": score_substitution((beta_aa, codon_table["AG" + proposed_base]), blosum62),
                      "AA": score_substitution((beta_aa, codon_table["AA" + proposed_base]), blosum62),
                      "GA": score_substitution((beta_aa, codon_table["GA" + proposed_base]), blosum62)}
            #FIX: same Python-3-compatible sort key as above.
            return sorted(scores.items(), key = lambda kv: -kv[1])[0]
        elif fixed_base != "T": #we need T for a stop codon, so not having one is penalized.
            return ("AA", -10000.0)
# Consider the first letter of infA, "M" and, without loss of generality, the eighth letter of ribF, "N".
#
# The optimal encoding for the proposed letter of "C" is...
optimal_choice_p1("A", "C", "M", "N", is_start = True)
# The "2" here is the sum of BLOSUM62 scoring of matching "M" and "M" (+5) and substituting "N" for "C" (-3). Given that this is the first codon, the correct dinucleotide will always be "TG", as we would not have a protein product otherwise.
#
# The letter "C" was proposed, but it may not be optimal. We consider the other scores:
# Compare the remaining candidate fourth bases ("A", "G", "T") for the same position.
print("A =>", optimal_choice_p1("A", "A", "M", "N", is_start = True))
print("G =>", optimal_choice_p1("A", "G", "M", "N", is_start = True))
print("T =>", optimal_choice_p1("A", "T", "M", "N", is_start = True))
# We note that "A" is not a feasible encoding, since "TGA" is a stop codon in the beta frame. However, the letter "T" appears to be a nucleotide that scores just as well as "C". The question, then, is whether the sequence:
#
# `ATGC???????...`
#
# scores better than
#
# `ATGT???????...`
#
# ## Further optimization
#
# The above decision between C/T highlights that the optimal nucleotide choices for a given frame depend on downstream sequence features. In fact, given that the second amino acid in the infA sequence is alanine (encoded by the codons: "GCA", "GCC", "GCG", "GCT"), the "G" base in the fourth position may be preferable for downstream scoring even though it scores slightly lower in the first tetranucleotide.
#
# This observation could suggest a "brute force" optimization procedure, where for example, with a double-encoding of length 4 for proteins alpha and beta at positions, alpha_i and beta_j, we might consider a compound loop such as:
def brute_force_p1(alpha, beta, alpha_i, beta_j):
    """Illustrative exponential-time search over a three-codon double-encoding.

    alpha_i / beta_j define the starting positions in the two protein
    sequences (alpha_i = 7, beta_j = 0 in the surrounding example text). For
    every combination of the three free "joint" bases (sequence positions
    4, 7 and 10) the optimal connecting dinucleotides are chosen and the
    total score recorded. Returns a dict mapping each
    (base_4, base_7, base_10) combination to its (sequence, score).

    This is deliberately naive: k joints cost 4**k evaluations, which is
    infeasible for real protein lengths - motivating the dynamic program in
    construct_p1().

    FIXES vs. the original sketch: the undefined name `start_base` is now
    `base_1`; sequence/score state is kept per branch instead of accumulating
    across all loop iterations; and the scored combinations are returned.
    """
    base_1 = "A" #assume ATG to start, necessitating "A" as first base.
    top_scores = {}
    for base_4 in ["A", "C", "G", "T"]:
        #Best dinucleotide for the first codon pair, given base_1 and base_4.
        dinuc_1, score_1 = optimal_choice_p1(base_1, base_4, alpha[alpha_i], beta[beta_j], is_start = True)
        for base_7 in ["A", "C", "G", "T"]:
            #Move one position forward in both protein sequences.
            dinuc_2, score_2 = optimal_choice_p1(base_4, base_7, alpha[alpha_i + 1], beta[beta_j + 1])
            for base_10 in ["A", "C", "G", "T"]:
                dinuc_3, score_3 = optimal_choice_p1(base_7, base_10, alpha[alpha_i + 2], beta[beta_j + 2])
                #Assemble this branch's candidate sequence and its additive score.
                seq = base_1 + dinuc_1 + base_4 + dinuc_2 + base_7 + dinuc_3 + base_10
                top_scores[(base_4, base_7, base_10)] = (seq, score_1 + score_2 + score_3)
    #Using this approach we would hope to find the MAXIMUM-scoring combination
    #of bases among the returned entries.
    return top_scores
# Unfortunately, this code requires searching an exponential number of possibilities, and becomes infeasible to run for proteins of relatively short length.
#
# Fortunately, our search can be substantially improved by observing an important independence: only the terminal base (the "proposed" base) at the end of each tetranucleotide impacts downstream decisions.
#
# When comparing:
#
# `ATGC???????...`
#
# and
#
# `ATGT???????...`
#
# The only base interacting with codons beyond the first tetranucleotide is the terminal "C" or "T", while the dinucleotide "TG" only optimizes the first tetranucleotide given fixed ("A") and proposed ("C/T") bases.
#
# Further, the scores are additive. If we were to compare
#
# `ATGC???????...`
#
# vs
#
# `GTGC???????...`
#
# It is clear that if "ATGC" has a better score than "GTGC", and the score of the downstream ("???????...") sequence depends only on the terminal base "C", then the global score of an optimal downstream sequence is only maximized if it is attached to "ATGC".
#
# At each step, we must therefore only keep track of the best sequence for each terminal base. This is done by extending earlier optimal subsequences, up to the end of the sequence, where optimal sequences across all possible terminal bases are considered.
#
# Code achieving this aim is shown below:
def construct_p1(alpha_prot, beta_prot, alpha_position, beta_position, fixed_base = "A"):
    """Dynamic program: best +1 double-encoding of alpha_prot inside beta_prot.

    Scans alpha_prot left to right. At each position it keeps, for each of
    the four possible terminal bases, the best-scoring nucleotide sequence
    ending in that base - only the terminal base influences downstream
    choices, so four candidates per step suffice. Returns the overall best
    (sequence, score) pair.

    Note: ``alpha_position`` and the ``fixed_base`` default are retained for
    interface compatibility but are unused (the loop below rebinds
    ``fixed_base``).
    """
    #Seed: one zero-score candidate per possible first base. Only "A" can
    #actually begin an ATG; the others are heavily penalized at the first step.
    best_seq_per_base = {"A": ("A", 0.0), "C": ("C", 0.0), "G": ("G", 0.0), "T": ("T", 0.0)}
    #We intend to fully encode all amino acids in the alpha_prot within beta_prot.
    for alpha_i in range(len(alpha_prot)):
        #Get the amino acids at the considered position to evaluate costs of substitutions.
        alpha_aa = alpha_prot[alpha_i]
        beta_aa = beta_prot[beta_position + alpha_i]
        #Best scoring sub-sequences so far, keyed by their terminal letter.
        best_continuation = {"A": None, "C": None, "G": None, "T": None}
        #check if start/end for special treatment of substitutions.
        is_start = alpha_i == 0
        is_end = alpha_i == len(alpha_prot) - 1
        for fixed_base in "ACGT": #Consider extending earlier subsequences of different termini.
            for proposed_base in "ACGT": #See if extending them yields top-scoring terminal options.
                top_dinuc, top_score = optimal_choice_p1(fixed_base, proposed_base, alpha_aa, beta_aa, is_start, is_end)
                #Proposed seq/score extend the best subsequence ending in fixed_base.
                proposed_seq = best_seq_per_base[fixed_base][0] + top_dinuc + proposed_base
                proposed_score = best_seq_per_base[fixed_base][1] + top_score
                #Keep the best extension per terminal base.
                if best_continuation[proposed_base] is None or proposed_score >= best_continuation[proposed_base][1]:
                    best_continuation[proposed_base] = (proposed_seq, proposed_score)
        #Best sequence per terminal base at this position, extended in the next step.
        best_seq_per_base = {proposed_b: best_continuation[proposed_b] for proposed_b in "ACGT"}
    #Pick the terminus with the highest SCORE.
    #BUG FIX: the original used key = lambda entry: entry[1], i.e. the whole
    #(sequence, score) tuple, which compares the sequence STRINGS
    #lexicographically before ever looking at the score. Key on the score.
    return max(best_seq_per_base.items(), key = lambda entry: entry[1][1])[1]
# Given that optimal_choice_p1() requires a constant number of operations, the above code clearly requires a constant number of operations for each amino acid in the alpha_prot, evaluating 4x4=16 possible combinations of start/end nucleotides and recording top subsequences as necessary.
#
# At the end, the optimal global sequence is returned.
# ## Running an example
#
# This algorithm runs very quickly for proteins of typical length (hundreds of amino acids). The results given example inputs of infA and ribF appear fairly compelling.
# +
def example():
    """Run construct_p1 on E. coli infA vs. ribF and print the outcome."""
    infA = "MAKEDNIEMQGTVLETLPNTMFRVELENGHVVTAHISGKMRKNYIRILTGDKVTVELTPYDLSKGRIVFRSR*"
    ribF_parts = (
        "MKLIRGIHNLSQAPQEGCVLTIGNFDGVHRGHRALLQGLQEEGRKRNLPVMVMLFEPQPLELFATDKAPA",
        "RLTRLREKLRYLAECGVDYVLCVRFDRRFAALTAQNFISDLLVKHLRVKFLAVGDDFRFGAGREGDFLLL",
        "QKAGMEYGFDITSTQTFCEGGVRISSTAVRQALADDNLALAESLLGHPFAISGRVVHGDELGRTIGFPTA",
        "NVPLRRQVSPVKGVYAVEVLGLGEKPLPGVANIGTRPTVAGIRQQLEVHLLDVAMDLYGRHIQVVLRKKI",
        "RNEQRFASLDELKAQIARDELTAREFFGLTKPA*",
    )
    ribF = "".join(ribF_parts)
    alpha_position = 0
    beta_position = 8  # set ahead of time for this example.
    p1_seq, p1_score = construct_p1(infA, ribF, alpha_position, beta_position)
    print(p1_seq)  # the nucleotides that will encode two proteins.
    print(translate(p1_seq))  # the translation of the infA sequence.
    print(translate(p1_seq[1:]))  # print just the double-encoding part of ribF
    print(ribF[:beta_position] + translate(p1_seq[1:]) + ribF[beta_position + len(infA):])  # ribF sequence.
example()
# -
# Aligned, this double-encoding solution produces proteins that are ~70/80% similar (BLOSUM score > 0) to the original targets for both proteins:
#
# ~~~~
# Double-encoded: 1 MAKSDSAERQGTVLRTLPGTSFRASLSNGRVATARASGKLARNYIRILTGTSVTVRLRPYSLSSGKIAFRSS* 73
# |||.|:.|.|||||.|||.|.||..|.||.|.||..|||:.:||||||||..|||.|.||.||.|:|.|||.
# E. coli infA : 1 MAKEDNIEMQGTVLETLPNTMFRVELENGHVVTAHISGKMRKNYIRILTGDKVTVELTPYDLSKGRIVFRSR* 73
#
# (64% identical, 69% similar)
#
# Double-encoded: 1 WLSQTPQKGKVLSLGHFQGLHSGHRSLMEGLQQQGRRENLPVIIFVFSPEPLSLFASDRTPSRLVRLRSALRN 73
# .|||.||:|.||::|:|.|:|.|||:|::|||::||:.||||::.:|.|:||.|||:|:.|:||.|||..||.
# E. coli ribF : 1 NLSQAPQEGCVLTIGNFDGVHRGHRALLQGLQEEGRKRNLPVMVMLFEPQPLELFATDKAPARLTRLREKLRY 73
#
# (56% identical, 81% similar)
# ~~~~
# ## Possible improvements
#
# This algorithm gives a relatively high-scoring double-encoding solution but can be improved with a few additional considerations.
#
# First, we can improve the score_substitution() function to consider position- and family-specific substitution penalties derived by a Hidden Markov Model trained on a multiple sequence alignment. This should perform notably better than a BLOSUM62 parameterization, which penalizes substitutions based on general amino acid properties.
#
# We can also optimize the positioning of the encoding. This can be achieved by iterating over every beta position (if m is the length of alpha and n is the length of beta, this requires a number of operations proportional to m x n instead of just m, which remains tractable).
#
# HMMs also allow us to add insertions to the code: once we have computed all m x n possible combinations between alpha/beta positions, we can include an additional "state" variable. Double-encoding sequences are now constructed by appropriately combining earlier subsequences with the appropriate position and state. For example, our subsequence would evaluate an addition at position (i + 1, j + 1) if it is in the "standard" state, but (i + 1, j) for an "insertion" state in beta. The HMM-parameterized cost of opening/extending insertions at any position is added on top of any substitution score for each subsequence. Permitting insertions provides increased flexibility for the model, as slight modifications of alignment between the two proteins can result in more favorable double-encoding.
#
# We further consider that this algorithm provides a single deterministic solution for any set of proteins. Given advances in DNA synthesis, it may be reasonable to consider a broader diversity of possible high-scoring solutions. In the above example, a strategy may be to run the code several times and have optimal_choice_p1() occasionally return sub-optimal dinucleotides. In our full recursive implementation, we utilize a strategy where through keeping track of more information at each step we occasionally extend sub-optimal subsequences instead of always extending optimal ones.
#
# These stochastic strategies will result in a lower final score, but will provide a broader diversity of solutions, which may be useful for downstream applications.
#
# Finally, the code presented here is algorithmically efficient, but calculation speed can be improved beyond the presented python implementation. In practice, we implement the program in julia, which provides a speed-up compared to a comparable python implementation. Ultimately, run-times are dominated by the cost of optimizing the long-range interactions in the second part of our optimization procedure.
|
guide.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Use BioKEEN Programmatically to Train and Evaluate a KGE Model on HSDN
# +
import json
import logging
import os
import sys
import time
import warnings
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import biokeen
import pykeen
# -
# Silence noisy UserWarnings and turn on INFO-level logging for BioKEEN.
warnings.filterwarnings('ignore', category=UserWarning)
logging.basicConfig(level=logging.INFO)
logging.getLogger('biokeen').setLevel(logging.INFO)
# Record the environment for reproducibility of this run.
print(sys.version)
print(time.asctime())
print(f'PyKEEN Version: {pykeen.constants.VERSION}')
print(f'BioKEEN Version: {biokeen.constants.VERSION}')
# Directory under the user's Desktop where BioKEEN artifacts are written.
home = os.path.expanduser('~')
output_directory = os.path.join(home, 'Desktop', 'biokeen_test')
# ## Step 1: Configure your experiment
# Hyper-parameter configuration for a TransE run on the HSDN network.
config = {
    'training_set_path': 'bio2bel:hsdn',
    'execution_mode': 'Training_mode',
    'kg_embedding_model_name': 'TransE',
    'embedding_dim': 50,
    'normalization_of_entities': 2,  # corresponds to L2
    'scoring_function': 1,  # corresponds to L1
    'margin_loss': 1,
    'learning_rate': 0.01,
    'batch_size': 128,
    'num_epochs': 1000,
    'test_set_ratio': 0.1,
    'filter_negative_triples': True,
    'random_seed': 2,
    'preferred_device': 'cpu',
}
# ## Step 2: Run BioKEEN to Train and Evaluate the Model
results = pykeen.run(
config=config,
output_directory=output_directory,
)
print('Keys:', *sorted(results.results.keys()), sep='\n ')
# ## Step 3: Show Exported Results
# ### 3.1: Show Trained Model
results.results['trained_model']
# ### 3.2: Plot losses
losses = results.results['losses']
epochs = np.arange(len(losses))
plt.title(r'Loss Per Epoch')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.plot(epochs, losses)
plt.show()
# ### 3.3: Show Evaluation Results
print(json.dumps(results.results['eval_summary'], indent=2))
|
notebooks/Case Scenario HSDN.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="0asdcdunj2Tx"
# # ORF recognition by CNN
#
# Same as 116 but let 5'UTR vary from 0 to 6 so memorizing specific STOP positions is harder.
# + colab={"base_uri": "https://localhost:8080/", "height": 36} id="QP1VTRNQj2UO" outputId="14af9221-64bf-49be-cb9e-37c436ccd8b4"
import time
t = time.time()
time.strftime('%Y-%m-%d %H:%M:%S %Z', time.localtime(t))
# + id="Nhz4GKonj2T_"
# Experiment-wide constants for simulated-sequence generation and the CNN.
PC_SEQUENCES=4000   # how many protein-coding sequences
NC_SEQUENCES=4000   # how many non-coding sequences
PC_TESTS=1000       # held-out protein-coding test sequences
NC_TESTS=1000       # held-out non-coding test sequences
RNA_LEN=36            # how long is each sequence
CDS_LEN=30            # include bases in start, residues, stop
ALPHABET=4          # how many different letters are possible
INPUT_SHAPE_2D = (RNA_LEN,ALPHABET,1) # Conv2D needs 3D inputs
INPUT_SHAPE = (RNA_LEN,ALPHABET) # Conv1D needs 2D inputs
FILTERS = 16   # how many different patterns the model looks for
NEURONS = 16   # width of the dense layer before the output
DROP_RATE = 0.4  # dropout probability applied before the output layer
WIDTH = 3   # how wide each pattern is, in bases
STRIDE_2D = (1,1)  # For Conv2D how far in each direction
STRIDE = 1      # For Conv1D, how far between pattern matches, in bases
EPOCHS=25            # how many times to train on all the data
SPLITS=5           # SPLITS=3 means train on 2/3 and validate on 1/3
FOLDS=5            # train the model this many times (range 1 to SPLITS)
# + colab={"base_uri": "https://localhost:8080/"} id="lr7q90rxj2UE" outputId="7aa26bdf-8cf1-4355-fb03-e1d8454dbc85"
import sys
# Fetch SimTools helpers from GitHub when on Colab; otherwise import from
# the sibling SimTools directory on the local machine.
try:
    from google.colab import drive
    IN_COLAB = True
    print("On Google CoLab, mount cloud-local file, get our code from GitHub.")
    PATH='/content/drive/'
    #drive.mount(PATH,force_remount=True)  # hardly ever need this
    #drive.mount(PATH)    # Google will require login credentials
    DATAPATH=PATH+'My Drive/data/'  # must end in "/"
    import requests
    r = requests.get('https://raw.githubusercontent.com/ShepherdCode/Soars2021/master/SimTools/RNA_gen.py')
    with open('RNA_gen.py', 'w') as f:
        f.write(r.text)
    from RNA_gen import *
    r = requests.get('https://raw.githubusercontent.com/ShepherdCode/Soars2021/master/SimTools/RNA_describe.py')
    with open('RNA_describe.py', 'w') as f:
        f.write(r.text)
    from RNA_describe import ORF_counter
    r = requests.get('https://raw.githubusercontent.com/ShepherdCode/Soars2021/master/SimTools/RNA_prep.py')
    with open('RNA_prep.py', 'w') as f:
        f.write(r.text)
    from RNA_prep import *
# NOTE(review): bare except swallows *any* failure (including typos in the
# try body) and silently falls back to local paths — consider narrowing.
except:
    print("CoLab not working. On my PC, use relative paths.")
    IN_COLAB = False
    DATAPATH='data/'  # must end in "/"
    sys.path.append("..")  # append parent dir in order to use sibling dirs
    from SimTools.RNA_gen import *
    from SimTools.RNA_describe import ORF_counter
    from SimTools.RNA_prep import *
MODELPATH="BestModel"  # saved on cloud instance and lost after logout
#MODELPATH=DATAPATH+MODELPATH  # saved on Google Drive but requires login
# Sanity checks that the SimTools modules actually loaded.
if not assert_imported_RNA_gen():
    print("ERROR: Cannot use RNA_gen.")
if not assert_imported_RNA_prep():
    print("ERROR: Cannot use RNA_prep.")
# + id="EGDXH8Uwj2UM"
from os import listdir
import csv
from zipfile import ZipFile
import numpy as np
import pandas as pd
from scipy import stats  # mode
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from keras.models import Sequential
from keras.layers import Dense,Embedding,Dropout
from keras.layers import Conv1D,Conv2D
from keras.layers import Flatten,MaxPooling1D,MaxPooling2D
from keras.losses import BinaryCrossentropy
# tf.keras.losses.BinaryCrossentropy
import matplotlib.pyplot as plt
from matplotlib import colors
mycmap = colors.ListedColormap(['red','blue'])  # list color for label 0 then 1
np.set_printoptions(precision=2)  # compact numpy output in the notebook
# + colab={"base_uri": "https://localhost:8080/"} id="CUOG_jEvGtOm" outputId="f9c69356-4a4a-483c-d130-b5e2813e9130"
import random
def partition_random_sequences(goal_per_class):
    """Generate random RNA sequences and split them into two classes.

    Each sequence is RNA_LEN bases: a 5'UTR of random length, an 'ATG' start,
    interior bases containing one randomly placed (possibly out-of-frame) stop,
    an in-frame stop, and a 3'UTR. Sequences whose maximum CDS is at least
    CDS_LEN are labeled protein-coding (PC); the rest non-coding (NC).

    Returns (pc_seqs, nc_seqs), each a list of goal_per_class sequences.
    Relies on module-level CDS_LEN, RNA_LEN, and ORF_counter.
    """
    between_bases = CDS_LEN - 6
    pc_seqs=[]
    nc_seqs=[]
    oc = ORF_counter()
    trials = 0
    pc_cnt = 0
    nc_cnt = 0
    bases=['A','C','G','T']
    while pc_cnt<goal_per_class or nc_cnt<goal_per_class:
        trials += 1
        # FIX: draw the UTR lengths per sequence (inside the loop). Previously
        # they were drawn once per call, so every sequence in a batch shared
        # identical START/STOP positions — defeating the stated goal of making
        # specific STOP positions harder to memorize.
        utr5_bases = random.randint(0,RNA_LEN-CDS_LEN)
        utr3_bases = RNA_LEN - utr5_bases - CDS_LEN
        one_seq = "".join(random.choices(bases,k=utr5_bases))
        one_seq += 'ATG'
        random_cnt = random.randint(1,between_bases-3)
        one_seq += "".join(random.choices(bases,k=random_cnt))
        random_stop = random.choice(['TAA','TAG','TGA'])  # random frame
        one_seq += random_stop
        remaining_cnt = between_bases - 3 - random_cnt
        one_seq += "".join(random.choices(bases,k=remaining_cnt))
        random_stop = random.choice(['TAA','TAG','TGA'])  # in frame
        one_seq += random_stop
        one_seq += "".join(random.choices(bases,k=utr3_bases))
        oc.set_sequence(one_seq)
        cds_len = oc.get_max_cds_len() + 3
        # Accept the sequence into whichever class still needs members.
        if cds_len >= CDS_LEN and pc_cnt<goal_per_class:
            pc_cnt += 1
            pc_seqs.append(one_seq)
        elif cds_len < CDS_LEN and nc_cnt<goal_per_class:
            nc_cnt += 1
            nc_seqs.append(one_seq)
    print ("It took %d trials to reach %d per class."%(trials,goal_per_class))
    return pc_seqs,nc_seqs
# Small smoke-test run, then the real generation (train + test pool per class).
pc_all,nc_all=partition_random_sequences(10)  # just testing
pc_all,nc_all=partition_random_sequences(PC_SEQUENCES+PC_TESTS)
print("Use",len(pc_all),"PC seqs")
print("Use",len(nc_all),"NC seqs")
# + colab={"base_uri": "https://localhost:8080/"} id="Q-BmSXi2jUyl" outputId="8f68ac46-3431-4f06-b80a-a09ae0410ed6"
# Describe the sequences
def describe_sequences(list_of_seq):
    """Print the mean sequence length and mean maximum-ORF length."""
    oc = ORF_counter()
    rna_lens = []
    orf_lens = []
    for seq in list_of_seq:
        rna_lens.append(len(seq))
        oc.set_sequence(seq)
        orf_lens.append(oc.get_max_orf_len())
    print("Average RNA length:", np.mean(rna_lens))
    print("Average ORF length:", np.mean(orf_lens))
print("Simulated sequences prior to adjustment:")
print("PC seqs")
describe_sequences(pc_all)
print("NC seqs")
describe_sequences(nc_all)
# + id="iP1y7-J3jUys"
# Partition the generated pools into train and held-out test sets.
pc_train=pc_all[:PC_SEQUENCES]
nc_train=nc_all[:NC_SEQUENCES]
pc_test=pc_all[PC_SEQUENCES:]
nc_test=nc_all[NC_SEQUENCES:]
# + colab={"base_uri": "https://localhost:8080/"} id="CIpTrnH6j2US" outputId="58b37116-0f91-4044-a469-34b45ff14de2"
# Use code from our SimTools library.
# One-hot encodes each sequence to (RNA_LEN, ALPHABET) and shuffles both classes together.
X,y = prepare_inputs_len_x_alphabet(pc_train,nc_train,ALPHABET)  # shuffles
print("Data ready.")
# + colab={"base_uri": "https://localhost:8080/"} id="7NvrVU8ij2UU" outputId="ea8c106a-933b-4a1e-d73f-f4b9cecbeb84"
def make_DNN():
    """Build and compile the 1D-CNN binary classifier.

    Two Conv1D layers + max-pool, then a small sigmoid dense layer with
    dropout, ending in a single sigmoid output (probability of PC class).
    Uses module-level FILTERS, WIDTH, STRIDE, NEURONS, DROP_RATE, INPUT_SHAPE.
    """
    print("make_DNN")
    print("input shape:",INPUT_SHAPE)
    dnn = Sequential()
    #dnn.add(Embedding(input_dim=INPUT_SHAPE,output_dim=INPUT_SHAPE))
    dnn.add(Conv1D(filters=FILTERS,kernel_size=WIDTH,strides=STRIDE,padding="same",
            input_shape=INPUT_SHAPE))
    dnn.add(Conv1D(filters=FILTERS,kernel_size=WIDTH,strides=STRIDE,padding="same"))
    dnn.add(MaxPooling1D())
    #dnn.add(Conv1D(filters=FILTERS,kernel_size=WIDTH,strides=STRIDE,padding="same"))
    #dnn.add(Conv1D(filters=FILTERS,kernel_size=WIDTH,strides=STRIDE,padding="same"))
    #dnn.add(MaxPooling1D())
    dnn.add(Flatten())
    dnn.add(Dense(NEURONS,activation="sigmoid",dtype=np.float32))
    dnn.add(Dropout(DROP_RATE))
    dnn.add(Dense(1,activation="sigmoid",dtype=np.float32))
    dnn.compile(optimizer='adam',
                loss=BinaryCrossentropy(from_logits=False),
                metrics=['accuracy'])   # add to default metrics=loss
    # NOTE(review): build() is passed INPUT_SHAPE without a batch dimension;
    # Keras tolerates this here but (None,)+INPUT_SHAPE is the documented form.
    dnn.build(input_shape=INPUT_SHAPE)
    #ln_rate = tf.keras.optimizers.Adam(learning_rate = LN_RATE)
    #bc=tf.keras.losses.BinaryCrossentropy(from_logits=False)
    #model.compile(loss=bc, optimizer=ln_rate, metrics=["accuracy"])
    return dnn
model = make_DNN()
print(model.summary())
# + id="nlVF0hR3j2UW"
from keras.callbacks import ModelCheckpoint
def do_cross_validation(X,y):
    """Train a fresh CNN on each of FOLDS K-fold splits and plot learning curves.

    The epoch with the best validation accuracy (across all folds) is
    checkpointed to MODELPATH. Relies on module-level SPLITS, FOLDS, EPOCHS,
    MODELPATH, and make_DNN.
    """
    fold=0
    # Save whichever epoch achieves the best validation accuracy.
    mycallbacks = [ModelCheckpoint(
        filepath=MODELPATH, save_best_only=True,
        monitor='val_accuracy', mode='max')]
    splitter = KFold(n_splits=SPLITS)  # this does not shuffle
    for train_index,valid_index in splitter.split(X):
        if fold < FOLDS:
            fold += 1
            X_train=X[train_index]  # inputs for training
            y_train=y[train_index]  # labels for training
            X_valid=X[valid_index]  # inputs for validation
            y_valid=y[valid_index]  # labels for validation
            print("MODEL")
            # Call constructor on each CV. Else, continually improves the same model.
            # FIX: was `model = model = make_DNN()` (duplicated-assignment typo).
            model = make_DNN()
            print("FIT")  # model.fit() implements learning
            start_time=time.time()
            history=model.fit(X_train, y_train,
                    epochs=EPOCHS,
                    verbose=1,  # ascii art while learning
                    callbacks=mycallbacks,  # called at end of each epoch
                    validation_data=(X_valid,y_valid))
            end_time=time.time()
            elapsed_time=(end_time-start_time)
            print("Fold %d, %d epochs, %d sec"%(fold,EPOCHS,elapsed_time))
            # print(history.history.keys())  # all these keys will be shown in figure
            pd.DataFrame(history.history).plot(figsize=(8,5))
            plt.grid(True)
            plt.gca().set_ylim(0,1)  # any losses > 1 will be off the scale
            plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="9Ggt4EsSj2UY" outputId="fa7e0e1d-a1a1-43ab-9cda-abacd7d1741c"
do_cross_validation(X,y)
# + colab={"base_uri": "https://localhost:8080/"} id="e-jG1h5fj2Ua" outputId="0cea55ea-89e7-44de-e9ec-825d9789c4cf"
from keras.models import load_model
# Re-encode the held-out test sequences, then score the checkpointed model.
X,y = prepare_inputs_len_x_alphabet(pc_test,nc_test,ALPHABET)
best_model=load_model(MODELPATH)
scores = best_model.evaluate(X, y, verbose=0)
print("The best model parameters were saved during cross-validation.")
print("Best was defined as maximum validation accuracy at end of any epoch.")
print("Now re-load the best model and test it on previously unseen data.")
print("Test on",len(pc_test),"PC seqs")
print("Test on",len(nc_test),"NC seqs")
print("%s: %.2f%%" % (best_model.metrics_names[1], scores[1]*100))
# + colab={"base_uri": "https://localhost:8080/", "height": 312} id="VycUnmvUj2Ue" outputId="a303cfec-ae09-4b28-c41d-423d99b379f6"
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
# Compare the trained model against a constant "always 0" baseline on ROC/AUC.
ns_probs = [0 for _ in range(len(y))]  # no-skill baseline predictions
bm_probs = best_model.predict(X)
ns_auc = roc_auc_score(y, ns_probs)
bm_auc = roc_auc_score(y, bm_probs)
ns_fpr, ns_tpr, _ = roc_curve(y, ns_probs)
bm_fpr, bm_tpr, _ = roc_curve(y, bm_probs)
plt.plot(ns_fpr, ns_tpr, linestyle='--', label='Guess, auc=%.4f'%ns_auc)
plt.plot(bm_fpr, bm_tpr, marker='.', label='Model, auc=%.4f'%bm_auc)
plt.title('ROC')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend()
plt.show()
print("%s: %.2f%%" %('AUC',bm_auc*100.0))
# + colab={"base_uri": "https://localhost:8080/", "height": 36} id="kFMb6rGNj2Ug" outputId="0c15573d-8a36-4ecc-f9ad-9ab021af8c9a"
# Wall-clock timestamp marking the end of the run.
t = time.time()
time.strftime('%Y-%m-%d %H:%M:%S %Z', time.localtime(t))
|
Notebooks/ORF_CNN_117.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: asvtorch
# language: python
# name: asvtorch
# ---
# # Reproducibility
import torch
# Fix both torch and numpy RNG seeds so runs are repeatable.
my_seed=19951008
torch.manual_seed(my_seed)
import numpy as np
np.random.seed(my_seed)
from tqdm import tqdm
#torch.set_deterministic(True)
# # Import libraries
import json
from sklearn.preprocessing import LabelEncoder
import sys
## These two should correspond to the path where the asvtorch code is present, in particular the:
# - asvtorch/asvtorch folder
# - asvtorch/asvtorch/src folder
# FIX: the line below was a stray bare expression ("asvtorch/asvotorch") that
# raised NameError at import time; it is preserved here only as a comment.
# asvtorch/asvotorch
sys.path.append("../")
sys.path.append("../..")
from src.utterances.utterance_list import UtteranceList
from src.backend.vector_processing import VectorProcessor
import wandb
from src.gender_classifiers import LogisticRegression, FC2, FC4
from torch.autograd import Variable
import sklearn.metrics
from sklearn.model_selection import StratifiedKFold
import scipy.linalg
import itertools
import pandas as pd
from tqdm import tqdm
# # X-Vectors
# ## Load features
# %%time
# Load precomputed x-vector trial embeddings (UtteranceList from asvtorch).
# NOTE(review): absolute path is machine-specific; parameterize for portability.
plda_data = UtteranceList.load('trial_embeddings', '/media/hdd1/khaled/voxceleb_xvector_outputs-correct/full_system_default/utterances/')
def get_correct_recordings_index(spk_labels):
    """Return indices that balance the recordings per speaker.

    For every speaker keeps only the first k occurrences, where k is the
    frequency of the least-frequent speaker, so all speakers end up equally
    represented. Prints k as a side effect (as before).

    Improvements over the original: Counter replaces the O(n^2)
    list.count-per-label scan; the no-op bare `next` is now `continue`;
    empty input returns [] instead of raising ValueError.
    """
    from collections import Counter
    if not spk_labels:
        return []
    counts = Counter(spk_labels)  # one O(n) pass over the labels
    least_freq_spk = min(counts.values())
    print(least_freq_spk)
    speaker_indexes = []
    seen = Counter()
    for index, spk_id in enumerate(spk_labels):
        seen[spk_id] += 1
        # Skip occurrences beyond the per-speaker quota.
        if seen[spk_id] > least_freq_spk:
            continue
        speaker_indexes.append(index)
    return speaker_indexes
def gender_classifier(
    train_embeddings,
    train_labels,
    test_embeddings,
    test_labels,
    model_name = 'log_reg'):
    """Train a small classifier on speaker embeddings and evaluate it.

    model_name selects LogisticRegression / FC2 / FC4 (project classes).
    Reads hyper-parameters from the global `config` dict and logs metrics to
    wandb. Training runs on CUDA; embeddings/labels are assumed to be CUDA
    tensors already — TODO confirm with callers.
    Returns (trained model, test F1 score).
    NOTE(review): torch.autograd.Variable is deprecated; plain tensors suffice.
    """
    # Train
    print("Train embeddings", train_embeddings.shape)
    train = torch.utils.data.TensorDataset(train_embeddings, train_labels)
    train_loader = torch.utils.data.DataLoader(dataset = train, batch_size = config['batch_size'], shuffle = False)
    # Test
    test = torch.utils.data.TensorDataset(test_embeddings, test_labels)
    test_loader = torch.utils.data.DataLoader(dataset = test, batch_size = config['test_batch_size'], shuffle = False)
    # Pick the architecture; all output 2 classes (male/female).
    if model_name == 'log_reg':
        model = LogisticRegression(train_embeddings.shape[1], 2)
    elif model_name == 'fc2':
        model = FC2(train_embeddings.shape[1], 2, config['dropout'])
    elif model_name == 'fc4':
        model = FC4(train_embeddings.shape[1], 2, config['dropout'])
    model = model.cuda()
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=config['lr'])
    wandb.watch(model, log="all")
    for epoch in tqdm(range(config['epochs'])):
        for i, (vectors, labels) in enumerate(train_loader):
            # Define variables
            train = Variable(vectors.view(-1, train_embeddings.shape[1]))
            labels = Variable(labels)
            # Clear gradients
            optimizer.zero_grad()
            # Forward propagation
            outputs = model(train)
            # Calculate softmax and cross entropy loss
            loss = criterion(outputs, labels)
            # Calculate gradients
            loss.backward()
            # Update parameters
            optimizer.step()
    # Get test predictions (argmax over the two logits per sample).
    y_pred = []
    y_true = []
    for i, (x_test, y_test) in enumerate(test_loader):
        x_test = Variable(x_test)
        outputs = model(x_test)
        y_pred += torch.max(outputs.data, 1)[1].cpu().numpy().tolist()
        y_true += y_test.data.cpu().numpy().tolist()
    wandb.log({
        'Accuracy': sklearn.metrics.accuracy_score(y_true, y_pred),
        'F1': sklearn.metrics.f1_score(y_true, y_pred)
    })
    return model, sklearn.metrics.f1_score(y_true, y_pred)
# ## Load gender metadata
# ! ls dataset/
import pandas as pd
# Per-speaker gender labels (VoxCeleb_ID -> gender).
df = pd.read_csv("dataset/gender-train_test.csv")
df.head()
# +
# Train/test speaker-ID splits stored as JSON lists.
with open("dataset/gender-train_set.txt") as f:
    train_speakers = json.load(f)
with open("dataset/gender-test_set.txt") as f:
    test_speakers = json.load(f)
# -
# %%time
from tqdm import tqdm
# Map each utterance to the train or test split based on its speaker ID.
train_indexes = []
plda_vox_id = []
test_indexes = []
test_plda_vox_id = []
for i, voxID_video_id in enumerate(tqdm(plda_data.get_utt_labels())):
    # Let's now remove the "recording" info from voxID-YT id
    current_id = voxID_video_id.split("-")[0]
    if current_id in train_speakers:
        train_indexes.append(i)
        plda_vox_id.append(current_id)
    elif current_id in test_speakers:
        test_indexes.append(i)
        test_plda_vox_id.append(current_id)
len(train_indexes), len(test_indexes)
# - Identify plausible recordings (balance recordings per speaker)
#
# Balance recordings per speaker, then slice embeddings/labels accordingly.
train_idx = get_correct_recordings_index(plda_vox_id)
test_idx = get_correct_recordings_index(test_plda_vox_id)
len(train_idx), len(test_idx)
X_train = plda_data.embeddings[train_indexes]
X_train = X_train[train_idx]
y_train_spk = np.array(plda_vox_id)
y_train_spk = y_train_spk[train_idx]
"""
X_train = [train_indexes]
current_labels = current_labels[train_test_idx]
spk_id = np.array(spk_id)[train_test_idx]
"""
X_test = plda_data.embeddings[test_indexes]
X_test = X_test[test_idx]
y_test_spk = np.array(test_plda_vox_id)
y_test_spk = y_test_spk[test_idx]
# Speaker ID -> gender lookup, then per-sample gender labels.
id_gender_dict = pd.Series(df.gender.values,index=df.VoxCeleb_ID).to_dict()
y_train = [id_gender_dict[x] for x in y_train_spk]
y_test = [id_gender_dict[x] for x in y_test_spk]
len(y_train), len(y_test)
# It is ok that the split is done at 'y' level, however current_labels should have the same length of np.unique(speakers_ids)
def train_holdout(preprocessing_strategy, model_name, train_embeddings, train_labels, test_embeddings, test_labels):
    """Optionally preprocess embeddings, then train/evaluate a gender classifier.

    preprocessing_strategy: '' (none), 'cwl' (centering/whitening/length-norm
    via asvtorch VectorProcessor), or 'wccn'.
    NOTE(review): `wccn` is not defined or imported in this notebook — the
    'wccn' branch would raise NameError; confirm before using it.
    Returns the trained model; prints the test F1 as a side effect.
    """
    train_embeddings = train_embeddings.cuda()
    train_labels = train_labels.cuda()
    test_embeddings = test_embeddings.cuda()
    test_labels = test_labels.cuda()
    if preprocessing_strategy == 'cwl':
        # Preprocess embeddings
        vector_processor = VectorProcessor.train(train_embeddings, 'cwl', 'cuda:0')
        train_embeddings = vector_processor.process(train_embeddings)
        test_embeddings = vector_processor.process(test_embeddings)
    elif preprocessing_strategy == 'wccn':
        L = wccn(train_embeddings.cpu().numpy(), train_labels.cpu().numpy(), 0)
        train_embeddings = torch.matmul(train_embeddings, torch.from_numpy(L).cuda().float())
        test_embeddings = torch.matmul(test_embeddings, torch.from_numpy(L).cuda().float())
    # Normal models
    model, test_f1 = gender_classifier(train_embeddings,
                   train_labels,
                   test_embeddings,
                   test_labels,
                   model_name=model_name
                  )
    print(test_f1)
    return model
# Hyper-parameter grid for the FC2 experiment (a single combination here).
models_to_evaluate = ['fc2']
norm_strat_to_evaluate = ['']
dropout = [False]
batch_size = [256]
lr = [0.001]
epochs = [200]
grid_axes = (epochs, models_to_evaluate, norm_strat_to_evaluate, dropout, batch_size, lr)
train_combinations = [combo for combo in itertools.product(*grid_axes)]
"""
combos_to_exclude = list(itertools.product(['log_reg'], norm_strat_to_evaluate, [True]))
train_combinations = list(set(train_combinations) - set(combos_to_exclude))"""
len(train_combinations)
# Convert gender labels to numerical format for training reasons
# (fit on train, reuse the same mapping on test), then wrap as torch tensors.
label_encoder = LabelEncoder()
y_train = label_encoder.fit_transform(y_train)
y_test = label_encoder.transform(y_test)
y_train = torch.tensor(y_train)
y_test = torch.tensor(y_test)
# Train one model per grid combination, logging each run to wandb,
# then save the last trained FC2 model's weights.
trained_model = None
for epoch, model, strategy, drop, bs, lr_now in train_combinations:
    # `config` is read as a global by gender_classifier().
    config = {
        'batch_size' : bs,
        'test_batch_size' : 100,
        'epochs' : epoch,
        'lr' : lr_now,
        'seed' : my_seed,
        'log_interval' : 1,
        'model_name' : model,
        'feature_norm' : strategy,
        'dropout': drop,
        'dataset' : 'gender',
        'embedding' : 'x-vec',
        'folder_fn': 'xvectors/log_reg/'
    }
    print(config)
    if drop:
        drop_id = 'dropout'
    else:
        drop_id = ''
    wandb.init(
        project='voxceleb_enrichment',
        name='_'.join([model,config['embedding'], strategy, drop_id]),
        config=config
    )
    trained_model = train_holdout(strategy, model, X_train, y_train, X_test, y_test)
    wandb.run.finish()
# ! mkdir torch_models
torch.save(trained_model.state_dict(), "torch_models/xvec_fc2_model")
# ## FC4
# Hyper-parameter grid for the FC4 experiment (a single combination here).
models_to_evaluate = ['fc4']
norm_strat_to_evaluate = ['']
dropout = [False]
batch_size = [256]
lr = [0.001]
epochs = [200]
grid_axes = (epochs, models_to_evaluate, norm_strat_to_evaluate, dropout, batch_size, lr)
train_combinations = [combo for combo in itertools.product(*grid_axes)]
# Same training loop as the FC2 section, for the FC4 grid.
# NOTE(review): this loop is duplicated three times in the notebook
# (FC2/FC4/log_reg); a shared helper would remove the repetition.
trained_model = None
for epoch, model, strategy, drop, bs, lr_now in train_combinations:
    # `config` is read as a global by gender_classifier().
    config = {
        'batch_size' : bs,
        'test_batch_size' : 100,
        'epochs' : epoch,
        'lr' : lr_now,
        'seed' : my_seed,
        'log_interval' : 1,
        'model_name' : model,
        'feature_norm' : strategy,
        'dropout': drop,
        'dataset' : 'gender',
        'embedding' : 'x-vec',
        'folder_fn': 'xvectors/log_reg/'
    }
    print(config)
    if drop:
        drop_id = 'dropout'
    else:
        drop_id = ''
    wandb.init(
        project='voxceleb_enrichment',
        name='_'.join([model,config['embedding'], strategy, drop_id]),
        config=config
    )
    trained_model = train_holdout(strategy, model, X_train, y_train, X_test, y_test)
    wandb.run.finish()
torch.save(trained_model.state_dict(), "torch_models/xvec_fc4_model")
# ## Log reg
# Hyper-parameter grid for the logistic-regression experiment (single combination).
models_to_evaluate = ['log_reg']
norm_strat_to_evaluate = ['']
dropout = [False]
batch_size = [256]
lr = [0.001]
epochs = [200]
grid_axes = (epochs, models_to_evaluate, norm_strat_to_evaluate, dropout, batch_size, lr)
train_combinations = [combo for combo in itertools.product(*grid_axes)]
# Same training loop as the FC2/FC4 sections, for logistic regression.
trained_model = None
for epoch, model, strategy, drop, bs, lr_now in train_combinations:
    # `config` is read as a global by gender_classifier().
    config = {
        'batch_size' : bs,
        'test_batch_size' : 100,
        'epochs' : epoch,
        'lr' : lr_now,
        'seed' : my_seed,
        'log_interval' : 1,
        'model_name' : model,
        'feature_norm' : strategy,
        'dropout': drop,
        'dataset' : 'gender',
        'embedding' : 'x-vec',
        'folder_fn': 'xvectors/log_reg/'
    }
    print(config)
    if drop:
        drop_id = 'dropout'
    else:
        drop_id = ''
    wandb.init(
        project='voxceleb_enrichment',
        name='_'.join([model,config['embedding'], strategy, drop_id]),
        config=config
    )
    trained_model = train_holdout(strategy, model, X_train, y_train, X_test, y_test)
    wandb.run.finish()
torch.save(trained_model.state_dict(), "torch_models/xvec_log_reg_model")
|
notebooks/02.2-Gender recognition-Train_test-xvec.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
from sklearn.cluster import KMeans
from matplotlib import style
style.use('ggplot')
# +
# %%time
# Toy 2-D points forming two visually separable groups.
x = [1,2,0.5,7,8,10,5]
y= [1,3,2,8,7,9,5]
plt.scatter(x,y)
plt.show()
# -
# Stack the coordinate lists into an (n, 2) array and fit 2-cluster KMeans.
arr = np.vstack((x,y)).T
kmeans = KMeans(n_clusters=2)
kmeans.fit(arr)
centroids = kmeans.cluster_centers_
labels = kmeans.labels_
colors = ['r.','g.']  # marker style per cluster label (0 -> red, 1 -> green)
# +
# Plot each point in its cluster color, plus the centroids as X markers.
for i in range(len(arr)):
    print(arr[i])
    plt.plot(arr[i][0], arr[i][1], colors[labels[i]], markersize=10)
plt.scatter(centroids[:, 0], centroids[:,1], marker="x", s=100)
plt.show()
# +
data = "rrr"
# %store data
# -
# %who
|
clustering_ex_01.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="gf4IcCFont41" outputId="779520e1-da4d-452a-d853-3a8ad036f943"
# ! pip install urduhack
# ! pip install tf2crf==0.1.13
import urduhack
import json
# + colab={"base_uri": "https://localhost:8080/"} id="RfSXzpi0oAHH" outputId="1305a029-dfb3-487d-cf4e-e434b39ad906"
# Load the UQAD question-answering dataset and pull one passage.
# FIX: use a context manager — the original `f = open(...)` leaked the handle.
with open('UQAD.json') as f:
    d = json.load(f)
context = d["1"]["context"]
print(context,"\n",type(context))
# + colab={"base_uri": "https://localhost:8080/"} id="fY7qamfFoWlT" outputId="4ced3c37-f2ea-4bd4-b049-3400308fd5c2"
# Downloading models
urduhack.download()
nlp = urduhack.Pipeline()
pos_passage = dict()  # NOTE(review): populated nowhere in this cell — confirm it is needed
text = context
doc = nlp(text)
# Print each sentence, then per-word POS tags and per-token NER tags.
for sentence in doc.sentences:
    print(sentence.text)
    for word in sentence.words:
        print(f"{word.text}\t{word.pos}")
    for token in sentence.tokens:
        print(f"{token.text}\t{token.ner}")
|
NLP_POS_Tagging_.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] heading_collapsed=true
# # King County Houses - Insight Project
# -
# # Basic setups
# ## IMPORTS
import math
import plotly.express as px
import pandas as pd
from IPython.core.display import HTML
import seaborn as sns
import datetime
import ipywidgets as widgets
from ipywidgets import fixed
# ## Helper functions
def jupyter_settings():
    """Configure notebook-wide plotting and pandas display defaults.

    NOTE(review): `plt` is provided by the %pylab magic below (which also
    injects numpy as `np`); that magic is deprecated — consider explicit
    `import matplotlib.pyplot as plt` / `import numpy as np` instead.
    """
    # %matplotlib inline
    # %pylab inline
    plt.style.use( 'bmh' )
    plt.rcParams['figure.figsize'] = [25, 12]
    plt.rcParams['font.size'] = 24
    # Widen the notebook and show all rows/columns of DataFrames.
    display( HTML( '<style>.container { width:100% !important; }</style>') )
    pd.options.display.max_columns = None
    pd.options.display.max_rows = None
    pd.set_option( 'display.expand_frame_repr', False )
    sns.set()
jupyter_settings()
# # Initial data processing
# ## Data Load
# Load the King County house-sales dataset (raw CSV).
df_raw = pd.read_csv(r'../data/raw/kc_house_data.csv')
# ## Data Overview
df_raw.sample(5)
# Report dataset dimensions. FIX: corrected grammar/typos in the message
# (was "dataset have {} transation registers").
print ('The original dataset has {} transaction registers with {} attributes.'.format(df_raw.shape[0], df_raw.shape[1]))
df_raw.columns
# 1° Task: What is the relevance of these attributes: 'grade', 'sqft_living15', 'sqft_lot15' and 'view'?
# Identify them, then clean or drop.
# ## Data Types
df_raw.dtypes
# Tasks:
# 2°) Change 'waterfront' from 0 or 1 to yes or no
# 3°) Change 'date' to date format
# ## Check NA
df_raw.isna().sum()
# ## Check Duplicated
# Count rows sharing an 'id' (repeat sales or data errors).
df_raw['id'].duplicated().sum()
# Task:
# 4) Identify the duplicated ones and drop if it is necessary
# ## Check attributes
# ### Attribute 'grade'
# It's an index from 1 to 13, where 1-3 falls short of building construction and design, 7 has an average level of construction and design, and 11-13 have a high quality level of construction and design.
df_raw['grade'].unique()
# ***Definition***
#
# Cycle 01 - Change the attribute 'grade' to low_quality_design, average_quality_design and high_quality_design
#
# Cycle 02 - After a look at the distribution of price/grade, it seems better to keep this as a number, but it will be considered a categorical feature
# ### Attribute 'sqft_living15'
# sqft_living15 - The square footage of interior housing living space for the nearest 15 neighbors
df_raw['sqft_living15'].unique()
# Task 5) - Drop this feature
# ### Attribute 'sqft_lot15'
# sqft_lot15 - The square footage of the land lots of the nearest 15 neighbors
df_raw['sqft_lot15'].unique()
# Task 6): Drop this feature
# ### Attribute 'view'
# view - An index from 0 to 4 of how good the view of the property was
df_raw['view'].unique()
# ***Decision***
#
# Keep this for now
# # Data Transform/Preparing
# Work on a copy so df_raw stays untouched.
df3 = df_raw.copy()
# 1) Drop 'sqft_living15'
#
# 2) Drop 'sqft_lot15'
# +
df3 = df3.drop(columns = ['sqft_living15', 'sqft_lot15'], axis = 1)
df3.head()
# -
# 3) Change 'waterfront' from 0 or 1 to yes or no
# NOTE(review): any non-1 value (including NaN) maps to 'no' — confirm intended.
df3['waterfront'] = df3['waterfront'].apply(lambda x: 'yes' if x==1 else 'no')
# 4) Change 'date' to date format
#date
df3['date'] = pd.to_datetime(df3['date'])
# 5) Identify the duplicated ones and drop if it is necessary
# keep=False marks *all* rows of each duplicated id, not just repeats.
df_dup = df3[df3['id'].duplicated(keep = False )].reset_index(drop=True)
df_dup.head()
print ('There is {} duplicated registers in the dataframe. Duplicated registers with the same date will be dropped'.format(df_dup.shape[0]))
# 6°)Task: All the duplicated one with the same date will be considered errors, the ones with different dates will be keep
# # Data Exploration
df4 = df3.copy()
main_attributes = df4[['date', 'price', 'bedrooms', 'bathrooms', 'sqft_lot', 'sqft_living','floors', 'yr_built', 'yr_renovated']]
main_attributes.describe().T
# 1) There is a big std in the price, and seems to have outliers. Check it.
#
# 2) There is at least one house without bedrooms and one with 33. Check it.
#
# 3) There is at least one house without bathrooms. Check it.
#
# 4) There are houses with the year of renovation with '0'. It has to be treated.
# ## Descriptive Statistics
df_aux = df4.drop_duplicates(subset='id')
num_attributes = df_aux.select_dtypes(include = ['int64', 'float64'] )
cat_attributes = df_aux[['date', 'waterfront', 'view', 'condition', 'grade','yr_built', 'yr_renovated']]
# ### Numerical Attributes
#central tendency statistics - mean, median
ct_mean = pd.DataFrame ( num_attributes.apply (np.mean ) ).T
ct_median = pd.DataFrame ( num_attributes.apply (np.median ) ).T
# dispersion statistics - standard deviation, minimum, maximum, range, skew, kurtosis
d_std = pd.DataFrame ( num_attributes.apply (np.std) ).T
d_min = pd.DataFrame ( num_attributes.apply (np.min) ).T
d_max = pd.DataFrame ( num_attributes.apply (np.max) ).T
d_range = pd.DataFrame ( num_attributes.apply ( lambda x: x.max() - x.min())).T
d_skew = pd.DataFrame ( num_attributes.apply ( lambda x: x.skew() ) ).T
d_kurtosis = pd.DataFrame ( num_attributes.apply ( lambda x: x.kurtosis() ) ).T
#concatenate
df_metrics = pd.concat ([d_min,d_max,d_range,ct_mean,ct_median,d_std,d_skew,d_kurtosis]).T.reset_index()
df_metrics.columns = ['attributes', 'min.','max','range', 'mean', 'median', 'std', 'skew', 'kurtosis']
df_metrics
# ### Categorical Attributes
cat_attributes.head()
# +
total = df_aux.shape[0]
wf_number = cat_attributes [ cat_attributes ['waterfront'] == 'yes'].shape[0]
lqd_number = cat_attributes [ cat_attributes ['grade'] <=3 ].shape[0]
hq_number = cat_attributes [ cat_attributes ['grade'] >= 11].shape[0]
avgq_number = total - lqd_number - hq_number
# -
print ( 'Dataset has {} houses with waterview, what corresponding to {:.2f} % of the data set\n' .format( wf_number, (wf_number/total)*100))
print ( 'Dataset has {} houses with low quality design , what corresponding to {:.2f} % of the data set\n' .format( lqd_number, (lqd_number/total)*100))
print ( 'Dataset has {} houses with average quality design , what corresponding to {:.2f} % of the data set\n' .format( avgq_number, (avgq_number/total)*100))
print ( 'Dataset has {} houses with high quality design , what corresponding to {:.2f} % of the data set' .format( hq_number, (hq_number/total)*100))
# #### mean price for view
df4_aux = df4[['price','view']].groupby('view').mean().reset_index()
sns.barplot(x='view', y ='price', data=df4_aux)
# It seems that the view makes a big contribution to the price
# #### Mean price for grade
df4_aux2 = df4[['price', 'grade']].groupby('grade').mean().reset_index()
sns.barplot(x='grade', y='price', data=df4_aux2)
# #### Average price for condition
aux = df4[['price', 'condition']].groupby('condition').mean().reset_index()
sns.barplot (x='condition', y='price', data = aux);
# It seems more valuable to buy a 2-condition house and renovate it than to buy a 4-condition one
# #### Average price per Zipcode
aux = df4[['price', 'zipcode']].groupby('zipcode').mean().reset_index()
sns.barplot(x='zipcode', y='price', data = aux);
aux.head()
# #### Average price per year built
aux = df4[['price', 'yr_built']].groupby('yr_built').mean().reset_index()
sns.barplot(x='yr_built', y='price', data = aux);
# +
#### Average price per year renovated
# -
aux = df4[['price', 'yr_renovated']].groupby('yr_renovated').mean().reset_index()
sns.barplot(x='yr_renovated', y='price', data = aux);
# # Primary Hypotheses
# 1) Houses with a water view are more expensive.
#
# 2) Houses with a garage are more expensive.
#
# 3) There is one main feature that raises the prices.
#
# 4) Houses that were not renovated are cheaper.
#
# 5) Most houses with a high-quality design have a water view.
# 6) Houses priced below the average price for their zip code would be a good deal.
# 7) Houses priced below the average price for their zip code AND with condition equal to '2' would be a good opportunity to raise the grade and profit.
# # Feature Engineering
# ## Feature filtering
# Keep only the columns needed for the buy/sell recommendation features.
df6 = df4[['id', 'price', 'sqft_living', 'sqft_lot', 'zipcode']].copy()
# ## Average price per area
# 'area' is the larger of interior living space and lot size (sqft).
# Vectorized row-wise max replaces the slow element-wise apply.
df6['area'] = df6[['sqft_living', 'sqft_lot']].max(axis=1)
df6['price_area'] = df6['price'] / df6['area']  # price per sqft for each house
df6.head()
# ## Average price for sqft built per zipcode
# BUG FIX: `groupby('zipcode')['price_area'].mean()` returns a Series indexed
# by zipcode, which does not align with df6's row index and silently fills the
# new column with NaN. `transform('mean')` broadcasts the per-zipcode mean
# back onto every row.
df6['avg_price'] = df6.groupby('zipcode')['price_area'].transform('mean')
df6.head()
# +
## Price level (cheap, average expensive)
# +
## Should buy
# -
# # Conclusion
# # Next steps
# - Use ML models to find clusters among the houses and classify them
# - Can use the data to a simple regression model to the price of selling future houses
|
notebooks/kc_houses_2.0-Data_Filtering.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# ## Stratified sampling
#
# In a large dataset, a relatively small group of points might be overplotted by the dominant group. In this case **stratified** sampling can help.
# +
import numpy as np
import pandas as pd
from lets_plot import *
LetsPlot.setup_html()
# +
# Synthetic data: a tiny group 'A' (3 points) hidden inside a dominant
# group 'B' (4997 points); both groups are standard-normal in x and y.
N = 5000
small_group = 3
large_group = N - small_group
np.random.seed(123)
data = dict(
    x=np.random.normal(0, 1, N),
    y=np.random.normal(0, 1, N),
    cond=['A'] * small_group + ['B'] * large_group,
)
# -
# Data points in group 'A' (small group) are overplotted by the dominant group 'B'.
p = ggplot(data, aes('x','y',color='cond')) + \
scale_color_manual(values=["red", "#1C9E77"], breaks=['A', 'B'])
p + geom_point(size=5, alpha=.2)
# The 'random' sampling loses the group 'A' altogether.
p + geom_point(size=5, sampling=sampling_random(50, seed=2))
# Stratified sampling ensures that group 'A' is represented.
p + geom_point(size=5, sampling=sampling_random_stratified(50, seed=2))
|
source/examples/cookbook/sampling_stratified.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.2.0
# language: julia
# name: julia-1.2
# ---
# +
using PyCall
using PyPlot
PyPlot.svg(true)
# -
# One forward-time centered-space (FTCS) step for the linear advection
# equation a_t + u * a_x = 0 on a periodic grid.
#
# Arguments: dt (time step), dx (grid spacing), a (solution at the current
# time level), u (constant advection speed). Returns a new vector advanced
# by one step; `a` itself is not modified.
#
# NOTE(review): FTCS is unconditionally unstable for pure advection, so
# oscillations are expected to grow — the later plots appear to show this.
function FTCS_update(dt, dx, a, u)
    C = u*dt/dx  # Courant number
    a_new = copy(a)
    # centered spatial difference for the interior points
    a_new[2:end-1] = a[2:end-1] .- C/2 .* (a[3:end].-a[1:end-2])
    # periodic boundaries: the grid wraps around at both ends
    a_new[1] = a[1] - C/2*(a[2]-a[end])
    a_new[end] = a[end] - C/2*(a[1] - a[end-1])
    a_new
end
# +
xs = collect(0:0.01:1)
dx = xs[2]-xs[1]
dt = dx/2
a0 = exp.(-0.5*(xs.-0.25).*(xs.-0.25)./(0.002))
plot(xs, a0)
# -
a = copy(a0)
for i in 1:20
a = FTCS_update(dt, dx, a, 1.0)
end
plot(xs, a)
# First-order upwind step for linear advection a_t + u * a_x = 0 with
# periodic wrap-around. Uses a backward difference, i.e. the upwind side
# for u > 0. Returns a new vector; the input is left untouched.
function upwind_update(dt, dx, a, u)
    courant = u * dt / dx
    updated = similar(a)
    # backward difference for every point except the first
    for i in 2:length(a)
        updated[i] = a[i] - courant * (a[i] - a[i-1])
    end
    # periodic boundary: the first cell's left neighbor is the last cell
    updated[1] = a[1] - courant * (a[1] - a[end])
    updated
end
a = copy(a0)
for i in 1:200
a = upwind_update(dt, dx, a, 1.0)
end
plot(xs, a0, "-k")
plot(xs, a)
# One upwind step for the inviscid Burgers equation in advective
# (non-conservative) form, a_t + a * a_x = 0, with periodic boundaries.
# The Courant number varies per cell because the advection speed is the
# solution itself.
# NOTE(review): the backward difference assumes a .> 0 everywhere (flow to
# the right); the driver appears to ensure this by offsetting the initial
# profile by +0.2 — confirm if reused elsewhere.
function upwind_burgers_update(dt, dx, a)
    C = a*dt/dx  # per-cell Courant numbers (a vector, unlike the linear case)
    a_new = copy(a)
    a_new[2:end] = a[2:end] .- C[2:end].*(a[2:end] .- a[1:end-1])
    # periodic wrap for the first cell
    a_new[1] = a[1] - C[1]*(a[1] - a[end])
    a_new
end
a0_burgers = a0 .+ 0.2
plot(xs, a0_burgers)
a = copy(a0_burgers)
for i in 1:20000
a = upwind_burgers_update(dx/10.0, dx, a)
end
plot(xs, a)
# One step of a linear advection update with periodic boundaries.
# NOTE(review): despite the "LW" (Lax-Wendroff) name, the stencil
# 0.5*((1+C)*a[i-1] + (1-C)*a[i+1]) is the Lax-Friedrichs scheme; a true
# Lax-Wendroff update would carry a C^2 term. Confirm which was intended.
function LW_update(dt, dx, a, u)
    C = u*dt/dx  # Courant number
    a_new = copy(a)
    # interior points: average of neighbors weighted by (1 +/- C)
    for i in 2:size(a_new,1)-1
        a_new[i] = 0.5*((1+C)*a[i-1] + (1-C)*a[i+1])
    end
    # periodic wrap-around for the two boundary cells
    a_new[1] = 0.5*((1+C)*a[end] + (1-C)*a[2])
    a_new[end] = 0.5*((1+C)*a[end-1] + (1-C)*a[1])
    a_new
end
a = copy(a0)
for i in 1:200
a = LW_update(0.05, 1.0, a, 1.0)
end
plot(a0)
plot(a)
# Finite-volume style step for Burgers' equation in conservative form,
# using the flux f(u) = u^2/2 differenced with the left neighbor, on a
# periodic grid. Returns a new vector; the input is not modified.
function FV_burger_update(dt, dx, u)
    next = copy(u)
    n = size(u, 1)
    for cell in 2:n
        next[cell] = u[cell] - 0.5*dt/dx*(u[cell]*u[cell] - u[cell-1]*u[cell-1])
    end
    # periodic boundary: flux difference against the last cell
    next[1] = u[1] - 0.5*dt/dx*(u[1]*u[1] - u[end]*u[end])
    next
end
a = copy(a0)
for i in 1:2000
a = FV_burger_update(0.1, 1.0, a)
end
plot(a0)
plot(a)
sum(a), sum(a0)
|
AdvectionFiniteDiff.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# %load_ext autoreload
# %autoreload 2
import numpy as np
from ridge import *
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns
sns.set_style('white')
sns.set_context('talk')
sns.set_color_codes()
np.random.seed(0)
def f(x):
    """Ridge test function: (x . w)^3 + (x . e1)^2 + 1, where w is the
    normalized all-ones direction and e1 the first standard basis vector."""
    all_ones = np.ones(x.shape)
    ridge_dir = all_ones / np.linalg.norm(all_ones)
    e1 = np.zeros(x.shape)
    e1[0] = 1.0
    return np.dot(x, ridge_dir) ** 3 + np.dot(x, e1) ** 2 + 1
X = np.random.uniform(size = (int(1e3),10))
fX = np.array([f(x) for x in X])
np.random.seed(5)
U0_vec = [orth(np.random.randn(10,2)) for i in range(10)]
res_norm_gauss_newton_vec = []
for i, U0 in enumerate(U0_vec):
U, c, hist = grassmann_gauss_newton(U0, X, fX, degree = 3, history = True, disp = False, gtol = 0, ftol = 1e-12)
res_norm_gauss_newton = [np.linalg.norm(r) for r in hist['residual']]
res_norm_gauss_newton_vec.append(res_norm_gauss_newton)
print "finished iter", i
from ridge_paul import *
degree = 3
maxiter = 50
tol = 1e-10
res_norm_alternating_vec = []
for i, U0 in enumerate(U0_vec):
U, hist = RidgeAlternating(X, fX.reshape(-1,1), U0, degree, maxiter, tol**2, history = True, disp = False)
res_norm = [np.linalg.norm(r) for r in hist['residual']]
res_norm_alternating_vec.append(res_norm)
print "finished iter", i
import active_subspaces
|
fig_convergence.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.5 (''venv'': venv)'
# name: pythonjvsc74a57bd0bd7733ef04046362eed5a1629bdd13cafa362ef6098c6612526b5bf7b26aecd0
# ---
# +
'''
@brief Leg-Rest Pos Recommendataion with DecisionTree Regressor
@author <NAME> <<EMAIL>>
@date 2021. 05. 21
'''
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
import progressbar
'''
Presets & Hyper-parameters
'''
CONFIGURATION_FILE_PATH = "./data/train/data_config.csv"
DATASET_PATH = "./data/train/"
pd.set_option('display.width', 200) # for display width
# FEATURE_LENGTH = 30 # n-dimensional data feature only use
# NUMBER_OF_SAMPLES = 299 # number of augmented data
# FEATURE_MAX_LENGTH = 115 # Maximum feature length
# NUMBER_OF_RANDOM_SELECTION = 5
# MAX_TRAIN_ITERATION = -1 # infinity
'''
1. Load configuration file
'''
data_config = pd.read_csv(CONFIGURATION_FILE_PATH, header=0, index_col=0)
'''
2. data extraction
'''
X = data_config.loc[:, ['user_height', 'user_weight', 'user_age']]
bmr = 66.47+(13.75*X['user_weight'])+(5*X['user_height'])-(6.76*X['user_age'])
bmi = X['user_weight']/(X['user_height']/100*X['user_height']/100)
X["bmr"] = bmr
X["bmi"] = bmi
ys = data_config.loc[:, ['bestfit_angle_standard']]
yr = data_config.loc[:, ['bestfit_angle_relax']]
'''
DecisionTree Regression Model
'''
print("------ Regression Model Evaluation (@standard) ------")
X_train, X_test, y_train, y_test = train_test_split(X, np.ravel(ys), test_size=0.33, shuffle=True)
model_standard = DecisionTreeRegressor(
criterion = "mse",
max_depth=6,
min_samples_leaf=1,
random_state=1).fit(X_train, y_train)
print("* R2 Score with Trainset (@standard) :", model_standard.score(X_train, y_train))
print("* R2 Score with Testset (@standard) :", model_standard.score(X_test, y_test))
print("* Feature Impotances (@standard) :")
for name, value in zip(X_train.columns, model_standard.feature_importances_):
print(' - {0}: {1:.3f}'.format(name, value))
print("------ Regression Model Evaluation (@relax) ------")
X_train, X_test, y_train, y_test = train_test_split(X, np.ravel(yr), test_size=0.33, shuffle=True)
model_relax = DecisionTreeRegressor(
criterion = "mse", # mean square error
max_depth=6,
min_samples_leaf=1,
random_state=1).fit(X_train, y_train)
print("* R-squared Score with Trainset (@relax) :", model_relax.score(X_train, y_train))
print("* R-squared Score with Testset (@relax) :", model_relax.score(X_test, y_test))
print("* Feature Impotances (@standard) :")
for name, value in zip(X_train.columns, model_relax.feature_importances_):
print(' - {0}: {1:.3f}'.format(name, value))
# +
'''
Output File Generation
'''
min_age = 10
max_age = 80
ages = np.array([min_age+i for i in range(max_age-min_age+1)])
min_height = 150
max_height = 200
heights = np.array([min_height+i for i in range(max_height-min_height+1)])
min_weight = 40
max_weight = 100
weights = np.array([min_weight+i for i in range(max_weight-min_weight+1)])
# -
df = pd.DataFrame(data=[ages])
print(df)
# Sanity check: one prediction for a 40-year-old, 180 cm, 60 kg user.
a = 40
w = 60
h = 180
bmr = 66.47 + (13.75 * w) + (5 * h) - (6.76 * a)
bmi = w / (h / 100 * h / 100)
# BUG FIX: the model was trained on the column order of X, i.e.
# [user_height, user_weight, user_age, bmr, bmi], so the feature vector
# must be [h, w, a, bmr, bmi] -- not [a, h, w, bmr, bmi] as before, which
# silently fed age as height and height as weight.
pvs = model_standard.predict([[h, w, a, bmr, bmi]])
print(pvs[0])
# +
# Tabulate model predictions over the full (age, height, weight) grid and
# write them out as lookup CSVs.
bar = progressbar.ProgressBar(maxval=len(ages)*len(heights)*len(weights), widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()])
bar.start()
rows_standard = []
rows_relax = []
count = 0
for a in ages:
    for h in heights:
        for w in weights:
            bmr = 66.47 + (13.75 * w) + (5 * h) - (6.76 * a)
            bmi = w / (h / 100 * h / 100)
            # BUG FIX: training features are [height, weight, age, bmr, bmi]
            # (the column order of X), not [age, height, weight, ...].
            features = [[h, w, a, bmr, bmi]]
            pvs = model_standard.predict(features)
            pvr = model_relax.predict(features)
            # store the scalar prediction, not the 1-element ndarray
            rows_standard.append({'age': a, 'height': h, 'weight': w, 'legrest': pvs[0]})
            rows_relax.append({'age': a, 'height': h, 'weight': w, 'legrest': pvr[0]})
            count += 1
            bar.update(count)
bar.finish()
# Build each frame once at the end: DataFrame.append in a loop copies the
# whole frame on every iteration (quadratic) and is deprecated in pandas.
output_standard = pd.DataFrame(rows_standard, columns=['age', 'height', 'weight', 'legrest'])
output_relax = pd.DataFrame(rows_relax, columns=['age', 'height', 'weight', 'legrest'])
output_standard.to_csv('result_standard.csv')
output_relax.to_csv('result_relax.csv')
print("saved results")
|
2021_legrest/legrest_recom.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="2HvVWMb3hRKX"
# # Fire and Smoke Detection using DCNN
#
# >Using Transfer Learning and GPU
# + [markdown] id="u1R73-8-hET2"
# ### Mounting Google Drive
# + id="tj-pEAyCgbsZ" outputId="c3685f2e-6228-41fb-b2a1-876954763f2d" colab={"base_uri": "https://localhost:8080/"}
from google.colab import drive
drive.mount('/content/drive')
# + [markdown] id="llcFtu1Biizc"
# ### Libraries
# + id="G05O5DQ4g1We" outputId="dc9a8efd-d982-46cf-cd47-e27bad03412e" colab={"base_uri": "https://localhost:8080/"}
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
# (removed a stray literal `4` that was a no-op left over from editing)
# %load_ext tensorboard
import tensorboard
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.metrics import binary_crossentropy
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import *
from tensorflow.keras.layers import *
from tensorflow.keras.applications import InceptionV3
# + id="iMmzDdPwihVJ"
path = "/content/drive/My Drive/DataSets/fire_dataset"
# + [markdown] id="8C3PAs1_ix0G"
# ### Data Augmentation
# + id="M3ZrXYOMixYC"
datagen=ImageDataGenerator(
validation_split=0.15,
zoom_range=1,
width_shift_range=0.1,
height_shift_range=0.1,
shear_range=0.1,
horizontal_flip=True,
fill_mode="nearest",
rescale=1./255)
# + id="h4AuGW9zjTyv" outputId="c75468de-4864-493f-96bc-aea0db76b4b3" colab={"base_uri": "https://localhost:8080/"}
train_generator=datagen.flow_from_directory(path,
target_size=(224,224),
batch_size=32,
class_mode='binary',
shuffle=True,
subset='training')# set as training data
validation_generator = datagen.flow_from_directory(
path,
target_size=(224,224),
batch_size=32,
class_mode='binary',
shuffle=True,
subset='validation') # set as validation data
# + [markdown] id="6KoDAogYjlcy"
# ### Visualizing Training Data Post Augmentation
# + id="JMe73Q98jhFF" outputId="324fc81e-5750-45a4-90da-423acab29c6e" colab={"base_uri": "https://localhost:8080/", "height": 286}
x,y = train_generator.next()
for i in range (25):
plt.subplot(5,5,1 + i)
igx1 = x[i]
plt.imshow(igx1)
plt.show()
print(igx1.shape)
# + [markdown] id="F0hB_lDIk0L3"
# ### Preparing Model
# + id="TgwH0_X5jxkU"
# model_new = InceptionV3(include_top =True, weights = None, classes=2, input_shape = (224,224,3))
# # for ix in range(776):
# # model.layers[ix].trainable = False
# # x=model.output
# # # Add some new Fully connected layers
# # x=GlobalAveragePooling2D()(x)
# # x=Dense(256,activation='relu')(x)
# # x = Dropout(0.25)(x)
# # x=Dense(64,activation='relu')(x)
# # preds=Dense(2, activation='softmax')(x) #final layer with softmax activation #5749 classes
# # model_new=Model(inputs=model.input,outputs=preds)
# # model_new=Model(inputs=model.input,outputs=preds)
# + id="eTFgh5Z3m7jp"
model = Sequential()
model.add(Conv2D(32,kernel_size=(3,3),activation='relu',input_shape=(224,224,3)))
model.add(Conv2D(64,(3,3),activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.2))
model.add(Conv2D(64,(3,3),activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.2))
model.add(Conv2D(128,(3,3),activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.2))
model.add(Conv2D(128,(3,3),activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.2))
# model.add(Conv2D(256,(3,3),activation='relu'))
# model.add(MaxPooling2D(pool_size=(2,2)))
# model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(128,activation='relu'))
model.add(Dense(64,activation='relu'))
model.add(Dense(32,activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(1,activation='sigmoid')) # whatever is the label, is returned
# + id="iua6o3V3m4Lk" outputId="24ebd48e-3489-42fc-e8e0-2c1218116efc" colab={"base_uri": "https://localhost:8080/"}
model.summary()
# + id="eB73YLroogWa"
model.compile(loss = "binary_crossentropy", optimizer ="adam", metrics = ["accuracy"] )
# + [markdown] id="sVzEjBY1oBXC"
# ### Preparing Tensorboard
# + id="syUnMTNdnSkW"
import datetime
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.callbacks import TensorBoard
log_dir = "/content/drive/My Drive/DataSets/fire/model/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1)
# + [markdown] id="b81-n5gHolv-"
# ### Training Model
# + id="TtuDU5Tlon1R"
from numpy.random import seed
seed(1)
# + id="-nAN8oLkoHpC" outputId="3fde9414-08eb-4f9f-d79c-e50b2db23378" colab={"base_uri": "https://localhost:8080/"}
STEP_SIZE_TRAIN=train_generator.n//train_generator.batch_size
STEP_SIZE_VALID=validation_generator.n//validation_generator.batch_size
history=model.fit_generator(generator=train_generator,
steps_per_epoch=STEP_SIZE_TRAIN,
validation_data=validation_generator,
validation_steps=STEP_SIZE_VALID,
callbacks = [tensorboard_callback],
epochs=50)
# + [markdown] id="6EQ0yYIeqTLF"
# ### Visualizing Performance
# >Saving Model .h5 and its History
# + id="NeQVx23kqp3U"
from tensorflow.keras.models import load_model
model.save('/content/drive/My Drive/DataSets/model.h5')
# + id="hDLwf2BMoef4"
import _pickle as p
with open("/content/drive/My Drive/DataSets/fire__detection.txt",'w') as f:
for k in history.history.keys():
print(k,file=f)
for i in history.history[k]:
print(i,file=f)
# + id="Q1Om1E_XgRbd" outputId="73868608-1099-48ea-e987-9f9e3c64aaa8" colab={"base_uri": "https://localhost:8080/"}
print(history.history.keys())
# + id="BZFEFKZQrXdF" outputId="607f5a5d-e771-4720-ca7c-e3d50f4572ff" colab={"base_uri": "https://localhost:8080/", "height": 295}
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'Validation'], loc='upper left')
plt.show()
# + id="pGWpB5akrd4v" outputId="365bf628-1fce-46d1-eb18-4f968460b9bc" colab={"base_uri": "https://localhost:8080/", "height": 295}
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'Validation'], loc='upper left')
plt.show()
# + id="xp2xnOZcwUIc"
|
Fire_And_Smoke_Detection.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="nYHb1ZgWsRii"
# # Pruning with our automatic structured Pruning framework
# Welcome to an end-to-end example for magnitude-based weight pruning
#
# **Summary**
#
# In this tutorial, you will:
#
# * Train a tf.keras model for CIFAR10 from scratch.
# * Fine tune the model by applying the pruning Framework and see the accuracy.
# -
# If you want to execute this notebook in Google Colab, uncomment the code below.
# + colab={"base_uri": "https://localhost:8080/"} id="f_QYhrsRq_dL" outputId="459d5eeb-0c25-4d9c-8ccf-fec265a9b909"
# # !git clone https://github.com/Hahn-Schickard/Automatic-Structured-Pruning
# # !echo $CWD
# # !cp -rf /content/Automatic-Structured-Pruning/* /content/
# -
import pruning
# + [markdown] id="P59sTXkZrcUB"
# ## Train a model for CIFAR10 without pruning
# Download and prepare the CIFAR10 dataset.
# The CIFAR10 dataset contains 60,000 color images in 10 classes, with 6,000 images in each class. The dataset is divided into 50,000 training images and 10,000 testing images. The classes are mutually exclusive and there is no overlap between them.
#
# Create the convolutional base
# The 6 lines of code below define the convolutional base using a common pattern: a stack of Conv2D and MaxPooling2D layers.
#
# As input, a CNN takes tensors of shape (image_height, image_width, color_channels), ignoring the batch size. If you are new to these dimensions, color_channels refers to (R,G,B). In this example, you will configure our CNN to process inputs of shape (32, 32, 3), which is the format of CIFAR images. You can do this by passing the argument input_shape to our first layer.
#
# To complete our model, you will feed the last output tensor from the convolutional base (of shape (4, 4, 64)) into one or more Dense layers to perform classification. Dense layers take vectors as input (which are 1D), while the current output is a 3D tensor. First, you will flatten (or unroll) the 3D output to 1D, then add one or more Dense layers on top. CIFAR has 10 output classes, so you use a final Dense layer with 10 outputs and a softmax activation.
# + colab={"base_uri": "https://localhost:8080/"} id="lOE5QdnNq_dQ" outputId="1f6861d6-952c-464b-be78-c274a4d189dc"
import tensorflow as tf
from tensorflow.keras import datasets, layers, models
(train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data()
# Normalize pixel values to be between 0 and 1
train_images, test_images = train_images / 255.0, test_images / 255.0
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dropout(0.3))
model.add(layers.Dense(32, activation='relu'))
model.add(layers.Dropout(0.25))
model.add(layers.Dense(10, activation='softmax'))
model.summary()
# + [markdown] id="oY3Yas0wrmZf"
# Above, you can see that the output of every Conv2D and MaxPooling2D layer is a 3D tensor of shape (height, width, channels). The width and height dimensions tend to shrink as you go deeper in the network. The number of output channels for each Conv2D layer is controlled by the first argument (e.g., 32 or 64). Typically, as the width and height shrink, you can afford (computationally) to add more output channels in each Conv2D layer.
# As you can see, our (4, 4, 64) outputs were flattened into vectors of shape (1024) before going through two Dense layers.
# + [markdown] id="ei2Brtgc232J"
# ## Compile and train the model
# + colab={"base_uri": "https://localhost:8080/"} id="5Slx3T9n24o6" outputId="119ec2c1-30b9-4aca-a26e-ab6317f3765a"
comp = {
"optimizer":'adam',
"loss": tf.keras.losses.SparseCategoricalCrossentropy(),
"metrics": ['accuracy']}
model.compile(**comp)
callbacks = [tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)]
history = model.fit(train_images, train_labels, validation_split=0.2, epochs=30, batch_size=128, callbacks=callbacks)
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
# + id="qXneu-31q_dT"
model.save('CIFAR10_model.h5')
# + [markdown] id="NtV0lMtArsup"
# ## Fine-tune pre-trained model with pruning
# You will apply pruning to the whole model and see this in the model summary.
#
# In this example, you prune the model with 30% dense pruning and 40% filter pruning.
# + colab={"base_uri": "https://localhost:8080/"} id="VWLQMAO3q_db" outputId="ede18f6b-a318-497b-8cd6-f22e7bc8b62c"
dense_prune_rate=30
conv_prune_rate=40
prunemodel=pruning.prune_model('./CIFAR10_model.h5', dense_prune_rate, conv_prune_rate,'L2', num_classes=10)
# + [markdown] id="Kz2f5f-o_6mX"
# We see how we get less parameter in the pruned model.
# + [markdown] id="eHSCkPz19Fwl"
# ## Compile and re-train the model
# -
prunemodel.summary()
# + id="jPAucXYJq_di"
comp = {
"optimizer":'adam',
"loss": tf.keras.losses.SparseCategoricalCrossentropy(),
"metrics": ['accuracy']}
prunemodel.compile(**comp)
history = prunemodel.fit(train_images, train_labels, epochs=10, validation_split=0.2)
# + [markdown] id="0y_x_9EjAQbz"
# Compare both models
# + id="hw1IIf0CAReG"
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
test_loss, test_acc = prunemodel.evaluate(test_images, test_labels, verbose=2)
# + [markdown] id="vSgcm-WoclIF"
# # Prune a model to a maximum accuracy loss
#
# We define the arguments to compile the model. In this case, we only want to have an accuracy loss of 3%.
# In this example we have loaded the data directly from a TensorFlow dataset. Therefore, we do not have a defined dataloader (path or file). However, the structure of the training data is the same as the data we would read from a Python file. Therefore, we use a trick here and pass an existing FILE from the current folder as the dataloader path. This way the correct functions will be executed afterwards and no error will be issued.
# + id="rUtd-GqJdJNx"
comp = {
"optimizer": 'adam',
"loss": tf.keras.losses.SparseCategoricalCrossentropy(),
"metrics": 'accuracy'
}
automodel = pruning.pruning_for_acc('./CIFAR10_model.h5', train_images, train_labels, comp, pruning_acc=None,
max_acc_loss=3, num_classes=10, label_one_hot=False, data_loader_path="./CIFAR10_model.h5")
# -
automodel.summary()
# Compare both models
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
test_loss, test_acc = automodel.evaluate(test_images, test_labels, verbose=2)
|
How_to_use_the_Framework.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Deep Leakage from Gradients
# This notebook shows an example for an attack as described in "Deep Leakage from Gradients".
#
# Paper URL: https://papers.nips.cc/paper/2019/hash/60a6c4002cc7b29142def8871531281a-Abstract.html
# #### Abstract
# Passing gradient is a widely used scheme in modern multi-node learning system (e.g, distributed training, collaborative learning). In a long time, people used to believe that gradients are safe to share: i.e, the training set will not be leaked by gradient sharing. However, in this paper, we show that we can obtain the private training set from the publicly shared gradients. The leaking only takes few gradient steps to process and can obtain the original training set instead of look-alike alternatives. We name this leakage as \textit{deep leakage from gradient} and practically validate the effectiveness of our algorithm on both computer vision and natural language processing tasks. We empirically show that our attack is much stronger than previous approaches and thereby and raise people's awareness to rethink the gradients' safety. We also discuss some possible strategies to defend this deep leakage.
# ### Startup
# +
try:
    import breaching
except ModuleNotFoundError:
    # You only really need this safety net if you want to run these notebooks directly in the examples directory
    # Don't worry about this if you installed the package or moved the notebook to the main directory.
    import os; os.chdir("..")
    import breaching
import torch
# %load_ext autoreload
# %autoreload 2
# Redirects logs directly into the jupyter notebook
import logging, sys
logging.basicConfig(level=logging.INFO, handlers=[logging.StreamHandler(sys.stdout)], format='%(message)s')
logger = logging.getLogger()
# -
# ### Initialize cfg object and system setup:
# This will load the full configuration object. This includes the configuration for the use case and threat model as `cfg.case` and the hyperparameters and implementation of the attack as `cfg.attack`. All parameters can be modified below, or overridden with `overrides=` as if they were cmd-line arguments.
# +
cfg = breaching.get_config(overrides=["case=1_single_image_small", "attack=deepleakage"])
device = torch.device(f'cuda') if torch.cuda.is_available() else torch.device('cpu')
torch.backends.cudnn.benchmark = cfg.case.impl.benchmark
setup = dict(device=device, dtype=getattr(torch, cfg.case.impl.dtype))
setup
# -
# ### Modify config options here
# You can use `.attribute` access to modify any of these configurations for the attack, or the case:
# +
cfg.case.data.partition="unique-class"
cfg.case.user.user_idx = 1
cfg.case.user.provide_labels=False # This attack can reconstruct label information via optimization.
# -
# ### Instantiate all parties
# The following lines generate "server", "user" and "attacker" objects and print an overview of their configurations.
user, server, model, loss_fn = breaching.cases.construct_case(cfg.case, setup)
attacker = breaching.attacks.prepare_attack(server.model, server.loss, cfg.attack, setup)
breaching.utils.overview(server, user, attacker)
# ### Simulate an attacked FL protocol
# This exchange is a simulation of a single query in a federated learning protocol. The server sends out a `server_payload` and the user computes an update based on their private local data. This user update is `shared_data` and contains, for example, the parameter gradient of the model in the simplest case. `true_user_data` is also returned by `.compute_local_updates`, but of course not forwarded to the server or attacker and only used for (our) analysis.
server_payload = server.distribute_payload()
shared_data, true_user_data = user.compute_local_updates(server_payload)
user.plot(true_user_data)
# ### Reconstruct user data:
# Now we launch the attack, reconstructing user data based on only the `server_payload` and the `shared_data`.
#
# You can interrupt the computation early to see a partial solution.
reconstructed_user_data, stats = attacker.reconstruct([server_payload], [shared_data], {}, dryrun=cfg.dryrun)
# Next we'll evaluate metrics, comparing the `reconstructed_user_data` to the `true_user_data`.
metrics = breaching.analysis.report(reconstructed_user_data, true_user_data, [server_payload],
                                    server.model, order_batch=True, compute_full_iip=False,
                                    cfg_case=cfg.case, setup=setup)
# And finally, we also plot the reconstructed data:
user.plot(reconstructed_user_data)
# ### Notes
# * The model from the original paper can be retrieved with `case.model=lenet_zhu`
# * This attack often works, but for more challenging models can get stuck in bad local minima. Restarting the attack several times (e.g. by setting `attack.restarts.num_trials=16`) can help here.
|
examples/Deep Leakage from Gradients - Optimization-based Attack - ConvNet CIFAR-10.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Usage guide for the maysics.__init__ module
#
# The __init__ module contains five functions
#
# |Name|Purpose|
# |---|---|
# |covs1d|1-D convolution sum|
# |covs2d|2-D convolution sum|
# |save|save a file|
# |load|load a file|
# |all_same|check whether all array elements are identical|
#
# The __init__ module also re-exports the following classes or functions
#
# |Name|Purpose|Source|
# |---|---|---|
# |linear_r|linear regression|models|
# |preview_file|data preview|preprocess|
# |preview|data preview|preprocess|
# |shuffle|shuffle data|preprocess|
# |circle|draw a circle|utils|
# |discrete|plot a discrete function|utils|
# |grid_net|generate grid points|utils|
#
# See the usage guide of the corresponding module for details
#
# ## Convolution sums: covs1d and covs2d
# Compute the convolution sum between two arrays or two matrices
# <br>1-D case: $z[n]=x[n]*y[n]=\Sigma x[k]y[n-k]$
# <br>essentially a repeated shift-then-sum process
# <br>the 2-D case adds one more shift dimension
#
# ### DEMO 1-1: convolution sum of two 1-D arrays
# +
import maysics as ms
x = [1, 2, 3, 4]
y = [1, 1, 1, 1]
# compute z[2]
ms.covs1d(x, y, 2)
# -
# ### DEMO 1-2: convolution sum of two 2-D arrays
# +
import maysics as ms
x = [[1, 2],
     [3, 4]]
y = [[1, 1],
     [1, 1]]
# compute z[2, 1]
ms.covs2d(x, y, 2, 1)
# -
# <br></br>
# ## Saving and loading files: save and load
# Supported file types: pkl, npy and csv files
#
# ### save function
# save(filename, data, header=None)
# <br>```filename``` is the name of the file to save
# <br>```data``` is the data to be saved
# <br>```header``` only takes effect for csv files; it is the list of column names, one per table column
#
# ### load function
# load(filename, header=True)
# <br>```filename``` is the name of the file to load
# <br>```header``` only takes effect for csv files; True means the first row is dropped when loading
#
# <br></br>
# ## Check whether all array elements are identical: all_same
#
# ### DEMO 2-1: check whether all elements of the 1-D array a are identical
# +
import maysics as ms
a = [1, 2, 2, 3, 4]
ms.all_same(a)
# -
# ### DEMO 2-2: check whether all elements of the 2-D array A are identical
# +
import maysics as ms
A = [[2, 2],
     [2, 2]]
ms.all_same(A)
|
maysics教程/__init__说明.ipynb
|
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .groovy
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: Groovy
// language: groovy
// name: groovy
// ---
// Build a simple form with two text fields and a button that triggers the cell tagged "run".
f = new EasyForm("Form and Run")
f.addTextField("first", 250)
f['first'] = "First"
f.addTextField("last", 250)
f['last'] = "Last"
f.addButton("Go!", "run")
f
// + tags=["run"]
// You can access the values from the form by treating it as an array indexed on the field names:
f['last'].reverse() + '...' + f['first']
// -
// The array works both ways, so you set default values on the fields by writing the array:
f['first'] = 'Beaker'
f['last'] = 'Berzelius'
// A form combining a text field with a larger (500x200) text area.
h = new EasyForm("Form and Run")
h.addTextField("first", 380)
h.addTextArea("Text Area",500,200)
h
// List widgets: single-selection variant and a fixed-height (two visible rows) variant.
g2 = new EasyForm("Field Types")
options = ["a", "b", "c", "d"]
g2.addList("List Single", options, false)
g2.addList("List Two Row", options, 2)
g2
//You can use onInit and onChange to handle component events. For button events use actionPerformed or addAction.
f1 = new EasyForm("Form and Run")
f1.addTextField("first", 15)
f1.addTextField("last", 15).onInit({f1['last'] = "setinit1"}).onChange({text -> f1['first'] = text})
button = f1.addButton("action button")
button.actionPerformed = {f1['last'] = 'action done'}
f1
f1['last']+ ", "+f1['first']
f1['last'] = "new Value"
f1['first'] = "new Value2"
// +
//All Kinds of Fields
g = new EasyForm("Field Types")
g.addTextField("Short Text Field", 10)
g.addTextField("Text Field")
g.addTextArea("Text Area")
g.addCheckBox("Check Box")
options = ["a", "b", "c", "d"]
g.addComboBox("Combo Box", options)
g.addComboBox("Editable Combo", options, true)
g.addList("List", options)
g.addList("List Single", options, false)
g.addList("List Two Row", options, 2)
g.addCheckBoxes("Check Boxes", options)
g.addCheckBoxes("Check Boxes H", options, EasyForm.HORIZONTAL)
g.addRadioButtons("Radio Buttons", options)
g.addRadioButtons("Radio Buttons H", options, EasyForm.HORIZONTAL)
g.addDatePicker("Date")
g.addButton("Go!", "run2")
g
// + tags=["run2"]
// Collect the current value of every field in the form into a map keyed by field name.
result = [:]
g.keySet().each {
    result[it] = g[it]
}
result
// -
// Date picker demo: read the selected date back out of the form.
gdp = new EasyForm("Field Types")
gdp.addDatePicker("Date")
gdp
gdp['Date']
|
demoFiles/EasyFormDemos.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Using FAST.AI for Medical NLP - Step 1 Build a language model
#
# Exploring the MIMIC III data set medical notes.
#
# Tried working with the full dataset, but almost every training step takes many hours (~13 for initial training), predicted 14+ per epoch for fine tuning, and we need to do many epochs.
#
# Instead will try to work with just 10% sample... Not sure that will work though
#
# A few notes:
# * See https://docs.fast.ai/text.transform.html#Tokenizer for details on what various artificial tokens (e.g xxup, xxmaj, etc.) mean
# * To view nicely formatted documentation on the fastai library, run commands like: ` doc(learn.lr_find)`
from fastai.text import *
from sklearn.model_selection import train_test_split
import glob
import gc
# from pympler import asizeof
# If you want to verify that Torch can find and use your GPU, run the following code:
#
# ```python
# import torch
#
# print(torch.cuda.current_device())
# print(torch.cuda.device(0))
# print(torch.cuda.device_count())
# print(torch.cuda.get_device_name(0))
# ```
# These next cells can be used to get an idea of the speed up provided by a GPU for some operations (from https://course.fast.ai/gpu_tutorial.html)
# ```python
# import torch
# t_cpu = torch.rand(500,500,500)
# # %timeit t_cpu @ t_cpu
# # separate cell
# t_gpu = torch.rand(500,500,500).cuda()
# # %timeit t_gpu @ t_gpu
# ```
# +
# Central configuration for this notebook: sampling fractions and file paths.
# original data set too large to work with in reasonable time due to limited GPU resources
pct_data_sample = 0.1
# how much to hold out for validation
valid_pct = 0.1
# pandas doesn't understand ~, so provide full path
base_path = Path.home() / 'mimic'
# files used during processing - all aggregated here
notes_file = base_path/'noteevents.pickle'
lm_file = 'mimic_lm.pickle' # actual file is at base_path/lm_file but due to fastai function, have to pass file name separately
init_model_file = base_path/'mimic_fit_head'
cycles_file = base_path/'num_iterations.pickle'  # pickled int: total fine-tuning epochs completed so far
lm_base_file = 'mimic_lm_fine_tuned_'            # checkpoint name prefix; epoch count is appended
enc_file = 'mimic_fine_tuned_enc'
training_history_file = 'mimic_lm_fine_tune_history'
# -
# if this doesn't free memory, can restart Python kernel.
# if that still doesn't work, try OS items mentioned here: https://docs.fast.ai/dev/gpu.html
def release_mem():
    """Run Python garbage collection, then release cached CUDA memory back to the driver."""
    gc.collect()
    torch.cuda.empty_cache()
# +
# run this to see what has already been imported
#whos
# -
# ### Set Random Number seed for repeatability; set Batch Size to control GPU memory
#
# See **"Performance notes"** section below for how setting batch size impacts GPU memory
seed = 42  # reused for the data sample and the train/validation split so runs are reproducible
# previously used 48; worked fine but never seemed to use even half of GPU memory; 64 still on the small side
bs=96
# While parsing a CSV and converting to a dataframe is pretty fast, loading a pickle file is much faster.
#
# For load time and size comparison:
# * `NOTEEVENTS.csv` is ~ 3.8GB in size
# ```
# CPU times: user 51.2 s, sys: 17.6 s, total: 1min 8s
# Wall time: 1min 47s
# ```
# * `noteevents.pickle` is ~ 3.7 GB in size
# ```
# CPU times: user 2.28 s, sys: 3.98 s, total: 6.26 s
# Wall time: 6.26 s
# ```
# +
# %%time
# Load the MIMIC note events, preferring the (much faster) pickle cache and
# creating it from the raw CSV on first run.
orig_df = pd.DataFrame()
if os.path.isfile(notes_file):
    print('Loading noteevent pickle file')
    orig_df = pd.read_pickle(notes_file)
else:
    print('Could not find noteevent pickle file; creating it')
    # run this the first time to convert CSV to Pickle file
    orig_df = pd.read_csv(base_path/'NOTEEVENTS.csv', low_memory=False, memory_map=True)
    orig_df.to_pickle(notes_file)
# -
# Due to data set size and performance reasons, working with a 10% sample. Use same random seed to get same results from subsequent runs.
df = orig_df.sample(frac=pct_data_sample, random_state=seed)
# +
# if you want to free up some memory
# orig_df = None
# del orig_df
# gc.collect()
# +
# memory-usage probes (require pympler, commented out by default)
#print('df:', int(asizeof.asizeof(df) / 1024 / 1024), 'MB')
#print('orig_df:', asizeof.asizeof(orig_df))
#print('data_lm:', asizeof.asizeof(data_lm, detail=1))
#print asizeof.asized(obj, detail=1).format()
# -
df.head()
df.dtypes
df.shape
# Code to build initial version of language model; If running with full dataset, requires a **LOT** of RAM; using a **LOT** of CPU helps it to happen quickly as well
#
# **Note:** By default, this only tracks up to 60,000 tokens (words usually). In my testing that is sufficient to get high accuracy
#
# Questions:
#
# * why does this only seem to use CPU? (applies to both both textclasdatabunch and textlist)
# * for 100% of the mimic noteevents data:
# * run out of memory at 32 GB, error at 52 GB, trying 72GB now... got down to only 440MB free; if crash again, increase memory
# * now at 20vCPU and 128GB RAM; ok up to 93%; got down to 22GB available
# * succeeded with 20CPU and 128GB RAM...
# * try smaller batch size? will that reduce memory requirements?
# * with 10% dataset sample, it seems I could get by with perhaps 32GB system RAM
#
# For comparison:
# * 10% language model is ~ 1.2 GB in size
# * Time to load existing language model:
# ```
# CPU times: user 3.29 s, sys: 844 ms, total: 4.14 s
# Wall time: 12.6 s
# ```
# * Time to build language model:
# ```
# CPU times: user 36.9 s, sys: 8.56 s, total: 45.4 s
# Wall time: 3min 27s
# ```
# * 100% language model is...
# * Time to load existing language model:
# * Time to build language model:
# +
# %%time
# Build (or reload) the fastai language-model databunch from the sampled notes.
tmpfile = base_path/lm_file
if os.path.isfile(tmpfile):
    print('loading existing language model')
    data_lm = load_data(base_path, lm_file, bs=bs)
else:
    print('creating new language model')
    data_lm = (TextList.from_df(df, base_path, cols='TEXT')
    #df has several columns; actual text is in column TEXT
                .split_by_rand_pct(valid_pct=valid_pct, seed=seed)
    #We randomly split and keep 10% for validation
                .label_for_lm()
    #We want to do a language model so we label accordingly
                .databunch(bs=bs))
    data_lm.save(tmpfile)
# -
# If need to view more data, run appropriate line to make display wider/show more columns...
# ```python
# # default 20
# pd.get_option('display.max_columns')
# pd.set_option('display.max_columns', 20)
# pd.set_option('display.max_columns', None) # show all
# # default 50
# pd.get_option('display.max_colwidth')
# pd.set_option('display.max_colwidth', -1) # show all
# ```
data_lm.show_batch()
# how to look at original version of text
#df[df['TEXT'].str.contains('being paralyzed were discussed', case=False)].TEXT
# as of June 2019, this automatically loads and initializes the model based on WT103 from
# https://s3.amazonaws.com/fast-ai-modelzoo/wt103-fwd.tgz; will auto download if not already on disk
learn = language_model_learner(data_lm, AWD_LSTM, drop_mult=0.3)
release_mem()
# ### Generate Learning rate graph.
learn.lr_find()
learn.recorder.plot(skip_end=15)
# ### Initial model training
#
# Time to run:
#
# * Full data set took about 13 hours using the Nvidia P1000
# * Full data set was predicted to take about 25 hours with the T4
# * 10% data took about 1 hour (1:08) using the Nvidia P1000
# * 10% data is predicted to take about 2.5 hour (actual 2:42) using the Nvidia GTX 1060
#
release_mem()
# Results from first time run - subsequent runs will just reload the same learner
#
#     epoch  train_loss  valid_loss  accuracy  time
#     0      2.371173    2.207830    0.562977  1:08:52
#
# no idea how long nor how much resources this will take
# not sure 1e-2 is the right learning rate; maybe 1e-1 or between 1e-2 and 1e-1
# using t4
#   progress bar says this will take around 24 hours... ran for about 52 minutes
#   gpustat/nvidia-smi indicates currently only using about 5GB of GPU RAM
# using p100
#   progress bar says this will take around 12 hours; took 13:16
#   at start GPU using about 5GB RAM
#   after about 8 hours GPU using about 7.5GB RAM.
#   looks like I could increase batch size...
#   with bs=64, still only seems to be using about 7GB GPU RAM after running for 15 minutes.
#   will check after a bit, but likely can increase batch size further
#
# note about number of epochs/cycle length: Using a value of 1 does a rapid increase and
# decrease of learning rate and end result gets almost the same result as 2 but in half
# the time
# Load the saved head-training checkpoint if present (fastai appends '.pth'),
# otherwise run one one-cycle epoch to train the new head and save the result.
if os.path.isfile(str(init_model_file) + '.pth'):
    learn.load(init_model_file)
    print('loaded learner')
else:
    learn.fit_one_cycle(1, 5e-2, moms=(0.8,0.7),
                callbacks=[
                    callbacks.CSVLogger(learn, filename=training_history_file, append=True)
                ])
    learn.save(init_model_file)
    print('generated new learner')
release_mem()
# continue from initial training - reload in case just want to continue processing from here.
#
# As an FYI pytorch automatically appends .pth to the filename, you cannot provide it
# +
#learn = language_model_learner(data_lm, AWD_LSTM, drop_mult=0.3)
#learn.load(init_model_file)
#print('done')
# -
learn.show_results()
# If you manually want to set the number of previous cycles, run something like this:
#
# ```python
# with open(cycles_file, 'wb') as f:
#     pickle.dump(8, f)
# ```
# +
# Read back how many fine-tuning epochs have been completed so far (if any).
prev_cycles = 0
if os.path.isfile(cycles_file):
    with open(cycles_file, 'rb') as f:
        prev_cycles = pickle.load(f)
print('This model has been trained for', prev_cycles, 'epochs already')
# -
# Warn about leftover per-epoch auto-save checkpoints from interrupted runs.
rfiles = sorted(glob.glob(str(base_path/'*_auto_*')))
if rfiles:
    print('There are pre-existing automatic save states. Remove these files if no longer needed.')
    for f in rfiles:
        print(f)
# ### Now fine tune language model
#
# Performance notes w/P100 GPU:
#
# * at batch size of 128 takes about 1:14:00 per epoch; GPU usage is about 14GB; RAM usage is about 10GB
# * at batch size of 96 takes about 1:17:00 per epoch; GPU usage is about 9GB; RAM usage is about 10GB
# * at batch size of 48 takes about 1:30:00 per epoch; GPU usage is about 5GB; RAM usage is about 10GB
#
# With `learn.fit_one_cycle(8, 5e-3, moms=(0.8,0.7))` (8 cycles)
# * gets from about 62.7% accuracy to 67.6% accuracy
# * Total time: 9:54:16
#
#
# epoch train_loss valid_loss accuracy time
# 0 1.926960 1.832659 0.627496 1:14:14
# 1 1.808083 1.755725 0.637424 1:14:15
# 2 1.747903 1.697741 0.645431 1:14:15
# 3 1.714081 1.652703 0.652703 1:14:19
# 4 1.637801 1.602961 0.660170 1:14:15
# 5 1.596906 1.553225 0.668557 1:14:14
# 6 1.572020 1.519172 0.674477 1:14:26
# 7 1.517364 1.510010 0.676342 1:14:14
#
#
# With `learn.fit_one_cycle(num_cycles, 5e-3, moms=(0.8,0.7)` (10 cycles)
# * batch size `bs=96`
# * Total time: 12:17:26
#
#
# epoch train_loss valid_loss accuracy time
# 0 1.876292 1.813362 0.630908 1:13:40
# 1 1.816879 1.770555 0.635667 1:13:41
# 2 1.833764 1.769055 0.635783 1:13:45
# 3 1.765977 1.729675 0.641041 1:13:43
# 4 1.672098 1.683195 0.648317 1:13:52
# 5 1.639705 1.637336 0.655466 1:13:43
# 6 1.600122 1.589719 0.663033 1:13:45
# 7 1.529386 1.546841 0.670321 1:13:43
# 8 1.527369 1.518421 0.675460 1:13:41
# 9 1.512422 1.511458 0.676779 1:13:42
#
# completed 10 new training epochs
# completed 10 total training epochs
#
# Interesting to note, training for fewer epochs with the one cycle policy results in faster training. In either case, as the validation loss is still improving, can continue to train more to improve model.
def custom_learner_load(lf):
    """Load a saved checkpoint into the notebook-global `learn` and return it.

    lf: checkpoint path/name without the '.pth' extension (fastai's
        `learn.load` appends the extension itself).

    Raises FileNotFoundError when the checkpoint is missing instead of the
    original `assert(False)` — asserts are stripped under `python -O`, so the
    script could previously continue with a stale learner.
    """
    if os.path.isfile(str(lf) + '.pth'):
        learn.load(lf)
        print('loaded existing learner from', str(lf))
    else:
        # should not continue as could not find specified file
        print('existing learner file (', lf, ') not found, cannot continue')
        print('previous epoch may have only partially completed')
        print(' --- try updating prev_cycles to match or copy file to correct name.')
        raise FileNotFoundError(str(lf) + '.pth')
    return learn
# +
# Fine-tune the language model, resuming from per-epoch auto-saves when a
# previous run was interrupted. State flows through notebook globals
# (prev_cycles, learner_file, callback_save_file, start_epoch).
# if want to continue training existing model, set to True
# if want to start fresh from the initialized language model, set to False
# also, make sure to remove any previously created saved states before changing
# flag back to continue
continue_flag = True
# Resume interrupted training - should be able to leave as True
resume_flag = True
########################################################
learn = language_model_learner(data_lm, AWD_LSTM, drop_mult=0.3)
########################################################
# set this to how many cycles you want to run
num_cycles = 5
########################################################
if continue_flag:
    if os.path.isfile(cycles_file):
        with open(cycles_file, 'rb') as f:
            prev_cycles = pickle.load(f)
        print('This model has been trained for', prev_cycles, 'epochs already')
    else:
        prev_cycles = 0
# NOTE(review): `file` shadows a (Python 2) builtin name; harmless here but worth renaming.
file = lm_base_file + str(prev_cycles)
learner_file = base_path/file
callback_save_file = str(learner_file) + '_auto'
fn_pattern = callback_save_file + '*'
# for one cycle learning with learning rate annealing - where to resume from
start_epoch = 0
if resume_flag:
    training_files = glob.glob(str(base_path/fn_pattern))
    if len(training_files) > 0:
        training_files.sort()
        # auto-save names end in the epoch number, e.g. '..._auto_3.pth';
        # NOTE(review): the split pattern should ideally be a raw string r'_|\.'
        completed_cycles = int(re.split('_|\.', training_files[-1])[-2])
        if completed_cycles < (num_cycles - 1):
            # need to load the last file
            print('Previous training cycle of', num_cycles, 'did not complete; finished',
                  completed_cycles + 1, 'cycles. Loading last save...')
            # load just filename, drop extension of .pth as that is automatically appended inside load function
            learn.load(os.path.splitext(training_files[-1])[0])
            start_epoch = completed_cycles + 1
        else:
            print('Previous training cycle of', num_cycles, 'completed fully.')
            learn = custom_learner_load(learner_file)
    else:
        print('No auto save files exist from interupted training.')
        if continue_flag:
            learn = custom_learner_load(learner_file)
        else:
            print('Starting training with base language model')
else:
    if continue_flag:
        learn = custom_learner_load(learner_file)
    else:
        print('Starting training with base language model')
# remove any auto saves
training_files = glob.glob(str(base_path/fn_pattern))
if len(training_files) > 0:
    for f in training_files:
        print('Deleting', f)
        os.remove(f)
# Unfreeze all layers so fine-tuning updates the full AWD-LSTM, not just the head.
learn.unfreeze()
#learn.fit_one_cycle(num_cycles, 5e-3, moms=(0.8,0.7),
learn.fit_one_cycle(num_cycles, 5e-3, moms=(0.8,0.7),
                callbacks=[
                    callbacks.SaveModelCallback(learn, every='epoch', monitor='accuracy', name=callback_save_file),
                    # CSVLogger only logs when num_cycles are complete
                    callbacks.CSVLogger(learn, filename=training_history_file, append=True),
                    callbacks.EarlyStoppingCallback(learn, monitor='accuracy', min_delta=0.0025, patience=5)
                ],
                start_epoch=start_epoch)
# Persist the new checkpoint and the updated total-epoch counter.
file = lm_base_file + str(prev_cycles + num_cycles)
learner_file = base_path/file
learn.save(learner_file)
with open(cycles_file, 'wb') as f:
    pickle.dump(num_cycles + prev_cycles, f)
release_mem()
print('completed', num_cycles, 'new training epochs')
print('completed', num_cycles + prev_cycles, 'total training epochs')
# -
# -
# ### Evaluate different learning rates.
#
# Use this block of code to compare how well a few different learning rates work
#
# Found that `5e-3` works best with `learn.unfreeze()`
#
# ```python
# num_cycles = 4
# prev_cycles = 4
#
# #for lr in [5e-6, 1e-5, 5e-5, 1e-4, 5e-4, 1e-3, 5e-3, 1e-2, 5e-2, 1e-1]:
# for lr in [5e-6, 1e-5, 5e-5, 1e-4, 5e-4, 1e-3, 5e-3]:
# print('now testing with multiple epochs and learning rate of', lr)
# print('This model has been trained for', prev_cycles, 'epochs already')
# file = lm_base_file + str(prev_cycles)
# learner_file = base_path/file
# learn.load(learner_file)
# learn.unfreeze()
# print('loaded existing learner from', str(learner_file))
#
#
# learn.fit_one_cycle(num_cycles, lr, moms=(0.8,0.7))
# file = lm_base_file + str(prev_cycles + num_cycles + 1)
# learner_file = base_path/file
# learn.save(learner_file)
# release_mem()
#
# print('completed', num_cycles, 'new training epochs')
# print('completed', num_cycles + prev_cycles, 'total training epochs')
# ```
# Re-evaluate learning rate now that we've trained some
learn = language_model_learner(data_lm, AWD_LSTM, drop_mult=0.3)
learn.load(base_path/'mimic_lm_fine_tuned_10')
print('loaded learner') # if don't print this then jupyter will display too much details about learner
learn.lr_find()
learn.recorder.plot(skip_end=15)
# test the language generation capabilities of this model (not the point, but is interesting)
TEXT = "For confirmation, she underwent CTA of the lung which was negative for pulmonary embolism"
N_WORDS = 40
N_SENTENCES = 2
print("\n".join(learn.predict(TEXT, N_WORDS, temperature=0.75) for _ in range(N_SENTENCES)))
# Save only the encoder weights for reuse by the downstream classifier notebook.
learn.save_encoder(enc_file)
# To load the encoder:
#
# ```python
# learn = language_model_learner(data_lm, AWD_LSTM, drop_mult=0.3)
# learn.load_encoder(enc_file)
# ```
learn.summary()
learn.model
# see if learning rate has changed with training
learn.unfreeze()
learn.lr_find()
learn.recorder.plot(skip_end=15)
learn.unfreeze()
|
sourcecode/mimic_nlp_lm.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import sys
sys.path.insert(0, '..')  # make the local bcb package importable when running from the notebooks/ directory
# NOTE(review): datetime and pandas are imported but not used directly in this cell
from datetime import datetime
import pandas as pd
from bcb import sgs
# Fetch the last 5 monthly observations of SGS series 433 (labeled 'IPCA')
# from the Brazilian Central Bank time-series service.
df = sgs.get({'IPCA': 433}, last=5, freq='M')
df
|
notebooks/sgs get series with period index.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import torch
import torchvision
import torchvision.transforms as transforms
import csv
import matplotlib.pyplot as plt
# keras imports
import keras
from keras import backend as K
backend_keras = keras.backend.backend()
print("keras is using", backend_keras,"as the backend")
# import a whole bunch of other packeges
import matplotlib
matplotlib.use("Agg")
from sklearn.preprocessing import LabelBinarizer
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from keras.models import Sequential
from keras.layers.core import Dense
from keras.optimizers import SGD
from imutils import paths
import matplotlib.pyplot as plt
import numpy as np
import argparse
import random
import keras
import pickle
import cv2
import os
# +
def show_image(arr, shape=(28, 28)):
    """Render a flattened, [0, 1]-normalized image array as a grayscale picture.

    arr   : array-like of pixel values in [0, 1] (as produced by the
            normalization cell below), reshapable to `shape`.
    shape : target 2-D image shape. Defaults to (28, 28) to match this
            notebook's data; previously hard-coded, now a parameter so the
            helper generalizes to other image sizes.
    """
    # Scale back to the 0-255 byte range that imshow displays cleanly.
    two_d = (np.reshape(arr, shape)*255).astype(np.uint8)
    plt.imshow(two_d, cmap='Greys', interpolation='nearest')
    plt.show()

train_images = np.load("../train_images.npy")
# -
# Transform and normalize images??
# +
# load the training labels into a dictionary,
# also load it into a list, haven't decided which one is better yet
labels_dic = {}
labels = []
with open('../train_labels.csv') as csvDataFile:
    csvReader = csv.reader(csvDataFile)
    for row in csvReader:
        try:
            labels_dic[int(row[0])] = int(row[1])
            labels.append(int(row[1]))
        # Only skip rows that genuinely fail to parse (header row, short rows).
        # The original bare `except:` also swallowed unrelated errors such as
        # NameError and KeyboardInterrupt.
        except (ValueError, IndexError): print(row)
len(labels_dic)
# +
# Rescale raw pixel values into [0, 1] and one-hot encode the class labels.
train_images = np.array(train_images, dtype="float") / 255.0
labels = np.array(labels)
# Row i of the 10x10 identity matrix is exactly the one-hot vector for
# digit class i, so fancy-indexing np.eye replaces the original append loop
# while producing an identical (n, 10) float array.
train_labels = np.eye(10)[labels]
# -
print(labels.shape,train_images.shape)
# flatten and train
train_images_flatten = np.array([i.flatten("C") for i in train_images])
train_images_flatten.shape
# ## Creating keras model
#
# following this guid
# https://medium.com/@pallawi.ds/ai-starter-train-and-test-your-first-neural-network-classifier-in-keras-from-scratch-b6a5f3b3ebc4
# +
# define the 784-350-200-10 architecture using keras
model = Sequential()
# we construct our nn architecture - a feedforward nn
# our input layer has 28 x 28 x 1 = 784 raw pixels
model.add(Dense(350, input_shape=(784,), activation="sigmoid"))
model.add(Dense(200, activation="sigmoid"))
# softmax output: one probability per digit class 0-9
model.add(Dense(10, activation="softmax"))
print("printing summary of model")
model.summary()
# -
# ### Compile the model
#
# You can compile a network (model) as many times as you want. You need to compile the model if you wish to change the loss function, optimizer or matrices.
#
# You need a compiled model to train (because training uses the loss function and the optimizer). But it’s not necessary to compile the model when testing the model on a new data.
# +
# initialize our initial learning rate and # of epochs to train for
INIT_LR = 0.01
EPOCHS = 100
# compile the model using SGD as our optimizer and categorical
# cross-entropy loss (if you only have two classes use binary_crossentropy)
print("[INFO] training network...")
opt = SGD(lr=INIT_LR) # stochastic gradient descent
model.compile(loss="categorical_crossentropy", optimizer=opt,
	metrics=["accuracy"])
# -
# split the train into a train and valid set
# NOTE(review): this is a positional 80/20 split with no shuffling; it is only
# valid if the rows are not ordered by class — confirm against the data source.
ratio = 0.8
cut = int(ratio*len(train_images_flatten))
trainX = train_images_flatten[:cut]
trainY = train_labels[:cut]
valX = train_images_flatten[cut:]
valY = train_labels[cut:]
train_labels.shape
# train the neural network
H = model.fit(trainX, trainY,
	validation_data=(valX, valY),
	epochs=EPOCHS, batch_size=32)
# +
#evaluate the network
print("[INFO] evaluating network...")
# NOTE(review): predictions are computed on trainX here, while the commented
# classification_report below compares against valY — mismatched sets; use one
# consistently before re-enabling the report.
predictions = model.predict(trainX, batch_size=32)
#Uncomment to see the predicted probability for each class in every test image
# print ("predictions---------------->",predictions)
#Uncomment to print the predicted labels in each image
# print("predictions.argmax(axis=1)",predictions.argmax(axis=1))
### print the performance report of the prediction
#print(classification_report(valY.argmax(axis=1),
#	predictions.argmax(axis=1),
#	target_names=[str(i) for i in range(10)]))
# plot the training loss and accuracy for each epoch
N = np.arange(0, EPOCHS)
plt.style.use("ggplot")
plt.figure()
plt.plot(N, H.history["loss"], label="train_loss")
plt.plot(N, H.history["val_loss"], label="val_loss")
plt.plot(N, H.history["accuracy"], label="train_acc")
plt.plot(N, H.history["val_accuracy"], label="val_acc")
plt.title("Training Loss and Accuracy (simple_multiclass_classifcation)")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend()
plt.savefig("training_performance.png")
# -
# ## save the model
# save the model to disk
print("[INFO] serializing network...")
model.save("keras_two_hidden_layer_784_350_200_10_795_valmodel.model")
# import the necessary packages
# NOTE(review): argparse/pickle/cv2/os are imported here but not used below
from keras.models import load_model
import argparse
import pickle
import cv2
import os
import pandas as pd
# reload the just-saved model (demonstrates round-tripping through disk)
model = load_model("keras_two_hidden_layer_784_350_200_10_795_valmodel.model")
# +
x_test = np.load('../test_images.npy').squeeze()
x_test = x_test.reshape(len(x_test), -1)
# Apply the same preprocessing as training: the training images were scaled
# into [0, 1] (see the normalization cell above), so the test images must be
# too — otherwise the network sees inputs ~255x larger than it was trained on.
x_test = np.array(x_test, dtype="float") / 255.0
y_test = model.predict(x_test)
df_test = pd.read_csv('submission_2.csv')
# predict() returns an (n, 10) probability matrix for the softmax output;
# the submission column needs the predicted class index, not the raw matrix.
df_test['label'] = y_test.argmax(axis=1)
df_test.to_csv('submission_2.csv', index=False)
# -
|
keras_single_hidden_layer/keras-single-hidden-layer-architecure.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Load essential libraries
import csv
import numpy as np
import matplotlib.pyplot as plt
import statistics
import numpy as np
from scipy.signal import butter, lfilter, freqz
from IPython.display import Image
from datetime import datetime
# +
# Accumulator lists filled by the CSV-parsing cell below.
# NOTE(review): `time` shadows the stdlib module name if it is ever imported.
# Time and robot egomotion
time = []
standardized_time = []
standardized_time2 = []
compass_heading = []
speed = []
# sonde data
temp = []
PH = []
cond = [] # ms
chlorophyll = []
ODO = [] # mg/L
sonar = []
angular_z = []
# wp data
wp_time = []
wp_seq = []
# +
initial_time = None
time_crop = 4000  # keep only the first 4000 seconds of the run
# File loading from relative path
file = '../../../Data/ISER2021/Sunapee-20200715-path-3.csv'
# File loading from relative path
file2 = '../../../Data/ISER2021/Sunapee-20200715-path-3-mavros.csv'
# original data
# Parse the main log. Column indices below (0 = timestamp, 4 = compass,
# 10 = speed, 18 = angular z, 23-30 = sonde channels, 8 = sonar) are assumed
# from the CSV layout — confirm against the header row if the format changes.
with open(file, 'r') as csvfile:
    csvreader= csv.reader(csvfile, delimiter=',')
    header = next(csvreader)
    for row in csvreader:
        # robot data
        if initial_time is None:
            initial_time = float(row[0])
        current_time = float(row[0])
        #if current_time - initial_time >= 700 and current_time - initial_time < 1000:
        if current_time - initial_time <= time_crop:
            time.append(float(row[0]))
            compass_heading.append(float(row[4]))
            speed.append(float(row[10]))
            angular_z.append(float(row[18]))
            # sonde data
            temp.append(float(row[23]))
            PH.append(float(row[26]))
            cond.append(float(row[25]))
            chlorophyll.append(float(row[29]))
            ODO.append(float(row[30]))
            sonar.append(float(row[8]))
    # Re-zero all timestamps relative to the earliest kept sample.
    minimum_time = min(time)
    for time_stamp in time:
        standardized_time.append(time_stamp - minimum_time)
# wp data
# Parse the mavros waypoint log, cropped to the same window and re-zeroed on
# the same minimum_time so both series share one time axis.
with open(file2, 'r') as csvfile2:
    csvreader2 = csv.reader(csvfile2, delimiter=',')
    header = next(csvreader2)
    for row in csvreader2:
        current_time = float(row[0])
        if current_time - initial_time <= time_crop:
            wp_time.append(float(row[0]))
            wp_seq.append(float(row[1]))
    for time_stamp in wp_time:
        standardized_time2.append(time_stamp - minimum_time)
# +
# collision time around 790
# -
# ### Compass heading
# +
# Compass heading over time; dashed vertical lines mark waypoint times.
fig, ax1 = plt.subplots()
ax1.plot(standardized_time, compass_heading, label='compass heading')
ax1.set_xlabel('Time [sec]', fontsize=16)
ax1.set_ylabel('Heading [degree]', fontsize=16)
ax1.legend()
for waypoint_ts in standardized_time2:
    plt.axvline(x=waypoint_ts, color='gray', linestyle='--')
plt.show()
# +
# Figure initialization
# Forward ground speed over time; dashed lines mark waypoint times.
fig, ax1 = plt.subplots()
ax1.set_xlabel('Time [sec]', fontsize=16)
ax1.set_ylabel('ground_speed_x [m/s]', fontsize=16)
ax1.plot(standardized_time, speed, label='ground_speed_x', color='m')
ax1.legend()
for wp in standardized_time2:
    plt.axvline(x=wp, color='gray', linestyle='--')
plt.show()
# +
# Figure initialization
# Yaw rate (angular velocity about z) over time.
fig, ax1 = plt.subplots()
ax1.set_xlabel('Time [sec]', fontsize=16)
ax1.set_ylabel('angular_z [rad/s]', fontsize=16)
ax1.plot(standardized_time, angular_z, label='angular_z', color='r')
ax1.legend()
for wp in standardized_time2:
    plt.axvline(x=wp, color='gray', linestyle='--')
plt.show()
# -
# ### Temperature
# +
# Figure initialization
# Water temperature over time, plus summary statistics.
fig, ax1 = plt.subplots()
ax1.set_xlabel('Time [sec]', fontsize=16)
ax1.set_ylabel('Temperature [degree]', fontsize=16)
ax1.plot(standardized_time, temp, label='temp', color='k')
ax1.legend()
for wp in standardized_time2:
    plt.axvline(x=wp, color='gray', linestyle='--')
plt.show()
print("Standard Deviation of the temp is % s " %(statistics.stdev(temp)))
print("Mean of the temp is % s " %(statistics.mean(temp)))
# -
# ### PH
# +
# Figure initialization
# Water pH over time, plus summary statistics.
fig, ax1 = plt.subplots()
ax1.set_xlabel('Time [sec]', fontsize=16)
ax1.set_ylabel('PH', fontsize=16)
ax1.plot(standardized_time, PH, label='PH', color='r')
ax1.legend()
for wp in standardized_time2:
    plt.axvline(x=wp, color='gray', linestyle='--')
plt.show()
# Fixed copy-paste bug: these messages previously said "temp" but report PH.
print("Standard Deviation of the PH is % s " %(statistics.stdev(PH)))
print("Mean of the PH is % s " %(statistics.mean(PH)))
# -
# ### Conductivity
# +
# Figure initialization
# Water conductivity over time, plus summary statistics.
fig, ax1 = plt.subplots()
ax1.set_xlabel('Time [sec]', fontsize=16)
ax1.set_ylabel('Conductivity [ms]', fontsize=16)
ax1.plot(standardized_time, cond, label='conductivity', color='b')
ax1.legend()
for wp in standardized_time2:
    plt.axvline(x=wp, color='gray', linestyle='--')
plt.show()
# Fixed copy-paste bug: these messages previously said "chlorophyll" but
# report conductivity (`cond`).
print("Standard Deviation of the conductivity is % s " %(statistics.stdev(cond)))
print("Mean of the conductivity is % s " %(statistics.mean(cond)))
# -
# ### Chlorophyll
# +
# Figure initialization
# Chlorophyll concentration over time, plus summary statistics.
fig, ax1 = plt.subplots()
ax1.set_xlabel('Time [sec]', fontsize=16)
ax1.set_ylabel('chlorophyll [RFU]', fontsize=16)
ax1.plot(standardized_time, chlorophyll, label='chlorophyll', color='g')
ax1.legend()
for wp in standardized_time2:
    plt.axvline(x=wp, color='gray', linestyle='--')
plt.show()
print("Standard Deviation of the chlorophyll is % s " %(statistics.stdev(chlorophyll)))
print("Mean of the chlorophyll is % s " %(statistics.mean(chlorophyll)))
# -
# ### ODO
# +
# Figure initialization
# ODO (dissolved oxygen, mg/L) over time, plus summary statistics.
fig, ax1 = plt.subplots()
ax1.set_xlabel('Time [sec]', fontsize=16)
ax1.set_ylabel('ODO [mg/L]', fontsize=16)
ax1.plot(standardized_time, ODO, label='ODO', color='m')
ax1.legend()
for wp in standardized_time2:
    plt.axvline(x=wp, color='gray', linestyle='--')
plt.show()
print("Standard Deviation of the DO is % s " %(statistics.stdev(ODO)))
print("Mean of the DO is % s " %(statistics.mean(ODO)))
# -
# ### Sonar depth
# +
# Figure initialization
# Sonar depth over time; dashed lines mark waypoint times.
fig, ax1 = plt.subplots()
ax1.set_xlabel('Time [sec]', fontsize=16)
ax1.set_ylabel('sonar [m]', fontsize=16)
ax1.plot(standardized_time, sonar, label='sonar', color='c')
ax1.legend()
for wp in standardized_time2:
    plt.axvline(x=wp, color='gray', linestyle='--')
plt.show()
# -
|
Jupyter_notebook/ISER2021/Path 3/20200715-Sunapee-path3.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Fast generation of healthy and unhealthy assessment counts by period
# +
# %matplotlib inline
import math
import numpy as np
from numba import njit
import matplotlib.pyplot as plt
from exetera.core.session import Session
from exetera.core.utils import Timer
from exetera.processing.date_time_helpers import\
get_periods, generate_period_offset_map, get_days, get_period_offsets
# -
# ### Helper functions
def human_readable_date(date):
    '''
    Convert a timestamp (int or float seconds since the epoch) or a
    datetime object to a "YYYY-MM-DD" string.
    '''
    # Accept int as well as float epoch timestamps (the original only
    # handled float, so integer timestamps hit strftime and failed).
    if isinstance(date, (int, float)):
        date = datetime.fromtimestamp(date)
    return date.strftime("%Y-%m-%d")
# ## Fill in these parameters
# +
from datetime import datetime, timedelta

# TODO: fill in these parameters before running the rest of the notebook.
# (The original cell assigned nothing after the `=`, which is a syntax error.)
filename = ''  # path to the ExeTera dataset file
start_dt = datetime(2020, 3, 29)         # the starting datetime
end_dt = start_dt + timedelta(weeks=26)  # the ending datetime
# -
# ## Generate the summaries by seven day period
# ### Generate the seven day periods corresponding to the start and end dates
# Split the date range into consecutive seven-day periods.
start_ts = start_dt.timestamp()
end_ts = end_dt.timestamp()
# get_periods walks backwards (-1) from end_dt, so reverse for chronological order.
periods = get_periods(end_dt, start_dt, 'week', -1)
periods.reverse()
print("Weekly periods from {} to {}".format(human_readable_date(periods[0]),
                                            human_readable_date(periods[-1])))
# ### Create the Session object
# Note, you can also use `with Session() as s:` if you don't mind opening the session in each cell
s = Session() # Open the ExeTera session
src = s.open_dataset(filename, 'r', 'src') # Open the dataset with read-only 'r' mode
assessment_df = src['assessments'] # Get the dataframe named 'assessments'
# ### Get the timestamp for each user signup
with Timer("Fetching assessment 'created_at' values"): # Record the time usage
    created_at_dates = assessment_df['created_at'].data[:] # Load data from 'created_at' field into memory
# ### Calculate on what day (relative to the start of the first period) each user signed up
# `get_days` also returns a filter indicating whether a given record is within the date range of interest
with Timer("Calculating day offsets for assessments"):
    # Converts a field of timestamps into a field of relative elapsed days
    created_at_days, inrange = get_days(created_at_dates,
                                        start_date=periods[0].timestamp(),
                                        end_date=periods[-1].timestamp())
# ### Clear the days that fall outside of the specified range
with Timer("Filter out days that fall outside of the specified range"):
    created_at_days = created_at_days[inrange]
# ### Map the days to their corresponding periods
# We generate the map using `generate_period_offset_map` and then pass it to `generate_period_offsets`
with Timer("Convert from days to periods"):
    # Each day offset is mapped to the index of the weekly period containing it.
    created_at_periods = get_period_offsets(generate_period_offset_map(periods),
                                            created_at_days)
# cat_counts = np.unique(cat_period, return_counts=True)
# ### Generate 'healthy' and 'unhealthy' assessment filters
# Consider assessments with no health status to be 'healthy'
with Timer("Generate healthy and unhealthy status arrays"):
    unhealthy = assessment_df['health_status'].apply_filter(inrange) == 2 # Filter assessments according to data value defined in scheme
    healthy = assessment_df['health_status'].apply_filter(inrange) != 2
# ### Summarise unhealthy and healthy by period
with Timer("Summarise unhealthy and healthy by period"):
    healthy_counts = np.unique(created_at_periods[healthy.data[:]], return_counts=True) # Count number of healthy assessments in each period
    # Scatter the (period, count) pairs into a dense per-period array.
    all_healthy_counts = np.zeros(len(periods), dtype=np.int32)
    for k, v in zip(healthy_counts[0], healthy_counts[1]):
        all_healthy_counts[k] = v
    unhealthy_counts = np.unique(created_at_periods[unhealthy.data[:]], return_counts=True) # Count number of unhealthy assessments
    all_unhealthy_counts = np.zeros(len(periods), dtype=np.int32)
    for k, v in zip(unhealthy_counts[0], unhealthy_counts[1]):
        all_unhealthy_counts[k] = v
# ## Generate the charts for healthy / unhealthy assessments
# +
width = 1
# One bar position per weekly period.
widths = [width * d for d in range(len(periods))]
fig, ax = plt.subplots(2, 1, figsize=(10, 10))
# Top chart: stacked healthy/unhealthy counts per week.
negtests = ax[0].bar(widths, all_healthy_counts)
postests = ax[0].bar(widths, all_unhealthy_counts, bottom=all_healthy_counts)
ax[0].set_title("Assessment counts by week")
ax[0].set_xticks(np.arange(len(periods)-1))
ax[0].set_xticklabels([human_readable_date(d) for d in periods[:-1]], rotation=270)
# Relabel the y-axis in millions (ticks at 0, 1e6, ..., 9e6 labelled 0-9).
ax[0].set_yticks(np.arange(10) * 1000000)
ax[0].set_yticklabels(i for i in range(10))
ax[0].legend((negtests, postests), ("'Healthy'", "'Unhealthy'"))
ax[0].set_xlabel("Week starting")
ax[0].set_ylabel("Million assessments per week")
# Bottom chart: unhealthy fraction per week (guard against divide-by-zero
# weeks by substituting a denominator of 1).
all_counts = all_unhealthy_counts + all_healthy_counts
all_counts = np.where(all_counts == 0, 1, all_counts)
pos_fraction = all_unhealthy_counts / all_counts
pfbar = ax[1].bar(widths, pos_fraction, color="#ff7f0e")
ax[1].set_title("'Unhealthy' assessments as a fraction of assessments by week")
ax[1].set_xticks(np.arange(len(periods)-1))
ax[1].set_xticklabels([human_readable_date(d) for d in periods[:-1]], rotation=270)
ax[1].legend((pfbar,), ("Positive test fraction",))
ax[1].set_xlabel("Week starting")
ax[1].set_ylabel("'Unhealthy' assessment fraction")
fig.tight_layout(h_pad=2.5)
plt.show()
# -
# Close the session manually; not needed if opening the session using 'with' statement.
s.close()
|
notebooks/weekly_assessment_summary_fast.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="W8QuvjRYElkS" colab_type="code" colab={}
# %matplotlib inline
from fastai.basics import *
# + [markdown] id="E4UhSszXElkg" colab_type="text"
# ## MNIST SGD
# + [markdown] id="eL-IsnV9Elkq" colab_type="text"
# Get the 'pickled' MNIST dataset from http://deeplearning.net/data/mnist/mnist.pkl.gz. We're going to treat it as a standard flat dataset with fully connected layers, rather than using a CNN.
# + id="HX9JRSDWElks" colab_type="code" colab={}
path = Config().data_path()/'mnist'
# + id="pOka03FjElkz" colab_type="code" colab={} outputId="ea19e01c-eef4-409e-c350-089a5e90a96a"
path.ls()
# + id="eRftin7_EllA" colab_type="code" colab={}
# The pickle holds three (images, labels) splits: train, validation, test
# (the test split is discarded here).
with gzip.open(path/'mnist.pkl.gz', 'rb') as f:
    ((x_train, y_train), (x_valid, y_valid), _) = pickle.load(f, encoding='latin-1')
# + id="yJ8en2npEllH" colab_type="code" colab={} outputId="abb0f804-6f9a-4234-ae95-129a0369623a"
# Each row is a flattened 28x28 grayscale image.
plt.imshow(x_train[0].reshape((28,28)), cmap="gray")
x_train.shape
# + [markdown] id="p4d8qf8NEllS" colab_type="text"
# We convert matrices into pytorch tensors by using `map`:
# + id="n3GA0oJ6EllV" colab_type="code" colab={} outputId="73a63bc1-9c17-4304-92cb-ee63a01b904f"
x_train,y_train,x_valid,y_valid = map(torch.tensor, (x_train,y_train,x_valid,y_valid))
n,c = x_train.shape
x_train.shape, y_train.min(), y_train.max()
# + [markdown] id="puMtF7eREllk" colab_type="text"
# In lesson2-sgd we did everything from scratch; like this:
#
# ```python
# x = torch.ones(n,2)
# def mse(y_hat, y): return ((y_hat-y)**2).mean()
# y_hat = x@a
# ```
#
# Now instead we'll use PyTorch functions to do it for us, and also to handle mini-batches (which we didn't do last time, since our dataset was so small). So we'll create a Data Loader we can interact with:
# + id="Y5_I3pZmEllm" colab_type="code" colab={}
bs=64  # mini-batch size
# Wrap the tensors in datasets and a fastai DataBunch for mini-batching.
train_ds = TensorDataset(x_train, y_train)
valid_ds = TensorDataset(x_valid, y_valid)
data = DataBunch.create(train_ds, valid_ds, bs=bs)
# + id="-30V0ZFMElly" colab_type="code" colab={} outputId="3176f956-7837-4c90-8854-64db373aebb0"
# Peek at one mini-batch to confirm shapes.
x,y = next(iter(data.train_dl))
x.shape,y.shape
# + [markdown] id="zWk5c1aFEll9" colab_type="text"
# We create a Logistic Regression model; a neural network with no hidden layers and no nonlinearities:
# + id="8xKEDwbTElmA" colab_type="code" colab={}
class Mnist_Logistic(nn.Module):
    """Logistic regression for MNIST: one affine map from 784 pixels to 10 class scores."""

    def __init__(self):
        super().__init__()
        # Kept as `lin` because later cells inspect `model.lin` directly.
        self.lin = nn.Linear(in_features=784, out_features=10, bias=True)

    def forward(self, xb):
        """Return logits (x @ W.T + b) for a mini-batch `xb` of flattened images."""
        return self.lin(xb)
# + [markdown] id="pdNG7AEIElmE" colab_type="text"
# And put the model into the GPU:
# + id="XIhGFCFDElmG" colab_type="code" colab={}
model = Mnist_Logistic().cuda()
# + id="eG2X06wGElmM" colab_type="code" colab={} outputId="3236c9e4-21aa-465a-b416-3e1dd284f337"
model
# + id="ZvLox4AJElmV" colab_type="code" colab={} outputId="31eec115-c28a-4bbb-adde-79c12e218b25"
model.lin
# + id="ZbynopAnElmc" colab_type="code" colab={} outputId="345c2144-50e7-48dc-d2a0-93ec7dbb4410"
model(x).shape
# + [markdown] id="y3rEdbhqElmn" colab_type="text"
# The **`parameters`** attribute of `model` holds all the parameters tensors and bias tensors:
# + id="Km6BaNsEElmq" colab_type="code" colab={} outputId="19912e50-d53a-486d-83a8-567f8d54cc7c"
[p.shape for p in model.parameters()]
# + [markdown] id="BuDHxayuElm0" colab_type="text"
# We choose a learning rate and a loss function:
# + id="zWen1Y_oElm1" colab_type="code" colab={}
lr=2e-2
# + id="RpLqt4_YElm8" colab_type="code" colab={}
# CrossEntropyLoss combines log-softmax and negative log-likelihood.
loss_func = nn.CrossEntropyLoss()
# + [markdown] id="8Vhh_Oa0ElnD" colab_type="text"
# Note: This PyTorch loss function includes a final softmax activation function automatically, so we don't need to add that explicitly.
#
# Then, we define a function for updating the parameters:
# + id="ty1D_tnnElnF" colab_type="code" colab={}
def update(x,y,lr):
    """One hand-rolled SGD step with L2 weight decay; returns the batch loss."""
    wd = 1e-5
    preds = model(x)
    # weight decay: sum of squared entries over every parameter tensor.
    w2 = sum((p ** 2).sum() for p in model.parameters())
    # total loss = data loss + weight-decay penalty
    loss = loss_func(preds, y) + wd * w2
    loss.backward()
    # Manual parameter update -- no optimizer object involved.
    with torch.no_grad():
        for p in model.parameters():
            p.sub_(lr * p.grad)
            p.grad.zero_()
    return loss.item()
# + id="VE1CFdR-ElnM" colab_type="code" colab={}
# One pass over the training DataLoader: one SGD step per mini-batch.
losses = [update(x,y,lr) for x,y in data.train_dl]
# + id="rXoAeLsHElnR" colab_type="code" colab={} outputId="36d5383b-b75d-44cb-c5fc-c793c6481ed1"
plt.plot(losses);
# + [markdown] id="wr6H042HElna" colab_type="text"
# We see that loss reduces with iterations, but it bounces more too (approximating to the minimum). That's the reason why we may want to reduce our LR, with *learning rate annealing*.
#
# Now we add a hidden layer so we have a **deep neural network**:
# + id="zdRE5ktRElnc" colab_type="code" colab={}
class Mnist_NN(nn.Module):
    """MNIST classifier with one hidden layer: 784 -> 50 -> ReLU -> 10."""

    def __init__(self):
        super().__init__()
        self.lin1 = nn.Linear(in_features=784, out_features=50, bias=True)
        self.lin2 = nn.Linear(in_features=50, out_features=10, bias=True)

    def forward(self, xb):
        """Return class logits for a flattened mini-batch `xb`."""
        return self.lin2(F.relu(self.lin1(xb)))
# + id="5zINYYzUElnh" colab_type="code" colab={}
model = Mnist_NN().cuda()
# + id="z6l2kcpXElnr" colab_type="code" colab={}
# Same manual SGD update, now applied to the one-hidden-layer network.
losses = [update(x,y,lr) for x,y in data.train_dl]
# + id="gSJe6fWxElnu" colab_type="code" colab={} outputId="da415fa3-c36f-4237-c9c7-82aa7ed6a9a7"
plt.plot(losses);
# + [markdown] id="EGhwcTmpEln3" colab_type="text"
# Now we modify the update function, and leave the optimization part in hands of Pytorch. We'll use Adam instead of prior SGD. Note: changing the process is easy and fast with this optimizer thing.
# + id="xLv5tjiEEln6" colab_type="code" colab={}
model = Mnist_NN().cuda()
# + id="XV5t8xWIEln-" colab_type="code" colab={}
def update(x,y,lr):
    """One training step using torch.optim.Adam; returns the batch loss."""
    # NOTE(review): a fresh Adam instance is created on every call, so its
    # running first/second-moment estimates are reset each mini-batch --
    # consider constructing the optimizer once, outside this function.
    opt = optim.Adam(model.parameters(), lr) # optimize the model parameters using <Adam>, with this lr or that wd...
    y_hat = model(x)
    loss = loss_func(y_hat, y)
    loss.backward()
    opt.step() # update parameters
    opt.zero_grad() # reset gradient
    return loss.item()
# + [markdown] id="TAoLUqhEEloB" colab_type="text"
# We need to lower the LR because using the same the loss diverges. If you see that in the graph, start again!
# + id="RXk5WaIKEloC" colab_type="code" colab={}
# LR lowered to 1e-3: with the previous 2e-2 the loss diverges under Adam.
losses = [update(x,y,1e-3) for x,y in data.train_dl]
# + id="WZP255ttEloJ" colab_type="code" colab={} outputId="19b52261-021a-488b-ec6c-6b46d2dcb777"
plt.plot(losses);
# + [markdown] id="wkaXs_iPEloT" colab_type="text"
# Now we get to a 0.5 loss much earlier! Adam is really fast :)
#
# But why using the optimizer and those Pytorch tools when we can use fastai's `Learner` to do everything for us?
# + id="DPKRGDXSEloV" colab_type="code" colab={} outputId="732f10b8-6261-4925-dac2-8b704d2ee50a"
# Let fastai's Learner manage the training loop, optimizer and metrics.
learn = Learner(data, Mnist_NN(), loss_func=loss_func, metrics=accuracy)
# + id="3mMNycRXEloc" colab_type="code" colab={} outputId="9fe176f5-3c4f-4adb-9c09-7605aebf0588"
# %debug
# + id="haaCB0qCEloi" colab_type="code" colab={} outputId="f2ed194b-0ada-498f-cfbe-25096b093fe2"
# LR range test: sweep the learning rate and plot loss vs. LR.
learn.lr_find()
learn.recorder.plot()
# + [markdown] id="9oDWcheyEloq" colab_type="text"
# And we use 1-cycle policy:
# + id="AL2KBVjJElor" colab_type="code" colab={} outputId="ff51cd85-23a4-4be1-b07c-1b69b57b5013"
learn.fit_one_cycle(1, 1e-2)
# + [markdown] id="9c6lq1J4Elou" colab_type="text"
# Look at that! we already surpassed our prior approach :) Fastai is using Adam too, with minor variations. Let's plot the used LR against iterations (left) and the momentum against iterations (right). Momentum is high when LR is low, and the opposite!
#
# Why that LR? at the beginning we're in some part of function space, it's just bumpy as all heck. So if you start jumping around, those bumps have big gradients and it will throw you into crazy parts of the space. So start slow. Then you'll gradually move into parts of the weight space that is sensible. And as you get to the points where they're sensible, you can increase the learning rate because the gradients are actually in the direction you want to go. Then as we've discussed a few times, as you get close to the final answer you need to anneal your learning rate to hone in on it.
#
# And why that momentum? because at the beginning, you have a small learning rate, but you keep going in the same direction, so you may as well go faster. But if you're jumping really far, don't also jump really far because it's going to throw you off. Then as you get to the end, you're fine-tuning in, but actually if you keep going the same direction again and again, go faster. So this combination is called one cycle and it's a simple thing but it's astonishing. This can help you get what's called super convergence that can let you train 10 times faster.
# + id="--vkU98GElow" colab_type="code" colab={} outputId="65c0c35e-046c-4a37-d308-912ecb731771"
learn.recorder.plot_lr(show_moms=True)
# + [markdown] id="jv9y4lT6Elo9" colab_type="text"
# Let's plot losses:
# + id="G8oUhh2kElo-" colab_type="code" colab={} outputId="38b9cd66-5876-423b-aea3-fd174264cd0c"
learn.recorder.plot_losses()
# + [markdown] id="CSxPlEZRElpD" colab_type="text"
# We don't see any bounce here. Why? because fastai calculates the exponentially weighted moving average of the losses for you
# + [markdown] id="xaVdD8YnElpG" colab_type="text"
# ## fin
# + id="wdyfcR96ElpH" colab_type="code" colab={}
|
nbs/dl1/lesson5-sgd-mnist.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Player Analysis
# +
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import stats
# %matplotlib inline
# -
# ## Data Cleaning and Exploring
# +
df1 = pd.read_csv("matches.csv")     # one row per match
df2 = pd.read_csv("deliveries.csv")  # one row per delivery (ball)
# Align the key column name so the two tables can be merged on match_id.
df1.rename(columns={"id" : 'match_id'}, inplace=True)
matches= pd.merge(df1, df2, on='match_id', how='outer')
# -
matches.columns
# +
# Keep only the columns needed for the analysis below.
matches = matches[['match_id', 'season','team1', 'team2', 'toss_winner','toss_decision','winner',
                   'inning', 'batting_team', 'bowling_team', 'over', 'ball',
                   'batsman','non_striker', 'bowler','wide_runs','noball_runs',
                   'batsman_runs', 'extra_runs', 'total_runs', 'player_dismissed',
                   'dismissal_kind']]
matches.shape
# -
matches.head()
# ### Taking in Consideration only KKR VS MI matches
# Either home/away ordering of the two teams counts as a KKR vs MI fixture.
KKR_MI =matches[np.logical_or(np.logical_and(matches['team1']=='Kolkata Knight Riders',matches['team2']=='Mumbai Indians'),
                              np.logical_and(matches['team2']=='Kolkata Knight Riders',matches['team1']=='Mumbai Indians'))]
KKR_MI.head()
KKR_MI.match_id.unique().shape
# ### Analysis of wicket fall down of MI in 6-15 overs
# +
# NOTE(review): `>6` excludes over 6 itself; if "6-15 overs" is meant to be
# inclusive of over 6, this should be `>=6` -- confirm the intent.
overs = KKR_MI[(KKR_MI["over"]>6) & (KKR_MI["over"]<=15)]
mumbai = overs[overs["batting_team"]=="Mumbai Indians"]
mumbai.head()
# +
# Wickets lost per match: count dismissals per (season, match).
wicket_fall_per = mumbai.groupby(["season","match_id"]).agg({"player_dismissed":"count"})
wicket_fall_per.columns = ["wicket fall"]
print("Wickets loss by MI in 6-15 overs against KKR(season wise) :")
wicket_fall_per
# -
sns.distplot(wicket_fall_per["wicket fall"])
sns.countplot(wicket_fall_per["wicket fall"])
# From both the histogram and barplot we can see that we get an almost normal distribution curve. Also, we can see the tallest peak at a fall of 2 wickets.
#
sns.boxplot(wicket_fall_per["wicket fall"])
# So , no outliers as such present in the data
print("Average no of wickets loss by MI in 6-15 over against KKR :",round(wicket_fall_per["wicket fall"].mean()))
# ### Confidence Interval Calculation
wicket_fall_per.describe().T
# +
# Sample statistics for the per-match wicket counts.
mean = wicket_fall_per["wicket fall"].mean()
sd = wicket_fall_per["wicket fall"].std()
n = len(wicket_fall_per)
n
# -
# Two-sided 95% t critical value for n-1 degrees of freedom.
# (Was hard-coded to 2.064, which is only correct for df = 24.)
tstar = stats.t.ppf(0.975, n - 1)
se = sd/np.sqrt(n)
se
# +
# 95% confidence interval for the mean, rounded to whole wickets.
lcb = mean - tstar * se
ucb = mean + tstar * se
lcb = round(lcb)
ucb = round(ucb)
print("So , our 95% Confidence Interval for wickets that MI can loose in 6-15 over against KKR :{}".format((lcb, ucb)))
# -
# ### Rohit Sharma dismissal type Analysis
# Deliveries where Rohit Sharma ("RG Sharma") is on strike.
# Bug fix: the batsman name had been left as the anonymization placeholder
# '<NAME>', which matches no batsman; restored to 'RG Sharma' for
# consistency with the `player_dismissed` filter below.
rohit = KKR_MI[KKR_MI["batsman"]=='RG Sharma']
# Per-innings runs, balls faced and dismissal count for Rohit vs KKR.
batsmen_score = pd.DataFrame(rohit.groupby(['season',"match_id","inning","batsman"]).agg({'batsman_runs' : 'sum', 'ball' :'count' , "player_dismissed":"count" }))
batsmen_score
# From the above dataframe we can observe that out of 18 innings Rohit had played against KKR , he had been out 13 times.
#
# Also we can observe that most of the time he was out when he has scored in run per ball pattern and his scores also lied below 30 for majority time, which indicates that he faces difficulty playing initially when he has played under 30 balls.
# +
# Restrict to deliveries on which Rohit was actually dismissed.
rohit_dismiss = rohit[rohit["player_dismissed"]=="RG Sharma"]
rohit_dis = rohit_dismiss.groupby(["bowler"])["player_dismissed"].count()
dismissal = pd.DataFrame(rohit_dis)
print("Rohit Sharma dismissal against KKR bowlers :")
dismissal
# -
# From here we get an idea that Rohit had been dismissed majority of times by pacers.
#
# We can also see dominancy of Sunil Naraine in getting Rohit OUT , where he was able to dismiss him 6 times.
#
# +
# Dismissals broken down by bowler, innings and kind.
rohit_diss_k = rohit_dismiss.groupby(["bowler","inning","dismissal_kind"])["dismissal_kind"].count()
dismissal_kind_inning_wise = pd.DataFrame(rohit_diss_k)
print("Inning wise Dismissal :")
dismissal_kind_inning_wise
# +
# Dismissals aggregated by kind only (caught, bowled, lbw, ...).
rohit_diss_kind = rohit_dismiss.groupby(["dismissal_kind"])["dismissal_kind"].count()
dismissal_kind = pd.DataFrame(rohit_diss_kind)
dismissal_kind
# -
# From all the analysis above we can conclude that Rohit has faced problems against pacers while batting early in his innings (i.e. when he has faced fewer than 30 balls), and that Sunil Narine has troubled him a lot.
#
# Analysing his performance, dismissal timing, inning-wise dismissals, dismissal types etc., we can conclude that he is most likely to get out caught.
# ### Analysis of Total Runs that can be scored in the match
# +
# Aggregate runs scored by each side across all KKR vs MI deliveries.
Each_team_overall_score = pd.DataFrame(KKR_MI.groupby("batting_team")["total_runs"].sum())
Each_team_overall_score.columns=["Total Runs"]
print('Overall score for each team :')
Each_team_overall_score
# +
count = KKR_MI.match_id.unique()
count = len(count)  # number of KKR vs MI matches
Total_runs_scored = KKR_MI.total_runs.sum()
# Two innings per match, hence count*2 in the denominator.
Avg_score = Total_runs_scored/(count*2)
print("On an average runs scored in each innnings in KKR VS MI :",round(Avg_score))
# +
Total_avg = Total_runs_scored/count
print("On an average total runs scored in a match of KKR VS MI :" , round(Total_avg))
# +
# Combined (both innings) runs per match.
runs_scored_per_match = pd.DataFrame(KKR_MI.groupby(["season","match_id"])["total_runs"].sum())
print("Total Runs scored in per match of KKR VS MI :")
runs_scored_per_match
# -
sns.distplot(runs_scored_per_match["total_runs"])
# The curve is somewhat similar to a Normal Distribution curve; we can also observe a few possible outliers along both edges, but we need to confirm the presence of outliers before coming to any conclusion.
sns.boxplot(runs_scored_per_match["total_runs"])
# We can see 1-2 outliers in our data
# #### Outliers Removal
# +
# z-score of each match total, used below to drop far points.
runs_scored_per_match['z_score']=stats.zscore(runs_scored_per_match["total_runs"])
runs_scored_per_match
# -
# From the above dataframe we can see that for most of the data values z-score lies between (-1,1) except at some points where value of z-score is +/- 2. So these points can be considered as the outliers(far points)
# Outliers Removal :
# Keep rows with |z| <= 2. The original second condition `abs() >= -2` was
# always true (abs is non-negative), so it has been dropped -- behavior unchanged.
runs_scored_per_match = runs_scored_per_match.loc[runs_scored_per_match['z_score'].abs() <= 2]
sns.boxplot(runs_scored_per_match["total_runs"])
# ### Confidence Interval Calculation
# +
# Sample statistics for the per-match run totals (outliers removed above).
mean = runs_scored_per_match["total_runs"].mean()
sd = runs_scored_per_match["total_runs"].std()
n = len(runs_scored_per_match)
n
# -
# Two-sided 95% t critical value for n-1 degrees of freedom.
# (Was hard-coded to 2.064, which is only correct for df = 24.)
tstar = stats.t.ppf(0.975, n - 1)
se = sd/np.sqrt(n)
se
# +
# 95% confidence interval for the mean match total, rounded to whole runs.
lcb = mean - tstar * se
ucb = mean + tstar * se
lcb = round(lcb)
ucb = round(ucb)
print("So , our 95% Confidence Interval for Total runs that can be scored(in total) in today's match :{}".format((lcb, ucb)))
# -
# ### Analysis of total no of fours that KKR can score
# Deliveries with KKR batting, restricted to boundaries worth exactly 4.
KKR = KKR_MI[KKR_MI["batting_team"]=="Kolkata Knight Riders"]
four_only = KKR[KKR["batsman_runs"]==4]
# +
# Count of fours hit by KKR in each match.
per_match_fours = pd.DataFrame(four_only.groupby(["match_id"])["batsman_runs"].count())
per_match_fours.columns = ["fours count"]
print("Per match fours scored by KKR against MI:")
per_match_fours
# -
plt.figure(figsize = (8,6))
sns.countplot(per_match_fours["fours count"])
# From the plot above we can see the tallest peak at 14 fours
sns.boxplot(per_match_fours["fours count"])
# We can see 1-2 outliers in our data
# #### Outlier Removal
# +
# z-score of each match's fours count, used below to drop far points.
per_match_fours['z_score']=stats.zscore(per_match_fours["fours count"])
per_match_fours
# -
# From the above dataframe we can see that for most of the data value's z-score lies between (-1,1) except at some points where value of z-score is +/- 2. So these points can be considered as the outliers(far points)
# Keep rows with |z| <= 2. The original second condition `abs() >= -2` was
# always true (abs is non-negative), so it has been dropped -- behavior unchanged.
per_match_fours = per_match_fours.loc[per_match_fours['z_score'].abs() <= 2]
sns.boxplot(per_match_fours["fours count"])
print("Average no. of fours KKR has scored per match against MI :",round(per_match_fours["fours count"].mean()))
# ### Confidence Interval Calculation
# +
# Sample statistics for the per-match fours counts (outliers removed above).
mean = per_match_fours["fours count"].mean()
sd = per_match_fours["fours count"].std()
n = len(per_match_fours)
n
# -
# Two-sided 95% t critical value for n-1 degrees of freedom.
# (Was hard-coded to 2.064, which is only correct for df = 24.)
tstar = stats.t.ppf(0.975, n - 1)
se = sd/np.sqrt(n)
se
# +
# 95% confidence interval for the mean fours count, rounded to whole fours.
lcb = mean - tstar * se
ucb = mean + tstar * se
lcb = round(lcb)
ucb = round(ucb)
print("So , our 95% Confidence Interval for Total fours that can be scored by KKR in the match :{}".format((lcb, ucb)))
|
KKR VS MI/Player Analysis KKR VS MI.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.1 64-bit (conda)
# name: python391jvsc74a57bd076e70d4f24e964d17f53abe91c9fa765a5a058d8698ed877a6a2481687c98a77
# ---
# +
import pandas as pd
import numpy as np
import logging
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from pathlib import Path
import itertools
import sys
sys.path.insert(1, '../')
from config import shuffled_csv, path_exps
from NN import NN_model, Sigmoid, MSE, L2_reg, L1_reg, ReLU
from NN.utility import batch_train, batch_out, Model_Wrapper
from LBFGS import LBFGS
from LevelMethod import LevelMethod
from testing import multi_run
# + tags=[]
# Load the shuffled dataset, keep the first 100 samples, and split into
# inputs (first 10 columns) and standardized targets (last 2 columns).
data = pd.read_csv(shuffled_csv, index_col=0).to_numpy()[:100, :]
n_samples = data.shape[0]
X_data, Y_data = data[:, :10], data[:, 10:]
Y_scaler = StandardScaler()
Y_scaled = Y_scaler.fit_transform(Y_data)
# np.random.seed(11)
# 10-20-20-2 fully connected network, ReLU activations, MSE loss,
# with a small L1 penalty on the weights.
model = NN_model([10, 20, 20, 2], ReLU, MSE)
model.init_weights()
reg_loss = L1_reg(1e-4)
# logging.basicConfig(level="INFO")
# f exposes the regularized training objective; x is the flat weight vector.
f = Model_Wrapper(model, X_data, Y_scaled, reg_loss)
x = model.Weights
# +
# Hyper-parameter grid for the Level method; each (lambda_, bounds) pair is
# solved n=3 times and the aggregate statistics are appended to a CSV log.
lambda_ = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
bounds = [1]
HPs = itertools.product(lambda_, bounds)
experiments_csv = path_exps / Path("level_grid_search.csv")
# remove HP combinations already explored in a previous run
if experiments_csv.exists():
    df = pd.read_csv(experiments_csv)
    HPs_new = []
    for lambda_, bounds in HPs:
        if ((df['lambda_'] == lambda_) & (df['bounds'] == bounds)).any():
            continue
        HPs_new.append((lambda_, bounds))
    HPs = HPs_new
for lambda_, bounds in HPs:
    print(f'Testing lambda: {lambda_}, bounds: {bounds}', end='\n')
    solver = LevelMethod(lambda_=lambda_, bounds=bounds, max_iter=100000, verbose=False)
    results = multi_run(solver, f, n=3)
    log_columns = ["lambda_", "bounds", "final_fv_m", "final_fv_std", "f_evals_m", "f_evals_std", "seconds_m", "seconds_std", "n_failures"]
    log_data = [lambda_, bounds, results[0], results[1], results[2], results[3], results[4], results[5], results[6]]
    print(results[0])
    df = pd.DataFrame([log_data], columns=log_columns)
    if experiments_csv.exists():
        df.to_csv(experiments_csv, mode="a", header=False, index=False)
    else:
        # Bug fix: the original called os.path.exists / os.mkdir, but `os` is
        # never imported in this script (only pathlib); use the Path API.
        path_exps.mkdir(parents=True, exist_ok=True)
        df.to_csv(experiments_csv, index=False)
    print('\t Finished')
# +
# Plot belli — summary plots of the Level-method grid search (wide 12x4
# figures). Each metric is drawn as mean ± std vs lambda_, once for the full
# results and once restricted to runs with no failures.
experiments_csv = path_exps / Path("level_grid_search.csv")


def _plot_metric_wide(frame, col, ylabel, outfile, logscale=False, bbox_inches=None):
    """Save a 12x4 mean ± std plot of `<col>_m` / `<col>_std` vs lambda_."""
    xs = frame['lambda_']
    ys = frame[col + '_m']
    err = frame[col + '_std']
    fig, ax = plt.subplots(figsize=(12, 4))
    ax.plot(xs, ys)
    ax.fill_between(xs, ys - err, ys + err, alpha=0.1)
    plt.xticks(xs, xs)
    if logscale:
        plt.yscale('log', base=10)
    ax.set_ylabel(ylabel)
    ax.set_xlabel('Level method λ')
    fig.savefig(outfile, bbox_inches=bbox_inches)


df = pd.read_csv(experiments_csv)
df = df.sort_values(by='lambda_')
_plot_metric_wide(df, 'seconds', 'seconds', 'plots/level/seconds.png')
_plot_metric_wide(df, 'f_evals', 'f_evals', 'plots/level/f_evals.png')
_plot_metric_wide(df, 'final_fv', 'f_min', 'plots/level/f_min.png')
_plot_metric_wide(df, 'final_fv', 'f_min', 'plots/level/f_min_log.png', logscale=True)
# Plots without failures
df = df[df['n_failures'] == 0]
df
_plot_metric_wide(df, 'seconds', 'seconds', 'plots/level/seconds_nofail.png')
_plot_metric_wide(df, 'f_evals', 'f_evals', 'plots/level/f_evals_nofail.png')
_plot_metric_wide(df, 'final_fv', 'f_min', 'plots/level/f_min_nofail.png')
_plot_metric_wide(df, 'final_fv', 'f_min', 'plots/level/f_min_log_nofail.png',
                  logscale=True, bbox_inches='tight')
# +
# Plot belli — same grid-search summary plots as above, but with matplotlib's
# default (square-ish) figure size; files are suffixed `_square`.
experiments_csv = path_exps / Path("level_grid_search.csv")


def _plot_metric_square(frame, col, ylabel, outfile, logscale=False, bbox_inches=None):
    """Save a default-size mean ± std plot of `<col>_m` / `<col>_std` vs lambda_."""
    xs = frame['lambda_']
    ys = frame[col + '_m']
    err = frame[col + '_std']
    fig, ax = plt.subplots()
    ax.plot(xs, ys)
    ax.fill_between(xs, ys - err, ys + err, alpha=0.1)
    plt.xticks(xs, xs)
    if logscale:
        plt.yscale('log', base=10)
    ax.set_ylabel(ylabel)
    ax.set_xlabel('Level method λ')
    fig.savefig(outfile, bbox_inches=bbox_inches)


df = pd.read_csv(experiments_csv)
df = df.sort_values(by='lambda_')
_plot_metric_square(df, 'seconds', 'seconds', 'plots/level/seconds_square.png')
_plot_metric_square(df, 'f_evals', 'f_evals', 'plots/level/f_evals_square.png')
_plot_metric_square(df, 'final_fv', 'f_min', 'plots/level/f_min_square.png')
_plot_metric_square(df, 'final_fv', 'f_min', 'plots/level/f_min_log_square.png', logscale=True)
# Plots without failures
df = df[df['n_failures'] == 0]
df
_plot_metric_square(df, 'seconds', 'seconds', 'plots/level/seconds_nofail_square.png')
_plot_metric_square(df, 'f_evals', 'f_evals', 'plots/level/f_evals_nofail_square.png')
_plot_metric_square(df, 'final_fv', 'f_min', 'plots/level/f_min_nofail_square.png')
_plot_metric_square(df, 'final_fv', 'f_min', 'plots/level/f_min_log_nofail_square.png',
                    logscale=True, bbox_inches='tight')
|
Experiments/experiment_level.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: hypeScanKernel
# language: python
# name: hypescancentral
# ---
# + pycharm={"name": "#%%\n"}
'''
This script loads output data from control_task_ISC.py run
on the Discovery cluster and runs various analyses depending
on user input at the top of the script. See the "set
parameters" chunk below for descriptions of the various
analyses.
'''
# + pycharm={"name": "#%% imports\n"}
import scipy.io as sio
import os
import numpy as np
import pandas as pd
import time
from joblib import Parallel, delayed
from scipy import stats
import matplotlib.pyplot as plt
import statsmodels.stats.multitest as multi
from scipy.stats import norm
from sklearn import preprocessing
from sklearn.preprocessing import StandardScaler
import seaborn as sns
import matplotlib.cm as cm
import warnings
import matplotlib.cbook
import pickle
import sys
from scipy.interpolate import interp1d
sys.path.append('/dartfs-hpc/rc/home/z/f00589z/hyperscanning/support_scripts/')
from phaseScramble import *
# Hide matplotlib's deprecation warnings (MatplotlibDeprecationWarning,
# exposed via cbook.mplDeprecation) so notebook output stays readable.
warnings.filterwarnings("ignore",category=matplotlib.cbook.mplDeprecation) # suppress some matplotlib warnings
# %matplotlib inline
# + pycharm={"name": "#%%\n"}
# Report the interpreter version running this kernel.
import platform
print(platform.python_version())
# + pycharm={"name": "#%% set paths\n"}
# All data lives under the hyperscanning control-tasks directory on Discovery.
baseFolder = '/dartfs-hpc/rc/home/z/f00589z/hyperscanning/control_tasks/'
loadFolder = f'{baseFolder}control_ISC_output/'       # pickled ISC results
inputFolder = f'{baseFolder}nuisRegr_output_files/'   # nuisance-regressed EPI
# + pycharm={"name": "#%% set parameters\n"}
# --- parameters identifying which saved dataset to load ---
permutations = 5000        # number of permutations
cShift = True              # cShift (True) or pScram (False) null distribution
normalize = True           # time series were z-scale normalized before ISC
alpha = 0.05               # alpha for permutation tests
twoTailed = True           # two- (True) or one-tailed (False; right-tailed) tests
useDetrendedData = True    # data detrended during nilearn nuisance regression
fitNormal = False          # include null-distribution normal fit parameters
removeListeningSamples = 0
removeReadingSamples = 0
# --- which analyses to run below ---
plotCorrDists = False                  # ISC distributions per subject per control task
plotMinMaxMedianISCtimeSeries = False  # sub vs group series at min/max/median ISC voxels
subBySubCorrMats = False               # mean correlation across voxels between subs
plotPairwiseISCtimeSeries = False      # NOTE that subBySubCorrMats must be True for this to run
analyzeSmoothness = False              # time series smoothness measure (largely obviated as of June 2021)
analyzeDrift = False                   # time series drift measure
findOptimalDriftWindow = False         # shortest initial windows to trim ('analyzeDrift' must be True)
ISC_statMap = True                     # median ISC heatmaps on an average brain surface
drift_statMap = True                   # mean drift on an average brain surface
smooth_statMap = False                 # mean smoothness on an average brain surface
# + pycharm={"name": "#%% load data\n"}
# Build the pickle file name from the parameters chosen above; every option
# contributes a fixed suffix so the name uniquely identifies the dataset.
tags = ['controlISC_' + str(permutations) + 'perm']
tags.append('_cShift' if cShift else '_pScram')
if normalize:
    tags.append('_norm')
if twoTailed:
    tags.append('_twoTailed')
if useDetrendedData:
    tags.append('_detrended')
    epiTag = 'detrended_'   # matching tag used when locating EPI .mat files
else:
    epiTag = ''
if fitNormal:
    tags.append('_nullDistFits')
if removeListeningSamples > 0:
    tags.append('_xL' + str(removeListeningSamples))
if removeReadingSamples > 0:
    tags.append('_xR' + str(removeReadingSamples))
fileName = ''.join(tags)
# load data
with open(loadFolder + fileName + '.pkl', 'rb') as f:
    permTest, corrData, groupFitData, duration, pGroup = pickle.load(f)
print('loaded file: ' + loadFolder + fileName + '.pkl')
# + pycharm={"name": "#%% setup\n"}
# Condition labels used throughout the plots below.
taskNames = ['listening', 'reading']
siteNames = ['DBIC', 'CBS']
# load hyperscanning subject list
subList = pd.read_pickle('/dartfs-hpc/rc/home/z/f00589z/hyperscanning/misc/hyperscanning_subject_list.pkl')
# participant / pair counts
numSubs = subList.shape[0]
numPairs = round(numSubs / 2)
# number of voxels, taken from the first subject's listening-task ISC data
numVox = len(corrData[0][0])
# EPI time series are loaded lazily by the analysis cells below
epiLoaded = False
# colorblind-friendly colors list
CB_color_cycle = ['#377eb8', '#ff7f00', '#4daf4a',
                  '#f781bf', '#a65628', '#984ea3',
                  '#999999', '#e41a1c', '#dede00']
# first two colors mark the listening / reading tasks
taskColors = CB_color_cycle[:2]
# + pycharm={"name": "#%%\n"}
# Re-save the subject list with pickle protocol 1 (for compatibility).
subListPkl = '/dartfs-hpc/rc/home/z/f00589z/hyperscanning/misc/hyperscanning_subject_list.pkl'
with open(subListPkl, 'wb') as f:  # Python 3: open(..., 'wb')
    pickle.dump([subList], f, protocol=1)
# + pycharm={"name": "#%%\n"}
# Also export the subject list as CSV for inspection outside Python.
subList.to_csv('/dartfs-hpc/rc/home/z/f00589z/hyperscanning/misc/hyperscanning_subject_list.csv', index=False)
# + pycharm={"name": "#%%\n"}
# Show the current working directory.
# Bug fix: this was `s.getcwd()` — `s` is undefined (NameError); the os
# module imported at the top of the script provides getcwd().
os.getcwd()
# + pycharm={"name": "#%% function for loading EPI time series\n"}
def loadEPI(subList, folder, normalize, epiTag):
    """
    function for loading EPI time series
    :param subList: subs x 3 dataframe with columns: 'pairNum', 'site', and 'subID'
    :param folder: folder from which to load EPI time series
    :param normalize: boolean indicating whether or not to normalize (z-score) each series
    :param epiTag: '' or 'detrended_', selecting which preprocessed .mat files to read
    :return: boldData: boldData[task][sub] = samples x voxels numpy array
    """
    taskNames = ['listening', 'reading']
    n_subs = subList.shape[0]
    boldData = [[]] * 2
    for TASK in [0, 1]:  # listening first, then reading
        boldData[TASK] = [[]] * n_subs
        for S in range(n_subs):
            # storytelling runs 3 and 4 correspond to the two control tasks
            run = str(TASK + 3)
            file = (folder + 'sub-' + str(subList['subID'][S])
                    + '_ses-pair0' + str(subList['pairNum'][S])
                    + '_task-storytelling' + run + '_run-0' + run
                    + '_bold_space-MNI152NLin2009cAsym_preproc_nuisRegr_2021_'
                    + epiTag + 'interp.mat')
            tseries = sio.loadmat(file)['tseries']
            boldData[TASK][S] = tseries
            print('loaded ' + str(tseries.shape[0]) + ' x ' + str(tseries.shape[1])
                  + ' timeseries for ' + taskNames[TASK] + ' task, sub '
                  + subList['subID'][S])
            if normalize:
                # z-score each voxel's time series independently
                boldData[TASK][S] = stats.zscore(tseries, axis=0)
                print('z-scoring time series')
    return boldData
# + pycharm={"name": "#%% homebrewed fdr function\n"}
"""
This is effectively here just in case you want
clarity on how the multi.fdrcorrection function
works. You verified that the function below
gives the same output.
"""
def fdr(pVals, q):
    """Benjamini-Hochberg FDR thresholding.

    :param pVals: array-like of uncorrected p values (one per voxel)
    :param q: target false discovery rate
    :return: boolean array, in the ORIGINAL order of pVals, marking the
             hypotheses rejected at FDR q
    """
    pVals = np.asarray(pVals)
    # Bug fix: the unsorting indices must be computed from the ORIGINAL
    # array. The old code sorted pVals first and then took
    # argsort().argsort() of the sorted array, which is always the identity
    # permutation — so the result was returned in sorted order instead of
    # being mapped back to the proper voxels.
    unsortInds = pVals.argsort().argsort()
    sortedP = np.sort(pVals)
    N = len(sortedP)
    i = np.arange(1, N + 1)  # the 1-based i index of the p values, as in p(i)
    # print number of uncorrected p-values below q
    print('# uncorrected pVals < ' + str(q) + ': ' + str(len(np.where(sortedP < q)[0])))
    # get pVals below the BH line qi / N
    below = sortedP < (q * i / N)
    # if any critical value exists
    if np.where(below)[0].size > 0:
        # index (in sorted order) of the greatest pVal below qi / N
        max_below = np.max(np.where(below)[0])
        # FDR adjusted critical p value
        pCrit = sortedP[max_below]
        print('# uncorrected pVals < ' + str(pCrit) + ': ' + str(len(np.where(sortedP <= pCrit)[0])))
        # hypothesis test, mapped back to the original voxel order
        h = (sortedP <= pCrit)[unsortInds]
    else:
        h = np.zeros(N, dtype=bool)
    return h
# + pycharm={"name": "#%%\n"}
def surfaceStatMap(masker, statMapVec, avgSurface, thresh):
    """Project one stat map per task onto an average cortical surface.

    :param masker: volume masker providing inverse_transform (voxel vector -> 3D image)
    :param statMapVec: per-task list of per-voxel statistic vectors
    :param avgSurface: average surface object with pial/infl/sulc meshes per hemisphere
    :param thresh: display threshold passed to the surface viewer
    :return: view[task][hemi] interactive surface views (hemi 0 = left, 1 = right)

    NOTE(review): relies on `surface` and `plotting` (presumably nilearn) and
    the global `taskNames`; neither import is visible in this script — confirm
    they are provided elsewhere before running.
    """
    statMap = [[]] * 2
    texture = [[]] * 2
    view = [[]] * 2
    hemiLabels = ['left', 'right']
    for TASK in [0, 1]:
        # back-project the per-voxel statistics into a volumetric image
        statMap[TASK] = masker.inverse_transform(statMapVec[TASK])
        texture[TASK] = [[]] * 2
        view[TASK] = [[]] * 2
        for HEMI in [0, 1]:
            # pick the meshes for this hemisphere
            if HEMI == 0:
                pial, infl, sulc = avgSurface.pial_left, avgSurface.infl_left, avgSurface.sulc_left
            else:
                pial, infl, sulc = avgSurface.pial_right, avgSurface.infl_right, avgSurface.sulc_right
            texture[TASK][HEMI] = surface.vol_to_surf(statMap[TASK], pial)
            view[TASK][HEMI] = plotting.view_surf(infl,
                                                  texture[TASK][HEMI],
                                                  threshold=thresh,
                                                  colorbar=True,
                                                  title=taskNames[TASK] + ', ' + hemiLabels[HEMI],
                                                  bg_map=sulc)
    return view
# + pycharm={"name": "#%% plot correlation distributions\n"}
# One figure per subject pair: a 2x2 grid of ISC-coefficient histograms
# across voxels (rows = the two paired subjects, columns = listening/reading).
# NOTE(review): indentation was stripped in this export; the statements below
# nest under `if plotCorrDists:` -> `for SUB` -> `for PAIRSUB` -> `for TASK`
# as the inline comments indicate.
if plotCorrDists:
# set axis label font size
axLabFontSize = 12
# plot data
for SUB in range(numPairs): # for each pair...
# get subjects from current pair
pairSubs = [SUB,SUB + numPairs]
# initialize plot
plt.figure(facecolor='white',figsize=(6,6))
# for each subject in the current pair
for PAIRSUB in [0,1]:
for TASK in [0,1]:
# get plot data
pData = corrData[TASK][pairSubs[PAIRSUB]]
# select subplot
plt.subplot(2, 2, PAIRSUB*2 + TASK + 1)
# plot histogram
plt.hist(pData, bins=25, density=True, alpha=0.6, color=taskColors[TASK])
# dashed line at x=0
yMax = plt.gca().get_ylim()[1]
plt.plot([0, 0], [0, yMax], '--k')
# axes and title
plt.xlabel('correlation', fontsize=axLabFontSize)
if TASK == 0:
# NOTE(review): with density=True the y axis is a density, although the
# label below says 'voxel count' (runtime string left unchanged).
plt.ylabel('voxel count', fontsize=axLabFontSize)
plt.title(taskNames[TASK] + ', sub ' + siteNames[PAIRSUB] + str(SUB + 1))
plt.tight_layout()
plt.show()
# + pycharm={"name": "#%% look at sub vs group timeseries in voxels with min, max, and median ISC coefficients\n"}
"""
For each participant and each task we plot the distribution
of ISC coefficients (correlation between the participant's
voxel time series and the mean voxel time series among all
of the other participants) across voxels. We then overlay
the individual voxel time series from the participant and
the rest of the group that have the minimum, maximum, and
median ISC coefficients across voxels.
"""
# NOTE(review): indentation was stripped in this export; the statements below
# nest under `if plotMinMaxMedianISCtimeSeries:` -> `for SUB` -> `for TASK`
# -> `for VOX` as the inline comments indicate. The names voxLabs, voxColors,
# spMap3, axLabFontSize and taskColors defined here are reused by the
# pairwise-plotting cell further down.
if plotMinMaxMedianISCtimeSeries:
# load EPI time series if necessary
if not epiLoaded:
boldData = loadEPI(subList,inputFolder,normalize,epiTag)
epiLoaded = True # indicate that we've loaded the EPI time series
# extreme voxel labels
voxLabs = ['min corr vox','max corr vox','median vox']
voxColors = ['y','m','k']
# set task colors
taskColors = CB_color_cycle[:2]
# make subplotting map: 4 rows (histogram + 3 voxel plots) x 2 task columns
spMap3 = np.arange(8).reshape(4,2) + 1
# set axis label font size
axLabFontSize = 12
# define standard scaler
scaler = StandardScaler()
# plot data
for SUB in range(numSubs):
# initialize plot
plt.figure(facecolor='white',figsize=(16,8))
# set main plot title
titleString = subList['subID'][SUB]
plt.suptitle(titleString)
for TASK in [0,1]:
# get plot data
pData = corrData[TASK][SUB]
# select subplot for histogram
plt.subplot(spMap3.shape[0], spMap3.shape[1], spMap3[0,TASK])
# plot histogram
plt.hist(pData, bins=100, density=True, alpha=0.6, color=taskColors[TASK])
# dashed line at x=0
yMax = plt.gca().get_ylim()[1]
plt.plot([0, 0], [0, yMax], '--k')
# axes and title
plt.xlabel('correlation', fontsize=axLabFontSize)
if TASK == 0:
plt.ylabel('voxel count', fontsize=axLabFontSize)
plt.title(taskNames[TASK])
# plot voxel time series with extreme values
for VOX in [0,1,2]: # min, max, median
# get "Extreme Index" of voxel with either min or max value (or median)
if VOX == 0:
EIND = np.unravel_index(np.argmin(pData),pData.shape) # minimum correlation voxel index
elif VOX == 1:
EIND = np.unravel_index(np.argmax(pData),pData.shape) # maximum correlation voxel index
elif VOX == 2:
# NOTE(review): this yields a plain int, unlike the unravel_index
# tuples above — compensated by the VOX == 2 reshape hack below.
EIND = np.argsort(pData)[len(pData)//2] # median (approximately)
# add locations of min and max correlation to histogram for reference
extremeCorr = pData[EIND]
plt.subplot(spMap3.shape[0], spMap3.shape[1], spMap3[0,TASK])
plt.plot([extremeCorr, extremeCorr], [0, yMax], '-' + voxColors[VOX])
# get individual subject time series at the extreme voxel
y1 = boldData[TASK][SUB][:,EIND]
x = np.array(range(len(y1))) + 1
# get mean of data from all participants EXCEPT the current participant
otherSubs = np.arange(0,numSubs)
otherSubs = np.delete(otherSubs,SUB)
y2 = np.mean([boldData[TASK][i][:,EIND] for i in otherSubs], axis=0)
if VOX == 2: #hack to deal with EIND not being a tuple when we find the median
y2 = y2.reshape(y2.shape[0],1)
y2 = scaler.fit_transform(y2) # normalize the rest-of-group mean (see next section for confirmation that this doesn't influence correlations)
# select subplot and reset subplot border color
ax = plt.subplot(spMap3.shape[0], spMap3.shape[1], spMap3[VOX + 1,TASK])
plt.setp(ax.spines.values(), color=voxColors[VOX])
plt.setp([ax.get_xticklines(), ax.get_yticklines()], color=voxColors[VOX])
# plot lines and add legend
line1, = plt.plot(x,y1,'-k',label = 'individual')
line2, = plt.plot(x,y2,'-', label = 'rest of group', color = taskColors[TASK]) # , linewidth=2
plt.legend(handles=[line1, line2],loc='upper right')
if TASK == 0:
plt.xlabel('TR')
else:
plt.xlabel('reading stimulus flip')
plt.ylabel('BOLD signal')
plt.title(voxLabs[VOX])
plt.tight_layout()
plt.show()
# + pycharm={"name": "#%% sub x sub correlation matrices\n"}
# from matplotlib.colors import Normalize
# Mean pairwise ISC between every pair of subjects, drawn as one subs x subs
# heatmap per task (axis labels: D* = DBIC subjects, H* = CBS subjects).
# corrData_pairs built here feeds the pairwise-plotting cell below.
# NOTE(review): indentation was stripped in this export; the statements below
# nest under `if subBySubCorrMats:` and the TASK / SUB1 / SUB2 loops.
# NOTE(review): `fastColumnCorr` comes from the wildcard phaseScramble import
# at the top — presumably a column-wise correlation helper; confirm.
if subBySubCorrMats:
# load EPI time series if necessary
if not epiLoaded:
boldData = loadEPI(subList,inputFolder,normalize,epiTag)
epiLoaded = True # indicate that we've loaded the EPI time series
corrMat = [[]] * 2
corrColors = [[]] * 2
corrData_pairs = [[]] * 2
axLab = [[]] * numSubs
for TASK in [0,1]:
# NOTE(review): the three `[[]] * 2` preallocations below are immediately
# overwritten by the np.empty / per-sub list assignments further down;
# harmless but redundant.
corrMat[TASK] = [[]] * 2
corrColors[TASK]= [[]] * 2
corrData_pairs[TASK]= [[]] * 2
# some feedback
print('\ncomputing pairwise correlations for ' + str(taskNames[TASK]) + ' task')
# preallocate subs x subs correlation matrix
corrMat[TASK] = np.empty([numSubs,numSubs])
corrData_pairs[TASK] = [[]] * numSubs
for SUB1 in range(numSubs):
corrData_pairs[TASK][SUB1] = [[]] * numSubs
# get axis labels (only needs doing once, hence the TASK == 0 guard)
if TASK == 0:
if SUB1 < numPairs:
axLab[SUB1] = 'D' + str(SUB1 + 1)
else:
axLab[SUB1] = 'H' + str(SUB1 - numPairs + 1)
# set the diagonal equal to 1
corrMat[TASK][SUB1,SUB1] = 1
for SUB2 in np.arange(SUB1 + 1,numSubs):
corrData_pairs[TASK][SUB1][SUB2] = fastColumnCorr(boldData[TASK][SUB1], boldData[TASK][SUB2])
corrMat[TASK][SUB1,SUB2] = np.mean(corrData_pairs[TASK][SUB1][SUB2])
#fill in the other half of corrMat so the plots dont look weird
corrMat[TASK][SUB2,SUB1] = corrMat[TASK][SUB1,SUB2]
plt.figure(facecolor='white')
cmap = cm.get_cmap('RdBu')#sns.diverging_palette(20, 220, n=200)
ax = sns.heatmap(
corrMat[TASK],
vmin=-1, vmax=1, center=0,
cmap=cmap,
square=True
)
ax.set_xticklabels(axLab)
ax.set_xticklabels(
ax.get_xticklabels(),
rotation=45,
horizontalalignment='right'
)
ax.set_yticklabels(axLab)
ax.set_yticklabels(
ax.get_yticklabels(),
rotation=0
)
# add a title
plt.title('mean corr coef across vox, ' + taskNames[TASK] + ' task')
# get heatmap rgbs (RGBA of each cell, kept for reuse elsewhere)
im = ax.collections[0]
corrColors[TASK] = im.cmap(im.norm(im.get_array()))
# pairwise version of individual voxel time series comparisons
# NOTE that subBySubCorrMats must be True for this to run (it consumes
# corrData_pairs built above); it also reuses spMap3 / axLabFontSize /
# voxLabs / voxColors / taskColors from the plotMinMaxMedianISCtimeSeries
# cell, so that cell's setup must have run as well.
if plotPairwiseISCtimeSeries:
    # make a numSubs by numSubs plot map
    spMap4 = np.arange(numSubs**2).reshape(numSubs,numSubs)
    # set plot width [inches?]
    plotWidth = 16
    # plot data
    for SUB1 in range(numSubs):
        # get sub1 string (D = DBIC, H = CBS)
        if SUB1 < numPairs:
            sub1Str = 'D' + str(SUB1 + 1)
        else:
            sub1Str = 'H' + str(SUB1 - numPairs + 1)
        for SUB2 in np.arange(SUB1 + 1,numSubs):
            # get sub2 string
            if SUB2 < numPairs:
                sub2Str = 'D' + str(SUB2 + 1)
            else:
                sub2Str = 'H' + str(SUB2 - numPairs + 1)
            # initialize plot
            plt.figure(facecolor='white',figsize=(16,8))
            # main title
            plt.suptitle('subs ' + sub1Str + ' & ' + sub2Str)
            for TASK in [0,1]:
                # get correlation data for a given pair
                pData = corrData_pairs[TASK][SUB1][SUB2]
                # plot histogram
                # Bug fix: this indexed `spMap[0,TASK]`, but `spMap` is only
                # created in a later cell (NameError here); every other
                # subplot call in this cell uses `spMap3`.
                plt.subplot(spMap3.shape[0], spMap3.shape[1], spMap3[0,TASK])
                plt.hist(pData, bins=100, density=True, alpha=0.6, color=taskColors[TASK])
                # dashed line at x=0
                yMax = plt.gca().get_ylim()[1]
                plt.plot([0, 0], [0, yMax], '--k')
                # axes and title
                plt.xlabel('correlation', fontsize=axLabFontSize)
                if TASK == 0:
                    plt.ylabel('voxel count', fontsize=axLabFontSize)
                plt.title(taskNames[TASK])
                for VOX in [0,1,2]: # min, max, median
                    # get "Extreme Index" of voxel with either min or max value (or median)
                    if VOX == 0:
                        EIND = np.unravel_index(np.argmin(pData),pData.shape) # minimum correlation voxel index
                    elif VOX == 1:
                        EIND = np.unravel_index(np.argmax(pData),pData.shape) # maximum correlation voxel index
                    elif VOX == 2:
                        EIND = np.argsort(pData)[len(pData)//2] # median (approximately)
                    # add locations of min and max correlation to histogram for reference
                    extremeCorr = pData[EIND]
                    plt.subplot(spMap3.shape[0], spMap3.shape[1], spMap3[0,TASK])
                    plt.plot([extremeCorr, extremeCorr], [0, yMax], '-', color=voxColors[VOX])
                    # get individual subject time series at the extreme voxel
                    y1 = boldData[TASK][SUB1][:,EIND]
                    y2 = boldData[TASK][SUB2][:,EIND]
                    x = np.array(range(len(y1))) + 1
                    # select subplot for time series line plot
                    ax = plt.subplot(spMap3.shape[0], spMap3.shape[1], spMap3[VOX + 1,TASK])
                    plt.setp(ax.spines.values(), color=voxColors[VOX])
                    plt.setp([ax.get_xticklines(), ax.get_yticklines()], color=voxColors[VOX])
                    line1, = plt.plot(x,y1,'-k',label = sub1Str)
                    line2, = plt.plot(x,y2,'-', label = sub2Str, color = taskColors[TASK])
                    plt.legend(handles=[line1, line2],loc='upper right')
                    if TASK == 0:
                        plt.xlabel('TR')
                    else:
                        plt.xlabel('reading stimulus flip')
                    plt.ylabel('BOLD signal')
                    plt.title(voxLabs[VOX])
            # display plots
            plt.tight_layout()
            plt.show()
# + pycharm={"name": "#%% look at the \"smoothness\" of each voxel timeseries for each subject\n"}
"""
Here we use a basic formula for quantifying smoothness:
sd(diff(x))/abs(mean(diff(x)))
Adapted the formula from here:
https://stats.stackexchange.com/questions/24607/how-to-measure-smoothness-of-a-time-series-in-r
The inversion is so that large values reflect greater
smoothness. Smoothness values can then be optionally
standardized from 0 to 1 to make them a bit more
interpratable.
For each subject and each task we plot the distribution
of smoothness values across voxels -- only showing those
voxels less than or equal to the __th percentile, because
these distributions are deeply right skewed. We then plot
the individual time series for voxels at the min and max
and three selected percentile values for smoothness.
"""
# NOTE(review): indentation was stripped in this export; statements below
# nest under `if analyzeSmoothness:` and the TASK / SUB / VOX loops.
# NOTE(review): this cell uses boldData without the `if not epiLoaded` guard
# the other cells use — it will NameError unless an earlier cell already
# loaded the EPI time series.
if analyzeSmoothness:
# function to find nearest value to a given percentile in an array
# (also reused by the drift cell below — it only exists if this cell ran)
def find_nearest_percentile_index(array, percentile):
array = np.asarray(array)
target = np.percentile(array, percentile)
idx = (np.abs(array - target)).argmin()
return idx
# set up a subplot map: histogram + 5 voxel plots on a 3x2 grid
spMap = np.arange(6).reshape(3,2) + 1
# preallocate task arrays for smoothness
smoothness = [[]] * 2
# option to standardize smoothness values
standardize0to1 = True
# select colors for different individual voxels
voxColors = CB_color_cycle[4:9]
# threshold percentile below which to include data for histogram (deals with extreme skew)
threshPerc = 90
# select smoothness percentiles to look at in case voxMethod == 'percentile' above
percentiles = [25, 50, threshPerc]
# make voxel labels
if standardize0to1:
smoothLabs = ['min smoothness = 0',str(percentiles[0]) + ' %', str(percentiles[1]) + ' %', str(percentiles[2]) + ' %','max smoothness = 1']
else:
smoothLabs = ['min smoothness',str(percentiles[0]) + ' %', str(percentiles[1]) + ' %', str(percentiles[2]) + ' %','max smoothness']
# get and plot smoothness values
for TASK in [0,1]: # for each task...
# preallocate sub arrays for smoothness
smoothness[TASK] = [[]] * numSubs
for SUB in range(numSubs): # for each subject...
# initialize plot
plt.figure(facecolor='white',figsize=(16,8))
# main title (subID)
plt.suptitle(taskNames[TASK] + ' sub ' + str(SUB + 1))
# get data
data = boldData[TASK][SUB]
# compute smoothness
# NOTE(review): this evaluates to 1 / (std(diff) * |mean(diff)|), which is
# NOT the inverse of the sd/|mean| ratio described in the docstring
# (that would be |mean(diff)| / std(diff)) — confirm which was intended.
smoothness[TASK][SUB] = 1 / np.std(np.diff(data,axis=0),axis=0) / abs(np.mean(np.diff(data,axis=0),axis=0)) # see description above for source of formula
# optional min-max rescaling to [0, 1] (not a z-score, despite the old comment)
if standardize0to1:
smoothness[TASK][SUB] = (smoothness[TASK][SUB] - np.min(smoothness[TASK][SUB])) / (np.max(smoothness[TASK][SUB]) - np.min(smoothness[TASK][SUB]))
# arbitrarily subset for plotability (because these are so skewed)
data = smoothness[TASK][SUB]
# get voxel indices for time series with various levels of smoothness smoothness
evox = [[]] * 5
evox[0] = np.unravel_index(np.argmin(data),data.shape)[0]
counter = 1
for PERC in percentiles:
evox[counter] = find_nearest_percentile_index(data, PERC)
counter += 1
evox[4] = np.unravel_index(np.argmax(data),data.shape)[0]
# select subplot for histogram
plt.subplot(spMap.shape[0], spMap.shape[1], 1)
# select subset of data to plot for histogram to deal with visualization problems from extreme skew
threshInd = find_nearest_percentile_index(data, threshPerc)
plotData = data[data <= data[threshInd]]
# plot smoothness histogram
plt.hist(plotData, bins=100, density=True, alpha=1, color=taskColors[TASK])
plt.xlabel('smoothness parameter')
plt.ylabel('density')
if standardize0to1:
plt.title('standardized (0 to 1) smoothness values up to ' + str(threshPerc) + ' percentile')
else:
plt.title('smoothness values up to ' + str(threshPerc) + ' percentile')
# get histogram max y-value
yMax = plt.gca().get_ylim()[1]
# plot single voxel timeseries
for VOX in range(len(evox)):
# add vertical bars to histogram (only for voxels inside the plotted range)
if data[evox[VOX]] <= data[threshInd]:
plt.subplot(spMap.shape[0], spMap.shape[1], 1)
smoothVal = data[evox[VOX]]
plt.plot([smoothVal, smoothVal], [0, yMax], '-', color=voxColors[VOX])
# get time series at the extreme voxel
y = boldData[TASK][SUB][:,evox[VOX]]
x = np.array(range(len(y))) + 1
# select subplot for time series line plot
ax = plt.subplot(spMap.shape[0], spMap.shape[1], VOX + 2)
plt.setp(ax.spines.values(), color=voxColors[VOX])
plt.setp([ax.get_xticklines(), ax.get_yticklines()], color=voxColors[VOX])
# plot time series
plt.plot(x,y,'-k')
# subplot title and axis labels
plt.title(smoothLabs[VOX])
if TASK == 0:
plt.xlabel('TR')
else:
plt.xlabel('reading stimulus flip')
plt.ylabel('BOLD signal')
plt.tight_layout()
plt.show()
###############################################
### get smoothness summary measures (means) ###
###############################################
# preallocate
smoothnessCons = [[]] * 2
smoothness_mean = [[]] * 2
# get mean smoothness measure across subs (z-scored per subject first)
for TASK in [0,1]:
smoothnessCons[TASK] = np.empty([numSubs,numVox])
for SUB in range(numSubs):
# make sure everything is standardized
smoothnessCons[TASK][SUB,:] = (smoothness[TASK][SUB] - np.nanmean(smoothness[TASK][SUB])) / np.std(smoothness[TASK][SUB])
smoothness_mean[TASK] = np.nanmean(smoothnessCons[TASK], axis=0)
# + pycharm={"name": "#%% drift\n"}
"""
compare the mean signal of an early epoch to that of a late epoch. Greater absolute differences
should indicate greater drift. NOTE that this is super hacky, but should be FAST and at least
somewhat sensitive
set ending and starting time points for the early and late epochs for each task, respectively
epochBorders = [[10,5],[100,50]] would mean...
early epochs for the listening and reading tasks would be time points 1-10 and 1-5, respectively
and the late epochs would be 100-end, 50-end, also respectively
"""
# NOTE(review): indentation was stripped in this export; statements below
# nest under `if analyzeDrift:` and the TASK / SUB / VOX loops.
if analyzeDrift:
# define epoch borders
epochBorders = [[10,5],[100,50]]
# subplot map: histogram + 5 voxel plots on a 3x2 grid
spMap = np.arange(6).reshape(3,2) + 1
# set percentiles, colors, labels
voxColors = CB_color_cycle[4:9]
percentiles = [10, 50, 90]
sdSf = 2 # standard deviation scaling factor
voxMethod = 'stdevs'
if voxMethod == 'stdevs':
# NOTE(review): the last label below duplicates 'most negative diff' but
# annotates the argmax voxel — it should presumably read 'most positive
# diff' (runtime string left unchanged; fix when plots are regenerated).
diffLabs = ['most negative diff','mean - 1SD*' + str(sdSf), 'mean','mean + 1SD*' + str(sdSf) ,'most negative diff']
else:
diffLabs = ['most negative diff',str(percentiles[0]) + ' %', str(percentiles[1]) + ' %', str(percentiles[2]) + ' %','most positive diff']
# standardize difference scores
stdDiff = True
# preallocate arrays
epoch = [[]] * 2
driftHack = [[]] * 2
# load EPI time series if necessary
if not epiLoaded:
boldData = loadEPI(subList,inputFolder,normalize,epiTag)
epiLoaded = True # indicate that we've loaded the EPI time series
# get number of samples in the time series from each task, using the normalized data from the first subject
numSamps = [boldData[0][0].shape[0], boldData[1][0].shape[0]]
for TASK in [0,1]:
# get epoch time points
epoch[TASK] = [[]] * 2 # preallocate
epoch[TASK][0] = np.arange(epochBorders[0][TASK]) # early epoch
lateEpochWidth = numSamps[TASK] - epochBorders[1][TASK] + 1
epoch[TASK][1] = np.arange(lateEpochWidth) + epochBorders[1][TASK] - 1
# preallocate
driftHack[TASK] = [[]] * numSubs
for SUB in range(numSubs):
# initialize plot
plt.figure(facecolor='white',figsize=(16,8))
# main title
plt.suptitle(taskNames[TASK] + ' sub ' + str(SUB + 1))
# get time series for current sub
data = boldData[TASK][SUB]
# compute hacky drift statistic: early-epoch mean minus late-epoch mean, per voxel
driftHack[TASK][SUB] = np.mean(data[tuple(epoch[TASK][0]),:],axis=0) - np.mean(data[tuple(epoch[TASK][1]),:],axis=0)
# optional standardization (z-score across voxels)
if stdDiff:
driftHack[TASK][SUB] = (driftHack[TASK][SUB] - np.mean(driftHack[TASK][SUB])) / np.std(driftHack[TASK][SUB])
# select subplot for histogram
plt.subplot(spMap.shape[0], spMap.shape[1], 1)
# plot difference histogram
plt.hist(driftHack[TASK][SUB], bins=100, density=True, alpha=0.5, color=taskColors[TASK])
plt.xlabel('mean(first ' + str(epochBorders[0][TASK]) + ' time points) - mean(last ' + str(numSamps[TASK] - epochBorders[1][TASK] + 1) + ' timpoints')
plt.ylabel('proportion of voxels')
# get voxel indices for time series with min and max difference scores and those at various percentile cutoffs
evox = [[]] * 5
evox[0] = np.unravel_index(np.argmin(driftHack[TASK][SUB]),driftHack[TASK][SUB].shape)[0]
if voxMethod == 'stdevs':
evox[1] = (np.abs(driftHack[TASK][SUB] - (np.mean(driftHack[TASK][SUB]) - np.std(driftHack[TASK][SUB]) * sdSf))).argmin()
evox[2] = (np.abs(driftHack[TASK][SUB] - np.mean(driftHack[TASK][SUB]))).argmin()
evox[3] = (np.abs(driftHack[TASK][SUB] - (np.mean(driftHack[TASK][SUB]) + np.std(driftHack[TASK][SUB]) * sdSf))).argmin()
else:
# NOTE(review): find_nearest_percentile_index is defined inside the
# analyzeSmoothness cell — this branch NameErrors if that cell did not run.
counter = 1
for PERC in percentiles:
evox[counter] = find_nearest_percentile_index(driftHack[TASK][SUB], PERC)
counter += 1
evox[4] = np.unravel_index(np.argmax(driftHack[TASK][SUB]),driftHack[TASK][SUB].shape)[0]
# get histogram max y-value
yMax = plt.gca().get_ylim()[1]
# plot single voxel timeseries
for VOX in range(len(evox)):
# add vertical bars to histogram
plt.subplot(spMap.shape[0], spMap.shape[1], 1)
diffVal = driftHack[TASK][SUB][evox[VOX]]
plt.plot([diffVal, diffVal], [0, yMax], '-', color=voxColors[VOX])
# get time series at the extreme voxel
y = boldData[TASK][SUB][:,evox[VOX]]
x = np.array(range(len(y))) + 1
# select subplot for time series line plot
ax = plt.subplot(spMap.shape[0], spMap.shape[1], VOX + 2)
plt.setp(ax.spines.values(), color=voxColors[VOX])
plt.setp([ax.get_xticklines(), ax.get_yticklines()], color=voxColors[VOX])
# plot time series
plt.plot(x,y,'-k')
# subplot title and axis labels
plt.title(diffLabs[VOX])
if TASK == 0:
plt.xlabel('TR')
else:
plt.xlabel('reading stimulus flip')
plt.ylabel('BOLD signal')
plt.tight_layout()
plt.show()
##########################################
### get drift summary measures (means) ###
##########################################
# preallocate
driftHackCons = [[]] * 2
driftHack_mean = [[]] * 2
# get mean drift measure across subs (re-z-scored per subject first)
for TASK in [0,1]:
# preallocate
driftHackCons[TASK] = np.empty([numSubs,numVox])
for SUB in range(numSubs):
# make sure everything is standardized
driftHackCons[TASK][SUB,:] = (driftHack[TASK][SUB] - np.nanmean(driftHack[TASK][SUB])) / np.std(driftHack[TASK][SUB])
# get mean drift
driftHack_mean[TASK] = np.nanmean(driftHackCons[TASK], axis=0)
# + pycharm={"name": "#%% find optimal drift window\n"}
# Sweep epoch widths and the number of initial samples removed to find where
# the mean absolute drift statistic levels off (the "optimal" drift window).
if findOptimalDriftWindow:
    # load EPI time series if necessary
    if not epiLoaded:
        boldData = loadEPI(subList,inputFolder,normalize,epiTag)
        epiLoaded = True # indicate that we've loaded the EPI time series
    # set plotting scheme
    individPlots = True
    groupPlots = True
    # set epoch widths and get the maximum number of TRs to remove
    widths = [3,6] # epoch widths [TRs]
    removalMax = 100
    # get subplotting scheme
    pRows = np.ceil(len(widths) / 2)
    if len(widths) == 1:
        pCols = 1
    else:
        pCols = 2
    # standardize difference scores
    stdDiff = True
    # preallocate arrays
    meanAbsDrift = [[]] * 2
    # drift threshold scaling factor (to scale by 1 SD) - any voxels
    # with drift values less than this distance from the mean drift
    # value will be ignored in the analysis below. If set to zero,
    # no thresholding will be applied.
    threshSF = 2
    for TASK in [0,1]: # for each task...
        # preallocate
        meanAbsDrift[TASK] = [[]] * numSubs
        for SUB in range(numSubs): # for each subject...
            # get time series for current sub
            data = boldData[TASK][SUB]
            # preallocate
            meanAbsDrift[TASK][SUB] = np.empty([len(widths),removalMax])
            # initialize individual plot and set title
            if individPlots:
                plt.figure(facecolor='white',figsize=(pCols * 4,pRows * 4))
                if threshSF > 0:
                    plt.suptitle(taskNames[TASK] + ' task, sub ' + str(SUB + 1) + ', drift threshold: +/-' + str(threshSF) + 'SD')
                else:
                    plt.suptitle(taskNames[TASK] + ' task, sub ' + str(SUB + 1) + ', no drift thresholding')
            # NOTE(review): `data` is NOT reset between WIDTH iterations, so the
            # second width starts from a series already trimmed by removalMax-1
            # samples by the inner TRX loop — confirm this is intended.
            for WIDTH in range(len(widths)): # for each epoch width...
                # feedback
                print('\nanalyzing ' + taskNames[TASK] + ' sub ' + str(SUB + 1) + ' width ' + str(WIDTH + 1))
                for TRX in range(removalMax): # for each number of TRs removed
                    # remove TRX TRs
                    if TRX > 0:
                        data = np.delete(data,0,0)
                    # get epochs
                    epochs = [np.arange(widths[WIDTH]), np.arange(widths[WIDTH],data.shape[0])]
                    # compute drift statistic
                    drift = np.mean(data[tuple(epochs[0]),:],axis=0) - np.mean(data[tuple(epochs[1]),:],axis=0)
                    if TRX == 0: # if thresholding, select the voxels with the greatest drift prior to removing TRs
                        if threshSF > 0:
                            mu = np.mean(drift)
                            sd = np.std(drift)
                            thresholds = [mu - sd * threshSF, mu + sd * threshSF]
                            voxInds = np.concatenate((np.argwhere(drift < thresholds[0]), np.argwhere(drift > thresholds[1])))
                        else:
                            voxInds = range(numVox)
                    meanAbsDrift[TASK][SUB][WIDTH,TRX] = np.mean(np.abs(drift[voxInds]))
                # optional standardization across TR removals for each width
                if stdDiff:
                    meanAbsDrift[TASK][SUB][WIDTH,:] = (meanAbsDrift[TASK][SUB][WIDTH,:] - np.mean(meanAbsDrift[TASK][SUB][WIDTH,:])) / np.std(meanAbsDrift[TASK][SUB][WIDTH,:])
                # individual plots
                if individPlots:
                    plt.subplot(pRows,pCols,WIDTH+1)
                    plt.plot(range(removalMax),meanAbsDrift[TASK][SUB][WIDTH,:],'-ok')
                    plt.xlabel('# samples removed',fontsize=16)
                    plt.ylabel('mean absolute drift',fontsize=16)
                    plt.title('epoch width = ' + str(widths[WIDTH]) + ' samples',fontsize=16)
            # clean setup for individual plots
            if individPlots:
                plt.tight_layout()
                plt.show()
    # preallocate group stats arrays
    groupMeanAbsDrift = [[]] * 2
    groupSDAbsDrift = [[]] * 2
    # get / plot group stats
    driftSamps = [17,10] # hardcoding the thresholds for now
    for TASK in [0,1]: # for each task...
        # compute group mean drift
        groupMeanAbsDrift[TASK] = np.mean([meanAbsDrift[TASK][i] for i in range(numSubs)], axis=0)
        groupSDAbsDrift[TASK] = np.std([meanAbsDrift[TASK][i] for i in range(numSubs)], axis=0)
        # plot group mean drift
        if groupPlots:
            # initialize plot
            plt.figure(facecolor='white',figsize=(pCols * 4,pRows * 4))
            if threshSF > 0:
                plt.suptitle(taskNames[TASK] + ' task, group mean absolute drift (N=' + str(numSubs) + '), drift threshold: +/-' + str(threshSF) + 'SD')
            else:
                plt.suptitle(taskNames[TASK] + ' task, group mean absolute drift (N=' + str(numSubs) + '), no drift thresholding')
            for WIDTH in range(len(widths)):
                plt.subplot(pRows,pCols,WIDTH+1)
                x = range(removalMax)
                y = groupMeanAbsDrift[TASK][WIDTH,:]
                error = groupSDAbsDrift[TASK][WIDTH,:]
                plt.plot(x, y, 'k-')
                plt.fill_between(x, y-error, y+error)
                plt.xlabel('# samples removed',fontsize=16)
                plt.ylabel('mean absolute drift',fontsize=16)
                plt.title('epoch width = ' + str(widths[WIDTH]) + ' samples',fontsize=16)
                # estimate the "elbow" of the group mean curve
                # fit an exponential curve
                popt, pcov = curve_fit(func, x, y, p0=(1, 1e-6, -1))
                x2 = np.linspace(np.min(x),np.max(x),100)
                y2 = func(x2, *popt)
                plt.plot(x2,y2,'--r',linewidth=2)
                yLims = plt.gca().get_ylim()
                # vertical line at the hardcoded per-task cutoff
                plt.plot([driftSamps[TASK],driftSamps[TASK]],yLims,'-r')
            plt.tight_layout()
            plt.show()
# + pycharm={"name": "#%% Add note about hacky way you're plotting stat maps\n"}
"""
#############################################
########### Note about stat maps! ###########
#############################################
Currently plotting stat maps by generating a 'view' list
then printing its subcomponents in successive notebook
chunks. Hence why the stat maps sections are broken into
so many chunks.
"""
# + pycharm={"name": "#%% get masker object\n"}
# Set up the NIfTI masker and fsaverage surface used by any of the stat-map
# sections below (ISC, smoothness, or drift).
# BUG FIX: the condition tested ISC_statMap twice and never smooth_statMap,
# so the smoothness stat-map cells below could run without `masker`/`fsaverage`.
if ISC_statMap or smooth_statMap or drift_statMap:
    # import nilearn modules
    from nilearn import image as nImage
    from nilearn import input_data
    from nilearn import datasets
    from nilearn import surface
    from nilearn import plotting
    # get masker object
    maskFile = '/dartfs-hpc/rc/home/z/f00589z/hyperscanning/control_tasks/nuisRegr_input_files/mni_asym09c_mask_resamp3x3.nii.gz'
    maskImg = nImage.load_img(maskFile)
    masker = input_data.NiftiMasker(maskImg)
    # fit the masker on the mask image itself so it can inverse-transform later
    masker.fit_transform(maskImg)
    mapDir = '/dartfs-hpc/rc/home/z/f00589z/hyperscanning/control_tasks/statMaps/'
    fsaverage = datasets.fetch_surf_fsaverage()
# + pycharm={"name": "#%%\n"}
# Median-ISC stat map: voxelwise median ISC across subjects, with voxels that
# fail the chosen group-level significance criterion zeroed out before plotting.
if ISC_statMap:
    #########################################
    ### set hypothesis testing parameters ###
    #########################################
    # threshold by proportion of subjects with a significant FDR corrected p-value at a given voxel
    propThresh = False
    # if thresholding by the proportion of subjects with a significant FDR corrected p-value at a given voxel...
    if propThresh:
        # set proportion of participants who need to have fdr corrected
        # significant median ISC coefficients at a voxel to include it
        # in the mask
        fdrProp = .5
        # preallocate
        fdrVecs = [[]] * 2
        fdrMask = [[]] * 2
        for TASK in [0,1]:
            # preallocate
            fdrVecs[TASK] = np.empty([numSubs,numVox])
            fdrMask[TASK] = np.zeros([numVox,1])
            for SUB in range(numSubs):
                # compile fdr hypothesis testing vectors (1=reject null, 0=fail to reject null)
                fdrVecs[TASK][SUB,:] = permTest[TASK][SUB][1][0]
            # generate group mask based on fdrProp
            for VOX in range(numVox):
                if np.sum(fdrVecs[TASK][:,VOX]) > (numSubs * fdrProp):
                    fdrMask[TASK][VOX] = 1
    else:
        # evaluate real median ISC against null distribution of ISC group medians
        homeBrew = False
        alpha = 0.05
    # make an array from 0 to numSubs
    subNums = np.arange(0,numSubs)
    # preallocate task arrays for mean ISC coefficients
    corrData_median = [[]] * 2
    # for each task...
    for TASK in [0,1]:
        # get mean ISC across subs
        corrData_median[TASK] = np.median([corrData[TASK][i] for i in subNums], axis=0)
        if propThresh:
            corrData_median[TASK][fdrMask[TASK][:,0] == 0] = 0
        else:
            # get FDR hypothesis testing array
            # NOTE(review): `fdr`, `alphaPrime`, and `METHOD` are defined
            # elsewhere in the notebook; this branch only runs when homeBrew.
            if homeBrew:
                h = fdr(pGroup[TASK][METHOD][0], alphaPrime)
            else:
                # h = multi.fdrcorrection(pGroup[TASK][METHOD][0], alpha=alpha)[0]
                h = pGroup[TASK][1][0]
            print('\n' + str(len(np.where(h == True)[0])) + ' hits')
            # mask out voxels that failed to reject the null by setting them to 0
            corrData_median[TASK][h == False] = 0 # try NaN?
    # get surface plots
    thresh = 0.001 # threshold the stat maps just above zero so that voxels where null is not rejected are not plotted
    view = surfaceStatMap(masker,corrData_median,fsaverage,thresh)
# + pycharm={"name": "#%% listening left\n"}
# Each hemisphere view lives in its own cell because a notebook only renders
# the last bare expression of a cell (see the note cell above).
view[0][0]
# + pycharm={"name": "#%% listening right\n"}
view[0][1]
# + pycharm={"name": "#%% reading left\n"}
view[1][0]
# + pycharm={"name": "#%% reading right\n"}
view[1][1]
# + pycharm={"name": "#%% plot smoothness stat map on fsaverage\n"}
# Smoothness stat map: requires the analyzeSmoothness chunk to have populated
# smoothness_mean first.
if smooth_statMap:
    if not analyzeSmoothness:
        print('\nYou need to run the "analyzeSmoothness" chunk before you can unlock this stat map.\n')
    else:
        thresh = 0.99
        view = surfaceStatMap(masker,smoothness_mean,fsaverage,thresh)
# + pycharm={"name": "#%% listening left\n"}
# NOTE(review): a bare expression inside an `if` is not auto-displayed the way
# a cell's trailing expression is — confirm these cells actually render.
if smooth_statMap:
    view[0][0]
# + pycharm={"name": "#%% listening right\n"}
if smooth_statMap:
    view[0][1]
# + pycharm={"name": "#%% reading left\n"}
if smooth_statMap:
    view[1][0]
# + pycharm={"name": "#%% reading right\n"}
if smooth_statMap:
    view[1][1]
# + pycharm={"name": "#%% plot drift stat map on fsaverage\n"}
# Drift stat map: requires the analyzeDrift chunk to have populated
# driftHack_mean first.
if drift_statMap:
    if not analyzeDrift:
        print('\nYou need to run the "analyzeDrift" chunk before you can unlock this stat map.\n')
    else:
        thresh = 1
        view = surfaceStatMap(masker,driftHack_mean,fsaverage,thresh)
# + pycharm={"name": "#%% listening left\n"}
if drift_statMap:
    view[0][0]
# + pycharm={"name": "#%% listening right\n"}
if drift_statMap:
    view[0][1]
# + pycharm={"name": "#%% reading left\n"}
if drift_statMap:
    view[1][0]
# + pycharm={"name": "#%% reading right\n"}
if drift_statMap:
    view[1][1]
|
current_code/control_ISC_analyses.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19"
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import numpy as np
import pandas as pd
import os
from os import listdir
import tensorflow as tf
from keras.preprocessing.image import ImageDataGenerator
import cv2
import matplotlib.pyplot as plt
# #%matplotlib inline
import imutils
from tensorflow.keras.models import Model,load_model
from tensorflow.keras.layers import Conv2D,Input,ZeroPadding2D,BatchNormalization,Flatten,Activation,Dense,MaxPooling2D
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle #shuffling the data improves the model
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# -
#
# +
# #!pip install imutils
# -
#
image_dir= "../input/brain-tumor-detection-mri/Brain_Tumor_Detection/"
#
# +
# Preprocessing the data
def crop_brain_contour(image, plot=False):
    """Crop *image* down to the bounding box of its largest bright region.

    The image is grayscaled, blurred, binarized at 45, and cleaned up with
    erode/dilate; the extreme points of the largest external contour then
    define the crop. If *plot* is True, the original and cropped images are
    shown side by side.
    """
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)
    # binary mask of the bright region, denoised morphologically
    mask = cv2.threshold(gray, 45, 255, cv2.THRESH_BINARY)[1]
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)
    # largest external contour is taken to outline the brain
    found = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    contour = max(imutils.grab_contours(found), key=cv2.contourArea)
    # extreme points of that contour (left/right via x, top/bottom via y)
    left = tuple(contour[contour[:, :, 0].argmin()][0])
    right = tuple(contour[contour[:, :, 0].argmax()][0])
    top = tuple(contour[contour[:, :, 1].argmin()][0])
    bottom = tuple(contour[contour[:, :, 1].argmax()][0])
    cropped = image[top[1]:bottom[1], left[0]:right[0]]
    if plot:
        plt.figure()
        for slot, img, title in ((1, image, 'Original Image'), (2, cropped, 'Cropped Image')):
            plt.subplot(1, 2, slot)
            plt.imshow(img)
            plt.tick_params(axis='both', which='both', top=False, bottom=False,
                            left=False, right=False, labelbottom=False,
                            labeltop=False, labelleft=False, labelright=False)
            plt.title(title)
        plt.show()
    return cropped
# After applying the cropping function
# Demo: load one tumor-positive scan and display the original beside the crop.
ex_img = cv2.imread(image_dir+'yes/y107.jpg')
ex_crop_img = crop_brain_contour(ex_img, True)
# -
#
# +
# Loading the entire Dataset
def load_data(dir_list, image_size):
    """Read, crop, resize, and normalize every image under *dir_list*.

    Images coming from a directory whose name ends in 'yes' are labelled
    [1] (tumor), everything else [0]. Returns shuffled arrays (X, y).
    """
    image_width, image_height = image_size
    samples = []
    targets = []
    for directory in dir_list:
        tumor_dir = directory[-3:] == 'yes'
        for filename in listdir(directory):
            img = cv2.imread(directory + '/' + filename)
            img = crop_brain_contour(img, plot=False)
            img = cv2.resize(img, dsize=(image_width, image_height),
                             interpolation=cv2.INTER_CUBIC)
            # scale pixel values into [0, 1]
            samples.append(img / 255.)
            targets.append([1] if tumor_dir else [0])
    X = np.array(samples)
    y = np.array(targets)
    # shuffle so the two classes are interleaved
    X, y = shuffle(X, y)
    print(f'Number of examples is: {len(X)}')
    print(f'X shape is: {X.shape}')
    print(f'y shape is: {y.shape}')
    return X, y
# paths to the tumor ('yes') and no-tumor ('no') image folders
yes =image_dir+'yes'
no = image_dir+'no'
# every image is resized to 240x240 before training
IMG_WIDTH, IMG_HEIGHT = (240, 240)
X, y = load_data([yes, no], (IMG_WIDTH, IMG_HEIGHT))
# -
#
# +
def plot_sample_images(X, y, n=40):
    """Display the first *n* images of each class (0/1) in a 10-column grid."""
    label_to_str = lambda label: "Yes" if label == 1 else "No"
    for label in [0, 1]:
        # images whose target equals the current label, capped at n
        matches = X[np.argwhere(y == label)]
        sample = matches[:n]
        columns_n = 10
        rows_n = int(n / columns_n)
        plt.figure(figsize=(10, 8))
        for slot, img in enumerate(sample, start=1):
            plt.subplot(rows_n, columns_n, slot)
            plt.imshow(img[0])
            # hide every tick and tick label
            plt.tick_params(axis='both', which='both',
                            top=False, bottom=False, left=False, right=False,
                            labelbottom=False, labeltop=False, labelleft=False, labelright=False)
        plt.suptitle(f"Brain Tumor: {label_to_str(label)}")
        plt.show()
plot_sample_images(X, y)
# +
# Splitting the data
def split_data(X, y, test_size=0.2):
    """Split into train/val/test; val and test each take half of *test_size*."""
    X_train, X_hold, y_train, y_hold = train_test_split(X, y, test_size=test_size)
    X_test, X_val, y_test, y_val = train_test_split(X_hold, y_hold, test_size=0.5)
    return X_train, y_train, X_val, y_val, X_test, y_test

X_train, y_train, X_val, y_val, X_test, y_test = split_data(X, y, test_size=0.1)
# report the size of each split
for split_name, split_X in (("training", X_train), ("validation", X_val), ("test", X_test)):
    print("number of " + split_name + " examples = " + str(split_X.shape[0]))
# -
#
# +
# Building The Model
def build_model(input_shape):
    """Small CNN: zero-pad -> 7x7 conv(32) -> batchnorm -> ReLU ->
    two 4x4 max-pools -> flatten -> single sigmoid unit."""
    inputs = Input(input_shape)
    net = ZeroPadding2D((2, 2))(inputs)
    net = Conv2D(32, (7, 7), strides=(1, 1))(net)
    net = BatchNormalization(axis=3, name='bn0')(net)
    net = Activation('relu')(net)
    net = MaxPooling2D((4, 4))(net)
    net = MaxPooling2D((4, 4))(net)
    net = Flatten()(net)
    net = Dense(1, activation='sigmoid')(net)
    return Model(inputs=inputs, outputs=net)
# Assemble and train: 240x240 RGB input, Adam + binary cross-entropy,
# early stopping on validation loss (patience 5, best weights restored).
IMG_SHAPE = (IMG_WIDTH, IMG_HEIGHT, 3)
model=build_model(IMG_SHAPE)
model.summary()
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
from keras import callbacks
earlystopping = callbacks.EarlyStopping(monitor ="val_loss", mode ="min", patience = 5, restore_best_weights = True)
model.fit(x=X_train, y=y_train, batch_size=64, epochs=100, validation_data=(X_val, y_val), callbacks = [earlystopping])
# -
#
# NOTE(review): the filename says 1000 epochs but fit() above uses
# epochs=100 — confirm which is intended.
model.save("brain_tumor_dataset_2_model_training_1000_epochs.h5")
# +
# pull the per-epoch metrics recorded by the fit() call above
history = model.history.history
#Plotting of accuracy
def plot_metrics(history):
    """Plot training vs. validation loss and accuracy curves from a Keras
    history dict (keys: 'loss', 'val_loss', 'accuracy', 'val_accuracy')."""
    train_loss = history['loss']
    val_loss = history['val_loss']
    train_acc = history['accuracy']
    val_acc = history['val_accuracy']
    # Loss
    plt.figure()
    plt.plot(train_loss, label='Training Loss')
    plt.plot(val_loss, label='Validation Loss')
    plt.title('Loss')
    plt.legend()
    plt.show()
    # Accuracy
    plt.figure()
    plt.plot(train_acc, label='Training Accuracy')
    plt.plot(val_acc, label='Validation Accuracy')
    plt.title('Accuracy')
    plt.legend()
    plt.show()
plot_metrics(history)
# +
train_loss = history['loss']
val_loss = history['val_loss']
train_acc = history['accuracy']
val_acc = history['val_accuracy']
print(train_loss)
print(val_loss)
print(train_acc)
print(train_acc)
# -
loss, accuracy = model.evaluate(X_test, y_test, verbose=0)
loss_v, accuracy_v = model.evaluate(X_val, y_val, verbose=0)
print("Validation: accuracy = %f ; loss_v = %f" % (accuracy_v, loss_v))
print("Test: accuracy = %f ; loss = %f" % (accuracy, loss))
|
Model Training Jupyter Notebook/brain-tumor-another-dataset-training-4(1).ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/chamikasudusinghe/nocml/blob/master/fft_r4-i2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] colab_type="text" id="sFmit6AlgLJZ"
# Module Imports for Data Fetiching and Visualization
#
#
# + colab={} colab_type="code" id="k-c4z9vpHg1z"
import time
import pandas as pd
import numpy as np
# %matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
# + [markdown] colab_type="text" id="wY7RDDLBgdMs"
# Module Imports for Data Processing
# + colab={} colab_type="code" id="zhZHRUXxHg13"
from sklearn import preprocessing
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
import pickle
# + [markdown] colab_type="text" id="GEH4mU3Tghd8"
# Importing Dataset from GitHub
# + [markdown] colab_type="text" id="q9-4lGZ6NMSt"
# Train Data
# + colab={} colab_type="code" id="NblOX7-5PrjB"
# Training scenarios for router r4: FFT traffic traces under malicious
# (m-*) and normal load at several injection settings (n-0-*).
df1 = pd.read_csv('https://raw.githubusercontent.com/chamikasudusinghe/nocml/master/dos%20results%20ver%204/router-dataset/r4/2-fft-malicious-n-0-15-m-1-r4.csv?token=<KEY>')
df2 = pd.read_csv('https://raw.githubusercontent.com/chamikasudusinghe/nocml/master/dos%20results%20ver%204/router-dataset/r4/2-fft-malicious-n-0-15-m-11-r4.csv?token=AKVFSOGGFTANMBL73NT4MQS63INLS')
df3 = pd.read_csv('https://raw.githubusercontent.com/chamikasudusinghe/nocml/master/dos%20results%20ver%204/router-dataset/r4/2-fft-malicious-n-0-4-m-1-r4.csv?token=<KEY>')
df4 = pd.read_csv('https://raw.githubusercontent.com/chamikasudusinghe/nocml/master/dos%20results%20ver%204/router-dataset/r4/2-fft-malicious-n-0-4-m-11-r4.csv?token=<KEY>')
df5 = pd.read_csv('https://raw.githubusercontent.com/chamikasudusinghe/nocml/master/dos%20results%20ver%204/router-dataset/r4/2-fft-malicious-n-0-6-m-1-r4.csv?token=<KEY>')
df6 = pd.read_csv('https://raw.githubusercontent.com/chamikasudusinghe/nocml/master/dos%20results%20ver%204/router-dataset/r4/2-fft-malicious-n-0-6-m-11-r4.csv?token=<KEY>EGKQHAS63INME')
df7 = pd.read_csv('https://raw.githubusercontent.com/chamikasudusinghe/nocml/master/dos%20results%20ver%204/router-dataset/r4/2-fft-malicious-n-0-9-m-1-r4.csv?token=<KEY>')
df8 = pd.read_csv('https://raw.githubusercontent.com/chamikasudusinghe/nocml/master/dos%20results%20ver%204/router-dataset/r4/2-fft-malicious-n-0-9-m-11-r4.csv?token=<KEY>')
df9 = pd.read_csv('https://raw.githubusercontent.com/chamikasudusinghe/nocml/master/dos%20results%20ver%204/router-dataset/r4/2-fft-normal-n-0-15-r4.csv?token=AKVFSOCRF7VQ65UPET5B2NS63INMS')
df10 = pd.read_csv('https://raw.githubusercontent.com/chamikasudusinghe/nocml/master/dos%20results%20ver%204/router-dataset/r4/2-fft-normal-n-0-4-r4.csv?token=<KEY>')
df11 = pd.read_csv('https://raw.githubusercontent.com/chamikasudusinghe/nocml/master/dos%20results%20ver%204/router-dataset/r4/2-fft-normal-n-0-6-r4.csv?token=<KEY>')
df12 = pd.read_csv('https://raw.githubusercontent.com/chamikasudusinghe/nocml/master/dos%20results%20ver%204/router-dataset/r4/2-fft-normal-n-0-9-r4.csv?token=<KEY>')
# + colab={"base_uri": "https://localhost:8080/", "height": 221} colab_type="code" id="YtODRV2NPem1" outputId="1ba1a82f-b977-454b-f2b7-f0e241986c29"
print(df1.shape)
print(df2.shape)
print(df3.shape)
print(df4.shape)
print(df5.shape)
print(df6.shape)
print(df7.shape)
print(df8.shape)
print(df9.shape)
print(df10.shape)
print(df11.shape)
print(df12.shape)
# + colab={} colab_type="code" id="9L0CwK80NgDG"
# Concatenate all twelve scenario files into one training frame, ordered by
# timestamp, and cache it to disk.
# NOTE(review): DataFrame.append is deprecated (removed in pandas 2.0);
# pd.concat([...]) is the drop-in replacement — check the pandas version used.
df = df1.append(df2, ignore_index=True,sort=False)
df = df.append(df3, ignore_index=True,sort=False)
df = df.append(df4, ignore_index=True,sort=False)
df = df.append(df5, ignore_index=True,sort=False)
df = df.append(df6, ignore_index=True,sort=False)
df = df.append(df7, ignore_index=True,sort=False)
df = df.append(df8, ignore_index=True,sort=False)
df = df.append(df9, ignore_index=True,sort=False)
df = df.append(df10, ignore_index=True,sort=False)
df = df.append(df11, ignore_index=True,sort=False)
df = df.append(df12, ignore_index=True,sort=False)
df = df.sort_values('timestamp')
df.to_csv('fft-r4-train.csv',index=False)
# + colab={"base_uri": "https://localhost:8080/", "height": 439} colab_type="code" id="tovtj_19OeCa" outputId="660ac53c-10ab-43c1-9280-5a16e41d26ee"
# reload the combined training set from the cached csv
df = pd.read_csv('fft-r4-train.csv')
df
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="DEIwin3VOmdC" outputId="1ba7373f-9312-45c7-c89d-c9fd6faf8b5d"
df.shape
# + [markdown] colab_type="text" id="kB28tdcoNPvn"
# Test Data
# + colab={} colab_type="code" id="2NNmwmZGNLly"
# Test scenarios: traces with malicious senders (m-7, m-12) not present in
# the training scenarios above — presumably held out for generalization;
# verify against the experiment design.
df13 = pd.read_csv('https://raw.githubusercontent.com/chamikasudusinghe/nocml/master/dos%20results%20ver%204/router-dataset/r4/2-fft-malicious-n-0-15-m-12-r4.csv?token=<KEY>')
df14 = pd.read_csv('https://raw.githubusercontent.com/chamikasudusinghe/nocml/master/dos%20results%20ver%204/router-dataset/r4/2-fft-malicious-n-0-15-m-7-r4.csv?token=<KEY>')
df15 = pd.read_csv('https://raw.githubusercontent.com/chamikasudusinghe/nocml/master/dos%20results%20ver%204/router-dataset/r4/2-fft-malicious-n-0-4-m-12-r4.csv?token=<KEY>')
df16 = pd.read_csv('https://raw.githubusercontent.com/chamikasudusinghe/nocml/master/dos%20results%20ver%204/router-dataset/r4/2-fft-malicious-n-0-4-m-7-r4.csv?token=<KEY>')
df17 = pd.read_csv('https://raw.githubusercontent.com/chamikasudusinghe/nocml/master/dos%20results%20ver%204/router-dataset/r4/2-fft-malicious-n-0-6-m-12-r4.csv?token=<KEY>')
df18 = pd.read_csv('https://raw.githubusercontent.com/chamikasudusinghe/nocml/master/dos%20results%20ver%204/router-dataset/r4/2-fft-malicious-n-0-6-m-7-r4.csv?token=<KEY>')
df19 = pd.read_csv('https://raw.githubusercontent.com/chamikasudusinghe/nocml/master/dos%20results%20ver%204/router-dataset/r4/2-fft-malicious-n-0-9-m-12-r4.csv?token=<KEY>')
df20 = pd.read_csv('https://raw.githubusercontent.com/chamikasudusinghe/nocml/master/dos%20results%20ver%204/router-dataset/r4/2-fft-malicious-n-0-9-m-7-r4.csv?token=<KEY>')
# + colab={"base_uri": "https://localhost:8080/", "height": 153} colab_type="code" id="vI7r69cXm0wU" outputId="f69b08ac-ae34-4638-da28-3681508467c4"
print(df13.shape)
print(df14.shape)
print(df15.shape)
print(df16.shape)
print(df17.shape)
print(df18.shape)
print(df19.shape)
print(df20.shape)
# + colab={"base_uri": "https://localhost:8080/", "height": 439} colab_type="code" id="lyFCjch-Qr7R" outputId="e336a6f4-4019-494b-daf1-f614eca95149"
df5
# + [markdown] colab_type="text" id="xhWoBoqmnTM8"
# Processing
# + colab={"base_uri": "https://localhost:8080/", "height": 476} colab_type="code" id="Z-DMh9YdHg2F" outputId="d7d70306-d959-44ba-d3af-477243a7a985"
# count missing values per column
df.isnull().sum()
# + colab={} colab_type="code" id="ct4HKW31PrjK"
# drop identifier/timing columns that should not be used as model features
df = df.drop(columns=['timestamp','src_ni','src_router','dst_ni','dst_router'])
# + colab={"base_uri": "https://localhost:8080/", "height": 720} colab_type="code" id="Y9vcn7NCHg2M" outputId="747d6a94-d542-4eb1-c4f4-31f250f27e28"
df.corr()
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="KO-4OTqWHg2O" outputId="9a729aef-7dbd-4e5d-fa78-40b24adf53ab"
# annotated correlation heatmap of the remaining features
plt.figure(figsize=(25,25))
sns.heatmap(df.corr(), annot = True)
plt.show()
# + colab={} colab_type="code" id="ihUQ4Na0X6ud"
def find_correlation(data, threshold=0.9):
    """Return column labels that are redundant because they correlate
    (|r| > *threshold*) with another column that is kept.

    Only the strictly-lower triangle of the correlation matrix is scanned,
    so each correlated pair is considered once.
    """
    lower = data.corr()
    lower.loc[:, :] = np.tril(lower, k=-1)  # zero the diagonal and upper triangle
    seen = set()
    groups = []
    for col in lower:
        partners = lower[col][lower[col].abs() > threshold].index.tolist()
        if not partners or col in seen:
            continue
        seen.update(partners)
        # keep the group as [partners..., anchor column]
        groups.append(partners + [col])
    # drop everything in each group except its first member
    to_drop = []
    for grp in groups:
        to_drop.extend(grp[1:])
    return to_drop
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="ea_XFirFYRWD" outputId="bea3c2ab-1693-48cb-e933-245457a4b2d7"
# list candidate columns to drop based on pairwise correlation
columns_to_drop = find_correlation(df.drop(columns=['target']))
columns_to_drop
# + colab={} colab_type="code" id="uoJ0vV8dc3Tp"
# NOTE(review): columns_to_drop is computed above but never applied — the
# drop below is commented out; confirm this is intentional.
#df = df.drop(columns=[''])
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="mnbTvnKD8pJp" outputId="b759b1af-9ee8-4a35-b387-ce6d6931cf24"
plt.figure(figsize=(21,21))
sns.heatmap(df.corr(), annot = True)
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 993} colab_type="code" id="xmD7x9tM8rFy" outputId="44813939-1e72-4016-812b-d93d783b362e"
# same heatmap without annotations
plt.figure(figsize=(25,25))
sns.heatmap(df.corr())
plt.show()
# + [markdown] colab_type="text" id="dqkKQOsugytM"
# Processing Dataset for Training
# + colab={} colab_type="code" id="3QGHP8EYGqHK"
# Features/labels for training: everything except 'target' is a feature.
train_X = df.drop(columns=['target'])
train_Y = df['target']
# + colab={"base_uri": "https://localhost:8080/", "height": 439} colab_type="code" id="ebvK4Qc8Hg2S" outputId="a1277f52-9bdf-49ec-90b2-bf2d7bc9a6e2"
#standardization
# NOTE(review): MinMaxScaler rescales to [0, 1] (min-max normalization),
# not z-score standardization as the comment above suggests.
x = train_X.values
min_max_scaler = preprocessing.MinMaxScaler()
columns = train_X.columns
x_scaled = min_max_scaler.fit_transform(x)
train_X = pd.DataFrame(x_scaled)
train_X.columns = columns
train_X
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="S6WGL499FBbX" outputId="054729fb-5547-4bb1-90cf-245a7a0e1075"
# count exact duplicate feature rows after scaling
train_X[train_X.duplicated()].shape
# + colab={"base_uri": "https://localhost:8080/", "height": 456} colab_type="code" id="0BZm1zbDPrjh" outputId="5558e61f-49c8-43e8-a8cc-bdc8a7f1b05b"
# The eight held-out test files went through byte-identical preprocessing,
# so it is factored into one helper; the module-level names consumed by the
# evaluation cells further down (test_X..test_X7, test_Y..test_Y7) are kept.
# NOTE: the throwaway globals (x, min_max_scaler, columns, x_scaled) are no
# longer rebound here; the later feature-selection cell's `columns` now comes
# from the train_X cell, which carries the same feature labels.
def _prep_test_set(frame):
    """Drop the label and identifier columns, min-max scale the features.

    Prints the shape of the duplicated-row subset (same diagnostic as the
    original per-file cells) and returns (scaled_features, labels).
    """
    feats = frame.drop(columns=['target','timestamp','src_ni','src_router','dst_ni','dst_router'])
    labels = frame['target']
    cols = feats.columns
    scaler = preprocessing.MinMaxScaler()
    scaled = pd.DataFrame(scaler.fit_transform(feats.values))
    scaled.columns = cols
    print(scaled[scaled.duplicated()].shape)
    return scaled, labels

test_X, test_Y = _prep_test_set(df13)
test_X
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="6RHIaC_EpHU3" outputId="2425e99b-8492-4999-b7ba-da72f5166bf0"
test_X1, test_Y1 = _prep_test_set(df14)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="CcaDu50VpICj" outputId="7e796b16-a797-49ca-c633-b49646bf306a"
test_X2, test_Y2 = _prep_test_set(df15)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="6_CQ1D1DpIgj" outputId="79984f34-7d05-4d06-9604-ae29d4a13417"
test_X3, test_Y3 = _prep_test_set(df16)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="yyhSMnXWpI7H" outputId="c8a93959-f0d0-4140-e96a-6d063cd6517d"
test_X4, test_Y4 = _prep_test_set(df17)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="x1L9-vRTpJtX" outputId="2f128133-2e2b-4587-d9fa-0c8eeb0d6255"
test_X5, test_Y5 = _prep_test_set(df18)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="TOO9K9qnpLL_" outputId="0147dbc8-c8d5-467a-ea91-4f8127619a27"
test_X6, test_Y6 = _prep_test_set(df19)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="y2dPb0tOpLxu" outputId="79f5f478-356e-4af6-e5d4-dce7b56db60d"
test_X7, test_Y7 = _prep_test_set(df20)
# + [markdown] colab_type="text" id="2NUEiEtVHg2h"
# #### Machine Learning Models
# + [markdown] colab_type="text" id="BzgcG2e7hQC8"
# Module Imports for Data Processing and Report Generation in Machine Learning Models
# + colab={} colab_type="code" id="wZCWkSvsHg2h"
from sklearn.model_selection import train_test_split
import statsmodels.api as sm
from sklearn import metrics
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
# + [markdown] colab_type="text" id="TWvMTDtHhoex"
# Labels
#
# 1. 0 - malicious
# 2. 1 - good
#
#
# + colab={"base_uri": "https://localhost:8080/", "height": 68} colab_type="code" id="Ns6uUJz1G9MD" outputId="eb74db62-e0e1-47ff-8ba8-b9bfb4aab11a"
# Class balance of the training labels (per the note above: 0 = malicious, 1 = good).
train_Y = df['target']
train_Y.value_counts()
# + [markdown] colab_type="text" id="_px-73eUh-05"
# Training and Validation Splitting of the Dataset
# + colab={} colab_type="code" id="f9N<KEY>"
# Fix the RNG seed for reproducible splits.
seed = 5
np.random.seed(seed)
# + colab={} colab_type="code" id="SnHyPPbEHg2i"
# 80/20 shuffled train/validation split (train_X is prepared in an earlier cell).
X_train, X_test, y_train, y_test = train_test_split(train_X, train_Y, test_size=0.2, random_state=seed, shuffle=True)
# + [markdown] colab_type="text" id="Fy_oia0XiJNW"
# Feature Selection
# + colab={"base_uri": "https://localhost:8080/", "height": 588} colab_type="code" id="v10pJaPUfMOd" outputId="e9be8bfa-0845-4eab-eca2-d9f7bebaec21"
#SelectKBest for feature selection
# Rank the features by their chi-squared statistic against the training labels.
bf = SelectKBest(score_func=chi2, k=17)
fit = bf.fit(X_train,y_train)
dfscores = pd.DataFrame(fit.scores_)
# NOTE(review): `columns` here is the module-level variable last assigned in the
# scaling cells (test_X7's columns); it is assumed to match X_train's column
# order — confirm, or use X_train.columns directly.
dfcolumns = pd.DataFrame(columns)
featureScores = pd.concat([dfcolumns,dfscores],axis=1)
featureScores.columns = ['Specs','Score']
# Print all 17 features sorted by score, then plot the scores.
print(featureScores.nlargest(17,'Score'))
featureScores.plot(kind='barh')
# + [markdown] colab_type="text" id="rnmPuKJF2pdi"
# Decision Tree Classifier
# + colab={} colab_type="code" id="biZL4MMmvYmG"
#decisiontreee
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import GridSearchCV
# + colab={"base_uri": "https://localhost:8080/", "height": 119} colab_type="code" id="d3mCR-TiHg2-" outputId="0d552c19-44c4-4695-fe4f-48c6dbd8358b"
# Fit a depth/feature-limited decision tree; fixed random_state for reproducibility.
dt = DecisionTreeClassifier(max_depth=20,max_features=20,random_state = 42)
dt.fit(X_train,y_train)
# + colab={} colab_type="code" id="inQ8gu1OdApz"
# Persist the trained decision tree. Use a context manager so the file handle
# is flushed and closed (the original bare open() call leaked the handle).
with open("dt-r4.pickle.dat", 'wb') as f:
    pickle.dump(dt, f)
# + colab={} colab_type="code" id="xIIefAL_Hg2_"
# Predict on the held-out 20% validation split.
y_pred_dt= dt.predict(X_test)
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="gBahu3DGHg3A" outputId="05437eb4-3119-4724-8352-70293834d40f"
# Accuracy on training data (optimistic) vs. the validation split.
dt_score_train = dt.score(X_train,y_train)
print("Train Prediction Score",dt_score_train*100)
dt_score_test = accuracy_score(y_test,y_pred_dt)
print("Test Prediction Score",dt_score_test*100)
# + colab={"base_uri": "https://localhost:8080/", "height": 153} colab_type="code" id="6y9oZRCGhR_7" outputId="4385f72f-0bc4-4840-c10e-0f3d317fc7dc"
# Score the fitted tree on each of the eight held-out test sets in turn.
# (Same computation and output as the original eight copy-pasted cells.)
for _features, _labels in ((test_X, test_Y), (test_X1, test_Y1),
                           (test_X2, test_Y2), (test_X3, test_Y3),
                           (test_X4, test_Y4), (test_X5, test_Y5),
                           (test_X6, test_Y6), (test_X7, test_Y7)):
    y_pred_dt_test = dt.predict(_features)
    dt_score_test = accuracy_score(_labels, y_pred_dt_test)
    print("Test Prediction Score", dt_score_test * 100)
# + colab={"base_uri": "https://localhost:8080/", "height": 282} colab_type="code" id="OCAU8YpEfMRD" outputId="92a07366-baab-4ee1-b8de-e1e1b38cc489"
# Plot the tree's feature importances. NOTE(review): `columns` is the global
# last set in the scaling cells; assumed to match the training feature order.
feat_importances = pd.Series(dt.feature_importances_, index=columns)
feat_importances.plot(kind='barh')
# + colab={"base_uri": "https://localhost:8080/", "height": 295} colab_type="code" id="5nIhx84-Hg3B" outputId="c9736c64-3894-4f81-df18-55dc596cbbe6"
# Confusion matrix on the validation split, rendered as an annotated heatmap.
cm = confusion_matrix(y_test, y_pred_dt)
class_label = ["Anomalous", "Normal"]
df_cm = pd.DataFrame(cm, index=class_label,columns=class_label)
sns.heatmap(df_cm, annot=True, fmt='d')
plt.title("Confusion Matrix")
plt.xlabel("Predicted Label")
plt.ylabel("True Label")
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 170} colab_type="code" id="xGqt7wigHg3B" outputId="9f806f29-fd9c-498d-b302-ebf6df019634"
# Per-class precision/recall/F1 summary.
print(classification_report(y_test,y_pred_dt))
# + colab={"base_uri": "https://localhost:8080/", "height": 295} colab_type="code" id="wkWUUjGmHg3C" outputId="44a21be0-7f2f-4300-bad1-25c503836ee7"
# ROC curve for the decision tree. AUC is computed from the hard labels while
# the curve itself uses class-1 probabilities.
dt_roc_auc = roc_auc_score(y_test, y_pred_dt)
fpr, tpr, thresholds = roc_curve(y_test, dt.predict_proba(X_test)[:,1])
plt.figure()
plt.plot(fpr, tpr, label='DTree (area = %0.2f)' % dt_roc_auc)
# Diagonal = random-classifier baseline.
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
plt.savefig('DT_ROC')
plt.show()
# + [markdown] colab_type="text" id="fXmBo6f1Hg3J"
# XGB Classifier
# + colab={} colab_type="code" id="nE2alWP-ejv9"
from xgboost import XGBClassifier
from xgboost import plot_importance
# + colab={"base_uri": "https://localhost:8080/", "height": 136} colab_type="code" id="JBwo0_BWHg3K" outputId="622a2a6c-7ced-4d73-aa13-eb042d7d56e2"
# Fit a gradient-boosted tree ensemble; fixed random_state for reproducibility.
xgbc = XGBClassifier(max_depth=20,min_child_weight=1,n_estimators=500,random_state=42,learning_rate=0.2)
xgbc.fit(X_train,y_train)
# + colab={} colab_type="code" id="1mA3luB2uPQG"
# Persist the trained XGBoost model. Use a context manager so the file handle
# is flushed and closed (the original bare open() call leaked the handle).
with open("xgbc-r4.pickle.dat", 'wb') as f:
    pickle.dump(xgbc, f)
# + colab={} colab_type="code" id="prz2UBF8Hg3L"
# Predict on the held-out 20% validation split.
y_pred_xgbc= xgbc.predict(X_test)
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="tFKQ1bq8Hg3L" outputId="df491cf2-291d-4622-f4c3-8857a9969d6d"
# Accuracy on training data (optimistic) vs. the validation split.
xgbc_score_train = xgbc.score(X_train,y_train)
print("Train Prediction Score",xgbc_score_train*100)
xgbc_score_test = accuracy_score(y_test,y_pred_xgbc)
print("Test Prediction Score",xgbc_score_test*100)
# + colab={"base_uri": "https://localhost:8080/", "height": 153} colab_type="code" id="Z6hKTv8gopF3" outputId="3647dace-457e-4708-fd62-e6ac1f329ae7"
# Score the boosted model on each of the eight held-out test sets in turn.
# (Same computation and output as the original eight copy-pasted cells.)
for _features, _labels in ((test_X, test_Y), (test_X1, test_Y1),
                           (test_X2, test_Y2), (test_X3, test_Y3),
                           (test_X4, test_Y4), (test_X5, test_Y5),
                           (test_X6, test_Y6), (test_X7, test_Y7)):
    y_pred_xgbc_test = xgbc.predict(_features)
    xgbc_score_test = accuracy_score(_labels, y_pred_xgbc_test)
    print("Test Prediction Score", xgbc_score_test * 100)
# + colab={"base_uri": "https://localhost:8080/", "height": 295} colab_type="code" id="t-zwHWP_fMR_" outputId="c2d40a07-e142-4ffe-e9d2-02c24ab8ccc1"
# XGBoost's built-in feature-importance plot.
plot_importance(xgbc)
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 295} colab_type="code" id="9OVtiUmaHg3M" outputId="87771d2b-4388-4e73-f0c2-e6b2de8e6e91"
# Confusion matrix on the validation split, rendered as an annotated heatmap.
cm = confusion_matrix(y_test, y_pred_xgbc)
class_label = ["Anomalous", "Normal"]
df_cm = pd.DataFrame(cm, index=class_label,columns=class_label)
sns.heatmap(df_cm, annot=True, fmt='d')
plt.title("Confusion Matrix")
plt.xlabel("Predicted Label")
plt.ylabel("True Label")
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 170} colab_type="code" id="gAx5bI8BHg3M" outputId="59ae150b-465a-4d0b-ad35-817064d28ab7"
# Per-class precision/recall/F1 summary.
print(classification_report(y_test,y_pred_xgbc))
# + colab={"base_uri": "https://localhost:8080/", "height": 295} colab_type="code" id="LqPGLNKQHg3N" outputId="096dd751-6a9a-4c9a-c23d-9a60e4cf7c78"
# ROC curve for XGBoost. AUC is computed from the hard labels while the curve
# itself uses class-1 probabilities.
xgb_roc_auc = roc_auc_score(y_test, y_pred_xgbc)
fpr, tpr, thresholds = roc_curve(y_test, xgbc.predict_proba(X_test)[:,1])
plt.figure()
plt.plot(fpr, tpr, label='XGBoost (area = %0.2f)' % xgb_roc_auc)
# Diagonal = random-classifier baseline.
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
plt.savefig('XGB_ROC')
plt.show()
|
[03 - Results]/dos results ver 4/models/iter-2/fft_r4-i2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## UPOS
# +
# UPOS tagging setup: XLM-RoBERTa-base embeddings on UD_Russian-SynTagRus.
from corpuscula.corpus_utils import download_ud, UniversalDependencies, \
    AdjustedForSpeech
import junky
from mordl import UposTagger, FeatsTagger
BERT_MODEL_FN = 'xlm-roberta-base'
MODEL_FN = 'upos-bert_model'
SEED = 42
# max_len=0 presumably means "no truncation" of BERT inputs — see the
# BertDataset.transform() parameter comments below.
BERT_MAX_LEN, BERT_EPOCHS, BERT_BATCH_SIZE = 0, 3, 8
DEVICE = 'cuda:0'
corpus_name = 'UD_Russian-SynTagRus'
download_ud(corpus_name, overwrite=False)
corpus = UniversalDependencies(corpus_name)
#corpus = AdjustedForSpeech(corpus)
# +
# Train the UPOS tagger. The commented-out dicts below each *_params argument
# document the library defaults (kept verbatim from the original for reference).
tagger = UposTagger()
tagger.load_train_corpus(corpus.train)
tagger.load_test_corpus(corpus.dev)
_ = tagger.train(MODEL_FN, device=DEVICE, control_metric='accuracy',
                 max_epochs=None, min_epochs=0, bad_epochs=5,
                 max_grad_norm=None, tags_to_remove=None, word_emb_type='bert',
                 word_emb_path=BERT_MODEL_FN, word_transform_kwargs={
                     'max_len': BERT_MAX_LEN, 'hidden_ids': 10, 'aggregate_subtokens_op': 'absmax'
                     # BertDataset.transform() (for BERT-descendant models)
                     # params:
                     # {'max_len': 0, 'batch_size': 64, 'hidden_ids': '10',
                     # 'aggregate_hiddens_op': 'cat',
                     # 'aggregate_subtokens_op': 'absmax', 'to': junky.CPU,
                     # 'loglevel': 1}
                     # WordDataset.transform() (for other models) params:
                     # {'check_lower': True}
                 },
                 stage1_params=None,
                 # {'lr': .0001, 'betas': (0.9, 0.999), 'eps': 1e-8,
                 # 'weight_decay': 0, 'amsgrad': False,
                 # 'max_epochs': None, 'min_epochs': None,
                 # 'bad_epochs': None, 'batch_size': None,
                 # 'max_grad_norm': None}
                 stage2_params=None,
                 # {'lr': .001, 'momentum': .9, 'weight_decay': 0,
                 # 'dampening': 0, 'nesterov': False,
                 # 'max_epochs': None, 'min_epochs': None,
                 # 'bad_epochs': None, 'batch_size': None,
                 # 'max_grad_norm': None}
                 stage3_params={
                     'save_as': MODEL_FN.replace('-bert_model', '_' + BERT_MODEL_FN)
                         + f'_len{BERT_MAX_LEN}_ep{BERT_EPOCHS}_bat{BERT_BATCH_SIZE}_seed{SEED}',
                     'epochs': BERT_EPOCHS,
                     'batch_size': BERT_BATCH_SIZE,
                     'lr': 2e-5, 'num_warmup_steps': 3,
                     # {'save_as': None, 'max_epochs': 3, 'batch_size': 8,
                     # 'lr': 2e-5, 'betas': (0.9, 0.999), 'eps': 1e-8,
                     # 'weight_decay': .01, 'amsgrad': False,
                     # 'num_warmup_steps': 3, 'max_grad_norm': 1.}
                 },
                 stages=[1, 2, 3, 1, 2], save_stages=True, load_from=None,
                 learn_on_padding=True, remove_padding_intent=False,
                 seed=SEED, start_time=None, keep_embs=False,
                 rnn_emb_dim=None, cnn_emb_dim=None, cnn_kernels=range(1, 7),
                 emb_bn=True, emb_do=.2,
                 final_emb_dim=512, pre_bn=True, pre_do=.5,
                 lstm_layers=1, lstm_do=0, tran_layers=0, tran_heads=8,
                 post_bn=True, post_do=.4)
# -
# Evaluate the trained UPOS model on dev and test, then break accuracy down per tag.
res_dev = 'corpora/_dev_' + MODEL_FN.replace('_model', '.conllu')
res_test = 'corpora/_test_' + MODEL_FN.replace('_model', '.conllu')
# Reuse already-loaded embeddings from the previous tagger instance if present.
tagger = UposTagger(embs=globals()['tagger'].embs
                        if 'tagger' in globals() else
                        None)
tagger.load(MODEL_FN)
junky.clear_tqdm()
_ = tagger.predict(corpus.dev, clone_ds=True, save_to=res_dev)
_ = tagger.evaluate(corpus.dev)
_ = tagger.evaluate(corpus.dev, res_dev)
_ = tagger.predict(corpus.test, save_to=res_test)
_ = tagger.evaluate(corpus.test, clone_ds=True)
_ = tagger.evaluate(corpus.test, res_test)
corp_gold = list(corpus.test())
corp_test = list(tagger._get_corpus(res_test))
# NOTE: the comprehension deliberately rebinds `x` at each nesting level
# (sentence, then token); the innermost binding is the one used.
tags = sorted(set(x['UPOS'] for x in corp_gold
                  for x in x[0] if x['UPOS']))
# Per-label evaluation.
for tag in tags:
    print('{}: {}'.format(
        tag, tagger.evaluate(corp_gold, corp_test,
                             label=tag, log_file=None)
    ))
# ## FEATS
# +
# FEATS tagging setup: same corpus and BERT backbone as the UPOS section.
from corpuscula.corpus_utils import download_ud, UniversalDependencies, \
    AdjustedForSpeech
import junky
from mordl import FeatsTagger
BERT_MODEL_FN = 'xlm-roberta-base'
MODEL_FN = 'feats-bert_model'
SEED=42
BERT_MAX_LEN, BERT_EPOCHS, BERT_BATCH_SIZE = 0, 3, 8
DEVICE='cuda:0'
corpus_name = 'UD_Russian-SynTagRus'
download_ud(corpus_name, overwrite=False)
corpus = UniversalDependencies(corpus_name)
#corpus = AdjustedForSpeech(corpus)
# +
# Train the FEATS tagger. Differs from the UPOS run by the added CNN char
# embeddings (cnn_emb_dim=200) and UPOS-label embeddings (upos_emb_dim=200).
tagger = FeatsTagger()
tagger.load_train_corpus(corpus.train)
tagger.load_test_corpus(corpus.dev)
_ = tagger.train(MODEL_FN, device=DEVICE, control_metric='accuracy',
                 max_epochs=None, min_epochs=0, bad_epochs=5,
                 max_grad_norm=None, tags_to_remove=None, word_emb_type='bert',
                 word_emb_path=BERT_MODEL_FN, word_transform_kwargs={
                     'max_len': BERT_MAX_LEN, 'hidden_ids': 10, 'aggregate_subtokens_op': 'absmax'
                     # BertDataset.transform() (for BERT-descendant models)
                     # params:
                     # {'max_len': 0, 'batch_size': 64, 'hidden_ids': '10',
                     # 'aggregate_hiddens_op': 'cat',
                     # 'aggregate_subtokens_op': 'absmax', 'to': junky.CPU,
                     # 'loglevel': 1}
                     # WordDataset.transform() (for other models) params:
                     # {'check_lower': True}
                 },
                 stage1_params=None,
                 # {'lr': .0001, 'betas': (0.9, 0.999), 'eps': 1e-8,
                 # 'weight_decay': 0, 'amsgrad': False,
                 # 'max_epochs': None, 'min_epochs': None,
                 # 'bad_epochs': None, 'batch_size': None,
                 # 'max_grad_norm': None}
                 stage2_params=None,
                 # {'lr': .001, 'momentum': .9, 'weight_decay': 0,
                 # 'dampening': 0, 'nesterov': False,
                 # 'max_epochs': None, 'min_epochs': None,
                 # 'bad_epochs': None, 'batch_size': None,
                 # 'max_grad_norm': None}
                 stage3_params={
                     'save_as': MODEL_FN.replace('-bert_model', '_' + BERT_MODEL_FN)
                         + f'_len{BERT_MAX_LEN}_ep{BERT_EPOCHS}_bat{BERT_BATCH_SIZE}_seed{SEED}',
                     'epochs': BERT_EPOCHS,
                     'batch_size': BERT_BATCH_SIZE,
                     'lr': 2e-5, 'num_warmup_steps': 3,
                     # {'save_as': None, 'max_epochs': 3, 'batch_size': 8,
                     # 'lr': 2e-5, 'betas': (0.9, 0.999), 'eps': 1e-8,
                     # 'weight_decay': .01, 'amsgrad': False,
                     # 'num_warmup_steps': 3, 'max_grad_norm': 1.}
                 },
                 stages=[1, 2, 3, 1, 2], save_stages=True, load_from=None,
                 learn_on_padding=True, remove_padding_intent=False,
                 seed=SEED, start_time=None, keep_embs=False,
                 rnn_emb_dim=None, cnn_emb_dim=200, cnn_kernels=range(1, 7),
                 upos_emb_dim=200, emb_bn=True, emb_do=.2,
                 final_emb_dim=512, pre_bn=True, pre_do=.5,
                 lstm_layers=1, lstm_do=0, tran_layers=0, tran_heads=8,
                 post_bn=True, post_do=.4)
# -
# -
res_dev = 'corpora/_dev_' + MODEL_FN.replace('_model', '.conllu')
res_test = 'corpora/_test_' + MODEL_FN.replace('_model', '.conllu')
tagger = FeatsTagger(embs=globals()['tagger'].embs
if 'tagger' in globals() else
None)
tagger.load(MODEL_FN)
junky.clear_tqdm()
_ = tagger.predict(corpus.dev, clone_ds=True, save_to=res_dev)
_ = tagger.evaluate(corpus.dev)
_ = tagger.evaluate(corpus.dev, res_dev)
_ = tagger.predict(corpus.test, save_to=res_test)
_ = tagger.evaluate(corpus.test, clone_ds=True)
_ = tagger.evaluate(corpus.test, res_test)
corp_gold = list(corpus.test())
corp_test = list(tagger._get_corpus(res_test))
tags = sorted(set(x for x in corp_gold
for x in x[0]
for x in x['FEATS'].keys()))
for tag in tags:
print('{}: {}'.format(
tag, tagger.evaluate(corp_gold, corp_test,
feats=tag, log_file=None)
))
# ## LEMMA
#
# For lemmata, besides *BERT* word embeddings one can use *FastText*. In this case, model performance on the *SynTagRus* test dataset is just slightly worse (0.9945 vs. 0.9948, and we think it could be tuned further if needed). So, we give here training snippets for both versions of the tagger: *BERT* (next snippet) and *FastText* (see further below).
# ### *BERT* Lemmata Tagger
# +
# BERT-based lemmatizer setup; note the shorter schedule (2 BERT epochs).
from corpuscula.corpus_utils import download_ud, UniversalDependencies, \
    AdjustedForSpeech
import junky
from mordl import LemmaTagger
BERT_MODEL_FN = 'xlm-roberta-base'
MODEL_FN = 'lemma-bert_model'
SEED=42
BERT_MAX_LEN, BERT_EPOCHS, BERT_BATCH_SIZE = 0, 2, 8
DEVICE='cuda:0'
corpus_name = 'UD_Russian-SynTagRus'
download_ud(corpus_name, overwrite=False)
corpus = UniversalDependencies(corpus_name)
#corpus = AdjustedForSpeech(corpus)
# +
# Train the BERT lemmatizer. Uses default word_transform_kwargs, an RNN char
# embedding (rnn_emb_dim=384) and UPOS embeddings (upos_emb_dim=256).
tagger = LemmaTagger()
tagger.load_train_corpus(corpus.train)
tagger.load_test_corpus(corpus.dev)
_ = tagger.train(MODEL_FN, device=DEVICE, control_metric='accuracy',
                 max_epochs=None, min_epochs=0, bad_epochs=5,
                 max_grad_norm=None, tags_to_remove=None, word_emb_type='bert',
                 word_emb_path=BERT_MODEL_FN, word_transform_kwargs=None,
                 # BertDataset.transform() (for BERT-descendant models)
                 # params:
                 # {'max_len': 0, 'batch_size': 64, 'hidden_ids': '10',
                 # 'aggregate_hiddens_op': 'cat',
                 # 'aggregate_subtokens_op': 'absmax', 'to': junky.CPU,
                 # 'loglevel': 1}
                 # WordDataset.transform() (for other models) params:
                 # {'check_lower': True}
                 stage1_params=None,
                 # {'lr': .0001, 'betas': (0.9, 0.999), 'eps': 1e-8,
                 # 'weight_decay': 0, 'amsgrad': False,
                 # 'max_epochs': None, 'min_epochs': None,
                 # 'bad_epochs': None, 'batch_size': None,
                 # 'max_grad_norm': None}
                 stage2_params=None,
                 # {'lr': .001, 'momentum': .9, 'weight_decay': 0,
                 # 'dampening': 0, 'nesterov': False,
                 # 'max_epochs': None, 'min_epochs': None,
                 # 'bad_epochs': None, 'batch_size': None,
                 # 'max_grad_norm': None}
                 stage3_params={
                     'save_as': MODEL_FN.replace('-bert_model', '_' + BERT_MODEL_FN)
                         + f'_len{BERT_MAX_LEN}_ep{BERT_EPOCHS}_bat{BERT_BATCH_SIZE}_seed{SEED}',
                     'epochs': BERT_EPOCHS,
                     'batch_size': BERT_BATCH_SIZE,
                     'lr': 2e-5, 'num_warmup_steps': 6
                 },
                 # {'save_as': None, 'epochs': 3, 'batch_size': 8,
                 # 'lr': 2e-5, 'betas': (0.9, 0.999), 'eps': 1e-8,
                 # 'weight_decay': .01, 'amsgrad': False,
                 # 'num_warmup_steps': 3, 'max_grad_norm': 1.}
                 stages=[1, 2, 3, 1, 2], save_stages=True, load_from=None,
                 learn_on_padding=False, remove_padding_intent=False,
                 seed=SEED, start_time=None, keep_embs=False,
                 rnn_emb_dim=384, cnn_emb_dim=None, cnn_kernels=range(1, 7),
                 upos_emb_dim=256, emb_bn=True, emb_do=.2,
                 final_emb_dim=512, pre_bn=True, pre_do=.5,
                 lstm_layers=1, lstm_do=0, tran_layers=0, tran_heads=8,
                 post_bn=True, post_do=.4)
# -
# -
res_dev = 'corpora/_dev_' + MODEL_FN.replace('_model', '.conllu')
res_test = 'corpora/_test_' + MODEL_FN.replace('_model', '.conllu')
tagger = LemmaTagger(embs=globals()['tagger'].embs
if 'tagger' in globals() else
None)
tagger.load(MODEL_FN)
junky.clear_tqdm()
_ = tagger.predict(corpus.dev, clone_ds=True, save_to=res_dev)
_ = tagger.evaluate(corpus.dev)
_ = tagger.evaluate(corpus.dev, res_dev)
_ = tagger.predict(corpus.test, save_to=res_test)
_ = tagger.evaluate(corpus.test, clone_ds=True)
_ = tagger.evaluate(corpus.test, res_test)
# ### *FastText* Lemmata Tagger
# **NB:** For this task, we use Russian *FastText* embeddings provided by *Facebook*: [cc.ru.300.bin.gz](https://dl.fbaipublicfiles.com/fasttext/vectors-crawl/cc.ru.300.bin.gz). We highly recommend them because it delivers the highest evaluation scores. Also, embeddings provided by *DeepPavlov* ([ft_native_300_ru_wiki_lenta_nltk_wordpunct_tokenize.bin](http://files.deeppavlov.ai/embeddings/ft_native_300_ru_wiki_lenta_nltk_wordpunct_tokenize/ft_native_300_ru_wiki_lenta_nltk_wordpunct_tokenize.bin)) could be used, too. They deliver just slightly worse model performance.
#
# One could also try embeddings from *RusVectores*. Long ago they were the worst choice because of inappropriate preprocessing, but that now seems to have been corrected. We didn't try them, but if you definitely want a *FastText* model, embeddings from *RusVectores* are also worth checking.
#
# If you want your model to achieve high scores, use embeddings built without lemmatization, punctuation removal, or any other archaic preprocessing. Embeddings of words with part-of-speech tags appended at the end are also useless (for obvious reasons).
# +
# FastText-based lemmatizer setup (see the notes above on embedding choice).
from corpuscula.corpus_utils import download_ud, UniversalDependencies, \
    AdjustedForSpeech
import junky
from mordl import LemmaTagger
# Path to Facebook's pretrained Russian FastText binary (cc.ru.300.bin).
FT_MODEL_FN = '../mordl/cc.ru.300.bin'
MODEL_FN = 'lemma-ft_model'
SEED=42
DEVICE='cuda:0'
corpus_name = 'UD_Russian-SynTagRus'
download_ud(corpus_name, overwrite=False)
corpus = UniversalDependencies(corpus_name)
#corpus = AdjustedForSpeech(corpus)
# +
# Train the FastText lemmatizer. No BERT fine-tuning stage here: only
# stages [1, 2] are run and stage3_params stays None.
tagger = LemmaTagger()
tagger.load_train_corpus(corpus.train)
tagger.load_test_corpus(corpus.dev)
_ = tagger.train(MODEL_FN, device=DEVICE, control_metric='accuracy',
                 max_epochs=None, min_epochs=0, bad_epochs=5,
                 max_grad_norm=None, tags_to_remove=None, word_emb_type='ft',
                 word_emb_path=FT_MODEL_FN, word_transform_kwargs=None,
                 # BertDataset.transform() (for BERT-descendant models)
                 # params:
                 # {'max_len': 0, 'batch_size': 64, 'hidden_ids': '10',
                 # 'aggregate_hiddens_op': 'cat',
                 # 'aggregate_subtokens_op': 'absmax', 'to': junky.CPU,
                 # 'loglevel': 1}
                 # WordDataset.transform() (for other models) params:
                 # {'check_lower': True}
                 stage1_params=None,
                 # {'lr': .0001, 'betas': (0.9, 0.999), 'eps': 1e-8,
                 # 'weight_decay': 0, 'amsgrad': False,
                 # 'max_epochs': None, 'min_epochs': None,
                 # 'bad_epochs': None, 'batch_size': None,
                 # 'max_grad_norm': None}
                 stage2_params=None,
                 # {'lr': .001, 'momentum': .9, 'weight_decay': 0,
                 # 'dampening': 0, 'nesterov': False,
                 # 'max_epochs': None, 'min_epochs': None,
                 # 'bad_epochs': None, 'batch_size': None,
                 # 'max_grad_norm': None}
                 stage3_params=None,
                 # {'save_as': None, 'epochs': 3, 'batch_size': 8,
                 # 'lr': 2e-5, 'betas': (0.9, 0.999), 'eps': 1e-8,
                 # 'weight_decay': .01, 'amsgrad': False,
                 # 'num_warmup_steps': 3, 'max_grad_norm': 1.}
                 stages=[1, 2], save_stages=True, load_from=None,
                 learn_on_padding=False, remove_padding_intent=False,
                 seed=SEED, start_time=None, keep_embs=False,
                 rnn_emb_dim=300, cnn_emb_dim=None, cnn_kernels=range(1, 7),
                 upos_emb_dim=200, emb_bn=True, emb_do=.2,
                 final_emb_dim=512, pre_bn=True, pre_do=.5,
                 lstm_layers=1, lstm_do=0, tran_layers=0, tran_heads=8,
                 post_bn=True, post_do=.4)
# -
# -
res_dev = 'corpora/_dev_' + MODEL_FN.replace('_model', '.conllu')
res_test = 'corpora/_test_' + MODEL_FN.replace('_model', '.conllu')
tagger = LemmaTagger(embs=globals()['tagger'].embs
if 'tagger' in globals() else
None)
tagger.load(MODEL_FN)
junky.clear_tqdm()
_ = tagger.predict(corpus.dev, clone_ds=True, save_to=res_dev)
_ = tagger.evaluate(corpus.dev)
_ = tagger.evaluate(corpus.dev, res_dev)
_ = tagger.predict(corpus.test, save_to=res_test)
_ = tagger.evaluate(corpus.test, clone_ds=True)
_ = tagger.evaluate(corpus.test, res_test)
# ## CoNLL18 Validation
# +
# Score the full UPOS -> FEATS -> LEMMA pipeline with the official
# CoNLL-2018 shared-task evaluator.
from corpuscula.corpus_utils import download_ud, get_ud_test_path
import junky
from mordl import UposTagger, FeatsTagger, LemmaTagger, conll18_ud_eval
corpus_name = 'UD_Russian-SynTagRus'
download_ud(corpus_name, overwrite=False)
DEVICE = 'cuda:0'
corpus_gold = get_ud_test_path(corpus_name)
corpus_test = 'corpora/_test_tagged.conllu'
# -
# Release the previous tagger before loading three models at once.
del tagger
tagger_u = UposTagger()
tagger_u.load('upos-bert_model', device=DEVICE, dataset_device=DEVICE)
tagger_f = FeatsTagger()
tagger_f.load('feats-bert_model', device=DEVICE, dataset_device=DEVICE)
tagger_l = LemmaTagger()
tagger_l.load('lemma-bert_model', device=DEVICE, dataset_device=DEVICE)
# +
# Chain the three taggers over the gold test file and save the tagged output.
_ = tagger_l.predict(
    tagger_f.predict(
        tagger_u.predict(corpus_gold)
    ), save_to=corpus_test
)
del tagger_u, tagger_f, tagger_l
# -
conll18_ud_eval(corpus_gold, corpus_test)
# ## MISC:NE
#
# Note: the corpora we used are proprietary (and of low quality). You have to find another corpora.
# +
# Pre-annotate the NER corpora with UPOS and FEATS, which the NE tagger
# below consumes as extra input features.
from mordl import UposTagger, FeatsTagger
DEVICE = 'cuda:0'
tagger_u = UposTagger()
tagger_u.load('upos-bert_model', device=DEVICE, dataset_device=DEVICE)
tagger_f = FeatsTagger()
tagger_f.load('feats-bert_model', device=DEVICE, dataset_device=DEVICE)
PREFIX = 'ner-old-'
# Pair each raw corpus file with its annotated output path.
for corpora in zip([f'corpora/{PREFIX}train.conllu',
                    f'corpora/{PREFIX}dev.conllu',
                    f'corpora/{PREFIX}test.conllu'],
                   [f'corpora/{PREFIX}train_upos_feats.conllu',
                    f'corpora/{PREFIX}dev_upos_feats.conllu',
                    f'corpora/{PREFIX}test_upos_feats.conllu']):
    tagger_f.predict(
        tagger_u.predict(corpora[0]), save_to=corpora[1]
    )
del tagger_u, tagger_f
# +
# Named-entity (MISC:NE) tagger setup over the pre-annotated NER corpora.
import junky
from mordl import NeTagger
BERT_MODEL_FN = 'xlm-roberta-base'
MODEL_FN = 'misc-ne-bert_model'
SEED=42
BERT_MAX_LEN, BERT_EPOCHS, BERT_BATCH_SIZE = 0, 2, 8
DEVICE='cuda:0'
PREFIX = 'ner-old-'
corpus_train = f'corpora/{PREFIX}train_upos_feats.conllu'
corpus_dev = f'corpora/{PREFIX}dev_upos_feats.conllu'
corpus_test = f'corpora/{PREFIX}test_upos_feats.conllu'
# +
# Train the NE tagger. Differs from the UPOS run by a higher stage-3 learning
# rate (4e-5, 1 warmup step) and UPOS-label embeddings (upos_emb_dim=300).
tagger = NeTagger()
tagger.load_train_corpus(corpus_train)
tagger.load_test_corpus(corpus_dev)
_ = tagger.train(MODEL_FN, device=DEVICE, control_metric='accuracy',
                 max_epochs=None, min_epochs=0, bad_epochs=5,
                 max_grad_norm=None, tags_to_remove=None, word_emb_type='bert',
                 word_emb_path=BERT_MODEL_FN, word_transform_kwargs={
                     'max_len': BERT_MAX_LEN, 'hidden_ids': 10, 'aggregate_subtokens_op': 'absmax'
                     # BertDataset.transform() (for BERT-descendant models)
                     # params:
                     # {'max_len': 0, 'batch_size': 64, 'hidden_ids': '10',
                     # 'aggregate_hiddens_op': 'cat',
                     # 'aggregate_subtokens_op': 'absmax', 'to': junky.CPU,
                     # 'loglevel': 1}
                     # WordDataset.transform() (for other models) params:
                     # {'check_lower': True}
                 },
                 stage1_params=None,
                 # {'lr': .0001, 'betas': (0.9, 0.999), 'eps': 1e-8,
                 # 'weight_decay': 0, 'amsgrad': False,
                 # 'max_epochs': None, 'min_epochs': None,
                 # 'bad_epochs': None, 'batch_size': None,
                 # 'max_grad_norm': None}
                 stage2_params=None,
                 # {'lr': .001, 'momentum': .9, 'weight_decay': 0,
                 # 'dampening': 0, 'nesterov': False,
                 # 'max_epochs': None, 'min_epochs': None,
                 # 'bad_epochs': None, 'batch_size': None,
                 # 'max_grad_norm': None}
                 stage3_params={
                     'save_as': MODEL_FN.replace('-bert_model', '_' + BERT_MODEL_FN)
                         + f'_len{BERT_MAX_LEN}_ep{BERT_EPOCHS}_bat{BERT_BATCH_SIZE}_seed{SEED}',
                     'epochs': BERT_EPOCHS,
                     'batch_size': BERT_BATCH_SIZE,
                     'lr': 4e-5, 'num_warmup_steps': 1,
                     # {'save_as': None, 'max_epochs': 3, 'batch_size': 8,
                     # 'lr': 2e-5, 'betas': (0.9, 0.999), 'eps': 1e-8,
                     # 'weight_decay': .01, 'amsgrad': False,
                     # 'num_warmup_steps': 0, 'max_grad_norm': 1.}
                 },
                 stages=[1, 2, 3, 1, 2], save_stages=True, load_from=None,
                 learn_on_padding=True, remove_padding_intent=False,
                 seed=SEED, start_time=None, keep_embs=False,
                 rnn_emb_dim=None, cnn_emb_dim=None, cnn_kernels=range(1, 7),
                 upos_emb_dim=300, emb_bn=True, emb_do=.2,
                 final_emb_dim=512, pre_bn=True, pre_do=.5,
                 lstm_layers=1, lstm_do=0, tran_layers=0, tran_heads=8,
                 post_bn=True, post_do=.4)
# -
# -
# Evaluate the trained NE tagger on dev and test, then per entity label.
res_dev = 'corpora/_dev_' + MODEL_FN.replace('_model', '.conllu')
res_test = 'corpora/_test_' + MODEL_FN.replace('_model', '.conllu')
# Reuse already-loaded embeddings from the previous tagger instance if present.
tagger = NeTagger(embs=globals()['tagger'].embs
                      if 'tagger' in globals() else
                      None)
tagger.load(MODEL_FN)
junky.clear_tqdm()
_ = tagger.predict(corpus_dev, clone_ds=True, save_to=res_dev)
_ = tagger.evaluate(corpus_dev)
_ = tagger.evaluate(corpus_dev, res_dev)
_ = tagger.predict(corpus_test, save_to=res_test)
_ = tagger.evaluate(corpus_test, clone_ds=True)
_ = tagger.evaluate(corpus_test, res_test)
corp_gold = list(tagger._get_corpus(corpus_test, asis=True))
corp_test = list(tagger._get_corpus(res_test))
# Collect all NE labels present in MISC. NOTE: `x` is deliberately rebound
# at each nesting level (sentence, then token).
tags = sorted(set(x['MISC'].get('NE')
                  for x in corp_gold for x in x[0]
                  if x['MISC'].get('NE')))
# Per-label evaluation.
for tag in tags:
    print('{}: {}'.format(
        tag, tagger.evaluate(corp_gold, corp_test,
                             label=tag, log_file=None)
    ))
# +
# Free the model and force garbage collection to release GPU/host memory.
import gc
del tagger
gc.collect()
# -
|
examples/mordl.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/graviraja/100-Days-of-NLP/blob/applications%2Fclassification/applications/classification/ner_tagging/NER%20tagging%20with%20Spacy.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="EdFsIp6uHUrK" colab_type="text"
# # NER using Spacy and Custom Training
# + [markdown] id="11Hvx03FHSJG" colab_type="text"
# ### Imports
# + id="9pS3sNGa4QnU" colab_type="code" colab={}
import random
import spacy
from spacy import displacy
from spacy.util import minibatch, compounding
# + [markdown] id="AexvzGYxHc-j" colab_type="text"
# Load the small english model
# + id="_41iBA7P5JTD" colab_type="code" colab={}
# Load spaCy's small English pipeline (includes a pretrained NER component).
nlp = spacy.load("en_core_web_sm")
# + [markdown] id="O5DHLOoVHgd_" colab_type="text"
# The model comes with three pipeline components:
#
# - tagger (Parts-of-Speech)
# - parser (Dependency Parsing)
# - ner (Named Entity Recognition)
#
# The focus of this notebook will be on `ner`
# + id="OhbsnQfE5OZP" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="bc0d7dd5-714d-477f-c974-9608aff50076"
# List the pipeline component names; 'ner' is the one trained below.
nlp.pipe_names
# + [markdown] id="Tc61v8x5IJoK" colab_type="text"
# ### Spacy NER
# + [markdown] id="qYO8iKPdIjsP" colab_type="text"
# Spacy provides ner tagging support for the following entities. Learn more about these [here](https://spacy.io/api/annotation#named-entities)
#
# |NER | Description |
# |---------|-----------------------------|
# |PERSON| People, including fictional.|
# |NORP| Nationalities or religious or political groups.|
# |FAC| Buildings, airports, highways, bridges, etc.|
# |ORG| Companies, agencies, institutions, etc.|
# |GPE| Countries, cities, states.|
# |LOC| Non-GPE locations, mountain ranges, bodies of water.|
# |PRODUCT| Objects, vehicles, foods, etc. (Not services.)|
# |EVENT| Named hurricanes, battles, wars, sports events, etc.|
# |WORK_OF_ART| Titles of books, songs, etc.|
# |LAW| Named documents made into laws.|
# |LANGUAGE| Any named language.|
# |DATE| Absolute or relative dates or periods.|
# |TIME| Times smaller than a day.|
# |PERCENT| Percentage, including ”%“.|
# |MONEY| Monetary values, including unit.|
# |QUANTITY| Measurements, as of weight or distance.|
# |ORDINAL| “first”, “second”, etc.|
# |CARDINAL| Numerals that do not fall under another type.|
#
# + id="Zq0vH25U5SG0" colab_type="code" colab={}
doc = nlp("Australia wants to force Facebook and Google to pay media companies for news")
# + id="TSEN3_1w5Yne" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 52} outputId="b3dd9402-b527-40a5-e4c0-56dcdd6e91bc"
# Each entity span exposes its text, character offsets, and label.
for ent in doc.ents:
    print(ent.text, ent.start_char, ent.end_char, ent.label_)
# + [markdown] id="R7KbHbqvJz1C" colab_type="text"
# Visualization using [displacy](https://explosion.ai/demos/displacy-ent)
# + id="p2fhJDAN5fG_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 52} outputId="bfa966bb-1d88-4e0c-b25d-71f1e2a36076"
displacy.render(nlp(doc.text), style="ent", jupyter=True)
# + id="8JXxj8ypKUuF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 52} outputId="daf84355-0a16-45b0-e529-1a6063456587"
doc = nlp("A little less than a decade later, dozens of self-driving startups have cropped up while automakers around the world clamor")
displacy.render(nlp(doc.text), style="ent", jupyter=True)
# + id="URGAEcAcKj1U" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 52} outputId="9c97b30f-a16c-4505-be50-1e95885a6ea7"
doc = nlp("I am working at Microsoft from 27/07/2017")
displacy.render(nlp(doc.text), style="ent", jupyter=True)
# + [markdown] id="2g9X3naEKLr7" colab_type="text"
# ### Custom data
# + id="tg8uR4iI5udo" colab_type="code" colab={}
# Domain-specific (banking) sentences: the stock model finds no entities here,
# motivating the custom training below.
doc = nlp("I do not have money to pay my credit card account")
# + id="p-uvfrBE59cR" colab_type="code" colab={}
for ent in doc.ents:
    print(ent.text, ent.start_char, ent.end_char, ent.label_)
# + [markdown] id="L00Us5yGK9HM" colab_type="text"
# Not able to predict any entity
# + id="DckDU3B06B0x" colab_type="code" colab={}
doc = nlp("what is the process to open a new saving account")
# + id="-z7x5Ibb6Hm6" colab_type="code" colab={}
for ent in doc.ents:
    print(ent.text, ent.start_char, ent.end_char, ent.label_)
# + [markdown] id="KfKoxjIuLBLG" colab_type="text"
# ### Custom Training
# + [markdown] id="92wn1gf0LNjF" colab_type="text"
# Let's create 2 new entities called **`ACTIVITY`** and **`SERVICE`** in a specific domain data (bank).
#
# There are many tools available for creating data for training the NER model. Few of them are:
#
# - [prodigy](https://prodi.gy/)
# - [doccano](https://github.com/doccano/doccano)
# - [inception](https://inception-project.github.io/)
# + id="jJ7TYMtn6MgT" colab_type="code" colab={}
# Training examples: (text, {"entities": [(start, end, label), ...]}) with
# end-exclusive character offsets that must align to token boundaries,
# otherwise spaCy discards the span.
train = [
    # FIX: "transfer" occupies chars 6-14 (end-exclusive); (6, 13) clipped it.
    ("Money transfer from my checking account is not working", {"entities": [(6, 14, "ACTIVITY"), (23, 39, "SERVICE")]}),
    ("I want to check balance in my savings account", {"entities": [(16, 23, "ACTIVITY"), (30, 45, "SERVICE")]}),
    ("I suspect a fraud in my credit card account", {"entities": [(12, 17, "ACTIVITY"), (24, 35, "SERVICE")]}),
    ("I am here for opening a new savings account", {"entities": [(14, 21, "ACTIVITY"), (28, 43, "SERVICE")]}),
    # FIX: "mortage" -> "mortgage"; the offsets (5, 13) and (20, 30) only line
    # up with the correctly spelled text.
    ("Your mortgage is in delinquent status", {"entities": [(20, 30, "ACTIVITY"), (5, 13, "SERVICE")]}),
    ("Your credit card is in past due status", {"entities": [(23, 31, "ACTIVITY"), (5, 16, "SERVICE")]}),
    ("My loan account is still not approved and funded", {"entities": [(25, 37, "ACTIVITY"), (3, 15, "SERVICE"), (42, 48, "ACTIVITY")]}),
    ("How do I open a new loan account", {"entities": [(9, 13, "ACTIVITY"), (20, 32, "SERVICE")]}),
    ("what are the charges on Investment account", {"entities": [(13, 20, "ACTIVITY"), (24, 42, "SERVICE")]}),
    ("Can you explain late charges on my credit card", {"entities": [(21, 28, "ACTIVITY"), (35, 46, "SERVICE")]}),
    ("I want to open a new loan account", {"entities": [(10, 14, "ACTIVITY"), (21, 33, "SERVICE")]}),
    ("Can you help updating payment on my credit card", {"entities": [(22, 29, "ACTIVITY"), (36, 47, "SERVICE")]}),
    ("When is the payment due date on my card", {"entities": [(12, 19, "ACTIVITY"), (35, 39, "SERVICE")]})
]
# + id="RNe9Wefp_893" colab_type="code" colab={}
# Fetch the NER component from the loaded pipeline.
ner = nlp.get_pipe("ner")
# + id="hkP92ezVAA0h" colab_type="code" colab={}
# Register every entity label that occurs in the training annotations so the
# model can predict them.
for _, annotations in train:
    for start, end, label in annotations.get("entities"):
        ner.add_label(label)
# + id="hRkw6BT1AJmM" colab_type="code" colab={}
# Everything except 'ner' is frozen during training.
disable_pipes = [name for name in nlp.pipe_names if name != 'ner']
# + [markdown] id="u1uqSqUqLlWM" colab_type="text"
# Training
# + id="FZYeaV0BA8Vj" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="3b281ae9-ed94-42dc-9cd0-abd31895a6a5"
with nlp.disable_pipes(*disable_pipes):
    # Resume training so the existing weights are the starting point.
    optimizer = nlp.resume_training()
    # 100 passes over the (shuffled) training data.
    for epoch in range(100):
        random.shuffle(train)
        losses = {}
        # Minibatch sizes compound from 1.0 toward 4.0 by a factor of 1.001.
        for batch in minibatch(train, size=compounding(1.0, 4.0, 1.001)):
            texts, annotations = zip(*batch)
            nlp.update(texts,
                       annotations,
                       drop=0.5,
                       losses=losses,
                       sgd=optimizer)
        print(f"Losses: {losses}")
# + id="4I3tlIwoCGns" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 691} outputId="71484b85-0db3-458e-fefa-9ae03e44e550"
# Compare gold annotations against the retrained model's predictions.
# NOTE(review): 'entites' in the printed label below is a typo inside a
# runtime string; left unchanged here to preserve output.
for text, entities in train:
    doc = nlp(text)
    print(f"Text: {text} | entites: {entities}")
    print(f"\tActual: {[(text[ent[0]: ent[1]], ent[2]) for ent in entities['entities']]}")
    print(f"\tPredicted: {[(ent.text, ent.label_) for ent in doc.ents]}")
# + [markdown] id="60iAuylDMNIu" colab_type="text"
# As we can see from the results, the model is decent enough if not 100% with only small amount of training
# + id="Afkx50YtEDIw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 466} outputId="0a7f4bdb-a474-4f9d-f27d-69bd5d2a4b2e"
# visualize using displacy
for text, _ in train:
    doc = nlp(text)
    displacy.render(nlp(doc.text), style="ent", jupyter=True)
# + [markdown] id="gIow5pHAMhD7" colab_type="text"
# Let's see how it predicts on unseen data
# + id="5kyx0N-wFuof" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 52} outputId="02573b45-0076-4edb-b173-65e6bcd94055"
doc = nlp("My credit card payment will be delayed")
displacy.render(nlp(doc.text), style="ent", jupyter=True)
# + id="HuI-uKGFGPlN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 52} outputId="a734bdfe-7e5d-4666-e94a-8916aff09544"
doc = nlp("what are the charges on credit card late payment in Bank of America")
displacy.render(nlp(doc.text), style="ent", jupyter=True)
# + id="YC_B7xoJGgn-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 106} outputId="3916ecc9-5cde-415e-99d8-e4770d7e64d8"
doc = nlp("Australia wants to force Facebook and Google to pay media companies for news")
displacy.render(nlp(doc.text), style="ent", jupyter=True)
# + [markdown] id="psUVrDGIMm4_" colab_type="text"
# As we can see that it is not able to predict the entities which were done prior to training. This is due to [pseudo-rehearsal-catastrophic-forgetting](https://explosion.ai/blog/pseudo-rehearsal-catastrophic-forgetting). In order to fix this, we need to train the model on complete data
|
applications/classification/ner_tagging/NER tagging with Spacy.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import h2o
# Set seed
np.random.seed(123)
# Start (or connect to) a local H2O cluster.
h2o.init()
# Number of synthetic people to generate.
N = 1000
bloodTypes = ['A','O','AB','B']
# NOTE(review): this first frame is only displayed via d.head(5); `d` is
# rebuilt below as a dict of all generated columns.
d = pd.DataFrame({'id':[i for i in range(1,1001)],'bloodType':[bloodTypes[i%len(bloodTypes)] for i in range(1,1001)]})
d.head(5)
# Skewed blood-type mix (3/8 A, 3/8 O, 1/8 AB, 1/8 B), tiled out to N entries.
bloodTypes = ['A','A','A','O','O','O','AB','B']
bloodTypes = bloodTypes*(round(N/8))
# Ages uniform in [18, 65).
ages = np.random.randint(low=18,high=65,size=N)
ages[:5]
# Healthy-eating score ~ N(5, 2), clipped to [0, 9], rounded to ints.
healthyEating = np.random.normal(loc=5,scale=2,size=N)
healthyEating[healthyEating<0] = 0
healthyEating[healthyEating>9] = 9
healthyEating = healthyEating.round().astype(int)
np.bincount(healthyEating.astype(int))
# Lifestyle score ~ N(5, 2); under-30s get a +1 boost, then clip and round.
lifeStyle = np.random.normal(loc=5,scale=2,size=N)
lifeStyle[ages<30]=lifeStyle[ages<30]+1
lifeStyle[lifeStyle<0] = 0
lifeStyle[lifeStyle>9] = 9
lifeStyle=lifeStyle.round().astype(int)
# Income: quadratic in age plus score effects and noise, rounded to the
# nearest hundred (decimals=-2).
income = ((ages*3)**2) + 20000 #Base salary
income = income + healthyEating*500
income = income - lifeStyle*300
income = income + np.random.randint(low=0,high=5000,size=N)
income = income.round(decimals=-2).astype(int)
income[:5]
# +
d = {'bloodType': bloodTypes, 'age': ages,'healthyEating': healthyEating,
     'lifeStyle':lifeStyle,'income':income}
# -
# Push the pandas frame into the H2O cluster under the name 'people'.
df = pd.DataFrame(data=d)
people = h2o.H2OFrame(df,destination_frame='people')
|
Week 2 - Artificial Data Sets.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import numpy as np
import cirq
from cirq.contrib.svg import SVGCircuit
# single-qubit rotations
# Eight evenly spaced exponents in [0, 7/4] (multiples of a quarter turn).
exponents = np.linspace(0, 7/4, 8)
exponents
import itertools
# 8 x 8 = 64 PhasedXZ gates: a fixed x_exponent of 0.5 combined with every
# (axis_phase_exponent, z_exponent) pair from `exponents`.
SINGLE_QUBIT_GATES = [
    cirq.PhasedXZGate(x_exponent=0.5, z_exponent=z, axis_phase_exponent=a)
    for a, z in itertools.product(exponents, repeat=2)
]
SINGLE_QUBIT_GATES[:10], '...'
# +
# random two-qubit circuit
import cirq_google as cg
from cirq.experiments import random_quantum_circuit_generation as rqcg
q0, q1 = cirq.LineQubit.range(2)
# Four cycles alternating random single-qubit layers with sqrt(iSWAP).
circuit = rqcg.random_rotations_between_two_qubit_circuit(
    q0, q1,
    depth=4,
    two_qubit_op_factory=lambda a, b, _: cirq.SQRT_ISWAP(a, b),
    single_qubit_gates=SINGLE_QUBIT_GATES
)
SVGCircuit(circuit)
# -
# long circuits to truncate
MAX_DEPTH = 100
N_CIRCUITS = 10
# Generate each random circuit once at full depth; shorter versions are
# obtained later by truncation, so all depths share a common prefix.
circuits = [
    rqcg.random_rotations_between_two_qubit_circuit(
        q0, q1,
        depth=MAX_DEPTH,
        two_qubit_op_factory=lambda a, b, _: cirq.SQRT_ISWAP(a, b),
        single_qubit_gates=SINGLE_QUBIT_GATES)
    for _ in range(N_CIRCUITS)
]
# length to truncate
# Cycle depths 1, 10, 19, ... up to MAX_DEPTH.
cycle_depths = np.arange(1, MAX_DEPTH + 1, 9)
cycle_depths
# +
pure_sim = cirq.Simulator()
# Pauli Error. If there is an error, it is either X, Y, or Z
# with probability E_PAULI / 3
E_PAULI = 5e-3
noisy_sim = cirq.DensityMatrixSimulator(noise=cirq.depolarize(E_PAULI))
# These two qubit circuits have 2^2 = 4 probabilities
DIM = 4
records = []
for cycle_depth in cycle_depths:
    for circuit_i, circuit in enumerate(circuits):
        # Truncate the long circuit to the requested cycle_depth
        # (each cycle contributes two moments, plus one extra moment —
        # presumably the final single-qubit layer; hence 2*d + 1).
        circuit_depth = cycle_depth * 2 + 1
        assert circuit_depth <= len(circuit)
        trunc_circuit = circuit[:circuit_depth]
        # Pure-state simulation
        psi = pure_sim.simulate(trunc_circuit)
        psi = psi.final_state_vector
        pure_probs = np.abs(psi)**2
        # Noisy execution: sample 10k shots of the measured circuit and turn
        # the outcomes into an empirical distribution over the 4 bitstrings.
        meas_circuit = trunc_circuit + cirq.measure(q0, q1)
        sampled_inds = noisy_sim.sample(meas_circuit, repetitions=10_000).values[:,0]
        sampled_probs = np.bincount(sampled_inds, minlength=DIM) / len(sampled_inds)
        # Save the results
        records += [{
            'circuit_i': circuit_i,
            'cycle_depth': cycle_depth,
            'circuit_depth': circuit_depth,
            'pure_probs': pure_probs,
            'sampled_probs': sampled_probs,
        }]
        print('.', end='', flush=True)
# -
# -
# Augment each record with the XEB fit quantities:
#   e_u: sum of squared pure probabilities,
#   u_u: uniform baseline (1/DIM),
#   m_u: overlap of the measured and pure distributions.
for record in records:
    pure = record['pure_probs']
    record.update(
        e_u=np.sum(pure ** 2),
        u_u=np.sum(pure) / DIM,
        m_u=np.sum(pure * record['sampled_probs']),
    )
# +
import pandas as pd
df = pd.DataFrame(records)
# Regressors for the least-squares fidelity fit: y = m_u - u_u against
# x = e_u - u_u; the fitted slope (sum(xy)/sum(x^2), computed below) is the
# circuit fidelity.
df['y'] = df['m_u'] - df['u_u']
df['x'] = df['e_u'] - df['u_u']
df['numerator'] = df['x'] * df['y']
df['denominator'] = df['x'] ** 2
df.head()
# +
# plot the linear relationship and least squares fit
# %matplotlib inline
from matplotlib import pyplot as plt
# Color by cycle depth
import seaborn as sns
colors = sns.cubehelix_palette(n_colors=len(cycle_depths))
colors = {k: colors[i] for i, k in enumerate(cycle_depths)}
# Line handles collected from inside the groupby.apply callback (module-level
# list mutated via `global`) so a legend can be built afterwards.
_lines = []
def per_cycle_depth(df):
    """Fit fidelity = sum(xy)/sum(x^2) for one cycle depth, plot the fit line
    and scatter, and return the fidelity as a one-row Series."""
    fid_lsq = df['numerator'].sum() / df['denominator'].sum()
    cycle_depth = df.name  # groupby key for this sub-frame
    xx = np.linspace(0, df['x'].max())
    l, = plt.plot(xx, fid_lsq*xx, color=colors[cycle_depth])
    plt.scatter(df['x'], df['y'], color=colors[cycle_depth])
    global _lines
    _lines += [l]  # for legend
    return pd.Series({'fidelity': fid_lsq})
fids = df.groupby('cycle_depth').apply(per_cycle_depth).reset_index()
plt.xlabel(r'$e_U - u_U$', fontsize=18)
plt.ylabel(r'$m_U - u_U$', fontsize=18)
# Show only the shallowest and deepest depths in the legend.
_lines = np.asarray(_lines)
plt.legend(_lines[[0,-1]], cycle_depths[[0,-1]], loc='best', title='Cycle depth')
plt.tight_layout()
# +
# fidelities
# Fitted fidelity per cycle depth, overlaid with the analytic prediction
# from the known depolarizing noise model.
plt.plot(
    fids['cycle_depth'],
    fids['fidelity'],
    marker='o',
    label='Least Squares')
xx = np.linspace(0, fids['cycle_depth'].max())
# In XEB, we extract the depolarizing fidelity, which is
# related to (but not equal to) the Pauli error.
# For the latter, an error involves doing X, Y, or Z with E_PAULI/3
# but for the former, an error involves doing I, X, Y, or Z with e_depol/4
e_depol = E_PAULI / (1 - 1/DIM**2)
# The additional factor of four in the exponent is because each layer
# involves two moments of two qubits (so each layer has four applications
# of a single-qubit single-moment depolarizing channel).
plt.plot(xx, (1-e_depol)**(4*xx), label=r'$(1-\mathrm{e\_depol})^{4d}$')
plt.ylabel('Circuit fidelity', fontsize=18)
plt.xlabel('Cycle Depth $d$', fontsize=18)
plt.legend(loc='best')
plt.yscale('log')
plt.tight_layout()
# +
from cirq.experiments.xeb_fitting import fit_exponential_decays
# Ordinarily, we'd use this function to fit curves for multiple pairs.
# We add our qubit pair as a column.
fids['pair'] = [(q0, q1)] * len(fids)
fit_df = fit_exponential_decays(fids)
fit_row = fit_df.iloc[0]
# Compare the fitted per-layer fidelity to the noise model's prediction.
print(f"Noise model fidelity: {(1-e_depol)**4:.3e}")
print(f"XEB layer fidelity: {fit_row['layer_fid']:.3e} +- {fit_row['layer_fid_std']:.2e}")
# -
|
xeb/xeb-theory.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
df = pd.read_csv('survey_results_public.csv')
#df.head()
# -
age_df = df[['Respondent', 'Age', 'Age1stCode', 'YearsCode', 'YearsCodePro']].copy()
#creates dataframe with relevant columns
# +
##Preparing the Data
#Values like 'More than 50 years' and 'Older than 85' have less relevance for someone just entering
#the industry and with no means of estimating their true value these are better left out
#
#Similarly with 'Less than 1 year', whilst a lot more relevant there is no way of calculating the
#exact value between 0 and 1 so a reasonable choice of 0.5 will be used.
# +
age_df = age_df[age_df['YearsCode'] != 'More than 50 years']
age_df = age_df[age_df['YearsCodePro'] != 'More than 50 years']
age_df = age_df[age_df['Age1stCode'] != 'Older than 85']
#Dropping out the over 50 years experience as we are looking at how to enter the workplace and with no means to
#calculate age accurately its better to remove rather than guess
age_df['YearsCodePro'] = age_df['YearsCodePro'].replace('Less than 1 year', '0.5')
age_df['YearsCode'] = age_df['YearsCode'].replace('Less than 1 year', '0.5')
age_df['Age1stCode'] = age_df['Age1stCode'].replace('Younger than 5 years', '4')
#Replace less than 1/5 years with a realistic value. Its reasonable to assume you probably
#weren't coding when you were 3 and won't drastically change a large number in a meaningful way
# FIX (performance/idiom): the original iterated rows one at a time with
# .iloc to rescale implausible ages; a vectorized boolean-mask assignment
# does the same work in one pass. NaN ages compare False in the mask and
# are left untouched, exactly as in the row loop.
overage = age_df['Age'] >= 118
age_df.loc[overage, 'Age'] *= 0.1
#if age is accidentally put in as older than the oldest living person then an assumption of a
#missing decimal point is made
# -
age_df[['Age1stCode','YearsCodePro','YearsCode']] = age_df[['Age1stCode','YearsCodePro','YearsCode']].apply(pd.to_numeric)
#converts all object values to float
# Positional columns used below: 1 = 'Age', 2 = 'Age1stCode', 3 = 'YearsCode'
# (order fixed by the column selection when age_df was created).
for age in range(len(age_df['Age'])):
    if pd.isna(age_df.iloc[age,1]):
        # Missing Age: estimate as Age1stCode + YearsCode, except when
        # Age1stCode is already implausibly high (>= 85), in which case it is
        # used directly as the age.
        if age_df.iloc[age, 2] >=85:
            age_df.iloc[age,1] = age_df.iloc[age,2]
        else:
            age_df.iloc[age,1] = age_df.iloc[age, 3] + age_df.iloc[age, 2]
#fills in some of the nan values with an estimate age calculating as : Age first coded + Years coded
age_df['Age1stJob'] = age_df['Age'] - age_df['YearsCodePro']
#By taking away the number of years coding professionally from age we can estimate the age at which
#they entered employment within the industry
age_df.describe()
#gives a description of data so far
#Shows that 23 is the mean start point for most in the field which would fit within the early-mid 20s graduate
#assumption.
# Histogram of estimated age at first coding job next to the overall age
# distribution, sharing the y-axis for comparison.
fig, axes = plt.subplots(1, 2, sharey = True, figsize = (10, 5))
age_df[['Age1stJob']].plot(kind = 'hist', bins = 60, ax = axes[0], xlim = (0, 60), legend = False, title = 'Age at First Coding Job')
age_df[['Age']].plot(kind = 'hist', bins = 60, ax = axes[1], xlim = (0, 60), legend = False, title = 'Age Spread')
# Shared x-axis captions positioned under each subplot.
fig.text(0.3, 0.04, 'Age', ha='center')
fig.text(0.73, 0.04, 'Age', ha='center')
fig.savefig('AgeGraph.jpg')
#Plots estimated age of people starting work next to the general current age spread
age_df.head()
age_df['TimeToEmployment'] = abs(age_df['Age1stCode'] - age_df['Age1stJob'])
#A supplementary intrigue exploring how long from when people first code do they get employed
#This is effectively to remove the age element of the question and boil it down to how long until
#I 'in theory' get employed. This is a vague result however as Age1stCode can't be assumed to be the start of a
#continuous coding period and if the user hasn't actually started a job then it will be an unrealistic time
#to employment. An interesting nugget of information to go, hmmm, to though.
age_df['TimeToEmployment'].plot(kind= 'hist', bins = 20, xlim = (0,40),alpha =0.8)
|
DoesAgeMatter.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_tensorflow_p36
# language: python
# name: conda_tensorflow_p36
# ---
# # Using TensorFlow Scripts in SageMaker - Quickstart
#
# Starting with TensorFlow version 1.11, you can use SageMaker's TensorFlow containers to train TensorFlow scripts the same way you would train outside SageMaker. This feature is named **Script Mode**.
#
# This example uses
# [Multi-layer Recurrent Neural Networks (LSTM, RNN) for character-level language models in Python using Tensorflow](https://github.com/sherjilozair/char-rnn-tensorflow).
# You can use the same technique for other scripts or repositories, including
# [TensorFlow Model Zoo](https://github.com/tensorflow/models) and
# [TensorFlow benchmark scripts](https://github.com/tensorflow/benchmarks/tree/master/scripts/tf_cnn_benchmarks).
# ### Get the data
# For training data, we use plain text versions of Sherlock Holmes stories.
# Let's create a folder named **sherlock** to store our dataset:
# +
import os

# Folder that will hold the Sherlock Holmes training text; creation is
# idempotent so re-running the cell is safe.
here = os.getcwd()
data_dir = os.path.join(here, 'sherlock')
os.makedirs(data_dir, exist_ok=True)
# -
# We need to download the dataset to this folder:
# !wget https://sherlock-holm.es/stories/plain-text/cnus.txt --force-directories --output-document=sherlock/input.txt
# ## Preparing the training script
#
# For training scripts, let's use Git integration for SageMaker Python SDK here. That is, you can specify a training script that is stored in a GitHub, CodeCommit or other Git repository as the entry point for the estimator, so that you don't have to download the scripts locally. If you do so, source directory and dependencies should be in the same repo if they are needed.
#
# To use Git integration, pass a dict `git_config` as a parameter when you create the `TensorFlow` Estimator object. In the `git_config` parameter, you specify the fields `repo`, `branch` and `commit` to locate the specific repo you want to use. If authentication is required to access the repo, you can specify fields `2FA_enabled`, `username`, `password` and token accordingly.
#
# The scripts we want to use for this example is stored in GitHub repo
# [https://github.com/awslabs/amazon-sagemaker-examples/tree/training-scripts](https://github.com/awslabs/amazon-sagemaker-examples/tree/training-scripts),
# under the branch `training-scripts`. It is a public repo so we don't need authentication to access it. Let's specify the `git_config` argument here:
#
git_config = {'repo': 'https://github.com/awslabs/amazon-sagemaker-examples.git', 'branch': 'training-scripts'}
# Note that we did not specify `commit` in `git_config` here, so the latest commit of the specified repo and branch will be used by default.
#
# The scripts we will use are under the `char-rnn-tensorflow` directory in the repo. The directory also includes a [README.md](https://github.com/awslabs/amazon-sagemaker-examples/blob/training-scripts/README.md#basic-usage) with an overview of the project, requirements, and basic usage:
#
# > #### **Basic Usage**
# > _To train with default parameters on the tinyshakespeare corpus, run **python train.py**.
# To access all the parameters use **python train.py --help.**_
#
# [train.py](https://github.com/awslabs/amazon-sagemaker-examples/blob/training-scripts/char-rnn-tensorflow/train.py#L11) uses the [argparse](https://docs.python.org/3/library/argparse.html) library and requires the following arguments:
#
# ```python
# parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# # Data and model checkpoints directories
# parser.add_argument('--data_dir', type=str, default='data/tinyshakespeare', help='data directory containing input.txt with training examples')
# parser.add_argument('--save_dir', type=str, default='save', help='directory to store checkpointed models')
# ...
# args = parser.parse_args()
#
# ```
# When SageMaker training finishes, it deletes all data generated inside the container with the exception of the directories `_/opt/ml/model_` and `_/opt/ml/output_`. To ensure that model data is not lost during training, training scripts are invoked in SageMaker with an additional argument `--model_dir`. The training script should save the model data that results from the training job to this directory.
#
# The training script executes in the container as shown below:
#
# ```bash
# python train.py --num-epochs 1 --data_dir /opt/ml/input/data/training --model_dir /opt/ml/model
# ```
# ## Test locally using SageMaker Python SDK TensorFlow Estimator
# You can use the SageMaker Python SDK [`TensorFlow`](https://github.com/aws/sagemaker-python-sdk/blob/master/src/sagemaker/tensorflow/README.rst#training-with-tensorflow) estimator to easily train locally and in SageMaker.
#
# Let's start by setting the training script arguments `--num_epochs` and `--data_dir` as hyperparameters. Remember that we don't need to provide `--model_dir`:
hyperparameters = {'num_epochs': 1, 'data_dir': '/opt/ml/input/data/training'}
# This notebook shows how to use the SageMaker Python SDK to run your code in a local container before deploying to SageMaker's managed training or hosting environments. Just change your estimator's train_instance_type to local or local_gpu. For more information, see: https://github.com/aws/sagemaker-python-sdk#local-mode.
#
# In order to use this feature you'll need to install docker-compose (and nvidia-docker if training with a GPU). Running following script will install docker-compose or nvidia-docker-compose and configure the notebook environment for you.
#
# Note, you can only run a single local notebook at a time.
# !/bin/bash ./setup.sh
# To train locally, you set `train_instance_type` to [local](https://github.com/aws/sagemaker-python-sdk#local-mode):
train_instance_type='local'
# We create the `TensorFlow` Estimator, passing the `git_config` argument and the flag `script_mode=True`. Note that we are using Git integration here, so `source_dir` should be a relative path inside the Git repo; otherwise it should be a relative or absolute local path. the `Tensorflow` Estimator is created as following:
#
# +
import os
import sagemaker
from sagemaker.tensorflow import TensorFlow
# Script-mode estimator: entry_point/source_dir are resolved inside the Git
# repo from git_config; train_instance_type='local' runs in Docker.
estimator = TensorFlow(entry_point='train.py',
                       source_dir='char-rnn-tensorflow',
                       git_config=git_config,
                       train_instance_type=train_instance_type,
                       train_instance_count=1,
                       hyperparameters=hyperparameters,
                       role=sagemaker.get_execution_role(), # Passes to the container the AWS role that you are using on this notebook
                       framework_version='1.15.2',
                       py_version='py3',
                       script_mode=True)
# -
# To start a training job, we call `estimator.fit(inputs)`, where inputs is a dictionary where the keys, named **channels**,
# have values pointing to the data location. `estimator.fit(inputs)` downloads the TensorFlow container with TensorFlow Python 3, CPU version, locally and simulates a SageMaker training job.
# When training starts, the TensorFlow container executes **train.py**, passing `hyperparameters` and `model_dir` as script arguments, executing the example as follows:
# ```bash
# python train.py --num-epochs 1 --data_dir /opt/ml/input/data/training --model_dir /opt/ml/model
# ```
#
# +
# Channel name 'training' maps to /opt/ml/input/data/training in the container.
inputs = {'training': f'file://{data_dir}'}
estimator.fit(inputs)
# -
# Let's explain the values of `--data_dir` and `--model_dir` with more details:
#
# - **/opt/ml/input/data/training** is the directory inside the container where the training data is downloaded. The data is downloaded to this folder because `training` is the channel name defined in ```estimator.fit({'training': inputs})```. See [training data](https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-training-algo.html#your-algorithms-training-algo-running-container-trainingdata) for more information.
#
# - **/opt/ml/model** use this directory to save models, checkpoints, or any other data. Any data saved in this folder is saved in the S3 bucket defined for training. See [model data](https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-training-algo.html#your-algorithms-training-algo-envvariables) for more information.
#
# ### Reading additional information from the container
#
# Often, a user script needs additional information from the container that is not available in ```hyperparameters```.
# SageMaker containers write this information as **environment variables** that are available inside the script.
#
# For example, the example above can read information about the `training` channel provided in the training job request by adding the environment variable `SM_CHANNEL_TRAINING` as the default value for the `--data_dir` argument:
#
# ```python
# if __name__ == '__main__':
# parser = argparse.ArgumentParser()
# # reads input channels training and testing from the environment variables
# parser.add_argument('--data_dir', type=str, default=os.environ['SM_CHANNEL_TRAINING'])
# ```
#
# Script mode displays the list of available environment variables in the training logs. You can find the [entire list here](https://github.com/aws/sagemaker-containers/blob/master/README.rst#list-of-provided-environment-variables-by-sagemaker-containers).
# # Training in SageMaker
# After you test the training job locally, upload the dataset to an S3 bucket so SageMaker can access the data during training:
# +
import sagemaker
# Upload the local 'sherlock' folder to the session's default S3 bucket.
inputs = sagemaker.Session().upload_data(path='sherlock', key_prefix='datasets/sherlock')
# -
# The returned variable inputs above is a string with an S3 location which SageMaker Training has permissions
# to read data from.
inputs
# To train in SageMaker:
# - change the estimator argument `train_instance_type` to any SageMaker ml instance available for training.
# - set the `training` channel to a S3 location.
# +
# Same estimator configuration as before, but targeting a managed
# ml.c4.xlarge instance and reading the training channel from S3.
estimator = TensorFlow(entry_point='train.py',
                       source_dir='char-rnn-tensorflow',
                       git_config=git_config,
                       train_instance_type='ml.c4.xlarge', # Executes training in a ml.c4.xlarge instance
                       train_instance_count=1,
                       hyperparameters=hyperparameters,
                       role=sagemaker.get_execution_role(),
                       framework_version='1.15.2',
                       py_version='py3',
                       script_mode=True)
estimator.fit({'training': inputs})
|
sagemaker-python-sdk/tensorflow_script_mode_quickstart/tensorflow_script_mode_quickstart.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
#
# +
#
# +
#https://stackoverflow.com/questions/36716242/xlrderror-no-sheet-named-sheet1-in-python
#https://stackoverflow.com/questions/60044233/converting-excel-into-json-using-python
# +
import pandas
import json

# Source workbook for this conversion.
file = 'yahoo-finance-gainers-02-.xlsx'

# Read the first worksheet into a DataFrame.
sheet_df = pandas.read_excel(file, sheet_name='Sheet1')

# Serialize row-by-row ('records' orientation: one JSON object per row).
json_text = sheet_df.to_json(orient='records')
print('Excel Sheet to JSON:\n', json_text)

# Parse the JSON text back into Python objects so json.dump can write it.
rows = json.loads(json_text)
with open('data-02.json', 'w') as json_file:
    json.dump(rows, json_file)
# +
import pandas as pd
file = 'yahoo-finance-01-.xlsx'
# the workbook may contain multiple sheets
# error method: hard-coding a sheet name fails when the sheet doesn't exist
df_sheet1 = pd.read_excel(file, sheet_name='Sheet1')
#df_sheet2 = pd.read_excel(file, sheet_name='Sheet2')
# when read Sheet1 had no error, but when read Sheet2, had an error:
# xlrd.biffh.XLRDError: No sheet named <'Sheet2'>
# right method: let ExcelFile enumerate the sheets that actually exist
with pd.ExcelFile(file) as xls:
    for sheet_name in xls.sheet_names:
        df = pd.read_excel(xls, sheet_name=sheet_name)
        print(df.head())
# -
|
BS4 Source codes/yahoo-finance/yahoo-gainers/Yahoo-finance-excel-into-json-format.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Nama : <NAME>
#
# NIM : 18.11.2110
# Pokok Bahasa 1 :
# 1.2 Daftar Pertanyaan
#
# 1. Bagaimanakah bentuk koneksi database MongoDB pada Python?
# dengan menginstall driver MongoDB pada Python yang bernama PyMongo.
# - ini adalah cara installnya :
# pip install pymongo
# - dan mengimport librarynya :
# import pymongo
#
# 2. Apakah dapat melakukan ekstraksi data dari database MongoDB menggunakan Python? dapat,dengan melakukan ekstraksi data dari database MongoDB menggunakan Python dapat dilakukan dengan
# bantuan library.
# - ini adalah cara installnya :
# pip install pandas
#
# Kompetensi 1 :
# D. Latihan 1
# 1. Koneksikan Python pada server mongoDB lokal dengan bentuk server public !
# Install the MongoDB driver once per environment (shell escape; the
# original bare `pip install pymongo` line is a SyntaxError in Python).
# !pip install pymongo
import pymongo
from bson.json_util import dumps

# Connect to a local, unauthenticated ("public") MongoDB server.
client = pymongo.MongoClient("mongodb://127.0.0.1:27017")  # server
db = client["coba2"]  # database
collection = db["inventory"]  # collection
query = collection.find()  # cursor over every document in the collection
# Bug fix: `list_query` was never defined; materialize the cursor with
# list(query).  bson's dumps handles BSON types (ObjectId, dates) that
# the stdlib json module cannot; indent=2 pretty-prints the output.
json_data = dumps(list(query), indent=2)
print(json_data)
# 2. Connect Python to the local MongoDB server and verify the session
# is authorised by issuing the serverStatus admin command.
from pymongo import MongoClient

client = MongoClient("mongodb://localhost:27017")
db = client.coba2
try:
    # Fails if the server is unreachable or authentication is required.
    db.command("serverStatus")
except Exception as e:
    print(e)
else:
    print("Anda Terhubung")
# Bug fix: `client.close` only referenced the bound method; it must be
# CALLED to actually release the connection pool.
client.close()
# 3. Daftarkan akun email student amikom ke cloud atlas mongoDB; buat cluster server dan koneksikan pada Python !
#
# !pip install pymongo[srv]
# !pip install dnspython
# Connect to a MongoDB Atlas cluster over the SRV scheme (credentials
# are redacted placeholders in the connection string below).
from pymongo import MongoClient
client = MongoClient(
    "mongodb+srv://slarkfall:<EMAIL>/test?authSource=admin&replicaSet=atlas-jt7rx5-shard-0&readPreference=primary&appname=MongoDB%20Compass&ssl=true")
# +
# Insert one sample document into the `item` collection of the `coba` db.
db = client.coba
col =db.item
data1 = {"text" :"Modul_11","NIM": "18.11.2110", "Nama": "<NAME>"}
col.insert_one(data1)
# +
# An empty filter {} matches every document; print each one returned.
results = col.find({})
for i in results:
    print(i)
# -
# -
# Kesimpulan : Keunggulan Pymongo dapat juga untuk menghubungkan python dengan MongoDB, dengan menginstall :
# pip install pymongo
# Untuk menggunakan pymongo sendiri kita perlu mengimport pymongo :
# import pymongo
#
#
#
# Pokok Bahasan 2 :
# 2.2 Daftar Pertanyaan
# 1. Apakah yang dimaksud data mining?
# Data Mining adalah proses menemukan pola dan pengetahuan dengan menarik data dalam jumlah besar.
#
# 2. Bagaimanakah model data mining?
# - Predictive Model, Mengumpulkan data untuk diolah hingga melahirkan pola informasi berupa nilai
# prediksi. Yang termasuk model ini adalah Classification, Regression, Time-Series Analysis, dan
# Prediction.
# - Descriptive Model, Mengumpulkan data untuk mempelajari bentuk hubungan atau pola dari sekumpulan data
# yang ada. Yang termasuk model ini adalah Clustering, Summarization, Association Rules, dan Sequence
# Discovery.
#
# 3. Bagaimanakah tahapan data mining?
# - Data Cleansing, Proses dimana data-data yang tidak lengkap, mengandung error dan tidak konsisten
# dibuang dari koleksi data.
# - Data Integration, Proses integrasi data dimana yang berulang akan dikombinasikan.
# - Selection, Proses seleksi atau pemilihan data yang relevan terhadap analisis untuk diterima dari
# koleksi data yang ada.
# - Data Transformation, Proses transformasi data yang sudah dipilih ke dalam bentuk mining procedure
# melalui cara dan agresi data.
# - Data Mining, Proses yang paling penting dimana akan dilakukan berbagai teknik yang diaplikasikan untuk
# mengekstrak berbagai pola-pola potensial untuk mendapatkan data yang berguna.
# - Pattern Evolution, Sebuah proses dimana pola-pola menarik yang sebelumnya sudah ditemukan dengan
# identifikasi berdasarkan measure yang telah diberikan.
# - Knowledge Presentation, Merupakan proses tahap terakhir, Dalam hal ini digunakan teknik visualisasi
# yang bertujuan membantu user dalam mengerti dan menginterpretasikan hasil dari penambangan data.
#
# Kompetensi 2 :
# D. Latihan 2
#
# 1. Berikan contoh suatu nilai yang bersifat Incomplete, Noisy, Inconsistent, dan Duplicate !
# Deliberately dirty example data: rows 3 and 6 duplicate earlier rows,
# one price is the string '2k' (inconsistent) and one is NaN (incomplete).
inventory = {
    '_id': ['1ab', '2cd', '3ef', '3ef', '4gh', '5ij', '5ij'],
    'Nama_barang': ['Sabun', 'Sampo', 'Odol', 'Odol', 'Parfum', 'GPU', 'GPU'],
    'Harga': [3000, '2k', 8000, 8000, float("NaN"), 12000, 12000],
}
inventory
# Output data inventory
# +
import pandas as pds
import numpy as npy
inv = pds.DataFrame(inventory)
inv
# -
# - Incomplete: count missing values per column (only Harga has one).
inv.isna().sum()
# - Duplicate: flag every exact repeat of an earlier row (pandas
#   defaults spelled out explicitly for the reader).
inv.duplicated(subset=None, keep='first')
# 2. How can missing data be repaired?  Here: drop the rows holding NaN.
inv.dropna(inplace=True)
inv
# `inv` now holds the cleaned frame without the NaN row.
# Kesimpulan : Data Cleansing, Proses dimana data-data yang tidak lengkap, mengandung error dan tidak konsisten
# dibuang dari koleksi data.disini kita juga menggunakan library pandas dan numpy untuk melakukan data ekstraksi,
# data framing dan pemrosesan data agar menjadi bersih tanpa adanya data yang incomplete, data yang duplicate,
# data Inconsistent,data Noisy dan data yang Missing.
# Pokok Bahasan 3 :
# 3.2 Daftar Pertanyaan
# 1. Apakah fungsi library pandas pada data mining?
# untuk melakukan manipulasi data,karena mempunyai struktur data yang fleksibel dan ekspresif.
# 2. Bagaimanakah bentuk query library pandas pada data mining?
# - Untuk import library
# import pandas as pd
# Untuk pembuatan data frame
# dataframe = pandas.DataFrame(data)
# - Untuk Import Data
# pd.read_csv(filename) #From a CSV file
# pd.read_table(filename) #From a delimited text file (like TSV)
# pd.read_excel(filename) #From an Excel file
# pd.read_sql(query, connection_object) # Read from a SQL table/database
# pd.read_json(json_string) # Read from a JSON formatted string, URL or file.
# pd.DataFrame(dict) # From a dict, keys for columns names, values for data as lists
# - Untuk Export Data
# pd.to_csv(filename) # Write to a CSV file
# pd.to_excel(filename) # Write to an Excel file
# pd.to_sql(table_name, connection_object) # Write to a SQL table
# pd.to_json(filename) # Write to a file in JSON format
# - Untuk Cleansing Data
# pd.isnull() # Checks for null Values, Returns Boolean Arrray
# pd.notnull() # Kebalikan dari pd.isnull()
# pd.dropna() # Drop all rows that contain null values
# pd.fillna(x) # Replace all null values with x
# pd.astype(float) # Convert the datatype of the series to float
# pd.replace(1,'one') # Replace all values equal to 1 with 'one’
# Kompetensi 3 :
# D. Latihan 3
# Exercise 3: load the `passengers` collection of the local `titanic`
# MongoDB database into a pandas DataFrame for per-attribute statistics.
import pymongo as pym
import pandas as pds
from bson.json_util import dumps
client = pym.MongoClient("mongodb://127.0.0.1:27017")
db = client["titanic"]
coll = db["passengers"]
cursor = coll.find()
result_cursor = list(cursor)
df = pds.DataFrame(result_cursor)
df
# 1. Show the rows that contain any empty/null attribute.
data_null = df.isnull().any(axis=1)
df[data_null]
# 2. Show the per-attribute maximum.
# NOTE(review): casting through 'category' then 'str' makes max/min
# compare values lexicographically, not numerically — confirm intended.
df_atribut = df.astype('category')
df_atribut = df_atribut.astype('str')
df_atribut.max()
# 3. Show the per-attribute minimum (same lexicographic caveat).
df_atribut = df.astype('category')
df_atribut = df_atribut.astype('str')
df_atribut.min()
# 4. Show the maximum age in the dataset (Age coerced to float).
df_umur = df['Age'].astype('category')
df_umur = df_umur.astype("float")
df_umur.max()
# 5. Show the minimum age in the dataset.
df_umur = df['Age'].astype('category')
df_umur = df_umur.astype("float")
df_umur.min()
# 6. Show every row whose age is above 18.
df_umur = df['Age'].astype('category')
df_umur = df_umur.astype("float")
df[df_umur>18]
# Kesimpulan : Pandas adalah library python yang sangat sering digunakan.dapat digunakan untuk melakukan manipulasi
# data,karena mempunyai struktur data yang fleksibel dan ekspresif.untuk menginstall nya pun cukup mudah dengan
# cara pip install pandas
|
Modul11_2110.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# Import The CSV (the Date column becomes a parsed DatetimeIndex).
tsla_data = pd.read_csv('data/TSLA.csv', index_col='Date', parse_dates=True)
tsla_data.head()
# ### Data Visualization
# Let's Analyze & Visualize The Open Prices
sns.set_style('darkgrid')
tsla_data['Open'].plot(linewidth=4, figsize=(15, 6), title='TSLA STOCK')
# Let's Analyze & Visualize The Open and Close Prices
tsla_data[['Open', 'Close']].plot(linewidth=4, figsize=(15, 6), title='TSLA STOCK')
# Let's Analyze & Visualize The High and Low Prices
tsla_data[['High', 'Low']].plot(linewidth=4, figsize=(15, 6), title='TSLA STOCK')
# Let's Analyze & Visualize The All Four Prices
tsla_data[['Open', 'Close', 'High', 'Low', 'Adj Close']].plot(kind='box', figsize=(15, 6), title='TSLA STOCK')
# Let's Analyze and Visualize The Volume
tsla_data['Volume'].plot.hist(figsize=(15, 6), title='TSLA STOCK')
# ### Data Preprocessing
# Info
tsla_data.info()
# Describe
tsla_data.describe()
# +
# Split the data into X, y
# NOTE(review): the features include same-day Open/High/Low/Adj Close,
# which are strongly related to the same-day Close target — confirm this
# is intended before reading the scores as genuine predictive skill.
X = tsla_data[['Open', 'High', 'Low', 'Adj Close', 'Volume']]
y = tsla_data['Close']
X.shape, y.shape
# -
from sklearn.model_selection import train_test_split
# Split the data into X_train, X_test, y_train, y_test
# NOTE(review): train_test_split shuffles by default, which breaks the
# chronological order of this time series — consider a date-based split.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=4)
# Shapes
X_train.shape, y_train.shape
# Shapes
X_test.shape, y_test.shape
from sklearn.preprocessing import MinMaxScaler
# +
# Feature Scaling: fit the scaler on the TRAINING split only, then apply
# the same learned min/max to the test split.  The original called
# fit_transform on X_test as well, re-fitting on test data — a
# data-leakage bug that makes evaluation optimistic.
scaler = MinMaxScaler(feature_range=(0, 1))
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
# -
# ### Building The Model
from sklearn.linear_model import LinearRegression
# Build a linear regression model
model = LinearRegression()
# Train the model
model.fit(X_train, y_train)
# ### Prediction & Model Evaluate & Model Accuracy
# Make Predictions
prediction = model.predict(X_test)
# +
# Predictions VS Y_true (aligned side by side for inspection)
pred_vs_true = pd.DataFrame({'Actual Close': y_test, 'Predicted Close': prediction})
pred_vs_true
# -
# Let's visualize the predictions & actual
pred_vs_true[['Actual Close', 'Predicted Close']].plot(linewidth=4, figsize=(15, 6), title='TSLA STOCK')
# Let's find the co-efficents & y_intercept
model.coef_, model.intercept_
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
# Let's find the model mean squared error & mean_absolute_error
mean_squared_error(y_test, prediction), mean_absolute_error(y_test, prediction)
# "Accuracy" here is strictly R^2, the coefficient of determination,
# not classification accuracy.
r2_score(y_test, prediction)
|
Predictions.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="QZhIwy1isa1F" colab_type="code" colab={}
# Install the hypothesize statistics package (Colab shell escape).
# !pip install hypothesize
# + id="c_ulEnBms7RI" colab_type="code" colab={}
from hypothesize.utilities import create_example_data
from hypothesize.compare_groups_with_single_factor import ydbt
# + id="3HSmG9exs_2C" colab_type="code" colab={}
# Build an example DataFrame with two groups (columns cell_1, cell_2).
df=create_example_data(2)
df.head()
# + id="bm4pbHTRtfra" colab_type="code" colab={}
# Compare the two groups with ydbt; see the hypothesize documentation
# for the exact statistical procedure it implements.
results=ydbt(df.cell_1, df.cell_2)
# + id="-HCK1vVat-jd" colab_type="code" colab={}
results
|
examples/ydbt.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] heading_collapsed=true
# # Exercício 1.0 - Raciocínio Lógico
# + [markdown] heading_collapsed=true hidden=true
# ## Problema
# + [markdown] hidden=true
# Em uma escola estudam quatro amigas: Raquel, Vivian, Bruna e Ana. Em um dia ensolarado, elas se reuniram para passear em um parque. Cada uma delas utilizou uma camiseta de cor diferente: Branco, Azul, Amarelo e Verde. Além disso, cada uma passou um protetor com fator de proteção (FPS) diferente. Cada uma delas tem um animal de estimação diferente e gosta de um suco diferente, além de possuírem idades diferentes.
# Todas elas sentaram em um banco de 4 lugares, cada uma ocupando um lugar diferente.
# Na terceira posição do banco está a garota que tem um Cachorro.
# Quem tem um Peixe está em uma das pontas do banco.
# A garota que tem um Gato está na primeira posição do banco.
# Ana usou o protetor solar de FPS 50.
# Na segunda posição está a garota que usou o protetor solar com FPS 55.
# A garota mais nova está ao lado da que usa protetor solar de menor FPS.
# Quem gosta de suco de Morango está na quarta posição do banco.
# A garota que gosta de suco de Maracujá está ao lado da que tem Pássaros.
# A garota que gosta de suco de Limão está ao lado da que gosta de suco de Maracujá.
# Quem gosta de suco de Laranja está em uma das pontas.
# A garota de camiseta Azul está em algum lugar à esquerda da garota de 9 anos.
# A garota de 8 anos está na quarta posição.
# A garota de 11 anos está em uma das pontas.
# Vivian tem Pássaros.
# Raquel está na primeira posição.
# A garota de camiseta Verde está na quarta posição do banco.
# A garota de camiseta Branca está em uma das pontas.
# A garota de 10 anos está ao lado da garota de 11.
# A garota que usou o protetor solar de FPS 40 gosta de suco de Maracujá.
# A garota que usou o protetor solar de FPS 45 está em uma das pontas do banco.
# Bruna tem Cachorros.
#
# Qual a característica (cor da camiseta, animal de estimação, protetor solar que utilizou, suco que gosta, nome e idade) de cada uma das garotas?
#
# + [markdown] heading_collapsed=true hidden=true
# ## Análise
# + [markdown] hidden=true
# Em uma escola estudam quatro amigas: Raquel, Vivian, Bruna e Ana. <br>
# camiseta de cor diferente: Branco, Azul, Amarelo e Verde. <br>
# cada uma passou um protetor com fator de proteção (FPS) diferente. <br>
# Cada uma delas tem um animal de estimação diferente. <br>
# Cada uma delas gosta de um suco diferente. <br>
# Cada uma delas possui idade diferente. <br>
# Todas elas sentaram em um banco de 4 lugares, cada uma ocupando um lugar diferente. <br>
# Na terceira posição do banco está a garota que tem um Cachorro. <br>
# Quem tem um Peixe está em uma das pontas do banco(Primeira ou Quarta). <br>
# A garota que tem um Gato está na primeira posição do banco. <br>
# Ana usou o protetor solar de FPS 50. <br>
# Na segunda posição está a garota que usou o protetor solar com FPS 55. <br>
# A garota mais nova está ao lado da que usa protetor solar de menor FPS. <br>
# Quem gosta de suco de Morango está na quarta posição do banco. <br>
# A garota que gosta de suco de Maracujá está ao lado da que tem Pássaros. <br>
# A garota que gosta de suco de Limão está ao lado da que gosta de suco de Maracujá. <br>
# Quem gosta de suco de Laranja está em uma das pontas. <br>
# A garota de camiseta Azul está em algum lugar à esquerda da garota de 9 anos. <br>
# A garota de 8 anos está na quarta posição. <br>
# A garota de 11 anos está em uma das pontas. <br>
# Vivian tem Pássaros. <br>
# Raquel está na primeira posição. <br>
# A garota de camiseta Verde está na quarta posição do banco. <br>
# A garota de camiseta Branca está em uma das pontas. <br>
# A garota de 10 anos está ao lado da garota de 11. <br>
# A garota que usou o protetor solar de FPS 40 gosta de suco de Maracujá. <br>
# A garota que usou o protetor solar de FPS 45 está em uma das pontas do banco. <br>
# Bruna tem Cachorros. <br>
#
# Qual a característica (cor da camiseta, animal de estimação, protetor solar que utilizou, suco que gosta, nome e idade) de cada uma das garotas?
# + [markdown] heading_collapsed=true hidden=true
# ## Implicações / Proposições / Afirmações
# + [markdown] hidden=true
# * Existem 4 amigas: Raquel, Vivian, Bruna e Ana. <br>
# * Existem 4 camisetas: Branco, Azul, Amarelo e Verde. <br>
# * Exitem 4 protetores solares com fator de proteção diferentes: FPS 40, FPS 45, FPS 50 e FPS 55. <br>
# * Existem 4 animais de estimação diferente: Cachorro, Peixe, Gato e Pássaros. <br>
# * Existem 4 sucos diferentes: Morango, Maracujá, Limão e Laranja. <br>
# * Cada uma tem idade diferente: 8 anos, 9 anos, 10 anos e 11 anos. <br>
# * Existe um banco com 4 assentos, cada uma estando em um. <br>
# * Na terceira posição do banco está a garota que tem um Cachorro. <br>
# * Quem tem um Peixe está em uma das pontas do banco(pode ser na Primeira ou Quarta). <br>
# * A garota que tem um Gato está na primeira posição do banco. <br>
# * Ana usou o protetor solar de FPS 50. <br>
# * Na segunda posição está a garota que usou o protetor solar com FPS 55. <br>
# * A garota mais nova está ao lado da que usa protetor solar de menor FPS. <br>
# * Quem gosta de suco de Morango está na quarta posição do banco. <br>
# * A garota que gosta de suco de Maracujá está ao lado da que tem Pássaros. <br>
# * A garota que gosta de suco de Limão está ao lado da que gosta de suco de Maracujá. <br>
# * Quem gosta de suco de Laranja está em uma das pontas(pode ser na Primeira ou Quarta). <br>
# * A garota de camiseta Azul está em algum lugar à esquerda da garota de 9 anos. <br>
# * A garota de 8 anos está na quarta posição. <br>
# * A garota de 11 anos está em uma das pontas(pode ser na Primeira ou Quarta). <br>
# * Vivian tem Pássaros. <br>
# * Raquel está na primeira posição. <br>
# * A garota de camiseta Verde está na quarta posição do banco. <br>
# * A garota de camiseta Branca está em uma das pontas. <br>
# * A garota de 10 anos está ao lado da garota de 11. <br>
# * A garota que usou o protetor solar de FPS 40 gosta de suco de Maracujá. <br>
# * A garota que usou o protetor solar de FPS 45 está em uma das pontas do banco. <br>
# * Bruna tem Cachorros.
# + [markdown] heading_collapsed=true hidden=true
# ## Resolução
# -
# 1 RAQUEL<br>
# A garota que tem um Gato está na primeira posição do banco. <br>
# A garota de camiseta Branca está em uma das pontas pode ser na Primeira ou Quarta). <br>
# Raquel está na primeira posição. <br>
# Quem gosta de suco de Laranja está em uma das pontas(pode ser na Primeira ou Quarta). <br>
# A garota de 11 anos está em uma das pontas(pode ser na Primeira ou Quarta). <br>
# A garota que usou o protetor solar de FPS 45 está em uma das pontas do banco. <br>
#
# 2 VIVIAN <br>
# Vivian tem Pássaros. <br>
# Na segunda posição está a garota que usou o protetor solar com FPS 55. <br>
# A garota que gosta de suco de Limão está ao lado da que gosta de suco de Maracujá. <br>
# A garota de 10 anos está ao lado da garota de 11. <br>
# A garota de camiseta Azul está em algum lugar à esquerda da garota de 9 anos. <br>
#
# 3 BRUNA <br>
# Bruna tem Cachorros.<br>
# Na terceira posição do banco está a garota que tem um Cachorro. <br>
# A garota que gosta de suco de Maracujá está ao lado da que tem Pássaros. <br>
# A garota que usou o protetor solar de FPS 40 gosta de suco de Maracujá <br>
# Tem 9 anos. <br>
#
# 4 ANA <br>
# Ana usou o protetor solar de FPS 50.<br>
# Quem tem um Peixe está em uma das pontas do banco(pode ser na Primeira ou Quarta).<br>
# A garota de camiseta Verde está na quarta posição do banco.<br>
# Quem gosta de suco de Morango está na quarta posição do banco.<br>
# A garota de 8 anos está na quarta posição<br>
#
# + [markdown] hidden=true
# Pessoa x Cor Camiseta
#
# | Pessoa x Cor Camiseta | Branco | Azul | Amarelo | Verde |
# | :-------------------: | :------: | :------: | :------: | :------: |
# | Raquel | V | F | F | F |
# | Vivian | F | V | F | F |
# | Bruna | F | F | V | F |
# | Ana | F | F | F | V |
# + [markdown] hidden=true
# Pessoa x Animal
#
# | Pessoa x Animal | Cachorro | Peixe | Gato | Pássaro |
# | :-------------------: | :------: | :------: | :------: | :------: |
# | Raquel | F | F | V | F |
# | Vivian | F | F | F | V |
# | Bruna | V | F | F | F |
# | Ana | F | V | F | F |
# + [markdown] hidden=true
# Pessoa x Protetor Solar
#
# | Pessoa x Protetor Solar | FPS 40 | FPS 45 | FPS 50 | FPS 55 |
# | :---------------------: | :------: | :------: | :------: | :------: |
# | Raquel | F | V | F | F |
# | Vivian | F | F | F | V |
# | Bruna | V | F | F | F |
# | Ana | F | F | V | F |
# + [markdown] hidden=true
# Pessoa x Suco
#
# | Pessoa x Suco | Morango | Maracujá | Limão | Laranja |
# | :---------------------: | :------: | :------: | :------: | :------: |
# | Raquel | F | F | F | V |
# | Vivian | F | F | V | F |
# | Bruna | F | V | F | F |
# | Ana | V | F | F | F |
# + [markdown] hidden=true
# Pessoa x Idade
#
# | Pessoa x Idade | 8 anos | 9 anos | 10 anos | 11 anos |
# | :---------------------: | :------: | :------: | :------: | :------: |
# | Raquel | F | F | F | V |
# | Vivian | F | F | V | F |
# | Bruna | F | V | F | F |
# | Ana | V | F | F | F |
# + [markdown] hidden=true
# Pessoa x Banco
#
# | Pessoa x Banco | 1 | 2 | 3 | 4 |
# | :---------------------: | :------: | :------: | :------: | :------: |
# | Raquel | V | F | F | F |
# | Vivian | F | V | F | F |
# | Bruna | F | F | V | F |
# | Ana | F | F | F | V |
# + [markdown] heading_collapsed=true hidden=true
# ## Resposta
# + [markdown] hidden=true
# | Características x Banco | Posição 1 | Posição 2 | Posição 3 | Posição 4 |
# | :---------------: | :-: | :-: | :-: | :-: |
# | Nome | Raquel | Vivian | Bruna | Ana |
# | Animal | Gato | Pássaro | Cachorro | Peixe |
# | Idade | 11 | 10 | 9 | 8 |
# | Suco | Laranja | Limão | Maracujá | Morango |
# | Camiseta | Branco | Azul | Amarelo | Verde |
# | Protetor | 45 | 55 | 40 | 50 |
# + [markdown] heading_collapsed=true
# # Exercício 2.0 - Estrutura IF
# + [markdown] heading_collapsed=true hidden=true
# ## Problema
# + [markdown] hidden=true
# Elimine os testes redundantes dos itens abaixo:
# + [markdown] heading_collapsed=true hidden=true
# ### a)
# ```python
# if (a > b):
# print(f'Maior {a}')
# else:
# if (b >= a):
# print(f'Maior {b}')
# ```
# + [markdown] heading_collapsed=true hidden=true
# #### Resposta
# + hidden=true
# Solution: a single if/else suffices — the original's two tests were
# complementary, so the redundant second check is dropped.  Flipping the
# comparison preserves the tie case (a == b still prints b).
if a > b:
    print(f'Maior {a}')
else:
    print(f'Maior {b}')
# + [markdown] heading_collapsed=true hidden=true
# ### b)
# ```python
# if (x > 10):
# print(f'Valor maior que 10')
# else:
# if (x <= 10 and x > 5):
# print(f'Menor ou igual a 10 e maior que 5')
# else:
# if (x <= 5):
# print(f'Menor ou igual a 5')
# ```
# + [markdown] hidden=true
# #### Reposta
# + hidden=true
# Solution: order the branches from largest to smallest so that each
# elif already implies the earlier tests failed — no redundant compound
# conditions are needed.
if x > 10:
    print(f'Valor maior que 10')
elif x > 5:
    print(f'Menor ou igual a 10 e maior que 5')
else:
    print(f'Menor ou igual a 5')
|
01-Fundamentos-Programacao-DS/Scripts/Mod-01/aula_01/exercicios.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3
# language: python
# name: conda_python3
# ---
import os
import sagemaker
from sagemaker.tensorflow.serving import Model
import tensorflow as tf
import numpy as np
from sagemaker import get_execution_role
import numpy as np
import cv2
from matplotlib import pyplot as mlt
import boto3
import json
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# S3 URI of the training job's model artifact (fill in before running).
my_training_artifact = '[YOUR TRAINING OUTPUT GOES HERE]'
# + magic_args="-s \"$my_training_artifact\" " language="bash"
#
# Download the training artifact from S3 into the notebook directory.
# aws s3 cp $1 ./
# -
# ! tar -xvf model.tar.gz
# Set up the SageMaker session and re-upload the model archive to the
# session's default bucket under tf-serving/model.
sagemaker_session = sagemaker.Session()
role = get_execution_role()
bucket = sagemaker_session.default_bucket()
prefix = 'tf-serving'
model_data = sagemaker_session.upload_data('model.tar.gz',
bucket,
os.path.join(prefix, 'model'))
model_data
# The "Model" object doesn't create a SageMaker Model until a Transform Job or Endpoint is created.
tensorflow_serving_model = Model(model_data=model_data,
role=role,
framework_version='2.0',
sagemaker_session=sagemaker_session)
# Deploy behind a real-time HTTPS endpoint on one ml.m5.xlarge instance.
predictor = tensorflow_serving_model.deploy(initial_instance_count=1,
instance_type='ml.m5.xlarge')
# +
# Generator settings: rescale pixels to [0, 1]; images resized to
# 224x224, batches of 16, shuffled, one-hot ("categorical") labels.
train_data_gen_args = dict(rescale=1./255)
data_gen_args = dict(target_size=(224, 224),
batch_size=16,
shuffle=True,
#color_mode='grayscale',
class_mode='categorical')
# -
gen = ImageDataGenerator(**train_data_gen_args)
# Take only the first (images, labels) batch from the test directory.
test_set = gen.flow_from_directory('../Classificacion-Train-Serve/splitdata/test/', **data_gen_args)[0]
# Pick the second sample of that batch and its label.
img = test_set[0][1]
label = test_set[1][1]
img.shape
# Add a leading batch dimension as expected by TF Serving.
predict_batch = img.reshape((1,) + img.shape)
payload = {
'instances': predict_batch.tolist()
}
# Invoke the deployed TensorFlow Serving endpoint with the JSON payload
# and compare the model's prediction against the true label.
client = boto3.client('sagemaker-runtime')
endpoint_name = "tensorflow-inference-2020-05-21-22-47-12-683" # Your endpoint name.
content_type = "application/json" # The MIME type of the input data in the request body.
json_payload = json.dumps(payload)
response = client.invoke_endpoint(
    EndpointName=endpoint_name,
    ContentType=content_type,
    Body=json_payload
)
# Bug fix: the response body is JSON, so parse it with json.loads rather
# than eval() — eval executes arbitrary expressions from the service and
# also fails on JSON literals such as true/false/null.
json.loads(response['Body'].read())
label
|
week2/day3/sagemaker-tensorflow2/Classification-Serve/TensorflowServing.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
# Pin this benchmark to a single GPU; must be set before any CUDA
# library initialises for CUDA_VISIBLE_DEVICES to take effect.
GPU_id = 3
os.environ['CUDA_VISIBLE_DEVICES'] = str(GPU_id)
# +
import torch
import pandas as pd
import numpy as np
from time import time
from fastai import *
from fastai.basic_data import *
from fastai.basic_data import *
from fastai.tabular import *
from fastai.basic_data import DataBunch
from fastai.tabular import TabularModel
import cudf
import nvtabular as nvt
from nvtabular.ops import Normalize, FillMissing, Categorify, Moments, Median, Encoder, LogOp, ZeroFill
from nvtabular.torch_dataloader import FileItrDataset, DLCollator, DLDataLoader
import warnings
warnings.filterwarnings("ignore")
import matplotlib.pyplot as plt
# %matplotlib inline
# -
torch.__version__, cudf.__version__
# %load_ext snakeviz
# load snakeviz if you want to run profiling
# +
# to_cpu = True
# -
# <h3> Dataset Gathering: Define files in the training and validation datasets. </h3>
# +
# data_path = '/rapids/notebooks/jperez/Documents/ds-itr/examples/'
data_path = '/datasets/outbrain/jp_out/output/'
#df_test = 'test/'
df_valid = 'validation_feature_vectors_integral.csv/'
df_train = 'train_feature_vectors_integral_eval.csv/'
# Take only a subset of the CSV part-files: 20 parts for training and
# 5 for validation.
train_set = [data_path + df_train + x for x in os.listdir(data_path + df_train) if x.startswith("part")][:20]
valid_set = [data_path + df_valid + x for x in os.listdir(data_path + df_valid) if x.startswith("part")][:5]
# -
len(train_set), len(valid_set)
# <h4>Grab column information</h4>
# The header file holds one column name per line for the headerless CSVs.
cols = open(data_path + 'train_feature_vectors_integral_eval.csv.header').read().splitlines()
cat_names = ['display_id', 'is_leak', 'doc_event_id', 'ad_id', 'doc_id', 'doc_ad_entity_id', 'doc_event_entity_id', 'doc_event_entity_id', 'doc_ad_source_id', 'doc_event_source_id', 'event_geo_location', 'ad_advertiser', 'event_country_state', 'doc_ad_publisher_id', 'doc_event_publisher_id', 'doc_ad_topic_id', 'doc_event_topic_id', 'event_country', 'doc_ad_category_id', 'doc_event_category_id', 'event_hour', 'event_platform', 'traffic_source', 'event_weekend', 'user_has_already_viewed_doc']
cont_names = ['pop_ad_id_conf', 'pop_document_id_conf', 'user_doc_ad_sim_categories_conf', 'user_doc_ad_sim_topics_conf', 'pop_publisher_id_conf', 'pop_advertiser_id_conf', 'pop_campaign_id_conf', 'pop_source_id_conf', 'pop_entity_id_conf', 'pop_topic_id_conf', 'pop_category_id_conf', 'pop_ad_id', 'pop_document_id', 'pop_publisher_id', 'pop_advertiser_id', 'pop_campaign_id', 'pop_source_id', 'pop_entity_id', 'pop_topic_id', 'pop_category_id', 'user_doc_ad_sim_categories', 'user_doc_ad_sim_topics', 'user_doc_ad_sim_entities', 'doc_event_doc_ad_sim_categories', 'doc_event_doc_ad_sim_topics', 'doc_event_doc_ad_sim_entities', 'user_views', 'ad_views', 'doc_views', 'doc_event_days_since_published', 'doc_event_hour', 'doc_ad_days_since_published'] #+ [i for i in ds.columns if i not in cat_names and i not in ['label']]
# Keep only feature names that actually appear in the CSV header.
# A set makes each membership test O(1) instead of scanning the list.
header = set(cols)
cat_names = [name for name in cat_names if name in header]
cont_names = [name for name in cont_names if name in header]
# <h3>Preprocessing:</h3> <p>Select operations to perform, create the Preprocessor object, create dataset iterator object and collect the stats on the training dataset</p>
# One frequency threshold per categorical column (all set to 1).
freq_threshes = {col: 1 for col in cat_names}
freq_threshes
# %%time
# Build the NVTabular workflow on GPU (to_cpu=False): continuous columns
# get missing-value fill + normalization; categorical columns get
# frequency-based integer encoding.
proc = nvt.Workflow(cat_names=cat_names, cont_names=cont_names, label_name=['label'], to_cpu=False)
proc.add_cont_preprocess([FillMissing(replace=True), Normalize(replace=True)])
proc.add_cat_preprocess(Categorify(replace=True, use_frequency=True, freq_threshold=1))
# %%time
trains_itrs = nvt.dataset(train_set,names=cols, engine='csv')
valids_itrs = nvt.dataset(valid_set,names=cols, engine='csv')
output_path_train = './jp_outbrains/train'
output_path_valid = './jp_outbrains/valid'
# %%time
# Offline pass over the training data: records the statistics and writes
# the transformed parquet files.
proc.apply(trains_itrs, apply_offline=True, record_stats=True, output_path=output_path_train, shuffle=False)
# %%time
# The validation data reuses the training statistics (record_stats=False).
proc.apply(valids_itrs, apply_offline=True, record_stats=False, output_path=output_path_valid, shuffle=False)
new_train_set = [os.path.join(output_path_train, x) for x in os.listdir(output_path_train) if x.endswith("parquet")]
new_valid_set = [os.path.join(output_path_valid, x) for x in os.listdir(output_path_valid) if x.endswith("parquet")]
# <h5>Gather embeddings using statistics gathered in the Read phase.</h5>
# Embedding sizes derived from each categorical column's cardinality.
embeddings = [x[1] for x in proc.df_ops['Categorify'].get_emb_sz(proc.stats["categories"], proc.columns_ctx['categorical']['base'])]
embeddings
# <h5>Create the file iterators using the FileItrDataset Class.</h5>
# %%time
t_batch_sets = [FileItrDataset(x, engine='parquet', batch_size=400000) for x in new_train_set]
v_batch_sets = [FileItrDataset(x, engine='parquet', batch_size=400000) for x in new_valid_set]
# %%time
# Chain the per-file iterators into single train/valid datasets.
t_chain = torch.utils.data.ChainDataset(t_batch_sets)
v_chain = torch.utils.data.ChainDataset(v_batch_sets)
proc.columns_ctx['final']['ctx']
# <h5>Use the Deep Learning Collator to create a collate function to pass to the dataloader.</h5>
# %%time
dlc = DLCollator(preproc=proc, apply_ops=False)
# %%time
t_data = DLDataLoader(t_chain, collate_fn=dlc.gdf_col, pin_memory=False, num_workers=0)
v_data = DLDataLoader(v_chain, collate_fn=dlc.gdf_col, pin_memory=False, num_workers=0)
# <h4>After creating the Dataloaders you can leverage fastai framework to create Machine Learning models</h4>
databunch = DataBunch(t_data, v_data, collate_fn=dlc.gdf_col, device="cuda")
# +
# %%time
# Two-output tabular classifier trained with cross-entropy loss.
model = TabularModel(emb_szs = embeddings, n_cont=len(cont_names), out_sz=2, layers=[512,256])
learn = Learner(databunch, model, metrics=[accuracy])
learn.loss_func = torch.nn.CrossEntropyLoss()
# -
learn.lr_find()
learn.recorder.plot(show_moms=True, suggestion=True)
learning_rate = 2.75e-2
epochs = 1
# Time one training epoch end to end for the benchmark.
start = time()
learn.fit_one_cycle(epochs,learning_rate)
t_final = time() - start
t_final
|
examples/gpu_benchmark.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Determinado la eficiencia del código
# +
import time
# Wall-clock timing with time.time(): simple, but affected by system
# clock adjustments.
start = time.time()
# Our code goes here
time.sleep(0.5)
end = time.time()
print(end - start)
# +
from datetime import timedelta
# time.monotonic() never goes backwards, so it is the safer choice for
# measuring elapsed time; timedelta pretty-prints the duration.
start = time.monotonic()
# Our code goes here
time.sleep(1)
end = time.monotonic()
print(timedelta(seconds = end - start))
# +
# Compare the runtime of two snippets by timing each one separately.
# NOTE(review): single runs of nearly equal durations (0.5 vs 0.501 s)
# fall within scheduler noise; use timeit for reliable comparisons.
start = time.time()
# Our code goes here
time.sleep(0.5)
end = time.time()
start2 = time.time()
# Our code goes here
time.sleep(0.501)
end2 = time.time()
print(end - start > end2 - start2)
# -
# # Pandas
import pandas as pd
# **'pdread_csv'** es el comando para abrir la base de datos y **'nrows'** es la cantidad de filas que va a abrir en este caso el archivo tiene ```1'048.575``` filas, pesa ```657 mb``` para que sea más fácil trabajar con el elegimos un número menor. La sintaxis **`1e6`** es para indicar que se cargan 1 millón de datos.
df = pd.read_csv("C:/Users/<NAME>/Desktop/GitHub/Python/Ciencia_Datos/base_datos_2008.csv", nrows = 1e6)
# **'df.head()'** permite ver los datos iniciales de la tabla, dentro del paréntesis le indicamos la cantidad de filas que queremos ver. La función `.head()` sola, devuelve solo los 5 primeros resultados.
df.head()
# **'df.tail()'** permite ver los datos finales de la tabla
df.tail()
# **'df.sample()'** permite reorganizar toda la tabla dependiendo del parámetro sin guardarla. Para guardarla la asignamos a un objeto **'df'** que consume memoria, de igual manera se puede guardar una cantidad x en otro `objeto` para hacer comparaciones y otras operaciones. `'frac = 1'` significa que queremos utilizar el 100% de las filas que seleccionamos anteriormente.
df.sample(frac = 1)
# **'df. columns'** muestra información de las columnas en forma de lista, se escribe sin paréntesis porque se está llamando a uno de los atributos del data frame que ya existe.
df. columns
# Sí por ejemplo queremos ver los datos de una sola columna se utiliza el comando **'df.DepTime'**, o el nombre de la columna que se quiere ver, de nuevo sin paréntesis debido a que no es una función.
df.DepTime
# **'df.dtypes'** nos muestra el tipo de variable que se esta utilizando en cada caso.
df.dtypes
# El comando **'df.values'** devuelve los datos en una `array` que va a permitir realizar operaciones matriciales y otros tipos de cálculo más cómodos.
df.values
# ## Herramientas de filtrado
# La sintaxsis **'df["ArrDelay"].head()'** nos muestra información concreta de una columna.
df["ArrDelay"].head()
# La sintaxis **'df[0:10]'** devuelve exactamente lo mismo que la función `.head(10)`, con la diferencia que le podemos especificar el intervalo.
df[100:110]
# Para obtener información sobre valores concretos, se le indican los parámetros especificos por ejemplo para saber que vuelos tienen retrazo de menos de una hora se usa la sintaxis `df[df["ArrDelay"] < 60].head()` y para saber los que tienen más de una de retraso, simplemente se cambia a `df[df["ArrDelay"] > 60].head()`, se usa doble `'=='` para saber los que tienen exactamente una hora de retraso `df[df["ArrDelay"] == 60].head()`, `'!='` para saber los que no se retrasaron una hora, `'<='` menor o igual que y `'>='` mayor o igual que.
df[df["ArrDelay"] < 60].head()
# Igualmente se puede filtrar por 'cadenas de texto'
df[df["Origin"] == "OAK"].head()
# Para usar un filtro compuesto se utiliza este tipo de sintaxis, si se utiliza `'&'` se requiere que ambas condiciones se cumplan, `'|'` para indicar que una ó la otra, dependiendo del tipo de análisis que se necesite.
df[(df["Origin"] == "OAK") & (df["ArrDelay"] > 60)].head()
# También se puede usar la siguiente función `'isin'` para requerir una u otra condición en vez de usar `'|'`
# Rows whose Origin is either OAK or IND; isin() is the idiomatic alternative to
# chaining conditions with `|`. Fix: "AOK" was a typo — OAK is the airport code
# used in the example at the start of this section, "AOK" matches nothing.
df[df.Origin.isin(["OAK", "IND"])].head()
# Si los datos están perdidos se puede usar la siguiente sintaxis sin recurrir al data frame directamente desde pandas con la función `'isna'`
df[pd.isna(df["ArrDelay"])].head()
# Para saber el número exacto de los vuelos con retraso podemos usar
len(df[pd.isna(df["ArrDelay"])].head())
# ## Transformaciónes
# Lo primero que se hace es crear otra columna y asignarle los valores que queremos ver.
df["HoursDelay"] = round(df["ArrDelay"]/60)
# Ya creada la columna aún no se puede ver y se aplica el siguiente código para visualizarla.
df["HoursDelay"].head()
# Si por el contrario lo que se quiere es borrar la columna que se creó u otra por algún motivo se usa
del(df["HoursDelay"])
# Yá se ha eliminado y para comprobarlo se hace un `'.head()'` de la base de datos. Al final no aparece la columna. Se puede volver a crear usando el comando `df["HoursDelay"] = round(df["ArrDelay"]/60)` y se observa el resultado en la columna del final.
df.head()
# Para borrar varias columanas a la vez se utiliza una lista y el comando 'drop' y para que no arroje error se le debe especificar el eje, sin embargo no se guarda y lo comprobamos a aplicar un `.head()` de nuevo.
df.drop(["Diverted", "Cancelled", "Year"], axis=1)
df.head()
# Para que se guarde hay que igualarlo y eso se hace de la siguiente manera
df = df.drop(["Diverted", "Cancelled", "Year"], axis=1)
# O también se puede hacer especificandole que lo haga en el sitio
df.drop(["Diverted", "Cancelled", "Year"], axis=1, inplace=True)
# La función `'drop'` también sirve para eliminar filas de a una o varias indicandole la fila o el rango, y para que las guarde se debe igualar o indicar el sitio.
df.drop(0)
df.drop(range(0,1000))
# Para añadir mas filas a una tabla se utiliza la siguiente sintaxis, se crean los objetos con solo los origenes especificados.
# Subsets of flights departing from Atlanta (ATL) and Houston (HOU).
# Fix: "ALT" was a typo — the IATA code for Atlanta is ATL (the surrounding text
# explicitly discusses Atlanta); "ALT" silently produced an empty frame.
dfATL = df[df.Origin == "ATL"]
dfHOU = df[df.Origin == "HOU"]
# Para sumar esos dos data frame a la columna Atlanta se le agrega la columan Houston `dfATL.append(dfHOU)`y se puede hacer porque ambos tienen la misma estructura de columnas y para que se guarde se crea el nuevo objeto `newdf`
newdf = dfATL.append(dfHOU)
# Se comprueba con el comando `newdf.Origin` primero se observan las filas de Atlanta seguidas de las de Houston
newdf.Origin
# ## Groupby
#
# Herramienta de resumen de nuestros datos que nos permite realizar operaciones matemáticas sencillas agrupando por categorías.
#
# El siguiente ejemplo muestra como se aplica este filtro con tres parámetros, para saber el retraso máximo de cada dia de la semana se usa la función `max()`, para saber la media `mean()`, `min()` para saber el mínimo, o incluso se puede saber que días hay menos vuelos usando la función `count()`, `describe()` muestra un resumen estadistico de lo que sucedio en cada uno de los días de la semana.
df.groupby(by = "DayOfWeek")["ArrDelay"].max()
df.groupby(by = "DayOfWeek")["ArrDelay"].describe()
# Se pueden hacer comparaciones complejas, siempre y cuando las columnas sean del mismo tipo (texto con texto, décimales con décimales, etc.).
df.groupby(by = "DayOfWeek")["ArrDelay", "DepDelay"].mean()
# Se pueden realizar operaciones entre valores, por ejemplo para deterinar en que día hay más distancia
df.groupby(by = "DayOfWeek")["ArrDelay"].max()- df.groupby(by = "DayOfWeek")["ArrDelay"].min()
# Para hacer un análisis más complejo se crea una base de datos alterna con los siguientes datos:
# Dataset restricted to Atlanta (ATL) and Houston (HOU) origins for the groupby
# comparison below. Fix: "ALT" → "ATL" (Atlanta's airport code).
dfATLHOU = df[df.Origin.isin(["ATL","HOU"])]
# Con la siguiente instrucción lo que vamos a ver es cada día de la semana por separado con los vuelos que salen de Atlanta y aquellos vuelos que salen de Houston y sus medias. Esto permite comparar, por ejemplo, en cuál de las dos ciudades los vuelos llegan más retrasados y en qué día las diferencias son (importante el orden en el by para poder leer bien la información ya que al no hacerlo así se puede dificultar este trabajo, prueba con `["Origin", "DayOfWeek"]`).
dfATLHOU.groupby(by = ["DayOfWeek", "Origin"])["ArrDelay"].mean()
# Para facilitar el trabajo, también se puede guardar esta información en un objeto al que se le pueden aplicar directamente la instrucciones y va ha realizarlo de una manera más rápida
mygroupby = dfATLHOU.groupby(by = ["DayOfWeek", "Origin"])["ArrDelay"]
mygroupby.min()
# ## Duplicados y perdidos
#
# Por lo general lo que se hace con los datos duplicados, es eliminarlos.
# Build a deliberately duplicated, shuffled frame, then deduplicate it.
# NOTE(review): DataFrame.append was removed in pandas 2.0; pd.concat([df, df])
# is the modern equivalent — confirm the pandas version pinned for this notebook.
dfduplicate = df.append(df)
dfduplicate = dfduplicate.sample(frac=1)
dfclean = dfduplicate.drop_duplicates()
# Dropping exact duplicates should recover the original row count.
len(dfclean) == len(df)
len(dfclean)
# Para hacer algo más especifico
dfclean.drop_duplicates(subset = "DayofMonth")
# La función `dropna()` se usa para gestionar los datos faltantes
df.dropna()
# Hay parámetros que permiten que la función `dropna()` no funcione de manera tan radical, es el caso de `theshold`
df.dropna(thresh=25)
df.dropna(thresh=len(df.columns)-2)
# Para devolver una columna que no tenga Nan en la que seleccionemos, usamos `subset` dentro de la función y dentro de una lista entregarle el nombre de la columna que vamos a filtrar.
df.dropna(subset = ["CancellationCode"])
# # Numpy
#
# El objeto más popular dentro de la librería son los 'arrays'
import numpy as np
# 2-D array (3 rows x 4 columns) of ratings.
valoraciones = np.array([[8,7,8,5], [2,6,8,1],[8,8,9,5]])
valoraciones
# Two equivalent ways to index: chained [0][1] vs the NumPy tuple form [0,1].
valoraciones[0][1]
valoraciones[0,1]
# 3-D array (3 x 2 x 4): one extra axis per rating group.
valoraciones2 = np.array([[[8,7,8,5],[2,5,5,2]],[[2,6,8,4],[8,9,7,4]],[[8,8,9,3],[10,9,10,8]]])
valoraciones2
valoraciones2[0,1,2]
# La función `zeros()` permite crear un objeto dependiendo de las dimensiones que le indiquemos
np.zeros((3,2,4,5,6))
# Se pueden hacer operaciones entre 'arrays' del mismo tamaño
valoraciones2 + np.ones((3,2,4))
# Se puede obtener la `media` total, o parcial especificandole el eje
np.mean(valoraciones2)
np.mean(valoraciones2,axis = 0)
np.mean(valoraciones2,axis = 1)
np.mean(valoraciones2,axis = 2)
# La función `reshape` permite convertir una 'lista' en un array del tamaño que se le indique según sea necesario
np.reshape([1,2,3,4,5,6,7,8,9,10,11,12], (3,2,2))
# Se puede usar la función 'median' sobre columnas de data frames por ejemplo si cargaramos "columana1"
np.median(df["columna1"])
# La función `random` permite crear un 'array' aleatorio dependiendo de la necesidad
np.random.rand(2,2)
# ## Correlaciones
#
# Antes que nada una correlación es una relación lineal entre dos variables cuantitativas que toma la expresión que vemos por pantalla, "Correlación no implica causalidad". Vamos a poder detectar correlaciones, pero no va a servir para encontrar explicaciones ('a' está correlacionado positivamente con 'b') y poder cuantificar esta relación. Se interpreta como el cociente entre la covarianza entre dos variables y el producto de sus desviaciones estándar. Esto puede tomar valores entre `-1 y 1`. Y como más cerca de estos extremos se encuentra el valor, más fuerte será la relación. Normalmente los valores entre `0,3 y -0,3` son considerados muy `bajos`, y ya sea a partir de `0,6 o 0,7` en cualquiera de los dos signos cuando estamos hablando de correlaciones `fuertes`.
df = pd.read_csv("C:/Users/<NAME>/Desktop/GitHub/Python/Ciencia_Datos/base_datos_2008.csv", nrows = 100000)
np.corrcoef(df["ArrDelay"],df["DepDelay"])
# Esto se presenta porque el cociente de correlación no admite valores faltantes. Hay que imputarlos o quitarlos. El paso más sencillo es quitarlos.
# np.corrcoef rejects NaN, so drop incomplete rows first.
df.dropna(inplace=True, subset=["ArrDelay", "DepDelay"])
# 3x3 correlation matrix across the three delay/time columns.
np.corrcoef([df["ArrDelay"],df["DepDelay"],df["DepTime"]])
# Drop constant/uninformative columns before the full pairwise correlation.
df.drop(inplace = True, columns = ["Year","Cancelled","Diverted"])
df.corr()
df.drop(inplace = True, columns = ["Month"])
# Rounded correlation matrix rendered with a colour gradient for readability.
corr = round(df.corr(),3)
corr.style.background_gradient()
# ## Test de la Chi-Cuadrado
import pandas as pd
import numpy as np
df = pd.read_csv("C:/Users/<NAME>/Desktop/GitHub/Python/Ciencia_Datos/base_datos_2008.csv")
# Fixed seed so the shuffle below is reproducible.
np.random.seed(0)
df = df[df["Origin"].isin(["HOU", "ATL", "IND"])]
df = df.sample(frac=1)
# NOTE(review): `dg` is never used afterwards — this was probably meant to be
# `df = df[0:10000]` (take the first 10k shuffled rows); confirm intent.
dg = df[0:10000]
# Binary flag: arrival delay greater than 30 minutes.
df["BigDelay"] = df["ArrDelay"] > 30
# Observed contingency table (BigDelay x Origin) with row/column totals.
observados = pd.crosstab(index=df['BigDelay'],columns=df['Origin'], margins=True)
observados
# Si presenta algún error con el módulo `scipy.stats` pruebe instalando scipy directamente con el comando `pip install scipy`
from scipy.stats import chi2_contingency
# Chi-squared test of independence on the observed table.
# Returns (statistic, p-value, degrees of freedom, expected frequencies).
test = chi2_contingency(observados)
test
# Expected frequencies under the independence hypothesis (element 3 of the tuple).
esperados = pd.DataFrame(test[3])
esperados
# Express both tables as percentages of the total row count for comparison.
esperados_rel = round(esperados.apply(lambda r: r/len(df) *100,axis=1),2)
observados_rel = round(observados.apply(lambda r: r/len(df) *100,axis=1),2)
observados_rel
esperados_rel
# p-value of the test (element 1 of the tuple).
test[1]
# ### Resumen de Test de Hipótesis
#
# * Si el p-valor<0.05, hay diferencias significativas: Hay relación entre variable
# * si el p-valor>0.05, no hay diferencias significativas: No hay relación entre variables
# ## Análisis de datos extremos o Outliers
import pandas as pd
import numpy as np
df = pd.read_csv("C:/Users/<NAME>/Desktop/GitHub/Python/Ciencia_Datos/base_datos_2008.csv", nrows=100000)
x = df["ArrDelay"].dropna()
# Classic IQR (Tukey) fences: points beyond Q3 + 1.5*IQR or Q1 - 1.5*IQR
# are flagged as univariate outliers.
Q1 = np.percentile(x,25)
Q3 = np.percentile(x,75)
rangointer = Q3 - Q1
umbralsuperior = Q3 + 1.5*rangointer
umbralinferior = Q1 - 1.5*rangointer
umbralsuperior
umbralinferior
# Fraction of observations outside each fence (mean of a boolean mask).
np.mean(x > umbralsuperior)
np.mean(x < umbralinferior)
# Multivariate outlier detection: fit an elliptic envelope assuming 1% contamination.
from sklearn.covariance import EllipticEnvelope
outliers = EllipticEnvelope(contamination = .01)
var_list = ["DepDelay", "TaxiIn", "TaxiOut", "CarrierDelay", "WeatherDelay", "NASDelay", "SecurityDelay", "LateAircraftDelay"]
x = np.array(df.loc[:,var_list].dropna())
outliers.fit(x)
# predict() returns +1 for inliers, -1 for outliers.
pred = outliers.predict(x)
pred
# Row indices of the detected outliers.
elips_outliers = np.where(pred == -1)[0]
elips_outliers
# # Transformar un dataframe en base de datos relacional
import pandas as pd
# Denormalized purchase records: buyer data, product data and the purchase itself
# are repeated on every row — the normalization below splits them apart.
data = [(1,"Joan","Gastón",25,1,"Libreta",1.2,.4,0.8,3,"03-02-2018"),
        (1,"Joan","Gastón",25,2,"Esfero",0.4,0.15,0.25,1,"03-02-2018"),
        (1,"Joan","Gastón",25,1,"Libreta",1.2,.4,0.8,2,"15-02-2018"),
        (2,"Joan","López",33,2,"Esfero",0.4,0.15,0.25,4,"01-02-2018"),
        (2,"Joan","López",33,1,"Libreta",1.2,.4,0.8,10,"05-03-2018"),
        (3,"María","García",40,1,"Libreta",1.2,.4,0.8,20,"13-04-2018"),
        (3,"María","García",40,2,"Esfero",0.4,0.15,0.25,1,"09-02-2018"),
        (3,"María","García",40,2,"Esfero",0.4,0.15,0.25,3,"03-04-2018")]
labels = ["Comprador_id","Nombre","Apellido","Edad","Producto_id","Producto","Precio","Coste","Margen","Cantidad","Fecha"]
df = pd.DataFrame.from_records(data, columns = labels)
df
# Buyers table: one row per Comprador_id, keeping only buyer attributes.
compradores = df.drop_duplicates(subset = "Comprador_id", keep = "first")
compradores
# NOTE(review): the doubled `compradores = compradores =` assignment is redundant
# (harmless, but a single assignment would do).
compradores = compradores = compradores[["Comprador_id","Nombre","Apellido","Edad"]]
compradores
# Products table: one row per Producto_id with product attributes only.
productos = df.drop_duplicates(subset = "Producto_id", keep = "first")
productos = productos[["Producto_id","Producto","Precio","Coste","Margen"]]
productos
# Purchases (fact) table: foreign keys plus date and quantity.
compras = df[["Comprador_id","Producto_id","Fecha","Cantidad"]]
compras
# ## Joins en bases de datos relacionales
# +
import pandas as pd
# Toy tables for the join examples: consumers of products vs producers of products.
consumidores = [("A","Móvil"),("B","Móvil"),("A","Portátil"),("A","Tablet"),
                ("B","Tablet"),("C","Portátil"),("D","Smartwatch"),("E","Consola")]
con_labels = ["Consumidor","Producto"]
con_df = pd.DataFrame.from_records(consumidores,columns = con_labels)
productores = [("a","Móvil"),("a","Smartwatch"),("a","Tablet"),("b","Portátil"),
               ("c","Sobremesa"),("c","Portátil")]
prod_labels = ["Productor","Producto"]
prod_df = pd.DataFrame.from_records(productores,columns = prod_labels)
# -
# Las tablas son para un ejemplo básico, primero se visualizan
con_df
prod_df
# ### Unir las tablas
#
# Con la función `merge()` se unen las tablas, al cambiar el argumento en `how` por ejemplo con el **`'outer'`** se muestra la unión completa con los resultados que no aparecen en ambas tablas, **`'inner'`** en cambio omite los resultados que no esten en ambas tablas, y en la siguientes estructuras el orden es importante con el **`'right'`** hace la unión solo si encuentra relación con la derecha (por esto deja sobremesa y omite consola) y con el **`'left'`** pasa todo lo contrario (deja consola y omite sobremesa)
# Join the two tables on "Producto" with each of the four join types:
# outer = union of keys, inner = intersection, right/left = keep all keys
# from the right/left table respectively.
pd.merge(con_df,prod_df,on="Producto",how="outer")
pd.merge(con_df,prod_df,on="Producto",how="inner")
pd.merge(con_df,prod_df,on="Producto",how="right")
pd.merge(con_df,prod_df,on="Producto",how="left")
# # Paralelizar loops en python
# +
import pandas as pd
import numpy as np
from joblib import Parallel, delayed
df = pd.read_csv("C:/Users/<NAME>/Desktop/GitHub/Python/Ciencia_Datos/base_datos_2008.csv", nrows=100000)
# -
# Sub-frame with only the five delay-cause columns used by retraso_maximo below.
df_sub = df[['CarrierDelay','WeatherDelay','NASDelay','SecurityDelay','LateAircraftDelay']]
df_sub.head(10)
def retraso_maximo(fila):
    """Return the name of the delay column with the largest value in *fila*.

    fila: list with the five delay values, in the fixed column order below.
    Returns the string "None" when the row contains any NaN.
    """
    # Guard clause: an incomplete row (any NaN) cannot be classified.
    if np.isnan(fila).any():
        return "None"
    columnas = ['CarrierDelay','WeatherDelay','NASDelay','SecurityDelay','LateAircraftDelay']
    # Position of the maximum delay maps directly onto its column name.
    return columnas[fila.index(max(fila))]
# Sequential version: classify every row of the delay sub-frame one by one.
results = []
for fila in df_sub.values.tolist():
    results.append(retraso_maximo(fila))
results
# Parallel version: the same computation fanned out over 2 worker processes
# with joblib (delayed() wraps the call, Parallel dispatches the batch).
result = Parallel(n_jobs = 2, backend = "multiprocessing")(
    map(delayed(retraso_maximo), df_sub.values.tolist()))
result
# # Matplotlib
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
df = pd.read_csv("C:/Users/<NAME>/Desktop/GitHub/Python/Ciencia_Datos/base_datos_2008.csv", nrows=100000)
# -
# Counts of cancelled (1) vs non-cancelled (0) flights: (values, counts) tuple.
data = np.unique(df.Cancelled, return_counts = True)
data
# Pie chart of the cancellation split.
plt.pie(x = data[1],
        labels = data[0], # A basic chart is printed
        colors = ["Red","Green"],
        shadow = True,
        startangle = 90,
        radius= 2)
plt.show()
# ## Modificar elementos del gráfico en Matplotlib
#
# Gráfico de burbujas
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
# Fixed seed so the 100-row sample is reproducible.
np.random.seed(0)
df = pd.read_csv("C:/Users/<NAME>/Desktop/GitHub/Python/Ciencia_Datos/base_datos_2008.csv", nrows=1000000)
df= df.sample(frac=1).head(100)
# Bubble chart: marker size (s) encodes flight distance.
plt.scatter(x=df.DayofMonth,y=df.ArrDelay,s=df.Distance)
# Same chart with transparency; colour (c) flags weekend flights (DayOfWeek 6/7).
plt.scatter(x=df.DayofMonth,y=df.ArrDelay,s=df.Distance,alpha=.3,c = df.DayOfWeek.isin([6,7]))
plt.title("Retrasos en EEUU")
plt.xlabel("Día del Mes")
plt.ylabel("Retrasos al Llegar")
# Trim the y-axis and thin out the x ticks, then annotate a single point.
plt.ylim([0,150])
plt.xticks([0,15,30])
plt.text(x=28,y=120,s="Mi vuelo")
plt.show()
# ## Etiquetas y leyendas en Matplotlib
# +
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
df = pd.read_csv("C:/Users/<NAME>/Desktop/GitHub/Python/Ciencia_Datos/base_datos_2008.csv", nrows=100000)
# Flight counts per day of week (1-7) and human-readable labels for them.
data = np.unique(df.DayOfWeek,return_counts=True)
labs = ["lun","Mar","Mie","Jue","Vie","Sab","Dom"]
data
# -
# First pie: raw numeric day labels.
plt.pie(x = data[1],
        labels = data[0],
        radius = 1.5,
        colors = ["Red","Green","Orange","Blue","Gray","Pink","Black"],
        startangle = 90)
plt.show()
# Second pie: named labels, seaborn palette, percentage annotations (autopct)
# and two exploded wedges, plus a legend.
plt.pie(x = data[1],
        labels = labs,
        radius = .7,
        colors = sns.color_palette("hls",7),
        startangle = 90,
        autopct = "%1.1f%%",
        explode = (0,0,0,0.2,0,0,0.1))
plt.legend(loc="upper right",labels = labs)
plt.show()
# Bar chart of flights per weekday. Fix: the original assigned the returned
# Axes to a variable named `plt`, shadowing matplotlib.pyplot for the rest of
# the session — use a dedicated `ax` name instead (same calls, same output).
ax = sns.barplot(x = labs, y = data[1])
ax.set(xlabel = "Día de la semana", ylabel = "Número de vuelos")
# ## Gráficos para series temporales en Matplotlib
import pandas as pd
import seaborn as sns
import numpy as np
import datetime
import time
df = pd.read_csv("C:/Users/<NAME>/Desktop/GitHub/Python/Ciencia_Datos/base_datos_2008.csv")
# Subset used later for the per-origin time-series plot.
df2 = df[df["Origin"].isin(["ATL","HOU","IND"])]
df.head(500000)
# Build a real date per row from the Month/DayofMonth columns (year is fixed, 2008).
# NOTE(review): a row-by-row loop over the whole file is slow; pd.to_datetime on
# the three columns would vectorize this — left as-is to match the lesson text.
times = []
for i in np.arange(len(df)):
    times.append(datetime.datetime(year = 2008, month = df.loc[i,"Month"], day = df.loc[i,"DayofMonth"]))
times[50000]
df["Time"] = times
# Daily mean departure/arrival delay. Two fixes versus the original:
#  * `as_index=Falsr` was a typo (NameError at runtime) — now `as_index=False`,
#    so "Time" stays a regular column, which the lineplot below indexes.
#  * `data[DepDelay]` referenced an undefined bare name — the column label must
#    be the string "DepDelay".
data = df.groupby(by=["Time"],as_index=False)["DepDelay","ArrDelay"].mean()
data.head()
sns.lineplot(data["Time"],data["DepDelay"])
# Same daily means but keeping "Time" as the index: seaborn can then plot
# every column of the frame directly with data=.
data = df.groupby(by=["Time"])["DepDelay","ArrDelay"].mean()
data.head()
sns.lineplot(data=data)
# +
# Rebuild the per-row dates for the three-airport subset (df2 keeps the
# original row labels, hence iterating df2.index rather than a range).
times = []
for i in df2.index:
    times.append(datetime.datetime(year = 2008, month = df2.loc[i,"Month"], day = df2.loc[i,"DayofMonth"]))
df2["Time"] = times
# -
# One line per origin airport (hue="Origin") on a taller figure.
sns.set(rc={'figure.figsize':(10,15)})
sns.lineplot(x="Time",y="ArrDelay",hue="Origin",data=df2)
# ## Histogramas y box plots en Matplotlib
#
# Estos gráficos sirven para ver dónde están concentrados nuestros datos y sacar conclusiones exploratorias de cómo están distribuidos.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
df = pd.read_csv("C:/Users/<NAME>/Desktop/GitHub/Python/Ciencia_Datos/base_datos_2008.csv")
# The plots below need complete values in these three columns.
df.dropna(inplace=True, subset = ["ArrDelay", "DepDelay", "Distance"])
# Plain histogram (kde=False) of flight distance with 100 bins.
# NOTE(review): distplot is deprecated in recent seaborn (histplot/displot) —
# confirm the seaborn version pinned for this notebook.
sns.distplot(df["Distance"], kde = False, bins = 100)
# Overlaid kernel-density estimates of arrival vs departure delay.
sns.kdeplot(df["ArrDelay"])
sns.kdeplot(df["DepDelay"])
plt.xlim([-300,300])
# Random 500-row sample from three airports for a readable box plot.
df2 = df[df["Origin"].isin(["ATL","HOU","IND"])].sample(frac = 1).head(500)
sns.boxplot(x="DepDelay",y="Origin",data = df2)
plt.xlim([-20,150])
# ## Nubes de puntos y mapas de calor en Matplotlib
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
df = pd.read_csv("C:/Users/<NAME>/Desktop/GitHub/Python/Ciencia_Datos/base_datos_2008.csv")
df.dropna(inplace=True, subset = ["ArrDelay", "DepDelay", "Distance","AirTime"])
sns.set(rc={'figure.figsize':(15,10)}) # Adjusts the figure size
# +
# 1000-row sample from three airports; joint scatter + marginal histograms.
df2 =df[df["Origin"].isin(["ATL","HOU","IND"])].sample(frac=1).head(1000)
sns.jointplot(df2["DepDelay"],df2["ArrDelay"])
# -
# Clip extreme delays to |DepDelay| < 40 before the density views.
# NOTE(review): the second line filters df3 by df2's DepDelay again — it was
# probably meant to clip ArrDelay (df3["ArrDelay"]); confirm intent.
df3 = df2[np.abs(df2["DepDelay"])<40]
df3 = df3[np.abs(df2["DepDelay"])<40]
# Hexbin and KDE variants of the same joint distribution.
sns.jointplot(df3["DepDelay"],df3["ArrDelay"],kind="hex")
sns.jointplot(df3["DepDelay"],df3["ArrDelay"],kind="kde")
# Mean departure delay per (Origin, Month), then pivot to a Month x Origin grid.
gb_df = pd.DataFrame(df2.groupby(["Origin","Month"],as_index=False)["DepDelay"].mean())
gb_df.head()
# NOTE(review): positional pivot() arguments were removed in pandas 2.0
# (use index=/columns=/values= keywords) — confirm the pinned pandas version.
data = gb_df.pivot("Month","Origin","DepDelay")
data
# Annotated heat map of the pivoted means.
sns.set(rc={'figure.figsize':(15,8)})
sns.heatmap(data = data,annot=True,linewidths=.5)
# # Plotly
#
# Herramienta que permite realizar gráficos avanzados, funciona por medio de registro.
|
big_data_esencial.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Environment (conda_tensorflow_p27)
# language: python
# name: conda_tensorflow_p27
# ---
# # Abalone age predictor using tf.layers
#
# This tutorial covers how to create your own training script using the building
# blocks provided in `tf.layers`, which will predict the ages of
# [abalones](https://en.wikipedia.org/wiki/Abalone) based on their physical
# measurements. You'll learn how to do the following:
#
# * Instantiate an `sagemaker.Estimator`
# * Construct a custom model function
# * Configure a neural network using `tf.feature_column` and `tf.layers`
# * Choose an appropriate loss function from `tf.losses`
# * Define a training op for your model
# * Generate and return predictions
# ## An Abalone Age Predictor
#
# It's possible to estimate the age of an
# [abalone](https://en.wikipedia.org/wiki/Abalone) (sea snail) by the number of
# rings on its shell. However, because this task requires cutting, staining, and
# viewing the shell under a microscope, it's desirable to find other measurements
# that can predict age.
#
# The [Abalone Data Set](https://archive.ics.uci.edu/ml/datasets/Abalone) contains
# the following
# [feature data](https://archive.ics.uci.edu/ml/machine-learning-databases/abalone/abalone.names)
# for abalone:
#
# | Feature | Description |
# | -------------- | --------------------------------------------------------- |
# | Length | Length of abalone (in longest direction; in mm) |
# | Diameter | Diameter of abalone (measurement perpendicular to length; in mm)|
# | Height | Height of abalone (with its meat inside shell; in mm) |
# | Whole Weight | Weight of entire abalone (in grams) |
# | Shucked Weight | Weight of abalone meat only (in grams) |
# | Viscera Weight | Gut weight of abalone (in grams), after bleeding |
# | Shell Weight | Weight of dried abalone shell (in grams) |
#
# The label to predict is number of rings, as a proxy for abalone age.
# ### Set up the environment
# +
import os
import sagemaker
from sagemaker import get_execution_role
sagemaker_session = sagemaker.Session()
role = get_execution_role()
# -
# ### Upload the data to a S3 bucket
inputs = sagemaker_session.upload_data(path='data', key_prefix='data/DEMO-abalone')
# **sagemaker_session.upload_data** will upload the abalone dataset from your machine to a bucket named **sagemaker-{region}-{your aws account number}**, if you don't have this bucket yet, sagemaker_session will create it for you.
# ## Complete source code
# Here is the full code for the network model:
# !cat 'abalone.py'
# ## Constructing the `model_fn`
#
# The basic skeleton for an model function looks like this:
#
# ```python
# def model_fn(features, labels, mode, hyperparameters):
# # Logic to do the following:
# # 1. Configure the model via TensorFlow operations
# # 2. Define the loss function for training/evaluation
# # 3. Define the training operation/optimizer
# # 4. Generate predictions
# # 5. Return predictions/loss/train_op/eval_metric_ops in EstimatorSpec object
# return EstimatorSpec(mode, predictions, loss, train_op, eval_metric_ops)
# ```
#
# The **`model_fn`** requires three arguments:
#
# * **`features`**: A dict containing the features passed to the model via
# **`input_fn`**.
# * **`labels`**: A `Tensor` containing the labels passed to the model via
# **`input_fn`**. Will be empty for `predict()` calls, as these are the values the
# model will infer.
# * **`mode`**: One of the following tf.estimator.ModeKeys string values
# indicating the context in which the model_fn was invoked:
# * **`TRAIN`** The **`model_fn`** was invoked in training
# mode, namely via a `train()` call.
# * **`EVAL`** The **`model_fn`** was invoked in
# evaluation mode, namely via an `evaluate()` call.
# * **`PREDICT`** The **`model_fn`** was invoked in
# predict mode, namely via a `predict()` call.
#
# **`model_fn`** may also accept a **`hyperparameters`** argument containing a dict of
# hyperparameters used for training (as shown in the skeleton above).
#
# The body of the function performs the following tasks (described in detail in the
# sections that follow):
#
# * Configuring the model for the abalone predictor, this will be a neural
# network.
# * Defining the loss function used to calculate how closely the model's
# predictions match the target values.
# * Defining the training operation that specifies the `optimizer` algorithm to
# minimize the loss values calculated by the loss function.
# The **`model_fn`** must return a tf.estimator.EstimatorSpec
# object, which contains the following values:
#
# * **`mode`** (required). The mode in which the model was run. Typically, you will
# return the `mode` argument of the `model_fn` here.
#
# * **`predictions`** (required in `PREDICT` mode). A dict that maps key names of
# your choice to `Tensor`s containing the predictions from the model, e.g.:
#
# ```python
# predictions = {"results": tensor_of_predictions}
# ```
#
# In `PREDICT` mode, the dict that you return in `EstimatorSpec` will then be
# returned by `predict()`, so you can construct it in the format in which
# you'd like to consume it.
#
#
# * **`loss`** (required in `EVAL` and `TRAIN` mode). A `Tensor` containing a scalar
# loss value: the output of the model's loss function (discussed in more depth
# later in Defining loss for the model calculated over all
# the input examples. This is used in `TRAIN` mode for error handling and
# logging, and is automatically included as a metric in `EVAL` mode.
#
# * **`train_op`** (required only in `TRAIN` mode). An Op that runs one step of
# training.
#
# * **`eval_metric_ops`** (optional). A dict of name/value pairs specifying the
# metrics that will be calculated when the model runs in `EVAL` mode. The name
# is a label of your choice for the metric, and the value is the result of
# your metric calculation. The tf.metrics
# module provides predefined functions for a variety of common metrics. The
# following `eval_metric_ops` contains an `"accuracy"` metric calculated using
# `tf.metrics.accuracy`:
#
# ```python
# eval_metric_ops = {
# "accuracy": tf.metrics.accuracy(labels, predictions)
# }
# ```
#
# If you do not specify `eval_metric_ops`, only `loss` will be calculated
# during evaluation.
# ### Configuring a neural network with `tf.feature_column` and `tf.layers`
#
# Constructing a [neural
# network](https://en.wikipedia.org/wiki/Artificial_neural_network) entails
# creating and connecting the input layer, the hidden layers, and the output
# layer.
#
# The input layer is a series of nodes (one for each feature in the model) that
# will accept the feature data that is passed to the `model_fn` in the `features`
# argument. If `features` contains an n-dimensional `Tensor` with all your feature
# data, then it can serve as the input layer.
# If `features` contains a dict of feature columns passed to
# the model via an input function, you can convert it to an input-layer `Tensor`
# with the tf.feature_column.input_layer function.
#
# ```python
# input_layer = tf.feature_column.input_layer(features=features, feature_columns=[age, height, weight])
# ```
# As shown above, **`input_layer()`** takes two required arguments:
#
# * **`features`**. A mapping from string keys to the `Tensors` containing the
# corresponding feature data. This is exactly what is passed to the `model_fn`
# in the `features` argument.
# * **`feature_columns`**. A list of all the `FeatureColumns`: `age`,
# `height`, and `weight` in the above example.
#
# The input layer of the neural network then must be connected to one or more
# hidden layers via an [activation
# function](https://en.wikipedia.org/wiki/Activation_function) that performs a
# nonlinear transformation on the data from the previous layer. The last hidden
# layer is then connected to the output layer, the final layer in the model.
# `tf.layers` provides the `tf.layers.dense` function for constructing fully
# connected layers. The activation is controlled by the `activation` argument.
# Some options to pass to the `activation` argument are:
#
# * **`tf.nn.relu`**. The following code creates a layer of `units` nodes fully
# connected to the previous layer `input_layer` with a
# [ReLU activation function](https://en.wikipedia.org/wiki/Rectifier_\(neural_networks\))
# (tf.nn.relu):
#
# ```python
# hidden_layer = tf.layers.dense(
# inputs=input_layer, units=10, activation=tf.nn.relu)
# ```
#
# * **`tf.nn.relu`**. The following code creates a layer of `units` nodes fully
# connected to the previous layer `hidden_layer` with a ReLU activation
# function:
#
# ```python
# second_hidden_layer = tf.layers.dense(
# inputs=hidden_layer, units=20, activation=tf.nn.relu)
# ```
#
# * **`None`**. The following code creates a layer of `units` nodes fully connected
# to the previous layer `second_hidden_layer` with *no* activation function,
# just a linear transformation:
#
# ```python
# output_layer = tf.layers.dense(
# inputs=second_hidden_layer, units=3, activation=None)
# ```
# Other activation functions are possible, e.g.:
#
# ```python
# output_layer = tf.layers.dense(inputs=second_hidden_layer,
# units=10,
# activation_fn=tf.sigmoid)
# ```
#
# The above code creates the neural network layer `output_layer`, which is fully
# connected to `second_hidden_layer` with a sigmoid activation function
# (tf.sigmoid).
#
# Putting it all together, the following code constructs a full neural network for
# the abalone predictor, and captures its predictions:
#
# ```python
# def model_fn(features, labels, mode, params):
# """Model function for Estimator."""
#
# # Connect the first hidden layer to input layer
# # (features["x"]) with relu activation
# first_hidden_layer = tf.layers.dense(features["x"], 10, activation=tf.nn.relu)
#
# # Connect the second hidden layer to first hidden layer with relu
# second_hidden_layer = tf.layers.dense(
# first_hidden_layer, 10, activation=tf.nn.relu)
#
# # Connect the output layer to second hidden layer (no activation fn)
# output_layer = tf.layers.dense(second_hidden_layer, 1)
#
# # Reshape output layer to 1-dim Tensor to return predictions
# predictions = tf.reshape(output_layer, [-1])
# predictions_dict = {"ages": predictions}
# ...
# ```
#
# Here, because you'll be passing the abalone `Datasets` using `numpy_input_fn`
# as shown below, `features` is a dict `{"x": data_tensor}`, so
# `features["x"]` is the input layer. The network contains two hidden
# layers, each with 10 nodes and a ReLU activation function. The output layer
# contains no activation function, and is
# tf.reshape to a one-dimensional
# tensor to capture the model's predictions, which are stored in
# `predictions_dict`.
#
# ### Defining loss for the model
#
# The `EstimatorSpec` returned by the `model_fn` must contain `loss`: a `Tensor`
# representing the loss value, which quantifies how well the model's predictions
# reflect the label values during training and evaluation runs. The tf.losses
# module provides convenience functions for calculating loss using a variety of
# metrics, including:
#
# * `absolute_difference(labels, predictions)`. Calculates loss using the
# [absolute-difference
# formula](https://en.wikipedia.org/wiki/Deviation_\(statistics\)#Unsigned_or_absolute_deviation)
# (also known as L<sub>1</sub> loss).
#
# * `log_loss(labels, predictions)`. Calculates loss using the [logistic loss
# forumula](https://en.wikipedia.org/wiki/Loss_functions_for_classification#Logistic_loss)
# (typically used in logistic regression).
#
# * `mean_squared_error(labels, predictions)`. Calculates loss using the [mean
# squared error](https://en.wikipedia.org/wiki/Mean_squared_error) (MSE; also
# known as L<sub>2</sub> loss).
#
# The following example adds a definition for `loss` to the abalone `model_fn`
# using `mean_squared_error()`:
# ```python
# def model_fn(features, labels, mode, params):
# """Model function for Estimator."""
#
# # Connect the first hidden layer to input layer
# # (features["x"]) with relu activation
# first_hidden_layer = tf.layers.dense(features["x"], 10, activation=tf.nn.relu)
#
# # Connect the second hidden layer to first hidden layer with relu
# second_hidden_layer = tf.layers.dense(
# first_hidden_layer, 10, activation=tf.nn.relu)
#
# # Connect the output layer to second hidden layer (no activation fn)
# output_layer = tf.layers.dense(second_hidden_layer, 1)
#
# # Reshape output layer to 1-dim Tensor to return predictions
# predictions = tf.reshape(output_layer, [-1])
# predictions_dict = {"ages": predictions}
#
#
# # Calculate loss using mean squared error
# loss = tf.losses.mean_squared_error(labels, predictions)
# ...
# ```
#
# See the [tf.losses](https://www.tensorflow.org/api_docs/python/tf/losses) for a
# full list of loss functions and more details on supported arguments and usage.
#
# Supplementary metrics for evaluation can be added to an `eval_metric_ops` dict.
# The following code defines an `rmse` metric, which calculates the root mean
# squared error for the model predictions. Note that the `labels` tensor is cast
# to a `float64` type to match the data type of the `predictions` tensor, which
# will contain real values:
#
# ```python
# eval_metric_ops = {
# "rmse": tf.metrics.root_mean_squared_error(
# tf.cast(labels, tf.float64), predictions)
# }
# ```
# ### Defining the training op for the model
#
# The training op defines the optimization algorithm TensorFlow will use when
# fitting the model to the training data. Typically when training, the goal is to
# minimize loss. A simple way to create the training op is to instantiate a
# `tf.train.Optimizer` subclass and call the `minimize` method.
#
# The following code defines a training op for the abalone `model_fn` using the
# loss value calculated in [Defining Loss for the Model](https://github.com/tensorflow/tensorflow/blob/eb84435170c694175e38bfa02751c3ef881c7a20/tensorflow/docs_src/extend/estimators.md#defining-loss), the
# learning rate passed to the function in `params`, and the gradient descent
# optimizer. For `global_step`, the convenience function
# tf.train.get_global_step takes care of generating an integer variable:
#
# ```python
# optimizer = tf.train.GradientDescentOptimizer(
# learning_rate=params["learning_rate"])
# train_op = optimizer.minimize(
# loss=loss, global_step=tf.train.get_global_step())
# ```
#
# ### The complete abalone `model_fn`
#
# Here's the final, complete `model_fn` for the abalone age predictor. The
# following code configures the neural network; defines loss and the training op;
# and returns a `EstimatorSpec` object containing `mode`, `predictions_dict`, `loss`,
# and `train_op`:
#
# ```python
# def model_fn(features, labels, mode, params):
# """Model function for Estimator."""
#
# # Connect the first hidden layer to input layer
# # (features["x"]) with relu activation
# first_hidden_layer = tf.layers.dense(features["x"], 10, activation=tf.nn.relu)
#
# # Connect the second hidden layer to first hidden layer with relu
# second_hidden_layer = tf.layers.dense(
# first_hidden_layer, 10, activation=tf.nn.relu)
#
# # Connect the output layer to second hidden layer (no activation fn)
# output_layer = tf.layers.dense(second_hidden_layer, 1)
#
# # Reshape output layer to 1-dim Tensor to return predictions
# predictions = tf.reshape(output_layer, [-1])
#
# # Provide an estimator spec for `ModeKeys.PREDICT`.
# if mode == tf.estimator.ModeKeys.PREDICT:
# return tf.estimator.EstimatorSpec(
# mode=mode,
# predictions={"ages": predictions})
#
# # Calculate loss using mean squared error
# loss = tf.losses.mean_squared_error(labels, predictions)
#
# # Calculate root mean squared error as additional eval metric
# eval_metric_ops = {
# "rmse": tf.metrics.root_mean_squared_error(
# tf.cast(labels, tf.float64), predictions)
# }
#
# optimizer = tf.train.GradientDescentOptimizer(
# learning_rate=params["learning_rate"])
# train_op = optimizer.minimize(
# loss=loss, global_step=tf.train.get_global_step())
#
# # Provide an estimator spec for `ModeKeys.EVAL` and `ModeKeys.TRAIN` modes.
# return tf.estimator.EstimatorSpec(
# mode=mode,
# loss=loss,
# train_op=train_op,
# eval_metric_ops=eval_metric_ops)
# ```
# # Submitting script for training
# +
from sagemaker.tensorflow import TensorFlow
# Configure a managed SageMaker training job that runs the Estimator defined
# in abalone.py inside the TensorFlow 1.9 framework container.
# NOTE(review): `role` and `inputs` are assumed to be defined in earlier
# notebook cells (IAM role and S3 data location) -- confirm against the full notebook.
abalone_estimator = TensorFlow(entry_point='abalone.py',
                       role=role,
                       framework_version='1.9',
                       training_steps= 100,
                       evaluation_steps= 100,
                       hyperparameters={'learning_rate': 0.001},
                       train_instance_count=1,
                       train_instance_type='ml.c4.xlarge')
# Launch the job: uploads the script, provisions one ml.c4.xlarge instance,
# and trains for the configured 100 steps.
abalone_estimator.fit(inputs)
# -
# `estimator.fit` will deploy the script in a container for training and returns the SageMaker model name, using the following arguments:
#
# * **`entry_point="abalone.py"`** The path to the script that will be deployed to the container.
# * **`training_steps=100`** The number of training steps of the training job.
# * **`evaluation_steps=100`** The number of evaluation steps of the training job.
# * **`role`**. AWS role that gives your account access to SageMaker training and hosting
# * **`hyperparameters={'learning_rate' : 0.001}`**. Training hyperparameters.
#
# Running the code block above will do the following actions:
# * deploy your script in a container with tensorflow installed
# * copy the data from the bucket to the container
# * instantiate the tf.estimator
# * train the estimator with 100 training steps
# * save the estimator model
# # Submitting a trained model for hosting
#
abalone_predictor = abalone_estimator.deploy(initial_instance_count=1, instance_type='ml.m4.xlarge')
# `abalone_estimator.deploy` deploys the trained model in a container ready for production.
# # Invoking the endpoint
# +
import tensorflow as tf
import numpy as np
# Load the unlabeled prediction examples. `target_dtype` uses the builtin
# `int`: the `np.int` alias is deprecated and removed in NumPy >= 1.24.
# The original single-argument `os.path.join(...)` wrapper was dropped -- it
# was a no-op, and `os` was never imported in this notebook, so the line
# raised a NameError before reaching TensorFlow at all.
prediction_set = tf.contrib.learn.datasets.base.load_csv_without_header(
    filename='data/abalone_predict.csv', target_dtype=int, features_dtype=np.float32)
# Serialize the first example as a 1 x n_features TensorProto, the payload
# format the hosted TensorFlow endpoint expects.
data = prediction_set.data[0]
tensor_proto = tf.make_tensor_proto(values=np.asarray(data), shape=[1, len(data)], dtype=tf.float32)
# -
# Send the serialized example to the hosted endpoint and get the predicted age.
abalone_predictor.predict(tensor_proto)
# # Deleting the endpoint
# Tear the endpoint down so the account stops accruing hosting charges.
# NOTE(review): assumes the `sagemaker` package itself was imported in an
# earlier cell; this chunk only shows `from sagemaker.tensorflow import TensorFlow`.
sagemaker.Session().delete_endpoint(abalone_predictor.endpoint)
|
sagemaker-python-sdk/tensorflow_abalone_age_predictor_using_layers/tensorflow_abalone_age_predictor_using_layers.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="TVz5Idb0YkFh" colab_type="text"
# The purpose of this notebook is to test the most naive fast style transfer approach I could come up with - an autoencoder.<br>
# I prepared 15 pairs of images sharing the same style donor: <NAME>'s "The Starry Night".
# + id="r7RNoJJ5V3lL" colab_type="code" colab={}
from keras import layers
from keras.layers import Input, Dense, Conv2D, Conv2DTranspose, MaxPooling2D, UpSampling2D
from keras.layers import BatchNormalization, ReLU
from keras.models import Model, load_model, model_from_json
from keras import backend as K
from keras.preprocessing.image import load_img, img_to_array, ImageDataGenerator
import numpy as np
np.random.seed(42) # for reproducibility
import matplotlib.pyplot as plt
from tensorboardcolab import TensorBoardColab, TensorBoardColabCallback
import os
from google.colab.patches import cv2_imshow #instead of cv2.imshow
import cv2
# + [markdown] id="wQkkGvw6a6cJ" colab_type="text"
# ### Setting up model
# + [markdown] id="QdDCKlM3c30q" colab_type="text"
# The structure of this autoencoder was taken from the article [Perceptual Losses for Real-Time Style Transfer
# and Super-Resolution](https://cs.stanford.edu/people/jcjohns/papers/eccv16/JohnsonECCV16.pdf)
# + id="asaxyPYiWOst" colab_type="code" colab={}
def residual_block(y, nb_channels, _strides=(1, 1)):
    """Attach a two-convolution residual unit to `y` and return its output.

    The main branch is conv(3x3)-BN-ReLU-conv(3x3)-BN; it is merged with the
    identity shortcut by element-wise addition before a closing ReLU.
    `nb_channels` must match the channel count of `y`, since the shortcut is
    added without a projection.
    """
    identity = y
    # Main branch: two same-padded 3x3 convolutions with batch normalization.
    out = Conv2D(nb_channels, kernel_size=(3, 3), strides=_strides, padding='same')(y)
    out = ReLU()(BatchNormalization()(out))
    out = Conv2D(nb_channels, kernel_size=(3, 3), strides=_strides, padding='same')(out)
    out = BatchNormalization()(out)
    # Merge with the shortcut, then apply the final activation.
    return ReLU()(layers.add([identity, out]))
# + id="zWWC_LjiaVRj" colab_type="code" outputId="8cf2b581-5419-4cff-d823-17f59c4762e6" executionInfo={"status": "ok", "timestamp": 1575632357885, "user_tz": -180, "elapsed": 2634, "user": {"displayName": "\u0413\u0440\u0438\u0433\u043e\u0440\u0438\u0439 \u0413\u0443\u0441\u0430\u0440\u043e\u0432", "photoUrl": "", "userId": "07886548581033481820"}} colab={"base_uri": "https://localhost:8080/", "height": 207}
input_img = Input(shape = (512, 512, 3)) # 512 (not 256) because all of our train style images have height = 400 px
# Encoder: one wide 9x9 feature extractor, then two stride-2 downsamplings
# (512 -> 256 -> 128 spatial resolution, 3 -> 32 -> 64 -> 128 channels).
x = Conv2D(32, (9, 9), strides = 1, activation = 'relu', padding = 'same')(input_img)
x = Conv2D(64, (3, 3), strides = 2, activation = 'relu', padding = 'same')(x)
x = Conv2D(128, (3, 3), strides = 2, activation = 'relu', padding = 'same')(x)
print('Shape after encoding:', x.shape)
# Bottleneck: five 128-channel residual blocks (see residual_block above).
x = residual_block(x, 128, (1, 1))
x = residual_block(x, 128, (1, 1))
x = residual_block(x, 128, (1, 1))
x = residual_block(x, 128, (1, 1))
x = residual_block(x, 128, (1, 1))
print('Shape after residual blocks:', x.shape)
# Decoder: mirror of the encoder using transposed convolutions.
x = Conv2DTranspose(64, (3, 3), strides = 2, activation='relu', padding='same')(x)
x = Conv2DTranspose(32, (3, 3), strides = 2, activation='relu', padding='same')(x)
decoded = Conv2DTranspose(3, (9, 9), strides = 1, activation='relu', padding='same')(x) # TODO: maybe a sigmoid output would suit the [0, 1] pixel range better?
print('Shape after decoding:', decoded.shape)
autoencoder = Model(input_img, decoded)
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
# + [markdown] id="QHjFmvCgbDAJ" colab_type="text"
# ### Preparing Data
# + [markdown] id="F_-X0JsP9Y2S" colab_type="text"
# **Image generators**
# + id="Du5shf0qaaR4" colab_type="code" outputId="a5d616b8-c085-432f-d137-ade15bee86b4" executionInfo={"status": "ok", "timestamp": 1575632377159, "user_tz": -180, "elapsed": 2032, "user": {"displayName": "\u0413\u0440\u0438\u0433\u043e\u0440\u0438\u0439 \u0413\u0443\u0441\u0430\u0440\u043e\u0432", "photoUrl": "", "userId": "07886548581033481820"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
# Mount Google Drive so the training pairs and saved models are reachable.
from google.colab import drive
drive.mount('/content/drive')
# + id="K05Y_Bd1a4Mg" colab_type="code" colab={}
# we have different input (original picture) and output (style-transferred picture) for our training set
train_input_dir = '/content/drive/My Drive/colab_notebooks/Keras_Fast_Style_Transfer/img_encoders_pairs/original_images'
train_output_dir = '/content/drive/My Drive/colab_notebooks/Keras_Fast_Style_Transfer/img_encoders_pairs/generated_results'
# Held-out photos used only for visual inspection further below.
test_dir = '/content/drive/My Drive/colab_notebooks/Keras_Fast_Style_Transfer/img_encoders_pairs/test_images'
# + id="Xhf3q07Wb67H" colab_type="code" colab={}
# templates for our future image generators
train_datagen = ImageDataGenerator(
    rescale = 1./255,          # map pixel values into [0, 1]
    rotation_range = 40,       # the remaining settings are random augmentations
    width_shift_range = 0.2,
    height_shift_range = 0.2,
    shear_range = 0.2,
    zoom_range = 0.2,
    horizontal_flip = True
)
# Test images are only rescaled, never augmented.
test_datagen = ImageDataGenerator(rescale = 1./255)
# + id="Pl3xxpfVcIIN" colab_type="code" outputId="cc2af7bd-03b8-4d0c-a978-f7c5adbc47ff" executionInfo={"status": "ok", "timestamp": 1575559856343, "user_tz": -180, "elapsed": 846, "user": {"displayName": "\u0413\u0440\u0438\u0433\u043e\u0440\u0438\u0439 \u0413\u0443\u0441\u0430\u0440\u043e\u0432", "photoUrl": "", "userId": "07886548581033481820"}} colab={"base_uri": "https://localhost:8080/", "height": 51}
train_input_generator = train_datagen.flow_from_directory(
    train_input_dir,
    target_size = (512, 512),
    batch_size = 32,
    class_mode = None,  # yield images only, no labels
    seed = 42) # fix seed so input/output batches come out as matching pairs
train_output_generator = train_datagen.flow_from_directory(
    train_output_dir,
    target_size = (512, 512),
    batch_size = 32,
    class_mode = None,
    seed = 42)  # same seed as the input generator, keeping batches aligned
# + id="kdnhosmOczNc" colab_type="code" colab={}
def pairs_generator(input_generator, output_generator):
    """Yield (input_batch, output_batch) tuples by advancing both directory
    generators in lockstep (they share a seed, so the batches stay paired)."""
    yield from zip(input_generator, output_generator)
# + id="5e44sQhi7NcF" colab_type="code" colab={}
# Check image generators
# + [markdown] id="Bf4bZSJJ01d-" colab_type="text"
# https://github.com/keras-team/keras/issues/3386
# + id="tJnH45Q_18Kf" colab_type="code" colab={}
sample = next(pairs_generator(train_input_generator, train_output_generator))
# + id="iWI4CvtR3VY_" colab_type="code" outputId="1dee2d22-c06e-49d6-a628-957d98bd4d4c" executionInfo={"status": "ok", "timestamp": 1575559881908, "user_tz": -180, "elapsed": 1648, "user": {"displayName": "\u0413\u0440\u0438\u0433\u043e\u0440\u0438\u0439 \u0413\u0443\u0441\u0430\u0440\u043e\u0432", "photoUrl": "", "userId": "07886548581033481820"}} colab={"base_uri": "https://localhost:8080/", "height": 236}
imgs_1 = sample[0]  # batch of original photos
imgs_2 = sample[1]  # batch of style-transferred targets
print(imgs_1.shape)
# Show the first original next to its stylized counterpart, side by side.
plt.imshow(np.hstack([imgs_1[0, :, :, :], imgs_2[0, :, :, :]]))
plt.show()
# + [markdown] id="tkREmoa1fznW" colab_type="text"
# ### Train model
# + id="AjLgXydsf3hv" colab_type="code" outputId="b341a8a5-b10e-41cc-9b17-065656ea0a18" executionInfo={"status": "ok", "timestamp": 1575559908717, "user_tz": -180, "elapsed": 20062, "user": {"displayName": "\u0413\u0440\u0438\u0433\u043e\u0440\u0438\u0439 \u0413\u0443\u0441\u0430\u0440\u043e\u0432", "photoUrl": "", "userId": "07886548581033481820"}} colab={"base_uri": "https://localhost:8080/", "height": 68}
tbc = TensorBoardColab()
# + id="BofaQ76qfd38" colab_type="code" colab={}
def train_model():
    '''
    Safety wrapper for model training.

    Streams paired (original, stylized) batches from the module-level
    generators into the module-level `autoencoder`, logging progress to
    TensorBoard through the Colab callback `tbc`. Returns the trained model.
    '''
    # `epochs` replaces the Keras 1 `nb_epoch` argument, which Keras 2's
    # fit_generator rejects with a TypeError (the rest of this file uses the
    # Keras 2 API: Conv2DTranspose, the ReLU layer, etc.).
    autoencoder.fit_generator(pairs_generator(train_input_generator, train_output_generator),
                              steps_per_epoch = 100,
                              epochs = 10,
                              callbacks = [TensorBoardColabCallback(tbc)])
    return autoencoder
# + id="h2sDTXeRHLCF" colab_type="code" outputId="0bb6f867-b52d-4b4d-87e7-3b0ed0b61404" executionInfo={"status": "ok", "timestamp": 1575562711712, "user_tz": -180, "elapsed": 2761238, "user": {"displayName": "\u0413\u0440\u0438\u0433\u043e\u0440\u0438\u0439 \u0413\u0443\u0441\u0430\u0440\u043e\u0432", "photoUrl": "", "userId": "07886548581033481820"}} colab={"base_uri": "https://localhost:8080/", "height": 615}
# # Uncomment to re-train model
# autoencoder = train_model()
# + id="Zh2_eXJ-fvQ_" colab_type="code" colab={}
#save results
def save_model(model_name = 'style_autoencoder'):
    """Persist the module-level `autoencoder` to Google Drive.

    Writes two artifacts into the saved_models folder:
    <model_name>.json with the architecture and <model_name>_weights.h5 via
    Keras `save` (which stores the full model, despite the `_weights` name).
    """
    save_dir = '/content/drive/My Drive/colab_notebooks/Keras_Fast_Style_Transfer/saved_models'
    architecture = autoencoder.to_json()
    with open(os.path.join(save_dir, '{}.json'.format(model_name)), 'w') as json_file:
        json_file.write(architecture)
    autoencoder.save(os.path.join(save_dir, '{}_weights.h5'.format(model_name)))
# + id="f4RrXcoOMGv7" colab_type="code" colab={}
# # Uncomment to save trained model
# save_model(model_name = 'style_autoencoder_512')
# + [markdown] id="hYaklemFMTbm" colab_type="text"
# ### Load pre-trained model
# Start here if you just want to get results
# + [markdown] id="fmmN1pLxSiX6" colab_type="text"
# **Load model**
# + id="n9Qg-cc94_pG" colab_type="code" outputId="88df240b-b52a-4a7e-e0f2-e08e29f6807c" executionInfo={"status": "ok", "timestamp": 1575632396500, "user_tz": -180, "elapsed": 1116, "user": {"displayName": "\u0413\u0440\u0438\u0433\u043e\u0440\u0438\u0439 \u0413\u0443\u0441\u0430\u0440\u043e\u0432", "photoUrl": "", "userId": "07886548581033481820"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
from google.colab import drive
drive.mount('/content/drive')
# + id="g7fe2w6ISDXy" colab_type="code" colab={}
model_files_dir = '/content/drive/My Drive/colab_notebooks/Keras_Fast_Style_Transfer/saved_models'
# + id="lAoo0iHwMvvO" colab_type="code" colab={}
def load_model(json_file_name, h5_file_name):
    """Rebuild a Keras model from a JSON architecture file plus an HDF5
    weights file, both resolved against the module-level `model_files_dir`.

    NOTE(review): this function shadows `keras.models.load_model`, which is
    imported at the top of the file; renaming would be cleaner but callers
    below depend on this name.
    """
    # Context manager guarantees the file handle is closed (the original
    # leaked it if `model_from_json` raised).
    with open(os.path.join(model_files_dir, json_file_name), 'r') as json_file:
        model_json = json_file.read()
    model = model_from_json(model_json)
    model.load_weights(os.path.join(model_files_dir, h5_file_name))
    return model
# + id="lPcpV1P0RSCp" colab_type="code" colab={}
autoencoder = load_model('style_autoencoder_512.json', 'style_autoencoder_512_weights.h5')
# + [markdown] id="04rsd52Jkhpc" colab_type="text"
# **Get test results**
# + id="EKIj-p--ArEX" colab_type="code" outputId="d0610ccf-ffe4-44f9-e600-82df80ae54b1" executionInfo={"status": "ok", "timestamp": 1575630677311, "user_tz": -180, "elapsed": 1893, "user": {"displayName": "\u0413\u0440\u0438\u0433\u043e\u0440\u0438\u0439 \u0413\u0443\u0441\u0430\u0440\u043e\u0432", "photoUrl": "", "userId": "07886548581033481820"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
# Get list of names of all test images
# (they sit inside a 'dummy_folder' subdirectory of the test dir)
test_images_names = os.listdir(os.path.join(test_dir, 'dummy_folder'))
print(test_images_names)
# + id="CwIEZiJODJjH" colab_type="code" outputId="d5d0a7a0-b522-4f49-b3a2-ab65064d8734" executionInfo={"status": "ok", "timestamp": 1575630783624, "user_tz": -180, "elapsed": 1282, "user": {"displayName": "\u0413\u0440\u0438\u0433\u043e\u0440\u0438\u0439 \u0413\u0443\u0441\u0430\u0440\u043e\u0432", "photoUrl": "", "userId": "07886548581033481820"}} colab={"base_uri": "https://localhost:8080/", "height": 170}
# Make dict with test file paths: file name -> absolute path
test_images_dict = {}
test_images_path = os.path.join(test_dir, 'dummy_folder')
for dirpath, _, filenames in os.walk(test_images_path):
    for f in filenames:
        test_images_dict[f] = os.path.abspath(os.path.join(dirpath, f))
# Bare expression so the notebook displays the mapping for inspection.
test_images_dict
# + id="IxdZZDL1EocT" colab_type="code" colab={}
def make_picture(picture_name, saving_dir_path = None):
    """Run one test image through the autoencoder and plot the original next
    to the stylized output.

    Parameters
    ----------
    picture_name : str
        Key into the module-level `test_images_dict` (file name of a test image).
    saving_dir_path : str, optional
        When given, the stylized output is also written to this directory
        under the same file name.

    Raises
    ------
    ValueError
        If `picture_name` is not a known test image.
    """
    try:
        picture_path = test_images_dict[picture_name]
    except KeyError:  # narrowed from a bare except: any other error is a real bug
        raise ValueError('Can`t find this file in folder!') from None
    original_img = cv2.imread(picture_path)
    original_img = cv2.cvtColor(original_img, cv2.COLOR_BGR2RGB)  # OpenCV loads BGR; matplotlib wants RGB
    height, width, _ = original_img.shape
    # Display height of 400 px matches the styled train-set images; the width
    # is scaled to preserve the original aspect ratio. (The original comment
    # mislabeled this as a width.)
    output_img_height = 400
    output_img_width = int(width * (output_img_height/height))
    scaled_original_img = cv2.resize(original_img, (output_img_width, output_img_height))
    # The network expects a normalized 1 x 512 x 512 x 3 batch.
    input_img = cv2.resize(original_img, (512, 512))/255
    input_img = np.expand_dims(input_img, axis = 0)
    output_img = autoencoder.predict(input_img)
    output_img = cv2.resize(output_img[0], (output_img_width, output_img_height))
    # Side-by-side comparison: original (left) vs stylized (right).
    fig, axs = plt.subplots(1, 2, figsize = (10, 5))
    fig.suptitle('Original and encoded pictures')
    axs[0].imshow(scaled_original_img)
    axs[1].imshow(output_img)
    if saving_dir_path is not None:
        # Clip to [0, 1] because the network output can overshoot that range.
        plt.imsave(os.path.join(saving_dir_path, picture_name), np.clip(output_img, 0, 1))
    return None
# + id="_SX0WYWj__84" colab_type="code" colab={}
save_results_dir = '/content/drive/My Drive/colab_notebooks/Keras_Fast_Style_Transfer/img_encoders_pairs/test_results'
# + id="2ysnkGyXCE9s" colab_type="code" outputId="c4c8703d-a4bc-4c22-8fb4-2679d723477b" executionInfo={"status": "ok", "timestamp": 1575632518069, "user_tz": -180, "elapsed": 10915, "user": {"displayName": "\u0413\u0440\u0438\u0433\u043e\u0440\u0438\u0439 \u0413\u0443\u0441\u0430\u0440\u043e\u0432", "photoUrl": "", "userId": "07886548581033481820"}} colab={"base_uri": "https://localhost:8080/", "height": 1000, "output_embedded_package_id": "1wZtRcMNI0O7LMKWoxcqMPXj1BDxtIG6g"}
# Stylize every test image and write each result to Drive.
# Iterating the dict directly yields its keys; the redundant .keys() call is dropped.
for img_name in test_images_dict:
    make_picture(img_name, save_results_dir)
# + id="jPGOPRxFmHbI" colab_type="code" colab={}
|
research_notebooks/4.1) Style Transfer Autoencoder (naive fast algorythm).ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img src="images/usm.jpg" width="480" height="240" align="left"/>
# + [markdown] slideshow={"slide_type": "slide"}
# # MAT281 - Visualización Imperativa
# + [markdown] slideshow={"slide_type": "slide"}
# ## Objetivos de la clase
#
# * Comprender el estilo de visualización imperativa.
# * Aplicar gráficos adecuados dependiendo de los datos.
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Contenidos
# * [Visualización Imperativa](#imperative)
# * [Matplotlib](#matplotlib)
# * [Gráfico a Gráfico](#plot-plot)
# + [markdown] slideshow={"slide_type": "slide"}
# ## I.- Visualización Imperativa
# <a id='imperative'></a>
#
# <img src="https://www.butleranalytics.com/wp-content/uploads/2014/07/optimizationw.jpg" alt="" width="500" align="middle"/>
#
# Este paradigma se focaliza en las instrucciones recibidas, ya que no abstrae las operaciones o codificaciones visuales. Algunas de sus características son:
#
# * Se especifica _Cómo_ se debe hacer algo.
# * Se deben especificar manualmente los pasos del trazado.
# * Especificación y ejecución entrelazadas.
#
# Coloquialmente se puede entender como que se debe decidir pixel a pixel lo que se desea mostrar.
# + [markdown] slideshow={"slide_type": "slide"}
# ## II.- Matplotlib
#
# <img src="images/matplotlib.png" alt="" width="400" align="middle"/>
#
# **Matplotlib** es una biblioteca para la generación de gráficos a partir de datos contenidos en listas o arrays en el lenguaje de programación Python y su extensión matemática NumPy. Proporciona una API, pylab, diseñada para recordar a la de **MATLAB**.
#
# En matplotlib todo está organizado en una jerarquía:
#
# * En la parte superior se encuentra el módulo `matplotlib.pyplot`. En este nivel, se utilizan funciones simples para agregar elementos de trazado (líneas, imágenes, texto, etc.) a los ejes actuales en la figura actual.
#
#
# * El siguiente nivel en la jerarquía es el primer nivel de la interfaz orientada a objetos, en la que pyplot se usa solo para algunas funciones, como la creación de figuras, y el usuario crea y realiza un seguimiento explícito de los objetos de figuras y ejes. En este nivel, el usuario usa pyplot para crear figuras, y a través de esas figuras, se pueden crear uno o más objetos de ejes.
# -
# ## Componentes de un gráfico
#
# 
#
#
# ### a) Figure
#
# Es la visualización completa. _Figure_ realiza un seguimiento de todos los _Axes_ hijos y el _Canvas_. Una figura puede tener cualquier número de _Axes_, pero para ser útil debe tener al menos uno.
#
# La forma más fácil de crear una nueva _Figure_ es con pyplot:
#
# ```python
# fig = plt.figure() # an empty figure with no axes
#
# fig, ax_lst = plt.subplots(2, 2) # a figure with a 2x2 grid of Axes
# ```
#
# ### b) Axes
#
# Esto es lo que se puede pensar como 'un gráfico', es la región de la imagen con el espacio de datos. Un _Figure_ dada puede contener muchos _Axes_, pero un objeto _Axe_ dado solo puede estar en un _Figure_. _Axes_ contiene dos (o tres en el caso de 3D) objetos _Axis_ que se ocupan de los límites de datos. Cada _Axe_ tiene un título, una etiqueta para el eje horizonal y una etiqueta para el eje vertical.
#
# La clase _Axes_ y sus funciones son el punto de entrada principal para trabajar con la interfaz orientada a objetos.
#
# ### c) Axis
#
# Corresponden a los ejes, algo así como líneas rectas. Se encargan de establecer los límites del gráfico y generar los ticks (las marcas en el eje) y los ticklabels (_strings_ que etiquetan los ticks).
# + [markdown] slideshow={"slide_type": "slide"}
# ## III.- Gráfico a Gráfico
# <a id='plot-plot'></a>
#
# A continuación, mostraremos un amplia gama de gráficos que pueden ser desplegados con `Matplotlib`. Lo primero será cargar las librerias para este módulo.
# +
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from scipy.stats import multivariate_normal
from mpl_heatmap import heatmap, annotate_heatmap
# %matplotlib inline
# -
# ### a) Gráfico de líneas
# +
# simple plot
# data
x = np.linspace(0, 2, 100)
# plot
# figure size
fig = plt.figure(figsize=(10, 5))
# draw
plt.plot(
    x, # x axis
    x, # y axis
    label='linea', # legend label
    color="black", # color
    linewidth=1 # curve thickness
)
plt.legend() # add the legend
plt.title("grafico simple") # add a title
plt.xlabel('x') # x-axis name
plt.ylabel('y') # y-axis name
plt.grid() # add the grid
plt.show() # display the figure
# +
# composite plot: three curves on the same axes
# data
x = np.linspace(0, 2, 100)
# plot
# figure size
fig = plt.figure(figsize=(10, 5))
# draw
# a) linear
plt.plot(
    x, # x axis
    x, # y axis
    label='linea', # legend label
    color="black", # color
    linewidth=1 # curve thickness
)
# b) quadratic
plt.plot(
    x, # x axis
    x**2, # y axis
    label='cuadratica', # legend label
    color="b", # color
    linewidth=1 # curve thickness
)
# c) cubic
plt.plot(
    x, # x axis
    x**3, # y axis
    label='cubica', # legend label
    color="r", # color
    linewidth=1 # curve thickness
)
plt.legend() # add the legend
plt.title("grafico compuesto") # add a title
plt.xlabel('x') # x-axis name
plt.ylabel('y') # y-axis name
plt.grid() # add the grid
plt.show() # display the figure
# -
# ### ¿Cuándo utilizar gráfico de líneas?
# * x: Debe ser datos del tipo ordinal o cuantitativo.
# * y: Debe ser datos de tipo ordinal, posicional o cuantitativo.
# + [markdown] slideshow={"slide_type": "slide"}
# ### b) Gráfico de Barras
# <a id='barplot'></a>
# +
# data
np.random.seed(0) # fix the seed for reproducibility
people = ('Tom', 'Dick', 'Harry', 'Slim', 'Jim')
y_pos = np.arange(len(people))
performance = 3 + 10 * np.random.rand(len(people))
error = np.random.rand(len(people))
# plot
fig = plt.figure(figsize=(10, 5))
plt.bar(
    y_pos, # x axis (bar positions)
    performance, # y axis (bar heights)
    yerr=error, # error shown on the y axis
    align='center', # center the bars on the tick positions
    color="blue", # color
    alpha=0.6 # color intensity
)
plt.xticks(y_pos, people)
plt.xlabel('People')
plt.show()
# -
# Ahora para realizar el mismo gráfico pero con los ejes invertidos, se debe graficar con `plt.barh`
# +
# data
np.random.seed(0) # fix the seed for reproducibility
people = ('Tom', 'Dick', 'Harry', 'Slim', 'Jim')
y_pos = np.arange(len(people))
performance = 3 + 10 * np.random.rand(len(people))
error = np.random.rand(len(people))
# plot
fig = plt.figure(figsize=(10, 5))
plt.barh(
    y_pos, # bar positions along the y axis
    performance, # bar lengths along the x axis
    xerr=error, # error shown on the x axis
    align='center', # center the bars on the tick positions
    color="blue", # color
    alpha=0.4 # color intensity
)
plt.yticks(y_pos, people)
plt.xlabel('People')
plt.show()
# -
# Ahora, si queremos poner ambos gráficos en una sola vista, debemos ejecutar la siguiente rutina:
# +
# data
np.random.seed(0) # fix the seed for reproducibility
people = ('Tom', 'Dick', 'Harry', 'Slim', 'Jim')
y_pos = np.arange(len(people))
performance = 3 + 10 * np.random.rand(len(people))
error = np.random.rand(len(people))
# plot
fig = plt.figure(figsize=(15, 5)) # window
# left-hand panel: horizontal bars
plt.subplot(1, 2, 1) # sub-window
plt.barh(y_pos, performance, xerr=error, align='center', color="blue", alpha=0.4)
plt.yticks(y_pos, people)
plt.xlabel('Performance')
# right-hand panel: vertical bars
plt.subplot(1, 2, 2) # sub-window
plt.bar(y_pos, performance, yerr=error, align='center', color="blue", alpha=0.6)
plt.xticks(y_pos, people)
plt.xlabel('People')
plt.ylabel('Performance')
plt.show()
# + [markdown] slideshow={"slide_type": "slide"}
# ### ¿Cuándo utilizar gráfico de barras?
# * x: Debe ser datos del tipo nominal o ordinal.
# * y: Debe ser datos de tipo ordinal, posicional o cuantitativo.
#
# Evitar: gráfico de nominal vs nominal.
# + [markdown] slideshow={"slide_type": "slide"}
# ### c) Scatter Plot
# <a id='scatter-plot'></a>
# +
# data
np.random.seed(42)
x = np.arange(0.0, 50.0, 2.0)
y = x ** 1.3 + np.random.rand(*x.shape) * 30.0
s = np.random.rand(*x.shape) * 800 + 500
# plot
fig = plt.figure(figsize=(10, 5)) # window
plt.scatter(
    x, # x axis
    y, # y axis
    s, # marker sizes
    c="g", # color
    alpha=0.7, # color intensity
    marker=r'$\clubsuit$', # marker shape
    label="Suerte" # legend label
)
plt.xlabel("Duende")
plt.ylabel("Oro")
plt.legend(loc='upper left')
plt.show()
# -
# **Ejercicio**: Realizar un gráfico que cumpla las siguientes restricciones:
#
# * Valores de los ejes: $x,y \in [0,1]$
# * Gráfico de línea de una circunferencia de radio $r_0$
# * Los puntos que se encuentren dentro de la circunferencia tengan forma de círculos con color naranja y aquellos puntos que se encuentren fuera tengan forma de triángulos con color azul.
# * Los puntos graficados deben estar escalado por tamaño.
# +
# data
N = 100
r0 = 0.6 # reference radius
x = 0.9 * np.random.rand(N) # random points, x coordinates
y = 0.9 * np.random.rand(N) # random points, y coordinates
r = np.sqrt(x ** 2 + y ** 2) # distance of each point from the origin
area = np.pi * (10 * np.random.rand(N)) ** 2 # marker sizes
# masked_where HIDES entries where the condition holds, so the original
# comments here were swapped:
area1 = np.ma.masked_where(r < r0, area) # keeps only points OUTSIDE the radius (r >= r0)
area2 = np.ma.masked_where(r >= r0, area) # keeps only points INSIDE the radius (r < r0)
# +
# plot
# a) quarter-circle boundary of radius r0 (dashed black line)
plt.figure(figsize=(8, 8))
theta = np.arange(0, np.pi / 2, 0.01)
plt.plot(r0 * np.cos(theta), r0 * np.sin(theta), "k--", lw=1.0)
# b) markers inside the circle (area2 keeps the points with r < r0)
sc1 = plt.scatter(x, y, s=area2, marker='o', c = "orange", label="interior" )
# c) markers outside the circle (area1 keeps the points with r >= r0)
sc2 = plt.scatter(x, y, s=area1, marker='^', c = "b", label="exterior")
plt.xlabel("x")
plt.ylabel("y")
plt.legend(loc='upper left')
plt.show()
# + [markdown] slideshow={"slide_type": "slide"}
# ### ¿Cuándo utilizar scatter plot?
# * x: Dato del tipo posicional o cuantitativo.
# * y: Dato del tipo posicional o cuantitativo.
# * z: Dato del tipo nominal u ordinal (opcional)
#
# ***OBSERVACION***: Si hay pocos puntos, también puede usarse para z datos de tipo posicional o cuantitativo.
#
# -
# ### d) Mapa de calor
# +
# data: yearly harvest (tons) per vegetable (rows) and farmer (columns)
vegetables = ["cucumber", "tomato", "lettuce", "asparagus",
              "potato", "wheat", "barley"]
farmers = ["<NAME>", "Upland Bros.", "<NAME>",
           "Agrifun", "Organiculture", "BioGoods Ltd.", "Cornylee Corp."]
harvest = np.array([[0.8, 2.4, 2.5, 3.9, 0.0, 4.0, 0.0],
                    [2.4, 0.0, 4.0, 1.0, 2.7, 0.0, 0.0],
                    [1.1, 2.4, 0.8, 4.3, 1.9, 4.4, 0.0],
                    [0.6, 0.0, 0.3, 0.0, 3.1, 0.0, 0.0],
                    [0.7, 1.7, 0.6, 2.6, 2.2, 6.2, 0.0],
                    [1.3, 1.2, 0.0, 0.0, 0.0, 3.2, 5.1],
                    [0.1, 2.0, 0.0, 1.4, 0.0, 1.9, 6.3]])
# plot (heatmap/annotate_heatmap come from the local mpl_heatmap helper module)
fig, ax = plt.subplots(figsize=(10, 10))
im, cbar = heatmap(
    harvest, # cell values
    vegetables, # row labels
    farmers, # column labels
    ax=ax, # axes to draw on
    cmap="YlGn", # colormap
    cbarlabel="harvest [t/year]" # colorbar label
)
# Write each cell's value (one decimal plus a "t" suffix) onto the heatmap.
texts = annotate_heatmap(im, valfmt="{x:.1f} t")
fig.tight_layout()
plt.show()
# + [markdown] slideshow={"slide_type": "slide"}
# ## Otros gráficos de interés
#
# ### Gráfico de Barra de Error
# <a id='error-bar-plot'></a>
# +
# data: exponential decay samples
x = np.arange(0.1, 4, 0.5)
y = np.exp(-x)
# plots: horizontal error bars (left panel) and vertical error bars (right panel)
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(20, 10))
x_error = 0.1 + 0.2*np.random.rand(len(x))
ax1.errorbar(x, y, xerr=x_error)
y_error = 0.1 + 0.2*np.random.rand(len(x))
ax2.errorbar(x, y, yerr=y_error)
fig.show()
# + [markdown] slideshow={"slide_type": "slide"}
# #### ¿Cuándo utilizar gráfico de barra de error?
# * x: Dato del tipo posicional o cuantitativo.
# * y: Dato del tipo posicional o cuantitativo.
# * z: Dato del tipo posicional o cuantitativo.
# Los valores de z tienen que tener las mismas unidades y.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Contour Plot
# <a id='countour-plot'></a>
# +
# data: difference of two 2-D Gaussian densities evaluated on a regular grid
x, y = np.mgrid[-3:3:.025, -2:2:.025]
pos = np.empty(x.shape + (2,))
pos[:, :, 0] = x
pos[:, :, 1] = y
z1 = multivariate_normal.pdf(
    pos,
    mean=[-1.0, -1.0],
    cov=[[1.0, 0.0], [0.0, 0.1]]
)
z2 = multivariate_normal.pdf(
    pos,
    mean=[1.0, 1.0],
    cov=[[1.5, 0.0], [0.0, 0.5]]
)
z = 10 * (z1 - z2)
# plot: solid contours (left panel) and dashed, labeled contours (right panel)
fig, axs = plt.subplots(ncols=2, figsize=(20, 10), sharex=True, sharey=True)
# NOTE(review): `cmaps` is only used for its length; the colormaps themselves
# are never applied, and zip stops at the two available axes/styles anyway.
cmaps = [cm.rainbow, cm.autumn, cm.coolwarm, cm.gray]
countour_styles = [
    {"colors": "k", "linestyles": "solid"},
    {"colors": "k", "linestyles": "dashed"},
]
for i, ax in zip(range(len(cmaps)), axs.ravel()):
    cs = ax.contour(x, y, z, 11, **countour_styles[i])
    if i > 0:
        ax.clabel(cs, fontsize=9, inline=1) # inline level labels on the dashed panel only
    ax.grid(alpha=0.5)
fig.show()
# + [markdown] slideshow={"slide_type": "slide"}
# #### ¿Cuándo se debe utilizar un contour plot?
#
# * x: Dato del tipo posicional o cuantitativo.
# * y: Dato de tipo posicional o cuantitativo.
# * z: Dato de tipo posicional o cuantitativo.
#
# ***OBSERVACION***: Se debe tener suficiente densidad/regularidad de puntos como para poder obtener superficies de nivel.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Campos de Vectores
# <a id='vector-field'></a>
#
# ¿Por qué se llama quiver al campo de vectores en inglés?
# +
def my_vector_field():
    """Draw two quiver (vector-field) demos side by side.

    Left: a cos/sin field plotted by array index, with an auto-scaled key.
    Right: the same field subsampled every third point, anchored at its true
    (X, Y) coordinates, with mid-point pivots and fixed-size ('inches') arrows.
    """
    X, Y = np.meshgrid(np.arange(0, 2 * np.pi, .2), np.arange(0, 2 * np.pi, .2))
    U = np.cos(X)
    V = np.sin(Y)
    fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(20, 10))
    # No X/Y given: arrows are placed on the integer grid of array indices.
    Q1 = ax1.quiver(U, V)
    qk1 = ax1.quiverkey(
        Q1,
        0.5,
        0.92,
        2,
        r'$2 \frac{m}{s}$',
        labelpos='W',
        fontproperties={'weight': 'bold'}
    )
    # Subsample every third point so the arrows stay readable.
    Q2 = ax2.quiver(
        X[::3, ::3],
        Y[::3, ::3],
        U[::3, ::3],
        V[::3, ::3],
        pivot='mid',
        color='r',
        units='inches'
    )
    qk2 = ax2.quiverkey(
        Q2,
        0.5,
        0.03,
        1,
        r'$1 \frac{m}{s}$',
        fontproperties={'weight': 'bold'}
    )
    # Mark the anchor point of each displayed arrow.
    ax2.plot(X[::3, ::3], Y[::3, ::3], 'k.')
    ax2.set_title("pivot='mid'; every third arrow; units='inches'")
    fig.show()
my_vector_field()
# + [markdown] slideshow={"slide_type": "slide"}
# #### ¿Cuándo utilizar campos de vectores?
#
# * x: Debe ser datos del tipo posicional o cuantitativo.
# * y: Debe ser datos de tipo posicional o cuantitativo.
# * z: Pendiente debe ser dato de tipo posicional o cuantitativo.
#
# Evitar: gráfico de campo de vectores si no es posible la interpretación correspondiente.
# -
# ## Referencia
#
# 1. [Gallery-matplotlib](https://matplotlib.org/3.1.1/gallery/index.html)
#
|
labs/05_visualizacion/05_visualizacion_imperativa.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# The library data checkout set is extremely massive, to the point it's almost unwieldy.
#
# As it turns out however, we don't need this kind of precision. So we will have to break it down a bit.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import sys
# Split_00 still carries the original CSV header row, so no names= here;
# na_values=[0.0] maps literal zeros to NaN on load.
n_CheckoutData = pd.read_csv('CheckoutData3/Data_Split_00', low_memory=False , na_values=[0.0])
display(n_CheckoutData.head(1))
# Keep only ItemType, re-indexed by checkout timestamp.
n_CheckoutDataShrink1 = n_CheckoutData[['ItemType',str('CheckoutDateTime')]]
n_CheckoutDataShrink1.index = pd.to_datetime(n_CheckoutData['CheckoutDateTime'])
del n_CheckoutDataShrink1['CheckoutDateTime']
display(n_CheckoutDataShrink1.head(1))
# Collapse to hourly checkout counts.
n_CheckoutDataShrink2 = n_CheckoutDataShrink1.resample('H').count()
display(n_CheckoutDataShrink2.head(1))
# For the purposes of this study, we only need a very small amount of information here, Mostly CheckoutDateTime and ItemType
FullData1 = n_CheckoutDataShrink2.copy()
# The remaining splits are headerless, hence the explicit names= list.
n_CheckoutData = pd.read_csv('CheckoutData3/Data_Split_01', low_memory=False , na_values=[0.0], names=["ID", "CheckoutYear", "BibNumber", "ItemBarcode", "ItemType","Collection","CallNumber","ItemTitle","Subjects","CheckoutDateTime"])
display(n_CheckoutData.head(1))
n_CheckoutDataShrink1 = n_CheckoutData[['ItemType',str('CheckoutDateTime')]]
n_CheckoutDataShrink1.index = pd.to_datetime(n_CheckoutData['CheckoutDateTime'])
del n_CheckoutDataShrink1['CheckoutDateTime']
display(n_CheckoutDataShrink1.head(1))
n_CheckoutDataShrink2 = n_CheckoutDataShrink1.resample('H').count()
display(n_CheckoutDataShrink2.head(1))
FullData2 = n_CheckoutDataShrink2.copy()
display(FullData1)
display(FullData2)
# Same processing applied to split 21 as a spot check.
n_CheckoutData = pd.read_csv('CheckoutData3/Data_Split_21', low_memory=False , na_values=[0.0], names=["ID", "CheckoutYear", "BibNumber", "ItemBarcode", "ItemType","Collection","CallNumber","ItemTitle","Subjects","CheckoutDateTime"])
display(n_CheckoutData.head(1))
n_CheckoutDataShrink1 = n_CheckoutData[['ItemType',str('CheckoutDateTime')]]
n_CheckoutDataShrink1.index = pd.to_datetime(n_CheckoutData['CheckoutDateTime'])
del n_CheckoutDataShrink1['CheckoutDateTime']
display(n_CheckoutDataShrink1.head(1))
n_CheckoutDataShrink2 = n_CheckoutDataShrink1.resample('H').count()
display(n_CheckoutDataShrink2.head(1))
FullData3 = n_CheckoutDataShrink2.copy()
FullData3
# Merge hourly counts; fill_value=0 keeps hours present in only one frame.
df = FullData1.add(FullData2, fill_value=0)
df = df.add(FullData3, fill_value=0)
df
display(df.count())
# NOTE(review): this re-read of Split_00 is dead work — its resampled result is
# never added to df (Split_00 is already in df via FullData1). Presumably a
# leftover scratch cell; confirm before relying on the totals.
n_CheckoutData = pd.read_csv('CheckoutData3/Data_Split_00', low_memory=False , na_values=[0.0])
display(n_CheckoutData.head(1))
n_CheckoutDataShrink1 = n_CheckoutData[['ItemType',str('CheckoutDateTime')]]
n_CheckoutDataShrink1.index = pd.to_datetime(n_CheckoutData['CheckoutDateTime'])
del n_CheckoutDataShrink1['CheckoutDateTime']
display(n_CheckoutDataShrink1.head(1))
n_CheckoutDataShrink2 = n_CheckoutDataShrink1.resample('H').count()
display(n_CheckoutDataShrink2.head(1))
# NOTE(review): this loop adds splits 01–08, but Split_01 is already in df via
# FullData2 above, so its hours appear to be counted twice — TODO confirm intent.
for i in range(1, 9):
    n_CheckoutData = pd.read_csv('CheckoutData3/Data_Split_0'+str(i), low_memory=False , na_values=[0.0], names=["ID", "CheckoutYear", "BibNumber", "ItemBarcode", "ItemType","Collection","CallNumber","ItemTitle","Subjects","CheckoutDateTime"])
    display(n_CheckoutData.head(1))
    n_CheckoutDataShrink1 = n_CheckoutData[['ItemType',str('CheckoutDateTime')]]
    n_CheckoutDataShrink1.index = pd.to_datetime(n_CheckoutData['CheckoutDateTime'])
    del n_CheckoutDataShrink1['CheckoutDateTime']
    display(n_CheckoutDataShrink1.head(1))
    n_CheckoutDataShrink2 = n_CheckoutDataShrink1.resample('H').count()
    display(n_CheckoutDataShrink2.head(1))
    df = df.add(n_CheckoutDataShrink2, fill_value=0)
display(df.count())
df
# NOTE(review): likewise, Split_21 (already merged as FullData3) is re-added by
# this loop over splits 10–86.
for i in range(10, 87):
    print('CheckoutData3/Data_Split_'+str(i))
    n_CheckoutData = pd.read_csv('CheckoutData3/Data_Split_'+str(i), low_memory=False , na_values=[0.0], names=["ID", "CheckoutYear", "BibNumber", "ItemBarcode", "ItemType","Collection","CallNumber","ItemTitle","Subjects","CheckoutDateTime"])
    display(n_CheckoutData.head(1))
    n_CheckoutDataShrink1 = n_CheckoutData[['ItemType',str('CheckoutDateTime')]]
    n_CheckoutDataShrink1.index = pd.to_datetime(n_CheckoutData['CheckoutDateTime'])
    del n_CheckoutDataShrink1['CheckoutDateTime']
    display(n_CheckoutDataShrink1.head(1))
    n_CheckoutDataShrink2 = n_CheckoutDataShrink1.resample('H').count()
    display(n_CheckoutDataShrink2.head(1))
    df = df.add(n_CheckoutDataShrink2, fill_value=0)
display(df.count())
df
# Persist the aggregated hourly counts for downstream notebooks.
df.to_pickle("Data/CheckoutData.pkl")
|
CheckoutShrinkingSample.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Extracting data from relational databases
# ## SQLite Database
#
# ### import package
#import sqlite3 package
import sqlite3
# create a connection to a database : the file is created if not present
connection = sqlite3.connect("classroom.db")
connection.close()
#open connection
connection = sqlite3.connect("classroom.db")
#open cursor
cursor = connection.cursor()
# IF NOT EXISTS keeps this cell safe to re-run in the same session.
create_table = """
CREATE TABLE IF NOT EXISTS classroom (
student_id INTEGER PRIMARY KEY,
name VARCHAR(20),
gender CHAR(1),
physcis_marks INTEGER,
chemistry_marks INTEGER,
mathematics_marks INTEGER
);"""
cursor.execute(create_table)
connection.commit()
connection.close()
# +
classroom_data = [(1, "Raj", "M", 70, 84, 92),
                  (2, "Poonam", "F", 87, 69, 93),
                  (3, "Nik", "M", 65, 83, 90),
                  (4, "Rahul", "M", 83, 76, 89)]
connection = sqlite3.connect("classroom.db")
cursor = connection.cursor()
# FIX: use parameterized queries (? placeholders) instead of str.format
# interpolation — immune to quoting problems / SQL injection.  OR REPLACE
# makes re-runs idempotent since student_id is the primary key.
insert_statment = """INSERT OR REPLACE INTO classroom
    (student_id, name, gender, physcis_marks, chemistry_marks, mathematics_marks)
    VALUES (?, ?, ?, ?, ?, ?);"""
cursor.executemany(insert_statment, classroom_data)
connection.commit()
connection.close()
# +
connection = sqlite3.connect("classroom.db")
cursor = connection.cursor()
query = "SELECT * FROM classroom"
cursor.execute(query)
result = cursor.fetchall()
for row in result:
    print(row)
connection.close()
# -
# ### MySQL Database
# #### install package
# !conda install -y -q pymysql
# #### import package
import pymysql
# !conda install -y -q pymssql
import pymssql
# Connection settings — credentials are placeholders to be filled in;
# pymssql takes the server as "hostname:port" in a single string.
cnx = {
    'host': 'hostname:1433',
    'username': '',
    'password': '',
    'db': ''}
# Smoke-test the connection, then close it again.
conn = pymssql.connect(cnx['host'], cnx['username'], cnx['password'], cnx['db'])
conn.close()
connection = pymssql.connect(cnx['host'], cnx['username'], cnx['password'], cnx['db'])
cursor = connection.cursor()
create_table = """
CREATE TABLE classroom (
student_id INTEGER PRIMARY KEY,
name VARCHAR(20),
gender CHAR(1),
physcis_marks INTEGER,
chemistry_marks INTEGER,
mathematics_marks INTEGER
);"""
cursor.execute(create_table)
connection.commit()
connection.close()
# +
classroom_data = [(1, "Raj", "M", 70, 84, 92),
                  (2, "Poonam", "F", 87, 69, 93),
                  (3, "Nik", "M", 65, 83, 90),
                  (4, "Rahul", "M", 83, 76, 89)]
connection = pymssql.connect(cnx['host'], cnx['username'], cnx['password'], cnx['db'])
cursor = connection.cursor()
for student in classroom_data:
    # NOTE(review): values are interpolated into the SQL text with str.format —
    # acceptable for this fixed demo data, but any real input should use
    # parameterized queries (cursor.execute(sql, params) with %s placeholders).
    insert_statment = """INSERT INTO classroom
    (student_id, name, gender, physcis_marks, chemistry_marks, mathematics_marks)
    VALUES
    ({0}, '{1}', '{2}', {3}, {4}, {5});""".format(student[0], student[1], student[2],
                                                  student[3], student[4], student[5])
    cursor.execute(insert_statment)
connection.commit()
connection.close()
# +
connection = pymssql.connect(cnx['host'], cnx['username'], cnx['password'], cnx['db'])
cursor = connection.cursor()
query = "SELECT * FROM classroom"
cursor.execute(query)
result = cursor.fetchall()
for row in result:
    print(row)
connection.close()
|
notebooks/extractingData.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="FFt85FyjDbHm"
# <a href="https://colab.research.google.com/github/YKochura/rl-kpi/blob/main/tutor/dp/Dynamic_Programming.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="2OmSAch9bPpL"
# ## Динамічне програмування
# + id="XGDI6FoUQ2W0"
def fib(n):
    """Naive recursive Fibonacci — exponential time, kept as the slow baseline."""
    return n if n <= 1 else fib(n - 1) + fib(n - 2)
# + colab={"base_uri": "https://localhost:8080/"} id="AAO1jlMMQ7kp" outputId="dfec5159-62a8-4c8f-b5f4-67541e46dc88"
fib(10)
# + id="uByRnH-vQ_pN"
# Memo table, seeded with the base cases F(0)=0, F(1)=1.
mem = {0:0, 1:1}
def fib_mem(n):
    """Fibonacci with memoization: O(n) on the first call, O(1) thereafter."""
    if n not in mem:
        # BUG FIX: recurse through fib_mem itself, not the unmemoized fib();
        # the original cached only the top-level result, so the first call
        # for a new n was still exponential.
        mem[n] = fib_mem(n - 1) + fib_mem(n - 2)
    return mem[n]
# + colab={"base_uri": "https://localhost:8080/"} id="S4dS-aDvRCpM" outputId="aba6a9b5-5170-479a-a631-87f60c0df620"
fib_mem(10)
# + [markdown] id="noNNk6QHVwGG"
# [timeit](https://docs.python.org/3/library/timeit.html)
#
# `timeit` is more accurate than `time`, for three reasons:
#
#
#
# * it repeats the tests many times to eliminate the influence of other tasks on your machine, such as disk flushing and OS scheduling.
# * it disables the garbage collector to prevent that process from skewing the results by scheduling a collection run at an inopportune moment.
# * it picks the most accurate timer for your OS
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="LR9gv2pQRFWA" outputId="98041c38-e3d5-444e-a134-2eb859c18dbb"
# %timeit fib(38)
# We get 17.3 seconds to run with n=38
# + colab={"base_uri": "https://localhost:8080/"} id="BzFbGVJMRVb6" outputId="d06a71ab-1320-49f4-e41c-ab079429635c"
# %timeit fib_mem(38)
# We get 292 ns to run with n=38
# + colab={"base_uri": "https://localhost:8080/"} id="KfP8Lwn0SCCo" outputId="02ef1da1-3c21-4237-a029-fa693dae0d09"
mem
|
tutor/dp/Dynamic_Programming.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Evaluation of Czechoslovak hyphenation patterns
#
# It is common to evaluate patgen patterns on the metrics given by the program itself. Patgen gives three metrics during pattern generation: Good, Bad, Missed. These are simply the counts of how many hyphenation points in the training wordlist the patterns correctly predicted, how many points it predicted in places where there aren't any and how many the patterns missed, respectively. The following table shows the results of this "self-evaluation".
#
# 
#
# This metric doesn't provide insight into the patterns generalization properties, which is why we used standard 10-k cross validation.
# + tags=[]
import validate
import datetime
import statistics
# -
# ## Custom parameters
passes = validate.k_cross_val(10)
# Each pass yields (good, bad, missed) counts; split them into columns.
good = [counts[0] for counts in passes]
bad = [counts[1] for counts in passes]
missed = [counts[2] for counts in passes]
print(round(statistics.mean(good), 2))
print(round(statistics.mean(bad), 2))
print(round(statistics.mean(missed), 2))
print(statistics.stdev(good))
print(statistics.stdev(bad))
print(statistics.stdev(missed))
# ## Correct optimized parameters
# To regenerate cells with different parameters, replace `../src/csskhyphen.par` in the out/training.pat target with the selected parameter file, in this case `../src/cs-sojka-correctoptimized.par`.
passes = validate.k_cross_val(10)
good = [counts[0] for counts in passes]
bad = [counts[1] for counts in passes]
missed = [counts[2] for counts in passes]
print(round(statistics.mean(good), 2))
print(round(statistics.mean(bad), 2))
print(round(statistics.mean(missed), 2))
# ## Size optimized parameters
passes = validate.k_cross_val(10)
good = [counts[0] for counts in passes]
bad = [counts[1] for counts in passes]
missed = [counts[2] for counts in passes]
print(round(statistics.mean(good), 2))
print(round(statistics.mean(bad), 2))
print(round(statistics.mean(missed), 2))
|
evaluation.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # External Source Operator - basic usage
#
# In this example, we will show you how to use the `ExternalSource` operator, so that you can
# use an external data source as an input to the pipeline.
# +
import types
import collections
import numpy as np
from random import shuffle
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.fn as fn
import nvidia.dali.types as types
batch_size = 16
# -
# ### Define the Data Source
# In this example, we will use an infinite iterator as a data source.
class ExternalInputIterator(object):
    """Infinite batch iterator feeding raw JPEG bytes + labels to DALI.

    Reads ``file_list.txt`` ("<filename> <label>" per line), shuffles it once,
    then cycles through it forever, wrapping with modular indexing.
    """
    def __init__(self, batch_size):
        self.images_dir = "../../data/images/"
        self.batch_size = batch_size
        with open(self.images_dir + "file_list.txt", 'r') as f:
            # FIX: line.strip() also drops whitespace-only lines; the original
            # `line != ''` test never fired because lines keep their '\n'.
            self.files = [line.rstrip() for line in f if line.strip()]
        shuffle(self.files)
    def __iter__(self):
        self.i = 0
        self.n = len(self.files)
        return self
    def __next__(self):
        batch = []
        labels = []
        for _ in range(self.batch_size):
            jpeg_filename, label = self.files[self.i].split(' ')
            # BUG FIX: open with a context manager; the original leaked one
            # file handle per image read.
            with open(self.images_dir + jpeg_filename, 'rb') as f:
                batch.append(np.frombuffer(f.read(), dtype = np.uint8))
            labels.append(np.array([label], dtype = np.uint8))
            self.i = (self.i + 1) % self.n
        return (batch, labels)
# ### Defining the Pipeline
#
# The next step is to define the Pipeline.
#
# The `ExternalSource` operator accepts an iterable or a callable. If the source provides multiple outputs (for example images and labels), that number must also be specified as the `num_outputs` argument.
#
# Internally, the pipeline will call `source` (if callable) or run `next(source)` (if iterable) when additional data is needed to keep the pipeline running.
# Build the pipeline around the external iterator defined above.
eii = ExternalInputIterator(batch_size)
pipe = Pipeline(batch_size=batch_size, num_threads=2, device_id=0)
with pipe:
    # num_outputs=2: the source yields (jpeg_bytes, labels) per batch.
    jpegs, labels = fn.external_source(source=eii, num_outputs=2)
    # device="mixed": decode starts on CPU and finishes on GPU.
    decode = fn.decoders.image(jpegs, device="mixed", output_type=types.RGB)
    enhance = fn.brightness_contrast(decode, contrast=2)
    pipe.set_outputs(enhance, labels)
# ### Using the Pipeline
pipe.build()
pipe_out = pipe.run()
# Here, the labels are still in the memory and no `as_cpu` call is needed to show the labels.
batch_cpu = pipe_out[0].as_cpu()  # decoded images come back on the GPU → copy to host
labels_cpu = pipe_out[1]
import matplotlib.pyplot as plt
img = batch_cpu.at(2)
print(img.shape)
print(labels_cpu.at(2))
plt.axis('off')
plt.imshow(img)
# ## Interacting with the GPU Input
#
# The external source operator can also accept GPU data from CuPy or any other data source that supports the [cuda array interface](https://numba.pydata.org/numba-doc/latest/cuda/cuda_array_interface.html).
# For this example, we create the `ExternalInputGpuIterator` that returns data on the GPU. Since `decoders.image` does not accept data on the GPU we need to decode it outside DALI on the CPU and then move it to the GPU. Typically, because of the operation of another library, the image or other data will already be on the GPU.
# +
import cupy as cp
import imageio
class ExternalInputGpuIterator(object):
    """Infinite batch iterator that yields image data already resident on the GPU.

    Mirrors ExternalInputIterator, but decodes each image on the CPU with
    imageio, moves it to the GPU via CuPy, and darkens it (scale by 0.6) so
    the DALI pipeline can skip its own decode step.
    """
    def __init__(self, batch_size):
        self.images_dir = "../../data/images/"
        self.batch_size = batch_size
        with open(self.images_dir + "file_list.txt", 'r') as listing:
            self.files = [entry.rstrip() for entry in listing if entry != '']
        shuffle(self.files)
    def __iter__(self):
        self.i = 0
        self.n = len(self.files)
        return self
    def __next__(self):
        images, tags = [], []
        for _ in range(self.batch_size):
            filename, tag = self.files[self.i].split(' ')
            # Decode on CPU, then transfer to the GPU as a CuPy array.
            pixels = cp.asarray(imageio.imread(self.images_dir + filename))
            images.append((pixels * 0.6).astype(cp.uint8))
            tags.append(cp.array([tag], dtype = np.uint8))
            self.i = (self.i + 1) % self.n
        return (images, tags)
# -
# 1. Let us modify the previous pipeline by using the GPU version of the ExternalSource operator and remove the decoding
#
# **Note**: We assume that the raw image is already on the GPU.
# +
eii_gpu = ExternalInputGpuIterator(batch_size)
# Sanity check: the iterator hands out CuPy arrays, i.e. data already on the GPU.
print(type(next(iter(eii_gpu))[0][0]))
# +
pipe_gpu = Pipeline(batch_size=batch_size, num_threads=2, device_id=0)
with pipe_gpu:
    # device="gpu": the source already delivers GPU-resident arrays, no decode step.
    images, labels = fn.external_source(source=eii_gpu, num_outputs=2, device="gpu")
    enhance = fn.brightness_contrast(images, contrast=2)
    pipe_gpu.set_outputs(enhance, labels)
pipe_gpu.build()
# -
# 2. Visualize the results:
# +
pipe_out_gpu = pipe_gpu.run()
batch_gpu = pipe_out_gpu[0].as_cpu()
labels_gpu = pipe_out_gpu[1].as_cpu()
img = batch_gpu.at(2)
print(img.shape)
# BUG FIX: print the label from THIS (GPU) pipeline; the original printed
# labels_cpu, a leftover variable from the CPU pipeline earlier in the notebook.
print(labels_gpu.at(2))
plt.axis('off')
plt.imshow(img)
|
docs/examples/general/data_loading/external_input.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: ML
# language: python
# name: ml
# ---
# # Import Cool Stuff
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
from __future__ import print_function
from collections import defaultdict, deque
import datetime
import pickle
import time
import torch.distributed as dist
import errno
import collections
import os
import numpy as np
import torch
import torch.utils.data
from PIL import Image, ImageFile
import pandas as pd
from tqdm import tqdm
from torchvision import transforms
import torchvision
import random
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor
ImageFile.LOAD_TRUNCATED_IMAGES = True
# -
# # Utility Functions (hidden)
# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _kg_hide-input=true _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"
class SmoothedValue(object):
    """Track a series of values and provide access to smoothed values over a
    window or the global series average.
    """
    def __init__(self, window_size=20, fmt=None):
        # fmt may reference {median}/{avg}/{global_avg}/{max}/{value}.
        if fmt is None:
            fmt = "{median:.4f} ({global_avg:.4f})"
        self.deque = deque(maxlen=window_size)  # last window_size raw values
        self.total = 0.0  # running sum over ALL updates (not just the window)
        self.count = 0  # number of samples folded in across all updates
        self.fmt = fmt
    def update(self, value, n=1):
        """Record *value*, weighted n times (n>1 for pre-averaged batch values)."""
        self.deque.append(value)
        self.count += n
        self.total += value * n
    def synchronize_between_processes(self):
        """
        Warning: does not synchronize the deque!
        """
        # Sum count/total across ranks so global_avg agrees everywhere.
        # NOTE(review): stages the tensor on 'cuda' — assumes a CUDA/NCCL setup.
        if not is_dist_avail_and_initialized():
            return
        t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
        dist.barrier()
        dist.all_reduce(t)
        t = t.tolist()
        self.count = int(t[0])
        self.total = t[1]
    @property
    def median(self):
        # Median over the windowed values only.
        d = torch.tensor(list(self.deque))
        return d.median().item()
    @property
    def avg(self):
        # Mean over the windowed values only.
        d = torch.tensor(list(self.deque), dtype=torch.float32)
        return d.mean().item()
    @property
    def global_avg(self):
        # Mean over the entire series, not just the window.
        return self.total / self.count
    @property
    def max(self):
        return max(self.deque)
    @property
    def value(self):
        # Most recent raw value.
        return self.deque[-1]
    def __str__(self):
        return self.fmt.format(
            median=self.median,
            avg=self.avg,
            global_avg=self.global_avg,
            max=self.max,
            value=self.value)
def all_gather(data):
    """
    Run all_gather on arbitrary picklable data (not necessarily tensors)
    Args:
        data: any picklable object
    Returns:
        list[data]: list of data gathered from each rank
    """
    # NOTE(review): stages buffers on "cuda" — assumes a CUDA/NCCL backend.
    world_size = get_world_size()
    if world_size == 1:
        return [data]
    # serialized to a Tensor
    buffer = pickle.dumps(data)
    storage = torch.ByteStorage.from_buffer(buffer)
    tensor = torch.ByteTensor(storage).to("cuda")
    # obtain Tensor size of each rank
    local_size = torch.tensor([tensor.numel()], device="cuda")
    size_list = [torch.tensor([0], device="cuda") for _ in range(world_size)]
    dist.all_gather(size_list, local_size)
    size_list = [int(size.item()) for size in size_list]
    max_size = max(size_list)
    # receiving Tensor from all ranks
    # we pad the tensor because torch all_gather does not support
    # gathering tensors of different shapes
    tensor_list = []
    for _ in size_list:
        tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device="cuda"))
    if local_size != max_size:
        padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device="cuda")
        tensor = torch.cat((tensor, padding), dim=0)
    dist.all_gather(tensor_list, tensor)
    data_list = []
    for size, tensor in zip(size_list, tensor_list):
        # Trim the padding back off before unpickling each rank's payload.
        buffer = tensor.cpu().numpy().tobytes()[:size]
        data_list.append(pickle.loads(buffer))
    return data_list
def reduce_dict(input_dict, average=True):
    """
    Args:
        input_dict (dict): all the values will be reduced
        average (bool): whether to do average or sum
    Reduce the values in the dictionary from all processes so that all processes
    have the averaged results. Returns a dict with the same fields as
    input_dict, after reduction.
    """
    world_size = get_world_size()
    if world_size < 2:
        # Single process: nothing to reduce.
        return input_dict
    with torch.no_grad():
        names = []
        values = []
        # sort the keys so that they are consistent across processes
        for k in sorted(input_dict.keys()):
            names.append(k)
            values.append(input_dict[k])
        # Stack into one tensor so a single all_reduce covers every entry.
        values = torch.stack(values, dim=0)
        dist.all_reduce(values)
        if average:
            values /= world_size
        reduced_dict = {k: v for k, v in zip(names, values)}
    return reduced_dict
class MetricLogger(object):
    """Collect named SmoothedValue meters and pretty-print them while iterating."""
    def __init__(self, delimiter="\t"):
        # Unknown metric names get a default-configured SmoothedValue on first use.
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter
    def update(self, **kwargs):
        """Fold keyword metrics into their meters (tensors are .item()-ed first)."""
        for k, v in kwargs.items():
            if isinstance(v, torch.Tensor):
                v = v.item()
            assert isinstance(v, (float, int))
            self.meters[k].update(v)
    def __getattr__(self, attr):
        # Expose meters as attributes (e.g. logger.loss); fall back to __dict__.
        if attr in self.meters:
            return self.meters[attr]
        if attr in self.__dict__:
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(
            type(self).__name__, attr))
    def __str__(self):
        loss_str = []
        for name, meter in self.meters.items():
            loss_str.append(
                "{}: {}".format(name, str(meter))
            )
        return self.delimiter.join(loss_str)
    def synchronize_between_processes(self):
        """Reduce every meter's count/total across ranks."""
        for meter in self.meters.values():
            meter.synchronize_between_processes()
    def add_meter(self, name, meter):
        # Pre-register a meter with a custom format (see train_one_epoch's 'lr').
        self.meters[name] = meter
    def log_every(self, iterable, print_freq, header=None):
        """Yield from *iterable*, printing progress/ETA every print_freq items."""
        i = 0
        if not header:
            header = ''
        start_time = time.time()
        end = time.time()
        iter_time = SmoothedValue(fmt='{avg:.4f}')  # per-item wall time
        data_time = SmoothedValue(fmt='{avg:.4f}')  # time spent waiting on data
        # Pad the item counter to the width of len(iterable).
        space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
        log_msg = self.delimiter.join([
            header,
            '[{0' + space_fmt + '}/{1}]',
            'eta: {eta}',
            '{meters}',
            'time: {time}',
            'data: {data}',
            'max mem: {memory:.0f}'
        ])
        MB = 1024.0 * 1024.0
        for obj in iterable:
            data_time.update(time.time() - end)
            yield obj
            iter_time.update(time.time() - end)
            if i % print_freq == 0 or i == len(iterable) - 1:
                eta_seconds = iter_time.global_avg * (len(iterable) - i)
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
                # NOTE(review): unconditionally queries CUDA memory — assumes a GPU.
                print(log_msg.format(
                    i, len(iterable), eta=eta_string,
                    meters=str(self),
                    time=str(iter_time), data=str(data_time),
                    memory=torch.cuda.max_memory_allocated() / MB))
            i += 1
            end = time.time()
        total_time = time.time() - start_time
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        print('{} Total time: {} ({:.4f} s / it)'.format(
            header, total_time_str, total_time / len(iterable)))
def collate_fn(batch):
    """Transpose a list of (image, target) pairs into (images, targets) tuples."""
    transposed = zip(*batch)
    return tuple(transposed)
def warmup_lr_scheduler(optimizer, warmup_iters, warmup_factor):
    """LambdaLR that ramps the LR factor linearly from warmup_factor to 1
    over the first warmup_iters steps, then holds it at 1."""
    def warmup(step):
        if step >= warmup_iters:
            return 1
        fraction = float(step) / warmup_iters
        # Linear interpolation: warmup_factor at step 0 → 1 at warmup_iters.
        return warmup_factor * (1 - fraction) + fraction
    return torch.optim.lr_scheduler.LambdaLR(optimizer, warmup)
def mkdir(path):
    """Create *path* (with parents), ignoring the error if it already exists."""
    try:
        os.makedirs(path)
    except OSError as err:
        if err.errno == errno.EEXIST:
            return
        raise
def setup_for_distributed(is_master):
    """
    This function disables printing when not in master process
    """
    import builtins as __builtin__
    builtin_print = __builtin__.print
    def print(*args, **kwargs):
        # force=True lets any rank print (e.g. rank-tagged diagnostics).
        force = kwargs.pop('force', False)
        if is_master or force:
            builtin_print(*args, **kwargs)
    # Monkey-patch the builtin so ALL subsequent print() calls are filtered.
    __builtin__.print = print
def is_dist_avail_and_initialized():
    """True only when torch.distributed is compiled in AND a process group is up."""
    return dist.is_available() and dist.is_initialized()
def get_world_size():
    """Number of participating processes; 1 when not running distributed."""
    return dist.get_world_size() if is_dist_avail_and_initialized() else 1
def get_rank():
    """Rank of this process; 0 when not running distributed."""
    return dist.get_rank() if is_dist_avail_and_initialized() else 0
def is_main_process():
    """Whether this process is rank 0 (or not distributed at all)."""
    return get_rank() == 0
def save_on_master(*args, **kwargs):
    """torch.save, executed only on the main process so ranks don't clobber each other."""
    if is_main_process():
        torch.save(*args, **kwargs)
def init_distributed_mode(args):
    """Initialize torch.distributed from launcher env vars (torchrun or SLURM).

    Mutates *args* in place: sets rank / gpu / distributed / dist_backend
    (and world_size in the torchrun branch). Falls back to single-process
    mode when no launcher env vars are present.
    """
    if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
        args.rank = int(os.environ["RANK"])
        args.world_size = int(os.environ['WORLD_SIZE'])
        args.gpu = int(os.environ['LOCAL_RANK'])
    elif 'SLURM_PROCID' in os.environ:
        args.rank = int(os.environ['SLURM_PROCID'])
        args.gpu = args.rank % torch.cuda.device_count()
    else:
        print('Not using distributed mode')
        args.distributed = False
        return
    args.distributed = True
    torch.cuda.set_device(args.gpu)
    args.dist_backend = 'nccl'
    # NOTE(review): args.dist_url (and, in the SLURM branch, args.world_size)
    # is read below but never set here — presumably supplied by the caller's
    # argument parser; confirm.
    print('| distributed init (rank {}): {}'.format(
        args.rank, args.dist_url), flush=True)
    torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                         world_size=args.world_size, rank=args.rank)
    torch.distributed.barrier()
    setup_for_distributed(args.rank == 0)
# -
# # Training Function
def train_one_epoch(model, optimizer, data_loader, device, epoch, print_freq):
    """Run one training epoch, logging smoothed losses and LR every print_freq iters."""
    model.train()
    metric_logger = MetricLogger(delimiter="  ")
    metric_logger.add_meter('lr', SmoothedValue(window_size=1, fmt='{value:.6f}'))
    header = 'Epoch: [{}]'.format(epoch)
    lr_scheduler = None
    if epoch == 0:
        # Linear LR warmup over the first (up to) 1000 iterations of epoch 0.
        warmup_factor = 1. / 1000
        warmup_iters = min(1000, len(data_loader) - 1)
        lr_scheduler = warmup_lr_scheduler(optimizer, warmup_iters, warmup_factor)
    for images, targets in metric_logger.log_every(data_loader, print_freq, header):
        images = list(image.to(device) for image in images)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
        # In train mode the torchvision detection model returns a dict of losses.
        loss_dict = model(images, targets)
        losses = sum(loss for loss in loss_dict.values())
        # reduce losses over all GPUs for logging purposes
        loss_dict_reduced = reduce_dict(loss_dict)
        losses_reduced = sum(loss for loss in loss_dict_reduced.values())
        optimizer.zero_grad()
        losses.backward()
        optimizer.step()
        if lr_scheduler is not None:
            lr_scheduler.step()
        metric_logger.update(loss=losses_reduced, **loss_dict_reduced)
        metric_logger.update(lr=optimizer.param_groups[0]["lr"])
# # RLE to Mask
def rle2mask(rle, width, height):
    """Decode a relative run-length string ("offset length ..." pairs) into a
    (width, height) binary mask of 0.0/1.0 values."""
    flat = np.zeros(width * height)
    tokens = np.asarray([int(tok) for tok in rle.split()])
    cursor = 0
    # Offsets are relative: skip `offset` pixels, then paint `run` pixels.
    for offset, run in zip(tokens[0::2], tokens[1::2]):
        cursor += offset
        flat[cursor:cursor + run] = 1
        cursor += run
    return flat.reshape(width, height)
# # SIIM Dataset Class
class SIIMDataset(torch.utils.data.Dataset):
    """SIIM pneumothorax dataset: PNG images + RLE masks → Mask R-CNN targets.

    Only rows whose PNG exists and whose RLE is not "-1" (i.e. a mask is
    present) are indexed, so every returned item is a positive example.
    """
    def __init__(self, df_path, img_dir):
        self.df = pd.read_csv(df_path)
        self.height = 1024
        self.width = 1024
        self.image_dir = img_dir
        self.image_info = collections.defaultdict(dict)
        # counter only advances on kept rows, so keys are dense 0..N-1.
        counter = 0
        for index, row in tqdm(self.df.iterrows(), total=len(self.df)):
            image_id = row['ImageId']
            image_path = os.path.join(self.image_dir, image_id)
            # NOTE: the leading space in " EncodedPixels" matches the raw CSV header.
            if os.path.exists(image_path + '.png') and row[" EncodedPixels"].strip() != "-1":
                self.image_info[counter]["image_id"] = image_id
                self.image_info[counter]["image_path"] = image_path
                self.image_info[counter]["annotations"] = row[" EncodedPixels"].strip()
                counter += 1
    def __getitem__(self, idx):
        """Return (image_tensor, target_dict) in torchvision detection format."""
        img_path = self.image_info[idx]["image_path"]
        img = Image.open(img_path + '.png').convert("RGB")
        width, height = img.size
        img = img.resize((self.width, self.height), resample=Image.BILINEAR)
        info = self.image_info[idx]
        # Decode the RLE at the ORIGINAL resolution, then resize to 1024x1024.
        mask = rle2mask(info['annotations'], width, height)
        mask = Image.fromarray(mask.T)
        mask = mask.resize((self.width, self.height), resample=Image.BILINEAR)
        mask = np.expand_dims(mask, axis=0)
        # Tight bounding box around all non-zero mask pixels.
        # NOTE(review): assumes the decoded mask is non-empty — np.min/np.max
        # would raise on an all-zero mask; confirm upstream filtering suffices.
        pos = np.where(np.array(mask)[0, :, :])
        xmin = np.min(pos[1])
        xmax = np.max(pos[1])
        ymin = np.min(pos[0])
        ymax = np.max(pos[0])
        boxes = torch.as_tensor([[xmin, ymin, xmax, ymax]], dtype=torch.float32)
        labels = torch.ones((1,), dtype=torch.int64)  # single foreground class
        masks = torch.as_tensor(mask, dtype=torch.uint8)
        image_id = torch.tensor([idx])
        area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])
        iscrowd = torch.zeros((1,), dtype=torch.int64)
        target = {}
        target["boxes"] = boxes
        target["labels"] = labels
        target["masks"] = masks
        target["image_id"] = image_id
        target["area"] = area
        target["iscrowd"] = iscrowd
        img = transforms.ToTensor()(img)
        # Random horizontal flip augmentation (80% of the time — unusually
        # high; presumably intentional, TODO confirm).
        if random.random() < 0.8:
            height, width = img.shape[-2:]
            img = img.flip(-1)
            bbox = target["boxes"]
            bbox[:, [0, 2]] = width - bbox[:, [2, 0]]
            target["boxes"] = bbox
            target["masks"] = target["masks"].flip(-1)
        return img, target
    def __len__(self):
        return len(self.image_info)
# # Create Dataset
# Build the training set; rows with EncodedPixels == "-1" (no pneumothorax) are skipped.
dataset_train = SIIMDataset("../data/train-rle.csv", "../data/train_png/")
len(dataset_train)
# # Create Mask-RCNN Model
# +
# create mask rcnn model
num_classes = 2  # background + pneumothorax
device = torch.device('cuda:0')
# Start from a COCO-pretrained Mask R-CNN and swap both heads for 2 classes.
model_ft = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)
in_features = model_ft.roi_heads.box_predictor.cls_score.in_features
model_ft.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
in_features_mask = model_ft.roi_heads.mask_predictor.conv5_mask.in_channels
hidden_layer = 256
model_ft.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask, hidden_layer, num_classes)
model_ft.to(device)
# Fine-tune every layer, not just the freshly replaced heads.
for param in model_ft.parameters():
    param.requires_grad = True
# -
# # Create Data Loader
# NOTE(review): this inline lambda duplicates the collate_fn defined above —
# could simply pass collate_fn=collate_fn.
data_loader = torch.utils.data.DataLoader(
    dataset_train, batch_size=2, shuffle=True, num_workers=8,
    collate_fn=lambda x: tuple(zip(*x)))
# # Define Training Parameters
params = [p for p in model_ft.parameters() if p.requires_grad]
optimizer = torch.optim.SGD(params, lr=0.001, momentum=0.9, weight_decay=0.0005)
# Decay LR by 10x every 5 epochs (so once within the 6-epoch run below).
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                               step_size=5,
                                               gamma=0.1)
# # Train Model
num_epochs = 6
for epoch in range(num_epochs):
    train_one_epoch(model_ft, optimizer, data_loader, device, epoch, print_freq=100)
    lr_scheduler.step()
# # Mask to RLE helper
def mask_to_rle(img, width, height):
    """Encode a binary mask in the competition's relative run-length encoding.

    The mask is scanned as ``img[x][y]`` for ``x`` in ``range(width)`` then
    ``y`` in ``range(height)``.  Each run of 1-pixels is emitted as a
    ``<start> <length>`` pair where ``start`` is the number of pixels since
    the end of the previous run (relative encoding — hence the counter reset
    after each flushed run), and ``length`` is the run's pixel count.

    Args:
        img: 2-D indexable of 0/1 values, indexed as img[x][y].
        width: extent of the first index.
        height: extent of the second index.

    Returns:
        A string beginning with a single space, followed by space-separated
        start/length pairs; just " " for an empty mask.
    """
    rle = []
    last_color = 0
    pixels_since_run_end = 0  # relative-offset counter; reset each time a run is flushed
    run_start = -1
    run_length = 0
    for x in range(width):
        for y in range(height):
            current_color = img[x][y]
            if current_color != last_color:
                if current_color == 1:
                    # 0 -> 1 transition: a new run begins at the current relative offset.
                    run_start = pixels_since_run_end
                    run_length = 1
                else:
                    # 1 -> 0 transition: flush the finished run.
                    rle.append(str(run_start))
                    rle.append(str(run_length))
                    run_start = -1
                    run_length = 0
                    pixels_since_run_end = 0
            elif run_start > -1:
                run_length += 1
            last_color = current_color
            pixels_since_run_end += 1
    # Bug fix: a run reaching the very last pixel has no trailing 1 -> 0
    # transition and was previously dropped; flush it explicitly.
    if run_start > -1:
        rle.append(str(run_start))
        rle.append(str(run_length))
    return " " + " ".join(rle)
# # Convert Model to Evaluation Mode
# +
# Freeze all weights and switch batch-norm/dropout layers to inference behaviour.
for param in model_ft.parameters():
    param.requires_grad = False
model_ft.eval();
# -
# # Get Test Data
# +
sample_df = pd.read_csv("../data/sample_submission.csv")
# this part was taken from @raddar's kernel: https://www.kaggle.com/raddar/better-sample-submission
# ImageIds that appear more than once in the sample submission.
# NOTE(review): masks_ is computed but never used below.
masks_ = sample_df.groupby('ImageId')['ImageId'].count().reset_index(name='N')
masks_ = masks_.loc[masks_.N > 1].ImageId.values
###
# Keep a single row per ImageId.
sample_df = sample_df.drop_duplicates('ImageId', keep='last').reset_index(drop=True)
# -
sample_df['ImageId'].nunique()
sample_df.shape
# +
tt = transforms.ToTensor()
sublist = []     # accumulated submission rows: [ImageId, rle]
counter = 0      # number of test images with at least one predicted mask
threshold = 0.3  # minimum detection score for a mask to be kept
for index, row in tqdm(sample_df.iterrows(), total=len(sample_df)):
    image_id = row['ImageId']
    img_path = os.path.join('../data/test_png', image_id + '.png')
    img = Image.open(img_path).convert("RGB")
    width, height = img.size
    # Resize to a fixed 1024x1024 input; predicted masks are resized back below.
    img = img.resize((1024, 1024), resample=Image.BILINEAR)
    img = tt(img)
    result = model_ft([img.to(device)])[0]
    if len(result["masks"]) > 0:
        counter += 1
        mask_added = 0
        for ppx in range(len(result["masks"])):
            if result["scores"][ppx] >= threshold:
                mask_added += 1
                # Soft mask tensor (C,H,W) -> PIL image, back to original size.
                res = transforms.ToPILImage()(result["masks"][ppx].permute(1, 2, 0).cpu().numpy())
                res = np.asarray(res.resize((width, height), resample=Image.BILINEAR))
                # Binarize and transpose so mask_to_rle scans img[x][y].
                res = (res[:, :] * 255. > 127).astype(np.uint8).T
                rle = mask_to_rle(res, width, height)
                sublist.append([image_id, rle])
        if mask_added == 0:
            # All detections fell below threshold: emit the "no mask" marker.
            rle = " -1"
            sublist.append([image_id, rle])
    else:
        # No detections at all: emit the "no mask" marker.
        rle = " -1"
        sublist.append([image_id, rle])
submission_df = pd.DataFrame(sublist, columns=sample_df.columns.values)
submission_df.to_csv("submission.csv", index=False)
print(counter)
|
notebooks/mask-rcnn-with-augmentation-and-multiple-masks.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# + [markdown] colab_type="text" id="copyright-notice"
# #### Copyright 2017 Google LLC.
# + cellView="both" colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="copyright-notice2"
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] colab_type="text" id="4f3CKqFUqL2-" slideshow={"slide_type": "slide"}
# # 使用 TensorFlow 的基本步骤
# + [markdown] colab_type="text" id="Bd2Zkk1LE2Zr"
# **学习目标:**
# * 学习基本的 TensorFlow 概念
# * 在 TensorFlow 中使用 `LinearRegressor` 类并基于单个输入特征预测各城市街区的房屋价值中位数
# * 使用均方根误差 (RMSE) 评估模型预测的准确率
# * 通过调整模型的超参数提高模型准确率
# + [markdown] colab_type="text" id="MxiIKhP4E2Zr"
# 数据基于加利福尼亚州 1990 年的人口普查数据。
# + [markdown] colab_type="text" id="6TjLjL9IU80G"
# ## 设置
# 在此第一个单元格中,我们将加载必要的库。
# + [markdown] colab_type="text" id="xU8Oh-gKh05w"
#
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 17, "output_extras": []} colab_type="code" executionInfo={"elapsed": 2357, "status": "ok", "timestamp": 1520816744038, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -480} id="rVFf5asKE2Zt" outputId="fdc1c9c6-d9b2-4075-9bec-d100e127e28f"
import math
from IPython import display
from matplotlib import cm
from matplotlib import gridspec
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from sklearn import metrics
import tensorflow as tf
from tensorflow.python.data import Dataset
tf.logging.set_verbosity(tf.logging.ERROR)
pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.1f}'.format
# + [markdown] colab_type="text" id="ipRyUHjhU80Q"
# 接下来,我们将加载数据集。
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 17, "output_extras": []} colab_type="code" executionInfo={"elapsed": 1611, "status": "ok", "timestamp": 1520816749276, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -480} id="9ivCDWnwE2Zx" outputId="7281ceee-9fb3-48d4-81ef-bb605d9779b7"
# Load the California housing training data directly from Cloud Storage.
california_housing_dataframe = pd.read_csv("https://storage.googleapis.com/mledu-datasets/california_housing_train.csv", sep=",")
# + [markdown] colab_type="text" id="vVk_qlG6U80j"
# 我们将对数据进行随机化处理,以确保不会出现任何病态排序结果(可能会损害随机梯度下降法的效果)。此外,我们会将 `median_house_value` 调整为以千为单位,这样,模型就能够以常用范围内的学习速率较为轻松地学习这些数据。
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 439, "output_extras": [{"item_id": 1}]} colab_type="code" executionInfo={"elapsed": 1569, "status": "ok", "timestamp": 1520816754122, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -480} id="r0eVyguIU80m" outputId="48ba4bca-3d08-444e-eb02-e9125e5494b6"
# Randomize the row order so SGD does not see a pathological ordering,
# and rescale the label into units of thousands.
california_housing_dataframe = california_housing_dataframe.reindex(
    np.random.permutation(california_housing_dataframe.index))
california_housing_dataframe["median_house_value"] /= 1000.0
california_housing_dataframe
# + [markdown] colab_type="text" id="HzzlSs3PtTmt" slideshow={"slide_type": "-"}
# ## 检查数据
#
# 建议您在使用数据之前,先对它有一个初步的了解。
#
# 我们会输出关于各列的一些实用统计信息快速摘要:样本数、均值、标准偏差、最大值、最小值和各种分位数。
# + cellView="both" colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 317, "output_extras": [{"item_id": 1}], "test": {"output": "ignore", "timeout": 600}} colab_type="code" executionInfo={"elapsed": 878, "status": "ok", "timestamp": 1520816761494, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -480} id="gzb10yoVrydW" outputId="ac9b5e01-9e23-4bf7-df91-e7dda8ecbe61" slideshow={"slide_type": "slide"}
california_housing_dataframe.describe()
# + [markdown] colab_type="text" id="Lr6wYl2bt2Ep" slideshow={"slide_type": "-"}
# ## 构建第一个模型
#
# 在本练习中,我们将尝试预测 `median_house_value`,它将是我们的标签(有时也称为目标)。我们将使用 `total_rooms` 作为输入特征。
#
# **注意**:我们使用的是城市街区级别的数据,因此该特征表示相应街区的房间总数。
#
# 为了训练模型,我们将使用 TensorFlow [Estimator](https://www.tensorflow.org/get_started/estimator) API 提供的 [LinearRegressor](https://www.tensorflow.org/api_docs/python/tf/estimator/LinearRegressor) 接口。此 API 负责处理大量低级别模型搭建工作,并会提供执行模型训练、评估和推理的便利方法。
# + [markdown] colab_type="text" id="0cpcsieFhsNI"
# ### 第 1 步:定义特征并配置特征列
# + [markdown] colab_type="text" id="EL8-9d4ZJNR7"
# 为了将我们的训练数据导入 TensorFlow,我们需要指定每个特征包含的数据类型。在本练习及今后的练习中,我们主要会使用以下两类数据:
#
# * **分类数据**:一种文字数据。在本练习中,我们的住房数据集不包含任何分类特征,但您可能会看到的示例包括家居风格以及房地产广告词。
#
# * **数值数据**:一种数字(整数或浮点数)数据以及您希望视为数字的数据。有时您可能会希望将数值数据(例如邮政编码)视为分类数据(我们将在稍后的部分对此进行详细说明)。
#
# 在 TensorFlow 中,我们使用一种称为“**特征列**”的结构来表示特征的数据类型。特征列仅存储对特征数据的描述;不包含特征数据本身。
#
# 一开始,我们只使用一个数值输入特征 `total_rooms`。以下代码会从 `california_housing_dataframe` 中提取 `total_rooms` 数据,并使用 `numeric_column` 定义特征列,这样会将其数据指定为数值:
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 17, "output_extras": []} colab_type="code" executionInfo={"elapsed": 1901, "status": "ok", "timestamp": 1520816767068, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -480} id="rhEbFCZ86cDZ" outputId="dd6bfcb5-8cd0-4f7e-ded1-b38ac6e073e3"
# Define the input feature: total_rooms.
# (Double brackets select a one-column DataFrame rather than a Series.)
my_feature = california_housing_dataframe[["total_rooms"]]
# Configure a numeric feature column for total_rooms.
feature_columns = [tf.feature_column.numeric_column("total_rooms")]
# + [markdown] colab_type="text" id="K_3S8teX7Rd2"
# **注意**:`total_rooms` 数据的形状是一维数组(每个街区的房间总数列表)。这是 `numeric_column` 的默认形状,因此我们不必将其作为参数传递。
# + [markdown] colab_type="text" id="UMl3qrU5MGV6"
# ### 第 2 步:定义目标
# + [markdown] colab_type="text" id="cw4nrfcB7kyk"
# 接下来,我们将定义目标,也就是 `median_house_value`。同样,我们可以从 `california_housing_dataframe` 中提取它:
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 17, "output_extras": []} colab_type="code" executionInfo={"elapsed": 1573, "status": "ok", "timestamp": 1520816772123, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -480} id="l1NvvNkH8Kbt" outputId="abd2f713-e000-48b9-f9fe-dc0e60bc46cf"
# Define the label (the prediction target), in thousands.
targets = california_housing_dataframe["median_house_value"]
# + [markdown] colab_type="text" id="4M-rTFHL2UkA"
# ### 第 3 步:配置 LinearRegressor
# + [markdown] colab_type="text" id="fUfGQUNp7jdL"
# 接下来,我们将使用 LinearRegressor 配置线性回归模型,并使用 `GradientDescentOptimizer`(它会实现小批量随机梯度下降法 (SGD))训练该模型。`learning_rate` 参数可控制梯度步长的大小。
#
# **注意**:为了安全起见,我们还会通过 `clip_gradients_by_norm` 将[梯度裁剪](https://developers.google.com/machine-learning/glossary/#gradient_clipping)应用到我们的优化器。梯度裁剪可确保梯度大小在训练期间不会变得过大,梯度过大会导致梯度下降法失败。
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 17, "output_extras": []} colab_type="code" executionInfo={"elapsed": 1247, "status": "ok", "timestamp": 1520816777455, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -480} id="ubhtW-NGU802" outputId="aeea55c5-23ba-47ce-a27f-92fd4fb66e14"
# Use gradient descent as the optimizer for training the model.
my_optimizer=tf.train.GradientDescentOptimizer(learning_rate=0.0000001)
# Clip gradients to a max norm of 5 so SGD cannot diverge on large gradients.
my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
# Configure the linear regression model with our feature columns and optimizer.
# Set a learning rate of 0.0000001 for Gradient Descent.
linear_regressor = tf.estimator.LinearRegressor(
    feature_columns=feature_columns,
    optimizer=my_optimizer
)
# + [markdown] colab_type="text" id="-0IztwdK2f3F"
# ### 第 4 步:定义输入函数
# + [markdown] colab_type="text" id="S5M5j6xSCHxx"
# 要将加利福尼亚州住房数据导入 `LinearRegressor`,我们需要定义一个输入函数,让它告诉 TensorFlow 如何对数据进行预处理,以及在模型训练期间如何批处理、随机处理和重复数据。
#
# 首先,我们将 *Pandas* 特征数据转换成 NumPy 数组字典。然后,我们可以使用 TensorFlow [Dataset API](https://www.tensorflow.org/programmers_guide/datasets) 根据我们的数据构建 Dataset 对象,并将数据拆分成大小为 `batch_size` 的多批数据,以按照指定周期数 (num_epochs) 进行重复。
#
# **注意**:如果将默认值 `num_epochs=None` 传递到 `repeat()`,输入数据会无限期重复。
#
# 然后,如果 `shuffle` 设置为 `True`,则我们会对数据进行随机处理,以便数据在训练期间以随机方式传递到模型。`buffer_size` 参数会指定 `shuffle` 将从中随机抽样的数据集的大小。
#
# 最后,输入函数会为该数据集构建一个迭代器,并向 LinearRegressor 返回下一批数据。
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 17, "output_extras": []} colab_type="code" executionInfo={"elapsed": 943, "status": "ok", "timestamp": 1520816782623, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -480} id="RKZ9zNcHJtwc" outputId="5bc86219-a2a8-4c47-e1c3-7bf7177e3b8f"
def my_input_fn(features, targets, batch_size=1, shuffle=True, num_epochs=None):
    """Feed one-feature training data to a TF Estimator.

    Args:
        features: pandas DataFrame of features.
        targets: pandas DataFrame of targets.
        batch_size: size of the batches passed to the model.
        shuffle: True or False — whether to shuffle the data.
        num_epochs: number of repeats of the data; None repeats indefinitely.

    Returns:
        Tuple of (features, labels) tensors for the next data batch.
    """
    # The Dataset API wants plain numpy arrays keyed by feature name.
    feature_arrays = {}
    for name, column in dict(features).items():
        feature_arrays[name] = np.array(column)

    # Batch and repeat first, then (optionally) shuffle — same order as before.
    dataset = Dataset.from_tensor_slices((feature_arrays, targets))  # warning: 2GB limit
    dataset = dataset.batch(batch_size)
    dataset = dataset.repeat(num_epochs)
    if shuffle:
        dataset = dataset.shuffle(buffer_size=10000)

    # A one-shot iterator hands back the next (features, labels) batch.
    return dataset.make_one_shot_iterator().get_next()
# + [markdown] colab_type="text" id="wwa6UeA1V5F_"
# **注意**:在后面的练习中,我们会继续使用此输入函数。有关输入函数和 `Dataset` API 的更详细的文档,请参阅 [TensorFlow 编程人员指南](https://www.tensorflow.org/programmers_guide/datasets)。
# + [markdown] colab_type="text" id="4YS50CQb2ooO"
# ### 第 5 步:训练模型
# + [markdown] colab_type="text" id="yP92XkzhU803"
# 现在,我们可以在 `linear_regressor` 上调用 `train()` 来训练模型。我们会将 `my_input_fn` 封装在 `lambda` 中,以便可以将 `my_feature` 和 `target` 作为参数传入(有关详情,请参阅此 [TensorFlow 输入函数教程](https://www.tensorflow.org/get_started/input_fn#passing_input_fn_data_to_your_model)),首先,我们会训练 100 步。
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 17, "output_extras": []} colab_type="code" executionInfo={"elapsed": 1591, "status": "ok", "timestamp": 1520816787277, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -480} id="5M-Kt6w8U803" outputId="a3912f69-6c51-4682-ba59-5623221cf507"
# Train for 100 steps; the lambda defers my_input_fn so train() can call it lazily.
_ = linear_regressor.train(
    input_fn = lambda:my_input_fn(my_feature, targets),
    steps=100
)
# + [markdown] colab_type="text" id="7Nwxqxlx2sOv"
# ### 第 6 步:评估模型
# + [markdown] colab_type="text" id="KoDaF2dlJQG5"
# 我们基于该训练数据做一次预测,看看我们的模型在训练期间与这些数据的拟合情况。
#
# **注意**:训练误差可以衡量您的模型与训练数据的拟合情况,但并**_不能_**衡量模型**_泛化到新数据_**的效果。在后面的练习中,您将探索如何拆分数据以评估模型的泛化能力。
#
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 53, "output_extras": [{"item_id": 1}]} colab_type="code" executionInfo={"elapsed": 5527, "status": "ok", "timestamp": 1520816795047, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -480} id="pDIxp6vcU809" outputId="d59efb41-fa7d-4896-ecd9-aed4de4a1067"
# Create an input function for predictions.
# Note: Since we're making just one prediction for each example, we don't
# need to repeat or shuffle the data here.
prediction_input_fn =lambda: my_input_fn(my_feature, targets, num_epochs=1, shuffle=False)
# Call predict() on the linear_regressor to make predictions.
predictions = linear_regressor.predict(input_fn=prediction_input_fn)
# Format predictions as a NumPy array, so we can calculate error metrics.
predictions = np.array([item['predictions'][0] for item in predictions])
# Print Mean Squared Error and Root Mean Squared Error.
# RMSE is in the same units as the label, so it is easier to interpret.
mean_squared_error = metrics.mean_squared_error(predictions, targets)
root_mean_squared_error = math.sqrt(mean_squared_error)
print "Mean Squared Error (on training data): %0.3f" % mean_squared_error
print "Root Mean Squared Error (on training data): %0.3f" % root_mean_squared_error
# + [markdown] colab_type="text" id="AKWstXXPzOVz" slideshow={"slide_type": "slide"}
# 这是出色的模型吗?您如何判断误差有多大?
#
# 由于均方误差 (MSE) 很难解读,因此我们经常查看的是均方根误差 (RMSE)。RMSE 的一个很好的特性是,它可以在与原目标相同的规模下解读。
#
# 我们来比较一下 RMSE 与目标最大值和最小值的差值:
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 89, "output_extras": [{"item_id": 1}]} colab_type="code" executionInfo={"elapsed": 821, "status": "ok", "timestamp": 1520816798126, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -480} id="7UwqGbbxP53O" outputId="0cfc16b4-d2dd-46a9-84f0-a9edad555d53"
# Compare the RMSE against the overall spread of the target values.
min_house_value = california_housing_dataframe["median_house_value"].min()
max_house_value = california_housing_dataframe["median_house_value"].max()
min_max_difference = max_house_value - min_house_value
print "Min. Median House Value: %0.3f" % min_house_value
print "Max. Median House Value: %0.3f" % max_house_value
print "Difference between Min. and Max.: %0.3f" % min_max_difference
print "Root Mean Squared Error: %0.3f" % root_mean_squared_error
# + [markdown] colab_type="text" id="JigJr0C7Pzit"
# 我们的误差跨越目标值的近一半范围,可以进一步缩小误差吗?
#
# 这是每个模型开发者都会烦恼的问题。我们来制定一些基本策略,以降低模型误差。
#
# 首先,我们可以了解一下根据总体摘要统计信息,预测和目标的符合情况。
# + cellView="both" colab={"autoexec": {"startup": false, "wait_interval": 0}, "test": {"output": "ignore", "timeout": 600}} colab_type="code" id="941nclxbzqGH" slideshow={"slide_type": "-"}
# Put predictions and targets side by side to compare summary statistics.
calibration_data = pd.DataFrame()
calibration_data["predictions"] = pd.Series(predictions)
calibration_data["targets"] = pd.Series(targets)
calibration_data.describe()
# + [markdown] colab_type="text" id="E2-bf8Hq36y8" slideshow={"slide_type": "-"}
# 好的,此信息也许有帮助。平均值与模型的 RMSE 相比情况如何?各种分位数呢?
#
# 我们还可以将数据和学到的线可视化。我们已经知道,单个特征的线性回归可绘制成一条将输入 *x* 映射到输出 *y* 的线。
#
# 首先,我们将获得均匀分布的随机数据样本,以便绘制可辨的散点图。
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="SGRIi3mAU81H"
# Take a uniform random sample of 300 rows for a readable scatter plot.
sample = california_housing_dataframe.sample(n=300)
# + [markdown] colab_type="text" id="N-JwuJBKU81J"
# 然后,我们根据模型的偏差项和特征权重绘制学到的线,并绘制散点图。该线会以红色显示。
# + cellView="both" colab={"autoexec": {"startup": false, "wait_interval": 0}, "test": {"output": "ignore", "timeout": 600}} colab_type="code" id="7G12E76-339G" slideshow={"slide_type": "-"}
# Get the min and max total_rooms values.
x_0 = sample["total_rooms"].min()
x_1 = sample["total_rooms"].max()
# Retrieve the final weight and bias generated during training.
# The variable names follow tf.estimator.LinearRegressor's internal scheme.
weight = linear_regressor.get_variable_value('linear/linear_model/total_rooms/weights')[0]
bias = linear_regressor.get_variable_value('linear/linear_model/bias_weights')
# Get the predicted median_house_values for the min and max total_rooms values.
y_0 = weight * x_0 + bias
y_1 = weight * x_1 + bias
# Plot our regression line from (x_0, y_0) to (x_1, y_1).
plt.plot([x_0, x_1], [y_0, y_1], c='r')
# Label the graph axes.
plt.ylabel("median_house_value")
plt.xlabel("total_rooms")
# Plot a scatter plot from our data sample.
plt.scatter(sample["total_rooms"], sample["median_house_value"])
# Display graph.
plt.show()
# + [markdown] colab_type="text" id="t0lRt4USU81L"
# 这条初始线看起来与目标相差很大。看看您能否回想起摘要统计信息,并看到其中蕴含的相同信息。
#
# 综上所述,这些初始健全性检查提示我们也许可以找到更好的线。
# + [markdown] colab_type="text" id="AZWF67uv0HTG" slideshow={"slide_type": "slide"}
# ## 调整模型超参数
# 对于本练习,为方便起见,我们已将上述所有代码放入一个函数中。您可以使用不同的参数调用该函数,以了解相应效果。
#
# 我们会在 10 个等分的时间段内使用此函数,以便观察模型在每个时间段的改善情况。
#
# 对于每个时间段,我们都会计算训练损失并绘制相应图表。这可以帮助您判断模型收敛的时间,或者模型是否需要更多迭代。
#
# 此外,我们还会绘制模型随着时间的推移学习的特征权重和偏差项值的曲线图。您还可以通过这种方式查看模型的收敛效果。
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="wgSMeD5UU81N"
def train_model(learning_rate, steps, batch_size, input_feature="total_rooms"):
    """Trains a linear regression model of one feature.

    Training is split into 10 equal periods; after each period the RMSE is
    printed and the current regression line is drawn, so convergence can be
    inspected visually.

    Args:
        learning_rate: A `float`, the learning rate.
        steps: A non-zero `int`, the total number of training steps. A training step
            consists of a forward and backward pass using a single batch.
        batch_size: A non-zero `int`, the batch size.
        input_feature: A `string` specifying a column from `california_housing_dataframe`
            to use as input feature.
    """
    periods = 10
    steps_per_period = steps / periods

    my_feature = input_feature
    my_feature_data = california_housing_dataframe[[my_feature]]
    my_label = "median_house_value"
    targets = california_housing_dataframe[my_label]

    # Create feature columns
    feature_columns = [tf.feature_column.numeric_column(my_feature)]

    # Create input functions
    training_input_fn = lambda:my_input_fn(my_feature_data, targets, batch_size=batch_size)
    prediction_input_fn = lambda: my_input_fn(my_feature_data, targets, num_epochs=1, shuffle=False)

    # Create a linear regressor object.
    my_optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
    # Clip gradients to a max norm of 5 so SGD cannot diverge.
    my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
    linear_regressor = tf.estimator.LinearRegressor(
        feature_columns=feature_columns,
        optimizer=my_optimizer
    )

    # Set up to plot the state of our model's line each period.
    plt.figure(figsize=(15, 6))
    plt.subplot(1, 2, 1)
    plt.title("Learned Line by Period")
    plt.ylabel(my_label)
    plt.xlabel(my_feature)
    sample = california_housing_dataframe.sample(n=300)
    plt.scatter(sample[my_feature], sample[my_label])
    # One colormap color per period so later lines are visually distinct.
    colors = [cm.coolwarm(x) for x in np.linspace(-1, 1, periods)]

    # Train the model, but do so inside a loop so that we can periodically assess
    # loss metrics.
    print "Training model..."
    print "RMSE (on training data):"
    root_mean_squared_errors = []
    for period in range (0, periods):
        # Train the model, starting from the prior state.
        linear_regressor.train(
            input_fn=training_input_fn,
            steps=steps_per_period
        )
        # Take a break and compute predictions.
        predictions = linear_regressor.predict(input_fn=prediction_input_fn)
        predictions = np.array([item['predictions'][0] for item in predictions])

        # Compute loss.
        root_mean_squared_error = math.sqrt(
            metrics.mean_squared_error(predictions, targets))
        # Occasionally print the current loss.
        print " period %02d : %0.2f" % (period, root_mean_squared_error)
        # Add the loss metrics from this period to our list.
        root_mean_squared_errors.append(root_mean_squared_error)
        # Finally, track the weights and biases over time.
        # Apply some math to ensure that the data and line are plotted neatly.
        y_extents = np.array([0, sample[my_label].max()])
        weight = linear_regressor.get_variable_value('linear/linear_model/%s/weights' % input_feature)[0]
        bias = linear_regressor.get_variable_value('linear/linear_model/bias_weights')
        x_extents = (y_extents - bias) / weight
        # Clamp the line's x range to the sampled data so the plot stays readable.
        x_extents = np.maximum(np.minimum(x_extents,
                                          sample[my_feature].max()),
                               sample[my_feature].min())
        y_extents = weight * x_extents + bias
        plt.plot(x_extents, y_extents, color=colors[period])
    print "Model training finished."

    # Output a graph of loss metrics over periods.
    plt.subplot(1, 2, 2)
    plt.ylabel('RMSE')
    plt.xlabel('Periods')
    plt.title("Root Mean Squared Error vs. Periods")
    plt.tight_layout()
    plt.plot(root_mean_squared_errors)

    # Output a table with calibration data.
    calibration_data = pd.DataFrame()
    calibration_data["predictions"] = pd.Series(predictions)
    calibration_data["targets"] = pd.Series(targets)
    display.display(calibration_data.describe())

    print "Final RMSE (on training data): %0.2f" % root_mean_squared_error
# + [markdown] colab_type="text" id="kg8A4ArBU81Q"
# ## 任务 1:使 RMSE 不超过 180
#
# 调整模型超参数,以降低损失和更符合目标分布。
# 约 5 分钟后,如果您无法让 RMSE 低于 180,请查看解决方案,了解可能的组合。
# + cellView="both" colab={"autoexec": {"startup": false, "wait_interval": 0}, "test": {"output": "ignore", "timeout": 600}} colab_type="code" id="UzoZUSdLIolF" slideshow={"slide_type": "slide"}
# Task 1 attempt: small learning rate, 100 steps, batch size 1.
train_model(
    learning_rate=0.00001,
    steps=100,
    batch_size=1
)
# + [markdown] colab_type="text" id="ajVM7rkoYXeL"
# ### 解决方案
#
# 点击下方即可查看一种可能的解决方案。
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="T3zmldDwYy5c"
# One possible hyperparameter combination that reaches RMSE < 180.
train_model(
    learning_rate=0.00002,
    steps=500,
    batch_size=5
)
# + [markdown] colab_type="text" id="M8H0_D4vYa49"
# 这只是一种可能的配置;也许还有同样能够提供理想结果的其他设置组合。请注意,总体而言,本练习重点不是查找*一种最佳*设置,而是帮助您对模型配置调整如何影响预测质量有一个直观的认识。
# + [markdown] colab_type="text" id="QU5sLyYTqzqL" slideshow={"slide_type": "slide"}
# ### 有适用于模型调整的标准启发法吗?
#
# 这是一个常见的问题。简短的答案是,不同超参数的效果取决于数据。因此,不存在必须遵循的规则,您需要对自己的数据进行测试。
#
# 即便如此,我们仍在下面列出了几条可为您提供指导的经验法则:
#
# * 训练误差应该稳步减小,刚开始是急剧减小,最终应随着训练收敛达到平稳状态。
# * 如果训练尚未收敛,尝试运行更长的时间。
# * 如果训练误差减小速度过慢,则提高学习速率也许有助于加快其减小速度。
# * 但有时如果学习速率过高,训练误差的减小速度反而会变慢。
# * 如果训练误差变化很大,尝试降低学习速率。
# * 较低的学习速率和较大的步数/较大的批量大小通常是不错的组合。
# * 批量大小过小也会导致不稳定情况。不妨先尝试 100 或 1000 等较大的值,然后逐渐减小值的大小,直到出现性能降低的情况。
#
# 重申一下,切勿严格遵循这些经验法则,因为效果取决于数据。请始终进行试验和验证。
# + [markdown] colab_type="text" id="GpV-uF_cBCBU" slideshow={"slide_type": "slide"}
# ## 任务 2:尝试其他特征
#
# 使用 `population` 特征替换 `total_rooms` 特征,看看能否取得更好的效果。
#
# 这部分不必超过 5 分钟。
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="YMyOxzb0ZlAH"
# YOUR CODE HERE
# + [markdown] colab_type="text" id="ci1ISxxrZ7v0"
# ### 解决方案
#
# 点击下方即可查看一种可能的解决方案。
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 241, "output_extras": [{"item_id": 1}]} colab_type="code" executionInfo={"elapsed": 1531, "status": "error", "timestamp": 1520816726141, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -480} id="SjdQQCduZ7BV" outputId="4596189c-65c9-4326-c91f-8d32177937fa"
# Task 2 solution: train on the `population` feature instead of `total_rooms`.
train_model(
    learning_rate=0.00002,
    steps=1000,
    batch_size=5,
    input_feature="population"
)
|
TFlearning/first_steps_with_tensor_flow.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/ThilinaRajapakse/pytorch-transformers-classification/blob/master/colab_quickstart.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={} colab_type="code" id="0YLoS0hWz-ch"
# %%writefile utils.py
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" BERT classification fine-tuning: utilities to work with GLUE tasks """
from __future__ import absolute_import, division, print_function
import csv
import logging
import os
import sys
from io import open
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import matthews_corrcoef, f1_score
from multiprocessing import Pool, cpu_count
from tqdm import tqdm
# Module-level logger for these GLUE utilities.
logger = logging.getLogger(__name__)
# Raise the csv field limit to 2**31 - 1 so very long text fields parse.
csv.field_size_limit(2147483647)
class InputExample(object):
    """One example (sentence or sentence pair) for sequence classification."""

    def __init__(self, guid, text_a, text_b=None, label=None):
        """Store the raw fields of a single example.

        Args:
            guid: Unique id for the example.
            text_a: string. Untokenized text of the first sequence; for
                single-sequence tasks only this one is required.
            text_b: (Optional) string. Untokenized text of the second
                sequence; only needed for sequence-pair tasks.
            label: (Optional) string. Label of the example; set for train
                and dev examples, but not for test examples.
        """
        self.guid, self.label = guid, label
        self.text_a, self.text_b = text_a, text_b
class InputFeatures(object):
    """Numeric model inputs produced from a single example."""

    def __init__(self, input_ids, input_mask, segment_ids, label_id):
        # Token ids, attention mask, token-type ids, and the encoded label.
        self.input_ids, self.input_mask = input_ids, input_mask
        self.segment_ids, self.label_id = segment_ids, label_id
class DataProcessor(object):
    """Base class for data converters for sequence classification data sets."""

    def get_train_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the train set."""
        raise NotImplementedError()

    def get_dev_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the dev set."""
        raise NotImplementedError()

    def get_labels(self):
        """Gets the list of labels for this data set."""
        raise NotImplementedError()

    @classmethod
    def _read_tsv(cls, input_file, quotechar=None):
        """Read a tab-separated file into a list of row lists."""
        with open(input_file, "r", encoding="utf-8-sig") as f:
            rows = []
            for row in csv.reader(f, delimiter="\t", quotechar=quotechar):
                if sys.version_info[0] == 2:
                    # Python 2's csv module yields byte strings; decode them.
                    row = list(unicode(cell, 'utf-8') for cell in row)
                rows.append(row)
            return rows
class BinaryProcessor(DataProcessor):
    """Processor for binary (labels "0"/"1") TSV data sets."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for i, line in enumerate(lines):
            # Column 3 holds the text, column 1 the label.
            examples.append(InputExample(guid="%s-%s" % (set_type, i),
                                         text_a=line[3],
                                         text_b=None,
                                         label=line[1]))
        return examples
def convert_example_to_feature(example_row, pad_token=0,
                               sequence_a_segment_id=0, sequence_b_segment_id=1,
                               cls_token_segment_id=1, pad_token_segment_id=0,
                               mask_padding_with_zero=True):
    """Convert one packed example tuple into an InputFeatures.

    ``example_row`` is the tuple built by convert_examples_to_features; it
    carries the example plus all per-model settings so this function can run
    inside a multiprocessing worker without shared state.

    NOTE(review): the unpacking below overwrites the keyword arguments
    ``cls_token_segment_id`` and ``pad_token_segment_id`` with the values
    packed into ``example_row``, so those keyword defaults are effectively
    unused.
    """
    example, label_map, max_seq_length, tokenizer, output_mode, cls_token_at_end, cls_token, sep_token, cls_token_segment_id, pad_on_left, pad_token_segment_id = example_row

    tokens_a = tokenizer.tokenize(example.text_a)

    tokens_b = None
    if example.text_b:
        tokens_b = tokenizer.tokenize(example.text_b)
        # Modifies `tokens_a` and `tokens_b` in place so that the total
        # length is less than the specified length.
        # Account for [CLS], [SEP], [SEP] with "- 3"
        _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
    else:
        # Account for [CLS] and [SEP] with "- 2"
        if len(tokens_a) > max_seq_length - 2:
            tokens_a = tokens_a[:(max_seq_length - 2)]

    # The convention in BERT is:
    # (a) For sequence pairs:
    #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
    #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
    # (b) For single sequences:
    #  tokens:   [CLS] the dog is hairy . [SEP]
    #  type_ids:   0   0   0   0  0     0   0
    #
    # Where "type_ids" are used to indicate whether this is the first
    # sequence or the second sequence. The embedding vectors for `type=0` and
    # `type=1` were learned during pre-training and are added to the wordpiece
    # embedding vector (and position vector). This is not *strictly* necessary
    # since the [SEP] token unambiguously separates the sequences, but it makes
    # it easier for the model to learn the concept of sequences.
    #
    # For classification tasks, the first vector (corresponding to [CLS]) is
    # used as as the "sentence vector". Note that this only makes sense because
    # the entire model is fine-tuned.
    tokens = tokens_a + [sep_token]
    segment_ids = [sequence_a_segment_id] * len(tokens)

    if tokens_b:
        tokens += tokens_b + [sep_token]
        segment_ids += [sequence_b_segment_id] * (len(tokens_b) + 1)

    if cls_token_at_end:
        # XLNet/GPT pattern: CLS goes last.
        tokens = tokens + [cls_token]
        segment_ids = segment_ids + [cls_token_segment_id]
    else:
        # BERT/XLM pattern: CLS goes first.
        tokens = [cls_token] + tokens
        segment_ids = [cls_token_segment_id] + segment_ids

    input_ids = tokenizer.convert_tokens_to_ids(tokens)

    # The mask has 1 for real tokens and 0 for padding tokens. Only real
    # tokens are attended to.
    input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

    # Zero-pad up to the sequence length.
    padding_length = max_seq_length - len(input_ids)
    if pad_on_left:
        input_ids = ([pad_token] * padding_length) + input_ids
        input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
        segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
    else:
        input_ids = input_ids + ([pad_token] * padding_length)
        input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
        segment_ids = segment_ids + ([pad_token_segment_id] * padding_length)

    # All three sequences must end up exactly max_seq_length long.
    assert len(input_ids) == max_seq_length
    assert len(input_mask) == max_seq_length
    assert len(segment_ids) == max_seq_length

    if output_mode == "classification":
        label_id = label_map[example.label]
    elif output_mode == "regression":
        label_id = float(example.label)
    else:
        raise KeyError(output_mode)

    return InputFeatures(input_ids=input_ids,
                         input_mask=input_mask,
                         segment_ids=segment_ids,
                         label_id=label_id)
def convert_examples_to_features(examples, label_list, max_seq_length,
                                 tokenizer, output_mode,
                                 cls_token_at_end=False, pad_on_left=False,
                                 cls_token='[CLS]', sep_token='[SEP]', pad_token=0,
                                 sequence_a_segment_id=0, sequence_b_segment_id=1,
                                 cls_token_segment_id=1, pad_token_segment_id=0,
                                 mask_padding_with_zero=True,
                                 process_count=max(1, cpu_count() - 2)):
    """ Loads a data file into a list of `InputBatch`s
        `cls_token_at_end` define the location of the CLS token:
            - False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
            - True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
        `cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet)

        Conversion is fanned out over `process_count` worker processes.
        BUG FIX: the default used to be `cpu_count() - 2`, which is 0 or
        negative on machines with <= 2 cores and makes Pool() raise
        ValueError; it is now clamped to at least 1.
    """
    label_map = {label : i for i, label in enumerate(label_list)}

    # Bundle each example with the shared conversion arguments so the worker
    # function can be invoked with a single picklable tuple via Pool.imap.
    examples = [(example, label_map, max_seq_length, tokenizer, output_mode, cls_token_at_end, cls_token, sep_token, cls_token_segment_id, pad_on_left, pad_token_segment_id) for example in examples]

    with Pool(process_count) as p:
        features = list(tqdm(p.imap(convert_example_to_feature, examples, chunksize=100), total=len(examples)))

    return features
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
# Registry of dataset processors keyed by task name; this notebook only
# defines a single binary-classification task.
processors = {
    "binary": BinaryProcessor
}

# Whether each task is scored as classification or regression.
output_modes = {
    "binary": "classification"
}

# Number of output labels per task (binary => 2 classes).
GLUE_TASKS_NUM_LABELS = {
    "binary": 2
}
# + colab={} colab_type="code" id="VeXuXWylz7BD"
from __future__ import absolute_import, division, print_function
import glob
import logging
import os
import random
import json
import numpy as np
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
import random
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm_notebook, trange
from pytorch_transformers import (WEIGHTS_NAME, BertConfig, BertForSequenceClassification, BertTokenizer,
XLMConfig, XLMForSequenceClassification, XLMTokenizer,
XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer,
RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer)
from pytorch_transformers import AdamW, WarmupLinearSchedule
from utils import (convert_examples_to_features,
output_modes, processors)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# + colab={} colab_type="code" id="F93_pIopz7BG"
# Run configuration / hyper-parameters for fine-tuning.  Dumped to args.json
# below so a run can be reproduced later.  Notable knobs:
#   model_type / model_name     - which pytorch_transformers model to load
#   max_seq_length              - inputs padded/truncated to this many tokens
#   gradient_accumulation_steps - effective batch = train_batch_size * this
#   fp16 / fp16_opt_level       - mixed-precision training via NVIDIA apex
#   reprocess_input_data        - ignore cached features and rebuild them
#   eval_all_checkpoints        - evaluate every saved checkpoint, not only
#                                 the final model
args = {
    'data_dir': 'data/',
    'model_type': 'roberta',
    'model_name': 'roberta-base',
    'task_name': 'binary',
    'output_dir': 'outputs/',
    'cache_dir': 'cache/',
    'do_train': True,
    'do_eval': True,
    'fp16': False,
    'fp16_opt_level': 'O1',
    'max_seq_length': 128,
    'output_mode': 'classification',
    'train_batch_size': 8,
    'eval_batch_size': 8,
    'gradient_accumulation_steps': 1,
    'num_train_epochs': 1,
    'weight_decay': 0,
    'learning_rate': 4e-5,
    'adam_epsilon': 1e-8,
    'warmup_steps': 0,
    'max_grad_norm': 1.0,
    'logging_steps': 50,
    'evaluate_during_training': False,
    'save_steps': 2000,
    'eval_all_checkpoints': True,
    'overwrite_output_dir': False,
    'reprocess_input_data': False,
    'notes': 'Using Yelp Reviews dataset'
}
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# + colab={} colab_type="code" id="atGwIw3iz7BJ"
args
# + colab={} colab_type="code" id="Uzr2RwGLz7BL"
with open('args.json', 'w') as f:
json.dump(args, f)
# + colab={} colab_type="code" id="ymjmIyOhz7BN"
if os.path.exists(args['output_dir']) and os.listdir(args['output_dir']) and args['do_train'] and not args['overwrite_output_dir']:
raise ValueError("Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(args['output_dir']))
# + colab={} colab_type="code" id="LAHYiiLMz7BP"
MODEL_CLASSES = {
'bert': (BertConfig, BertForSequenceClassification, BertTokenizer),
'xlnet': (XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer),
'xlm': (XLMConfig, XLMForSequenceClassification, XLMTokenizer),
'roberta': (RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer)
}
config_class, model_class, tokenizer_class = MODEL_CLASSES[args['model_type']]
# + colab={} colab_type="code" id="qm5AguwFz7BR"
config = config_class.from_pretrained(args['model_name'], num_labels=2, finetuning_task=args['task_name'])
tokenizer = tokenizer_class.from_pretrained(args['model_name'])
# + colab={} colab_type="code" id="IGZHNvKAz7BU"
model = model_class.from_pretrained(args['model_name'])
# + colab={} colab_type="code" id="xyxKpk_6z7BW"
model.to(device);
# + colab={} colab_type="code" id="bsPmRyGE8GnR"
device
# + colab={} colab_type="code" id="Xe4P94Bfz7Ba"
task = args['task_name']
processor = processors[task]()
label_list = processor.get_labels()
num_labels = len(label_list)
# + colab={} colab_type="code" id="xqr_fwM3z7Bd"
def load_and_cache_examples(task, tokenizer, evaluate=False, undersample_scale_factor=0.01):
    """Build (or load from cache) a TensorDataset of input features for `task`.

    Args:
        task: key into the module-level `processors` registry.
        tokenizer: tokenizer used to turn examples into wordpiece features.
        evaluate: load the dev split when True, the train split otherwise.
        undersample_scale_factor: probability of keeping each example; used to
            randomly undersample the dataset so feature conversion fits in a
            Colab session.

    NOTE(review): the cache file name does not encode
    `undersample_scale_factor`, so a cached feature file built with one
    factor is silently reused on later calls unless
    args['reprocess_input_data'] is set.
    """
    processor = processors[task]()
    output_mode = args['output_mode']

    mode = 'dev' if evaluate else 'train'
    cached_features_file = os.path.join(args['data_dir'], f"cached_{mode}_{args['model_name']}_{args['max_seq_length']}_{task}")

    if os.path.exists(cached_features_file) and not args['reprocess_input_data']:
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
    else:
        logger.info("Creating features from dataset file at %s", args['data_dir'])
        label_list = processor.get_labels()
        examples = processor.get_dev_examples(args['data_dir']) if evaluate else processor.get_train_examples(args['data_dir'])

        # Randomly undersample; the counts are logged (rather than the bare
        # debug print()s that used to be here) so they end up in run logs.
        logger.info("%d examples before undersampling", len(examples))
        examples = [example for example in examples if np.random.rand() < undersample_scale_factor]
        logger.info("%d examples after undersampling", len(examples))

        features = convert_examples_to_features(examples, label_list, args['max_seq_length'], tokenizer, output_mode,
            cls_token_at_end=bool(args['model_type'] in ['xlnet']),            # xlnet has a cls token at the end
            cls_token=tokenizer.cls_token,
            sep_token=tokenizer.sep_token,
            cls_token_segment_id=2 if args['model_type'] in ['xlnet'] else 0,
            pad_on_left=bool(args['model_type'] in ['xlnet']),                 # pad on the left for xlnet
            pad_token_segment_id=4 if args['model_type'] in ['xlnet'] else 0,
            process_count=2)

        logger.info("Saving features into cached file %s", cached_features_file)
        torch.save(features, cached_features_file)

    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
    all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
    if output_mode == "classification":
        all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
    elif output_mode == "regression":
        all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.float)
    else:
        # BUG FIX: an unknown mode previously fell through and crashed later
        # with a NameError on all_label_ids; fail fast instead (KeyError
        # matches the convention used in convert_example_to_feature).
        raise KeyError(output_mode)

    dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
    return dataset
# + colab={} colab_type="code" id="oCul6vvCz7Bg"
def train(train_dataset, model, tokenizer):
    """Fine-tune `model` on `train_dataset`.

    Hyper-parameters come from the module-level `args` dict.  Returns
    (global_step, average training loss per optimizer step).
    """
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args['train_batch_size'])

    # Number of optimizer updates the LR scheduler has to plan for.
    t_total = len(train_dataloader) // args['gradient_accumulation_steps'] * args['num_train_epochs']

    # Per the original BERT recipe, biases and LayerNorm weights get no decay.
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args['weight_decay']},
        {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
    ]

    optimizer = AdamW(optimizer_grouped_parameters, lr=args['learning_rate'], eps=args['adam_epsilon'])
    scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args['warmup_steps'], t_total=t_total)

    if args['fp16']:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args['fp16_opt_level'])

    logger.info("***** Running training *****")
    logger.info("  Num examples = %d", len(train_dataset))
    logger.info("  Num Epochs = %d", args['num_train_epochs'])
    logger.info("  Total train batch size  = %d", args['train_batch_size'])
    logger.info("  Gradient Accumulation steps = %d", args['gradient_accumulation_steps'])
    logger.info("  Total optimization steps = %d", t_total)

    global_step = 0
    tr_loss, logging_loss = 0.0, 0.0
    model.zero_grad()
    train_iterator = trange(int(args['num_train_epochs']), desc="Epoch")
    for _ in train_iterator:
        epoch_iterator = tqdm_notebook(train_dataloader, desc="Iteration")
        for step, batch in enumerate(epoch_iterator):
            model.train()
            batch = tuple(t.to(device) for t in batch)
            inputs = {'input_ids': batch[0],
                      'attention_mask': batch[1],
                      'token_type_ids': batch[2] if args['model_type'] in ['bert', 'xlnet'] else None,  # XLM don't use segment_ids
                      'labels': batch[3]}
            outputs = model(**inputs)
            loss = outputs[0]  # model outputs are always tuple in pytorch-transformers (see doc)
            print("\r%f" % loss, end='')

            # Scale the loss so gradients accumulate to a full-batch average.
            if args['gradient_accumulation_steps'] > 1:
                loss = loss / args['gradient_accumulation_steps']

            if args['fp16']:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
                torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args['max_grad_norm'])
            else:
                loss.backward()
                torch.nn.utils.clip_grad_norm_(model.parameters(), args['max_grad_norm'])

            tr_loss += loss.item()
            if (step + 1) % args['gradient_accumulation_steps'] == 0:
                # BUG FIX: optimizer.step() must come before scheduler.step().
                # The original reversed order advances the learning-rate
                # schedule before the first parameter update (see the
                # PyTorch >= 1.1 LR-scheduler ordering note and the upstream
                # run_glue.py example, which steps the optimizer first).
                optimizer.step()
                scheduler.step()  # Update learning rate schedule
                model.zero_grad()
                global_step += 1

                if args['logging_steps'] > 0 and global_step % args['logging_steps'] == 0:
                    # Log metrics
                    if args['evaluate_during_training']:  # Only evaluate when single GPU otherwise metrics may not average well
                        results = evaluate(model, tokenizer)
                    logging_loss = tr_loss

                if args['save_steps'] > 0 and global_step % args['save_steps'] == 0:
                    # Save model checkpoint
                    output_dir = os.path.join(args['output_dir'], 'checkpoint-{}'.format(global_step))
                    if not os.path.exists(output_dir):
                        os.makedirs(output_dir)
                    model_to_save = model.module if hasattr(model, 'module') else model  # Take care of distributed/parallel training
                    model_to_save.save_pretrained(output_dir)
                    logger.info("Saving model checkpoint to %s", output_dir)

    return global_step, tr_loss / global_step
# + colab={} colab_type="code" id="tUvkEBZUz7Bk"
from sklearn.metrics import mean_squared_error, matthews_corrcoef, confusion_matrix
from scipy.stats import pearsonr
def get_mismatched(labels, preds):
    """Return the dev-set examples whose predicted label differs from the
    gold label.

    `labels` and `preds` are aligned numpy arrays; the module-level
    `processor` and `args` are used to re-load the dev examples in the same
    order they were evaluated in.
    """
    dev_examples = processor.get_dev_examples(args['data_dir'])
    wrong_mask = labels != preds
    return [example for example, is_wrong in zip(dev_examples, wrong_mask) if is_wrong]
def get_eval_report(labels, preds):
    """Compute Matthews correlation plus confusion-matrix counts for a
    binary task, and also collect the mis-classified dev examples.

    Returns a (metrics-dict, wrong-examples-list) tuple.
    """
    tn, fp, fn, tp = confusion_matrix(labels, preds).ravel()
    metrics = {
        "mcc": matthews_corrcoef(labels, preds),
        "tp": tp,
        "tn": tn,
        "fp": fp,
        "fn": fn
    }
    return metrics, get_mismatched(labels, preds)
def compute_metrics(task_name, preds, labels):
    """Return (metrics, mismatched-examples) for the given predictions.

    `task_name` is accepted for API symmetry with the GLUE scripts but is
    not consulted; every task here shares the same binary report format.
    """
    assert len(labels) == len(preds)
    return get_eval_report(labels, preds)
def evaluate(model, tokenizer, prefix=""):
    """Evaluate `model` on the dev split of the configured task.

    Returns (results dict, list of mis-classified dev examples).  `prefix`
    tags the log output so per-checkpoint evaluations can be told apart.
    Results are also written to eval_results.txt in the output directory.
    """
    # Loop to handle MNLI double evaluation (matched, mis-matched)
    eval_output_dir = args['output_dir']
    results = {}
    EVAL_TASK = args['task_name']
    # NOTE: the dev set is randomly undersampled by 0.5 to fit Colab limits.
    eval_dataset = load_and_cache_examples(EVAL_TASK, tokenizer, evaluate=True, undersample_scale_factor = 0.5)
    if not os.path.exists(eval_output_dir):
        os.makedirs(eval_output_dir)
    eval_sampler = SequentialSampler(eval_dataset)
    eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args['eval_batch_size'])
    # Eval!
    logger.info("***** Running evaluation {} *****".format(prefix))
    logger.info("  Num examples = %d", len(eval_dataset))
    logger.info("  Batch size = %d", args['eval_batch_size'])
    eval_loss = 0.0
    nb_eval_steps = 0
    # Logits and gold labels are accumulated across batches as numpy arrays.
    preds = None
    out_label_ids = None
    for batch in tqdm_notebook(eval_dataloader, desc="Evaluating"):
        model.eval()
        batch = tuple(t.to(device) for t in batch)
        with torch.no_grad():
            inputs = {'input_ids': batch[0],
                      'attention_mask': batch[1],
                      'token_type_ids': batch[2] if args['model_type'] in ['bert', 'xlnet'] else None,  # XLM don't use segment_ids
                      'labels': batch[3]}
            outputs = model(**inputs)
            tmp_eval_loss, logits = outputs[:2]
            eval_loss += tmp_eval_loss.mean().item()
        nb_eval_steps += 1
        if preds is None:
            preds = logits.detach().cpu().numpy()
            out_label_ids = inputs['labels'].detach().cpu().numpy()
        else:
            preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
            out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)
    eval_loss = eval_loss / nb_eval_steps
    if args['output_mode'] == "classification":
        # Turn per-class logits into hard label predictions.
        preds = np.argmax(preds, axis=1)
    elif args['output_mode'] == "regression":
        preds = np.squeeze(preds)
    result, wrong = compute_metrics(EVAL_TASK, preds, out_label_ids)
    results.update(result)
    output_eval_file = os.path.join(eval_output_dir, "eval_results.txt")
    with open(output_eval_file, "w") as writer:
        logger.info("***** Eval results {} *****".format(prefix))
        for key in sorted(result.keys()):
            logger.info("  %s = %s", key, str(result[key]))
            writer.write("%s = %s\n" % (key, str(result[key])))
    return results, wrong
# + colab={} colab_type="code" id="MlaeXY9sz7Bm"
# IMPORTANT #
# Due to the 12 hour limit on Google Colab and the time it would take to convert the dataset into features, the load_and_cache_examples() function has been modified
# to randomly undersample the dataset by a scale of 0.1
if args['do_train']:
train_dataset = load_and_cache_examples(task, tokenizer, undersample_scale_factor=0.1)
global_step, tr_loss = train(train_dataset, model, tokenizer)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
# + colab={} colab_type="code" id="1On6YjIULf7v"
if args['do_train']:
if not os.path.exists(args['output_dir']):
os.makedirs(args['output_dir'])
logger.info("Saving model checkpoint to %s", args['output_dir'])
model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training
model_to_save.save_pretrained(args['output_dir'])
tokenizer.save_pretrained(args['output_dir'])
torch.save(args, os.path.join(args['output_dir'], 'training_args.bin'))
# + colab={} colab_type="code" id="tqiWWPA0z7Bo"
results = {}
if args['do_eval']:
checkpoints = [args['output_dir']]
print(checkpoints)
if args['eval_all_checkpoints']:
checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args['output_dir'] + '/**/' + WEIGHTS_NAME, recursive=True)))
logging.getLogger("pytorch_transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else ""
model = model_class.from_pretrained(checkpoint)
model.to(device)
result, wrong_preds = evaluate(model, tokenizer, prefix=global_step)
result = dict((k + '_{}'.format(global_step), v) for k, v in result.items())
results.update(result)
# + colab={} colab_type="code" id="AMb25x63z7Bq"
results
# + colab={} colab_type="code" id="eyvWYNjRLHrI"
|
RoBERTa Model/colab_quickstart.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="-wFUjZTmcG5a"
# # Dataset loading
#
# In the first step we prepare everything and load our dataset with handwritten digits. Our goal is to load the image into our program and classify it.
#
# Classifying means to recognize which digit it is. Is it a *0* or a *9*?
#
# A small hint: the # symbol marks a comment in the code — programmers use comments to make lines of code easier to understand ;-)
# + colab={"base_uri": "https://localhost:8080/", "height": 70} colab_type="code" executionInfo={"elapsed": 2861, "status": "ok", "timestamp": 1600017837359, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiEI-GAxszORnjA0WqbcKrAT2dFjghG7ikD4zjctA=s64", "userId": "05156771066106099172"}, "user_tz": -120} id="0xLkmPMibmpd" outputId="6a7e987e-9fdb-4c8b-b1a9-0162d1e662bf"
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt
print(tf.__version__)
# We are loading the data
mnist = keras.datasets.mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
# We normalize the images so that they contain values in [0, 1]. This is preferable for NNs.
train_images = train_images / 255.0
test_images = test_images / 255.0
# + [markdown] colab_type="text" id="e0D6xKrIcElq"
# # Visualize - Illustrate - Pictorialize
#
# In the next step, we load a *0* and a *9* from our training dataset and visualize the two digits.
# + colab={"base_uri": "https://localhost:8080/", "height": 544} colab_type="code" executionInfo={"elapsed": 961, "status": "ok", "timestamp": 1600017839430, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiEI-GAxszORnjA0WqbcKrAT2dFjghG7ikD4zjctA=s64", "userId": "05156771066106099172"}, "user_tz": -120} id="QNUbehzkdgtE" outputId="f9110fca-82dc-48e7-d780-beb1f2080491"
# Load a 0 — note these actually come from the *test* split (test_labels /
# test_images), not the training data as the markdown above says.
indicies_of_all_0 = (np.where(test_labels == 0))[0]
image_with_0 = test_images[indicies_of_all_0[0]]
# Load a 9 from the test data
indicies_of_all_9 = (np.where(test_labels == 9))[0]
image_with_9 = test_images[indicies_of_all_9[0]]
# Visualize (= display) the images so we can check that everything loaded correctly
plt.figure()
plt.imshow(image_with_0, cmap=plt.cm.binary)
plt.title("This is a 0")
plt.show()
plt.figure()
plt.imshow(image_with_9, cmap=plt.cm.binary)
plt.title("This is a 9")
plt.show()
# + [markdown] colab_type="text" id="aFmWf_OtgUeE"
# # Define neural network
# Next we need to define the architecture of our neural network. How many layers should it have, how many neurons do these layers have.
#
# We first decide on the following architecture:
#
#
# * Input Layer: 28x28 (this is the size of our images!)
# * Fully Connected Network (FCN) Layer (means *dense* in TF!) with 128 neurons and one ReLU activation
# * Output are 10 neurons (we have 10 digits we want to classify)
# + colab={} colab_type="code" executionInfo={"elapsed": 778, "status": "ok", "timestamp": 1600017845302, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiEI-GAxszORnjA0WqbcKrAT2dFjghG7ikD4zjctA=s64", "userId": "05156771066106099172"}, "user_tz": -120} id="LP0ZTVUqcC5x"
# Network architecture
model = keras.Sequential([
keras.layers.Flatten(input_shape=(28, 28)),
keras.layers.Dense(128, activation='relu'),
keras.layers.Dense(10)
])
# Let TF build our network
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
# + [markdown] colab_type="text" id="Q9PhELVyhKD7"
# # Train the neural network
# In the next step we train our network with the data we loaded above. Training is also called *fitting*, because during training the weights of the neurons are adjusted, i.e. they are fitted. The word comes from English!
#
# Of course we have to tell TF how long the network should be trained. This is expressed by how often the training data should be shown to the network.
#
# * 1 x show all training data = 1 epoch
# * 2 x show all training data = 2 epochs
#
# + colab={"base_uri": "https://localhost:8080/", "height": 390} colab_type="code" executionInfo={"elapsed": 33154, "status": "ok", "timestamp": 1600017881644, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiEI-GAxszORnjA0WqbcKrAT2dFjghG7ikD4zjctA=s64", "userId": "05156771066106099172"}, "user_tz": -120} id="zfgWUUFnhTED" outputId="44ed8f30-c275-438a-a84c-2e2ad40e7e16"
# Train network for 10 epochs
model.fit(train_images, train_labels, epochs=10)
# + [markdown] colab_type="text" id="VJHCQFoniDM0"
# # Check how good the network is
# We have trained the network, now we also want to know how well it works. We also say we *evaluate* the network now. We evaluate with the test data. We ask how many of the test data are correctly classified, that is, how often the network correctly recognizes the number.
# + colab={"base_uri": "https://localhost:8080/", "height": 52} colab_type="code" executionInfo={"elapsed": 779, "status": "ok", "timestamp": 1600017885440, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/<KEY>", "userId": "05156771066106099172"}, "user_tz": -120} id="fzI4H2ueijYC" outputId="433cea08-1cea-4d0d-d6c2-762d06a04aca"
# Testing the network
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=0)
print('Our result:')
print('Out of ', test_images.shape[0], ' we correctly classified ', int(test_acc * test_images.shape[0]), '. These are {:.2f}% of the data'.format(test_acc * 100.0))
# + [markdown] colab_type="text" id="RnlkyCx3g45B"
# # Can you find out the following.
#
#
# * Training time (= epochs) of the neural network:
# * What happens if you train only for a very short time (e.g.: 1 epoch)? How many of the test data are then still recognized correctly?
# * What happens if you train for a long time (e.g. 1000 epochs)? How many of the test data will be recognized correctly then? What can you observe?
# * **Tip**: Find the place in the code where you train and change the number of epochs accordingly.
#
#
# * What happens if you shift the input number slightly to the left? Will it still be recognized correctly? Just try the example and describe what you see. Can you find an explanation for it?
#
# * What happens if the input number is slightly noisy? Is it still recognized correctly? Just try the example and describe what you see. Can you find an explanation for it? Where could noise come from, for example, can you find examples of it?
# + colab={"base_uri": "https://localhost:8080/", "height": 580} colab_type="code" executionInfo={"elapsed": 1036, "status": "ok", "timestamp": 1600017891289, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiEI-GAxszORnjA0WqbcKrAT2dFjghG7ikD4zjctA=s64", "userId": "05156771066106099172"}, "user_tz": -120} id="8m2q5qo1iQfa" outputId="aa12810f-2464-43da-9654-7f662b55c113"
# Example of shifted 9: copy a band of pixel columns from the original 9
# a few columns to the left.
shifted_nine = np.zeros_like(image_with_9)  # empty image, same size as the 9
# BUG FIX: the copy source must be the original image; the previous code
# read from the freshly zeroed `shifted_nine` itself, producing an
# all-black image instead of a shifted digit.
shifted_nine[:, :15] = image_with_9[:, 8:23]
plt.figure()
plt.imshow(image_with_9, cmap=plt.cm.binary)
plt.title("This is the correct 9")
plt.show()
plt.figure()
plt.imshow(shifted_nine, cmap=plt.cm.binary)
plt.title("This is the shifted 9")
plt.show()
from scipy.special import softmax
logits_of_nine = model.predict(np.expand_dims(image_with_9, 0))
probabilities_of_nine = softmax(logits_of_nine)[0]
detected_class_of_nine = np.argmax(probabilities_of_nine)
print('The NN classified the 9 as ', detected_class_of_nine, ' with a probability of ', probabilities_of_nine[detected_class_of_nine])
logits_of_shifted_nine = model.predict(np.expand_dims(shifted_nine, 0))
probabilities_of_shifted_nine = softmax(logits_of_shifted_nine)[0]
detected_class_of_shifted_nine = np.argmax(probabilities_of_shifted_nine)
print('The NN classified the shifted 9 as ', detected_class_of_shifted_nine, ' with a probability of ', probabilities_of_shifted_nine[detected_class_of_shifted_nine])
# + colab={"base_uri": "https://localhost:8080/", "height": 671} colab_type="code" executionInfo={"elapsed": 805, "status": "ok", "timestamp": 1600017900646, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiEI-GAxszORnjA0WqbcKrAT2dFjghG7ikD4zjctA=s64", "userId": "05156771066106099172"}, "user_tz": -120} id="vM3104LCnknu" outputId="974ebe64-2a20-4735-db50-0677475487a3"
# Example of noised 9: add 50 random white ("salt") and 50 random black
# ("pepper") pixels to the original image, then clip back into [0, 1].
noised_nine = np.copy(image_with_9)
for noise_value in (1, -1):
    # A fresh noise array per pass; the previous code reused (and never
    # cleared) one array, so the +1 salt noise was added a second time
    # together with the -1 pepper noise.
    noise = np.zeros_like(image_with_9)
    # Multi-dimensional fancy indexing requires a *tuple* of index arrays;
    # the old list-of-arrays form is rejected by modern NumPy.
    image_coordinates = tuple(np.random.randint(0, i - 1, 50) for i in noise.shape)
    noise[image_coordinates] = noise_value
    noised_nine += noise
noised_nine = np.clip(noised_nine, 0, 1)
plt.figure()
plt.imshow(image_with_9, cmap=plt.cm.binary)
plt.title("This is the correct 9")
plt.show()
plt.figure()
plt.imshow(noised_nine, cmap=plt.cm.binary)
plt.title("This is the noised 9")
plt.show()
from scipy.special import softmax
logits_of_nine = model.predict(np.expand_dims(image_with_9, 0))
probabilities_of_nine = softmax(logits_of_nine)[0]
detected_class_of_nine = np.argmax(probabilities_of_nine)
print('The NN classified the 9 as ', detected_class_of_nine, ' with a probability of ', probabilities_of_nine[detected_class_of_nine])
logits_of_noised_nine = model.predict(np.expand_dims(noised_nine, 0))
probabilities_of_noised_nine = softmax(logits_of_noised_nine)[0]
detected_class_of_noised_nine = np.argmax(probabilities_of_noised_nine)
print('The NN classified the noised 9 as ', detected_class_of_noised_nine, ' with a probability of ', probabilities_of_noised_nine[detected_class_of_noised_nine])
|
english_version/modul_1.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Variational Nystrom
# +
import numpy as np
from sklearn.utils import check_random_state
from scipy.sparse import csr_matrix, spdiags
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.neighbors import NearestNeighbors
plt.style.use('ggplot')
# %matplotlib inline
# -
# ## Variational Nystrom Method
# $X \in \mathbb{R}^{NxN}$
# ## Generate Data
# +
rng = check_random_state(1234)
N = 20000
a = (( 7 / 2 * np.pi - np.pi/2) * (rng.rand(N)**0.65) + np.pi/2)
t = 100 * rng.rand(N)
data = np.vstack((a * np.cos(a), t, a * np.sin(a))).T
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(data[:, 0], data[:, 1], data[:, 2])
plt.show()
# -
# ## Gaussian Affinities
# +
# %%time
# some baseline parameters
n_neighbors = 200
algorithm = 'brute'
metric = 'euclidean'
p=2
n_jobs = -1
# initialize nn model
nn_model = NearestNeighbors(
n_neighbors=n_neighbors,
metric=metric,
algorithm=algorithm,
p=p,
n_jobs=n_jobs
)
# fit nn model to data
nn_model.fit(data);
# grab distances and indices
dists, indices = nn_model.kneighbors(
data,
n_neighbors=n_neighbors,
return_distance=True
)
# -
def heat_kernel(distances, length_scale=None):
    """Gaussian (heat) affinity: exp(-d**2 / length_scale).

    A `length_scale` of None defaults to 1.0.  Works elementwise on scalars
    or numpy arrays of distances.
    """
    scale = 1.0 if length_scale is None else length_scale
    return np.exp(-(distances ** 2) / scale)
dists = heat_kernel(dists, 2)
# +
# Construct sparse KNN Graph
n_samples = data.shape[0]
indptr = np.arange(0, n_samples * n_neighbors + 1, n_neighbors)
adjacency_matrix = csr_matrix((dists.ravel(), indices.ravel(), indptr), shape=(n_samples, n_samples))
# ensure that it is symmetric
adjacency_matrix = 0.5 * (adjacency_matrix + adjacency_matrix.T)
# -
|
notebooks/uncategorized/eigenmap/variational_nystrom.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Fourier spectral methods in Matlab (and Python)
# Developed by <NAME> for a course on Approximation Theory and Spectral Methods at the University of Washington.
#
# See <http://faculty.washington.edu/rjl/classes/am570a2015/codes.html> for more IPython Notebook examples.
# These examples are based on material in <NAME>'s book Spectral Methods in Matlab. The m-files for this book are available at <http://people.maths.ox.ac.uk/trefethen/spectral.html>
# %load_ext pymatbridge
# ## Program 5
# This example is directly from p5.m found at <http://people.maths.ox.ac.uk/trefethen/spectral.html>
# + language="matlab"
#
# % p5.m - repetition of p4.m via FFT
# % For complex v, delete "real" commands.
#
# % Differentiation of a hat function:
# N = 24; h = 2*pi/N; x = h*(1:N)';
# v = max(0,1-abs(x-pi)/2); v_hat = fft(v);
# w_hat = 1i*[0:N/2-1 0 -N/2+1:-1]' .* v_hat;
# w = real(ifft(w_hat)); clf
# subplot(2,2,1), plot(x,v,'.-','markersize',13)
# axis([0 2*pi -.5 1.5]), grid on, title('function')
# subplot(2,2,2), plot(x,w,'.-','markersize',13)
# axis([0 2*pi -1 1]), grid on, title('spectral derivative')
#
# % Differentiation of exp(sin(x)):
# v = exp(sin(x)); vprime = cos(x).*v;
# v_hat = fft(v);
# w_hat = 1i*[0:N/2-1 0 -N/2+1:-1]' .* v_hat;
# w = real(ifft(w_hat));
# subplot(2,2,3), plot(x,v,'.-','markersize',13)
# axis([0 2*pi 0 3]), grid on
# subplot(2,2,4), plot(x,w,'.-','markersize',13)
# axis([0 2*pi -2 2]), grid on
# error = norm(w-vprime,inf);
# text(2.2,1.4,['max error = ' num2str(error)])
#
# -
# ## Illustration of spectral differentiation
# To make this a bit clearer, first illustrate how to compute the second derivative of periodic function.
# Start with $$u = \exp(\cos(x)),$$ and check that the numerical approximation agrees well with $$u''(x) = (\sin^2(x) - \cos(x)) \exp(\cos(x)).$$
#
# The only tricky thing here is the order of the indices in the wave number vector.
# + language="matlab"
# N = 16;
# x = linspace(2*pi/N,2*pi,N);
# ik = 1i*[0:N/2 -N/2+1:-1]; % i * wave number vector (matlab ordering)
# ik2 = ik.*ik; % multiplication factor for second derivative
#
# u = exp(cos(x));
# u_hat = fft(u);
# v_hat = ik2 .* u_hat;
# v = real(ifft(v_hat)); % imaginary part should be at machine precision level
#
# error = v - (sin(x).^2 - cos(x)) .* exp(cos(x));
# norm(error,inf)
# -
# ## Illustration of solving a periodic boundary value problem
# Now let's solve the boundary value problem
# $$u''(x) = f(x)$$
# on $0 \leq x \leq 2\pi$ with periodic boundary conditions and the constraint $\int_0^{2\pi} u(x) dx = 0$.
#
# Use $f(x) = (\sin^2(x) - \cos(x)) \exp(\cos(x))$ so the solution should be $u(x) = \exp(\cos(x)) + C$, where the constant is chosen so the integral constraint is satisfied.
#
# We now have to divide by `ik2`, with the complication that 1/0 should be replaced by 0. This results in the $\hat u_0 = 0$, which gives the integral constraint.
# + language="matlab"
#
# N = 16;
# x = linspace(2*pi/N,2*pi,N);
# f = (sin(x).^2 - cos(x)) .* exp(cos(x));
# f_hat = fft(f);
#
# ik = 1i*[0:N/2 -N/2+1:-1]; % i * wave number vector (matlab ordering)
# ik2 = ik.*ik; % multiplication factor for second derivative
# ii = find(ik ~= 0); % indices where ik is nonzero
# ik2inverse = ik2; % initialize zeros in same locations as in ik2
# ik2inverse(ii) = 1./ik2(ii); % multiplier factor to solve u'' = f
#
# u_hat = ik2inverse .* f_hat;
# u = real(ifft(u_hat)); % imaginary parts should be roundoff level
# -
# Plotting the solution shows that it is a shifted version of $\exp(\cos(x))$:
# + language="matlab"
# plot(x,u,'b-o')
# hold on
# v = exp(cos(x));
# plot(x,v,'r-o')
# -
# If we shift so that one value of $u$ agrees with $v$, then we hope everything will line up:
# + language="matlab"
# u2 = u + v(1)-u(1);
# norm(u2 - v, inf)
# -
# ## Python versions:
# %pylab inline
# We repeat these examples in Python. The codes are essentially identical, with some changes from Matlab to Python notation.
#
# First illustrate how to compute the second derivative of periodic function.
# Start with $$u = \exp(\cos(x)),$$ and check that the numerical approximation agrees well with $$u''(x) = (\sin^2(x) - \cos(x)) \exp(\cos(x))$$
# +
# NOTE: modern SciPy exposes fft/ifft inside the scipy.fft module; the old
# top-level `from scipy import fft, ifft` no longer works.
from scipy.fft import fft, ifft

N = 16
x = linspace(2*pi/N, 2*pi, N)
# i * wave number vector (matlab ordering).  N//2 keeps the bounds integers
# so this also runs under Python 3, where N/2 is a float and breaks range().
ik = 1j*hstack((arange(0, N//2 + 1), arange(-N//2 + 1, 0)))
ik2 = ik*ik              # multiplication factor for second derivative

u = exp(cos(x))
u_hat = fft(u)
v_hat = ik2 * u_hat
v = real(ifft(v_hat))    # imaginary part should be at machine precision level

# Compare against the analytic second derivative of exp(cos(x)).
error = v - (sin(x)**2 - cos(x)) * exp(cos(x))
norm(error, inf)
# -
# Now let's solve the boundary value problem
# $$u''(x) = f(x)$$
# on $0 \leq x \leq 2\pi$ with periodic boundary conditions and the constraint $\int_0^{2\pi} u(x) dx = 0$.
#
# Use $f(x) = (\sin^2(x) - \cos(x)) \exp(\cos(x))$ so the solution should be $u(x) = \exp(\cos(x)) + C$, where the constant is chosen so the integral constraint is satisfied.
# +
# Solve u'' = f with periodic boundary conditions and zero-mean constraint,
# spectrally: divide by (ik)^2, mapping the 1/0 at wavenumber 0 to 0.
N = 16;
x = linspace(2*pi/N,2*pi,N)
f = (sin(x)**2 - cos(x)) * exp(cos(x))
f_hat = fft(f)
# FIX: N//2 keeps the range() arguments integers -- N/2 is a float in
# Python 3 and range() raises TypeError.
ik = 1j*hstack((range(0, N//2 + 1), range(-N//2 + 1, 0)))  # i * wave number vector (matlab ordering)
ik2 = ik*ik;  # multiplication factor for second derivative
# 1/ik2 except at the zero wavenumber, where 0 enforces u_hat[0] = 0
# (the integral constraint).
ik2inverse = where(ik2 != 0, 1./ik2, 0.)
u_hat = ik2inverse * f_hat;
u = real(ifft(u_hat))  # imaginary parts should be roundoff level
plot(x,u,'b-o')
v = exp(cos(x));
plot(x,v,'r-o')
# -
# Again we get good agreement if we shift by the difference at the left-most point:
u2 = u + v[0]-u[0]
norm(u2 - v, inf)
|
sphinx/_static/Fourier-Spectral.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.3 64-bit (conda)
# metadata:
# interpreter:
# hash: 00c5ebd15dde17c77a6b6f9ae051f794b5549b54221bac7cfc6b836366532e51
# name: python3
# ---
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
pd.set_option('display.max_rows', 40)
pd.set_option('display.max_columns', 20)
pd.set_option('display.width', 200)
# -
# # Downloading a csv in internet and loding it into a DataFrame
# > 1. Download the file using urlretrive
# > 2. Load the downloaded file in a dataframe
# +
# Download a CSV to disk with urlretrieve, then read it with pandas.
from urllib.request import urlretrieve
# Import pandas
import pandas as pd
# Assign url of file: url
url = 'https://s3.amazonaws.com/assets.datacamp.com/production/course_1606/datasets/winequality-red.csv'
# Save file locally (assumes a local 'datasets/' directory exists)
urlretrieve(url, 'datasets/winequality-red.csv')
# Read file into a DataFrame and print its head; the source uses ';' as separator
df = pd.read_csv('datasets/winequality-red.csv', sep=';')
print(df.head())
# -
# # Loading a file from internet directly in a DataFrame
# > pd.read_csv accepts a URL directly -- no intermediate download needed
# +
# Import packages
import matplotlib.pyplot as plt
import pandas as pd
# Assign url of file: url
url = 'https://s3.amazonaws.com/assets.datacamp.com/production/course_1606/datasets/winequality-red.csv'
# Read file into a DataFrame straight from the URL: df
df = pd.read_csv(url, sep=';')
# Print the head of the DataFrame
print(df.head())
# Plot first column of df (fixed acidity) as a histogram
pd.DataFrame.hist(df.iloc[:, 0:1])
plt.xlabel('fixed acidity (g(tartaric acid)/dm$^3$)')
plt.ylabel('count')
plt.show()
# +
# Read every sheet of a remote Excel workbook in one call.
import pandas as pd
# Assign url of file: url
url = 'http://s3.amazonaws.com/assets.datacamp.com/course/importing_data_into_r/latitude.xls'
# Read in all sheets of Excel file: sheet_name=None returns a dict
# {sheet name: DataFrame}
xls = pd.read_excel(url, sheet_name=None)
# Print the sheetnames to the shell
print(xls.keys())
# Print the head of the first sheet (using its name, NOT its index)
print(xls['1700'].head())
# +
## Loading HTTP code using urllib
# + tags=[]
# Fetch a page with the standard library's urllib.
from urllib.request import urlopen, Request
# Specify the url
url = "https://campus.datacamp.com/courses/1606/4135?ex=2"
# This packages the request
request = Request(url)
# Sends the request and catches the response: response
response = urlopen(request)
# Extract the response body as bytes: html
html = response.read()
# Print the first 100 bytes of the html
print(html[:100])
# Be polite and close the response!
# NOTE(review): `with urlopen(request) as response:` would guarantee the
# close even on error.
response.close()
# -
# # Loading HTTP code using requests
# +
# Same fetch with the third-party `requests` library -- one call, str result.
import requests
# Specify the url: url
url = "http://www.datacamp.com/teach/documentation"
# Packages the request, send the request and catch the response: r
r = requests.get(url)
# Extract the decoded response body: text
text = r.text
# Print the first 100 characters of the html
print(text[:100])
# -
# ## BeautifulSoup
# +
# Fetch Guido's homepage and pretty-print its HTML with BeautifulSoup.
import requests
from bs4 import BeautifulSoup
# Specify url: url
url = 'https://www.python.org/~guido/'
# Package the request, send the request and catch the response: r
r = requests.get(url)
# Extracts the response as html: html_doc
html_doc = r.text
# FIX: name the parser explicitly -- omitting it raises
# GuessedAtParserWarning and makes the parse result depend on which
# parsers happen to be installed on the machine.
soup = BeautifulSoup(html_doc, 'html.parser')
# Prettify the BeautifulSoup object: pretty_soup
pretty_soup = soup.prettify()
# Print the first 1000 characters of the re-indented markup
print(pretty_soup[:1000])
# +
# Extract the <title> and the visible text of Guido's homepage.
import requests
from bs4 import BeautifulSoup
# Specify url: url
url = 'https://www.python.org/~guido/'
# Package the request, send the request and catch the response: r
r = requests.get(url)
# Extract the response as html: html_doc
html_doc = r.text
# FIX: pass an explicit parser -- omitting it raises GuessedAtParserWarning
# and yields environment-dependent parsing.
soup = BeautifulSoup(html_doc, 'html.parser')
# Get the title of Guido's webpage: guido_title
guido_title = soup.title
# Print the title of Guido's webpage to the shell
print(guido_title)
# Get Guido's text (all markup stripped): guido_text
guido_text = soup.get_text()
# Print the first 1000 characters of the text
print(guido_text[:1000])
# +
# List every hyperlink target on Guido's homepage.
import requests
from bs4 import BeautifulSoup
# Specify url
url = 'https://www.python.org/~guido/'
# Package the request, send the request and catch the response: r
r = requests.get(url)
# Extracts the response as html: html_doc
html_doc = r.text
# FIX: pass an explicit parser -- omitting it raises GuessedAtParserWarning
# and yields environment-dependent parsing.
soup = BeautifulSoup(html_doc, 'html.parser')
# Print the title of Guido's webpage
print(soup.title)
# Find all 'a' tags (which define hyperlinks): a_tags
a_tags = soup.find_all('a')
# Print the URLs to the shell; .get('href') returns None for anchors
# without an href attribute rather than raising
for link in a_tags:
    print(link.get('href'))
# -
# ## Load json file
# +
import json
# Load a local JSON document into a dict: json_data
with open("datasets/tweets3.txt") as json_file:
    json_data = json.load(json_file)
# Print each key-value pair in json_data
# (idiom note: `for k in json_data:` iterates keys directly)
for k in json_data.keys():
    print(k + ': ', json_data[k])
# -
# ## API requests
# >Now it's your turn to pull some movie data down from the Open Movie Database (OMDB) using their API.
# >
# >The movie you'll query the API about is The Social Network. Recall that, in the video, to query the API about the movie Hackers, Hugo's query string was 'http://www.omdbapi.com/?t=hackers' and had a single argument t=hackers.
# +
# Query the OMDB API and print the raw response text.
import requests
# Assign URL to variable: url (query parameters live in the query string)
url = 'http://www.omdbapi.com/?apikey=72bc447a&t=the+social+network'
# Package the request, send the request and catch the response: r
r = requests.get(url)
# Print the text of the response
print(r.text)
# -
# ## JSON–from the web to Python
# +
# Same query, but decode the JSON body into a dict with r.json().
import requests
# Assign URL to variable: url
url = 'http://www.omdbapi.com/?apikey=72bc447a&t=social+network'
# Package the request, send the request and catch the response: r
r = requests.get(url)
# Decode the JSON data into a dictionary: json_data
json_data = r.json()
# Print each key-value pair in json_data
for k in json_data.keys():
    print(k + ': ', json_data[k])
# +
# Pull the intro extract of the Wikipedia "pizza" article via the MediaWiki API.
import requests
# Assign URL to variable: url
url = 'https://en.wikipedia.org/w/api.php?action=query&prop=extracts&format=json&exintro=&titles=pizza'
# Package the request, send the request and catch the response: r
r = requests.get(url)
# Decode the JSON data into a dictionary: json_data
json_data = r.json()
# Print the Wikipedia page extract
# NOTE(review): '24768' is the page id for "pizza" -- this lookup breaks if
# the page id ever changes; iterating json_data['query']['pages'].values()
# would be robust.
pizza_extract = json_data['query']['pages']['24768']['extract']
print(pizza_extract)
# -
# # Twitter API
#
# +
# Authenticate against the Twitter API with tweepy (credentials redacted).
import tweepy
# Store OAuth authentication credentials in relevant variables
# NOTE(review): never commit real credentials; load them from the
# environment or a secrets manager instead.
access_token = "<KEY>"
access_token_secret = "<KEY>"
consumer_key = "nZ6EA0FxZ293SxGNg8g8aP0HM"
consumer_secret = "<KEY>"
# Pass OAuth details to tweepy's OAuth handler
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
# -
|
courses/Intermediate Importing Data in Python/importing_data.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ---
# # Documentation: https://wowchemy.com/docs/managing-content/
#
# title: "A quick look at Indonesia's current account and balance of trade using World Bank's API in Python"
# subtitle: ""
# summary: ""
# authors: [admin]
# tags: [python, economics]
# categories: [python, economics]
# date: 2020-10-21T21:07:25+11:00
# lastmod: 2020-10-21T21:07:25+11:00
# featured: false
# draft: false
#
# # Featured image
# # To use, add an image named `featured.jpg/png` to your page's folder.
# # Focal points: Smart, Center, TopLeft, Top, TopRight, Left, Right, BottomLeft, Bottom, BottomRight.
# image:
# caption: ""
# focal_point: ""
# preview_only: false
#
# # Projects (optional).
# # Associate this post with one or more of your projects.
# # Simply enter your project's folder or file name without extension.
# # E.g. `projects = ["internal-project"]` references `content/project/deep-learning/index.md`.
# # Otherwise, set `projects = []`.
# projects: []
# ---
#
# Indonesia has been always famed for its lack of depth and innovation in its financial market. Banks are powerful and aim mostly at consumer banking. Indeed, the role of foreign investment is quite central to Joko 'Jokowi' Widodo's development policy, from building infrastructures and the same-price gasoline policy, both relies on SOE's corporate bonds, to attracting FDI to provide jobs. The high influx of foreign investment of course leads to a current account deficit, which means increase the surge of imports. This is a basic GDP accounting where current account balance is $=S-I=X-M$.
#
# Interestingly, minimizing Current Account Deficit (CAD) has been something Indonesian government set as their de facto policy targeting. Targeting CAD may be important(?) for stability, but might not be something you would want if you aim for growth. In this blog, I try to have a look at Indonesia's CAD, what's causing it, and a bit of insight from looking at it. My visualization relies on Python's Seaborn and [World Bank's API manager](https://github.com/OliverSherouse/wbdata), `wbdata`. My learning resources are [<NAME>'s blog](http://abdulbaqi.io/2017/09/13/Wdi/) and World Bank's [blog](https://blogs.worldbank.org/opendata/accessing-world-bank-data-apis-python-r-ruby-stata) and [documentation](https://wbdata.readthedocs.io/en/stable/).
#
# ## Using wbdata
#
# install it first using:
#
# ```
# pip install -U wbdata
# ```
#
# We then import it and see what are the source.
# +
# World Bank API client; get_source() lists the available databases.
import wbdata as wb
wb.get_source()
# -
# So many databases, but we will get what we want from World Development Indicators, number 2 on the above list. Now, let's search current account balance in the database
wb.search_indicators('current account balance',source=2)
# I will use one with % of GDP. I will take the current account balance only for Indonesia and only from 1981 to 2019. Don't forget to create a datetime tuple to limit the year you're taking.
import pandas as pd
import datetime
# `tanggal` (Indonesian: "date") is the (start, end) window for the query
tanggal=(datetime.datetime(1981,1,1), datetime.datetime(2019,1,1))
# BN.CAB.XOKA.GD.zS = current account balance as % of GDP, Indonesia only
a=wb.get_dataframe({"BN.CAB.XOKA.GD.zS" : "Current account balance (% of GDP)"}, country=["IDN"], data_date=tanggal, convert_date=True, keep_levels=True)
a=a.reset_index()
a.head()
# Let's visualize it with seaborn
import seaborn as sns; sns.set()
import matplotlib.pyplot as plt
plt.figure(figsize=(13.5,8))
graph1=sns.lineplot(x="date", y="Current account balance (% of GDP)",data=a)
# zero line plus a marker for the 1998 Asian Financial Crisis
graph1.axhline(0, color='black')
graph1.axvline(datetime.datetime(1998,1,1), color='red')
graph1.text(datetime.datetime(1999,1,1),-6, "Asian Financial Crisis",color='red',size='x-large')
plt.show()
# The above graph is Indonesia's current account balance. I suppose it is clear to us that Indonesia was running a current account deficit since the collapse of oil prices in the early 80s. Financial liberalization then took place around the same period and became progressively deeper up until right before the Asian Financial Crisis (AFC) in 1998. Indeed, running a mismanaged CAD is somewhat dangerous when the investment is not allocated efficiently. Those investments were not productive and Indonesia failed to earn enough foreign currency to pay back its debt. The economy then crashed, and perhaps haunts us until now. This is, perhaps, the reason why we hate CAD so much.
#
# Indonesia ran a positive current account since then, but starting in 2011 the CAD reappeared and has persisted consistently up until now. The anti-import sentiment returned, and the government has been raising Non-Tariff Measures to combat it. In fact, according to [ERIA's publication](https://www.eria.org/uploads/media/10.ERIA_Book_2019_NTM_Update_Chapter_3.pdf), the number of NTMs shot up by almost 30% from 2015 to 2018.
#
# ## What causes our CAD?
#
# Interestingly, Indonesia's trade balance (export - import) has always been positive. We have been exporting enough to offset our import. Our deficit was mainly driven by a negative net primary income. The negative net primary income was driven by interest payment from portfolio and FDI. Indonesia's foreign investment inflow has largely been used to actually pay these interests. That's one reason why Indonesian policy makers talk a lot on speeding up growth and improving export relative to import, because reducing net primary income deficit is much harder in the short run.
# Net primary income (BN.GSR.FCTY.CD) vs net goods trade (BN.GSR.MRCH.CD),
# both converted from USD to millions of USD before plotting.
b=wb.get_dataframe({"BN.GSR.FCTY.CD" : "Net Primary Account (Current USD)"}, country=["IDN"], data_date=tanggal, convert_date=True, keep_levels=True)
b=b.reset_index()
b.head()
c=wb.get_dataframe({"BN.GSR.MRCH.CD" : "Net Trade in goods (Current USD)"}, country=["IDN"], data_date=tanggal, convert_date=True, keep_levels=True)
c=c.reset_index()
# scale to millions; both frames share the same date order so the columns align
b["Net Trade in goods (Current USD)"]=c["Net Trade in goods (Current USD)"]/1000000
b["Net Primary Account (Current USD)"]=b["Net Primary Account (Current USD)"]/1000000
b=b.set_index('date')
del b['country']
plt.figure(figsize=(13.5,8))
graph2=sns.lineplot(data=b)
graph2.axhline(color='black')
# axis labels are in Indonesian: "Juta USD" = million USD, "Tahun" = year
graph2.set(ylabel='Juta USD',xlabel='Tahun')
plt.show()
# ## Increasing export or reducing import?
#
# If we take a look at the graph below, Indonesia's import follows its export closely.
# Goods exports (BX.GSR.MRCH.CD) vs goods imports (BM.GSR.MRCH.CD),
# scaled to millions of USD and plotted on one chart.
b=wb.get_dataframe({"BX.GSR.MRCH.CD" : "Goods Export (Current USD)"}, country=["IDN"], data_date=tanggal, convert_date=True, keep_levels=True)
b=b.reset_index()
b.head()
c=wb.get_dataframe({"BM.GSR.MRCH.CD" : "Goods Import (Current USD)"}, country=["IDN"], data_date=tanggal, convert_date=True, keep_levels=True)
c=c.reset_index()
b["Goods Export (Current USD)"]=b["Goods Export (Current USD)"]/1000000
b["Goods Import (Current USD)"]=c["Goods Import (Current USD)"]/1000000
b=b.set_index('date')
del b['country']
plt.figure(figsize=(13.5,8))
graph2=sns.lineplot(data=b)
graph2.set(ylabel='Juta USD',xlabel='Tahun')
plt.show()
# Improving export in the short run can be challenging, especially given the current global situation. Indonesia's two main industries mining and plantation, are still performing quite well, but these two commodities are very sensitive to price volatility, and [COVID-19 makes it worse](https://www.eastasiaforum.org/2020/07/16/covid-19-punishes-indonesian-commodity-exporters/).
#
# Unfortunately, trying to reduce import is also not ideal. Indonesian imports are currently consist of many industrial supplies and capital goods, needed for firms to operate. In fact, for firms operate in the Global Value Chain (GVC), imported inputs are crucial for them to be competitive in the global market. This is perhaps the reason why Indonesian import and export are moving together, because its exporting firms need imports to be able to export. Reducing imports can be bad both in the short run (industrial supplies) and in the long run (capital goods).
# This cell does not use wbdata: it plots UN COMTRADE data exported from
# another project ('data.csv' must be present locally).
import pandas as pd
import seaborn as sns; sns.set()
import matplotlib.pyplot as plt
c=pd.read_csv('data.csv')
c=c.rename(columns = {'Trade Value (US$)' : 'Trade Value (Million US$)'})
# shorten the very long BEC commodity labels for the legend
c['Commodity']=c['Commodity'].replace({'Capital goods (except transport equipment), and parts and accessories thereof':'Capital goods'})
c['Commodity']=c['Commodity'].replace({'Transport equipment, and parts and accessories thereof':'Transport eq & parts'})
c['Trade Value (Million US$)']=c['Trade Value (Million US$)']/1000000
plt.figure(figsize=(13.5,8))
g=sns.lineplot(x="Period", y="Trade Value (Million US$)", hue="Commodity",data=c)
g.legend(loc='upper left', ncol=1)
# Indeed, Indonesia face challenge again during this times. I personally think that reducing import is a bad move. What is important is to make sure its investment pays off and keep [foreign inventor's confidence high](https://www.eastasiaforum.org/2020/10/19/can-bank-indonesia-protect-its-independence/).
|
content/id/post/imporinput/.ipynb_checkpoints/Untitled-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/RohanOpenSource/ml-notebooks/blob/main/Regression.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="YtAg6KgqlY0v"
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt
# + colab={"base_uri": "https://localhost:8080/", "height": 282} id="vN7_BYZ-n6_-" outputId="3ba392e1-29ec-477d-d089-7cee4f5506c8"
# Toy regression dataset: y = X + 10 for seven points.
X = np.array([-7.0, -4.0, -1.0, 2.0, 5.0, 8.0, 11.0])
y = np.array([3.0, 6.0, 9.0, 12.0 ,15.0, 18.0, 21.0])
plt.scatter(X, y)
# + colab={"base_uri": "https://localhost:8080/"} id="JE_N6BNEoctl" outputId="ec7f4f22-ec8d-482e-981f-ca4bb7c56e61"
# sanity check: every target equals its input plus 10
y == X + 10
# + id="IGzy4AgkopG2"
# Small MLP trained with mean absolute error to recover the linear rule.
model = tf.keras.Sequential([
    tf.keras.layers.Dense(100, activation="relu"),
    tf.keras.layers.Dense(100, activation="relu"),
    tf.keras.layers.Dense(100, activation="relu"),
    tf.keras.layers.Dense(1)
])
model.compile(loss = tf.losses.mae, optimizer = tf.keras.optimizers.Adam(learning_rate=0.003), metrics = ["mae"])
model.fit(X, y, epochs = 100)
# + colab={"base_uri": "https://localhost:8080/"} id="IZnnMykd9Nsi" outputId="dccda50b-69f2-4a20-8270-d8d509543dbb"
# expect a prediction near 27 (= 17 + 10)
model.predict([17.0])
# + id="Y73UpBItLC4_"
# Larger dataset: 50 evenly spaced points with the same linear relationship.
X = tf.range(-100, 100, 4)
y = X + 10
# + colab={"base_uri": "https://localhost:8080/", "height": 282} id="naqkWJRvLLuO" outputId="9c0edc47-04e6-41db-a146-caea3640f756"
plt.scatter(X, y)
# + [markdown] id="abOlqxy3L3av"
# We want 3 different data sets for training a model. Training set(80%), Validation set(10%)(optional)
# Test set(10%)
# + colab={"base_uri": "https://localhost:8080/"} id="-agTd8gaMNmf" outputId="f704ff19-e92b-4bcd-eb16-8b05400df085"
# 80/20 split of the 50 samples (first 40 train, last 10 test);
# no shuffling, so the test set is the upper end of the range
X_train = X[:40]
X_test = X[40:]
y_train = y[:40]
y_test = y[40:]
len(X_train), len(X_test)
# + id="Xbep1c2rNpcH"
# Same architecture, trained only on the training split.
model_1 = tf.keras.Sequential(
    [
    tf.keras.layers.Dense(100, activation="relu"),
    tf.keras.layers.Dense(100, activation="relu"),
    tf.keras.layers.Dense(100, activation="relu"),
    tf.keras.layers.Dense(1)
])
model_1.compile(loss=tf.keras.losses.mae, optimizer=tf.optimizers.Adam(learning_rate=0.002), metrics=["mae"])
model_1.fit(X_train, y_train, epochs=200, verbose = 1)
# + id="IBxzqjLOG0Jg"
X_test
# + id="ef2rV22QGdZQ"
# predictions on held-out inputs (extrapolation beyond the training range)
model_1.predict(X_test)
|
Regression.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
from IPython.display import Image
Image('../img/schematic.png', width=300)
# +
from padl import same, transform, batch, unbatch, value, IfTrain
import torch
import re
import json
# train/valid corpora are JSON lists of sentence strings
with open('data/lm/train.json') as f: train_data = json.load(f)
with open('data/lm/valid.json') as f: valid_data = json.load(f)
# Build the sorted vocabulary of unique whitespace tokens in the training set
WORDS = []
for x in train_data:
    WORDS.extend(x.split())
WORDS = sorted(list(set(WORDS)))
# +
# Text-normalization transforms for the language-model pipeline.
lower_case = same.lower()
# NOTE(review): the character class '[^a-zA_Z ]' looks like a typo for
# '[^a-zA-Z ]' (it matches a-z, 'A', '_' and 'Z' only). Harmless in the
# pipeline because lower_case runs first, but confirm the intent.
clean = transform(lambda x: re.sub('[^a-zA_Z ]', '', x))
@transform
def tokenize(sentence):
    # whitespace tokenization
    return sentence.split()
@transform
class ToInteger:
    # Maps tokens to integer ids; unknown tokens fall back to "<unk>".
    def __init__(self, words):
        # append the end-of-sentence token to the vocabulary
        self.words = words + ['</s>']
        self.dictionary = dict(zip(self.words, range(len(self.words))))
    def __call__(self, word):
        # NOTE(review): assumes "<unk>" itself occurs in `words`;
        # otherwise this raises KeyError -- verify against the corpus.
        if not word in self.dictionary:
            word = "<unk>"
        return self.dictionary[word]
to_integer = ToInteger(value(WORDS))
EOS_VALUE = to_integer.dictionary['</s>']
@transform
def to_tensor(x):
    # truncate to 10 tokens and pad with </s> up to length 10
    x = list(x[:10][:])
    for _ in range(10 - len(x)):
        x.append(EOS_VALUE)
    return torch.tensor(x)
# drop the last / first token of each sequence (batch dimension first)
left_shift = same[:, :-1]
right_shift = same[:, 1:]
unk_value = value(to_integer.dictionary['<unk>'])
WORD_DROPOUT_PROBABILITY = 0.2
@transform
def word_dropout(tensor_input):
    """Randomly replace token ids with the <unk> id (word-dropout regularization).

    Each position is independently kept with probability
    1 - WORD_DROPOUT_PROBABILITY; dropped positions become `unk_value`.
    """
    mask = (
        torch.rand(*tensor_input.shape) > WORD_DROPOUT_PROBABILITY
    ).type(torch.long)
    # BUG FIX: the original referenced undefined `UNK_VALUE` and raised
    # NameError during training -- the module defines `unk_value`.
    out = mask * tensor_input + (1 - mask) * unk_value
    return out
@transform
class Loss:
    # Mean per-sequence cross entropy between logits x and target ids y.
    def __call__(self, x, y):
        # x: (batch, seq, vocab) logits; y: (batch, seq) target ids
        l = 0
        for i in range(x.shape[0]):
            l += torch.nn.functional.cross_entropy(x[i], y[i])
        return l / x.shape[0]
loss = Loss()
@transform
class LM(torch.nn.Module):
    # GRU language model: embed -> 2-layer GRU(512) -> vocab projection.
    def __init__(self, n_words):
        super().__init__()
        self.rnn = torch.nn.GRU(64, 512, 2, batch_first=True)
        self.embed = torch.nn.Embedding(n_words, 64)
        self.project = torch.nn.Linear(512, n_words)
    def forward(self, x):
        # [0] keeps the GRU output sequence, discarding the hidden state
        output = self.rnn(self.embed(x))[0]
        return self.project(output)
N_WORDS = value(len(to_integer.words))
model = LM(N_WORDS)
# -
# Print each transform in the pipeline for inspection.
for t in [
    lower_case,
    clean,
    tokenize,
    to_integer,
    to_tensor,
    left_shift,
    right_shift,
    loss,
    model,
]:
    print('-' * 10)
    print(t)
# +
# Preprocessing: normalize -> tokenize -> map tokens (~ applies elementwise)
# -> pad/truncate to tensors -> collate into batches.
preprocess = (
    lower_case
    >> clean
    >> tokenize
    >> ~ to_integer
    >> to_tensor
    >> batch
)
preprocess
# +
# NOTE(review): forward_pass (shift inputs before the model) is defined here
# but train_model below composes the model differently -- confirm which
# arrangement is intended.
forward_pass = (
    left_shift
    >> IfTrain(word_dropout)
    >> model
)
forward_pass
# +
# Targets are the input tokens shifted one position left (next-token labels).
targets = (
    preprocess >> right_shift
)
targets
# +
# Full training graph: logits for positions :-1 paired with targets 1:,
# reduced by the cross-entropy loss.
train_model = (
    (preprocess >> model >> left_shift)
    + targets
) >> loss
train_model
# -
# Smoke test: run a single string through the inference path.
train_model.infer_apply('test an input')
# +
# Short training loop over the first 100 sentences.
optimizer = torch.optim.Adam(train_model.pd_parameters())
for l in train_model.train_apply(train_data[:100], batch_size=10):
    optimizer.zero_grad()
    l.backward()
    optimizer.step()
    print('loss is:', l.item())
# -
# Serialize the whole pipeline (code + weights) to a .padl directory.
train_model.pd_save('test.padl', force_overwrite=True)
# !ls test.padl
# !cat test.padl/versions.txt
# !cat test.padl/transform.py
|
notebooks/02_nlp_example.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: chem
# language: python
# name: chem
# ---
# #### Exploration of the *in vitro* Fub and Clint datasets
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
import os
import glob
# NOTE(review): absolute user-specific paths -- consider pathlib +
# a project-relative data directory for portability.
raw_dir = '/home/grace/Documents/python/httk/data/raw/'
processed_dir = '/home/grace/Documents/python/httk/data/processed/'
interim_dir = '/home/grace/Documents/python/httk/data/interim/'
figures_dir = '/home/grace/Documents/python/httk/reports/figures/'
external_dir = '/home/grace/Documents/python/httk/data/external/'
# #### Reading in the data files
# fub = fraction unbound in plasma; clint = intrinsic clearance
fub = pd.read_csv(raw_dir+'Fub_1139.csv')
clint = pd.read_csv(raw_dir+'Cl_642.csv')
# Shape of the human in vitro Fub dataset - 1139 chemicals
fub.shape
# Shape of the in vitro Clint data - 642 chemicals
clint.shape
fub.head()
# index chemicals by their CAS registry number
fub.set_index('CASRN', inplace = True)
plt.hist(fub['Human.Funbound.plasma'], bins = 20)
plt.xlabel('Original Fraction Unbound');
clint.head()
plt.hist(clint['Human.Clint'])
plt.xlabel('Original Intrinsic clearance');
# #### Read AR-ER data to keep those chemicals as an external test set
# +
# Androgen-receptor (AR) pathway actives: keep ACC columns for chemicals
# active (AUC > 0.1) as agonist or antagonist.
AR_data = pd.read_excel(external_dir+'Supplemental File 2_ARpathway_Results_ConfScores_CI_2016-08-30.xlsx', index_col='CASRN')
AR_ACC_columns = [col for col in AR_data if col.endswith('ACC')]
AR_data_subset = AR_data[(AR_data['AUC.Agonist']>0.1) | (AR_data['AUC.Antagonist']>0.1)][AR_ACC_columns]
# Estrogen-receptor (ER) data, same filtering
ER_data = pd.read_excel(external_dir+'S2 ER SuperMatrix 2015-03-24.xlsx', index_col='CASRN')
ER_ACC_columns = [col for col in ER_data if col.endswith('ACC')]
ER_data_subset = ER_data[(ER_data['AUC.Agonist']>0.1) | (ER_data['AUC.Antagonist']>0.1)][ER_ACC_columns]
## Combine ER-AR data; 1000000 is the source's "inactive" sentinel -> NaN
ERARdata = pd.concat([AR_data_subset, ER_data_subset], axis = 1)
ERARdata.replace(1000000, np.nan, inplace = True)
# -
ERARdata.shape
## Training set = Fub chemicals NOT in the ER/AR external test set
trainingData = fub.loc[fub.index.difference(ERARdata.index)]
trainingData.shape
trainingData.head()
plt.hist(trainingData['Human.Funbound.plasma']);
y_var = 'Human.Funbound.plasma'
Y = trainingData[y_var]
# clamp the endpoints away from 0 and 1 so the logit-style transform below
# stays finite
Y = Y[Y!= 0]
Y[Y==1.0] = 0.99
# NOTE(review): this line is dead code -- zeros were just removed by
# `Y = Y[Y != 0]`, so `Y == 0` can never be True here. Confirm whether
# zeros were meant to be kept and mapped to 0.005 instead.
Y[Y==0] = 0.005
Y.shape
# +
## Extract y data (repeats the clamping above on a fresh copy)
Y = trainingData[y_var]
## Transform Y
Y = Y[Y!= 0]
Y[Y==1.0] = 0.99
# NOTE(review): dead code, see the note above.
Y[Y==0] = 0.005
# log10((1 - Fub) / Fub): bound/unbound ratio on a log scale
Y_model = (1-Y)/Y
Y_model = Y_model.apply(lambda x: np.log10(x))
Y_index = Y_model.index
# -
Y_model.hist(bins=20, alpha = 0.8, grid=False);
|
notebooks/01_Data_Exploration.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/manuel103/Machine-Learning/blob/master/ML_Lab_1_Group_Work.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="GxEHtkO8W52c" colab_type="code" colab={}
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# + id="JOVwRfmZW52n" colab_type="code" colab={}
# Initial model parameters and training hyperparameters for y = m*x + c
m = 0
c = 0
L = 0.0001 # learning rate
epoch = 100
# filled per-iteration by gradient_descent_fit below
epoch_error_list = []
epoch_count_list = []
# + id="LLujvx1aW52w" colab_type="code" colab={}
data = pd.read_csv('data.csv', header = 0)
# + id="nkT8OUEnW525" colab_type="code" colab={}
# X = office size (sq ft), Y = monthly rent (KSh x1000)
X = data['size_in_sq_ft']
Y = data['price_x1000_sh']
N = float(len(X))
# print(N)
# + id="za1TVY61W53C" colab_type="code" colab={}
# from scipy.spatial import distance
# manhattan_distance = distance.cityblock(X, Y)
'''
Lasso regression
'''
# NOTE(review): these "penalties" are computed from |X - Y| distances rather
# than from the model weights, so they are not standard lasso/ridge terms --
# confirm the lab's intent before reusing.
hyper_param = 1.5
w = sum(abs(X-Y))
lasso_reg = hyper_param*w
# print('Mine:::',w)
# print('Lib:::', manhattan_distance)
'''
Ridge regression
'''
wr = (sum(abs(X-Y)))**2
ridge_reg = hyper_param*wr
# print(wr)
# + id="ZYXTTv1kW53J" colab_type="code" colab={}
# Lasso variant kept from the original lab for reference:
'''
def mse(N, Y, Y_hat):
    return (1/N)* sum(Y-Y_hat)**2 - lasso_reg # mse minus lasso regression
'''
def mse(N, Y, Y_hat):
    """Ridge-regularized mean squared error of predictions Y_hat vs targets Y.

    BUG FIX: the original computed (1/N)*(sum(Y - Y_hat))**2 -- the square of
    the *sum* of residuals, where positive and negative errors cancel --
    instead of the mean of squared residuals. The regularization penalty is
    also added, not subtracted (loss = MSE + penalty). Relies on the
    module-level `ridge_reg`.
    """
    return (1/N) * sum((Y - Y_hat)**2) + ridge_reg
# + id="9XpoPQkUW53P" colab_type="code" colab={}
def gradient_descent_fit(m, c, N, L):
    """Fit y = m*x + c by batch gradient descent on the module-level X, Y.

    m, c : initial slope and intercept
    N    : number of samples (float)
    L    : learning rate
    Runs for the module-level `epoch` iterations, recording the loss of each.
    Returns (Y_hat, epoch_count_list, epoch_error_list) where Y_hat is the
    prediction made with the parameters from the final iteration's start.
    """
    epoch_error_list = []
    epoch_count_list = []
    for i in range(epoch):
        Y_hat = m*X + c
        # record this iteration's loss (the original's `lst = lst[:]`
        # self-copies were no-ops and have been removed)
        epoch_count_list.append(i)
        epoch_error = mse(N, Y, Y_hat)
        epoch_error_list.append(epoch_error)
        # partial derivatives of the squared-error loss w.r.t. m and c
        D_m = (-2/N) * sum(X*(Y - Y_hat))
        D_c = (-2/N) * sum(Y - Y_hat)
        m = m - (L * D_m)
        c = c - (L * D_c)
    print("final m is: ",m," and final c is: ",c)
    # return final predicted value of Y-dependent variable
    return Y_hat, epoch_count_list, epoch_error_list
# + id="FZOiDwMuW53Y" colab_type="code" colab={} outputId="4fcdac31-b930-4b17-c186-77eea1b51235"
# Call the gradient-descent fit and unpack its three return values.
returned_params=gradient_descent_fit(m,c,N,L)
y_predicted=returned_params[0]
total_epochs=returned_params[1]
all_epoch_errors=returned_params[2]
# Two stacked plots: the fitted line over the data, and the loss curve.
fig,(ax1,ax2)=plt.subplots(2,gridspec_kw={'top':2})
ax1.set(xlabel="Office size in Sq ft")
ax1.set(ylabel="Monthly Rent in Ksh. (X1000)")
ax1.set_title("Regression Model For Nairobi Office Prices.\n\n Graph 1 (Main): Line of Best Fit")
ax1.scatter(X,Y)
# draw the fitted line between the extreme predicted points
ax1.plot([min(X),max(X)],[min(y_predicted),max(y_predicted)],color='black')
ax2.set_title("Graph 2:MSE Monitoring Error Curve")
ax2.set(xlabel="Epochs/Iteration")
ax2.set(ylabel="Mean Squared Error")
ax2.plot(total_epochs,all_epoch_errors)
# reset the module-level accumulators for a clean re-run
epoch_count_list.clear()
epoch_error_list.clear()
# + id="aCUtFTL4W53j" colab_type="code" colab={}
|
ML_Lab_1_Group_Work.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# Python 2 notebook: builds Natural/Adabas data-area (LDA/GDA) dictionaries
# by exec-ing generated source strings.
# NOTE(review)/SECURITY: `exec compile(...)` runs file contents as code --
# only safe on fully trusted, locally generated files.
def_lda = '''lda = {'''
field = "    '#ATR':'          '"
dda = 'lda'
# append the field literal to the dictionary source under construction
comp = 'def_{} += field'.format(dda)
exec compile(comp, '', 'exec')
def_lda
import json
from Util.warehouse import DDA
def_gda = '''gda = {'''
references = {}
dda = 'GLOBAL'
Using = 'g11111aa'
# Python 2 `file()` builtin; the converted .txt holds dict-literal source
using = file(r'Convertidos\{}.txt'.format(Using)).read()
comp = 'def_{} += using'.format(DDA[dda])
exec compile(comp, '', 'exec')
# the matching .json carries field metadata (definition, length, ...)
filejson = file('Convertidos/{}.json'.format(Using)).read()
references.update(json.loads(filejson))
print def_gda
print references
references[u'#TAB-TXT-TELA'][u'def']
references[u'#TAB-TXT-TELA'][u'length']
# Example nested LDA structure and lookup
lda = {'#TELA':{'#CAMPO-ALFA': '        ', '#CAMPO-NUM': 123 }}
lda['#TELA']['#CAMPO-NUM']
# Build an indexing expression string from an ancestor path
ancestors = ['#TELA']
ancestors.append('#CAMPO-ALFA')
init = 123
lda = eval("""'{}'.format("['{}']" * len(ancestors))""").format(*ancestors)
attrb = eval("""'{}'.format("['{}': " * len(ancestors))""").format(*ancestors)
attrb = attrb.replace('[','{').replace(']', '}')
attrb
ref = eval("""'{}'.format("['{}']" * len(ancestors))""").format(*ancestors)
print ref
import json
from Util.warehouse import DDA
# Same flow for a LOCAL data area
def_lda = '''lda = {'''
references = {}
dda = 'LOCAL'
Using = 'L11111AA'
using = file(r'Convertidos\{}.txt'.format(Using)).read()
comp = 'def_{} += using'.format(DDA[dda])
exec compile(comp, '', 'exec')
filejson = file('Convertidos/{}.json'.format(Using)).read()
references.update(json.loads(filejson))
print def_lda
print references
i = None
print '>{}<'.format(i)
|
exec.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# default_exp core
# -
# # Sparse Core
#
# > Core functionality for sparsifying dense modules & models.
#hide
from nbdev.showdoc import *
#export
import numpy as np
import torch
import torch.nn as nn
#export
from fastcore.all import *
from fastai.basics import *
from fastai.vision.all import *
from fastai.callback.all import *
from fastai.test_utils import *
# ## Sparsify Module
#
# > For sparsifying a single module.
# When a parameter and buffer in a module follow the naming convention: `{p_name}`, `{p_name}_mask`, respectively, the buffer is assumed to be the mask for the parameter. For example, masked Linear and ConvNd layers will typically have a parameter named `weight`, a buffer named `weight_mask`. Additionally, parameters optionally also contain a sparsity buffer (e.g. for ConvNd, named `weight_sparsity`), which is used by the DynamicSparseTrainingCallback.
# +
#export
@torch.no_grad()
def sparse_mask(sizes, sparsity):
    """Boolean mask of shape `sizes` whose True ("keep") entries sit at
    uniformly random positions; round((1-sparsity) * numel) entries are True."""
    numel = np.prod(sizes)
    n_keep = round((1 - sparsity) * numel)
    keep_idx = torch.randperm(numel)[:n_keep]
    flat = torch.zeros(numel, dtype=torch.bool)
    flat[keep_idx] = True
    return flat.reshape(*sizes)
def mask_from_tensor(t):
    """Boolean mask that is True wherever `t` is nonzero."""
    return t.ne(0)

def sparsity_from_tensor(t):
    """Fraction of entries in `t` that are exactly zero."""
    return t.eq(0).sum() / t.numel()

def sparse_mask_like(param, sparsity):
    """Random sparse boolean mask with `param`'s shape, on `param`'s device."""
    return sparse_mask(param.shape, sparsity).to(param.device)
# +
# 0.8 sparsity on a 10x5 mask keeps round(0.2 * 50) = 10 entries
mask = sparse_mask((10,5), 0.8)
test_eq(10, int(mask.sum()))
# round-trip: a masked tensor's nonzero pattern recovers the mask/sparsity
t = torch.rand(3,10)
mask = sparse_mask_like(t, 0.8)
t.mul_(mask)
test_eq(mask, mask_from_tensor(t))
test_close(0.8, sparsity_from_tensor(t))
# +
#export
def maybe_float(num):
    """Cast `num` to float when possible; otherwise return it unchanged.

    Used to turn an optional sparsity buffer (a 0-dim tensor, or None when no
    buffer exists) into a plain float.
    """
    try:
        return float(num)
    except (TypeError, ValueError):  # was a bare `except:` — that also swallowed
        return num                   # KeyboardInterrupt/SystemExit
def sparse_params(module):
    """List every (param, mask, sparsity) triple found in `module`.

    A parameter named `w` is considered sparse when a buffer `w_mask` exists;
    an optional `w_sparsity` buffer is converted to a plain float (or None).
    Duplicates from shared parameters are removed via a set.
    """
    buffers = dict(module.named_buffers())
    triples = set()
    for name, param in module.named_parameters():
        mask = buffers.get(f'{name}_mask')
        if mask is not None:
            triples.add((param, mask, maybe_float(buffers.get(f'{name}_sparsity'))))
    return list(triples)
# -
# weight gets mask + sparsity buffers, bias gets a mask only (sparsity -> None),
# so sparse_params should find exactly two (param, mask, sparsity) triples
s, m = 0.8, nn.Linear(5,10)
m.register_buffer('weight_mask', sparse_mask_like(m.weight, s))
m.register_buffer('weight_sparsity', tensor(s))
m.register_buffer('bias_mask', sparse_mask_like(m.bias, s))
param_mask_sparsity = sparse_params(m)
test_eq(2, len(param_mask_sparsity))
#export
@torch.no_grad()
def apply_masks(module, *args, inplace=True):
    """Zero out masked weights in `module`.

    Extra positional args are accepted and ignored so this can double as a
    `forward_pre_hook`.
    """
    for param, mask, _ in sparse_params(module):
        if inplace:
            param.data.mul_(mask)
        else:
            param.data = param.data.mul(mask)
# applying the masks leaves the 10 unmasked weight entries nonzero (w.h.p.)
apply_masks(m)
test_eq(10, m.weight.abs().gt(0).sum())
# +
#export
# module types we know how to sparsify out of the box
_sparseable_module_types = (nn.Linear,
                            nn.Conv1d, nn.Conv2d, nn.Conv3d,
                            nn.ConvTranspose1d, nn.ConvTranspose2d, nn.ConvTranspose3d,
                            nn.MultiheadAttention,
                            nn.RNN, nn.RNNCell, nn.GRU, nn.GRUCell, nn.LSTM, nn.LSTMCell)

def is_sparseable_module(m, additional_types=()):
    """True when `m` is an instance of a sparseable module type.

    `additional_types` extends the built-in list; the default is an immutable
    tuple (was a mutable `[]` default) — callers passing lists still work.
    """
    types = set(_sparseable_module_types) | set(additional_types)
    return isinstance(m, tuple(types))
# +
#export
# TODO: flatten_model gets rid of nn.MultiheadAttention which has it's own parameter 'in_proj_weight'
# which means sparsity_model doesn't sparsify this parameter
def sparseable_modules(model, additional_types=()):
    """All modules in `model` that can be sparsified (see `is_sparseable_module`).

    `additional_types` default changed from a mutable `[]` to an immutable
    tuple; callers passing lists are unaffected.
    """
    filt = partial(is_sparseable_module, additional_types=additional_types)
    return L(flatten_model(model)).filter(filt)
# +
def test_model():
    "Small convnet (3 convs + linear head) used by this notebook's tests."
    layers = [
        nn.Conv2d(3, 32, 3), nn.ReLU(),
        nn.Conv2d(32, 128, 3), nn.ReLU(),
        nn.Conv2d(128, 512, 3), nn.ReLU(),
        AdaptiveAvgPool(), Flatten(),
        nn.Linear(512, 10),
    ]
    return nn.Sequential(*layers)
# the test model has 3 convs + 1 linear = 4 sparseable modules
model = test_model()
s_mods = sparseable_modules(model)
test_eq(4, len(s_mods))
# -
#export
# NOTE(review): these two defs duplicate the earlier mask_from_tensor /
# sparsity_from_tensor (`t != 0` is equivalent to `t.ne(0)`); likely a
# leftover notebook cell that could be removed.
def mask_from_tensor(t): return t != 0
def sparsity_from_tensor(t): return 1 - mask_from_tensor(t).sum() / t.numel()
t = torch.rand(3,10)
mask = sparse_mask_like(t, 0.8)
t.mul_(mask)
test_eq(mask, mask_from_tensor(t))
test_close(0.8, sparsity_from_tensor(t))
# ## Sparse Weight Initialization
#
# > Sparsifying weights changes the variance, so we need to adjust the usual kaiming normal initialization.
#
# Correctly initializing a network can improve the speed and accuracy of training by ensuring good forward signal propagation and backward gradient flow. This is especially important in networks without batch normalization or skip connections. In sparse networks, initalizing the variance to the harmonic mean of fan_in and fan_out can improve training results. For example, using 'fan_in_out' initialization on MNIST+LeNet5 results in similar or improved accuracies compared to nn.Linear's default dense initialization).
#
# See [Gradient Flow in Sparse Neural Networks and How Lottery Tickets Win](https://arxiv.org/abs/2010.03533) by <NAME> et al. for a more in-depth discussion of sparse weight initialization techniques.
#export
@torch.no_grad()
def init_kaiming_normal_sparse_(t, a=0, mode='fan_in', sparse_mode='fan_in_out', nonlinearity='leaky_relu'):
    '''A modified kaiming normal initialization which adjusts for sparsity in weights.

    Initializes `t` in place with a dense kaiming normal, then rescales each
    entry by sqrt(dense_fan / sparse_fan) so the per-row/column variance
    matches what a dense layer of the same effective fan would have.
    Falls back to plain `mode` when `t` has no zeros.
    '''
    # calculate sparse adjustment to standard deviation
    # dense kaiming init  = mode / sqrt(dense_fan), e.g. for relu = 2 / sqrt(dense_fan)
    # sparse kaiming init = mode / sqrt(sparse_fan), note: sparse fan is unique to each input/output
    #                     = (dense kaiming init) * sqrt(dense_fan / sparse_fan)
    mask = mask_from_tensor(t)
    mode = mode if mask.sum() == t.numel() else sparse_mode
    mode_ix = ['fan_in', 'fan_out', 'fan_in_out'].index(mode)
    dim = [1,0,1][mode_ix]
    # NOTE(review): assumes `t` is at least 2-D (t[0][0] is the kernel) -- TODO confirm
    dense_fan = t.shape[dim] * t[0][0].numel()
    sparse_fan_in = mask.sum(1, keepdim=True)
    sparse_fan_out = mask.sum(0, keepdim=True)
    # variance of 'fan_in_out' is harmonic mean of 'fan_in' and 'fan_out'
    sparse_fan_in_out = (sparse_fan_in + sparse_fan_out) / 2
    sparse_fan = [sparse_fan_in, sparse_fan_out, sparse_fan_in_out][mode_ix]
    sparse_fan[sparse_fan==0] = 1 # avoid div by 0, can set to anything since these are masked
    std_adj = torch.sqrt(dense_fan / sparse_fan)
    # initialize as dense, then apply mask and apply sparse adjustment
    mode = 'fan_in' if mode == 'fan_in_out' else mode
    nn.init.kaiming_normal_(t, a=a, mode=mode, nonlinearity=nonlinearity)
    return t.mul_(mask).mul_(std_adj)
# +
def backward_variance(t):
    "Variance of the gradient of an MSE-to-zero loss with respect to `t`."
    t.grad = None
    mse(t, torch.zeros_like(t)).sum().backward()
    return t.grad.var()
# here we compare the variance of a dense matrix multiply and a several sparse matrix multiply ops
# (forward variance via t1 @ t2.t(), backward variance via backward_variance)
t1 = torch.rand(10,1000) # this is our input
t2 = torch.rand(50000,1000) # this is our weight matrix
t2.requires_grad_(True)
nn.init.kaiming_normal_(t2)
var1 = torch.var(t1 @ t2.t()) # dense variance
bvar1 = backward_variance(t2)
t2.requires_grad_(False)
mask = sparse_mask_like(t2, 0.99)
t2 = t2.mul_(mask) # sparsify our weight matrix
t2.requires_grad_(True)
var2 = torch.var(t1 @ t2.t()) # variance before adjustment
bvar2 = backward_variance(t2)
init_kaiming_normal_sparse_(t2, sparse_mode='fan_in')
var3 = torch.var(t1 @ t2.t()) # variance after fan_in adjustment
bvar3 = backward_variance(t2)
init_kaiming_normal_sparse_(t2, sparse_mode='fan_out').abs().gt(0).sum()
var4 = torch.var(t1 @ t2.t()) # variance after fan_out adjustment
bvar4 = backward_variance(t2)
init_kaiming_normal_sparse_(t2, sparse_mode='fan_in_out').abs().gt(0).sum()
var5 = torch.var(t1 @ t2.t()) # variance after fan_in_out adjustment
bvar5 = backward_variance(t2)
# fan_in adjustment should recover the dense forward variance
test_ne(var1, var2)
test_close(var1, var3, eps=0.02)
print('dense :', var1, bvar1, bvar1 / bvar1)
print('sparse no adj :', var2, bvar2, bvar2 / bvar1)
print('sparse fan_in :', var3, bvar3, bvar3 / bvar1)
print('sparse fan_out :', var4, bvar4, bvar4 / bvar1)
print('sparse fan_in_out:', var5, bvar5, bvar5 / bvar1)
# -
# ## Sparse Distributions
#
# > For determining the layer-wise sparsity of a list of modules.
# ### Uniform Distribution
#
# > All layers have a the same percentage of connection removed.
#export
def uniform_sparsity(params, model_sparsity):
    "Assign every parameter the same target sparsity `model_sparsity`."
    return [model_sparsity for _ in params]
# ### First-Layer-Dense Uniform Distribution
#
# > Uniform sparsity except for the first layer, which is dense.
#export
def first_layer_dense_uniform(params, model_sparsity):
    """Uniform sparsity for every layer except the first, which stays dense
    (sparsity 0).

    Returns [] for an empty `params` list; the previous version returned a
    spurious `[0.]` in that case.
    """
    if not params:
        return []
    return [0.] + [model_sparsity] * (len(params) - 1)
# ### Erdos-Renyi (Kernel) Distribution
#
# > For a fixed overall sparsity, the Erdos-Renyi sparsity distribution allocates more connections to smaller layers and fewer to large layers when compared to a uniform sparsity distribution.
#export
# modified from https://github.com/google-research/rigl/blob/master/rigl/sparse_utils.py.
def erdos_renyi_sparsity(params, model_sparsity, include_kernel=True, erk_power_scale=1.0):
    """
    Returns a list of sparsities in the same order as params. Sparsities satisfy
    the Erdos-Renyi(Kernel) distribution, where the model has a total parameter count
    as one with uniform sparsities, that is, satisfying the following equation:
    $ eps * (p_1 * N_1 + p_2 * N_2) = (1 - model_sparsity) * (N_1 + N_2) $, for some float `eps`.
    Args:
        params: list of all sparseable parameters (must be hashable, e.g. tensors)
        model_sparsity: target overall sparsity between 0 and 1
        include_kernel: if True, kernel dimensions are included in the scaling (e.g. for ConvNd layers)
        erk_power_scale: scale < 1 softens the erdos_renyi distribution (i.e. closer to uniform)
    Returns a list of sparsities where values correspond to individual param sparsities.
    """
    # Enforce custom sparsities, then find correct scaling factor, `eps` for remaining params
    dense_layers = set()
    is_eps_valid = False
    while not is_eps_valid:
        # Start with all layers and try to find right eps. If any sparsity exceeds 1,
        # make that layer dense and repeat with the non-dense layers.
        #
        # E.g. where N_3, and N_4 are found to be dense:
        # eps * (p_1 * N_1 + p_2 * N_2) + (N_3 + N_4) =
        #    (1 - model_sparsity) * (N_1 + N_2 + N_3 + N_4)
        # eps * (p_1 * N_1 + p_2 * N_2) =
        #    (1 - model_sparsity) * (N_1 + N_2) - model_sparsity * (N_3 + N_4) <--- == rhs
        # eps = rhs / (\sum_i p_i * N_i) <--- == divisor
        # eps = rhs / divisor
        divisor = 0
        rhs = 0
        raw_sparsity = {}
        for p in params:
            n_zeros = int(np.floor(model_sparsity * p.numel()))
            if p in dense_layers:
                # dense layers contribute their zero budget back to the rhs
                rhs -= n_zeros
            else:
                n_ones = p.numel() - n_zeros
                rhs += n_ones
                # raw (unscaled) sparsity grows as layers get larger
                if include_kernel:
                    raw_sparsity[p] = (np.sum(p.shape) / np.prod(p.shape))**erk_power_scale
                else:
                    raw_sparsity[p] = (np.sum(p.shape[:2]) / np.prod(p.shape[:2]))
                divisor += raw_sparsity[p] * p.numel()
        eps = rhs / divisor
        # If eps * raw_sparsity[p] > 1, we add the param to the set of dense_layers
        max_sparsity = np.max(list(raw_sparsity.values()))
        if eps * max_sparsity > 1:
            for p, p_raw_sparsity in raw_sparsity.items():
                if p_raw_sparsity == max_sparsity:
                    dense_layers.add(p)
        else:
            is_eps_valid = True
    # With the valid eps, we can set sparsities of the remaining layers
    sparsities = [0. if p in dense_layers else (1. - eps * raw_sparsity[p]) for p in params]
    return sparsities
# +
# overall nonzero count under ERK should match the uniform-sparsity budget
model = test_model()
s_params = L(sparseable_modules(model)).map(lambda m: m.weight)
sparsities = erdos_renyi_sparsity(s_params, 0.9)
n_nonzeros = sum([(1-s) * p.numel() for p, s in zip(s_params, sparsities)])
test_close(n_nonzeros, 0.1 * sum([p.numel() for p in s_params]), eps=len(s_params))
# test_eq([0., 0., 0., 0.], sparsities) # TODO: calc sparsities by hand and compare
# -
# ## Sparsify Model
#
# > For sparsifying an entire model.
#export
@torch.no_grad()
def sparsify_model(model, model_sparsity, sparse_f=uniform_sparsity,
                   sparse_init_mode=None, enforce_mask=True):
    '''
    Adds a sparse mask for each sparseable-module weight in model and applies mask to weights.
    `sparse_f`: per RigL paper, `uniform_sparsity` has fewer FLOPs, `erdos_renyi_sparsity`
        results in better model.
    `sparse_init_mode`: initialization mode of sparse modules, or no initialization if None.
        Possible values: [None, 'fan_in', 'fan_out', 'fan_in_out']
    If `enforce_mask` is True, a forward_pre_hook will be registered to each module
        to apply the weight mask before every forward pass of the module.
    Returns a fastai Hooks object. You can remove the hooks after training by calling hooks.remove().
    '''
    if isinstance(model, Learner): model = model.model
    modules = sparseable_modules(model)
    # only parameters whose name contains 'weight' are sparsified (biases are left dense)
    module_name_param = L([(m, p_name, p) for m in modules for p_name, p in m.named_parameters()
                           if 'weight' in p_name])
    params = module_name_param.itemgot(2)
    sparsities = sparse_f(params, model_sparsity)
    hooks = Hooks([], noop)
    for (m, p_name, p), s in zip(module_name_param, sparsities):
        if s > 0:
            # register the mask/sparsity buffers and zero the masked weights
            mask = sparse_mask_like(m.weight, s)
            m.register_buffer('weight_mask', mask)
            m.register_buffer('weight_sparsity', tensor(s))
            apply_masks(m)
            if sparse_init_mode is not None:
                # re-init with sparsity-adjusted kaiming normal, then re-mask
                init_f = partial(init_kaiming_normal_sparse_, sparse_mode=sparse_init_mode)
                init_default(m, func=init_f)
                apply_masks(m)
        if enforce_mask:
            # re-apply the mask before every forward pass
            h = m.register_forward_pre_hook(apply_masks)
            hooks.hooks.append(h)
    return hooks
# +
# uniform sparsification: overall and per-layer density should be ~10%
model = test_model()
s_mods = sparseable_modules(model)
n_params = sum(m.weight.numel() for m in s_mods)
sparsify_model(model, 0.9, sparse_f=uniform_sparsity)
n_nonzeros = sum(m.weight.abs().gt(0).sum() for m in s_mods)
# increase `eps` to account for rounding to nearest whole weight
test_close(n_nonzeros, 0.1 * n_params, eps=len(s_mods))
p = s_mods[0].weight
test_close(p.abs().gt(0).sum(), 0.1 * p.numel(), eps=1)
# the forward_pre_hook keeps weights masked even after manual edits,
# until hooks.remove() is called
model = nn.Sequential(nn.Linear(1,50), nn.ReLU(), nn.Linear(50,1))
hooks = sparsify_model(model, 0.9)
model(torch.rand(10,1))
test_eq(10, sum([model[i].weight.abs().gt(0).sum() for i in (0,2)]))
hooks.remove()
for i in (0,2): model[i].weight.data = torch.ones_like(model[i].weight)
model(torch.rand(10,1))
test_eq(100, sum([model[i].weight.abs().gt(0).sum() for i in (0,2)]))
# -
# ## Sparse Training
# ### Drop/Grow Heuristics
#export
def random_score(p, **kwargs):
    "Uniform-random score per weight (used for random drop/grow)."
    return torch.rand_like(p)
#export
def weight_magnitude(p, **kwargs):
    "Score each weight by its absolute value."
    return p.data.abs()
#export
def gradient_magnitude(p, **kwargs):
    "Score each weight by the absolute value of its gradient."
    return p.grad.abs()
#export
def gradient_momentum(p, opt, **kwargs):
    """Score each weight of `p` by its gradient momentum, read from the
    optimizer state `opt`; debiased by sqrt(sqr_avg + eps) when available."""
    state = opt.state[p]
    grad_avg = state.get('grad_avg')
    sqr_avg = state.get('sqr_avg')
    if grad_avg is None:
        raise Exception(f"Error: 'grad_avg' key not found in optimizer state. Tip: set the `mom` hyperparamter in the learner.")
    if sqr_avg is None:
        return grad_avg
    try:
        eps = opt.state_dict()['hypers'][0]['eps']
    except:
        eps = 1e-6
    return grad_avg / (torch.sqrt(sqr_avg + eps))
# ### Sparsity Redistribution Heuristics
# +
#export
def momentum_redistribution(dst_cb):
    '''
    Modifies each sparseable parameter's target sparsity proportional to its mean absolute momentum.
    Based on redistribution method in Sparse Networks From Scratch by Dettmers et al.
    (https://arxiv.org/abs/1907.04840). Instead of evenly distributing leftover weights, as in the
    official implementation, this method finds exact distribution amounts by making parameters dense
    one at a time until valid sparsities are found.

    `dst_cb` is the DynamicSparseTrainingCallback instance (provides .modules,
    .learn.opt and .drop_grow_pct). Writes the new targets into each module's
    `<param>_sparsity` buffer; returns None.
    '''
    param_d = {p: (mask, s, m) for m in dst_cb.modules for p,mask,s in sparse_params(m)}
    # calculate mean absolute momentum per layer and total # of params to distribute
    p2mom, p2drop, p2maxgrow = {}, {}, {}
    for p, (mask, s, m) in param_d.items():
        mom = gradient_momentum(p, dst_cb.learn.opt)
        mean_nonzero_mom = (mom * mask).abs().sum() / mask.sum()
        p2mom[p] = mean_nonzero_mom
        n_nonzeros = mask.sum()
        n_zeros = mask.numel() - n_nonzeros
        n_drop = int(n_nonzeros * dst_cb.drop_grow_pct)
        p2drop[p] = n_drop
        p2maxgrow[p] = n_zeros + n_drop
    # normalize momentum contributions to determine each parameters's growth factor
    total_mom = sum(p2mom.values())
    p2growth_factor = {p: float(mom / total_mom) for p, mom in p2mom.items()}
    total_n_drop = sum(p2drop.values())
    if total_n_drop == 0:
        return
    # Distribute weights proportional to parameter's momentum, without changing overall sparsity
    #   total_n_drop = total_n_grow
    #   sum_p: n_drop[p] = sum_p: n_grow[p]
    #   sum_p: n_drop[p] = sum_dense_p: max_grow[p]
    #                      + eps * sum_sparse_p: growth_factor[p] * n_drop[p]
    # Goal is to find eps satisfying ^ this ^ equation where no layer's density > 1:
    #   eps = ( sum(n_drop[p]) - sum_dense_p(max_grow[p]) ) / sum_sparse_p(growth_factor[p] * n_drop[p])
    #   eps = (total_n_drop - total_dense_grow) / proportional_sparse_grow
    # Loop until no target density > 1, adding largest layer to dense set if not satisfied
    # print('dropping:', total_n_drop, 'individ:', p2drop.values())
    p2grow = {}
    dense_params = set()
    done = False
    while done == False:
        for p, (mask, s, m) in param_d.items():
            if p in dense_params:
                p2grow[p] = p2maxgrow[p] # = total_dense_grow[p]
            else:
                p2grow[p] = p2growth_factor[p] * p2drop[p] # = proportional_sparse_grow[p]
        # find eps
        total_dense_grow = sum(p2grow[p] for p in param_d.keys() if p in dense_params)
        proportional_sparse_grow = sum(p2grow[p] for p in param_d.keys() if p not in dense_params)
        # print('dense:', [p.numel() for p in dense_params])
        eps = (total_n_drop - total_dense_grow) / proportional_sparse_grow
        # find new sparsities
        p2sparsity = {}
        for p, (mask, s, m) in param_d.items():
            if p in dense_params:
                p2sparsity[p] = 0.
            else:
                n_drop = p2drop[p]
                n_grow = eps * p2grow[p]
                target_nonzeros = mask.sum() - n_drop + n_grow
                p2sparsity[p] = 1 - target_nonzeros / mask.numel()
        # if any sparse params have sparsity < 0 (i.e. denser than possible), move the lowest sparsity
        # param to the set of dense params, otherwise end loop
        min_sparsity = min([s for s in p2sparsity.values()])
        if min_sparsity < 0:
            for p, s in p2sparsity.items():
                if s == min_sparsity:
                    dense_params.add(p)
        else:
            done = True
    # set each parameter's sparsity buffer to new target sparsity
    for p, (mask, s, m) in param_d.items():
        pname = {param:pname for pname, param in m.named_parameters()}[p]
        sparsity_buffer = getattr(m, pname+'_sparsity')
        sparsity_buffer.data = torch.tensor(float(p2sparsity[p]))
# -
# ### Dynamic Sparse Training Callback
#export
def top_k_mask(t, n_keep):
    """Boolean mask marking the `n_keep` largest values of `t` (same shape as `t`)."""
    flat = t.flatten()
    _, order = torch.topk(flat, k=flat.numel())
    # True for the first n_keep slots, then scatter back to original positions
    src = torch.zeros(flat.numel(), dtype=torch.bool, device=t.device)
    src[:n_keep] = True
    return src.scatter(0, order, src).view(*t.shape)
# the 5 largest of 20 ascending values all live in the last row
t = torch.linspace(-0.9, 0.9, 20).reshape(4,5)
mask = top_k_mask(t, 5)
test_eq(0, mask[:3].sum())
test_eq(5, mask[3:].sum())
# +
#export
class DynamicSparseTrainingCallback(Callback):
    '''Dynamically updates the network connectivity during training.

    Every `batches_per_update` training batches, drops the lowest-scoring
    fraction of live weights (per `keep_score_f`) and grows the same number of
    new connections (per `grow_score_f`), optionally reallocating sparsity
    across layers first via `redistribute_f`.
    '''
    def __init__(self, sparse_modules=None,
                 batches_per_update=None, initial_drop_grow_pct=0.3, stop_pct=0.75,
                 keep_score_f=weight_magnitude, grow_score_f=gradient_magnitude, redistribute_f=None):
        store_attr('initial_drop_grow_pct,stop_pct,keep_score_f,grow_score_f,redistribute_f,batches_per_update')
        self.modules = sparse_modules
    def before_fit(self):
        # default: all sparseable modules, update once per training epoch
        self.modules = ifnone(self.modules, sparseable_modules(self.learn.model))
        self.batches_per_update = ifnone(self.batches_per_update, len(self.dls.train))
        # cosine-anneal drop/grow pct to 0 by stop_pct of training, then hold at 0
        self.drop_grow_pct_sched = combine_scheds(
            [self.stop_pct, 1-self.stop_pct],
            [SchedCos(self.initial_drop_grow_pct, 0.), SchedNo(0.,0.)]
        )
        self.n_param_count = sum([int(mask.numel()) for m in self.modules for _,mask,_ in sparse_params(m)])
        self.n_nonzeros = sum([int(mask.sum()) for m in self.modules for _,mask,_ in sparse_params(m)])
        self.model_sparsity = 1 - self.n_nonzeros / self.n_param_count
    def after_backward(self):
        self.step()
        # self.learn.opt.step()
        if self.is_update_step:
            if self.redistribute_f:
                self.redistribute_f(self)
            for m in self.modules:
                self.rewire_module(m)
            # skip the optimizer step on rewire batches
            raise CancelBatchException()
    def step(self):
        if not self.training:
            self.is_update_step = False
        else:
            step = self.epoch * self.n_iter + self.iter
            n_steps = self.n_epoch * self.n_iter
            pct_train = step / n_steps
            is_last_step = step + 1 == n_steps
            # note: reads the previous iteration's drop_grow_pct (short-circuit
            # on step > 0 protects the very first call)
            self.is_update_step = (step > 0
                                   and step % self.batches_per_update == 0
                                   and self.drop_grow_pct > 0
                                   and not is_last_step)
            self.drop_grow_pct = self.drop_grow_pct_sched(pct_train)
    @torch.no_grad()
    def rewire_module(self, m):
        for param, mask, target_sparsity in sparse_params(m):
            current_sparsity = 1 - float(mask.sum() / mask.numel())
            n_grow = int(mask.sum() * self.drop_grow_pct)
            n_keep = mask.sum() - n_grow
            # modify n_grow if actual sparsity differs from target sparsity
            current_nonzeros = int(mask.sum())
            target_nonzeros = round(mask.numel() * (1 - target_sparsity))
            n_grow = max(0, n_grow + target_nonzeros - current_nonzeros)
            # determine which weights to keep
            if current_sparsity > 0 and target_sparsity > 0:
                keep_score = self.keep_score_f(param, opt=self.learn.opt)
                keep_mask = top_k_mask(keep_score, n_keep)
            else:
                keep_mask = torch.ones_like(mask)
            # determine which weights to grow, if any
            if self.grow_score_f:
                grow_score = self.grow_score_f(param, opt=self.learn.opt)
                # make all keep weights to negative so we don't choose to grow them
                grow_score = grow_score * keep_mask.logical_not() - keep_mask.float()
                grow_mask = top_k_mask(grow_score, n_grow)
            else:
                grow_mask = torch.zeros_like(mask)
            # update network connectivity
            mask.data = keep_mask | grow_mask
            # zero momentum for new connections
            self.reset_momentum(param, grow_mask & keep_mask.logical_not())
    @torch.no_grad()
    def reset_momentum(self, p, mask):
        state = self.opt.state[p]
        if 'grad_avg' in state: state['grad_avg'].mul_(mask)
        if 'sqr_avg' in state: state['sqr_avg'].mul_(mask)
    _docs = dict(__init__='''Args:
        sparse_modules: optional, specify which modules to modify the connectivity of
        batches_per_update: # of batches per update, None (default) updates at end of each training epoch
        initial_drop_grow_pct: percentage of weights to change during each dynamic weight update
        stop_pct: stop dynamic weight updates after `stop_pct` of training
        keep_score_f: function scoring each weight, top n are kept and the rest are zeroed
        grow_score_f: function scoring each weight, top n excl. kept weights are unmasked and initialized to zero''',
                 before_fit="Schedule the number of connections to drop & grow per update.",
                 before_batch="Add dynamic update hooks.",
                 after_backward="Remove dynamic update hooks and skip gradient update.",
                 step="Update self.is_update_step and self.drop_grow_pct.",
                 rewire_module="Update step for one module.",
                 reset_momentum="Initialize momentum to zero for newly-added connections.")
# -
show_doc(DynamicSparseTrainingCallback)
# First, let's test the callback on a toy model:
model = nn.Sequential(nn.Linear(1,32), nn.ReLU(), nn.Linear(32,32), nn.ReLU(), nn.Linear(32,1))
learn = synth_learner(data=synth_dbunch(bs=100), model=model)
sparse_hooks = sparsify_model(learn.model, 0.8, sparse_f=first_layer_dense_uniform)
# gradient_momentum growth needs the optimizer's 'grad_avg' state (mom set by synth_learner)
cbs = DynamicSparseTrainingCallback(redistribute_f=momentum_redistribution, batches_per_update=None, stop_pct=0.9, grow_score_f=gradient_momentum)
learn.fit(10, lr=1e-2, cbs=cbs)
# Now, let's test a slightly more realistic use case: MNIST_TINY on ResNet18.
# +
#slow
# without momentum_redistribution
dls = ImageDataLoaders.from_folder(untar_data(URLs.MNIST_TINY))
learn = Learner(dls, xresnet18(n_out=2), metrics=accuracy)
sparse_hooks = sparsify_model(learn.model, 0.9, erdos_renyi_sparsity)
cbs = DynamicSparseTrainingCallback(batches_per_update=8, stop_pct=0.9,
                                    grow_score_f=gradient_momentum)
learn.fit_one_cycle(5, 1e-2, cbs=cbs)
test_close(1, learn.final_record[-1], eps=0.03) # better than 97% accuracy
# without redistribution each layer should end at its original target sparsity
for m in sparseable_modules(learn.model):
    for p, mask, s in sparse_params(m):
        n_alive = p.abs().gt(0).sum()
        n_total = p.numel()
        test_close(s, 1 - n_alive / n_total, eps=0.01) # layer sparsity = target sparsity
# +
#slow
# with momentum_redistribution
dls = ImageDataLoaders.from_folder(untar_data(URLs.MNIST_TINY))
learn = Learner(dls, xresnet18(n_out=2), metrics=accuracy)
sparse_hooks = sparsify_model(learn.model, 0.99, first_layer_dense_uniform)
cbs = DynamicSparseTrainingCallback(batches_per_update=8, stop_pct=0.9,
                                    grow_score_f=gradient_momentum,
                                    redistribute_f=momentum_redistribution)
n_nonzeros = sum([mask.sum() for m in sparseable_modules(learn.model) for p, mask, s in sparse_params(m)])
n = sum([mask.numel() for m in sparseable_modules(learn.model) for p, mask, s in sparse_params(m)])
before_sparsity = n_nonzeros / n
learn.fit_one_cycle(5, 1e-2, cbs=cbs)
test_close(1, learn.final_record[-1], eps=0.03) # better than 97% accuracy
# redistribution moves sparsity between layers but must preserve the model total
n_nonzeros = sum([mask.sum() for m in sparseable_modules(learn.model) for p, mask, s in sparse_params(m)])
n = sum([mask.numel() for m in sparseable_modules(learn.model) for p, mask, s in sparse_params(m)])
after_sparsity = n_nonzeros / n
test_close(before_sparsity, after_sparsity, eps=1e-5) # model sparsity is unchanged
# -
# ## Preset Definitions
# ### Sparse Evolutionary Training (SET)
#export
# SET: magnitude-based drop, random growth, never stops rewiring
SET_presets = {'keep_score_f': weight_magnitude, 'grow_score_f': random_score,
               'initial_drop_grow_pct': 0.3, 'stop_pct': 1.0,}
# ### Sparse Networks From Scratch (SNFS)
#export
# SNFS: momentum-based growth plus cross-layer momentum redistribution
SNFS_presets = {'redistribute_f':momentum_redistribution,
                'keep_score_f': weight_magnitude, 'grow_score_f': gradient_momentum,
                'initial_drop_grow_pct': 0.5, 'stop_pct': 1.0,}
# ### Rigged Lottery (RigL)
#export
# RigL: gradient-magnitude growth, fixed layer sparsities, stops at 75% of training
RigL_presets = {'keep_score_f': weight_magnitude, 'grow_score_f': gradient_magnitude,
                'initial_drop_grow_pct':0.3, 'stop_pct':0.75, 'batches_per_update': 100}
# ## Counting FLOPs
# +
#export
def flop_counter_hook(m, i, o):
    '''Estimate forward-pass FLOPs for one module call.

    Counts only nn.Linear and nn.ConvNd layers; any other module contributes 0.
    `i` is the tuple of inputs a pytorch/fastai hook receives; `o` is unused.
    '''
    if isinstance(m, (nn.Conv1d, nn.Conv2d, nn.Conv3d)):
        bs, ch, *ks = i[0].shape
        # one multiply-add per weight per output position; dividing by the
        # stride product converts input spatial positions to output positions.
        # (Fix: the old `sx, sy = m.stride` unpack crashed for Conv1d/Conv3d,
        # whose stride tuples are not length 2.)
        return bs * np.prod(ks) * m.weight.numel() / np.prod(m.stride)
    elif isinstance(m, nn.Linear):
        bs = np.prod(i[0].shape[:-1])  # everything but the feature dim is batch
        return bs * m.weight.numel()
    return 0
def sparse_flop_counter_hook(m, i, o):
    '''FLOPs counting only nonzero weights: dense FLOPs scaled by weight density.'''
    if hasattr(m, 'weight'):
        density = m.weight.abs().gt(0).sum() / m.weight.numel()
    else:
        density = 1
    return int(density * flop_counter_hook(m, i, o))
def count_flops(model, xb, sparse=False):
    '''Total forward-pass FLOPs of `model` on batch `xb` (nonzero weights only
    when `sparse` is True). Hooks are removed on exit.'''
    counter = sparse_flop_counter_hook if sparse else flop_counter_hook
    with Hooks(flatten_model(model), counter) as h:
        model(xb)
        return sum(h.stored)
# -
#export
class FlopsCounter(HookCallback):
    '''Accumulates per-module forward-pass FLOPs during training into `self.m2flops`.'''
    def __init__(self, sparse=True, verbose=False, **kwargs):
        super().__init__(**kwargs)
        store_attr('sparse,verbose')
    def hook(self, m, i, o):
        # choose the dense or density-scaled counter per batch
        f = sparse_flop_counter_hook if self.sparse else flop_counter_hook
        return f(m, i, o)
    def before_fit(self):
        # persist counts across repeated fits on the same learner
        if not hasattr(self, 'm2flops'): self.m2flops = defaultdict(int)
        super().before_fit()
    def after_batch(self):
        "Take the stored results and puts it in `self.m2flops`"
        if self.training and (self.every is None or self.train_iter%self.every == 0):
            for m, flops in zip(self.modules, self.hooks.stored):
                self.m2flops[m] += flops
        super().after_batch()
    def after_fit(self):
        if self.verbose: print(f'Training FLOPs (forward pass only): {self.fwd_train_flops()}')
        super().after_fit()
    def fwd_train_flops(self): return sum(self.m2flops.values())
# +
# FLOPs should scale linearly with batch size, and halve at 50% sparsity
dls = ImageDataLoaders.from_folder(untar_data(URLs.MNIST_TINY), 'train', 'valid', bs=11)
xb, yb = dls.one_batch()
layers = [Flatten(), nn.Linear(xb[0].numel(), 100), nn.ReLU(), nn.Linear(100, 10)]
model = nn.Sequential(*layers).to(xb.device)
sparsity = 0.5
sparsify_model(model, sparsity, sparse_f=uniform_sparsity)
dense_count = count_flops(model, xb)
dense_count4x = count_flops(model, torch.cat([xb,xb,xb,xb], dim=0))
sparse_count = count_flops(model, xb, sparse=True)
test_close(dense_count4x, dense_count * 4, eps=dense_count/1000)
test_close(sparse_count, dense_count * 0.5, eps=sparse_count/1000)
# the callback's accumulated total should match an analytic per-image estimate
n_epochs = 1
learn = Learner(dls, model, cbs=FlopsCounter(sparse=True, verbose=True))
learn.fit_one_cycle(n_epochs, 1e-2)
flops_per_image = sparsity * (xb[0].numel()*100 + 100*10)
images_per_epoch = len(dls.train) * dls.train.bs
flops = flops_per_image * images_per_epoch * n_epochs
test_close(flops, learn.flops_counter.fwd_train_flops(), eps=flops/1000)
# -
# # Export
#hide
# regenerate the library's .py modules from this notebook (nbdev export step)
from nbdev.export import notebook2script
notebook2script()
|
00_core.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#hide
# %load_ext autoreload
# %autoreload 2
# +
# default_exp dcmm
# -
# # DCMM
#
# > A Dynamic Count Mixture Model, or DCMM, is the combination of a Bernoulli and Poisson DGLM as described in [Berry and West (2019)](https://arxiv.org/pdf/1805.05232.pdf).
# The DCMM is a combination of a Bernoulli and Poisson DGLM. The Bernoulli DGLM models the probability of the observation being zero. Conditional on a non-zero outcome, then the observation follows a Poisson distribution. This is useful for modeling time series with a greater number of zeros than expected under a Poisson distribution, which is frequently the case for low-valued count time series.
#
# In more formal terms, a DCMM models observations $y_t$ as:
# $$
# \quad z_{t} \sim Bern(\pi_{t}) \quad \textrm{and}\quad y_{t} | z_{t} =
# \begin{cases}
# 0, & \text{if } z_{t} = 0,\\
# 1 + x_{t}, \quad x_{t} \sim Pois(\mu_{t}), & \textrm{if}\ z_{t} = 1.
# \end{cases}
# $$
#hide
#exporti
import numpy as np
from pybats.latent_factor_fxns import forecast_marginal_lf_dcmm, forecast_path_lf_dcmm
from pybats.dglm import bern_dglm, pois_dglm
from pybats.update import update_F
from scipy.special import expit
#export
class dcmm:
def __init__(self,
a0_bern = None,
R0_bern = None,
nregn_bern = 0,
ntrend_bern = 0,
nlf_bern = 0,
nhol_bern = 0,
seasPeriods_bern = [],
seasHarmComponents_bern = [],
deltrend_bern = 1, delregn_bern = 1,
delhol_bern = 1,
delseas_bern = 1, dellf_bern = 1,
a0_pois = None,
R0_pois = None,
nregn_pois = 0,
ntrend_pois = 0,
nlf_pois = 0,
nhol_pois = 0,
seasPeriods_pois = [],
seasHarmComponents_pois = [],
deltrend_pois = 1, delregn_pois = 1,
delhol_pois = 1,
delseas_pois = 1, dellf_pois = 1,
rho = 1,
interpolate=True,
adapt_discount=False):
"""
:param a0_bern: Prior mean vector for bernoulli DGLM
:param R0_bern: Prior covariance matrix for bernoulli DGLM
:param nregn_bern: Number of regression components in bernoulli DGLM
:param ntrend_bern: Number of trend components in bernoulli DGLM
:param nlf_bern: Number of latent factor components in bernoulli DGLM
:param seasPeriods_bern: List of periods of seasonal components in bernoulli DGLM
:param seasHarmComponents_bern: List of harmonic components included for each period in bernoulli DGLM
:param deltrend_bern: Discount factor on trend components in bernoulli DGLM
:param delregn_bern: Discount factor on regression components in bernoulli DGLM
:param delhol_bern: Discount factor on holiday component in bernoulli DGLM (currently deprecated)
:param delseas_bern: Discount factor on seasonal components in bernoulli DGLM
:param dellf_bern: Discount factor on latent factor components in bernoulli DGLM
:param a0_pois: Prior mean vector for poisson DGLM
:param R0_pois: Prior covariance matrix for poisson DGLM
:param nregn_pois: Number of regression components in poisson DGLM
:param ntrend_pois: Number of trend components in poisson DGLM
:param nlf_pois: Number of latent factor components in poisson DGLM
:param seasPeriods_pois: List of periods of seasonal components in poisson DGLM
:param seasHarmComponents_pois: List of harmonic components included for each period in poisson DGLM
:param deltrend_pois: Discount factor on trend components in poisson DGLM
:param delregn_pois: Discount factor on regression components in poisson DGLM
:param delhol_pois: Discount factor on holiday component in poisson DGLM (currently deprecated)
:param delseas_pois: Discount factor on seasonal components in poisson DGLM
:param dellf_pois: Discount factor on latent factor components in poisson DGLM
:param rho: Discount factor for random effects extension in poisson DGLM (smaller rho increases variance)
"""
self.bern_mod = bern_dglm(a0=a0_bern,
R0=R0_bern,
nregn=nregn_bern,
ntrend=ntrend_bern,
nlf=nlf_bern,
nhol=nhol_bern,
seasPeriods=seasPeriods_bern,
seasHarmComponents=seasHarmComponents_bern,
deltrend=deltrend_bern, delregn=delregn_bern,
delhol=delhol_bern, delseas=delseas_bern,
dellf=dellf_bern,
interpolate=interpolate,
adapt_discount=adapt_discount)
self.pois_mod = pois_dglm(a0=a0_pois,
R0=R0_pois,
nregn=nregn_pois,
ntrend=ntrend_pois,
nlf=nlf_pois,
nhol=nhol_pois,
seasPeriods=seasPeriods_pois,
seasHarmComponents=seasHarmComponents_pois,
deltrend=deltrend_pois, delregn=delregn_pois,
delhol=delhol_pois, delseas=delseas_pois,
dellf=dellf_pois,
rho=rho,
interpolate=interpolate,
adapt_discount=adapt_discount)
self.t = 0
# X is a list or tuple of length 2. The first component is data for the bernoulli DGLM, the next is for the Poisson DGLM.
def update(self, y = None, X = None):
X = self.make_pair(X)
if y is None:
self.bern_mod.update(y=y)
self.pois_mod.update(y=y)
elif y == 0:
self.bern_mod.update(y = 0, X = X[0])
self.pois_mod.update(y = np.nan, X = X[1])
else: # only update beta model if we have significant uncertainty in the forecast
# get the lower end forecast on the logit scale
F = update_F(self.bern_mod, X[0], F=self.bern_mod.F.copy())
ft, qt = self.bern_mod.get_mean_and_var(F, self.bern_mod.a, self.bern_mod.R)
fcast_logit_lb = ft - np.sqrt(qt)
# translate to a prod for a rough idea of whether we're already pretty confident for this forecast
if expit(fcast_logit_lb) < 0.975:
self.bern_mod.update(y=1, X = X[0])
else:
self.bern_mod.update(y=np.nan, X=X[0])
self.pois_mod.update(y = y - 1, X = X[1]) # Shifted Y values in the Poisson DGLM
self.t += 1
def update_lf_sample(self, y = None, X = None, phi_samps = None, parallel=False):
X = self.make_pair(X)
phi_samps = self.make_pair(phi_samps)
if y is None:
self.bern_mod.update_lf_sample(y=y)
self.pois_mod.update_lf_sample(y=y)
elif y == 0:
self.bern_mod.update_lf_sample(y = 0, X = X[0], phi_samps = phi_samps[0], parallel = parallel)
self.pois_mod.update_lf_sample(y = np.nan, X = X[1], phi_samps = phi_samps[1], parallel = parallel)
else:
self.bern_mod.update_lf_sample(y = 1, X = X[0], phi_samps = phi_samps[0], parallel = parallel)
# Shifted Y values in the Poisson DGLM
self.pois_mod.update_lf_sample(y =y - 1, X = X[1], phi_samps = phi_samps[1], parallel = parallel)
self.t += 1
def update_lf_analytic(self, y = None, X = None, phi_mu = None, phi_sigma = None):
X = self.make_pair(X)
phi_mu = self.make_pair(phi_mu)
phi_sigma = self.make_pair(phi_sigma)
if y is None:
self.bern_mod.update_lf_analytic(y=y)
self.pois_mod.update_lf_analytic(y=y)
elif y == 0:
self.bern_mod.update_lf_analytic(y = 0, X = X[0], phi_mu = phi_mu[0], phi_sigma = phi_sigma[0])
self.pois_mod.update_lf_analytic(y = np.nan, X = X[1], phi_mu = phi_mu[1], phi_sigma = phi_sigma[1])
else:
self.bern_mod.update_lf_analytic(y = 1, X = X[0], phi_mu = phi_mu[0], phi_sigma = phi_sigma[0])
# Shifted Y values in the Poisson DGLM
self.pois_mod.update_lf_analytic(y =y - 1, X = X[1], phi_mu = phi_mu[1], phi_sigma = phi_sigma[1])
self.t += 1
def forecast_marginal(self, k, X = None, nsamps = 1, mean_only = False, state_mean_var = False):
X = self.make_pair(X)
if mean_only:
mean_bern = self.bern_mod.forecast_marginal(k, X[0], nsamps, mean_only)
mean_pois = self.pois_mod.forecast_marginal(k, X[1], nsamps, mean_only)
return mean_bern * (mean_pois + 1)
elif state_mean_var:
mv_bern = self.bern_mod.forecast_marginal(k, X[0], state_mean_var = state_mean_var)
mv_pois = self.pois_mod.forecast_marginal(k, X[1], state_mean_var = state_mean_var)
return mv_bern, mv_pois
else:
samps_bern = self.bern_mod.forecast_marginal(k, X[0], nsamps)
samps_pois = self.pois_mod.forecast_marginal(k, X[1], nsamps) + np.ones([nsamps]) # Shifted Y values in the Poisson DGLM
return samps_bern * samps_pois
    def forecast_marginal_lf_analytic(self, k, X = None, phi_mu = None, phi_sigma = None, nsamps = 1, mean_only = False, state_mean_var = False):
        """Marginal k-step-ahead forecast with analytic latent factor moments.

        :param k: Forecast horizon.
        :param X: Predictors; a (bernoulli, poisson) pair or a shared value.
        :param phi_mu: Latent factor means; pair or shared value.
        :param phi_sigma: Latent factor variances; pair or shared value.
        :param nsamps: Number of forecast samples to draw.
        :param mean_only: If True, return the forecast mean.
        :param state_mean_var: If True, return per-component state mean/variance.
        """
        X = self.make_pair(X)
        phi_mu = self.make_pair(phi_mu)
        phi_sigma = self.make_pair(phi_sigma)
        if mean_only:
            mean_bern = self.bern_mod.forecast_marginal_lf_analytic(k, X[0], phi_mu[0], phi_sigma[0], nsamps, mean_only)
            mean_pois = self.pois_mod.forecast_marginal_lf_analytic(k, X[1], phi_mu[1], phi_sigma[1], nsamps, mean_only)
            # NOTE(review): wrapped as a 1x1 array here, while forecast_marginal
            # returns the bare product — presumably for downstream stacking; confirm.
            return np.array([[mean_bern * (mean_pois + 1)]])
        elif state_mean_var:
            mv_bern = self.bern_mod.forecast_marginal_lf_analytic(k, X[0], phi_mu[0], phi_sigma[0], state_mean_var = state_mean_var)
            mv_pois = self.pois_mod.forecast_marginal_lf_analytic(k, X[1], phi_mu[1], phi_sigma[1], state_mean_var = state_mean_var)
            return mv_bern, mv_pois
        else:
            samps_bern = self.bern_mod.forecast_marginal_lf_analytic(k, X[0], phi_mu = phi_mu[0], phi_sigma = phi_sigma[0], nsamps = nsamps)
            samps_pois = self.pois_mod.forecast_marginal_lf_analytic(k, X[1], phi_mu = phi_mu[1], phi_sigma = phi_sigma[1], nsamps = nsamps) + np.ones([nsamps]) # Shifted Y values in the Poisson DGLM
            return samps_bern * samps_pois
    def forecast_marginal_lf_analytic_new(self, k, X = None, phi_mu = None, phi_sigma = None, nsamps = 1, mean_only = False, state_mean_var = False):
        """Variant of forecast_marginal_lf_analytic whose sampling branch
        delegates to the module-level helper forecast_marginal_lf_dcmm
        (defined elsewhere in the package); the mean_only and state_mean_var
        branches duplicate forecast_marginal_lf_analytic exactly.

        NOTE(review): only the index-0 (bernoulli-side) X / phi_mu / phi_sigma
        are forwarded to the helper — presumably it drives both component
        models through `self`; confirm against forecast_marginal_lf_dcmm.
        """
        X = self.make_pair(X)
        phi_mu = self.make_pair(phi_mu)
        phi_sigma = self.make_pair(phi_sigma)
        if mean_only:
            mean_bern = self.bern_mod.forecast_marginal_lf_analytic(k, X[0], phi_mu[0], phi_sigma[0], nsamps, mean_only)
            mean_pois = self.pois_mod.forecast_marginal_lf_analytic(k, X[1], phi_mu[1], phi_sigma[1], nsamps, mean_only)
            return np.array([[mean_bern * (mean_pois + 1)]])
        elif state_mean_var:
            mv_bern = self.bern_mod.forecast_marginal_lf_analytic(k, X[0], phi_mu[0], phi_sigma[0], state_mean_var = state_mean_var)
            mv_pois = self.pois_mod.forecast_marginal_lf_analytic(k, X[1], phi_mu[1], phi_sigma[1], state_mean_var = state_mean_var)
            return mv_bern, mv_pois
        else:
            return forecast_marginal_lf_dcmm(self, k, X[0], phi_mu[0], phi_sigma[0], nsamps=nsamps)
    def forecast_marginal_lf_sample(self, k, X = None, phi_samps = None, nsamps = 1, mean_only = False):
        """Marginal k-step-ahead forecast using sampled latent factors.

        Combines Bernoulli and Poisson draws elementwise; the +1 undoes the
        y - 1 shift applied when fitting the Poisson DGLM.

        NOTE(review): nsamps only sizes the np.ones offset — the component
        forecasts are presumably one draw per entry of phi_samps, so nsamps
        must match len(phi_samps); likewise the mean_only path still adds a
        length-nsamps vector — confirm intended shapes against the DGLM API.
        """
        X = self.make_pair(X)
        phi_samps = self.make_pair(phi_samps)
        samps_bern = self.bern_mod.forecast_marginal_lf_sample(k, X[0], phi_samps[0], mean_only)
        samps_pois = self.pois_mod.forecast_marginal_lf_sample(k, X[1], phi_samps[1], mean_only) + np.ones([nsamps]) # Shifted Y values in the Poisson DGLM
        return samps_bern * samps_pois
def forecast_path_lf_sample(self, k, X = None, phi_samps=None, nsamps = 1):
X = self.make_pair(X)
phi_samps = self.make_pair(phi_samps)
samps_bern = self.bern_mod.forecast_path_lf_sample(k, X[0], phi_samps[0], nsamps)
samps_pois = self.pois_mod.forecast_path_lf_sample(k, X[1], phi_samps[1], nsamps) + np.ones([nsamps, k]) # Shifted Y values in the Poisson DGLM
return samps_bern * samps_pois
def forecast_path(self, k, X = None, nsamps = 1):
X = self.make_pair(X)
samps_bern = self.bern_mod.forecast_path(k, X[0], nsamps)
samps_pois = self.pois_mod.forecast_path(k, X[1], nsamps) + np.ones([nsamps, k]) # Shifted Y values in the Poisson DGLM
return samps_bern * samps_pois
def forecast_path_copula(self, k, X = None, nsamps = 1, **kwargs):
X = self.make_pair(X)
samps_bern = self.bern_mod.forecast_path_copula(k, X[0], nsamps, **kwargs)
samps_pois = self.pois_mod.forecast_path_copula(k, X[1], nsamps, **kwargs) + np.ones([nsamps, k]) # Shifted Y values in the Poisson DGLM
return samps_bern * samps_pois
    def forecast_path_lf_copula(self, k, X = None, phi_mu = None, phi_sigma = None, phi_psi = None, nsamps = 1, **kwargs):
        """Copula-based joint path forecast over horizons 1..k with latent factors.

        Returns nsamps x k path samples: Bernoulli draws times
        (Poisson draws + 1), undoing the y - 1 training-time shift.

        The k == 2 branch resolves an ambiguity in the latent factor inputs:
        a flat length-2 list/tuple is read as ONE set of per-horizon moments
        shared by both DGLMs (not as a bernoulli/poisson pair), so it is
        duplicated; an already-nested sequence is left as the pair it is.
        """
        X = self.make_pair(X)
        if k == 2 and isinstance(phi_mu, (list, tuple)):
            if not isinstance(phi_mu[0], (list, tuple)):
                # Flat pair of per-horizon moments: share it across both DGLMs.
                phi_mu = (phi_mu, phi_mu)
                phi_sigma = (phi_sigma, phi_sigma)
                phi_psi = (phi_psi, phi_psi)
        else:
            phi_mu = self.make_pair(phi_mu)
            phi_sigma = self.make_pair(phi_sigma)
            phi_psi = self.make_pair(phi_psi)
        samps_bern = self.bern_mod.forecast_path_lf_copula(k, X[0], phi_mu = phi_mu[0], phi_sigma = phi_sigma[0], phi_psi = phi_psi[0], nsamps = nsamps, **kwargs)
        samps_pois = self.pois_mod.forecast_path_lf_copula(k, X[1], phi_mu = phi_mu[1], phi_sigma = phi_sigma[1], phi_psi = phi_psi[1], nsamps = nsamps, **kwargs) + np.ones([nsamps, k]) # Shifted Y values in the Poisson DGLM
        return samps_bern * samps_pois
    def forecast_path_lf_copula_new(self, k, X = None, phi_mu = None, phi_sigma = None, phi_psi = None, nsamps = 1, **kwargs):
        """Variant of forecast_path_lf_copula that delegates the joint
        bernoulli/poisson path sampling to the module-level helper
        forecast_path_lf_dcmm (defined elsewhere in the package).

        Input handling mirrors forecast_path_lf_copula, including the k == 2
        disambiguation of flat vs nested latent factor inputs.

        NOTE(review): only the index-0 (bernoulli-side) X and latent factor
        moments are forwarded — presumably the helper drives both component
        models through `self`; confirm against forecast_path_lf_dcmm.
        """
        X = self.make_pair(X)
        if k == 2 and isinstance(phi_mu, (list, tuple)):
            if not isinstance(phi_mu[0], (list, tuple)):
                # Flat pair of per-horizon moments: share it across both DGLMs.
                phi_mu = (phi_mu, phi_mu)
                phi_sigma = (phi_sigma, phi_sigma)
                phi_psi = (phi_psi, phi_psi)
        else:
            phi_mu = self.make_pair(phi_mu)
            phi_sigma = self.make_pair(phi_sigma)
            phi_psi = self.make_pair(phi_psi)
        return forecast_path_lf_dcmm(self, k, X[0], phi_mu[0], phi_sigma[0], phi_psi[0], nsamps=nsamps, **kwargs)
    def forecast_path_lf_copula_density(self, y, k, X = None, phi_mu = None, phi_sigma = None, phi_psi = (None, None), nsamps = 1, **kwargs):
        """Evaluate the path-forecast log-density of an observed path y.

        :param y: Observed count path over the k horizons (array-like).
        :return: (logdens_bern, logdens_pois) — the log-density of the
            zero/non-zero indicator path under the Bernoulli DGLM and of the
            shifted counts (y - 1, zeros treated as missing) under the
            Poisson DGLM.
        """
        X = self.make_pair(X)
        phi_mu = self.make_pair(phi_mu)
        phi_sigma = self.make_pair(phi_sigma)
        phi_psi = self.make_pair(phi_psi)
        # Binary indicator path for the Bernoulli DGLM: 1 where y > 0.
        z = np.zeros([k])
        y = y.reshape(-1)
        z[y > 0] = 1
        logdens_bern = self.bern_mod.forecast_path_lf_copula(k, X[0], phi_mu = phi_mu[0], phi_sigma = phi_sigma[0], phi_psi = phi_psi[0], nsamps = nsamps, y = z, **kwargs)
        # Shifted Y values in the Poisson DGLM
        y = y - 1
        # Cast to float so that NaN (missing) can be stored below.
        y = y.astype('float')
        # 0's in the original data (now -1's) are considered 'missing' by the Poisson model
        y[y < 0] = np.nan
        logdens_pois = self.pois_mod.forecast_path_lf_copula(k, X[1], phi_mu = phi_mu[1], phi_sigma = phi_sigma[1], phi_psi = phi_psi[1], nsamps = nsamps, y = y, **kwargs)
        return logdens_bern, logdens_pois
def forecast_state_mean_and_var(self, k = 1, X = None):
mean_var_bern = self.bern_mod.forecast_state_mean_and_var(k, X[0])
mean_var_pois = self.pois_mod.forecast_state_mean_and_var(k, X[1])
return mean_var_bern, mean_var_pois
def make_pair(self, x):
if isinstance(x, (list, tuple)):
if len(x) == 2:
return x
else:
return (x, x)
else:
return (x, x)
# A DCMM can be used in the same way as a DGLM, with the standard methods `dcmm.update`, `dcmm.forecast_marginal`, and `dcmm.forecast_path`. There are equivalent helper functions as well. A full analysis can be run with `analysis_dcmm`, and `define_dcmm` helps to initialize a DCMM. These helper functions assume that the same predictors `X` are used for the Bernoulli and Poisson DGLMs.
#
# The only difference from using a standard `dglm` is that outside of `analysis_dcmm`, the update and forecast functions do not automatically recognize whether the DCMM includes latent factors or call a copula for path forecasting. This means that the modeler needs to be more explicit in calling the correct method, such as `dcmm.forecast_path_copula` for path forecasting with a copula.
#
# A quick example of using `analysis_dcmm` to model simulated sales data follows. Another example with a DCMM can also be found [here](https://github.com/lavinei/pybats_nbdev/blob/master/examples/DCMM%20Latent%20Factor%20Example.ipynb).
# +
import pandas as pd
import numpy as np
from pybats.shared import load_sales_example2
from pybats.analysis import analysis_dcmm
from pandas.tseries.holiday import USFederalHolidayCalendar
# Load the example dataset of daily item sales with Price and Promotion covariates.
data = load_sales_example2()
data.head()
# -
prior_length = 25 # Number of days of data used to set prior
k = 7 # Forecast horizon
rho = 0.5 # Random effect discount factor to increase variance of forecast distribution
forecast_samps = 1000 # Number of forecast samples to draw
forecast_start = pd.to_datetime('2018-01-01') # Date to start forecasting
forecast_end = pd.to_datetime('2018-05-01') # Date to stop forecasting
# Holiday regressors shared by both the Bernoulli and Poisson DGLMs.
holidays = USFederalHolidayCalendar.rules
# Run a full DCMM analysis: per pybats' analysis_dcmm, this sets priors from the
# first prior_length days, then sequentially updates and draws k-step-ahead
# forecast samples between forecast_start and forecast_end, returning both the
# fitted model and the forecast samples (ret=['model', 'forecast']).
mod, samples = analysis_dcmm(data['Sales'].values, data[['Price', 'Promotion']].values,
                             k, forecast_start, forecast_end,
                             nsamps=forecast_samps,
                             prior_length=prior_length,
                             seasPeriods=[7], seasHarmComponents=[[1,2,3]],
                             dates=data.index, holidays=holidays,
                             rho=rho,
                             ret = ['model', 'forecast'])
# Because the DCMM is effectively a container for a Poisson and a Bernoulli DGLM, we can access each of them individually. The coefficients in the Bernoulli DGLM affect the probability of a non-zero observation, and the coefficients in the Poisson DGLM impact the size of any non-zero observations. To illustrate, we'll take a look at the holiday coefficients in both DGLMs.
# +
# Pull the fitted holiday ('hol') coefficients from each component DGLM.
pois_hol = mod.pois_mod.get_coef('hol')
bern_hol = mod.bern_mod.get_coef('hol')
# Assemble a side-by-side table of posterior means and standard deviations,
# one row per holiday rule, rounded for display.
coef = pd.DataFrame({'Holidays':[h.name for h in holidays],
                     'Pois Mean': pois_hol['Mean'],
                     'Pois Std Dev': pois_hol['Standard Deviation'],
                     'Bern Mean': bern_hol['Mean'],
                     'Bern Std Dev': bern_hol['Standard Deviation']}).round(2)
coef
# -
# The largest negative coefficients are for Christmas and New Years Day, which means that they are more likely to have very low or $0$ sales.
#
# The largest positive coefficients are for July 4th and Memorial day, which means that they are likely to have increased sales.
#hide
# nbdev build step: export cells tagged for export from this notebook into
# the library's Python modules.
from nbdev.export import notebook2script
notebook2script()
|
nbs/11_dcmm.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Before we begin, let's execute the cell below to display information about the CUDA driver and GPUs running on the server by running the `nvidia-smi` command. To do this, execute the cell block below by giving it focus (clicking on it with your mouse), and hitting Ctrl-Enter, or pressing the play button in the toolbar above. If all goes well, you should see some output returned below the grey cell.
# !nvidia-smi
# ## Learning objectives
# The **goal** of this lab is to:
# - Learn how to use CUDA Fortran to parallelize our code.
# - Understand the basic terms and steps involved in making a sequential code parallel.
#
# We do not intend to cover:
# - Optimization techniques like memory access patterns, memory hierarchy.
#
# # Introduction
# Graphics Processing Units (GPUs) were initially designed to accelerate graphics processing, but in 2007 the release of CUDA introduced GPUs as General Purpose Processors. CUDA is a parallel computing platform and programming model that makes using a GPU for general purpose computing simple and elegant. The developer still programs in the familiar C, C++, Fortran, or an ever expanding list of supported languages, and incorporates extensions of these languages in the form of a few basic keywords.
#
# CUDA Fortran is:
# - Based on a standard Fortran
# - A small set of extensions to enable heterogeneous programming
# - A straightforward API to manage devices, memory, etc.
#
#
# # CUDA
#
#
# **Heterogeneous Computing:** CUDA is a heterogeneous programming model that includes provisions for both a CPU and GPU. The CUDA Fortran programming interface consists of Fortran language extensions so that you can target portions of source code for parallel execution on the device (GPU). It is based on a standard Fortran and provides a library of Fortran functions that can be executed on the host (CPU) so that it can interact with the device. The two processor that work with each other are:
#
# - Host: CPU and its memory (Host Memory)
# - Device: GPU and its memory (Device Memory)
#
#
# Let us look at a Hello World! CUDA Fortran code
#
# ```fortran
#
# module printgpu
# contains
# attributes(global) subroutine print_form_gpu()
# implicit none
# integer :: i
# i = blockDim%x * (blockIdx%x - 1) + threadIdx%x
# print *, i
#  end subroutine print_form_gpu
# end module printgpu
#
# program testPrint
# use printgpu
# use cudafor
#  implicit none
#  integer :: istat
#
#  call print_form_gpu<<<1, 1>>>()
#  istat = cudaDeviceSynchronize()
# end program testPrint
#
# ```
#
# So you might have already observed that CUDA Fortran is nothing but extensions/constructs to existing language. Let us look at what those additional constructs we introduced above:
#
# - ```global``` :This attribute, when added for the function, tells the compiler that this is a subroutine that will run on the device and not on the host.
# - ``` <<<,>>> ``` : This keyword tells the compiler that this is a call to the device function and not the host function. Additionally, the 1,1 parameter basically dictates the number of threads to launch in the kernel. We will cover the parameters inside angle brackets later.
# - ``` threadIdx%x, blockIdx%x ``` : This is a unique ID that's given to all threads.
# - ``` cudaDeviceSynchronize() ``` : All of the kernel(Function that runs on GPU) calls in CUDA are asynchronous in nature. This API will make sure that host does not proceed until all device calls are over.
#
#
# ## GPU Architecture
#
# In this section will take an approach of describing the CUDA programming model by showing relationship between the software programming concepts and how do they get mapped to GPU hardware.
#
# The diagram below shows a higher level of abstraction of components of GPU hardware and its respective programming model mapping.
#
# <img src="../images/cuda_hw_sw.png">
#
# As shown in the diagram above CUDA programming model is tightly coupled with hardware design. This makes CUDA one of the most efficient parallel programming model for shared memory systems. Another way to look at the diagram shown above is given below:
#
# | Software | Executes | Hardware |
# | --- | --- | --- |
# | CUDA thread | on/as | CUDA Core |
# | CUDA block | on/as | Streaming Multiprocessor |
# | GRID/Kernel | on/as | GPU Device |
#
# We will get into the concept of _blocks_ and _threads_ in upcoming section. But let us first look at steps involved in writing CUDA code.
#
#
# ## Steps in CUDA Programming
#
# The below table highlights the typical steps which are required to convert sequential code to CUDA code:
#
# | Sequential code | CUDA Code |
# | --- | --- |
# | **Step 1** Allocate memory on the CPU | **Step 1** : Allocate memory on the CPU |
# | **Step 2** Populate/initialize the CPU data | **Step 2** Allocate memory on the GPU, defining array as _device_ type |
# | **Step 3** Call the CPU function that has the crunching of data. | **Step 3** Populate/initialize the CPU |
# | **Step 4** Consume the crunched data on Host | **Step 4** Transfer the data from the host to the device with _cudaMemcpy()_ |
# | | **Step 5** Call the GPU function with _<<<,>>>_ brackets |
# | | **Step 6** Synchronize the device and host with _cudaDeviceSynchronize()_ |
# | | **Step 7** Transfer data from the device to the host with _cudaMemcpy()_ |
# | | **Step 8** Consume the crunched data on Host |
#
# CPU and GPU memory are different and developer needs to use additional CUDA API to allocate and free memory on GPU. Only device memory can be consumed inside GPU function call (kernel). Linear memory on Device is typically allocated by defining array as ```allocatable, device``` type and data transfer between host memory and device memory are typically done using ```cudaMemcpy()```.
#
#
# The API definition of these are as follows:
#
#
# **integer function cudaMemcpy(dst, src, count, kdir)** cudaMemcpy copies data from one location to another. dst and src , where kind is one of ```cudaMemcpyHostToHost```, ```cudaMemcpyHostToDevice```, ```cudaMemcpyDeviceToHost```, or ```cudaMemcpyDeviceToDevice```, and specifies the direction of the copy. Calling cudaMemcpy() with dst and src pointers that do not match the direction of the copy results in an undefined behavior
#
# Let us look at these steps in more detail for a simple vector addition code:
#
# ```fortran
# module kernel
# contains
# # ! CUDA kernel. Each thread takes care of one element of c
# attributes(global) subroutine vecAdd_kernel(n, a, b, c)
# integer, value :: n
# real(8), device :: a(n), b(n), c(n)
# integer :: id
#
# # ! Get our global thread ID
# id = (blockidx%x-1)*blockdim%x + threadidx%x
#
# # ! Make sure we do not go out of bounds
# if (id <= n) then
# c(id) = a(id) + b(id)
# endif
# end subroutine vecAdd_kernel
# end module kernel
#
# program main
# use cudafor
# use kernel
#
# type(dim3) :: blockSize, gridSize
# real(8) :: sum
# integer :: i
#
# # ! Size of vectors
# integer :: n = 1
#
# # ! Host input vectors
# real(8),dimension(:),allocatable :: h_a
# real(8),dimension(:),allocatable :: h_b
# # !Host output vector
# real(8),dimension(:),allocatable :: h_c
#
# # ! Device input vectors
# real(8),device,dimension(:),allocatable :: d_a
# real(8),device,dimension(:),allocatable :: d_b
# # !Host output vector
# real(8),device,dimension(:),allocatable :: d_c
#
# # ! Allocate memory for each vector on host
# allocate(h_a(n))
# allocate(h_b(n))
# allocate(h_c(n))
#
# # ! Allocate memory for each vector on GPU
# allocate(d_a(n))
# allocate(d_b(n))
# allocate(d_c(n))
#
# # ! Initialize content of input vectors, vector a[i] = sin(i)^2 vector b[i] = cos(i)^2
# do i=1,n
# h_a(i) = sin(i*1D0)*sin(i*1D0)
# h_b(i) = cos(i*1D0)*cos(i*1D0)
# enddo
#
# # ! Implicit copy of host vectors to device
# d_a = h_a(1:n)
# d_b = h_b(1:n)
#
#
# # ! Execute the kernel
# call vecAdd_kernel<<<1, 1>>>(n, d_a, d_b, d_c)
#
# # ! Implicit copy of device array to host
# h_c = d_c(1:n)
#
# # ! Sum up vector c and print result divided by n, this should equal 1 within error
# sum = 0.0;
# do i=1,n
# sum = sum + h_c(i)
# enddo
# sum = sum/real(n)
# print *, 'final result: ', sum
#
# # ! Release device memory
# deallocate(d_a)
# deallocate(d_b)
# deallocate(d_c)
#
# # ! Release host memory
# deallocate(h_a)
# deallocate(h_b)
# deallocate(h_c)
#
# end program main
# ```
#
#
# ### Unified Memory
# An easier way to allocate memory accessible by the GPU is to use *Unified Memory*. It provides a single memory space accessible by all GPUs and CPUs in the system. To allocate data in unified memory, we declare the array with the `managed` attribute. To read more about unified memory, please check out the blog on [Unified Memory for CUDA beginners](https://developer.nvidia.com/blog/unified-memory-cuda-fortran-programmers/).
#
# <img src="../images/unified_memory.png">
#
# Below is the example usage of how to use managed memory in the CUDA code:
#
# ```fortran
# # !matrix data
# real, managed, allocatable, dimension(:,:) :: A, B, C
# ```
#
# ## Understanding Threads and Blocks
# We will be looking at understanding _thread_ and _block_ level parallelism in this section.The number of threads and blocks to be launched is passed as parameter to ```<<<,>>>``` brackets in a kernel call.
#
# ### Creating multiple blocks
#
# In order to create multiple blocks for vector addition code above you need to change two things:
# 1. Change _<<<1,1>>>_ to _<<<N,1>>>_ which launches N blocks
# 2. Access the array with block index using private variable passed by default to CUDA kernel: _blockIdx%x_
#
# ```fortran
# attributes(global) subroutine vecAdd_kernel(n, a, b, c)
# integer, value :: n
# real(8), device :: a(n), b(n), c(n)
# integer :: id
#
# # ! Get our global thread ID
# id = blockidx%x
#
# # ! Make sure we do not go out of bounds
# if (id <= n) then
# c(id) = a(id) + b(id)
# endif
# end subroutine vecAdd_kernel
#
# ```
#
# By using blockIdx%x to index the array, each block handles a different element of the array and may execute in parallel to each other.
#
# | Block Id | Performs |
# | --- | --- |
# | Block 0 | _c\[0\]=b\[0\]+a\[0\]_ |
# | Block 1 | _c\[1\]=b\[1\]+a\[1\]_ |
# | Block 2 | _c\[2\]=b\[2\]+a\[2\]_ |
#
#
# ### Creating multiple threads
#
# In order to create multiple threads for vector addition code above. You need to change two things:
# 1. Change _<<<1,1>>>_ to _<<<1,N>>>_ which launches N threads inside 1 block
# 2. Access the array with thread index using private variable passed by default to CUDA kernel: _threadIdx.x_
#
# ```fortran
# attributes(global) subroutine vecAdd_kernel(n, a, b, c)
# integer, value :: n
# real(8), device :: a(n), b(n), c(n)
# integer :: id
#
# # ! Get our global thread ID
# id = threadidx%x
#
# # ! Make sure we do not go out of bounds
# if (id <= n) then
# c(id) = a(id) + b(id)
# endif
# end subroutine vecAdd_kernel
# ```
#
# By using threadIdx.x to index the array, each thread handles a different element of the array and can execute in parallel.
#
# | thread Id | Performs |
# | --- | --- |
# | Thread 0 | _c\[0\]=b\[0\]+a\[0\]_ |
# | Thread 1 | _c\[1\]=b\[1\]+a\[1\]_ |
# | Thread 2 | _c\[2\]=b\[2\]+a\[2\]_ |
#
#
# ### Creating multiple blocks each having many threads
#
# So far, we've looked at parallel vector addition through the use of several blocks with one thread and one block with several
# threads. Now let us look at creating multiple blocks, each block containing multiple threads.
#
# To understand it lets take a scenario where the total number of vector elements is 32 which needs to be added in parallel. Total number of parallel execution unit required is 32. As a first step let us define that each block contains eight threads(we are not saying this is optimal configuration and is just for explanation purpose). Next we define the number of blocks. The simplest calculation is No_Of_Blocks = 32/8 where 8 is number of threads per blocks. The code changes required to launch 4 blocks with 8 thread each is as shown below:
# 1. Change _<<<1,1>>>_ to _<<<4,8>>>_ which launches 4 blocks with 8 threads each
# 2. Access the array with both the thread index and block index using the variables available inside the CUDA kernel: _threadIdx%x_, _blockIdx%x_, and _blockDim%x_ (the number of threads allocated per block).
#
# ```fortran
# # ! Number of threads in each thread block
# blockSize = dim3(8,1,1)
# # ! Number of thread blocks in grid
# gridSize = dim3(ceiling(real(n)/real(blockSize%x)) ,1,1)
# call vecAdd_kernel<<<gridSize, blockSize>>>(n, d_a, d_b, d_c)
#
# # ! CUDA kernel. Each thread takes care of one element of c
# attributes(global) subroutine vecAdd_kernel(n, a, b, c)
# integer, value :: n
# real(8), device :: a(n), b(n), c(n)
# integer :: id
#
# # ! Get our global thread ID
# id = (blockidx%x-1)*blockdim%x + threadidx%x
#
# # ! Make sure we do not go out of bounds
# if (id <= n) then
# c(id) = a(id) + b(id)
# endif
# end subroutine vecAdd_kernel
# ```
#
# The diagram below shows the launch configuration that we discussed so far:
#
# <img src="../images/cuda_indexing.png">
#
# Modern GPU Architectures consists of multiple SM, each consisting of number of cores. In order to utilize whole GPU it is important to make use of both threads and blocks.
#
# The more important question which may arise is why bother with threads altogether? What do we gain by adding additional level of parallelism? Short answer is CUDA programming model defines that unlike parallel blocks, threads have mechanisms to efficiently communicate and synchronize.
#
# This is necessary to implement certain algorithms where threads needs to communicate with each other.We do not require synchronization across threads in **Pair Calculation** so we will not be going into details of concept of synchronization across threads and usage of specialized memory like _shared_ memory in this tutorial.
#
# # Atomic Construct
#
# In the code you will also require one more construct which will help you in getting the right results. Atomic construct ensures that a particular variable is accessed and/or updated atomically to prevent indeterminate results and race conditions. In other words, it prevents one thread from stepping on the toes of other threads due to accessing a variable simultaneously, resulting in different results run-to-run. For example, if I want to count the number of elements that have a value greater than zero, we could write the following:
#
# ```fortran
#
# if(r<cut)then
# oldvalue = atomicadd(g(ind),1.0d0)
# endif
#
# ```
#
# # A Quick Recap
# We saw the definition of CUDA and CUDA C. We covered briefly CUDA architecture and introduced CUDA C constructs. Also we played with block and thread configurations for a simple vector addition code. All this was done under the following restrictions:
# 1. **Multiple Dimension**: We launched threads and blocks in one dimension. We have been using _threadIdx.x_ and _blockIdx.x_, so what is _.x_ ? This says that we are launching threads and blocks in one dimension only. CUDA allows launching threads in 3 dimensions. You can also have _.y_ and _.z_ for index calculation. For example, you can launch threads and blocks in 2 dimensions to divide work for a 2D image. Also, the maximum number of threads per block and number of blocks allowed per dimension is restricted based on the GPU that the code is run on.
# 2. **GPU Memory**: What we have not covered is that GPU has different hierarchy of memory, e.g. GPU has a read only memory which provides high bandwidth for 2D and 3D locality access called _texture_. Also GPU provides a scratch pad limited memory called as _shared memory_
# 3. **Optimization** : What we did not cover so far is the right way to access the compute and memory to get max performance.
#
# **One key characteristic about CUDA is that a user can control access pattern of data for each thread. The user can decide which part of memory the data can sits on. While we are covering some part of this in this lab, which is required for us to port our code, we do not intend to cover all optimizations**
# ## Compile and Run for NVIDIA GPU
# Now, lets start modifying the original code and add CUDA C constructs. You can either explicitly transfer the allocated data between CPU and GPU or use unified memory which creates a pool of managed memory that is shared between the CPU and GPU.
#
# Click on the <b>[rdf.f90](../../source_code/cudafortran/rdf.f90)</b> link and modify `rdf.f90`. Remember to **SAVE** your code after changes, before running below cells.
#compile for Tesla GPU
# !cd ../../source_code/cudafortran && nvfortran -cuda -o rdf nvtx.f90 rdf.f90 -L/opt/nvidia/hpc_sdk/Linux_x86_64/21.3/cuda/11.2/lib64 -lnvToolsExt
# Make sure to validate the output by running the executable and validate the output.
#Run on Nvidia GPU
# !cd ../../source_code/cudafortran && ./rdf && cat Pair_entropy.dat
# The output should be the following:
#
# ```
# s2 : -2.452690945278331
# s2bond : -24.37502820694527
# ```
#profile and see output of nvptx
# !cd ../../source_code/cudafortran && nsys profile -t nvtx,cuda --stats=true --force-overwrite true -o rdf_cuda ./rdf
# Let's checkout the profiler's report. Download and save the report file by holding down <mark>Shift</mark> and <mark>Right-Clicking</mark> [Here](../../source_code/cudafortran/rdf_cuda.qdrep) and open it via the GUI. Have a look at the example expected profiler report below:
#
# <img src="../images/cuda_profile_timeline.jpg">
#
# Nsight systems is capable of capturing information about CUDA execution in the profiled process.CUDA API row in the timeline view shows traces of CUDA Runtime and Driver calls made by application. As shown in image above, if you hover your mouse over it, you will see more information about the calls.
#
#
# Near the bottom of the timeline row tree, the GPU node will appear and contain a CUDA node. Within the CUDA node, each CUDA context used within the process will be shown along with its corresponding CUDA streams. Streams will contain memory operations and kernel launches on the GPU. In this example screenshot, unified memory was used rather than explicitly transferring data between CPU and GPU.
#
#
# Feel free to checkout the [solution (with managed memory)](../../source_code/cudafortran/SOLUTION/rdf_unified_memory.cu) to help you understand better or compare your implementation with the sample solution.
#
#
# # CUDA Fortran Analysis
#
# **Usage Scenarios**
#
# Using launguage extensions like CUDA C, CUDA Fortran helps developers get the best performance out of their code on an NVIDIA GPU. CUDA C and other language construct exposes the GPU architecture and programming model which gives more control to developers with respect to memory storage, access and thread control. Based on the type of application it can provide many fold improvement over say compiler generated codes with help of directives.
#
# **How is CUDA different from other GPU progamming models like OpenACC and OpenMP?**
#
# CUDA Fortran should not be considered an alternative to OpenMP or OpenACC. In fact CUDA complements directive-based programming models and there are defined interoperability strategies between them. You can always start accelerating your code with OpenACC and use CUDA Fortran to optimize the most performance critical kernels. For example use OpenACC for data transfer and then pass a device pointer to one of critical CUDA kernels which is written in CUDA Fortran.
# ## Post-Lab Summary
#
# If you would like to download this lab for later viewing, it is recommend you go to your browsers File menu (not the Jupyter notebook file menu) and save the complete web page. This will ensure the images are copied down as well. You can also execute the following cell block to create a zip-file of the files you've been working on, and download it with the link below.
# + language="bash"
# cd ..
# rm -f nways_files.zip
# zip -r nways_files.zip *
# -
# **After** executing the above zip command, you should be able to download and zip the zip file by holding down <mark>Shift</mark> and <mark>Right-Clicking</mark> [Here](../nways_files.zip).
#
# Let us now go back to parallelizing our code using other approaches.
#
# **IMPORTANT**: Please click on **HOME** to go back to the main notebook for *N ways of GPU programming for MD* code.
#
# -----
#
# # <p style="text-align:center;border:3px; border-style:solid; border-color:#FF0000 ; padding: 1em"> <a href=../../../nways_MD_start.ipynb>HOME</a></p>
#
# -----
#
#
# # Links and Resources
# [Introduction to CUDA](https://devblogs.nvidia.com/even-easier-introduction-cuda/)
#
# [NVIDIA Nsight System](https://docs.nvidia.com/nsight-systems/)
#
# [CUDA Toolkit Download](https://developer.nvidia.com/cuda-downloads)
#
# **NOTE**: To be able to see the Nsight System profiler output, please download Nsight System latest version from [here](https://developer.nvidia.com/nsight-systems).
#
# Don't forget to check out additional [OpenACC Resources](https://www.openacc.org/resources) and join our [OpenACC Slack Channel](https://www.openacc.org/community#slack) to share your experience and get more help from the community.
#
# ---
#
# ## Licensing
#
# This material is released by OpenACC-Standard.org, in collaboration with NVIDIA Corporation, under the Creative Commons Attribution 4.0 International (CC BY 4.0).
|
hpc/nways/nways_labs/nways_MD/English/Fortran/jupyter_notebook/cudafortran/nways_cuda.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Notebook setup: plotting stack, pandas/numpy, and project experiment dir.
import collections
import glob
import json
import os
from pprint import pprint
from sys_config import EXP_DIR
import warnings
warnings.filterwarnings('ignore')
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib import colors
from matplotlib import rc
import pandas as pd
import seaborn as sns
import numpy as np
import json
# Default seaborn look; individual cells below override fonts/sizes for the paper.
sns.set_style("white")
sns.set_context("notebook")
plt.rcParams['font.family'] = "serif"
from IPython.core.display import display, HTML
# %matplotlib inline
# # %matplotlib notebook
# # %matplotlib widget
# # %matplotlib ipympl
# +
# Load per-token entropy dumps (DE->EN test set) for each trained model
# variant. Based on usage below, each JSON holds lists under keys such as
# "tm" (translation model entropies) and "lm" (language model entropies)
# -- assumed from the plot labels; confirm against the dumping script.
base = json.load(open("entropies_test_deen_base.json"))
base_ls = json.load(open("entropies_test_deen_base_ls.json"))
postnorm = json.load(open("entropies_test_deen_postnorm.json"))
prior_3M = json.load(open("entropies_test_deen_prior_3M_kl.json"))
prior_30M = json.load(open("entropies_test_deen_prior_30M_kl.json"))
rprior_30M = json.load(open("entropies_test_deen_prior_30M_rkl.json"))
prior_3M_ls = json.load(open("entropies_test_deen_prior_3M_kl_ls.json"))
# prior_30M_ls = json.load(open("entropies_test_deen_prior_30M_kl_ls.json"))
# prior_30M_ls: lm, tm
# -
def set_size(width, fraction=1):
    """Compute figure dimensions in inches that fit a LaTeX text width.

    Parameters
    ----------
    width : float
        Document text width in points (pt).
    fraction : float, optional
        Fraction of ``width`` the figure should occupy. Defaults to 1.

    Returns
    -------
    tuple of float
        ``(width_in, height_in)`` — figure size in inches, with the height
        derived from the width via the golden ratio.
    """
    pt_per_inch = 72.27                 # printer's points per inch
    golden_ratio = (5 ** .5 - 1) / 2    # aesthetically pleasing aspect ratio
    width_in = width * fraction / pt_per_inch
    height_in = width_in * golden_ratio
    return (width_in, height_in)
# +
# Configure matplotlib/seaborn for camera-ready (LaTeX, 8pt serif) figures.
plt.style.use('classic')
nice_fonts = {
    # Use LaTeX to write all text
    "text.usetex": True,
    "font.family": "serif",
    # Use 10pt font in plots, to match 10pt font in document
    "axes.labelsize": 8,
    "font.weight": "bold",
    "axes.labelweight": "bold",
    "font.size": 8,
    # Make the legend/label fonts a little smaller
    "legend.fontsize": 8,
    "xtick.labelsize": 8,
    "ytick.labelsize": 8,
}
rc('text', usetex=True)
sns.set_style("white", {
    "font.family": "serif",
    "font.serif": ["CMU Serif", "Times", "serif"],
})
plt.rcParams.update(nice_fonts)
# sns.set_palette("deep")
# sns.set_palette("muted")
# sns.set_palette("bright")
# Set the font to be serif, rather than sans
# sns.set(font='serif')
# 219.08612pt is presumably the LaTeX \columnwidth of the target paper -- verify.
fig_width_pt = 219.08612
figsize = set_size(fig_width_pt)
# plt.rcParams["figure.figsize"] = (figsize[0]*1.15, figsize[1]*1.2)
plt.rcParams["figure.figsize"] = (figsize[0]*1.2, figsize[1]*1.45)
plt.rcParams["legend.numpoints"] = 2.
# plt.rcParams["font.weight"] = "bold"
# plt.rcParams["axes.labelweight"] = "bold"
plt.rcParams["legend.labelspacing"] = 0.2
plt.rcParams["legend.handlelength"] = 2.8
# sns.set(font_scale=1.2) # crazy big
plt.rcParams['axes.formatter.useoffset'] = False
plt.rcParams['xtick.major.pad']='0'
plt.rcParams['ytick.major.pad']='0'
# histogram bin count shared by the distplot calls below
bins = 100
# -
# Render the entropy-density figure once per candidate color palette, saving
# incremental PDFs (base curves, +prior, +postnorm) to compare palettes.
learnui = ["#003f5c", "#58508d", "#bc5090", "#ff6361", "#ffa600"]  # unused custom palette, kept for reference
bw = 0.22  # KDE bandwidth for all curves
for p in ["tab10", "deep", "muted", "bright", "colorblind"]:
    sns.set_palette(p)
    sns.distplot(base["tm"], label="Base", bins=bins, hist=False, kde_kws={"shade": False, "lw": 1.6, "bw":bw, 'linestyle':':', 'dash_capstyle':'round', } )
    sns.distplot(base_ls["tm"], label=r"Base+\textsc{ls}", bins=bins, hist=False, kde_kws={"shade": False, "lw": 1.6, "bw":bw, 'linestyle':':', 'dash_capstyle':'round', } )
    sns.distplot(prior_30M["lm"], label="LM", bins=bins, hist=False, kde_kws={"shade": False, "lw": 1.3, "bw":bw, 'linestyle':'--', })
    # sns.distplot(prior_30M["tm"], label="Base+Prior", bins=bins, hist=False, kde_kws={"shade": False, "lw": 1.3, "bw":bw, 'linestyle':'-', 'dash_capstyle':'round', } )
    # sns.distplot(rprior_30M["tm"], label="Base+RPrior", bins=bins, hist=False, kde_kws={"shade": False, "lw": 1.3, "bw":bw, 'linestyle':'-', 'dash_capstyle':'round', } )
    # sns.distplot(postnorm["tm"], label=r"\textsc{postnorm}", bins=bins, hist=False, kde_kws={"shade": False, "lw": 1.3, "bw":bw, 'linestyle':'-', 'dash_capstyle':'round', } )
    plt.ylabel("density",labelpad=2)
    plt.xlabel("entropy per token",labelpad=2)
    plt.xlim(-0.2, 7)
    plt.ylim(0,1)
    ax = plt.gca()
    ax.set_xticks([0, 1, 3, 5 ,7])
    ax.ticklabel_format(useOffset=False)
    plt.legend(ncol=1, prop={'weight':'bold'})
    plt.tight_layout()
    plt.savefig(f'entropy_{p}.pdf', bbox_inches='tight', format="pdf",pad_inches = 0)
    # overlay the prior curve and save an augmented variant of the same figure
    sns.distplot(prior_30M["tm"], label=r"Base+Prior",bins=bins, hist=False, kde_kws={"shade": False, "lw": 1.3, "bw":bw, 'linestyle':'-', 'dash_capstyle':'round', } )
    plt.savefig(f'entropy_{p}_prior.pdf', bbox_inches='tight', format="pdf",pad_inches = 0)
    sns.distplot(postnorm["tm"], label=r"\textsc{postnorm}",bins=bins, hist=False, kde_kws={"shade": False, "lw": 1.3, "bw":bw, 'linestyle':'-', 'dash_capstyle':'round', } )
    plt.savefig(f'entropy_{p}_postnorm.pdf', bbox_inches='tight', format="pdf",pad_inches = 0)
    # plt.savefig(f'entropy_{p}.svg', bbox_inches='tight', format="svg",pad_inches = 0)
    plt.show()
    plt.clf()
# +
# Final paper figure: KDE of per-token entropy for the five model variants,
# computed directly with scipy (fixed evaluation grid) instead of seaborn.
from scipy.stats import gaussian_kde
xs = np.linspace(0,9,100)
kde = lambda x: gaussian_kde(x, bw_method=0.3)(xs)
props = dict(linewidth=2)
sns.set_palette("bright")
nice_fonts = {
    # Use LaTeX to write all text
    "text.usetex": True,
    "font.family": "serif",
    # Use 10pt font in plots, to match 10pt font in document
    "axes.labelsize": 8,
    "font.size": 8,
    # Make the legend/label fonts a little smaller
    "legend.fontsize": 8,
    "xtick.labelsize": 8,
    "ytick.labelsize": 8,
}
plt.rcParams.update(nice_fonts)
# fig, ax = plt.subplots()
fig_width_pt = 219.08612
figsize = set_size(fig_width_pt)
figsize = (figsize[0]*1.4, figsize[1]*1.6)
figsize = (4.*0.9, 3.*0.9)  # overrides the computed size with a fixed 3.6x2.7in canvas
fig, ax = plt.subplots(1, 1, figsize=figsize)
ax.plot(xs, kde(prior_3M["lm"]), label="LM", linestyle='--', **props)
ax.plot(xs, kde(base["tm"]), label="Base", linestyle=':', **props)
ax.plot(xs, kde(base_ls["tm"]), label=r"Base+\textsc{LS}", linestyle=':', **props)
ax.plot(xs, kde(prior_3M["tm"]), label=r"\textsc{LM}-prior", linestyle="-", **props)
ax.plot(xs, kde(postnorm["tm"]), label=r"\textsc{postnorm}", linestyle='-', **props)
ax.set_xlabel("entropy per token")
ax.set_ylabel("density")
plt.legend(ncol=2, prop={'weight':'bold'})
plt.xlim(0, 7)
plt.ylim(0, 0.8)
fig.tight_layout()
plt.savefig('entropy.pdf', bbox_inches='tight', format="pdf")
plt.show()
|
analysis/entropy/entropy_analysis.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
# TF1-style autoencoder notebook setup (claude is the project's comms-ML lib).
import matplotlib
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
import tensorflow_probability as tfp
tfd = tfp.distributions
import claude.utils as cu
import claude.tx as ctx
import claude.claudeflow.autoencoder as ae
import claude.claudeflow.helper as cfh
import claude.claudeflow.training as cft
print('Tensorflow version:', tf.__version__)
print('Tensorflow probability version:', tfp.__version__)
# +
# Fix seeds for reproducibility, then collect all hyper-parameters in
# attribute-dict namespaces (channel / auto-encoder / training).
seed = 42
tf.set_random_seed(seed)
np.random.seed(seed)
# Parameters
# Channel Parameters
chParam = cu.AttrDict()
chParam.M = 64          # constellation order (number of symbols)
chParam.SNR_db = 15     # channel signal-to-noise ratio in dB
# Auto-Encoder Parameters
aeParam = cu.AttrDict()
aeParam.constellationDim = 2
aeParam.constellationOrder = chParam.M
aeParam.temperature = 1  # Gumbel-softmax temperature
aeParam.nLayersEnc = 1
aeParam.nLayersDec = 2
aeParam.nHiddenEnc = 128
aeParam.nHiddenDec = 128
aeParam.activation = tf.nn.relu
aeParam.dtype = tf.float32
aeParam.cpx_dtype = tf.complex64
# Training Parameters
trainingParam = cu.AttrDict()
trainingParam.nBatches = 16
trainingParam.batchSize = 32*chParam.M
trainingParam.learningRate = 0.001
trainingParam.displayStep = 5    # print metrics every N epochs
trainingParam.iterations = 75
# -
# TF constants
one = tf.constant(1, aeParam.dtype)
twoZeroCpx = tf.constant(2, aeParam.cpx_dtype)
# +
def p_norm(p, x, fun=lambda x: tf.square(tf.abs(x))):
    # Probability-weighted sum of fun(x); with the default fun this is the
    # expected power E_p[|x|^2], used below to normalize the constellation.
    return tf.reduce_sum(p * fun(x))
def r2c(x):
    # Cast a real tensor to the configured complex dtype (aeParam.cpx_dtype).
    return tf.cast(x, aeParam.cpx_dtype)
def c2r(x):
    # Cast to the configured real dtype. NOTE(review): tf.cast on a complex
    # input discards the imaginary part -- confirm that is intended here.
    return tf.cast(x, aeParam.dtype)
@tf.custom_gradient
def straight_through_estimator(s_bar):
    # Forward pass: hard one-hot symbol via argmax of the soft sample.
    s = tf.one_hot(tf.math.argmax(s_bar, axis=-1), chParam.M)
    def grad(dy):
        # Backward pass: identity gradient -- the non-differentiable argmax is
        # bypassed (straight-through estimator).
        return dy
    return s, grad
# +
# Build the TF1 graph: Gumbel-softmax symbol sampling (probabilistic shaping),
# a learned 2-D constellation (geometric shaping), and power normalization.
SNR_db = tf.placeholder(aeParam.dtype, shape=(1,1))
# Sampling
temperature = tf.constant(aeParam.temperature, aeParam.dtype)
enc_inp = tf.constant(1, dtype=aeParam.dtype, shape=(1,1))
# learned unnormalized log-probabilities over the M symbols
s_logits = ae._encoder(enc_inp, aeParam.nHiddenEnc, aeParam.nLayersEnc, aeParam.activation, nOutput=chParam.M, name='encoder')
g_dist = tfd.Gumbel(loc=0., scale=1.)
g = g_dist.sample(sample_shape=[trainingParam.batchSize, chParam.M])
# Gumbel-softmax: differentiable approximation of categorical sampling
s_bar = tf.nn.softmax((g + s_logits)/temperature)
# straight through estimator
s = straight_through_estimator(s_bar)
# -
# geo shaping
enc_vec = ae._encoder(s, aeParam.nHiddenEnc, aeParam.nLayersEnc, aeParam.activation, nOutput=aeParam.constellationDim, name='encoder_geo')
# identity matrix enumerates all M one-hot symbols; reusing name='encoder_geo'
# shares weights, so this yields the full learned constellation
xSeed = tf.linalg.eye(aeParam.constellationOrder, dtype=s.dtype)
constellation_vec = ae._encoder(xSeed, aeParam.nHiddenEnc, aeParam.nLayersEnc, aeParam.activation, nOutput=aeParam.constellationDim, name='encoder_geo')
constellation = tf.expand_dims(tf.complex(constellation_vec[:, 0], constellation_vec[:, 1]), 0)
enc = tf.expand_dims(tf.complex(enc_vec[:, 0], enc_vec[:, 1]), -1)
# normalization & Modulation
p_s = tf.nn.softmax(s_logits)
# scale so that the probability-weighted constellation power is 1
norm_factor = tf.rsqrt(p_norm(p_s, constellation))
norm_constellation = r2c(norm_factor) * constellation
x = r2c(norm_factor) * enc
# +
# checks
# sanity check: normalized constellation power should always equal 1
should_always_be_one = p_norm(p_s, norm_constellation)
# Channel: AWGN with noise power set from the SNR placeholder
noise_cpx = tf.complex(tf.random_normal(shape=tf.shape(x), dtype=aeParam.dtype),
                       tf.random_normal(shape=tf.shape(x), dtype=aeParam.dtype))
sigma2 = one / cfh.dB2lin(SNR_db, 'dB')
# rsqrt(2) splits the noise power equally between I and Q components
noise_snr = r2c(tf.sqrt(sigma2)) * tf.rsqrt(twoZeroCpx) * noise_cpx
y = x + noise_snr
# demodulator
y_vec = cfh.complex2real(tf.squeeze(y))
dec = ae._encoder(y_vec, aeParam.nHiddenDec, aeParam.nLayersDec, aeParam.activation, nOutput=chParam.M, name='decoder')
# +
# loss: cross-entropy of the decoder minus the source entropy, so the shaping
# distribution is rewarded for carrying information (mutual-information proxy)
loss = tf.nn.softmax_cross_entropy_with_logits_v2(labels=tf.stop_gradient(s), logits=dec)
entropy_S = -p_norm(p_s, p_s, lambda x: cfh.log2(x))
loss_hat = loss - entropy_S
gaussian_MI = cfh.gaussianMI(x, y, norm_constellation, chParam.M, dtype=aeParam.dtype)
optimizer = tf.train.AdamOptimizer(learning_rate=trainingParam.learningRate).minimize(loss_hat)
metricsDict = {'loss_hat': loss_hat, 'gaussian_MI_metric': gaussian_MI}
meanMetricOpsDict, updateOps, resetOps = cft.create_mean_metrics(metricsDict)
# -
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
# +
# Training loop: reset running metrics each epoch, run nBatches optimizer
# steps, then report the mean metrics every displayStep epochs.
np_SNR_db = chParam.SNR_db * np.ones((1,1))
for epoch in range(1, trainingParam.iterations+1):
    sess.run(resetOps)
    for batch in range(0,trainingParam.nBatches):
        feedDict = {SNR_db: np_SNR_db}
        sess.run([optimizer, updateOps], feed_dict=feedDict)
    [outLossHat, outGausianMI, outShouldAlwaysBeOne] = sess.run([meanMetricOpsDict['loss_hat'],
                                                                 meanMetricOpsDict['gaussian_MI_metric'],
                                                                 should_always_be_one], feed_dict=feedDict)
    if epoch%trainingParam.displayStep == 0:
        print('epoch: {} - lossHat: {:.3} - gaussianMI: {:.3} - always 1: {:.2}'.format(epoch, outLossHat, outGausianMI, outShouldAlwaysBeOne))
# +
# evaluation
# NOTE(review): reuses `feedDict` left over from the training loop above.
sess.run(resetOps)
for batch in range(0, 1000):
    sess.run(updateOps, feed_dict=feedDict)
outGausianMI = sess.run(meanMetricOpsDict['gaussian_MI_metric'], feed_dict=feedDict)
print('Final MI: {:.3}'.format(outGausianMI))
# -
# Collect transmitted/received samples for the constellation heatmap below.
np_x = []
np_y = []
for _ in range(1000):
    temp_x, temp_y = sess.run([x, y], feed_dict=feedDict)
    np_x.append(temp_x)
    np_y.append(temp_y)
# +
# Flatten collected samples and render a 2-D histogram of the learned
# constellation; a little noise is added to x so point masses become visible.
all_y = np.reshape(np.stack(np_y),-1)
all_x = np.reshape(np.stack(np_x),-1)
noise = np.random.normal(0,1,size=all_x.shape)+1j*np.random.normal(0,1,size=all_x.shape)
all_x = all_x + 0.05*noise
# +
heatmap, xedges, yedges = np.histogram2d(np.real(all_x), np.imag(all_x), bins=500)
extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
plt.figure(figsize=(8,8))
plt.clf()
plt.imshow(heatmap.T, extent=extent, origin='lower')
plt.axis('square');
|
examples/tf_AutoEncoderForJointGeoProbShapingAndAwgn.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: tacotron2
# language: python
# name: tacotron2
# ---
import glob
import librosa
import os
import sys
from pathlib import Path
from glob import glob
from os.path import join, exists, basename, splitext
# ## Encoder model data preprocessing
# ST-CMDS dataset processing: bucket wav files into per-speaker directories.
# cd /search/hadoop07/wuzijun/voice_data/STcmds
# NOTE(review): `from glob import glob` below (file header) shadows the
# `glob` module, so `glob.glob(...)` only works if cells ran in a different
# order than shown here.
file_list = glob.glob('./ST-CMDS-20170001_1-OS/*.wav')
splitext(basename(file_list[0]))
# characters 9..14 of the file stem identify the speaker
speaker_list = set([splitext(basename(f))[0][9:15] for f in file_list])
len(speaker_list)
for speaker in speaker_list:
    Path(f'./wav/{speaker}').mkdir(exist_ok=True)
    os.system(f'mv ./ST-CMDS-20170001_1-OS/*{speaker}* ./wav/{speaker}/')
# Primewords dataset processing
# cd ../Primewords/
import pandas as pd
import json
js = json.load(open('/search/hadoop07/wuzijun/voice_data/Primewords/set1_transcript.json', 'r'))
wave_data = pd.DataFrame(js)
wave_data.head(10)
max(wave_data.user_id.apply(lambda x: len(str(x))))
wave_data.dtypes
# normalize user ids to fixed-width 4-digit strings
wave_data.user_id = wave_data.user_id.apply(lambda x: str(10000 + int(x))[-4:])
len(set(wave_data.user_id))
wave_data.to_csv('./transcript/transcript.txt', header=None, index=None)
# NOTE(review): the next line is a raw shell command (not valid Python) --
# it was pasted from a terminal and will not run as part of this script.
find ./audio_files/ -type f -exec mv {} ./wav/ \;
for speaker in set(wave_data.user_id):
    Path(f'./wav/{speaker}').mkdir(exist_ok=True)
wave_data[['file', 'user_id']] \
    .apply(lambda x: os.system(f'mv ./wav/{x[0]} ./wav/{x[1]}'), axis=1)
# ## Synthesizer model数据处理
# ## Synthesizer model data preprocessing
# pypinyin setup: convert Chinese text to numbered-tone (TONE3) pinyin.
from pypinyin import pinyin, lazy_pinyin, Style
pinyin('算了吧', style=Style.TONE3)
from pypinyin.contrib.neutral_tone import NeutralToneWith5Mixin
from pypinyin.converter import DefaultConverter
from pypinyin.core import Pinyin
class MyConverter(NeutralToneWith5Mixin, DefaultConverter):
    # pypinyin converter that renders the neutral tone as an explicit "5"
    # (NeutralToneWith5Mixin) on top of the default conversion rules.
    pass
# Rebind the module-level pinyin helpers to the neutral-tone-aware converter;
# get_pinyin() below relies on this rebinding.
my_pinyin = Pinyin(MyConverter())
pinyin = my_pinyin.pinyin
lazy_pinyin = my_pinyin.lazy_pinyin
lazy_pinyin('我不喜欢吃薄荷', style=Style.TONE3)
# ### AIshell
# !head -3 /search/hadoop07/wuzijun/voice_data/Biaobei/audio_text_val_filelist.txt
path_aishell = '/search/hadoop07/wuzijun/voice_data/AIshell'
# NOTE(review): the next line is an unconverted IPython alias (`ll`) and is
# not valid Python.
ll $path_aishell/wav/S0002
# !head -3 $path_aishell/transcript/aishell_transcript_v0.8.txt
# NOTE(review): missing '/' between path_aishell and 'wav' -- this glob
# pattern is '...AIshellwav/*' and likely matches nothing.
speaker_aishell = glob(f'{path_aishell}wav/*')
len(speaker_aishell)
transcripts_aishell = open(f'{path_aishell}/transcript/aishell_transcript_v0.8.txt', 'r').readlines()
def get_pinyin(sentence):
    """Normalize a transcript line and convert it to TONE3 pinyin.

    Strips surrounding whitespace, removes internal spaces and newlines, and
    returns the space-joined numbered-tone pinyin syllables (uses the
    module-level ``lazy_pinyin`` rebound to the neutral-tone-5 converter).
    """
    cleaned = sentence.strip().replace(' ', '').replace('\n', '')
    syllables = lazy_pinyin(cleaned, style=Style.TONE3)
    return ' '.join(syllables)
def get_path(root_path, first):
    """Build the wav path for an AIshell utterance id.

    The speaker directory name is characters 6..10 of the utterance id
    (e.g. 'BAC009S0002W0122' -> 'S0002'), and the file is '<id>.wav'
    under root_path/<speaker>/.
    """
    speaker_dir = first[6:11]
    wav_name = f'{first}.wav'
    return join(root_path, speaker_dir, wav_name)
# Build (wav_path, pinyin, speaker_id) tuples from the AIshell transcript,
# split train/eval, and write pipe-separated filelists for the synthesizer.
aishell_tuples = [(get_path(join(path_aishell, 'wav'), l[:16]), get_pinyin(l[16:]), l[6:11]) for l in transcripts_aishell]
len(aishell_tuples)
import pandas as pd
import numpy as np
import random
from sklearn.model_selection import train_test_split
aishell_train, aishell_eval = train_test_split(aishell_tuples, test_size=300)
len(aishell_train), len(aishell_eval)
# Re-read previously written filelists and prefix the embedding paths.
aishell_train_df = pd.read_csv(f'{path_aishell}/audio_text_train_filelist.txt',
                               sep='|', header=None,
                               names=['wav_path', 'pinyin', 'speaker_name', 'speaker_emb_path'])
aishell_train_df.speaker_emb_path = aishell_train_df.speaker_emb_path.apply(lambda x: x.replace('AIshell/', 'AIshell/embed/'))
aishell_val_df = pd.read_csv(f'{path_aishell}/audio_text_val_filelist.txt',
                             sep='|', header=None,
                             names=['wav_path', 'pinyin', 'speaker_name', 'speaker_emb_path'])
aishell_val_df.speaker_emb_path = aishell_val_df.speaker_emb_path.apply(lambda x: x.replace('AIshell/', 'AIshell/embed/'))
# NOTE(review): aishell_train_df is overwritten here, discarding the
# embed-path fixups two cells above -- cells were probably run out of order.
aishell_train_df = pd.DataFrame(aishell_train, columns=['wav_path', 'pinyin', 'SpeakerID'])
aishell_eval_df = pd.DataFrame(aishell_eval, columns=['wav_path', 'pinyin', 'SpeakerID'])
aishell_eval_df.head(1)
# derive embedding path from the wav path by swapping wav->embed and
# dropping the speaker subdirectory
aishell_train_df['speaker_emb_path'] = aishell_train_df[['wav_path', 'SpeakerID']] \
    .apply(lambda x: x[0].replace('wav', 'embed').replace('/' + x[1], ''), axis=1)
aishell_eval_df['speaker_emb_path'] = aishell_eval_df[['wav_path', 'SpeakerID']] \
    .apply(lambda x: x[0].replace('wav', 'embed').replace('/' + x[1], ''), axis=1)
aishell_train_df.to_csv(f'{path_aishell}/audio_text_train_filelist.txt', sep='|', header=False, index=False)
# NOTE(review): writes aishell_val_df (the re-read frame), not the freshly
# split aishell_eval_df -- confirm which one is intended.
aishell_val_df.to_csv(f'{path_aishell}/audio_text_val_filelist.txt', sep='|', header=False, index=False)
aishell_trans = pd.read_csv('/search/hadoop07/wuzijun/voice_data/AIshell/transcript/aishell_transcript_v0.8.txt', sep=' ', names=['file_name', 'text'])
aishell_trans.head(3)
# ### Magicdata
# Build wav/pinyin/speaker/embedding filelists from the merged transcripts.
import pandas as pd
path_magic = '/search/hadoop07/wuzijun/voice_data/Magicdata'
# cat $path_magic/transcript/*.txt >> $path_magic/transcript/all_trans.txt
magic_list = pd.read_csv(path_magic + '/transcript/all_trans.txt', sep='\t')
# keep only rows whose utterance id is an actual wav file (drops header junk)
magic_list = magic_list[magic_list.UtteranceID.apply(lambda x: '.wav' in x)]
magic_list.drop_duplicates(subset='UtteranceID', inplace=True)
len(magic_list)
magic_list.head(3)
path_magic
magic_list['wav_path'] = magic_list[['UtteranceID', 'SpeakerID']].apply(lambda x: f'{path_magic}/wav/{x[1]}/{x[0]}', axis=1)
magic_list['pinyin'] = magic_list.Transcription.apply(lambda x: get_pinyin(x))
magic_list['speaker_emb_path'] = magic_list[['wav_path', 'SpeakerID']] \
    .apply(lambda x: x[0].replace('wav', 'embed').replace(x[1] + '/', ''), axis=1)
magic_list.speaker_emb_path[0], magic_list.wav_path[0]
magic_trans = magic_list[['wav_path', 'pinyin', 'SpeakerID', 'speaker_emb_path']].copy()
magic_train, magic_val = train_test_split(magic_trans, test_size=300)
len(magic_train), len(magic_val)
# NOTE(review): the split above is immediately discarded -- the filelists are
# re-read from disk here; cells likely ran in a different order.
magic_train = pd.read_csv(path_magic + '/audio_text_train_filelist.txt', header=None, sep='|',
                          names=['wav_path', 'pinyin', 'Speaker_name', 'speaker_emb_path'])
magic_val = pd.read_csv(path_magic + '/audio_text_val_filelist.txt', header=None, sep='|',
                        names=['wav_path', 'pinyin', 'Speaker_name', 'speaker_emb_path'])
magic_train.speaker_emb_path[0]
magic_train.speaker_emb_path = magic_train.speaker_emb_path.apply(lambda x: x.replace('Magicdata/embed', 'Magicdata/embed/'))
magic_val.speaker_emb_path = magic_val.speaker_emb_path.apply(lambda x: x.replace('Magicdata/embed', 'Magicdata/embed/'))
magic_train.to_csv(path_magic + '/audio_text_train_filelist.txt', header=None, sep='|', index=False)
magic_val.to_csv(path_magic + '/audio_text_val_filelist.txt', header=None, sep='|', index=False)
# ### Biaobei
# Single-speaker corpus: fix embedding paths and rewrite the filelists with a
# constant speaker name column.
biaobei_train = pd.read_csv('/search/hadoop07/wuzijun/voice_data/Biaobei/audio_text_train_filelist.txt',
                            sep='|', header=None, names=['wav_path', 'pinyin', 'speaker_name', 'speaker_emb_path'])
biaobei_val = pd.read_csv('/search/hadoop07/wuzijun/voice_data/Biaobei/audio_text_val_filelist.txt',
                          sep='|', header=None, names=['wav_path', 'pinyin', 'speaker_name', 'speaker_emb_path'])
biaobei_train.speaker_emb_path = biaobei_train.speaker_emb_path.apply(lambda x: x.replace('Biaobei/', 'Biaobei/embed/'))
biaobei_val.speaker_emb_path = biaobei_val.speaker_emb_path.apply(lambda x: x.replace('Biaobei/', 'Biaobei/embed/'))
biaobei_val.speaker_emb_path[0]
biaobei_train['speaker_name'] = 'Biaobei'
biaobei_val['speaker_name'] = 'Biaobei'
biaobei_train['speaker_emb_path'] = biaobei_train.wav_path.apply(lambda x: x.replace('Wave', 'wav').replace('wav', 'embed'))
biaobei_val['speaker_emb_path'] = biaobei_val.wav_path.apply(lambda x: x.replace('Wave', 'wav').replace('wav', 'embed'))
biaobei_train.speaker_emb_path[0], biaobei_train.wav_path[0]
# NOTE(review): relying on reset_index() producing level_0/level_1 implies the
# frame had a 2-level index at this point (different from the read_csv above)
# -- more out-of-order notebook state.
biaobei_train.reset_index(inplace=True)
biaobei_train.wav_path = biaobei_train.level_0
biaobei_train.pinyin = biaobei_train.level_1
biaobei_train = biaobei_train[['wav_path', 'pinyin', 'speaker_emb_path']].copy()
biaobei_train['speaker_name'] = 'Biaobei'
biaobei_train.wav_path = biaobei_train.wav_path.apply(lambda x: x.replace('Wave', 'wav'))
biaobei_train[['wav_path', 'pinyin', 'speaker_name', 'speaker_emb_path']].to_csv('/search/hadoop07/wuzijun/voice_data/Biaobei/audio_text_train_filelist.txt', sep='|', index=False, header=False)
biaobei_val.reset_index(inplace=True)
biaobei_val.wav_path = biaobei_val.level_0
biaobei_val.pinyin = biaobei_val.level_1
biaobei_val = biaobei_val[['wav_path', 'pinyin', 'speaker_emb_path']].copy()
biaobei_val['speaker_name'] = 'Biaobei'
biaobei_val.wav_path = biaobei_val.wav_path.apply(lambda x: x.replace('Wave', 'wav'))
biaobei_val[['wav_path', 'pinyin', 'speaker_name', 'speaker_emb_path']].to_csv('/search/hadoop07/wuzijun/voice_data/Biaobei/audio_text_val_filelist.txt', sep='|', index=False, header=False)
# ## Scratch / ad-hoc tests
# NOTE(review): this whole section is exploratory notebook scratch. Several
# lines are incomplete IPython input (`pwd`, `p.`, a bare `TypeError`) and
# will not run as a plain Python script.
test1 = np.load('/search/hadoop07/wuzijun/voice_data/Biaobei/embed/009996.npy')
test2 = np.load('/search/hadoop07/wuzijun/voice_data/Biaobei/embed/009991.npy')
import librosa
import IPython
from IPython.display import Audio
path = '/search/hadoop07/wuzijun/voice_data/Magicdata/wav/15_3621/15_3621_20170810140303.wav'
IPython.display.display(Audio(librosa.load(path, sr=16000)[0], rate=16000))
import torch
from torch import nn
nn.utils.rnn.pad_packed_sequence
a = torch.rand(5, 9, 4)
b = torch.rand(5, 9, 2)
torch.Tensor.expand
torch.zeros(1).repeat(3,4)
from encoder.model import SpeakerEncoder
model = SpeakerEncoder('cuda', 'cuda')
import torch
checkpoint = torch.load('encoder/saved_models/pretrained.pt')
import numpy as np
from synthesizer.utils.audio import AudioProcessor
from synthesizer.utils.generic_utils import load_config
c = load_config('synthesizer/config.json')
ap = AudioProcessor(**c.audio)
ap._stft_parameters()
# compare soundfile vs librosa loading/resampling
import numpy as np
import librosa
import soundfile
x, sr = soundfile.read('/search/hadoop07/wuzijun/voice_data/Biaobei/wav/000001.wav')
x, len(x)
x1, sr1 = librosa.load('/search/hadoop07/wuzijun/voice_data/Biaobei/wav/000001.wav', sr=16000)
x1, sr1
len(x1)
x2 = librosa.resample(x, sr, 16000)
x2
len(x2)
x2 == x1
# cd /search/hadoop07/wuzijun/voice_data/AIshell/embed/
pwd
import glob
test = glob.glob('*')
# repair embedding filenames that contain a stray newline
to_rename = [n for n in test if '\n' in n]
to_rename[0]
len(to_rename)
import os, sys
for n in to_rename:
    new_n = n.replace('\n', '')
    prefix = new_n.replace('.npy', '').split('/')[-1]
    # print(f'{n} => {new_n}')
    os.system(f"mv {prefix}*.npy {new_n}")
1200//2
p = Path('./bt')
p.
TypeError
import numpy as np
t = np.load('/search/hadoop07/wuzijun/voice_data/Biaobei/WaveGAN/all/000012-feats.npy')
t.shape
l = [4,5,6]
for i, l in enumerate(l, 1):
    print(i, l)
os.path.join("/search/hadoop07/wuzijun/voice_data/SV2TTS/synthesizer/", "mels/11")
import numpy as np
np.load('../Data/voice_data/Biaobei/Mels/mel/009100.npy').shape
a = np.load('../Data/voice_data/SV2TTS/synthesizer/mels/mel-BAC009S0025W0125_00.npy')
a.shape
a.T.shape
|
data_process.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# ## Define the module in which the `#export` tag will save the code in `src`
# +
#default_exp rolling
# -
# ## Import modules that are only used in documentation and nbdev related (not going to src)
# +
#hide
from nbdev.showdoc import *
# %load_ext autoreload
# %autoreload 2
import sys
sys.path.append('..') #appends project root to path in order to import project packages since `noteboks_dev` is not on the root
#DO NOT EDIT
# +
#hide
#Internal Imports
#imports that are going to be used only during development and are not intended to be loaded inside the generated modules.
#for example: use imported modules to generate graphs for documentation, but lib is unused in actual package
#import ...
# -
# # rolling
# ## Dev comments
# ### TODOs
# - [X] TODO: do something
# - [ ] TODO: do something else
# ### <comment section 2>
# ## Code Session
# ### External Imports
# > imports that are intended to be loaded in the actual modules e.g.: module dependencies
# +
#export
from functools import reduce, partial
import os
import datetime as dt
from tqdm import tqdm
from warnings import warn
import pandas as pd
import numpy as np
import numba
from dask import dataframe as dd
from dask import delayed
from dask.diagnostics import ProgressBar
# -
# ### utils -
# ### Create historical "open invoices" features
# +
#export
def _get_index_rolling_windows(rolling_obj):
'''
get positional indexes of rows of each rolling window
'''
if hasattr(rolling_obj, '_selection'):
previous_selection = getattr(rolling_obj, '_selection')
else:
previous_selection = None
INDEX_LIST = []
#define function to append values to global INDEX_LIST since rolling apply won't let return arrays
def f(x): INDEX_LIST.append(x.astype(int)); return 0
assert '__indexer__' not in rolling_obj.obj.columns, 'DataFrame should not contain any col with "__indexer__" name'
rolling_obj.obj = rolling_obj.obj.assign(__indexer__ = np.arange(len(rolling_obj.obj)), inplace = True)
rolling_obj._selection = '__indexer__'
rolling_obj.apply(f, raw = True)
rolling_obj.obj = rolling_obj.obj.drop(columns = ['__indexer__'])
delattr(rolling_obj, '_selection')
if not previous_selection is None:
setattr(rolling_obj, '_selection', previous_selection)
return INDEX_LIST
def _apply_custom_rolling(rolling_obj, func, raw = True, engine = 'numpy', *args, **kwargs):
    '''
    Apply an arbitrary (possibly non-scalar) aggregation ``func`` over every
    window of a pandas rolling object, bypassing rolling.apply's
    scalar-return restriction.

    Parameters
    ----------
    rolling_obj : pandas Rolling object
    func : callable
        Aggregation applied to each materialized window.
    raw : bool, default True
        Accepted for API compatibility; currently unused -- the chosen engine
        dictates whether windows are numpy arrays or DataFrame slices.
    engine : {'numpy', 'pandas', 'numba'}, default 'numpy'
        Backend used to materialize and aggregate the windows.
    *args, **kwargs
        Forwarded to ``func``. NOTE: the 'numba' engine's wrapper takes no
        extra arguments, so passing any with engine='numba' raises TypeError.

    Returns
    -------
    list (numpy engines) or DataFrame (pandas engine) of per-window results.
    '''
    engines = {
        'numpy':_rolling_apply_custom_agg_numpy,
        'pandas':_rolling_apply_custom_agg_pandas,
        'numba':_rolling_apply_custom_agg_numpy_jit
    }
    _rolling_apply = engines[engine]
    indexes = _get_index_rolling_windows(rolling_obj)
    # BUGFIX: the selected/unselected branches were not mutually exclusive --
    # with no column selection the result was recomputed via ``obj[None]``,
    # which raises. Apply the column selection only when one is actually set.
    selection = getattr(rolling_obj, '_selection', None)
    if selection is None:
        values = _rolling_apply(rolling_obj.obj, indexes, func, *args, **kwargs)
    else:
        values = _rolling_apply(rolling_obj.obj[selection], indexes, func, *args, **kwargs)
    return values
def _rolling_apply_custom_agg_numpy_jit(df, indexes, func):
    '''
    applies some aggregation function over groups defined by index.
    groups are numpy arrays
    '''
    # Materialize the frame once as a raw ndarray; `func` is probed on the
    # first row to discover its output shape for buffer preallocation.
    dfv = df.values
    # template of output to create empty array
    #use this for jit version
    shape = np.array(func(dfv[:1])).shape
    #d = [np.empty(*shape) for _ in range(len(indexes))]
    result_array = np.empty((len(indexes),*shape))
    # forceobj=True: `func` is an arbitrary Python callable, so numba runs
    # this in object mode (little speedup over plain Python).
    @numba.jit(forceobj=True)
    def _roll_apply(dfv, indexes, func, result_array):
        for i in np.arange(len(indexes)):
            data = dfv[indexes[i]]
            if len(data) > 0:
                result = func(data)
                result_array[i] = result
            else:
                # NOTE(review): this branch never writes into result_array[i],
                # so rows for empty windows keep np.empty() garbage, and the
                # local `result` is unused.
                result = np.empty(shape)
        return result_array
    return _roll_apply(dfv, indexes, func, result_array)
def _rolling_apply_custom_agg_numpy(df, indexes, func, *args, **kwargs):
    '''
    Apply ``func`` to the raw numpy rows of ``df`` selected by each positional
    index group in ``indexes`` (with a tqdm progress bar).

    Empty index groups are skipped and keep an empty-list placeholder in the
    output, preserving positional alignment with ``indexes``.
    '''
    raw = df.values
    out = [[] for _ in indexes]
    for pos in tqdm(range(len(indexes))):
        window = raw[indexes[pos]]
        if len(window) > 0:
            out[pos] = func(window, *args, **kwargs)
    return out
def _rolling_apply_custom_agg_pandas(df, indexes, func, *args, **kwargs):
    '''
    Like the numpy variant, but hands each window to ``func`` as a pandas
    slice (``df.iloc[...]``) and concatenates the per-window results with
    ``pd.concat`` (with a tqdm progress bar).

    Empty index groups keep an empty-list placeholder in the concatenated
    input, preserving positional alignment with ``indexes``.
    '''
    pieces = [[] for _ in indexes]
    for pos in tqdm(range(len(indexes))):
        window = df.iloc[indexes[pos]]
        if len(window) > 0:
            pieces[pos] = func(window, *args, **kwargs)
    return pd.concat(pieces)
# -
# ### Generic Rolling + resample features
# +
#export
def _make_rolling_groupby_object(df, group_columns, date_column):
'''
helping function to make computational graph creation faster
'''
groupby_object = df.set_index(date_column).groupby(group_columns)
return groupby_object
def make_generic_rolling_features(
    df,
    calculate_columns,
    group_columns,
    date_column,
    suffix = None,
    rolling_operation = 'mean',
    window = '60D',
    min_periods=None,
    center=False,
    win_type=None,
    on=None,
    axis=0,
    closed=None,
    **rolling_operation_kwargs
):
    '''
    make generic/custom rolling operation for a given column, grouped by customer, having Data de Emissao as date index
    if calculate cols is None, than use all cols
    Parameters
    ----------
    df: DataFrame
        DataFrame to make rolling features over (a pandas/dask groupby object
        is also accepted and used as-is)
    calculate_columns: list of str
        list of columns to perform rolling_operation over
    group_columns: list of str
        list of columns passed to GroupBy operator prior to rolling
    date_column: str
        datetime column to roll over
    suffix: Str
        suffix for features names
    rolling_operation: Str of aggregation function, default = "mean"
        str representing groupby object method, such as mean, var, quantile ...
    window:
        DataFrameGroupBy.Rolling parameter. please refer to documentation
    min_periods:
        DataFrameGroupBy.Rolling parameter. please refer to documentation
    center:
        DataFrameGroupBy.Rolling parameter. please refer to documentation
    win_type:
        DataFrameGroupBy.Rolling parameter. please refer to documentation
    on:
        DataFrameGroupBy.Rolling parameter. please refer to documentation
    axis:
        DataFrameGroupBy.Rolling parameter. please refer to documentation
    closed:
        DataFrameGroupBy.Rolling parameter. please refer to documentation
    rolling_operation_kwargs:
        key word arguments passed to rolling_operation
    Returns
    -------
    DataFrame with the new calculated features
    '''
    # BUGFIX: the message was missing its f-prefix, so the placeholder was
    # printed literally instead of the offending type.
    assert group_columns.__class__ in (set, tuple, list), f'group_columns type should be one of (tuple, list, set), not {group_columns.__class__}'
    if calculate_columns is None:
        # NOTE(review): assumes `df` exposes .columns here -- only true when a
        # plain DataFrame (not an existing groupby) was passed; confirm callers.
        calculate_columns = [i for i in df.columns if not i in [*group_columns, date_column]]
    # wrap a plain frame in a date-indexed groupby; groupby inputs pass through
    if not isinstance(df,(
        dd.groupby.DataFrameGroupBy,
        pd.core.groupby.generic.DataFrameGroupBy,
        pd.core.groupby.generic.SeriesGroupBy,
        dd.groupby.SeriesGroupBy
    )):
        df = _make_rolling_groupby_object(df, group_columns, date_column)
    if isinstance(df, (pd.core.groupby.generic.DataFrameGroupBy, pd.core.groupby.generic.SeriesGroupBy)):
        # pandas path: groupby-rolling supports the aggregation directly
        df = getattr(
            df[calculate_columns]
            .rolling(
                window = window,
                min_periods=min_periods,
                center=center,
                win_type=win_type,
                on=on,
                axis=axis,
                closed=closed
            ),
            rolling_operation,
        )(**rolling_operation_kwargs).reset_index()
    else: #syntax for dask groupby rolling
        df = df[calculate_columns].apply(
            lambda x: getattr(
                x.sort_index().rolling(
                    window = window,
                    min_periods=min_periods,
                    center=center,
                    win_type=win_type,
                    on=on,
                    axis=axis,
                    closed=closed
                ),
                rolling_operation,
            )(**rolling_operation_kwargs).reset_index()
            #meta = meta, #works only for float rolling
        ).reset_index().drop(columns = [f'level_{len(group_columns)}']) #drop unwanted "level_n" cols
    # rename the calculated columns so the operation/window are self-describing
    if not suffix:
        df.columns = [
            f'{col}__rolling_{rolling_operation}_{window}_{str(rolling_operation_kwargs)}'
            if not col in (*group_columns, date_column) else col
            for col in df.columns
        ]
    else:
        df.columns = [
            f'{col}__rolling_{window}_{suffix}'
            if not col in (*group_columns, date_column) else col
            for col in df.columns
        ]
    return df
def _make_shift_resample_groupby_object(df, group_columns, date_column,freq, n_periods_shift):
groupby_object = (
df
.assign(**{date_column:df[date_column] + pd.Timedelta(n_periods_shift,freq)}) #shift
.set_index(date_column)
.groupby([*group_columns, pd.Grouper(freq = freq)])
)
return groupby_object
def make_generic_resampling_and_shift_features(
    df, calculate_columns, group_columns, date_column, freq = 'm',
    agg = 'last', n_periods_shift = 0, assert_frequency = False, suffix = '', **agg_kwargs
):
    '''
    Makes generic resamples (aggregates by time frequency) on columns,
    shifting timestamps forward by ``n_periods_shift`` periods first to avoid
    information leakage.

    Parameters
    ----------
    df: DataFrame
        DataFrame to aggregate
    calculate_columns: list of str or None
        columns to aggregate; if None, every column except the group and date
        columns is used
    group_columns: list of str
        columns passed to the GroupBy operator prior to resampling
    date_column: str
        datetime column to resample over
    freq: valid pandas freq str
        frequency to resample data
    agg: str or callable, default = "last"
        groupby method name (mean, var, last, ...) or a callable applied per group
    n_periods_shift: int
        number of periods to shift the timestamps forward. Shifting is
        important after aggregation to avoid information leakage, e.g.
        assuming you have end-of-month information at the beginning of the month.
    assert_frequency: bool, default = False
        if True, resamples the result to match ``freq`` exactly, forward-filling
        missing values
    suffix: str
        suffix for feature names; when empty, a name is derived from ``agg``
        and ``agg_kwargs``
    agg_kwargs:
        keyword arguments passed to ``agg``

    Returns
    -------
    DataFrame with the new calculated features (indexed by group + period
    unless ``assert_frequency`` is True, in which case the index is reset).
    '''
    if calculate_columns is None:
        # Everything that is not a key column is a feature column.
        calculate_columns = [i for i in df.columns if i not in [*group_columns, date_column]]
    # Shift timestamps forward, then group by keys + a time bucket of ``freq``.
    df = (
        df
        .assign(**{date_column: df[date_column] + pd.Timedelta(n_periods_shift, freq)})
        .set_index(date_column)
        .groupby([*group_columns, pd.Grouper(freq = freq)])
    )
    if isinstance(agg, str):
        # String aggs dispatch straight to the (fast, cythonized) groupby method.
        df = getattr(df[calculate_columns], agg)(**agg_kwargs)
    else:
        df = df[calculate_columns].apply(lambda x: agg(x, **agg_kwargs))
    if not suffix:
        df.columns = [f'{i}__{str(agg)}_{str(agg_kwargs)}' for i in df.columns]
    else:
        df.columns = [f'{i}__{suffix}' for i in df.columns]
    if assert_frequency:
        # Re-bucket to a regular grid per group, forward-filling the gaps.
        df = df.reset_index()
        df = df.set_index(date_column).groupby(group_columns).resample(freq).ffill()
        # Only index levels that are not duplicated as columns can be reset in place.
        resetable_indexes = list(set(df.index.names) - set(df.columns))
        df = df.reset_index(level = resetable_indexes)
        df = df.reset_index(drop = True)
    return df
def create_rolling_resampled_features(
    df,
    calculate_columns,
    group_columns,
    date_column,
    extra_columns = None,
    n_periods_shift = 1,
    rolling_first = True,
    rolling_operation = 'mean',
    window = '60D',
    resample_freq = 'm',
    resample_agg = 'last',
    assert_frequency = False,
    rolling_suffix = '',
    resample_suffix = '',
    min_periods=None,
    center=False,
    win_type=None,
    on=None,
    axis=0,
    closed=None,
    rolling_operation_kwargs = None,
    resample_agg_kwargs = None
):
    '''
    Calculates rolling features groupwise, then resamples according to the
    resample period. Calculations are done the other way around if
    ``rolling_first`` is set to False.

    Parameters
    ----------
    df: DataFrame
        DataFrame to make rolling features over
    calculate_columns: list of str
        columns to perform rolling_operation over
    group_columns: list of str
        columns passed to the GroupBy operator prior to rolling
    date_column: str
        datetime column to roll over
    extra_columns: list of str, optional
        extra columns passed to the final dataframe without aggregation
        (takes the last values; assumes they are constant within each group).
        Useful to pass merge keys.
    n_periods_shift: int
        number of periods to shift after aggregation; shifting avoids
        information leakage, e.g. assuming you have end-of-month information
        at the beginning of the month.
    rolling_first: bool, default = True
        whether to perform rolling before resampling, or the other way around
    rolling_operation: str, default = "mean"
        groupby method name, such as mean, var, quantile ...
    window:
        DataFrameGroupBy.Rolling parameter. Please refer to documentation.
    resample_freq: valid pandas freq str
        frequency to resample data
    resample_agg: str, default = "last"
        groupby method name, such as mean, var, last ...
    assert_frequency: bool, default = False
        resamples data to match freq, using forward fill for missing values
    rolling_suffix: str
        suffix for the rolling part of feature names
    resample_suffix: str
        suffix for the resample part of feature names
    min_periods, center, win_type, on, axis, closed:
        DataFrameGroupBy.Rolling parameters. Please refer to documentation.
    rolling_operation_kwargs: dict, optional
        keyword arguments passed to rolling_operation
    resample_agg_kwargs: dict, optional
        keyword arguments passed to resample_agg

    Returns
    -------
    DataFrame with the new calculated features
    '''
    # Avoid the mutable-default-argument pitfall: fresh containers per call.
    extra_columns = [] if extra_columns is None else extra_columns
    rolling_operation_kwargs = {} if rolling_operation_kwargs is None else rolling_operation_kwargs
    resample_agg_kwargs = {} if resample_agg_kwargs is None else resample_agg_kwargs
    if rolling_first:
        features_df = make_generic_rolling_features(
            df,
            calculate_columns = calculate_columns,
            group_columns = group_columns,
            date_column = date_column,
            suffix = rolling_suffix,
            rolling_operation = rolling_operation,
            window = window,
            min_periods=min_periods,
            center=center,
            win_type=win_type,
            on=on,
            axis=axis,
            closed=closed,
            **rolling_operation_kwargs
        )
        if extra_columns:
            # Re-attach the pass-through columns on the group/date keys.
            features_df = features_df.merge(
                df[extra_columns + group_columns + [date_column]],
                how = 'left',
                left_on = group_columns + [date_column],
                right_on = group_columns + [date_column]
            )
        features_df = make_generic_resampling_and_shift_features(
            features_df,
            calculate_columns = None,
            date_column = date_column,
            group_columns = group_columns,
            freq = resample_freq,
            agg = resample_agg,
            assert_frequency = assert_frequency,
            suffix = resample_suffix,
            n_periods_shift = n_periods_shift,
            # BUG FIX: these kwargs were previously accepted but never forwarded.
            **resample_agg_kwargs
        )
    else:
        features_df = make_generic_resampling_and_shift_features(
            df,
            calculate_columns = calculate_columns,
            date_column = date_column,
            group_columns = group_columns,
            freq = resample_freq,
            agg = resample_agg,
            assert_frequency = assert_frequency,
            suffix = resample_suffix,
            n_periods_shift = n_periods_shift,
            **resample_agg_kwargs
        )
        # Guarded like the rolling_first branch: an unconditional merge with
        # no extra columns can only duplicate rows when (group, date) repeats.
        if extra_columns:
            features_df = features_df.merge(
                df[extra_columns + group_columns + [date_column]],
                how = 'left',
                left_on = group_columns + [date_column],
                right_on = group_columns + [date_column]
            )
        features_df = make_generic_rolling_features(
            features_df,
            calculate_columns = None,
            group_columns = group_columns,
            date_column = date_column,
            suffix = rolling_suffix,
            rolling_operation = rolling_operation,
            window = window,
            min_periods=min_periods,
            center=center,
            win_type=win_type,
            on=on,
            axis=axis,
            closed=closed,
            **rolling_operation_kwargs
        )
    return features_df
# -
# ## Experimentation session and usage examples
# + tags=[]
import pandas as pd
import dask.dataframe as dd
covid_data = pd.read_csv(
r'.\datasets\covid_19_data.csv',
parse_dates = ['ObservationDate']
)
covid_data
# +
make_generic_rolling_features(
covid_data,
calculate_columns = ['Deaths','Confirmed'],
group_columns = ['Country/Region'],
date_column = 'ObservationDate',
rolling_operation = 'mean',
window = '7D',
suffix = ''
)
# -
make_generic_resampling_and_shift_features(
covid_data,
calculate_columns = ['Deaths','Confirmed'],
group_columns = ['Country/Region'],
date_column = 'ObservationDate',
agg = 'mean',
freq = 'W',
suffix = '',
assert_frequency = True
)
create_rolling_resampled_features(
covid_data,
calculate_columns = ['Deaths','Confirmed'],
group_columns = ['Country/Region'],
date_column = 'ObservationDate',
rolling_operation = 'mean',
window = '15D',
resample_freq = 'W'
)
# ## Define jitted agg func to pass to engine = 'numba'
# +
# JIT-compiled column-wise sum. numba compiles the NumPy call to native code
# so it can be passed as an aggregation to pandas rolling(...).apply(engine='numba').
@numba.jit
def jit_sum(x):
    # Sum over rows: one value per column of the incoming window.
    return np.sum(x, axis = 0)
def jit_correlation(x):
    """Cross-correlate the first two columns of a 2-D window ``x``.

    Returns the result of ``np.correlate`` (a length-1 array in 'valid' mode)
    when the window holds more than one row, and ``np.nan`` otherwise.
    """
    # Guard clause: a single-row window has no meaningful correlation.
    if x.shape[0] <= 1:
        return np.nan
    return np.correlate(x[:, 0], x[:, 1])
# -
# ## Run for each
brazil_data = covid_data.query('`Country/Region` == "Brazil"')
brazil_data = brazil_data.groupby(['ObservationDate','Province/State'])[['Confirmed','Deaths','Recovered']].sum().reset_index()
brazil_data = brazil_data.set_index('ObservationDate').groupby(['Province/State']).resample('D').fillna('ffill').reset_index(level = 'Province/State', drop = True)
brazil_data = brazil_data.query('`Province/State` != "Unknown"')
new_cases = brazil_data.groupby('Province/State')[['Confirmed','Deaths','Recovered']].diff()
new_cases.columns = ['new_'+i for i in ['Confirmed','Deaths','Recovered']]
brazil_data = pd.concat([brazil_data, new_cases], axis = 1)
brazil_data.query('`Province/State` == "Amazonas"')[['new_Confirmed','new_Deaths','new_Recovered']].apply(lambda x: (x-x.mean())/x.std())
# +
grouper = covid_data.sample(10000).set_index('ObservationDate').groupby('Country/Region').rolling('30D')[['Confirmed','Deaths']]
# %timeit -r 1 -n 1 _apply_custom_rolling(grouper, jit_correlation, engine = 'numba')
# %timeit -r 1 -n 1 _apply_custom_rolling(grouper, jit_correlation, engine = 'numpy')
# -
_apply_custom_rolling(grouper, lambda x: np.corrcoef(x, rowvar = False).flatten(), engine = 'numpy')
# ## Export -
#hide
from nbdev.export import notebook2script
notebook2script()
|
notebooks_dev/rolling.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Beyond Girko's Law
#
# 1. <NAME> (1984). Circular law. Theory of Probability and Its Applications, 29(4), 694-706.
# 2. <NAME>, <NAME>, <NAME> and <NAME> (1988). [Spectrum of large random asymmetric matrices](http://pdfs.semanticscholar.org/978e/e44dd0292ddfdb745e03f26508b31df2c83b.pdf). Physical Review Letters, 60(19), 1895-1898.
# 3. <NAME> and <NAME> (1997). [Non-gaussian non-hermitian random matrix theory: phase transition and addition formalism](https://ac.els-cdn.com/S0550321397004197/1-s2.0-S0550321397004197-main.pdf?_tid=688e92ae-6a5b-4590-887a-5eedb2201d64&acdnat=1527079313_8960089921305ac393eb90ee8946e519). Nuclear Physics B, 501(3), 643-669.
# 4. <NAME> and <NAME>(1997). [Non-hermitian random matrix theory: method of hermitian reduction](https://ac.els-cdn.com/S0550321397005026/1-s2.0-S0550321397005026-main.pdf?_tid=d25fc151-c966-4703-9e05-ab6dbaf010a1&acdnat=1527079010_7043499b34f6d2482e70df18d26fa46c). Nuclear Physics B, 504(3), 579-608.
# 5. <NAME> and <NAME> (2006). [Eigenvalue spectra of random matrices for neural networks](http://genomics.princeton.edu/rajan/downloads/papers/Rajan_PRL_2006.pdf). Physical Review Letters, 97(18), 188104.
import numpy as np
import numpy.linalg as la
from scipy.linalg import sqrtm
import matplotlib.pyplot as plt
# ## 1 Girko's circular law
# + **Real**
# +
N = 1000
trials = 1
evs = []
for _ in range(trials):
G = np.random.randn(N, N)
G = G / np.sqrt(N)
es = la.eigvals(G)
for e in es:
evs.append(e)
evs = np.array(evs)
dt = .005 * np.pi
t = np.linspace(0, 2 * np.pi, int(2 * np.pi / dt))
x = np.cos(t)
y = np.sin(t)
plt.figure(figsize=(8, 8))
Gaussian, = plt.plot(evs.real, evs.imag, 'ob')
Circle, = plt.plot(x, y, 'r-')
plt.title('Girko\'s Circular Law', fontsize=18)
plt.xlabel(r'$Re(\lambda)$', fontsize=18)
plt.ylabel(r'$Im(\lambda)$', fontsize=18)
plt.legend(handles=[Gaussian, Circle], labels=['Real Gaussian', 'Circle'], loc='upper right')
plt.xlim((-1.1, 1.1))
plt.ylim((-1.1, 1.1))
plt.show()
# + **Complex**
# +
N = 1000
trials = 1
evs = []
for _ in range(trials):
G = np.random.randn(N, N) + 1j * np.random.randn(N, N)
G = G / np.sqrt(2 * N)
es = la.eigvals(G)
for e in es:
evs.append(e)
evs = np.array(evs)
dt = .005 * np.pi
t = np.linspace(0, 2 * np.pi, int(2 * np.pi / dt))
x = np.cos(t)
y = np.sin(t)
plt.figure(figsize=(8, 8))
Gaussian, = plt.plot(evs.real, evs.imag, 'ob')
Circle, = plt.plot(x, y, 'r-')
plt.title('Girko\'s Circular Law', fontsize=18)
plt.xlabel(r'$Re(\lambda)$', fontsize=18)
plt.ylabel(r'$Im(\lambda)$', fontsize=18)
plt.legend(handles=[Gaussian, Circle], labels=['Complex Gaussian', 'Circle'], loc='upper right')
plt.xlim((-1.1, 1.1))
plt.ylim((-1.1, 1.1))
plt.show()
# -
# ## 2 Column Sampling
# +
N = 1000
trials = 1
evs = []
f = 0.3
mu_E = 0.1
mu_I = f * mu_E / (f - 1)
for _ in range(trials):
M = np.zeros((N, N))
idxs = np.random.permutation(N)
for i in range(int(f * N)):
M[:, idxs[i]] = 1 / np.sqrt(N) * np.random.randn(N) + mu_E / np.sqrt(N)
for j in range(int(f * N), N):
M[:, idxs[j]] = 1 / np.sqrt(N) * np.random.randn(N) + mu_I / np.sqrt(N)
es = la.eigvals(M)
for e in es:
evs.append(e)
evs = np.array(evs)
dt = .005 * np.pi
r = np.sqrt(1 + f * mu_E ** 2 + (1 - f) * mu_I ** 2)
t = np.linspace(0, 2 * np.pi, int(2 * np.pi / dt))
x = r * np.cos(t)
y = r * np.sin(t)
plt.figure(figsize=(8, 8))
Gaussian_Sampling, = plt.plot(evs.real, evs.imag, 'ob')
Circle, = plt.plot(x, y, 'r-')
plt.title('Column Sampling', fontsize=18)
plt.xlabel(r'$Re(\lambda)$', fontsize=18)
plt.ylabel(r'$Im(\lambda)$', fontsize=18)
plt.legend(handles=[Gaussian_Sampling, Circle], labels=['Gaussian_Sampling', 'Circle'], loc='upper right')
plt.xlim((-1.1, 1.1))
plt.ylim((-1.1, 1.1))
plt.show()
# -
# ## 3 Ellipse via partial symmetry
# +
N = 1000
trials = 1
evs = []
for i in range(trials):
G = np.random.randn(N, N)
G = G / np.sqrt(N)
idxs = np.random.permutation(N)
ps = 350
for j in range(ps):
for k in range(ps):
G[idxs[j], idxs[k]] = G[idxs[k], idxs[j]]
es = la.eigvals(G)
for e in es:
evs.append(e)
evs = np.array(evs)
dt = .005 * np.pi
t = np.linspace(0, 2 * np.pi, int(2 * np.pi / dt))
# By trial; the ellipse semi-axes a and b may also be derived theoretically.
x = np.sqrt(1.5) * np.cos(t)
y = np.sin(t)
plt.figure(figsize=(8, 8))
Gaussian, = plt.plot(evs.real, evs.imag, 'ob')
Ellipse, = plt.plot(x, y, 'r-')
plt.title('Ellipse via partial symmetry', fontsize=18)
plt.xlabel(r'$Re(\lambda)$', fontsize=18)
plt.ylabel(r'$Im(\lambda)$', fontsize=18)
plt.legend(handles=[Gaussian, Ellipse], labels=['Real Gaussian', 'Ellipse'], loc='upper right')
plt.xlim((-1.5, 1.5))
plt.ylim((-1.5, 1.5))
plt.show()
# -
# ## 4 Shift the outliers inside the unit circle
# + by imposing a constraint: **different means** but **same variances**
# +
N = 1000
trials = 1
evs = []
f = 0.3
mu_E = 0.1
mu_I = f * mu_E / (f - 1)
for _ in range(trials):
J = np.random.randn(N, N)
J = J / np.sqrt(N)
for i in range(N):
J[i, :] = J[i, :] - np.mean(J[i, :])
M = np.zeros((N, N))
m = np.zeros(N)
idxs = np.random.permutation(N)
for i in range(int(f * N)):
m[idxs[i]] = mu_E
for j in range(int(f * N), N):
m[idxs[j]] = mu_I
for k in range(N):
M[k, :] = 1 / np.sqrt(N) * m
G = J + M
es = la.eigvals(G)
for e in es:
evs.append(e)
evs = np.array(evs)
dt = .005 * np.pi
t = np.linspace(0, 2 * np.pi, int(2 * np.pi / dt))
x = np.cos(t)
y = np.sin(t)
plt.figure(figsize=(8, 8))
Constraint, = plt.plot(evs.real, evs.imag, 'ob')
Circle, = plt.plot(x, y, 'r-')
plt.title('Constraint Construction with different means and same variances', fontsize=18)
plt.xlabel(r'$Re(\lambda)$', fontsize=18)
plt.ylabel(r'$Im(\lambda)$', fontsize=18)
plt.legend(handles=[Constraint, Circle], labels=['Constraint', 'Circle'], loc='upper right')
plt.xlim((-1.1, 1.1))
plt.ylim((-1.1, 1.1))
plt.show()
# + by imposing a constraint: **same means** but **different variances**
#
# **Note**: no longer distributed **uniformly**
# +
N = 1000
trials = 1
evs = []
f = 0.5
alpha = 0.06
mu_E = 0.1
mu_I = f * mu_E / (f - 1)
for _ in range(trials):
J = np.random.randn(N, N)
J = J / np.sqrt(N)
X = np.zeros((N, N))
sigma = np.random.rand(N)
idxs = np.random.permutation(N)
for j in range(int(f * N)):
X[:, idxs[j]] = J[:, idxs[j]] * sigma[idxs[j]] / np.sqrt(alpha)
for j in range(int(f * N), N):
X[:, idxs[j]] = J[:, idxs[j]] * sigma[idxs[j]]
es = la.eigvals(X)
for e in es:
evs.append(e)
evs = np.array(evs)
dt = .005 * np.pi
t = np.linspace(0, 2 * np.pi, int(2 * np.pi / dt))
r = np.sqrt(1 - f + f / alpha)
x = (r - 1) * np.cos(t)
y = (r - 1) * np.sin(t)
plt.figure(figsize=(8, 8))
Constraint, = plt.plot(evs.real, evs.imag, 'ob')
Circle, = plt.plot(x, y, 'r-')
plt.title('Constraint Construction with same means and different variances', fontsize=18)
plt.xlabel(r'$Re(\lambda)$', fontsize=18)
plt.ylabel(r'$Im(\lambda)$', fontsize=18)
plt.legend(handles=[Constraint, Circle], labels=['Constraint', 'Circle'], loc='upper right')
plt.xlim((-(r - 1), r - 1))
plt.ylim((-(r - 1), r - 1))
plt.show()
# -
# ## 5 Girko's Elliptic Law
#
# [<NAME>](http://www.maths.qmul.ac.uk/~boris/): Non-Hermitian random matrices.
#
# **Complex matrices**:
#
# (1)
# $$\tilde{J} = A + i v B,$$
# (2)
# $$J = \frac{A + i v B}{\sqrt{n}},$$
# (3)
# $$\tau = \frac{1 - v^2}{1 + v^2}.$$
#
# + if $\tau$ = 0, $\tilde{J}$ is Ginibre ensemble;
# + if $\tau$ = 1, $\tilde{J}$ is GUE where $\tilde{J}$ = $J^{\tilde\ast}$;
# + if $\tau$ = -1, $\tilde{J}$ = - $J^{\tilde\ast}$.
#
# **Two cases**:
#
# + case 1: $n \rightarrow \infty$, v constant;
# + case 2: $n \rightarrow \infty$, $v^2 n \rightarrow$ constant, then we may think of eigenvalues of J as of perturbed eigenvalues of $\frac{A}{\sqrt{n}}$.
# +
N = 1000
trials = 1
evs = []
v = 0.5
#v = 0.05
tau = (1 - v ** 2) / (1 + v ** 2)
for _ in range(trials):
G1 = np.random.randn(N, N) + 1j * np.random.randn(N, N)
G1 = np.mat(G1)
A = (G1 + G1.H) / 2 # GUE E(trA^2)=N^2
G2 = np.random.randn(N, N) + 1j * np.random.randn(N, N)
G2 = np.mat(G2)
B = (G2 + G2.H)/2 # GUE E(trB^2)=N^2
J = (A + 1j * v * B) / np.sqrt(N)
es = la.eigvals(J)
for e in es:
evs.append(e)
evs = np.array(evs)
dt = .005 * np.pi
t = np.linspace(0, 2 * np.pi, int(2 * np.pi / dt))
x = (1 + tau) * np.cos(t)
y = (1 - tau) * np.sin(t)
plt.figure(figsize=(8, 8))
Gaussian, = plt.plot(evs.real, evs.imag, 'ob')
Elliptic, = plt.plot(x, y, 'r-')
plt.title('Girko\'s Elliptic Law', fontsize=18)
plt.xlabel(r'$Re(\lambda)$', fontsize=18)
plt.ylabel(r'$Im(\lambda)$', fontsize=18)
plt.legend(handles=[Gaussian, Elliptic], labels=['Non-Hermitian', 'Ellipse'], loc='upper right')
plt.xlim((-(1 + tau), 1 + tau))
plt.ylim((-(1 + tau), 1 + tau))
plt.show()
# -
# ## 6 Simple Ring Law
#
# $$X = [x_1^T, \dots, x_N^T]$$
#
# where $$x_i = [x_i^1, \dots, x_i^n]$$
#
# for i $\in$ \{1, 2, $\dots$, n\}.
#
# $$Y = \sqrt{S} U = \sqrt{\frac{1}{N} X X^H} U,$$
#
# where U of size n $\times$ n is the **unitary Haar** matrix.
#
# The n eigenvalues of the data matrix $\sqrt{S}$ are supported on the non-negative real axis.
#
# The n eigenvalues of the transformed matrix $Y$ are supported on the whole complex plane.
#
# **Note**: **Circular Ensemble** (the properties of the circular ensemble and its applications to **Neural Networks** are left to be written up later)
#
# $$A = U^T U$$
# +
n = 1000
trials = 1
evs = []
c = 2 # fixed > 1
N = int(n * c)
for _ in range(trials):
X = np.random.randn(n, N) + 1j * np.random.randn(n, N) # if we use other stochastic matrix, outliers occur
X = np.mat(X)
S = 1. / N * np.dot(X, X.H)
Q = la.qr(np.random.randn(n, n) + 1j * np.random.randn(n, n))[0]
# R = la.qr(np.random.randn(n, n) + 1j * np.random.randn(n, n))[1]
U = np.dot(Q, np.diag(np.exp(2 * np.pi * 1j * np.random.rand(n)))) # Haar unitary matrix
# U = np.dot(Q, np.diag(np.sign(np.diag(R))))
# Note: sqrtm here is the matrix square root, i.e. A = sqrtm(S) guarantees A*A.T = S. Other matrix functions include logm, expm, etc.
Y = np.dot(sqrtm(S), U)
es = la.eigvals(Y)
for e in es:
evs.append(e)
evs = np.array(evs)
dt = .005 * np.pi
t = np.linspace(0, 2 * np.pi, int(2 * np.pi / dt))
# See the cited references for the derivation of the inner/outer radii below.
x1 = np.cos(t)
y1 = np.sin(t)
x2 = (1 + 1. / c) * np.cos(t)
y2 = (1 + 1. / c) * np.sin(t)
plt.figure(figsize=(8, 8))
Haar_Unitary, = plt.plot(evs.real, evs.imag, 'ob')
Inner_Circle, = plt.plot(x1, y1, 'r-')
Outer_Circle, = plt.plot(x2, y2, 'r-')
plt.title('Simple Ring Law', fontsize=18)
plt.xlabel(r'$Re(\lambda)$', fontsize=18)
plt.ylabel(r'$Im(\lambda)$', fontsize=18)
plt.legend(handles=[Haar_Unitary, Inner_Circle, Outer_Circle], labels=['Haar Unitary', 'Inner Circle', 'Outer_Circle'], loc='upper right')
plt.xlim((-1.5, 1.5))
plt.ylim((-1.5, 1.5))
plt.show()
|
beyond_Girko_law.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.0 64-bit (''harmonizome'': venv)'
# name: python38064bitharmonizomevenve1fd62c035f347dbaffce382c9034785
# ---
# # Harmonizome ETL: BioGPS (Human Cell Line)
# Created by: <NAME> <br>
# Credit to: <NAME>
#
# Data Source: http://biogps.org/downloads/
# appyter init
from appyter import magic
magic.init(lambda _=globals: _())
# +
import sys
import os
from datetime import date
import zipfile
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
import harmonizome.utility_functions as uf
import harmonizome.lookup as lookup
# -
# %load_ext autoreload
# %autoreload 2
# ### Notebook Information
print('This notebook was run on:', date.today(), '\nPython version:', sys.version)
# # Initialization
# +
# %%appyter hide_code
{% do SectionField(
name='data',
title='Upload Data',
img='load_icon.png'
) %}
{% do SectionField(
name='settings',
title='Settings',
img='setting_icon.png'
) %}
# +
# %%appyter code_eval
{% do DescriptionField(
name='description',
text='The following dataset examples were sourced from <a href="http://biogps.org/downloads/" target="_blank">biogps.org</a>. The example for probe annotations was sourced from <a href="http://www.affymetrix.com/support/technical/byproduct.affx?product=hgu133" target="blank">www.affymetrix.com</a>. If clicking on the examples does not work, they should be downloaded directly from the source website. For the first file, the first example is that of the Human U133A/GNF1H Gene Atlas, and the second is of the Human NCI60 Cell Lines.',
section='data'
) %}
{% set matrix_file = FileField(
constraint='.*\.zip$',
name='matrix',
label='Dataset File (zip)',
default='gnf1h-gcrma.zip',
examples={
'gnf1h-gcrma.zip': 'http://plugins.biogps.org/download/gnf1h-gcrma.zip',
'NCI60_U133A_20070815.raw.csv.zip': 'http://plugins.biogps.org/download/NCI60_U133A_20070815.raw.csv.zip'
},
section='data'
) %}
{% set gene_file = FileField(
constraint='.*\.csv.zip$',
name='gene_meta',
label='Probe Annotations (csv.zip)',
default='HG-U133A.na36.annot.csv.zip',
examples={
'HG-U133A.na36.annot.csv.zip': 'http://www.affymetrix.com/Auth/analysis/downloads/na36/ivt/HG-U133A.na36.annot.csv.zip',
},
section='data'
) %}
# +
# %%appyter code_eval
{% set dataset = ChoiceField(
name='dataset',
label='Dataset',
choices={
'Human U133A/GNF1H Gene Atlas': 'U133A',
'Human NCI60 Cell Lines': 'NCI60'
},
default='Human U133A/GNF1H Gene Atlas',
section='settings'
) %}
# -
# ### Load Mapping Dictionaries
symbol_lookup, geneid_lookup = lookup.get_lookups()
# ### Output Path
# +
# %%appyter code_exec
output_name = 'bioGPS-{{dataset}}'
path = 'Output/BioGPS-{{dataset}}'
if not os.path.exists(path):
os.makedirs(path)
# -
# # Load Data
# +
# %%appyter code_exec
matrix = pd.read_csv({{matrix_file}},
sep=',', index_col=0)
# -
matrix.head()
matrix.shape
# ## Load Probe Annotations
# +
# %%appyter code_exec
with zipfile.ZipFile({{gene_file}}) as zipf:
gene_meta = pd.read_csv(zipf.open('HG-U133A.na36.annot.csv'),
sep=',', skiprows=25,
usecols=['Probe Set ID', 'Gene Symbol'],
index_col=0)
# -
gene_meta.head()
gene_meta.shape
# # Pre-process Data
# ## Map Gene to Probe
# +
# %%appyter code_exec
columns_name = {
'U133A': 'Tissue',
'NCI60': 'Cell Line'
}['{{dataset}}']
# -
matrix.index = gene_meta.reindex(matrix.index)['Gene Symbol']
matrix.index.name = 'Gene Symbol'
matrix.columns.name = columns_name
matrix.head()
# ## Revert Duplicate Column Names
matrix.columns = matrix.columns.map(lambda x: x.split('.')[0])
matrix.head()
# ## Save Unfiltered Matrix to file
uf.save_data(matrix, path, output_name + '_matrix_unfiltered',
compression='gzip', dtype=np.float32)
# # Filter Data
# ## Map Gene Symbols to Up-to-date Approved Gene Symbols
matrix = uf.map_symbols(matrix, symbol_lookup)
matrix.shape
# ## Merge Duplicate Genes By Rows and Duplicate Columns
matrix = uf.merge(matrix, 'row')
matrix = uf.merge(matrix, 'column')
matrix.shape
# ## Remove Data that is More Than 95% Missing and Impute Missing Data
matrix = uf.remove_impute(matrix)
matrix.head()
matrix.shape
# ## Log2 Transform
matrix = uf.log2(matrix)
matrix.head()
# ## Normalize Matrix (Quantile Normalize the Matrix by Column)
matrix = uf.quantile_normalize(matrix)
matrix.head()
# ## Normalize Matrix (Z-Score the Rows)
matrix = uf.zscore(matrix)
matrix.head()
# ## Histogram of First Sample
matrix.iloc[:, 0].hist(bins=100)
# ## Histogram of First Gene
matrix.iloc[0, :].hist(bins=100)
# ## Save Filtered Matrix
uf.save_data(matrix, path, output_name + '_matrix_filtered',
ext='tsv', compression='gzip')
# # Analyze Data
# ## Create Gene List
gene_list = uf.gene_list(matrix, geneid_lookup)
gene_list.head()
gene_list.shape
uf.save_data(gene_list, path, output_name + '_gene_list',
ext='tsv', compression='gzip', index=False)
# ## Create Attribute List
attribute_list = uf.attribute_list(matrix)
attribute_list.head()
attribute_list.shape
uf.save_data(attribute_list, path, output_name + '_attribute_list',
ext='tsv', compression='gzip')
# ## Create matrix of Standardized values (values between -1, and 1)
standard_matrix = uf.standardized_matrix(matrix)
standard_matrix.head()
uf.save_data(standard_matrix, path, output_name + '_standard_matrix',
ext='tsv', compression='gzip')
# ## Plot of A Single Celltype, Normalized Value vs. Standardized Value
plt.plot(matrix[matrix.columns[0]],
standard_matrix[standard_matrix.columns[0]], 'bo')
plt.xlabel('Normalized Values')
plt.ylabel('Standardized Values')
plt.title(standard_matrix.columns[0])
plt.grid(True)
# ## Create Ternary Matrix
ternary_matrix = uf.ternary_matrix(standard_matrix)
ternary_matrix.head()
uf.save_data(ternary_matrix, path, output_name + '_ternary_matrix',
ext='tsv', compression='gzip')
# ## Create Gene and Attribute Set Libraries
uf.save_setlib(ternary_matrix, 'gene', 'up', path, output_name + '_gene_up_set')
uf.save_setlib(ternary_matrix, 'gene', 'down', path, output_name + '_gene_down_set')
uf.save_setlib(ternary_matrix, 'attribute', 'up', path,
output_name + '_attribute_up_set')
uf.save_setlib(ternary_matrix, 'attribute', 'down', path,
output_name + '_attribute_down_set')
# ## Create Attribute Similarity Matrix
attribute_similarity_matrix = uf.similarity_matrix(standard_matrix.T, 'cosine')
attribute_similarity_matrix.head()
uf.save_data(attribute_similarity_matrix, path,
output_name + '_attribute_similarity_matrix',
compression='npz', symmetric=True, dtype=np.float32)
# ## Create Gene Similarity Matrix
gene_similarity_matrix = uf.similarity_matrix(standard_matrix, 'cosine')
gene_similarity_matrix.head()
uf.save_data(gene_similarity_matrix, path,
output_name + '_gene_similarity_matrix',
compression='npz', symmetric=True, dtype=np.float32)
# ## Create Gene-Attribute Edge List
edge_list = uf.edge_list(standard_matrix)
uf.save_data(edge_list, path, output_name + '_edge_list',
ext='tsv', compression='gzip')
# # Create Downloadable Save File
uf.archive(path)
# ### Link to download output files: [click here](./output_archive.zip)
|
appyters/BioGPS_Harmonizome_ETL/BioGPS.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 3. XGBoost
# **Start from the most basic features, and try to improve step by step.**
# ## Run name
# +
import time
project_name = 'TalkingdataAFD2018'
step_name = 'XGBoost'
time_str = time.strftime("%Y%m%d_%H%M%S", time.localtime())
run_name = '%s_%s_%s' % (project_name, step_name, time_str)
print('run_name: %s' % run_name)
t0 = time.time()
# -
# ## Important params
# +
# Day of the TalkingData click log to load (a key of `day_rows` below);
# 0 is a small smoke-test slice.
date = 6
print('date: ', date)
# Number of rows to read from test.csv; None loads the full file.
test_n_rows = None
# test_n_rows = 18790469
# test_n_rows = 10*10000
# -
# Row offsets of each day inside train.csv: 'n_skiprows' is the number of
# rows (header included) before that day's slice, 'n_rows' its length, so a
# single day can be read without loading the whole file.
day_rows = {
    0: {
        'n_skiprows': 1,
        'n_rows': 10 * 10000
    },
    6: {
        'n_skiprows': 1,
        'n_rows': 9308568
    },
    7: {
        'n_skiprows': 1 + 9308568,
        'n_rows': 59633310
    },
    8: {
        'n_skiprows': 1 + 9308568 + 59633310,
        'n_rows': 62945075
    },
    9: {
        'n_skiprows': 1 + 9308568 + 59633310 + 62945075,
        'n_rows': 53016937
    }
}
# Slice boundaries for the selected day, consumed by pd.read_csv below.
n_skiprows = day_rows[date]['n_skiprows']
n_rows = day_rows[date]['n_rows']
# ## Import PKGs
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
from IPython.display import display
import os
import gc
import time
import random
import zipfile
import h5py
import pickle
import math
from PIL import Image
import shutil
from tqdm import tqdm
import multiprocessing
from multiprocessing import cpu_count
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import accuracy_score
random_num = np.random.randint(10000)
print('random_num: %s' % random_num)
# -
# ## Project folders
# +
cwd = os.getcwd()
input_folder = os.path.join(cwd, 'input')
output_folder = os.path.join(cwd, 'output')
model_folder = os.path.join(cwd, 'model')
log_folder = os.path.join(cwd, 'log')
print('input_folder: \t\t\t%s' % input_folder)
print('output_folder: \t\t\t%s' % output_folder)
print('model_folder: \t\t\t%s' % model_folder)
print('log_folder: \t\t\t%s' % log_folder)
train_csv_file = os.path.join(input_folder, 'train.csv')
train_sample_csv_file = os.path.join(input_folder, 'train_sample.csv')
test_csv_file = os.path.join(input_folder, 'test.csv')
sample_submission_csv_file = os.path.join(input_folder, 'sample_submission.csv')
print('\ntrain_csv_file: \t\t%s' % train_csv_file)
print('train_sample_csv_file: \t\t%s' % train_sample_csv_file)
print('test_csv_file: \t\t\t%s' % test_csv_file)
print('sample_submission_csv_file: \t%s' % sample_submission_csv_file)
# -
# ## Load data
# +
# %%time
train_csv = pd.read_csv(train_csv_file, skiprows=range(1, n_skiprows), nrows=n_rows, parse_dates=['click_time'])
test_csv = pd.read_csv(test_csv_file, nrows=test_n_rows, parse_dates=['click_time'])
sample_submission_csv = pd.read_csv(sample_submission_csv_file)
print('train_csv.shape: \t\t', train_csv.shape)
print('test_csv.shape: \t\t', test_csv.shape)
print('sample_submission_csv.shape: \t', sample_submission_csv.shape)
print('train_csv.dtypes: \n', train_csv.dtypes)
display(train_csv.head(2))
display(test_csv.head(2))
display(sample_submission_csv.head(2))
# -
y_data = train_csv['is_attributed']
train_csv.drop(['is_attributed'], axis=1, inplace=True)
display(y_data.head())
# ## Features
train_csv['day'] = train_csv['click_time'].dt.day.astype('uint8')
train_csv['hour'] = train_csv['click_time'].dt.hour.astype('uint8')
train_csv['minute'] = train_csv['click_time'].dt.minute.astype('uint8')
train_csv['second'] = train_csv['click_time'].dt.second.astype('uint8')
print('train_csv.shape: \t', train_csv.shape)
display(train_csv.head(2))
test_csv['day'] = test_csv['click_time'].dt.day.astype('uint8')
test_csv['hour'] = test_csv['click_time'].dt.hour.astype('uint8')
test_csv['minute'] = test_csv['click_time'].dt.minute.astype('uint8')
test_csv['second'] = test_csv['click_time'].dt.second.astype('uint8')
print('test_csv.shape: \t', test_csv.shape)
display(test_csv.head(2))
# Scratch cell: demonstrates np.ravel_multi_index, which df_add_counts below
# uses to encode each row's column combination as a single flat integer key.
arr = np.array([[3,6,6],[4,5,1]])
print(arr)
np.ravel_multi_index(arr, (7,6))
print(arr)
# order='F' ravels column-major instead of row-major.
print(np.ravel_multi_index(arr, (7,6), order='F'))
def df_add_counts(df, cols, tag="_count"):
    """Append a column counting how often each combination of ``cols`` occurs.

    The new column is named ``"_".join(cols) + tag``. ``df`` is mutated in
    place and also returned. Values in ``cols`` must be non-negative integers
    (they are flattened with ``np.ravel_multi_index``).
    """
    values = df[cols].values
    # Encode each row's tuple of values as one flat integer key.
    flat_keys = np.ravel_multi_index(values.T, values.max(0) + 1)
    # `inverse` maps each row to its unique key; `group_sizes` are per-key counts.
    _, inverse, group_sizes = np.unique(flat_keys, return_inverse=True, return_counts=True)
    df["_".join(cols) + tag] = group_sizes[inverse]
    return df
def df_add_uniques(df, cols, tag="_unique"):
    """Return `df` extended with a column giving, per group of
    cols[:-1], the number of distinct values of cols[-1]."""
    group_cols, target_col = cols[:-1], cols[-1]
    new_name = "_".join(cols) + tag
    gp = (df[cols]
          .groupby(by=group_cols)[target_col]
          .nunique()
          .reset_index()
          .rename(index=str, columns={target_col: new_name}))
    # Left-merge so every original row keeps its position and gets its
    # group's distinct count.
    return df.merge(gp, on=group_cols, how='left')
# +
# Group-count features over combinations that capture click behaviour
# (e.g. clicks per ip per hour, clicks per ip/app pair, ...).
train_csv = df_add_counts(train_csv, ['ip', 'day', 'hour'])
train_csv = df_add_counts(train_csv, ['ip', 'app'])
train_csv = df_add_counts(train_csv, ['ip', 'app', 'os'])
train_csv = df_add_counts(train_csv, ['ip', 'device'])
train_csv = df_add_counts(train_csv, ['app', 'channel'])
train_csv = df_add_uniques(train_csv, ['ip', 'channel'])
display(train_csv.head())
# +
# Same count/unique features for the test set.
test_csv = df_add_counts(test_csv, ['ip', 'day', 'hour'])
test_csv = df_add_counts(test_csv, ['ip', 'app'])
test_csv = df_add_counts(test_csv, ['ip', 'app', 'os'])
test_csv = df_add_counts(test_csv, ['ip', 'device'])
test_csv = df_add_counts(test_csv, ['app', 'channel'])
test_csv = df_add_uniques(test_csv, ['ip', 'channel'])
display(test_csv.head())
# -
# ## Prepare data
# +
# Drop columns that are not usable as model features (raw timestamp and
# leakage/id columns).
train_useless_features = ['click_time', 'attributed_time']
train_csv.drop(train_useless_features, axis=1, inplace=True)
test_useless_features = ['click_time', 'click_id']
test_csv.drop(test_useless_features, axis=1, inplace=True)
display(train_csv.head())
display(test_csv.head())
# -
# Hold out 1% of the training rows as a validation set.
x_train, x_val, y_train, y_val = train_test_split(train_csv, y_data, test_size=0.01, random_state=2017)
x_test = test_csv
print(x_train.shape)
print(y_train.shape)
print(x_val.shape)
print(y_val.shape)
print(x_test.shape)
# ## Train
# +
import lightgbm as lgb
from sklearn.metrics import roc_auc_score
# Wrap the pandas frames into LightGBM datasets; the validation set
# references the train set so bin boundaries are shared.
lgb_train = lgb.Dataset(x_train, label=y_train)
lgb_val = lgb.Dataset(x_val, label=y_val, reference=lgb_train)
# LightGBM parameters
params = {
    'task': 'train',
    'num_boost_round': 200,
    'early_stopping_rounds': 10,
    'boosting_type': 'gbdt', # (default="gbdt")
    'num_leaves': 300, # (default=31)
    'max_depth': -1, # (default=-1)
    'learning_rate': 0.1, # (default=0.1)
    'n_estimators': 500, # (default=10)
    'max_bin': 30, # (default=255)
    'subsample_for_bin': 100*10000, # (default=50000)
    'objective': 'binary', # (default=None)
    'min_split_gain': 0., # (default=0.)
    'min_child_weight': 1e-3, # (default=1e-3)
    'min_child_samples': 10, # (default=20)
    'subsample': 0.7, # (default=1.)
    # 'subsample_freq': 1, # (default=1)
    'colsample_bytree': 0.9, # (default=1.)
    'reg_alpha': 0., # (default=0.)
    'reg_lambda': 0., # (default=0.)
    'random_state': random_num, # (default=None)
    'n_jobs': -1, # (default=-1)
    'silent': False, # (default=True)
    'metric': ['auc', 'binary_logloss'],
}
print('params: ', params)
# train
gbm = lgb.train(
    params,
    train_set=lgb_train,
    valid_sets=lgb_val
)
print('*' * 80)
# Evaluate on train and validation sets; best_iteration comes from
# early stopping on the validation metric.
y_train_proba = gbm.predict(x_train, num_iteration=gbm.best_iteration)
y_train_pred = (y_train_proba>=0.5).astype(int)
acc_train = accuracy_score(y_train, y_train_pred)
roc_train = roc_auc_score(y_train, y_train_proba)
print('acc_train: %.4f \t roc_train: %.4f' % (acc_train, roc_train))
y_val_proba = gbm.predict(x_val, num_iteration=gbm.best_iteration)
y_val_pred = (y_val_proba>=0.5).astype(int)
acc_val = accuracy_score(y_val, y_val_pred)
roc_val = roc_auc_score(y_val, y_val_proba)
print('acc_val: %.4f \t roc_val: %.4f' % (acc_val, roc_val))
# -
# ## Predict
# Encode the validation ROC-AUC into the run name used for output files.
run_name_acc = run_name + '_' + str(int(roc_val*10000)).zfill(4)
print(run_name_acc)
y_test_proba = gbm.predict(x_test, num_iteration=gbm.best_iteration)
print(y_test_proba.shape)
print(y_test_proba[:20])
# +
def save_proba(y_train_proba, y_train, y_val_proba, y_val, y_test_proba, click_ids, file_name):
    """Persist train/val/test probabilities, targets and click ids to HDF5.

    Parameters mirror the arrays produced in the training cell; `file_name`
    is the destination .h5 file (any existing file is removed first).
    """
    print(click_ids[:5])
    if os.path.exists(file_name):
        os.remove(file_name)
        print('File removed: \t%s' % file_name)
    # Fix: pass an explicit mode. h5py 3.x no longer accepts File() without
    # a mode, and the old implicit default ('a') was error-prone anyway.
    with h5py.File(file_name, 'w') as h:
        h.create_dataset('y_train_proba', data=y_train_proba)
        h.create_dataset('y_train', data=y_train)
        h.create_dataset('y_val_proba', data=y_val_proba)
        h.create_dataset('y_val', data=y_val)
        h.create_dataset('y_test_proba', data=y_test_proba)
        h.create_dataset('click_ids', data=click_ids)
    print('File saved: \t%s' % file_name)
def load_proba(file_name):
    """Load the six arrays written by `save_proba` from an HDF5 file and
    return them in the same order."""
    keys = ('y_train_proba', 'y_train', 'y_val_proba', 'y_val',
            'y_test_proba', 'click_ids')
    with h5py.File(file_name, 'r') as h:
        # Materialise every dataset as a numpy array while the file is open.
        y_train_proba, y_train, y_val_proba, y_val, y_test_proba, click_ids = (
            np.array(h[k]) for k in keys)
    print('File loaded: \t%s' % file_name)
    print(click_ids[:5])
    return y_train_proba, y_train, y_val_proba, y_val, y_test_proba, click_ids
# Round-trip the probabilities through HDF5 (sanity check of save/load).
y_proba_file = os.path.join(model_folder, 'proba_%s.p' % run_name_acc)
save_proba(y_train_proba, y_train, y_val_proba, y_val, y_test_proba, np.array(sample_submission_csv['click_id']), y_proba_file)
y_train_proba, y_train, y_val_proba, y_val, y_test_proba, click_ids = load_proba(y_proba_file)
print(y_train_proba.shape)
print(y_train.shape)
print(y_val_proba.shape)
print(y_val.shape)
print(y_test_proba.shape)
print(len(click_ids))
# -
# %%time
# Write the Kaggle submission file (click_id, predicted probability).
submission_csv_file = os.path.join(output_folder, 'pred_%s.csv' % run_name_acc)
print(submission_csv_file)
submission_csv = pd.DataFrame({ 'click_id': click_ids , 'is_attributed': y_test_proba })
submission_csv.to_csv(submission_csv_file, index = False)
# +
print('Time cost: %.2f s' % (time.time() - t0))
print('random_num: ', random_num)
print('date: ', date)
print(run_name_acc)
print('Done!')
# -
|
talkingdata-adtracking-fraud-detection/3. XGBoost0.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Apache Spark for astronomy: hands-on session 1
#
# ### Context
#
# Welcome to the series of notebooks on Apache Spark! The main goal of this series is to get familiar with Apache Spark, and in particular its Python API called PySpark in a context of the astronomy. In this first notebook, we will introduce few Apache Spark functionalities of interest (and by no means complete!).
#
# ### Apache Spark
#
# [Apache Spark](https://spark.apache.org/) is a cluster computing framework, that is a set of tools to perform computation on a network of many machines. Spark started in 2009 as a research project, and it had a huge success so far in the industry. It is based on the so-called MapReduce cluster computing paradigm, popularized by the Hadoop framework using implicit data parallelism and fault tolerance.
#
# The core of Spark is written in Scala which is a general-purpose programming language that has been started in 2004 by <NAME> (EPFL). The language is inter-operable with Java and Java-like languages, and Scala executables run on the Java Virtual Machine (JVM). Note that Scala is not a pure functional programming language. It is multi-paradigm, including functional programming, imperative programming, object-oriented programming and concurrent computing.
#
# Spark provides many functionalities exposed through Scala/Python/Java/R API (Scala being the most complete one). As far as this workshop is concerned, we will use the Python API (called PySpark) for obvious reasons. But feel free to put your hands on Scala, it's worth it. For those interested, you can have a look at this [tutorial](https://gitlab.in2p3.fr/MaitresNageurs/QuatreNages/Scala) on Scala.
#
# ### Learning objectives
#
# - Loading and distributing data with Spark SQL (Apache Spark Data Sources API)
# - Exploring DataFrame & partitioning
# - Manipulating Spark SQL built-in functions
# ## Apache Spark Data Sources
#
# ### A tour of data formats
#
# There are many data formats used in the context of Big Data: CSV (1978), XML (1996), JSON (2001), Thrift (2007), Protobuf (2008), Avro & SequenceFile (2009), Parquet (2013), ORC (2016), and the list goes on... Some are _naively_ structured that is using a single type to describe the data (e.g. text) without any internal organisation to access faster the data. Others are more complex and highly optimised for big data treatment (e.g. Parquet). Spark handles most of them by default. Unfortunately those are not the data formats typically chosen by the scientific community. In astronomy for example you would rather store the data in FITS (1981) or HDF5 (1988) format, and in particle physics you would use ROOT (1995).
# These are multi-purposes data formats: images, histograms, spectra, particle lists, data cubes, or even structured data such as multi-table databases can be efficiently stored and accessed.
#
# ### Connecting to Data Source
#
# The data source API in Apache Spark belongs to the [Spark SQL module](https://spark.apache.org/sql/). Note that Spark Core has some simple built-in ways to read data from disk (binary or text), but Spark SQL is more complete and gives you access to DataFrames directly. If you want to connect a specific data source with Apache Spark, you have mostly two ways:
#
# - [indirect] Access and distribute your files as binary streams (Spark does it natively), and decode the data on-the-fly within executors using third-party libraries.
# - [native] Use a built-in or custom connector to access, distribute and decode the data natively.
#
# FITS or HDF5 as most of scientific data formats, were not designed for serialisation (distribution of data over machines) originally and they often use compression to reduce the size on disk. Needless to say that default Spark cannot read those natively.
#
# First attempts to connect those data formats (see e.g. [1] for FITS) with Spark were using the indirect method above. By reading files as binary streams, the indirect method has the advantage of having access to all FITS functionalities implemented in the underlying user library. This can be an advantage when working with the Python API for example which already contains many great scientific libraries. However this indirect method assumes each Spark mapper will receive and handle one entire file (since the filenames are parallelized and entire file data must be reconstructed from binary once the file has been opened by a Spark mapper). Therefore each single file must fit within the memory of a Spark mapper, hence the indirect method cannot distribute a dataset made of large FITS files (e.g. in [1] they have a 65 GB dataset made of 11,150 files). In addition by assuming each Spark mapper will receive and handle one entire file, the indirect method will have a poor load balancing if the dataset is made of files with not all the same size.
#
# Fortunately Apache Spark low-level layers are sufficiently well written to allow extending the framework and write native connectors for any kind of data sources. Recently connectors for FITS, HDF5 and ROOT were made available [2, 3, 4] to the community. With such connectors, there is a guarantee of having a good load balancing regardless the structure of the dataset and the size of the input files is no more a problem (a 1 TB dataset made of thousand 1 GB files or one single 1 TB file will be viewed as almost the same by a native Spark connector). Note however that the Data Source API is in Java/Scala and if there is no library to play with your data source in those languages you must implement it (what has been done in [2]) or interface with another language.
#
# Note that the low-level layers dealing with the data sources have been recently updated. Apache Spark 2.3 introduced the Data Source API version 2. While the version 1 is still available and usable for a long time, we expect that all Spark connectors will comply with this v2 in the future.
#
# [1] <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME>ira: Processing Astronomy Imagery Using Big Data Technology, DOI 10.1109/TBDATA.2016.2599926.
# [2] <NAME> Arnault, <NAME> <NAME>, FITS Data Source for Apache Spark, Computing and Software for Big Science (1804.07501). https://github.com/astrolabsoftware/spark-fits
# [3] <NAME> and <NAME> and <NAME> and Canon, <NAME>, H5spark: bridging the I/O gap between Spark and scientific data formats on HPC systems, Cray user group (2016). https://github.com/valiantljk/h5spark
# [4] <NAME>, & <NAME>. (2017, October 20). diana-hep/spark-root: Release 0.1.14 (Version v0.1.14). Zenodo. http://doi.org/10.5281/zenodo.1034230
# ## Spark SQL and DataFrames
#
# ### DataFrameReader
#
# The interface to read data from disk is always the same for any kind of built-in and officially supported data format:
#
# ```python
# df = spark.read\
# .format(format: str)\
# .option(key: str, value: Any)\
# # ...
# .option(key: str, value: Any)\
# .load(path: str)
# ```
#
# Note that for most of the data sources, you can use wrappers such as:
#
# ```python
# spark.read.csv(path, key1=value1, key2=value2, ...)
# ```
#
# **Format**: The format can be "csv", "json", "parquet", etc.
#
# **Options**: The number of options depends on the underlying data source. Each has its own set of options.
# In most of the case, no options are needed, but you might want to explore the different possibilities at some point. Surprisingly it is not easy to find documentation and the best remains to read the source code documentation. In pyspark you can easily access it via the wrappers:
#
# ```python
# # DataFrameReader object
# df_reader = spark.read
#
# # Doc on reading CSV
# df_reader.csv?
# # doc printed
#
# # Doc on reading Parquet
# df_reader.parquet?
# # doc printed
# ```
#
#
# **Path**: The way to specify path is threefold: either a single file (`path/to/folder/myfile.source`), or an entire folder (`path/to/folder`), or a glob pattern (`path/to/folder/*pattern*.source`). Note that you also need to specify the type of file system you are using. Example:
#
# ``` python
# # Connect to hdfs
# path = 'hdfs:///path/to/data'
#
# # Connect to S3
# path = 's3:///path/to/data'
#
# # Connect to local file system
# path = 'file:///path/to/data'
# ```
#
# If nothing is specified (`'/path/to/data'`), it will adapt to your `--master` (e.g. if you launch spark in local mode, you will connect to the local file system by default).
#
#
# ### Using a custom connector
#
# You can also connect to custom connector not included in the default Spark distribution. To do so, you will need to specify the dependencies when submitting your job or invoking your shell. If your connector is available through [Maven Central Repository](https://search.maven.org/), you can easily specify it via:
#
# ```
# # Direct download from central repository
# spark-submit --packages groupId:artifactId:version ...
# ```
#
# Note that this is the same syntax when launching the `pyspark` shell.
# For example, if you want to read FITS files using the [spark-fits](https://github.com/astrolabsoftware/spark-fits) connector you would add the following:
#
# ```
# # Direct download from central repository
# spark-submit --packages com.github.astrolabsoftware:spark-fits_2.11:0.7.1 ...
# ```
#
# You can find the spark-fits entry in the Maven Central [here](https://search.maven.org/artifact/com.github.astrolabsoftware/spark-fits_2.11/0.7.1/jar) for reference.
# Alternatively you can download the source code for a particular connector, compile it and include the `jars`:
#
# ```
# # Specify manually the dependency
# spark-submit --jars /path/to/lib/spark-fits.jars ...
# ```
#
# Note that when you launch `pyspark`, already a numbers of `jars` are included by default (the ones for Spark for example).
# ## Loading and distributing data
#
# You will find test data in the folder `data`.
# +
from pyspark.sql import SparkSession
# Initialise our Spark session
# (getOrCreate re-uses an already-running session if one exists).
spark = SparkSession.builder.getOrCreate()
# -
# ### Loading Data: simply structured data (text)
#
# You can load CSV data into a DataFrame by simply using:
# Load simple CSV file
# With no options, every column is read as String with dummy names.
df_csv = spark.read.format("csv")\
    .load("../../data/clusters.csv")
df_csv.printSchema()
# Notice by default the CSV connector interprets all entries as String, and give dummy names to columns. You can infer the data type and use the first row as column names by specifying options:
# Re-read with type inference and the first row used as column names.
df_csv = spark.read.format("csv")\
    .option('inferSchema', True)\
    .option('header', True)\
    .load("../../data/clusters.csv")
df_csv.printSchema()
# Make a nice representation of our data
df_csv.show(5)
# ### Loading Data: complex structured data (Parquet)
#
# More complex data format can infer automatically schema, and data types.
# They are also optimised for fast data access and small memory consumption.
# Same using Parquet - Note that the schema and the data types
# are directly inferred.
# (Parquet stores the schema in the file, so no options are needed.)
df_parquet = spark.read.format("parquet").load("../../data/clusters.parquet")
df_parquet.printSchema()
df_parquet.show(5)
# ### Loading Data: astronomy format (FITS)
#
# To read FITS, you will need to specify a custom connector such as [spark-fits](https://github.com/astrolabsoftware/spark-fits) (this is done for you):
#
# ```
# PYSPARK_DRIVER_PYTHON=jupyter-notebook pyspark --packages com.github.astrolabsoftware:spark-fits_2.11:0.8.3 ...
# ```
# "hdu" selects which Header/Data Unit of the FITS file to read.
df_fits = spark.read.format("fits").option("hdu", 1).load("../../data/clusters.fits")
df_fits.printSchema()
df_fits.show(5)
# ## Partitioning
#
# You might have noticed that Spark cuts the dataset into partitions, and for each partition Spark will run one task.
# Following the principle that moving computation is usually cheaper than moving data, Spark reads file blocks in a performant way: instead of copying file blocks to a central compute node, which can be expensive, the driver sends the computation to worker nodes close to DataNodes where the data reside.
# Normally, Spark tries to set the number of partitions automatically based on your distributed file system configuration. For example in HDFS, the size of data blocks is typically 128 MB (tunable), therefore the default number of Spark partitions when reading data will be the total number of 128 MB chunks for your dataset.
#
# ```
# How many partitions should I use?
# ```
#
# There is no unique answer to that. You will often hear: `typically you want 2-4 partitions for each CPU in your cluster`, but that implies you can accommodate an infinite number of CPUs at limited partition size. In practice it will mainly depend on:
# - the total volume of data you want to distribute,
# - the number of CPU you have access to and their RAM,
# - and the kind of task you want to perform.
#
# If you have too few partitions, you will not take benefit from all of the cores available in the cluster (time to solution can be longer, and you can run out of memory for intensive tasks).
# If you have too many partitions, there will be excessive overhead in managing many small tasks.
# In between, you are generally good.
# Note that when you load data, Spark assign itself the number of partitions, and you can repartition the dataset using:
#
# ```python
# # numPartitions is arbitrary but
# # this operation will add a shuffle step
# df.repartition(numPartitions)
#
# # Using either a number of partition or
# # column names to repartition by range
# df.repartitionByRange(numPartitions, colnames)
#
# # Using one or several columns to repartition
# df.orderBy(colnames)
#
# # numPartitions must be lower than the
# # current one, but no shuffle is performed
# df.coalesce(numPartitions)
# ```
#
# You can access the number of partitions in use using:
#
# ```python
# df.rdd.getNumPartitions()
# ```
#
# Frequent basic use-cases:
# - The standard: You have a lot of data stored in large files and data entries need to be process independently from each other --> keep the default.
# - The multi-files: When reading many small files (each being much smaller than the typical 128 MB data block size), you usually end up with way more partitions than if you were reading the same volume of data but with fewer files --> repartition your dataset with fewer partitions.
# - The shuffle: If your tasks involve a lot of data movement and communication between machines (data shuffle) --> it is usually a good idea to keep the number of partitions not too high.
# - The heavy filter: sometimes you filter out a lot of data based on some condition, and then you execute some action on the remaining subset. Because of the filtering, you might end up with many empty partitions --> try to see if repartitioning with fewer partitions helps in processing the remaining data faster.
#
# **In practice you will end up experimenting a bit with the number of partitions... But always keep in mind the main reason to repartition is to minimize data movement inside the cluster.**
# ## Basic operations on DataFrames
#
# Let's load our data
# DataFrame used by all examples below.
df = spark.read.format("parquet").load("../../data/clusters.parquet")
# ### Select & filters
#
# There are powerful methods to select subsets of columns or to filter rows based on values. Note that column selection and row filtering are transformations (in the sense of functional programming) - nothing really happens to the data until you trigger an action.
# +
# Filtering rows based on entry values
df_x_more_than_one = df.filter("x > 1")
# Same as before, but different syntax
df_x_more_than_one = df.filter(df["x"] > 1)
# Filtering column based on their name
df_y_only = df.select('y')
df_x_and_y = df.select(['x', 'y'])
# You can chain transformations
df_x_cluster_one = df.select('x').filter('id == 1')
# Trigger an action
# (transformations above are lazy; count() forces execution)
row_with_x_more_than_one = df_x_more_than_one.count()
print("{} entries with x > 1".format(row_with_x_more_than_one))
# -
# -
# ### Map and mapPartitions
#
# You can also apply transformation on DataFrame values. The most simple transformation would use the `map` method which preserves the cardinality of the DataFrame. `mapPartitions` is similar, although the cardinality is not preserved.
# +
# Example for map: multiply all elements by 2
def multiply_by_two(row):
    """Return a new row with every element of `row` doubled."""
    return [element * 2 for element in row]
# map is a RDD method (not available for DataFrame in pyspark)
# Apply the function to every row, then rebuild a DataFrame to display it.
df.rdd.map(multiply_by_two).toDF(df.columns).show(5)
# +
# Example for mapPartitions: count the number of rows per partition
def yield_num_rows(part, param1):
""" Yield the number of rows in the partition
Parameters
----------
part : Iterator
Iterator containing partition data
Yield
----------
length: integer
number of rows inside the partition
"""
partition_data = [*part]
print(param1)
yield len(partition_data)
# Let's repartition our DataFrame in 12 partitions
df_repart = df.repartition(12)
# mapPartitions is a RDD method(not available for DataFrame in pyspark)
print("Number of rows per Spark partitions:")
# df_repart.rdd.mapPartitions(lambda part: yield_num_rows(part)).collect()
# The lambda closes over param1, which Spark serialises to the executors.
param1 = 2
df_repart.rdd.mapPartitions(lambda part: yield_num_rows(part, param1)).collect()
# -
# notice the super good load balancing! Be careful though in using `collect`, as data flows from the executors to the (poor and lonely and undersized) driver. Always reducing the data first!
# **Exercise (££)**: Compute the barycentre of each partition (hint: repartition or re-order according to the `id` column).
# +
import numpy as np
def yield_barycentre(part):
    """ Yield the barycentre (mean x, y, z) of the partition data

    (Docstring fixed: it was copied from yield_num_rows and described
    row counting instead of the barycentre computation.)

    Parameters
    ----------
    part : Iterator
        Iterator containing partition data; each row is (x, y, z, id)

    Yield
    ----------
    barycentre: array or list
        [mean(x), mean(y), mean(z)] of the partition, or
        [None, None, None] for an empty partition
    """
    try:
        partition_data = [*part]
        # Columns: x, y, z coordinates; the trailing id column is ignored.
        x, y, z, _ = np.transpose(partition_data)
        yield np.mean([x, y, z], axis=1)
    except ValueError:
        # Empty partition: np.transpose result cannot be unpacked.
        yield [None, None, None]
# Let's repartition our DataFrame according to "id"
# (orderBy range-partitions the data, so rows sharing an id end up in the
# same partition — presumably one cluster per partition; verify with data)
df_repart = df.orderBy("id")
# mapPartitions is a RDD method(not available for DataFrame in pyspark)
print("Cluster coordinates:")
df_repart.rdd.mapPartitions(yield_barycentre).collect()
# -
# ### Statistics
#
# You can easily access basics statistics of your DataFrame:
# The describe method returns a DataFrame
df.describe().show()  # count/mean/stddev/min/max per numeric column
# ### Aggregation
#
# Apache Spark has built-in method to perform aggregation. Be careful though - this implies shuffle (i.e. communication between machines and data transfer), and can be a performance killer!
#
# **Exercise (£):** group by `id`, and count the number of elements per `id`
# Number of rows per cluster id (groupBy triggers a shuffle).
df.groupBy("id").count().show()
# ### Direct acyclic graph (DAG)
#
# As quickly highlighted above, Spark commands are either transformations (filter, select, ...) or actions (show, take, ...). You can chain actions, and in the end you trigger the computation with an action. Before running any action, Spark will build a graph of the commands, called Direct Acyclic Graph, and... it will do some magic for you.
#
# **Exercise (£):** Look at the two commands and output. Do you notice the magic?
# Compare the two physical plans: Spark's optimizer may reorder the
# filter and the aggregation, so both commands can yield similar plans.
df.groupBy("id").count().filter('id >= 1').explain()
df.filter('id >= 1').groupBy("id").count().explain()
|
notebooks/solutions/session1_solutions.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import spacy
# Load the small English pipeline (tagger + lemmatizer included).
nlp = spacy.load('en_core_web_sm')
# ### Lemmatizing
# - Text normalization
# - Reducing word to base form
# - A word based on its intended meaning
# - Stemming
# > cutting suffix and prefix to convert to its base form
doc1 = nlp("study studious student studio studying")
doc1
# Surface form, lemma and part-of-speech for each token.
for token in doc1:
    print(token.text, token.lemma_, token.pos_)
doc2 = nlp("walk walking walker was be were go going gone")
doc2
for token in doc2:
    print(token.text, token.lemma_, token.pos_)
# ### Analysing doc_covid
# Fix: read the corpus via a context manager so the file handle is closed
# promptly (the original `nlp(open('covid19.txt').read())` leaked it).
with open('covid19.txt') as f:
    doc_covid = nlp(f.read())
doc_covid
# Show tokens whose lemma differs from the surface form.
for token in doc_covid:
    if token.text != token.lemma_:
        print(token.text, token.lemma_, token.pos_, sep = '\t')
# +
# Capital is converted to small letters
# s is removed from Nouns
# Verb is converted to base form
#
# -
# Case-insensitive comparison removes the capitalisation-only differences.
for token in doc_covid:
    if token.text.lower() != token.lemma_.lower():
        print(token.text, token.lemma_, token.pos_, sep = '\t')
# Additionally drop nouns, leaving mostly inflected verbs.
for token in doc_covid:
    if token.text.lower() != token.lemma_.lower() and token.pos_ != 'NOUN':
        print(token.text, token.lemma_, token.pos_, sep = '\t')
|
Spacy/3_Spacy_Lemmatizing.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %load_ext autoreload
# %autoreload 2
# +
#export
import nb_002
from nb_002c import *
import operator
# -
# Dataset locations (Caltech101 must be downloaded into data/caltech101).
DATA_PATH = Path('data')
PATH = DATA_PATH/'caltech101' # http://www.vision.caltech.edu/Image_Datasets/Caltech101/
# # Caltech 101
# ## Create validation set
# The first step will be to create a dataset from our files. We need to separate a definite amount of files to be used as our validation set. We will do this randomly by setting a percentage apart, in this case 0.2.
#export
class FilesDataset(Dataset):
    """Dataset of image files with class labels.

    Labels are mapped to integer indices via `class2idx`; indexing the
    dataset returns an (opened image, label index) pair.
    """
    def __init__(self, fns, labels, classes=None):
        # Default class list: the distinct labels (order is not deterministic
        # because it comes from a set).
        if classes is None: classes = list(set(labels))
        self.classes = classes
        self.class2idx = {v:k for k,v in enumerate(classes)}
        self.fns = np.array(fns)
        self.y = [self.class2idx[o] for o in labels]
    def __len__(self): return len(self.fns)
    def __getitem__(self,i): return open_image(self.fns[i]),self.y[i]
    @classmethod
    def from_folder(cls, folder, classes=None, test_pct=0.):
        """Build dataset(s) from a folder-per-class layout; when test_pct>0,
        randomly split off that fraction and return (train_ds, test_ds)."""
        # Fix: the comprehension variable was named `cls`, shadowing the
        # classmethod parameter; renamed to `c` for clarity.
        if classes is None: classes = [c.name for c in find_classes(folder)]
        fns,labels = [],[]
        for cl in classes:
            fnames = get_image_files(folder/cl)
            fns += fnames
            labels += [cl] * len(fnames)
        if test_pct==0.: return cls(fns, labels, classes=classes)
        fns,labels = np.array(fns),np.array(labels)
        # Per-file Bernoulli draw decides the split membership.
        is_test = np.random.uniform(size=(len(fns),)) < test_pct
        return (cls(fns[~is_test], labels[~is_test], classes=classes),
                cls(fns[is_test], labels[is_test], classes=classes))
# +
# Subset of Caltech101 categories used in this notebook.
classes = ["airplanes", "Motorbikes", "BACKGROUND_Google", "Faces", "watch", "Leopards", "bonsai",
    "car_side", "ketch", "chandelier", "hawksbill", "grand_piano", "brain", "butterfly", "helicopter", "menorah",
    "trilobite", "starfish", "kangaroo", "sunflower", "ewer", "buddha", "scorpion", "revolver", "laptop", "ibis", "llama",
    "minaret", "umbrella", "electric_guitar", "crab", "crayfish",]
# Fixed seed so the random train/valid split is reproducible.
np.random.seed(42)
train_ds,valid_ds = FilesDataset.from_folder(PATH, test_pct=0.2)
# Sample image used throughout; xi() returns a fresh Image each call
# because transforms mutate their input.
x = train_ds[1114][0]
def xi(): return Image(train_ds[1114][0])
classes = train_ds.classes
c = len(classes)
len(train_ds),len(valid_ds),c
# -
# -
# ## Rectangular affine fix
show_image(x, figsize=(6,3), hide_axis=False)
print(x.shape)
# Rotation matrix for 40 degrees, and the rotated (distorted) image.
rot_m = np.array(rotate.func(40.)); rot_m
rotate(xi(), 40.).show(figsize=(6,3))
# +
#export
def affine_mult(c,m):
    """Apply the affine matrix `m` to the coordinate grid `c`.

    `c` has shape (n, h, w, 2).  The off-diagonal terms of `m` are rescaled
    (in place, as callers expect) so rotations/shears respect the grid's
    h/w aspect ratio — this is the rectangular-image fix.
    """
    if m is None:
        return c
    size = c.size()
    _, h, w, _ = size
    # Aspect-ratio correction of the linear part (mutates `m`).
    m[0, 1] *= h / w
    m[1, 0] *= w / h
    coords = c.view(-1, 2)
    # coords @ A^T + t  (same result as torch.addmm(t, coords, A^T)).
    coords = coords @ m[:2, :2].t() + m[:2, 2]
    return coords.view(size)
# Monkey-patch nb_002 so its transforms use the aspect-ratio-aware version.
nb_002.affine_mult = affine_mult
# -
# After the patch, the rotation no longer distorts the rectangular image.
rotate(xi(), 40.).show(figsize=(6,3))
# ## Crop with padding
# Now we are going to add padding or crop automatically according to a desired final size. The best way to do this is to integrate both transforms into the same function.
#
# We will do the padding necessary to achieve a _size x size_ (square) image. If _size_ is greater than either the height or width dimension of our image, we know we will need to add padding. If _size_ is smaller than either _height_ or _width_ dimension of our image, we will have to crop. We might have to do one, the other, both or neither. In this example we are only adding padding since both our _height_ and _width_ are smaller than 300, our desired dimension for the new _height_ and _width_.
#
# As is the case with our original function, we can add a *row_pct* or *col_pct* to our transform to focus on different parts of the image instead of the center which is our default.
#
# **Crop_pad**
#
# Crop_pad crops and pads our image to create an output image according to a given target size.
#
# _Parameters_
#
# 1. **Size** What is the target size of each side in pixels. If only one number *s* is specified, image is made square with dimensions *s* \* *s*.
#
# Domain: Positive integers.
#
# 2. **Padding_mode** What is the type of padding used in the transform.
#
# Domain: 'reflect', 'zeros', 'border'
#
# 3. **Row_pct** Determines where to cut our image vertically on the bottom and top when cropping (which rows are left out). If <0.5, more rows will be cut in the top than in the bottom and viceversa (varies linearly).
#
# Domain: Real numbers between 0 and 1.
#
# 4. **Col_pct** Determines where to cut our image horizontally on the left and right when cropping (which columns are left out). If <0.5, more rows will be cut in the left than in the right and viceversa (varies linearly).
#
# Domain: Real numbers between 0 and 1.
#
# Note: While experimenting take into account that this example image contains a thin black border in the original. This affects our transforms and can be seen when we use reflect padding.
# +
#export
# TfmCrop runs late in the transform pipeline (high order value).
class TfmCrop(TfmPixel): order=99

# crop_pad crops and/or pads `x` to the target `size`; row_pct/col_pct pick
# which part of the image survives a crop (0.5 = centre).
@TfmCrop
def crop_pad(x, size, padding_mode='reflect',
             row_pct:uniform = 0.5, col_pct:uniform = 0.5):
    # F.pad calls the 'zeros' mode 'constant'.
    if padding_mode=='zeros': padding_mode='constant'
    size = listify(size,2)
    if x.shape[1:] == size: return x
    rows,cols = size
    # Pad first (symmetrically) if the image is smaller than the target...
    if x.size(1)<rows or x.size(2)<cols:
        row_pad = max((rows-x.size(1)+1)//2, 0)
        col_pad = max((cols-x.size(2)+1)//2, 0)
        x = F.pad(x[None], (col_pad,col_pad,row_pad,row_pad), mode=padding_mode)[0]
    # ...then crop to the exact target, anchored by row_pct/col_pct.
    row = int((x.size(1)-rows+1)*row_pct)
    col = int((x.size(2)-cols+1)*col_pct)
    x = x[:, row:row+rows, col:col+cols]
    return x.contiguous() # without this, get NaN later - don't know why
# -
# Pad up to 300x300, crop down to 150x150, and crop with an offset anchor.
crop_pad(xi(), 300, row_pct=0.,col_pct=0., padding_mode='constant').show()
crop_pad(xi(), 150).show()
crop_pad(xi(), 150, row_pct=0.,col_pct=0.98, padding_mode='constant').show()
# +
# Random anchors: each resolve() draws new row_pct/col_pct in [0, 1).
tfm = crop_pad(size=100, row_pct=(0,1.), col_pct=(0,1.))
_,axes = plt.subplots(1,4, figsize=(12,3))
for ax in axes.flat:
    tfm.resolve()
    tfm(xi()).show(ax)
# -
# -
# ## Combine crop/resize
# Next, we are going to combine our cropping and padding with the resize operation. In other words, we will get a picture, and crop/pad it in such a way that we get our desired size. It is similar to our previous transform only this time the final dimensions don't have to be square. This gives us more flexibility since our network architecture might take rectangular pictures as input.
#
# First, we will get the target dimensions. For this we have built *get_crop_target*. This function takes three arguments: a target_px, a target_aspect and a multiple. *target_px* is our base dimension, *target_aspect* is our relation between width and height and _mult_ is what do we need our dimensions to be a multiple of.
#
# To understand this better, let's take our example where our values are *target_px*=220, *target_aspect*=2., _mult_=32 (default). In plain text we are telling our function: return the dimensions that meet a ~220\*220 area image with a width twice as long as the height and where height and width are multiples of 32.
# +
#export
def round_multiple(x, mult): return (int(x/mult+0.5)*mult)
def get_crop_target(target_px, mult=32):
    "Target (rows, cols) for cropping: `target_px` per side, each rounded to a multiple of `mult`."
    rows, cols = listify(target_px, 2)
    return (round_multiple(rows, mult), round_multiple(cols, mult))
# -
crop_target = get_crop_target(220)
target_r,target_c = crop_target
crop_target
_,r,c = x.shape; x.shape
# We are now going to transform our image to our desired dimensions by using crop or padding. Before we crop or pad we will make an intermediate transform that will allow us to later get our output image with the desired dimensions. Let's call our initial dimensions h_i, w_i, our intermediate dimensions h_m, w_m and our output dimensions h_o, w_o.
#
# Our objective will be to get our output image by cropping or padding but not both. To achieve this, we will first enlarge or reduce our original image. **get_resize_target will enlarge or reduce our input image (keeping the ratio h_i/w_i constant) until one of the dimensions is equal to the corresponding final output dimension (i.e. h_m=h_o or w_m=w_o)**. But how does it know which dimension to equate? We can figure this out intuitively. If we intend to crop, our intermediate image's area has to be larger than our output image (since we are going to crop out some pixels) and if we intend to pad, our intermediate image's area has to be smaller than our output image (since we will add some pixels). This means that the dimension we will choose to equate will depend on the relationship between the ratios h_i/h_o and w_i/w_o. If we want to **crop** we will want to equate the dimension with **the smallest ratio** since that would mean that (h_m, w_m) >= (h_o, w_o) which is exactly what we want (a larger area). Conversely if we want to **pad**, we will equate the dimension with **the largest ratio** since that will guarantee that (h_m, w_m) <= (h_o, w_o) (a smaller area).
#
# As an example say we have our image with dimensions h_i = 192 and w_i = 128 and our target dimensions are h_o=160 w_o=320. That is, we have to turn a vertical rectangle into a horizontal rectangle. We can do this in two ways:
#
# 1. Padding the borders so we make our image wider
# 2. Cropping the top and bottom so we squash our image and make it wider
#
# If we intend to crop, our intermediate dimensions will be (h_m, w_m) = (480, 320). If we intend to pad (h_m, w_m) = (160, 107). Note that 480/320 ≈ 160/107 ≈ 192/128.
r_ratio = r/target_r
c_ratio = c/target_c
# min -> crop; max -> pad
ratio = max(r_ratio,c_ratio)
r_ratio,c_ratio,ratio
r2,c2 = round(r/ratio),round(c/ratio); r2,c2
#export
def get_resize_target(img, crop_target, do_crop=False):
    """Intermediate (channels, rows, cols) to resize `img` to so that a single
    crop (do_crop=True) or pad (False) then yields `crop_target`.

    Keeps the aspect ratio of `img` and equates one dimension with the target:
    cropping needs an intermediate image at least as large as the target, so it
    scales by the smaller of the two row/col ratios; padding needs one no
    larger, so it scales by the bigger. Returns None if `crop_target` is None.
    """
    if crop_target is None:
        return None
    channels, rows, cols = img.shape
    tgt_rows, tgt_cols = crop_target
    row_ratio = rows / tgt_rows
    col_ratio = cols / tgt_cols
    scale = min(row_ratio, col_ratio) if do_crop else max(row_ratio, col_ratio)
    return channels, round(rows / scale), round(cols / scale)
get_resize_target(x, crop_target, False)
get_resize_target(x, crop_target, True)
# +
#export
def is_listy(x)->bool: return isinstance(x, (tuple,list))
def apply_tfms(tfms, x, do_resolve=True, xtra=None, size=None,
               mult=32, do_crop=True, padding_mode='reflect', **kwargs):
    "Apply `tfms` to tensor `x`; if `size` is given, resize so a crop/pad produces it."
    if not tfms: return x
    if not xtra: xtra={}
    # Run transforms in the order declared by each tfm's `order` attribute.
    tfms = sorted(listify(tfms), key=lambda o: o.tfm.order)
    if do_resolve: resolve_tfms(tfms)  # draw random parameter values for this call
    x = Image(x.clone())  # clone so the caller's tensor is never mutated
    x.set_sample(padding_mode=padding_mode, **kwargs)
    if size:
        # Resize to an intermediate shape from which a single crop OR pad
        # (never both) produces the final `size` — see get_resize_target.
        crop_target = get_crop_target(size, mult=mult)
        target = get_resize_target(x, crop_target, do_crop=do_crop)
        x.resize(target)
    size_tfms = [o for o in tfms if isinstance(o.tfm,TfmCrop)]
    for tfm in tfms:
        # Per-tfm kwargs from `xtra` take precedence; crop tfms also get
        # the final size and padding mode.
        if tfm.tfm in xtra: x = tfm(x, **xtra[tfm.tfm])
        elif tfm in size_tfms: x = tfm(x, size=size, padding_mode=padding_mode)
        else: x = tfm(x)
    return x.px
import nb_002b
nb_002b.apply_tfms = apply_tfms
# +
tfms = [rotate(degrees=(40.,40.)),
crop_pad()]
img = apply_tfms(tfms, x, size=210)
show_image(img, figsize=(6,3))
img.shape
# +
tfms = [rotate(degrees=(40.,40.)),
crop_pad(row_pct=(0,1.), col_pct=(0,1.))]
img = apply_tfms(tfms, x, size=210)
show_image(img, figsize=(6,3))
get_crop_target(210), img.shape
# -
# # Fit
# Let's see how our transforms look for different values of zoom, rotate and crop_pad.
# ## Transform
#export
# Randomized variants: the zoom/crop centre is drawn uniformly from (0, 1).
def rand_zoom(*args, **kwargs): return zoom(*args, row_pct=(0,1), col_pct=(0,1), **kwargs)
def rand_crop(*args, **kwargs): return crop_pad(*args, row_pct=(0,1), col_pct=(0,1), **kwargs)
def zoom_crop(scale, do_rand=False, p=1.0):
    "Pair a zoom (by `scale`, applied with probability `p`) with a crop; random centres if `do_rand`."
    zoom_fn = rand_zoom if do_rand else zoom
    crop_fn = rand_crop if do_rand else crop_pad
    return [zoom_fn(scale=scale, p=p), crop_fn()]
# +
tfms = [
rotate(degrees=(-20,20.)),
*zoom_crop(scale=(1.,1.95), do_rand=True)
]
_,axes = plt.subplots(1,4, figsize=(12,3))
for ax in axes.flat:
show_image(apply_tfms(tfms, x, padding_mode='zeros', do_crop=False, size=100), ax)
# -
_,axes = plt.subplots(1,4, figsize=(12,3))
for ax in axes.flat: show_image(apply_tfms(tfms, x, size=100), ax)
# ## Fit
# Finally, with our choice of transforms and parameters we are going to fit our Darknet model and check our results. To fit our model we will need to resize our images to have the same size so we can feed them in batches to our model. We face the same decisions as before.
#
# In this case we chose to pad our images (since in \_apply_affine do_crop default is False). If we wanted to crop instead, we can easily add do_crop=True to train_tds.
#
# We also decided to make our images square, with dimension size x size. If we wanted a rectangle with width to height ratio *a* we could have added aspect=*a* to train_ds.
size = 150
train_tfms = [
rotate(degrees=(-20,20.)),
*zoom_crop(scale=(1.,2.))
]
valid_tfms = [crop_pad()]
_,axes = plt.subplots(1,4, figsize=(10,5))
for ax in axes.flat: show_image(apply_tfms(train_tfms, x, size=size), ax)
show_image(apply_tfms(valid_tfms, x, size=size))
bs = 128
import nb_002b
nb_002b.apply_tfms = apply_tfms
valid_tds = DatasetTfm(valid_ds, valid_tfms, size=size)
train_tds = DatasetTfm(train_ds, train_tfms, size=size)
data = DataBunch(train_tds, valid_tds, bs=bs, num_workers=0)
xb,yb = next(iter(data.train_dl))
b = xb.transpose(1,0).reshape(3,-1)
data_mean=b.mean(1).cpu()
data_std=b.std(1).cpu()
data_mean,data_std
show_image_batch(data.train_dl, train_ds.classes, 4)
norm,denorm = normalize_funcs(data_mean,data_std)
data = DataBunch(train_tds, valid_tds, bs=bs, num_workers=12, tfms=norm)
len(data.train_dl),len(data.valid_dl)
model = Darknet([1, 2, 4, 4, 2], num_classes=c, nf=16)
learn = Learner(data, model)
opt_fn = partial(optim.SGD, momentum=0.9)
learn.fit(1, 0.1, opt_fn=opt_fn)
learn.fit(1, 0.2, opt_fn=opt_fn)
learn.fit(5, 0.4, opt_fn=opt_fn)
learn.fit(5, 0.1, opt_fn=opt_fn)
learn.fit(5, 0.01, opt_fn=opt_fn)
# # Fin
|
dev_nb/003_rect_square_crop.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import SequenceCleanup
from Bio import AlignIO
from Bio.Align import MultipleSeqAlignment
from collections import Counter
segments = ['0', '1 PB2', '2 PB1', '3 PA', '4', '5 NP', '6', '7 M1', '8 NS1']
number = 5
segment = segments[number]
sequences = AlignIO.read('Segment %s CDS.fasta' % segment, 'fasta')
print sequences
# +
start_gap_counter = Counter()
end_gap_counter = Counter()
gap_lengths = SequenceCleanup.GetListOfGapLengths(sequences)
for start, end in gap_lengths:
start_gap_counter[start] += 1
end_gap_counter[end] += 1
# -
print gap_lengths
print start_gap_counter
print end_gap_counter
# We will only be concerned with the segments that have 18 gaps at the beginning and 3 gaps at the end.
# +
# Create a new list that holds the sequences with 18 gaps at the beginning and 3 gaps at the end.
alignment = MultipleSeqAlignment(records=None)
front_cutoff = 18
end_cutoff = 3
for sequence in sequences:
# adding 3 was a hack, not sure why the original value did not work, but this one did.
# TODO: check design of original function.
if SequenceCleanup.GetStartingGapLength(sequence.seq) == front_cutoff + 3:
alignment.append(sequence)
# -
alignment
# Get the first 100 n.t. of alignment, then get the last 100 n.t. of alignment (exclude gaps)
trimmed = alignment[:,front_cutoff:front_cutoff+100] + alignment[:,-(end_cutoff+100):-end_cutoff]
AlignIO.write(trimmed, 'Segment %s Aligned and Trimmed First and Last 100 nt.fasta' % segment, 'fasta')
# Get the full sequence of the align21 (exclude gaps)
AlignIO.write(alignment[:,18:-3], 'Segment %s Aligned and Trimmed.fasta' % segment, 'fasta')
# +
# Daniel wants to go ~100 n.t. more beyond the packaging regions, in order to compare MI between positions in the packaging
# regions with MI between positions outside the packaging regions. Therefore, grab 200 n.t. from each end.
trimmed = alignment[:, front_cutoff:front_cutoff + 200] + alignment[:, -(end_cutoff+200):-end_cutoff]
AlignIO.write(trimmed, 'Segment %s Aligned and Trimmed First and Last 200 nt.fasta' % segment, 'fasta')
# -
|
(1) Clean Up Segment 5 Alignment.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Imports
# +
import pickle
import json
import os
from glob import glob
from tqdm.notebook import tqdm
import re
import gc
# -
# # Get articles
# +
PATH = '/home/hlaboa-server/jupyter/TFM/DATA/text/'
not_files = ['/home/hlaboa-server/jupyter/TFM/DATA/text/AA',
'/home/hlaboa-server/jupyter/TFM/DATA/text/AG',
'/home/hlaboa-server/jupyter/TFM/DATA/text/AR',
'/home/hlaboa-server/jupyter/TFM/DATA/text/AI',
'/home/hlaboa-server/jupyter/TFM/DATA/text/BD',
'/home/hlaboa-server/jupyter/TFM/DATA/text/AB',
'/home/hlaboa-server/jupyter/TFM/DATA/text/AZ',
'/home/hlaboa-server/jupyter/TFM/DATA/text/AM',
'/home/hlaboa-server/jupyter/TFM/DATA/text/AH',
'/home/hlaboa-server/jupyter/TFM/DATA/text/AO',
'/home/hlaboa-server/jupyter/TFM/DATA/text/BA',
'/home/hlaboa-server/jupyter/TFM/DATA/text/AU',
'/home/hlaboa-server/jupyter/TFM/DATA/text/BH',
'/home/hlaboa-server/jupyter/TFM/DATA/text/BC',
'/home/hlaboa-server/jupyter/TFM/DATA/text/BK',
'/home/hlaboa-server/jupyter/TFM/DATA/text/BB',
'/home/hlaboa-server/jupyter/TFM/DATA/text/AY',
'/home/hlaboa-server/jupyter/TFM/DATA/text/BG',
'/home/hlaboa-server/jupyter/TFM/DATA/text/BF',
'/home/hlaboa-server/jupyter/TFM/DATA/text/AT',
'/home/hlaboa-server/jupyter/TFM/DATA/text/AC',
'/home/hlaboa-server/jupyter/TFM/DATA/text/AF',
'/home/hlaboa-server/jupyter/TFM/DATA/text/AP',
'/home/hlaboa-server/jupyter/TFM/DATA/text/AW',
'/home/hlaboa-server/jupyter/TFM/DATA/text/AJ',
'/home/hlaboa-server/jupyter/TFM/DATA/text/AN',
'/home/hlaboa-server/jupyter/TFM/DATA/text/AX',
'/home/hlaboa-server/jupyter/TFM/DATA/text/AE',
'/home/hlaboa-server/jupyter/TFM/DATA/text/AK',
'/home/hlaboa-server/jupyter/TFM/DATA/text/AQ',
'/home/hlaboa-server/jupyter/TFM/DATA/text/AV',
'/home/hlaboa-server/jupyter/TFM/DATA/text/AD',
'/home/hlaboa-server/jupyter/TFM/DATA/text/BE',
'/home/hlaboa-server/jupyter/TFM/DATA/text/BJ',
'/home/hlaboa-server/jupyter/TFM/DATA/text/AS',
'/home/hlaboa-server/jupyter/TFM/DATA/text/BI',
'/home/hlaboa-server/jupyter/TFM/DATA/text/AL',
'/home/hlaboa-server/jupyter/TFM/DATA/text/BN',
'/home/hlaboa-server/jupyter/TFM/DATA/text/BL',
'/home/hlaboa-server/jupyter/TFM/DATA/text/BO',
'/home/hlaboa-server/jupyter/TFM/DATA/text/BM',]
elements = [y for x in os.walk(PATH) for y in glob(os.path.join(x[0], '*'))]
elements = [e for e in elements if e not in not_files]
# +
def get_articles(file):
    """Extract the JSON article objects embedded in a raw text dump.

    Parameters
    ----------
    file : str
        Full text of a dump file containing one ``{...}`` JSON object
        per article.

    Returns
    -------
    list of dict
        The successfully parsed articles, in order of appearance.
        Extraction stops at the first article that cannot be repaired.
    """
    # Raw string avoids the invalid-escape warning of "\{.*?\}"; the
    # non-greedy quantifier grabs one {...} span per article.
    pattern = r"\{.*?\}"
    candidates = re.findall(pattern, file)
    articles_l = []
    for candidate in candidates:
        try:
            articles_l.append(dict(json.loads(candidate)))
        except json.JSONDecodeError:
            # The non-greedy match can cut a string value short at a '}'
            # inside it; closing the quote and brace repairs that common
            # truncation.
            try:
                repaired = candidate[:-1] + '"' + '}'
                articles_l.append(dict(json.loads(repaired)))
            except json.JSONDecodeError:
                # Unrecoverable entry: keep everything parsed so far.
                break
    return articles_l
######
articles = list()
with tqdm(total=len(elements)) as pbar:
for e in elements:
file = open(e, 'r')
file = file.read()
articles.append(get_articles(file))
pbar.update(1)
articles = [article for flat_articles in articles for article in flat_articles]
print(f'Total # of Spanish Wikipedia articles precleaned: {len(articles)}')
# -
# Replace each article's text with its first paragraph longer than 40
# characters (shorter leading lines are usually titles/headers); articles
# without a usable 'text' field get an empty string instead of being dropped.
for ind,a in enumerate(articles):
    try:
        text = a['text']
        splits = text.split('\n')
        for i,s in enumerate(splits):
            if len(s)>40:
                text = s
                break
        articles[ind]['text'] = text
    except:
        # Best-effort: missing/odd 'text' field -> blank it and continue.
        articles[ind]['text'] = ''
        pass
# # Save the articles
pickle.dump(articles, open( "../Data/data_v1_2.p", "wb" ) )
|
Scripts/1_data_gathering_precleaned.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# + [markdown] deletable=true editable=true
# Now we will train the High Level Feature classifier.
#
# To run this notebook we used the following configuration:
# * *Software stack*: LCG 94 (it has spark 2.3.1)
# * *Platform*: centos7-gcc7
# * *Spark cluster*: Hadalytic
# + deletable=true editable=true
# Check if Spark Session has been created correctly
spark
# + deletable=true editable=true
# Add the BDL zip file
#sc.addPyFile("/eos/project/s/swan/public/BigDL/bigdl-0.7.0-python-api.zip")
# + [markdown] deletable=true editable=true
# ## Load train and test dataset
# + deletable=true editable=true
PATH = "file:///data/cern/"
trainDF = spark.read.format('parquet')\
.load(PATH + 'trainUndersampled.parquet')\
.select(['HLF_input', 'encoded_label'])
testDF = spark.read.format('parquet')\
.load(PATH + 'testUndersampled.parquet')\
.select(['HLF_input', 'encoded_label'])
# + deletable=true editable=true
trainDF.printSchema()
# + deletable=true editable=true
trainDF.count()
# + [markdown] deletable=true editable=true
# ## Create the model
# + deletable=true editable=true
# Init analytics zoo
from zoo.common.nncontext import *
sc = init_nncontext("hlf classifier")
# + deletable=true editable=true
sc
# + deletable=true editable=true
# Create tensorflow keras model.
# Only need to change package name from keras to zoo.pipeline.api.keras
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation
from zoo.tfpark import KerasModel, TFDataset
def create_model(nh_1, nh_2, nh_3):
    """Build and compile a feed-forward classifier for the 14 HLF inputs.

    Three ReLU hidden layers of nh_1, nh_2 and nh_3 units feed a 3-way
    softmax output; compiled with Adam and categorical cross-entropy.
    """
    hidden_and_output = [
        Dense(nh_1, input_shape=(14,), activation='relu'),
        Dense(nh_2, activation='relu'),
        Dense(nh_3, activation='relu'),
        Dense(3, activation='softmax'),
    ]
    model = Sequential()
    for layer in hidden_and_output:
        model.add(layer)
    model.compile(loss='categorical_crossentropy', optimizer='Adam',
                  metrics=["accuracy"])
    return model
model = create_model(50,20,10)
# + [markdown] deletable=true editable=true
# ## Create train and valiation RDD
#
#
# + deletable=true editable=true
# Let's have a look at one element of trainDF
trainDF.show(1, truncate=False)
# + deletable=true editable=true
# from bigdl.util.common import Sample
import numpy as np
trainRDD = trainDF.rdd.map(lambda row: (np.array(row.HLF_input),
np.array(row.encoded_label))
)
testRDD = testDF.rdd.map(lambda row: (np.array(row.HLF_input),
np.array(row.encoded_label))
)
# + [markdown] deletable=true editable=true
# We can see the 14 high level features (`shape=[14]`) and the encoded label (`shape=[3]`).
# + deletable=true editable=true
# The batch used by BDL must be a multiple of numExecutors * executorCores
# Because data will be equally distributed inside each executor
workerBatch = 50
# numExecutors = int(sc._conf.get('spark.executor.instances'))
numExecutors = 1
# executorCores = int(sc._conf.get('spark.executor.cores'))
executorCores = 4
BDLbatch = workerBatch * numExecutors * executorCores
# + deletable=true editable=true
import tensorflow as tf
# create TFDataset for TF training
dataset = TFDataset.from_rdd(trainRDD,
features=(tf.float32, [14]),
labels=(tf.float32, [3]),
batch_size=128,
val_rdd=testRDD)
# + [markdown] deletable=true editable=true
# ## Estimator setup and training
# + deletable=true editable=true
# Set of hyperparameters
numEpochs = 5
# + deletable=true editable=true
# Create SparkML compatible estimator for deep learning training
from bigdl.optim.optimizer import EveryEpoch, Loss, TrainSummary, ValidationSummary
from zoo.pipeline.nnframes import *
from zoo.pipeline.api.keras.objectives import CategoricalCrossEntropy
# Use Keras model training API to train
from bigdl.optim.optimizer import *
from bigdl.nn.criterion import CategoricalCrossEntropy
model.compile(optimizer=tf.keras.optimizers.Adam(),
loss='categorical_crossentropy',
metrics=['accuracy'])
keras_model = KerasModel(model)
# estimator = NNEstimator(model, CategoricalCrossEntropy())\
# .setOptimMethod(Adam()) \
# .setBatchSize(BDLbatch) \
# .setMaxEpoch(numEpochs) \
# .setFeaturesCol("HLF_input") \
# .setLabelCol("encoded_label") \
# .setValidation(trigger=EveryEpoch() , val_df=testDF,
# val_method=[Loss(CategoricalCrossEntropy())], batch_size=BDLbatch)
# + [markdown] deletable=true editable=true
# Let's define a directory to store logs (i.e. train and validation losses) and save models
# + deletable=true editable=true
# name of our application
appName = "HLFclassifier"
# Change it!
logDir = "/data/cern/ZOOlogs"
# Check if there is already an application with the same name
# and remove it, otherwise logs will be appended to that app
import os
try:
os.system('rm -rf '+logDir+'/'+appName)
except:
pass
print("Saving logs to {}".format(logDir+'/'+appName))
# + deletable=true editable=true
# Set tensorboard for model training and validation
trainSummary = TrainSummary(log_dir=logDir,app_name=appName)
valSummary = ValidationSummary(log_dir=logDir,app_name=appName)
keras_model.set_train_summary(trainSummary)
keras_model.set_val_summary(valSummary)
# + [markdown] deletable=true editable=true
# We are now ready to launch the training.
#
# Warning: During the training it would be better to shut down the Toggle Spark Monitoring Display because each iteration is seen as a Spark job; therefore the toggle will try to display everything, causing problems for the browser.
# + deletable=true editable=true
# %%time
keras_model.fit(dataset,
epochs=numEpochs,
distributed=True)
# + [markdown] deletable=true editable=true
# ## Plot loss
# + deletable=true editable=true
import matplotlib.pyplot as plt
plt.style.use('seaborn-darkgrid')
# %matplotlib notebook
loss = np.array(trainSummary.read_scalar("Loss"))
val_loss = np.array(valSummary.read_scalar("Loss"))
plt.plot(loss[:,0], loss[:,1], label="Training loss")
plt.plot(val_loss[:,0], val_loss[:,1], label="Validation loss", color='crimson', alpha=0.8)
plt.xlabel('Iteration')
plt.ylabel('Loss')
plt.legend()
plt.title("HLF classifier loss")
plt.show()
# + [markdown] deletable=true editable=true
# ## Save the model
# + deletable=true editable=true
modelDir = os.path.join(logDir, "models", "hlf.model")
keras_model.save_model(modelDir)
# + [markdown] deletable=true editable=true
# It is possible to load the model in the following way:
# ```Python
# model = NNModel.load(path=modelDir)
# ```
# The default feature column name is "features", if your feature column is different, set feature column with this way:
# ```
# model = model.setFeaturesCol("HLF_input")
# ```
# + [markdown] deletable=true editable=true
# ## Prediction
# + deletable=true editable=true
testRDD2 = testDF.rdd.map(lambda row: np.array(row.HLF_input))
test_dataset = TFDataset.from_rdd(testRDD2,
features=(tf.float32, [14]),
labels=None,
batch_per_thread=128)
# + deletable=true editable=true
# Predict with trained NNModel using pipeline transform API
predRDD = keras_model.predict(test_dataset)
# + deletable=true editable=true
result = predRDD.collect()
# + deletable=true editable=true
result
# + deletable=true editable=true
y_pred = result
y_true = np.asarray(testDF.select('encoded_label').rdd\
.map(lambda row: np.asarray(row.encoded_label)).collect())
# + deletable=true editable=true
y_pred = np.squeeze(y_pred)
y_pred.shape
# + deletable=true editable=true
from sklearn.metrics import roc_curve, auc
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(3):
fpr[i], tpr[i], _ = roc_curve(y_true[:, i], y_pred[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# + deletable=true editable=true
plt.figure()
plt.plot(fpr[0], tpr[0], lw=2,
label='HLF classifier (AUC) = %0.4f' % roc_auc[0])
plt.plot([0, 1], [0, 1], linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Background Contamination (FPR)')
plt.ylabel('Signal Efficiency (TPR)')
plt.title('$tt$ selector')
plt.legend(loc="lower right")
plt.show()
# + deletable=true editable=true
# + deletable=true editable=true
|
Training_BigDL_Zoo/4.1-Training-HLFclassifier-TFPark.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import sys
sys.path.insert(0, '../')
# +
from collections import Counter
import time
import random
import rdflib
import numpy as np
from pprint import pprint
from sklearn import svm
import wlkernel
# -
rdf_graph = rdflib.Graph().parse('../data/aifbfixed_complete.n3', format='n3')
all_triples = [
(str(subj), str(pred), str(obj))
for subj, pred, obj in rdf_graph
]
quantiles = np.linspace(0.1, 1, 10) # [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1. ]
results_wlrdf = []
results_wl = []
n = len(all_triples)
RANDOM_STATE = 42
for q in quantiles:
n_sub = int(n * q)
random.seed(RANDOM_STATE)
triples = random.sample(all_triples, n_sub)
instances_class_map = {
subj: obj
for subj, pred, obj in triples
if 'affiliation' in pred
and 'id5instance' not in obj
}
instances = list(instances_class_map.keys())
y = list(instances_class_map.values())
triples = [
(subj, pred, obj)
for subj, pred, obj in triples
if 'affiliation' not in pred
and 'employs' not in pred
and 'member' not in pred
and 'head' not in pred
]
t0 = time.time()
wlrdf_graph = wlkernel.WLRDFGraph(triples, instances, max_depth=3)
kernel_matrix = wlkernel.wlrdf_kernel_matrix(wlrdf_graph, instances, iterations=0)
t1 = time.time()
results_wlrdf.append(t1 - t0)
rdf_graph = rdflib.Graph().parse('../data/aifbfixed_complete.n3', format='n3')
all_triples = [
(str(subj), str(pred), str(obj))
for subj, pred, obj in rdf_graph
]
for q in quantiles:
n_sub = int(n * q)
random.seed(RANDOM_STATE)
triples = random.sample(all_triples, n_sub)
instances_class_map = {
subj: obj
for subj, pred, obj in triples
if 'affiliation' in pred
and 'id5instance' not in obj
}
instances = list(instances_class_map.keys())
y = list(instances_class_map.values())
triples = [
(subj, pred, obj)
for subj, pred, obj in triples
if 'affiliation' not in pred
and 'employs' not in pred
and 'member' not in pred
and 'head' not in pred
]
t0 = time.time()
wl_graphs = [wlkernel.WLGraph(triples, instance, max_depth=3) for instance in instances]
kernel_matrix = wlkernel.wl_kernel_matrix(wl_graphs, iterations=0)
t1 = time.time()
results_wl.append(t1 - t0)
# +
import matplotlib.pyplot as plt
class Result:
    """Container for one plotted timing series.

    Attributes:
        values: sequence of timing measurements (empty list when not given).
        color: matplotlib color used when plotting the series.
        name: legend label for the series.
    """

    def __init__(self, values=None, color='red', name=''):
        # Always bind `values`: the original conditional left the attribute
        # undefined when None was passed, raising AttributeError on access.
        self.values = [] if values is None else values
        self.color = color
        self.name = name
x = quantiles
y = Result(results_wlrdf, 'orange', 'WL RDF')
y1 = Result(results_wl, 'purple', 'WL')
n = len(x)
fig, ax = plt.subplots(figsize=(15, 8))
for i in range(n - 1):
plt.plot(x[i: i+2], y.values[i: i+2],
'o-', color=y.color, markersize=8)
plt.plot(x[i: i+2], y1.values[i: i+2],
'o-', color= y1.color, markersize=8)
ax.xaxis.label.set_text('fraction of the dataset')
ax.yaxis.label.set_text('runnning time (s)')
custom_lines = [plt.Line2D([0], [0], color=y.color, lw=4),
plt.Line2D([0], [0], color=y1.color, lw=4)]
ax.legend(custom_lines, [y.name, y1.name])
plt.savefig('../results/affiliation_timing.png', format='png')
|
notebooks/affiliation_timing.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# First, we create our camera class like this. Please note, you can only create one USBCamera instance.
# +
from jetcam.usb_camera import USBCamera
camera = USBCamera(width=224, height=224, capture_width=640, capture_height=480, capture_device=1)
# -
# We can then capture a frame from the camera like this
# +
image = camera.read()
print(image.shape)
# -
# Calling ``read`` also updates the camera's internal ``value``
print(camera.value.shape)
# You can create a widget to display this image. You'll need to convert from bgr8 format to jpeg to stream to browser
# +
import ipywidgets
from IPython.display import display
from jetcam.utils import bgr8_to_jpeg
image_widget = ipywidgets.Image(format='jpeg')
image_widget.value = bgr8_to_jpeg(image)
display(image_widget)
# -
# You can set the ``running`` value of the camera to continuously update the value in background. This allows you to attach callbacks to the camera value changes.
# +
camera.running = True
def update_image(change):
    "Traitlets observer: push each new camera frame to the display widget."
    image = change['new']  # the latest bgr8 frame from the camera
    # Re-encode to JPEG so the browser-side ipywidgets Image can render it.
    image_widget.value = bgr8_to_jpeg(image)
camera.observe(update_image, names='value')
# -
# You can unattach the callback like this
camera.unobserve(update_image, names='value')
# You can also use the traitlets ``dlink`` method to connect the camera to the widget, using a transform inbetween
# +
import traitlets
camera_link = traitlets.dlink((camera, 'value'), (image_widget, 'value'), transform=bgr8_to_jpeg)
# -
# You can remove this link like this
camera_link.unlink()
# And reconnect it like this
camera_link.link()
# That's all for this notebook!
|
notebooks/usb_camera/usb_camera.ipynb
|
# ---
# jupyter:
# jupytext:
# formats: md,ipynb
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# **This notebook is an exercise in the [Computer Vision](https://www.kaggle.com/learn/computer-vision) course. You can reference the tutorial at [this link](https://www.kaggle.com/ryanholbrook/custom-convnets).**
#
# ---
#
# # Introduction #
#
# In these exercises, you'll build a custom convnet with performance competitive to the VGG16 model from Lesson 1.
#
# Get started by running the code cell below.
# +
# Setup feedback system
from learntools.core import binder
binder.bind(globals())
from learntools.computer_vision.ex5 import *
# Imports
import os, warnings
import matplotlib.pyplot as plt
from matplotlib import gridspec
import numpy as np
import tensorflow as tf
from tensorflow.keras.preprocessing import image_dataset_from_directory
# Reproducability
def set_seed(seed=31415):
    "Seed NumPy and TensorFlow RNGs and set determinism env vars for reproducibility."
    # Environment flags: deterministic hashing and deterministic TF ops.
    os.environ['PYTHONHASHSEED'] = str(seed)
    os.environ['TF_DETERMINISTIC_OPS'] = '1'
    # Library random number generators.
    np.random.seed(seed)
    tf.random.set_seed(seed)
set_seed()
# Set Matplotlib defaults
plt.rc('figure', autolayout=True)
plt.rc('axes', labelweight='bold', labelsize='large',
titleweight='bold', titlesize=18, titlepad=10)
plt.rc('image', cmap='magma')
warnings.filterwarnings("ignore") # to clean up output cells
# Load training and validation sets
ds_train_ = image_dataset_from_directory(
'../input/car-or-truck/train',
labels='inferred',
label_mode='binary',
image_size=[128, 128],
interpolation='nearest',
batch_size=64,
shuffle=True,
)
ds_valid_ = image_dataset_from_directory(
'../input/car-or-truck/valid',
labels='inferred',
label_mode='binary',
image_size=[128, 128],
interpolation='nearest',
batch_size=64,
shuffle=False,
)
# Data Pipeline
def convert_to_float(image, label):
    # Cast pixels to float32 (tf.image.convert_image_dtype also rescales
    # integer inputs to [0, 1]); labels pass through unchanged.
    image = tf.image.convert_image_dtype(image, dtype=tf.float32)
    return image, label
AUTOTUNE = tf.data.experimental.AUTOTUNE
ds_train = (
ds_train_
.map(convert_to_float)
.cache()
.prefetch(buffer_size=AUTOTUNE)
)
ds_valid = (
ds_valid_
.map(convert_to_float)
.cache()
.prefetch(buffer_size=AUTOTUNE)
)
# -
# # Design a Convnet #
#
# Let's design a convolutional network with a block architecture like we saw in the tutorial. The model from the example had three blocks, each with a single convolutional layer. Its performance on the "Car or Truck" problem was okay, but far from what the pretrained VGG16 could achieve. It might be that our simple network lacks the ability to extract sufficiently complex features. We could try improving the model either by adding more blocks or by adding convolutions to the blocks we have.
#
# Let's go with the second approach. We'll keep the three block structure, but increase the number of `Conv2D` layer in the second block to two, and in the third block to three.
#
# <figure>
# <!-- <img src="./images/2-convmodel-2.png" width="250" alt="Diagram of a convolutional model."> -->
# <img src="https://i.imgur.com/Vko6nCK.png" width="250" alt="Diagram of a convolutional model.">
# </figure>
#
# # 1) Define Model #
#
# Given the diagram above, complete the model by defining the layers of the third block.
# +
from tensorflow import keras
from tensorflow.keras import layers
model = keras.Sequential([
# Block One
layers.Conv2D(filters=32, kernel_size=3, activation='relu', padding='same',
input_shape=[128, 128, 3]),
layers.MaxPool2D(),
# Block Two
layers.Conv2D(filters=64, kernel_size=3, activation='relu', padding='same'),
layers.MaxPool2D(),
# Block Three
# YOUR CODE HERE
layers.Conv2D(filters=128, kernel_size=3, activation='relu', padding='same'),
layers.Conv2D(filters=128, kernel_size=3, activation='relu', padding='same'),
layers.MaxPool2D(),
# Head
layers.Flatten(),
layers.Dense(6, activation='relu'),
layers.Dropout(0.2),
layers.Dense(1, activation='sigmoid'),
])
# Check your answer
q_1.check()
# -
# Lines below will give you a hint or solution code
q_1.hint()
q_1.solution()
# # 2) Compile #
#
# To prepare for training, compile the model with an appropriate loss and accuracy metric for the "Car or Truck" dataset.
# +
model.compile(
optimizer=tf.keras.optimizers.Adam(epsilon=0.01),
# YOUR CODE HERE: Add loss and metric
loss='binary_crossentropy',
metrics=['binary_accuracy']
)
# Check your answer
q_2.check()
# -
model.compile(
optimizer=tf.keras.optimizers.Adam(epsilon=0.01),
loss='binary_crossentropy',
metrics=['binary_accuracy'],
)
q_2.assert_check_passed()
# Lines below will give you a hint or solution code
q_2.hint()
q_2.solution()
# Finally, let's test the performance of this new model. First run this cell to fit the model to the training set.
history = model.fit(
ds_train,
validation_data=ds_valid,
epochs=50,
)
# And now run the cell below to plot the loss and metric curves for this training run.
import pandas as pd
history_frame = pd.DataFrame(history.history)
history_frame.loc[:, ['loss', 'val_loss']].plot()
history_frame.loc[:, ['binary_accuracy', 'val_binary_accuracy']].plot();
# # 3) Train the Model #
#
# How would you interpret these training curves? Did this model improve upon the model from the tutorial?
# View the solution (Run this code cell to receive credit!)
q_3.check()
# # Conclusion #
#
# These exercises showed you how to design a custom convolutional network to solve a specific classification problem. Though most models these days will be built on top of a pretrained base, in certain circumstances a smaller custom convnet might still be preferable -- such as with a smaller or unusual dataset or when computing resources are very limited. As you saw here, for certain problems they can perform just as well as a pretrained model.
#
# # Keep Going #
#
# Continue on to [**Lesson 6**](https://www.kaggle.com/ryanholbrook/data-augmentation), where you'll learn a widely-used technique that can give a boost to your training data: **data augmentation**.
# ---
#
#
#
#
# *Have questions or comments? Visit the [Learn Discussion forum](https://www.kaggle.com/learn-forum/196537) to chat with other Learners.*
|
Platforms/Kaggle/Courses/Computer_Vision/5.Custom_Convnets/exercise-custom-convnets.ipynb
|
# <a
# href="https://colab.research.google.com/github/LearnPythonWithRune/MachineLearningWithPython/blob/main/colab/final/07 - Project - Deep Neural Network.ipynb"
# target="_parent">
# <img
# src="https://colab.research.google.com/assets/colab-badge.svg"
# alt="Open In Colab"/>
# </a>
# # Project: Deep Neural Network
# - Identify false banknotes
# ### Step 1: Import libraries
import pandas as pd
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential
# ### Step 2: Read the data
# - Use Pandas [read_csv](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html) method to read **files/banknotes.csv**
data = pd.read_csv('files/banknotes.csv')
data.head()
# ### Step 3: Investigate the data
# - Check how many classes (class)
#     - HINT: use [unique()](https://pandas.pydata.org/docs/reference/api/pandas.unique.html)
# - Check for missing data
#     - HINT: use [isna()](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.isna.html)[.sum()](https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.sum.html)
data['class'].unique()
data.isna().sum()
# ### Step 4: Divide data into feature vectors and labels
# - Assign the feature vectors to $X$
#     - HINT: that is all but the last column of the data
# - Assign the labels to $y$
#     - HINT: that is the last column (**class**)
X = data.iloc[:,:-1]
y = data.iloc[:,-1]
X.head()
y.head()
# ### Step 5: Create training and test datasets
# - Split $X$ and $y$ into train and test sets using **train_test_split** with **test_size=.4**
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4, random_state=42)
# ### Step 6: Create and compile the model
# - Create a **Sequential** model
#     - **Dense** with 8 nodes with **input_dim=4, activation='relu'**
#     - **Dense** with 1 (the output node) with **activation='sigmoid'**
# - Compile the model with **optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']**
# Binary classifier: 4 input features -> 8 ReLU units -> 1 sigmoid output.
model = Sequential()
model.add(Dense(8, input_dim = 4, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# ### Step 7: Fit and test the accuracy
# - Fit the model on training data with **epochs=20**
# - Evaluate the model with test data with **verbose=2**
model.fit(X_train, y_train, epochs=20)
model.evaluate(X_test, y_test, verbose=2)
# ### Step 8 (Optional): Add another hidden layer
# - Add another hidden layer in the model
# - Test performance
# Deeper variant: an extra 4-unit ReLU layer between the first layer and the output.
model = Sequential()
model.add(Dense(8, input_dim = 4, activation='relu'))
model.add(Dense(4, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.fit(X_train, y_train, epochs=20)
model.evaluate(X_test, y_test, verbose=2)
|
Machine Learning With Python/colab/final/07 - Project - Deep Neural Network.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
# Load the interview corpus; the first CSV column is the index.
df = pd.read_csv('dataset/interviews.csv', index_col=0)
# Drop the source-file column. Keyword form: the positional `axis` argument
# (`df.drop('file_name', 1)`) was deprecated and removed in pandas 2.0.
df = df.drop(columns='file_name')
df.head()
# +
import re, string, unicodedata
import nltk
import contractions
import inflect
from bs4 import BeautifulSoup
from nltk import word_tokenize, sent_tokenize
from nltk.corpus import stopwords
from nltk.stem import LancasterStemmer, WordNetLemmatizer
from stop_words import get_stop_words
from string import ascii_letters, digits, whitespace
import glob
import errno
# -
def tokenize(text):
    """Split raw text into NLTK word tokens."""
    return nltk.word_tokenize(text)
def is_ascii(word):
    """Return True if *word* contains at least one ASCII letter.

    Used to spot Latin-alphabet tokens in the (Cyrillic) corpus.
    Idiomatic rewrite of the manual loop using `any()`.
    """
    return any(c in ascii_letters for c in word)
def remove_ascii(words):
    """Drop every token that contains an ASCII letter; keep the rest in order."""
    return [token for token in words if not is_ascii(token)]
def to_lowercase(words):
    """Lowercase every token in the list, preserving order."""
    return [token.lower() for token in words]
def remove_punctuation(words):
    """Strip punctuation characters from each token; drop tokens that become empty."""
    stripped = (re.sub(r'[^\w\s]', '', token) for token in words)
    return [token for token in stripped if token != '']
def replace_numbers(words):
    """Prefix every all-digit token with the marker 'число_'; leave other tokens as-is.

    Fixes: the original created an `inflect.engine()` (an expensive object)
    on every call but never used it — the engine is removed.  The old
    docstring also incorrectly claimed numbers were spelled out as words.
    """
    return ['число_' + word if word.isdigit() else word for word in words]
def remove_numbers(words):
    """Drop every all-digit token from the list, preserving order."""
    return [token for token in words if not token.isdigit()]
def remove_stopwords(words):
    """Drop Bulgarian stop words from the token list, preserving order.

    Fixes: the original called `get_stop_words('bg')` (which rebuilds the
    whole stop-word list) once per token and did a linear scan over it;
    the list is now fetched once and turned into a set for O(1) lookups.
    """
    stop = set(get_stop_words('bg'))
    return [token for token in words if token not in stop]
def stem_words(words=None):
    """Stem words in list of tokenized words.

    Placeholder: stemming is done by an external tool (Preslav Nakov's
    Bulgarian stemmer) outside this notebook; this function only reminds
    the user of that step and ignores its argument.
    """
    print("USE PRESLAV NAKOV's STEMMER !!!")
def remove_empty_words(words):
    """Drop tokens that are empty or whitespace-only, preserving order."""
    return [token for token in words if token.strip()]
def print_words(df):
    """Print the token list of every interview row, numbered from 1."""
    for row_number, tokens in enumerate(df['words'], 1):
        print('Interview ' + str(row_number))
        print(tokens)
# Cleaning pipeline: each step maps the 'words' column through one helper
# above and prints the intermediate result for inspection.
df['words'] = [tokenize(text) for text in df['text']]
print_words(df)
# Drop tokens containing Latin (ASCII) letters — the corpus is Bulgarian.
df['words'] = [remove_ascii(words) for words in df['words']]
print_words(df)
df['words'] = [to_lowercase(words) for words in df['words']]
print_words(df)
df['words'] = [remove_punctuation(words) for words in df['words']]
print_words(df)
df['words'] = [remove_numbers(words) for words in df['words']]
print_words(df)
df['words'] = [remove_stopwords(words) for words in df['words']]
print_words(df)
df['words'] = [remove_empty_words(words) for words in df['words']]
print_words(df)
def words_to_file(words, index):
    """Write *words*, one per line (no trailing newline), to words/<index>.txt.

    Fixes: the original opened the file without a context manager, so the
    handle leaked if any write raised; `with` guarantees it is closed.
    The word-plus-conditional-newline loop is replaced by an equivalent
    single `'\n'.join` write.
    """
    with open("words/{0}.txt".format(index), "w") as out_file:
        out_file.write("\n".join(words))
def words_to_files(df):
    """Dump each row's token list to its own text file named after the row index."""
    for record in df.itertuples():
        words_to_file(record.words, record.Index)
# Dump the cleaned tokens to per-interview files, then remind the user to
# run the external stemmer over them (stem_words is a placeholder).
words_to_files(df)
stem_words()
def read_txt(file_name):
    """Return the whitespace-stripped lines of a text file as a list."""
    with open(file_name, 'r') as handle:
        return [raw_line.strip() for raw_line in handle]
def get_index(file_name):
    """Derive the integer row index from a '<n>.txt' file name."""
    return int(file_name.replace(".txt", ""))
def load_df_with_stemmed_words(directory):
    """Load the stemmed word lists from <directory>/*.txt into a one-column DataFrame.

    Each file is named '<row index>.txt'; its stripped lines become the list
    stored at that row of the column named after *directory*.

    Fixes vs. the original:
    * the row index was extracted with `file_name.split("\\")[1]`, which only
      works with Windows path separators — `os.path` handles both '/' and '\\';
    * `df[directory].iloc[index] = ...` is chained assignment (deprecated and
      unreliable in modern pandas) — replaced with the `df.at` single-cell
      setter on an object-dtype column so list values can be stored.
    """
    import os  # local import: this notebook does not import os at top level
    path = directory + '/*.txt'
    files = glob.glob(path)
    # object dtype so each cell can hold a Python list of words
    df = pd.DataFrame(np.nan, index=range(len(files)), columns=[directory], dtype=object)
    for file_name in files:
        try:
            index = int(os.path.splitext(os.path.basename(file_name))[0])
            df.at[index, directory] = read_txt(file_name)
        except IOError as exc:
            # ignore directories that match the glob; re-raise real I/O errors
            if exc.errno != errno.EISDIR:
                raise
    return df
def load_df_stem():
    """Join the three stemmed-word variants into a single DataFrame."""
    combined = load_df_with_stemmed_words('words_stem_1')
    for column_dir in ('words_stem_2', 'words_stem_3'):
        combined = combined.join(load_df_with_stemmed_words(column_dir))
    return combined
# Combine all three stemmed variants and preview the result.
df_stem = load_df_stem()
df_stem.head()
from gensim.corpora import Dictionary
from gensim.models import NormModel
from gensim.models import TfidfModel
def tf_idf(df, attr):
    """Build a dense TF-IDF matrix (one row per document) for column *attr*.

    Uses gensim: documents -> bag-of-words corpus -> TF-IDF weights, then
    expands each sparse document vector into a dense row of vocabulary size.
    """
    documents = df[attr]
    dictionary = Dictionary(documents)
    vocab_size = len(dictionary)
    bow_corpus = [dictionary.doc2bow(document) for document in documents]
    model = TfidfModel(bow_corpus)
    dense_rows = []
    for weighted_doc in model[bow_corpus]:
        row = [0] * vocab_size
        for term_id, weight in weighted_doc:
            row[term_id] = weight
        dense_rows.append(row)
    return pd.DataFrame(dense_rows)
# Dense TF-IDF matrices for each of the three stemming variants.
df_tfidf_1 = tf_idf(df_stem, 'words_stem_1')
df_tfidf_2 = tf_idf(df_stem, 'words_stem_2')
df_tfidf_3 = tf_idf(df_stem, 'words_stem_3')
def get_headers(df, attr):
    """Return the vocabulary terms of column *attr* (TF-IDF column headers)."""
    return list(Dictionary(df[attr]).values())
# Vocabulary terms become the column headers of each TF-IDF frame.
df_tfidf_headers_1 = get_headers(df_stem, 'words_stem_1')
df_tfidf_headers_2 = get_headers(df_stem, 'words_stem_2')
df_tfidf_headers_3 = get_headers(df_stem, 'words_stem_3')
df_tfidf_1.columns = df_tfidf_headers_1
df_tfidf_2.columns = df_tfidf_headers_2
df_tfidf_3.columns = df_tfidf_headers_3
print(df_tfidf_1.shape)
df_tfidf_1.head()
print(df_tfidf_2.shape)
df_tfidf_2.head()
print(df_tfidf_3.shape)
df_tfidf_3.head()
# Persist each labelled TF-IDF matrix for downstream word2vec experiments.
file_name = 'dataset/word2vec/tfidf_stem_1.csv'
df_tfidf_1.to_csv(file_name, sep=',', encoding='utf-8', header=True, index=True)
file_name = 'dataset/word2vec/tfidf_stem_2.csv'
df_tfidf_2.to_csv(file_name, sep=',', encoding='utf-8', header=True, index=True)
file_name = 'dataset/word2vec/tfidf_stem_3.csv'
df_tfidf_3.to_csv(file_name, sep=',', encoding='utf-8', header=True, index=True)
|
Dataset Text Processing.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Dmitri9149/TensorFlow_PyTorch_CNN/blob/main/TensorFlow_LeNet_MaxPooling_Sigmoid%2BReLu_01_17_2021.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="rl3xEpwL0hDC" outputId="79774f64-b2f5-46ff-a922-97eeaab84974"
# -U: Upgrade all packages to the newest available version
# !pip install -U d2l
from d2l import tensorflow as d2l
import tensorflow as tf
from tensorflow.distribute import MirroredStrategy, OneDeviceStrategy
from matplotlib import pyplot
from keras.datasets import fashion_mnist
# + [markdown] id="w-vYLSe208vE"
# ### Convolutional Neural Network LeNet.
#
# This is an implementation of the classical LeNet convolutional neural network, originally designed for handwritten digit recognition.
#
# The basic architecture is used for some experimentation:
# we may change AveragePooling to MaxPooling and Sigmoid to ReLu activations. It is interesting to check, how it will change the results.
#
# I use some code from d2l.ai : http://d2l.ai/
#
# There is also some intermediate code with experimentation with TensorFlow objects.
# + [markdown] id="hS-FN4I_1xdi"
# ## In this version we use Max Pooling ; Sigmoid and ReLu.
#
#
# We use 'sigmoid' for Conv2d layers and 'relu' for Dense layers. The Max Pooling (instead of Average Pooling) + 'sigmoid' for Conv2d layers + 'relu' for Dense layers -> the configuration gives the best results.
#
#
# + [markdown] id="9k4G8nExlZva"
# My comments are marked as :' ### ' -> there are many my comments in the d2l.ai code.
# + id="bwiiUoSE3eJp"
def LN():
    """Build the modified LeNet: sigmoid convolutions, max pooling, ReLU dense layers.

    Returns an uncompiled tf.keras Sequential model with a 10-way logit output.
    """
    layers = [
        tf.keras.layers.Conv2D(filters=6, kernel_size=5, activation='sigmoid',
                               padding='same'),
        tf.keras.layers.MaxPool2D(pool_size=2, strides=2),
        tf.keras.layers.Conv2D(filters=16, kernel_size=5,
                               activation='sigmoid'),
        tf.keras.layers.MaxPool2D(pool_size=2, strides=2),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(120, activation='relu'),
        tf.keras.layers.Dense(84, activation='relu'),
        tf.keras.layers.Dense(10),
    ]
    return tf.keras.models.Sequential(layers)
# + colab={"base_uri": "https://localhost:8080/"} id="FEvUlPKo4h-r" outputId="53b14d3a-6d91-45eb-f7e2-a7ab75720e9e"
# Probe the network with a dummy NHWC batch so each layer builds, printing
# every layer's output shape along the way.
X = tf.zeros((1,28,28,1))
for layer in LN().layers:
    X = layer(X)
    print(layer.__class__.__name__,' output shape \t', X.shape)
# + [markdown] id="cdK3KJLxzsoj"
# ### Data
#
# + [markdown] id="ZTibI3PfzwdE"
# We will use FASHION-MNIST dataset.
# + id="gVDDpMZ-0dlW" colab={"base_uri": "https://localhost:8080/"} outputId="dad4c67f-6398-4816-ef0c-b553d8394e44"
# Download FASHION-MNIST as numpy arrays and report the split sizes.
(train_X, train_y), (test_X, test_y) = fashion_mnist.load_data()
# + colab={"base_uri": "https://localhost:8080/"} id="vtKoZCB03sOq" outputId="9373873c-204f-4ab8-f5d6-23385e9c99c4"
print('Train : {} , {}'.format(train_X.shape, train_y.shape))
print('Test : {} , {}'.format(test_X.shape, test_y.shape))
# + colab={"base_uri": "https://localhost:8080/", "height": 268} id="tcRBo_WS4ofi" outputId="ba0b5794-954c-4ec5-8af8-a8e2a60e4a78"
# Preview the first nine training images with three different colour maps.
for i in range(9):
    pyplot.subplot(3,3,1 + i)
    pyplot.imshow(train_X[i], cmap = pyplot.get_cmap('Greys'))
pyplot.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 268} id="z7wj5-jU7JUu" outputId="abe79f65-f777-4959-8224-afa6f7c4325b"
for i in range(9):
    pyplot.subplot(3,3,1 + i)
    pyplot.imshow(train_X[i], cmap = pyplot.get_cmap('Spectral'))
pyplot.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 268} id="vGaEzLXWAzZZ" outputId="5e931fcc-13a0-452d-ee2c-441da4b6df3f"
for i in range(9):
    pyplot.subplot(3,3,1 + i)
    pyplot.imshow(train_X[i], cmap = pyplot.get_cmap('gray'))
pyplot.show()
# + id="fW_2n_5C8dUl"
def reshape_cast(X, y):
    """Scale images to [0, 1], add a trailing channel axis, cast labels to int32."""
    images = tf.expand_dims(X, axis=3) / 255
    labels = tf.cast(y, dtype='int32')
    return (images, labels)
def load_data(batch_size):
    """Return (train, test) tf.data pipelines over the global FASHION-MNIST arrays.

    Fixes: the original chained `.batch(batch_size).shuffle(...)`, which only
    permutes whole batches — the composition of each batch never changes
    between epochs.  Shuffling is now applied *before* batching so individual
    examples are re-mixed across batches.  The test pipeline is not shuffled.
    """
    train = (tf.data.Dataset.from_tensor_slices(reshape_cast(train_X, train_y))
             .shuffle(len(train_X))
             .batch(batch_size))
    test = (tf.data.Dataset.from_tensor_slices(reshape_cast(test_X, test_y))
            .batch(batch_size))
    return (train, test)
# + id="XWrLdkjIYeSo"
# Build the training/evaluation pipelines with a batch size of 256.
batch_size = 256
train_iter, test_iter = load_data(batch_size=batch_size)
# + id="JQWFq-oe8jWz"
# from d2l.ai
class Timer:
    """Record multiple running times.

    Fix: this notebook never imports `time` or `numpy`, so the original
    methods raised NameError when called.  The imports are done locally in
    the methods that need them, keeping the class self-contained.
    """
    def __init__(self):
        # Recorded durations (seconds), one entry per start/stop cycle.
        self.times = []
        self.start()

    def start(self):
        """Start the timer."""
        import time
        self.tik = time.time()

    def stop(self):
        """Stop the timer, record the elapsed time, and return it."""
        import time
        self.times.append(time.time() - self.tik)
        return self.times[-1]

    def avg(self):
        """Return the average recorded time."""
        return sum(self.times) / len(self.times)

    def sum(self):
        """Return the sum of all recorded times."""
        return sum(self.times)

    def cumsum(self):
        """Return the running (accumulated) times as a list."""
        import numpy as np
        return np.array(self.times).cumsum().tolist()
# + id="VXazC8HEX5Vp"
# from d2l.ai
class Accumulator:
    """For accumulating sums over `n` variables."""

    def __init__(self, n):
        # One running total per tracked variable.
        self.data = [0.0] * n

    def add(self, *args):
        """Add one value per tracked variable to the running totals."""
        self.data = [total + float(value)
                     for total, value in zip(self.data, args)]

    def reset(self):
        """Zero out every running total."""
        self.data = [0.0] * len(self.data)

    def __getitem__(self, idx):
        return self.data[idx]
# + id="TJ-qOZgmYvPR"
# from d2l.ai
def try_gpu(i=0):
    """Return gpu(i) if exists, otherwise return cpu()."""
    gpus = tf.config.experimental.list_physical_devices('GPU')
    if len(gpus) >= i + 1:
        return tf.device(f'/GPU:{i}')
    return tf.device('/CPU:0')
# + id="zDqcZksKYUrv"
# from d2l.ai
class TrainCallback(tf.keras.callbacks.Callback):
    """A callback to visualize the training progress.

    After every epoch it evaluates the model on the test set and appends
    (train loss, train accuracy, test accuracy) to a live d2l plot; after the
    final epoch it prints summary metrics and an examples/sec throughput figure.
    """
    def __init__(self, net, train_iter, test_iter, num_epochs, device_name):
        self.timer = d2l.Timer()
        # Live plot with one curve per tracked metric.
        self.animator = d2l.Animator(
            xlabel='epoch', xlim=[1, num_epochs], legend=[
                'train loss', 'train acc', 'test acc'])
        self.net = net
        self.train_iter = train_iter
        self.test_iter = test_iter
        self.num_epochs = num_epochs
        self.device_name = device_name
    ### this method will be called at the beginning of epoch in training process
    def on_epoch_begin(self, epoch, logs=None):
        self.timer.start()
    ### --- at the end of epoch in training process
    def on_epoch_end(self, epoch, logs):
        self.timer.stop()
        # `logs` already carries the train metrics; only test accuracy must be computed.
        test_acc = self.net.evaluate(
            self.test_iter, verbose=0, return_dict=True)['accuracy']
        metrics = (logs['loss'], logs['accuracy'], test_acc)
        self.animator.add(epoch + 1, metrics)
        if epoch == self.num_epochs - 1:
            # Throughput: batch size x number of batches / mean epoch duration.
            batch_size = next(iter(self.train_iter))[0].shape[0]
            num_examples = batch_size * tf.data.experimental.cardinality(
                self.train_iter).numpy()
            print(f'loss {metrics[0]:.3f}, train acc {metrics[1]:.3f}, '
                  f'test acc {metrics[2]:.3f}')
            print(f'{num_examples / self.timer.avg():.1f} examples/sec on '
                  f'{str(self.device_name)}')
def train_ch6(net_fn, train_iter, test_iter, num_epochs, lr, device=None):
    """Train a model with a GPU (falling back to CPU).

    Parameters:
        net_fn: zero-argument callable returning a fresh keras model.
        train_iter / test_iter: tf.data pipelines of (image, label) batches.
        num_epochs: number of training epochs.
        lr: SGD learning rate.
        device: optional tf device context.  Fix: the original default
            `device=d2l.try_gpu()` was evaluated once at function-definition
            time, pinning the device when the cell ran; a None sentinel now
            resolves the device at call time instead.
    """
    if device is None:
        device = d2l.try_gpu()
    device_name = device._device_name
    ### tf.distribute -> Library for running a computation across multiple devices.
    strategy = tf.distribute.OneDeviceStrategy(device_name)
    with strategy.scope():
        optimizer = tf.keras.optimizers.SGD(learning_rate=lr)
        ### ' Use this crossentropy loss function when there are two or more label classes.
        ### We expect labels to be provided as integers. ' <- from official tf documents.
        loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
        net = net_fn()
        ### Calculates how often predictions equal labels. <- metrics=['accuracy']
        net.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])
    callback = TrainCallback(net, train_iter, test_iter, num_epochs,
                             device_name)
    net.fit(train_iter, epochs=num_epochs, verbose=0, callbacks=[callback])
    return net
# + colab={"base_uri": "https://localhost:8080/", "height": 313} id="seD6J6blYkLB" outputId="1d566d74-00f3-4406-e62e-71550e0f6bde"
# Train the LeNet variant for 50 epochs with SGD learning rate 0.9.
lr, num_epochs = 0.9, 50
train_ch6(LN, train_iter, test_iter, num_epochs, lr)
|
TensorFlow_LeNet_MaxPooling_Sigmoid+ReLu_01_17_2021.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
def validCols(board):
    """Return True when every row-list of the board holds distinct, non-zero values."""
    for line in board:
        seen = []
        for value in line:
            if value == 0 or value in seen:
                return False
            seen.append(value)
    return True
def validRows(board):
    """Return True when each of the nine cross-columns holds distinct, non-zero values."""
    for i in range(9):
        seen = set()
        for line in board:
            value = line[i]
            if value == 0 or value in seen:
                return False
            seen.add(value)
    return True
def validBlock(block):
    """Return True when the given block contains no repeated value (zeros included)."""
    seen = set()
    for line in block:
        for value in line:
            if value in seen:
                return False
            seen.add(value)
    return True
def validBoxes(board):
    """Return True when none of the nine 3x3 boxes contains a repeated value.

    Bug fix: the original advanced the row window (start[0]/end[0]) inside the
    inner loop but never reset it between box columns, so after the first
    column of boxes every remaining slice was empty and boxes 4-9 were never
    actually checked.  The box slice is now computed directly from the box
    coordinates.  The duplicate check is inlined (same semantics as the old
    validBlock helper: any repeated value, zero included, invalidates a box).
    """
    B = np.array(board)
    for box_row in range(3):
        for box_col in range(3):
            block = B[3 * box_row:3 * box_row + 3,
                      3 * box_col:3 * box_col + 3]
            values = block.ravel().tolist()
            if len(set(values)) != len(values):
                return False
    return True
def validSolution(board):
    """Return True when columns, rows and all 3x3 boxes of the board are valid."""
    return validCols(board) and validRows(board) and validBoxes(board)
|
SudokuValidator.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # ASR dataset labeling pipeline based on Wikipedia articles
# The idea behind this notebook is to get tagged pairs for Text-to-Speech and Speech-to-Text problems based on a huge database of Wikipedia articles for some fixed language (which is supported by Toloka) using Toloka's toolkit (toloka-kit).
# First of all, we highly recommend reading [the image segmentation example](https://github.com/Toloka/toloka-kit/blob/main/examples/image_segmentation/image_segmentation.ipynb) before you start this one, to become familiar with the main kinds of entities in Toloka.
# ## Content
# We will implement 3 projects:
# * Clearing raw texts as results from Wikipedia exploring script
# * Recording cleared texts
# * Verification project for previous one
#
# You can learn more about each of these projects from the corresponding section of the notebook and the diagram attached below.
# <table align="center">
# <tr><td>
# <img src="./img/scheme.png"
# alt="Scheme" width="800">
# </td></tr>
# <tr><td align="center">
# <b>Figure 1.</b> Scheme of the pipeline
# </td></tr>
# </table>
#
# ## Preparation
# Installing toloka-kit
# !pip install toloka-kit==0.1.3
# Installing boto3 for working with object storage (s3)
# !pip install boto3
# +
import os
import datetime
import time
import ipyplot
import pandas as pd
import boto3
import toloka.client as toloka
import toloka.client.project.template_builder as tb
from tqdm.notebook import tqdm
# -
# You can change it for any other language maintained by Toloka
# For now there are public instructions and golden data for 'en' language only
# For other languages you will have to write your own instructions and mark your own golden sets
# (or use english version)
LANGUAGE = 'en'
# In this notebook we will use Yandex Object Storage for uploading our records to the internet.
# First of all, you need to [join Yandex Cloud services](https://cloud.yandex.com/en-ru/) if you haven't done it before.
# After that you'll need 3 steps:
# * [Create a service account](https://cloud.yandex.com/en-ru/docs/iam/operations/sa/create)
# * [Assign a role to a service account](https://cloud.yandex.com/en-ru/docs/iam/operations/sa/assign-role-for-sa)
# * [Create a static access key](https://cloud.yandex.com/en-ru/docs/iam/operations/sa/create-access-key)
#
# Finally, put your secret and public access keys in a cell below to variables `aws_secret_access_key` and `aws_access_key`.
# +
toloka_token = <PASTE YOUR TOLOKA OAUTH TOKEN HERE>
# In this notebook we used Yandex Object Storage
# You need to do 3 actions in section above
aws_secret_access_key = <PASTE YOUR SECRET ACCESS KEY HERE>
aws_access_key = <PASTE YOUR PUBLIC ACCESS KEY HERE>
# -
SANDBOX = False # Change to True for working with Sandbox Toloka
toloka_client = toloka.TolokaClient(toloka_token, 'SANDBOX' if SANDBOX else 'PRODUCTION')
toloka_domain = "sandbox.toloka.yandex.com" if SANDBOX else "toloka.yandex.com"
# ### Pricing
# +
TEXTS_COUNT = 100
approx_classification_price = round(TEXTS_COUNT * 5 / 25 * (0.025 + 0.05), 2)
approx_recording_price = round(TEXTS_COUNT * (0.015 + 0.05), 2)
approx_verification_price = round(1.2 * TEXTS_COUNT * 3 / 20 * (0.03 + 0.05), 2)
approx_pipeline_price = approx_classification_price + approx_recording_price + approx_verification_price
requester = toloka_client.get_requester()
if requester.balance >= approx_pipeline_price:
print('You have enough money on your account!')
else:
print('You haven\'t got enough money on your account!')
# -
# ### Getting raw data
# You can use already collected texts from the data folder or collect it by yourself (it will take some time).
#
# **If this is your first time viewing this notebook, we highly recommend using the pre-collected data.**
# If you want to collect data run CLI-tool `./scripts/collect_corpus.py` in the terminal.
#
# To get more info about parameters use help flag: `python3 ./scripts/collect_corpus.py -h`
#
# English pre-collected data was collected by launch with following parameters:
# `python3 ./scripts/collect_corpus.py en -size 2000 -fp ../data/en_precollected.tsv -s -min_len 90`
# +
# Working with pre-collected data (you can skip this cell if you are collecting the data yourself)
precollected_data = pd.read_csv(f'./data/{LANGUAGE}_precollected.tsv', sep='\t', error_bad_lines=False)
raw_data = precollected_data.sample(TEXTS_COUNT)
# it's okay if some lines throw an error
# +
# Working with collected by yourself data (uncomment next line for read data from disk)
# raw_data = pd.read_csv(<PATH_TO_YOUR_DATA>, sep='\t', error_bad_lines=False)
# -
# ## 1. Clearing collected data from incorrect paragraphs
# ### 1.1. Classification project
# In this project, performers will check paragraphs for mistakes and Wikipedia automated word processing artifacts.
# +
# Adding the performer the ability to choose whether texts are incorrect
radio_group_field = tb.fields.RadioGroupFieldV1(
data=tb.data.OutputData(path='is_correct'),
label='Does this text correct?',
validation=tb.conditions.RequiredConditionV1(),
options=[
tb.fields.GroupFieldOption(
label='Yes',
value='yes'),
tb.fields.GroupFieldOption(
label='No',
value='no'),
]
)
# Creating interface which performers will see using previous element
project_interface = toloka.project.view_spec.TemplateBuilderViewSpec(
config=tb.TemplateBuilder(
view=tb.view.ListViewV1(
items=[
tb.view.TextViewV1(
label='Text',
content=tb.data.InputData(path='text')
),
radio_group_field
]
)
)
)
# Setting up the project with defined parameters and interface
classification_project = toloka.project.Project(
assignments_issuing_type=toloka.project.Project.AssignmentsIssuingType.AUTOMATED,
public_name=open(f"./instructions/classification/{LANGUAGE}_project_name.txt").read().strip(),
private_comment='Clearing texts',
public_description=open(f"./instructions/classification/{LANGUAGE}_short_instructions.txt").read().strip(),
public_instructions=open(f"./instructions/classification/{LANGUAGE}_public_instructions.html").read().strip(),
task_spec=toloka.project.task_spec.TaskSpec(
input_spec={
'text_id': toloka.project.field_spec.StringSpec(),
'text': toloka.project.field_spec.StringSpec(),
},
output_spec={
'is_correct': toloka.project.field_spec.StringSpec(
allowed_values=[
'yes',
'no',
]
)
},
view_spec=project_interface,
),
)
# -
# Calling the API to create a new project
# If you have already created all pools and projects you can just get it using toloka_client.get_project('your marking project id')
classification_project = toloka_client.create_project(classification_project)
print(f'Created marking project with id {classification_project.id}')
print(f'To view the project, go to: https://{toloka_domain}/requester/project/{classification_project.id}')
# <table align="center">
# <tr><td>
# <img src="./img/classification_project_interface.png"
# alt="cls_iface" width="800">
# </td></tr>
# <tr><td align="center">
# <b>Figure 2.</b> How performers will see the tasks
# </td></tr>
# </table>
#
# <table align="center">
# <tr><td>
# <img src="./img/classification_project_instruction.png"
# alt="cls_inst" width="800">
# </td></tr>
# <tr><td align="center">
# <b>Figure 3.</b> How performers will see the instruction
# </td></tr>
# </table>
# ### 1.2. Classification training
# Here there will be training tasks for show examples of right done tasks for performers.
# +
# Setting up training
classification_training = toloka.training.Training(
project_id=classification_project.id,
private_name='[Clearing texts] Training',
may_contain_adult_content=True,
mix_tasks_in_creation_order=True,
shuffle_tasks_in_task_suite=True,
training_tasks_in_task_suite_count=10,
assignment_max_duration_seconds=60*10,
task_suites_required_to_pass=2,
retry_training_after_days=1,
inherited_instructions=True,
)
# Calling the API to create a new training
classification_training = toloka_client.create_training(classification_training)
print(f'Created training with id {classification_training.id}')
print(f'To view the training, go to: https://{toloka_domain}/requester/project/{classification_project.id}/training/{classification_training.id}')
# -
# Let`s take a look on classified training data
training_data = pd.read_csv(f'./data/{LANGUAGE}_classification_training.tsv', sep='\t')
training_data.head()
# +
# Creating tasks from training data
training_tasks = [
toloka.task.Task(
input_values={
'text_id': str(row[0]),
'text': row[1],
},
known_solutions=[
toloka.task.BaseTask.KnownSolution(
output_values={'is_correct': row[2]}
)
],
infinite_overlap=True,
pool_id=classification_training.id,
message_on_unknown_solution="" if pd.isna(row[3]) else row[3]
) for row in training_data.values
]
# Calling the API to create a new tasks
tasks_op = toloka_client.create_tasks_async(training_tasks)
op_res = toloka_client.wait_operation(tasks_op)
print(
f'Total tasks: {op_res.details["total_count"]}',
f'Total failed: {op_res.details["failed_count"]}',
f'Total success: {op_res.details["success_count"]}',
f'Total valid: {op_res.details["valid_count"]}',
f'Total not valid: {op_res.details["not_valid_count"]}',
sep='\n'
)
# -
# ### 1.3. Classification pool
# In this pool, trained performers will do classification task: they will decide whether text is correct or not.
#
# About some parameters:
# * **Overlap** - We need multiple opinions per each text for aggregate results with high degree of confidence.
# * We want **filter performers** by their results on training and by their knowledge of language of texts from dataset.
# * We need also allow performers do tasks from mobile app and browser both.
#
# About quality control:
# * We want to ban performers who answers too fast.
# * We want to ban performers based on low quality on the golden set tasks.
# * We want to ban performers who fails Captcha suspiciously often.
# * We want to ban lazy performers who skips tasks until find an easy one.
# * We want to ban performers who too much deviates from majority opinion.
# +
# Setting up pool
classification_pool = toloka.pool.Pool(
project_id=classification_project.id,
private_name='[Clearing texts] Classification pool',
may_contain_adult_content=True,
will_expire=datetime.datetime.utcnow() + datetime.timedelta(days=365),
reward_per_assignment=0.025,
auto_accept_solutions=True,
assignment_max_duration_seconds=60*25,
defaults=toloka.pool.Pool.Defaults(
default_overlap_for_new_task_suites=5
),
filter=toloka.filter.FilterAnd(
[
toloka.filter.Languages.in_(LANGUAGE.upper()),
toloka.filter.ClientType.eq(toloka.filter.ClientType.ClientType.BROWSER),
]
)
)
# Setting task mixing configuration
classification_pool.set_mixer_config(
real_tasks_count=25,
golden_tasks_count=1,
training_tasks_count=0
)
# Setting up pool quality control
# Banning performer who answers too fast
classification_pool.quality_control.add_action(
collector=toloka.collectors.AssignmentSubmitTime(
history_size=5,
fast_submit_threshold_seconds=375
),
conditions=[toloka.conditions.FastSubmittedCount > 2],
action=toloka.actions.RestrictionV2(
scope=toloka.user_restriction.UserRestriction.PROJECT,
duration_unit=toloka.user_restriction.DurationUnit.PERMANENT,
private_comment='Fast responses'
)
)
# Banning performer who answers too fast (another case)
classification_pool.quality_control.add_action(
collector=toloka.collectors.AssignmentSubmitTime(
history_size=5,
fast_submit_threshold_seconds=250
),
conditions=[toloka.conditions.FastSubmittedCount > 0],
action=toloka.actions.RestrictionV2(
scope=toloka.user_restriction.UserRestriction.PROJECT,
duration_unit=toloka.user_restriction.DurationUnit.PERMANENT,
private_comment='Fast responses'
)
)
# Banning performer by captcha criteria
classification_pool.quality_control.add_action(
collector=toloka.collectors.Captcha(history_size=5),
conditions=[toloka.conditions.FailRate >= 60],
action=toloka.actions.RestrictionV2(
scope=toloka.user_restriction.UserRestriction.PROJECT,
duration=3,
duration_unit=toloka.user_restriction.DurationUnit.DAYS,
private_comment='Captcha'
)
)
# Banning performer by majority vote criteria
classification_pool.quality_control.add_action(
collector=toloka.collectors.MajorityVote(history_size=5, answer_threshold=3),
conditions=[
toloka.conditions.TotalAnswersCount > 9,
toloka.conditions.CorrectAnswersRate < 65,
],
action=toloka.actions.RestrictionV2(
scope=toloka.user_restriction.UserRestriction.PROJECT,
duration_unit=toloka.user_restriction.DurationUnit.PERMANENT,
private_comment='Majority vote low quality'
)
)
# Banning performer who skips some tasks in a row
classification_pool.quality_control.add_action(
collector=toloka.collectors.SkippedInRowAssignments(),
conditions=[toloka.conditions.SkippedInRowCount > 2],
action=toloka.actions.RestrictionV2(
scope=toloka.user_restriction.UserRestriction.PROJECT,
duration=15,
duration_unit='DAYS',
private_comment='Lazy performer',
)
)
# Banning performers whose classification results are worse than random choice
classification_pool.quality_control.add_action(
collector=toloka.collectors.GoldenSet(),
conditions=[
toloka.conditions.GoldenSetCorrectAnswersRate < 50,
toloka.conditions.GoldenSetAnswersCount > 3
],
action=toloka.actions.RestrictionV2(
scope=toloka.user_restriction.UserRestriction.PROJECT,
duration=1,
duration_unit=toloka.user_restriction.DurationUnit.DAYS,
private_comment='Golden set'
)
)
# -
# Calling the API to create a new pool
classification_pool = toloka_client.create_pool(classification_pool)
print(f'Created pool with id {classification_pool.id}')
print(f'To view the pool, go to: https://{toloka_domain}/requester/project/{classification_project.id}/pool/{classification_pool.id}')
# Now we need to add tasks to this pool. As we saw above, we are using a golden set of tasks for quality control, so we need not only real tasks but also tasks with known answers *(the golden set)*.
raw_data
# Creating tasks from raw data (columns: id, text)
classification_tasks = [
    toloka.task.Task(
        input_values={
            'text_id': str(row[0]),
            'text': row[1],
        },
        pool_id=classification_pool.id,
    ) for row in raw_data.values
]
golden_data = pd.read_csv(f'./data/{LANGUAGE}_classification_golden.tsv', sep='\t', error_bad_lines=False)
golden_data
# Creating control tasks from golden data;
# known_solutions is what marks a task as a control task
golden_tasks = [
    toloka.task.Task(
        input_values={
            'text_id': str(row[0]),
            'text': row[1],
        },
        known_solutions=[
            toloka.task.BaseTask.KnownSolution(
                output_values={'is_correct': row[2]}
            )
        ],
        pool_id=classification_pool.id,
    ) for row in golden_data.values
]
# Calling the API to create the new tasks
# This may take some time
tasks_op = toloka_client.create_tasks_async(
    classification_tasks + golden_tasks,
    allow_defaults=True
)
op_res = toloka_client.wait_operation(tasks_op)
print(
    f'Total tasks: {op_res.details["total_count"]}',
    f'Total failed: {op_res.details["failed_count"]}',
    f'Total success: {op_res.details["success_count"]}',
    f'Total valid: {op_res.details["valid_count"]}',
    f'Total not valid: {op_res.details["not_valid_count"]}',
    sep='\n'
)
# ## 2. Labeling texts into audio files
# ### 2.1. Recording project
# In this project performers will record the given text.
#
# We want slightly different interaction variants of the task interface for mobile devices (with the Toloka App) and the browser (web-based Toloka), because the Toloka Mobile App can use built-in recording functions.
# +
# Toloka assets for using the Handlebars template engine
recording_assets = toloka.project.view_spec.ClassicViewSpec.Assets(
    script_urls=["$TOLOKA_ASSETS/js/toloka-handlebars-templates.js"]
)
# We use the Voice Recording preset from the web version; this way it's possible to configure a custom interface.
project_interface = toloka.project.view_spec.ClassicViewSpec(
    script=open('./templates/recording/recording_template.js').read().strip(),
    markup=open('./templates/recording/recording_template.html').read().strip(),
    styles=open('./templates/recording/recording_template.css').read().strip(),
    assets=recording_assets
)
# Setting up the project with defined parameters and interface
recording_project = toloka.project.Project(
    assignments_issuing_type=toloka.project.Project.AssignmentsIssuingType.AUTOMATED,
    public_name=open(f'./instructions/recording/{LANGUAGE}_project_name.txt').read().strip(),
    private_comment='Recording texts',
    public_description=open(f'./instructions/recording/{LANGUAGE}_short_instructions.txt').read().strip(),
    public_instructions=open(f'./instructions/recording/{LANGUAGE}_public_instructions.html').read().strip(),
    task_spec=toloka.project.task_spec.TaskSpec(
        input_spec={
            'text': toloka.project.field_spec.StringSpec(),
            'text_id': toloka.project.field_spec.StringSpec(required=False),
        },
        output_spec={
            'audio_record': toloka.project.field_spec.FileSpec()
        },
        view_spec=project_interface,
    ),
)
# -
# Calling the API to create a new project
# If you have already created all pools and projects you can just get it using toloka_client.get_project('your marking project id')
recording_project = toloka_client.create_project(recording_project)
print(f'Created marking project with id {recording_project.id}')
print(f'To view the project, go to: https://{toloka_domain}/requester/project/{recording_project.id}')
# <table align="center">
# <tr><td>
# <img src="./img/recording_project_interface.png"
# alt="vlz_iface" width="800">
# </td></tr>
# <tr><td align="center">
# <b>Figure 4.</b> How performers will see the tasks
# </td></tr>
# </table>
#
# <table align="center">
# <tr><td>
# <img src="./img/recording_project_instruction.png"
# alt="vlz_inst" width="800">
# </td></tr>
# <tr><td align="center">
# <b>Figure 5.</b> How performers will see the instruction
# </td></tr>
# </table>
# ### 2.2. Recording training
# Generally speaking, we could use training and admit only trained performers to the real tasks, however, since the task itself is quite simple, and its acceptance method is not automatic, it is easier to overpay a little for the verification project.
#
# **However, we strongly recommend making a training pool in your projects, as it improves the quality of the performers' work and keeps unscrupulous performers from wasting your money.**
# ### 2.3. Recording pool
# In this pool, trained performers will record the text they see.
#
# About some parameters:
# * **Manual solution acceptance** - we need performers who will verify tasks done in marking project (we will set up verification project for it later).
# * **Overlap** - we need one audio fragment per each text (actually we can increase overlap as if our model works fine with duplicating text data).
# * We want **filter performers** by their knowledge of language of texts from dataset.
# * We need also allow performers do tasks from mobile app and browser both.
#
# About quality control:
# * We want to ban performers who answers too fast.
# * We want to ban performers who fails Captcha suspiciously often.
# * We want to ban lazy performers who skips tasks until find an easy one.
# * We want to ban performers who too much deviates from majority opinion.
# * We want to increase overlap every time the task was rejected (in other word we need to return text which corresponds to rejected record back to the recording pool)
# +
# Setting up pool
recording_pool = toloka.pool.Pool(
    project_id=recording_project.id,
    private_name='[Recording texts] Recording pool',
    may_contain_adult_content=True,
    will_expire=datetime.datetime.utcnow() + datetime.timedelta(days=365),
    reward_per_assignment=0.015,
    auto_accept_solutions=False,  # manual acceptance: the verification pool will decide
    auto_accept_period_day=21,
    assignment_max_duration_seconds=60*7,
    filter=toloka.filter.FilterAnd(
        [
            toloka.filter.Languages.in_(LANGUAGE.upper()),
            toloka.filter.FilterOr(
                [
                    toloka.filter.ClientType.eq(toloka.filter.ClientType.ClientType.BROWSER),
                    toloka.filter.ClientType.eq(toloka.filter.ClientType.ClientType.TOLOKA_APP)
                ]
            )
        ]
    ),
    defaults=toloka.pool.Pool.Defaults(
        default_overlap_for_new_task_suites=1  # one record per text
    ),
)
# Setting task mixing configuration (1 task per page)
recording_pool.set_mixer_config(
    real_tasks_count=1,
    golden_tasks_count=0,
    training_tasks_count=0
)
# Setting up pool quality control
# Banning performers who answer too fast
recording_pool.quality_control.add_action(
    collector=toloka.collectors.AssignmentSubmitTime(
        history_size=5,
        fast_submit_threshold_seconds=30
    ),
    conditions=[toloka.conditions.FastSubmittedCount > 2],
    action=toloka.actions.RestrictionV2(
        scope=toloka.user_restriction.UserRestriction.PROJECT,
        duration_unit=toloka.user_restriction.DurationUnit.PERMANENT,
        private_comment='Fast responses'
    )
)
# Banning performers who answer too fast (stricter threshold, single offense)
recording_pool.quality_control.add_action(
    collector=toloka.collectors.AssignmentSubmitTime(
        history_size=5,
        fast_submit_threshold_seconds=15
    ),
    conditions=[toloka.conditions.FastSubmittedCount > 0],
    action=toloka.actions.RestrictionV2(
        scope=toloka.user_restriction.UserRestriction.PROJECT,
        duration_unit=toloka.user_restriction.DurationUnit.PERMANENT,
        private_comment='Fast responses'
    )
)
# Banning performers by the captcha criterion
recording_pool.quality_control.add_action(
    collector=toloka.collectors.Captcha(history_size=5),
    conditions=[toloka.conditions.FailRate >= 60],
    action=toloka.actions.RestrictionV2(
        scope=toloka.user_restriction.UserRestriction.PROJECT,
        duration=3,
        duration_unit=toloka.user_restriction.DurationUnit.DAYS,
        private_comment='Captcha'
    )
)
# Banning performers who skip several task suites in a row
recording_pool.quality_control.add_action(
    collector=toloka.collectors.SkippedInRowAssignments(),
    conditions=[toloka.conditions.SkippedInRowCount > 2],
    action=toloka.actions.RestrictionV2(
        scope=toloka.user_restriction.UserRestriction.PROJECT,
        duration=15,
        duration_unit='DAYS',
        private_comment='Lazy performer',
    )
)
# Increasing overlap for the task if the assignment was rejected
# (returns the text behind a rejected record back to the recording pool)
recording_pool.quality_control.add_action(
    collector=toloka.collectors.AssignmentsAssessment(),
    conditions=[toloka.conditions.AssessmentEvent == toloka.conditions.AssessmentEvent.REJECT],
    action=toloka.actions.ChangeOverlap(delta=1, open_pool=True)
)
# -
# Calling the API to create a new pool
recording_pool = toloka_client.create_pool(recording_pool)
print(f'Created pool with id {recording_pool.id}')
print(f'To view the pool, go to: https://{toloka_domain}/requester/project/{recording_project.id}/pool/{recording_pool.id}')
# ### 2.4. Verification project
# In this project, the performers will check that the voice records are correct, that is, they completely contain the corresponding text, and also do not contain technical defects or noise.
# +
# Interface elements (Template Builder)
radio_group_field = tb.fields.RadioGroupFieldV1(
    data=tb.data.OutputData(path='is_correct'),
    label='Is this record correct and whether it corresponds to the text?',
    validation=tb.conditions.RequiredConditionV1(),
    options=[
        tb.fields.GroupFieldOption(
            label='Yes',
            value='yes'),
        tb.fields.GroupFieldOption(
            label='No',
            value='no')
    ]
)
text_block = tb.view.TextViewV1(
    label='Text',
    content=tb.data.InputData(path='text')
)
audio_block = tb.view.AudioViewV1(
    url=tb.data.InputData(path='audio_record'),
    label='Record',
    validation=tb.conditions.PlayedFullyConditionV1(),  # we want to make sure the performer listens to the full record
)
# Creating the interface performers will see, from the elements above
project_interface = toloka.project.view_spec.TemplateBuilderViewSpec(
    config=tb.TemplateBuilder(
        view=tb.view.ListViewV1(
            items=[
                text_block,
                audio_block,
                radio_group_field
            ]
        )
    )
)
# Setting up the project with defined parameters and interface
verification_project = toloka.project.Project(
    assignments_issuing_type=toloka.project.Project.AssignmentsIssuingType.AUTOMATED,
    public_name=open(f'./instructions/verification/{LANGUAGE}_project_name.txt').read().strip(),
    private_comment='Verification for recorded texts',
    public_description=open(f'./instructions/verification/{LANGUAGE}_short_instructions.txt').read().strip(),
    public_instructions=open(f'./instructions/verification/{LANGUAGE}_public_instructions.html').read().strip(),
    task_spec=toloka.project.task_spec.TaskSpec(
        input_spec={
            'audio_record': toloka.project.field_spec.StringSpec(),  # we will put URLs instead of files here
            'text': toloka.project.field_spec.StringSpec(),
            'text_id': toloka.project.field_spec.StringSpec(required=False),
            'assignment_id': toloka.project.field_spec.StringSpec(required=False),
        },
        output_spec={
            'is_correct': toloka.project.field_spec.StringSpec(
                allowed_values=[
                    'yes',
                    'no'
                ]
            )
        },
        view_spec=project_interface,
    ),
)
# -
# Calling the API to create a new project
verification_project = toloka_client.create_project(verification_project)
print(f'Created project with id {verification_project.id}')
print(f'To view the project, go to: https://{toloka_domain}/requester/project/{verification_project.id}')
# <table align="center">
# <tr><td>
# <img src="./img/verification_project_interface.png"
# alt="verif_iface" width="800">
# </td></tr>
# <tr><td align="center">
# <b>Figure 6.</b> How performers will see the tasks
# </td></tr>
# </table>
#
# <table align="center">
# <tr><td>
# <img src="./img/verification_project_instruction.png"
# alt="verif_inst" width="800">
# </td></tr>
# <tr><td align="center">
# <b>Figure 7.</b> How performers will see the instruction
# </td></tr>
# </table>
# ### 2.4. Verification training
# +
# Setting up the training pool for the verification project
verification_training = toloka.training.Training(
    project_id=verification_project.id,
    private_name='[Verification for vocalizing texts] Training',
    may_contain_adult_content=True,
    mix_tasks_in_creation_order=True,
    shuffle_tasks_in_task_suite=True,
    training_tasks_in_task_suite_count=5,
    assignment_max_duration_seconds=60*20,
    task_suites_required_to_pass=1,
    retry_training_after_days=1,
    inherited_instructions=True,  # reuse the project's instructions
)
# Calling the API to create the new training
verification_training = toloka_client.create_training(verification_training)
print(f'Created training with id {verification_training.id}')
print(f'To view the training, go to: https://{toloka_domain}/requester/project/{verification_project.id}/training/{verification_training.id}')
# -
# Let's start working with our object storage.
#
# First of all, you need to create bucket for your audio files.
#
# The code below will do it for you; all you need is to come up with an unoccupied, unique name for the bucket.
#
# **You can get more details about naming buckets and its naming [here](https://cloud.yandex.com/en-ru/docs/storage/concepts/bucket).**
# this function tries to create bucket with given name
def create_bucket(bucket_name):
    """Create a publicly readable bucket in Yandex Object Storage.

    Bucket names are global, so creation fails when the name is already taken.
    Prints the outcome instead of raising, to keep the notebook flow going.

    Args:
        bucket_name: Globally unique name for the new bucket.
    """
    session = boto3.session.Session(
        region_name="us-east-1",
        aws_secret_access_key=aws_secret_access_key,
        aws_access_key_id=aws_access_key
    )
    s3 = session.client(
        service_name="s3",
        endpoint_url="https://storage.yandexcloud.net",
        aws_secret_access_key=aws_secret_access_key,
        aws_access_key_id=aws_access_key
    )
    try:
        s3.create_bucket(Bucket=bucket_name, ACL='public-read')
        print("Success!")
    except Exception as exc:
        # BUG FIX: the old message unconditionally claimed the name was taken,
        # even for auth/network failures. Report the real cause as well.
        print(f"Bucket was not created: {exc}. "
              "If the name is already taken, change it and try again.")
# +
RECORDS_BUCKET_NAME = 'voice-records'  # change the name here if it's already taken
create_bucket(RECORDS_BUCKET_NAME)
# -
# this function uploads file to a given bucket and returns direct download link for it
def load_image_on_yandex_storage(bucket_name, file_path, img_id=None):
    """Upload a local file to the given bucket and return its public URL.

    When `img_id` is omitted, the file's own basename is used as the
    object key inside the bucket.
    """
    if img_id is None:
        img_id = os.path.split(file_path)[-1]
    storage_session = boto3.session.Session(
        region_name="us-east-1",
        aws_secret_access_key=aws_secret_access_key,
        aws_access_key_id=aws_access_key
    )
    storage_client = storage_session.client(
        service_name="s3",
        endpoint_url="https://storage.yandexcloud.net",
        aws_secret_access_key=aws_secret_access_key,
        aws_access_key_id=aws_access_key
    )
    storage_client.upload_file(file_path, bucket_name, img_id)
    return f"https://storage.yandexcloud.net/{bucket_name}/{img_id}"
answers = pd.read_csv(f'./data/{LANGUAGE}_records/answers.tsv', sep='\t', index_col=['filename'])
answers
# +
# Uploading local example records to the bucket and building training tasks
# with known answers (and optional hints shown on a wrong answer)
last_ind = 0
verification_training_tasks = []
dir_path = f'./data/{LANGUAGE}_records'
for record_name in os.listdir(dir_path):
    file_path = os.path.join(dir_path, record_name)
    if not os.path.isfile(file_path):
        continue
    ext = os.path.splitext(file_path)[-1]
    if ext not in ['.mp3', '.aac', '.ogg', '.m4a', '.mp4']:
        continue  # skip non-audio files (e.g. answers.tsv itself)
    url = load_image_on_yandex_storage(RECORDS_BUCKET_NAME,
                                       file_path,
                                       f'{LANGUAGE}/verification_training/verification_training_{last_ind}{ext}')
    last_ind += 1
    hint = answers.loc[record_name].get('hint')
    task = toloka.task.Task(
        input_values={
            'audio_record': url,
            'text': answers.loc[record_name]['text'],
        },
        known_solutions=[
            toloka.task.BaseTask.KnownSolution(
                output_values={'is_correct': answers.loc[record_name]['answer']}
            )
        ],
        pool_id=verification_training.id,
        infinite_overlap=True,  # training tasks can be done by every performer
        message_on_unknown_solution="" if pd.isna(hint) else hint
    )
    verification_training_tasks.append(task)
print(f'You can check that records appeared in your bucket:\nhttps://storage.yandexcloud.net/{RECORDS_BUCKET_NAME}/ (XML)\nor in console mode here:\nhttps://console.cloud.yandex.ru/ (choose Object Storage)')
# -
# <table align="center">
# <tr><td>
# <img src="./img/object_storage.png"
# alt="obj_storage" width="800">
# </td></tr>
# <tr><td align="center">
# <b>Figure 8.</b> How you will see your bucket in the console mode of the object storage
# </td></tr>
# </table>
#
# <table align="center">
# <tr><td>
# <img src="./img/verification_training.png"
# alt="verif_train" width="800">
# </td></tr>
# <tr><td align="center">
# <b>Figure 9.</b> How performers will see training tasks
# </td></tr>
# </table>
# Calling the API to create the new training tasks
tasks_op = toloka_client.create_tasks_async(verification_training_tasks)
op_res = toloka_client.wait_operation(tasks_op)
print(
    f'Total tasks: {op_res.details["total_count"]}',
    f'Total failed: {op_res.details["failed_count"]}',
    f'Total success: {op_res.details["success_count"]}',
    f'Total valid: {op_res.details["valid_count"]}',
    f'Total not valid: {op_res.details["not_valid_count"]}',
    sep='\n'
)
# ### 2.5. Verification pool
# +
# Setting up pool
verification_pool = toloka.pool.Pool(
    project_id=verification_project.id,
    private_name='[Verification for recording texts] Pool',
    may_contain_adult_content=True,
    will_expire=datetime.datetime.utcnow() + datetime.timedelta(days=365),
    reward_per_assignment=0.03,
    auto_accept_solutions=True,  # verification answers themselves are auto-accepted
    assignment_max_duration_seconds=60*30,
    defaults=toloka.pool.Pool.Defaults(
        default_overlap_for_new_task_suites=3  # three opinions per record
    ),
    filter=toloka.filter.FilterAnd(
        [
            toloka.filter.Languages.in_(LANGUAGE.upper()),
            toloka.filter.FilterOr(
                [
                    toloka.filter.ClientType.eq(toloka.filter.ClientType.ClientType.BROWSER),
                    toloka.filter.ClientType.eq(toloka.filter.ClientType.ClientType.TOLOKA_APP)
                ]
            )
        ]
    ),
)
# Setting task mixing configuration (20 tasks per page)
verification_pool.set_mixer_config(
    real_tasks_count=20,
    golden_tasks_count=0,
    training_tasks_count=0
)
# Setting up pool quality control
# Banning performers who answer too fast
verification_pool.quality_control.add_action(
    collector=toloka.collectors.AssignmentSubmitTime(
        history_size=5,
        fast_submit_threshold_seconds=30
    ),
    conditions=[toloka.conditions.FastSubmittedCount > 2],
    action=toloka.actions.RestrictionV2(
        scope=toloka.user_restriction.UserRestriction.PROJECT,
        duration_unit=toloka.user_restriction.DurationUnit.PERMANENT,
        private_comment='Fast responses'
    )
)
# Banning performers by the captcha criterion
verification_pool.quality_control.add_action(
    collector=toloka.collectors.Captcha(history_size=5),
    conditions=[toloka.conditions.FailRate >= 60],
    action=toloka.actions.RestrictionV2(
        scope=toloka.user_restriction.UserRestriction.PROJECT,
        duration=3,
        duration_unit=toloka.user_restriction.DurationUnit.DAYS,
        private_comment='Captcha'
    )
)
# Banning performers who skip several task suites in a row
verification_pool.quality_control.add_action(
    collector=toloka.collectors.SkippedInRowAssignments(),
    conditions=[toloka.conditions.SkippedInRowCount > 2],
    action=toloka.actions.RestrictionV2(
        scope=toloka.user_restriction.UserRestriction.PROJECT,
        duration=15,
        duration_unit='DAYS',
        private_comment='Lazy performer',
    )
)
# Banning performers by the majority vote criterion
verification_pool.quality_control.add_action(
    collector=toloka.collectors.MajorityVote(history_size=5, answer_threshold=2),
    conditions=[
        toloka.conditions.TotalAnswersCount > 5,
        toloka.conditions.CorrectAnswersRate < 60,
    ],
    action=toloka.actions.RestrictionV2(
        scope=toloka.user_restriction.UserRestriction.PROJECT,
        duration_unit=toloka.user_restriction.DurationUnit.PERMANENT,
        private_comment='Majority vote low quality'
    )
)
# Calling the API to create the pool
verification_pool = toloka_client.create_pool(verification_pool)
print(f'Created pool with id {verification_pool.id}')
print(f'To view the training, go to: https://{toloka_domain}/requester/project/{verification_project.id}/pool/{verification_pool.id}')
# -
# ## 3. Running the pipeline
# Now we will run a whole pipeline.
#
# If you have some questions check the pipeline scheme again in the beginning of this notebook.
# Let's define each our action by the function below.
# +
# Common pool functions
def wait_pool_for_close(pool, sleep_time=60):
    """Block until Toloka reports the given pool as closed, polling its status."""
    pool_id = pool.id
    while True:
        current = toloka_client.get_pool(pool_id)  # refresh pool info
        if current.is_closed():
            break
        print(
            f'\t{datetime.datetime.now().strftime("%H:%M:%S")}\t'
            f'Pool {current.id} has status {current.status}.'
        )
        time.sleep(sleep_time)
def wait_pool_for_submit(pool, sleep_time=60, min_count=0):
    """Poll until the pool has more than `min_count` submitted assignments
    (or the pool closes); return the count of submitted assignments seen."""
    search = toloka.search_requests.AssignmentSearchRequest(
        status=toloka.assignment.Assignment.SUBMITTED,  # Only take completed tasks that haven't been accepted or rejected
        pool_id=pool.id,
    )
    while True:
        pool = toloka_client.get_pool(pool.id)  # refresh pool info
        submitted = sum(1 for _ in toloka_client.get_assignments(search))
        print(
            f'\t{datetime.datetime.now().strftime("%H:%M:%S")}\t'
            f'Pool {pool.id} has {submitted} submitted tasks.'
        )
        if submitted > min_count or pool.is_closed():
            return submitted
        time.sleep(sleep_time)
def aggregate_pool_results(pool):
    """Run Dawid-Skene aggregation over the pool's 'is_correct' answers.

    Args:
        pool: The Toloka pool whose solutions to aggregate.

    Returns:
        The full list of aggregated solutions, all pages merged.
    """
    print(f'Started aggregation results for the pool {pool.id}:\n',
          f'https://{toloka_domain}/requester/operations/project/{pool.project_id}/pool/{pool.id}',
          sep='')
    aggregation_operation = toloka_client.aggregate_solutions_by_pool(
        type='DAWID_SKENE',
        pool_id=pool.id,
        fields=[toloka.aggregation.PoolAggregatedSolutionRequest.Field(name='is_correct')]
    )
    aggregation_operation = toloka_client.wait_operation(aggregation_operation)
    print(f'Finished aggregation results for the pool {pool.id}:\n',
          f'https://{toloka_domain}/requester/operations/project/{pool.project_id}/pool/{pool.id}',
          sep='')
    # The result is paginated: keep fetching pages until has_more is False,
    # using the last task id of the current page as the cursor for the next.
    aggregation_result = toloka_client.find_aggregated_solutions(aggregation_operation.id)
    results = list(aggregation_result.items)
    while aggregation_result.has_more:
        aggregation_result = toloka_client.find_aggregated_solutions(
            aggregation_operation.id,
            task_id_gt=aggregation_result.items[-1].task_id,  # was items[len(items)-1]
        )
        # extend() instead of quadratic `results = results + items`
        results.extend(aggregation_result.items)
    return results
def get_solution_attachment_url(solution):
    """Move a submitted audio attachment from Toloka to the records bucket.

    Downloads the attachment, uploads it under
    <LANGUAGE>/recording-pool-<pool id>/ in the records bucket, removes the
    local copy, and returns a `(url, ok)` pair. `ok` is False (and url None)
    when the attachment's extension is not an accepted audio format.
    """
    attachment = toloka_client.get_attachment(solution.output_values['audio_record'])
    ext = os.path.splitext(attachment.name)[-1]
    # BUG FIX: '.mp4' was listed without the dot, so .mp4 files were rejected
    # (os.path.splitext always keeps the leading dot).
    if ext not in ['.m4a', '.flac', '.mp3', '.mp4', '.wav', '.ogg', '.wma', '.aac', '.ape', '']:
        return None, False
    if ext == '':
        ext = '.m4a'  # extension by default
    # downloading the record to disk (current working directory)
    filename = f"{attachment.id}{ext}"
    with open(filename, "wb+") as f:
        toloka_client.download_attachment(attachment.id, f)
    # uploading the attachment to Yandex Cloud Object Storage
    url = load_image_on_yandex_storage(  # we defined this one in the verification project section earlier
        RECORDS_BUCKET_NAME,
        filename,
        '{}/recording-pool-{}/{}'.format(
            LANGUAGE,
            recording_pool.id,
            filename
        )
    )
    # deleting the local copy; best effort only
    try:
        os.remove(filename)  # BUG FIX: the removed path was garbled/hard-coded
    except OSError:
        print(f'Failed to remove file {filename}.')
    # BUG FIX: callers unpack `record_url, correct = ...`, so the success path
    # must also return a 2-tuple (it previously returned the bare url).
    return url, True
# +
# Getting classification results and running recording pool
def prepare_recording_tasks(recording_pool, classification_pool, confidence_lvl=0.6):
    """Turn confidently-correct classification results into recording tasks.

    Only texts aggregated as 'yes' with confidence >= `confidence_lvl`
    make it into the recording pool.
    """
    tasks = []
    for verdict in aggregate_pool_results(classification_pool):
        # fetch the inputs of the aggregated task
        task_inputs = toloka_client.get_task(verdict.task_id).input_values
        marked_correct = verdict.output_values['is_correct'] == 'yes'
        if not (marked_correct and verdict.confidence >= confidence_lvl):
            continue
        tasks.append(
            toloka.task.Task(
                input_values=task_inputs,
                pool_id=recording_pool.id
            )
        )
    print(f'Prepared {len(tasks)} recording tasks for pool:\n',
          f'https://{toloka_domain}/requester/project/{recording_pool.project_id}/pool/{recording_pool.id}',
          sep='')
    return tasks
def run_recording_pool(recording_pool, recording_tasks):
    """Upload the recording tasks and open the recording pool in one call."""
    create_op = toloka_client.create_tasks_async(
        recording_tasks,
        allow_defaults=True,
        open_pool=True  # open the pool as soon as the tasks are in
    )
    toloka_client.wait_operation(create_op)
    print('Opened pool:\n',
          f'https://{toloka_domain}/requester/project/{recording_pool.project_id}/pool/{recording_pool.id}',
          sep='')
# +
# Getting recording assignments and running verification pool
def prepare_verification_tasks(verification_pool):
    """Build verification tasks from the recording pool's SUBMITTED assignments.

    Each submitted solution's audio attachment is re-hosted in object storage
    via get_solution_attachment_url; assignments whose attachment has an
    unsupported format are rejected right away.

    NOTE(review): this reads the module-level `recording_pool` instead of
    taking it as a parameter, so it only works after the recording pool was
    created above — consider passing it explicitly.
    """
    verification_tasks = []  # Tasks that we will send for verification
    request = toloka.search_requests.AssignmentSearchRequest(
        status=toloka.assignment.Assignment.SUBMITTED,  # Only take completed tasks that haven't been accepted or rejected
        pool_id=recording_pool.id,
    )
    # Create and store new tasks
    for assignment in toloka_client.get_assignments(request):
        for task, solution in zip(assignment.tasks, assignment.solutions):
            record_url, correct = get_solution_attachment_url(solution)
            if not correct:
                toloka_client.reject_assignment(assignment.id,
                                                'Incorrect format for audio file.')
                continue
            verification_tasks.append(
                toloka.task.Task(
                    input_values={
                        'text': task.input_values['text'],
                        'text_id': task.input_values.get('text_id', ''),
                        # assignment_id lets us accept/reject the recording later
                        'assignment_id': assignment.id,
                        'audio_record': record_url
                    },
                    pool_id=verification_pool.id,
                )
            )
    print(f'Generated {len(verification_tasks)} new verification tasks')
    return verification_tasks
def run_verification_pool(verification_pool, verification_tasks):
    """Upload verification tasks, open the pool, and return a mapping from
    verification task id to the recording assignment id it verifies."""
    upload_op = toloka_client.create_tasks_async(
        verification_tasks,
        allow_defaults=True
    )
    toloka_client.wait_operation(upload_op)
    # Control (golden) tasks carry known_solutions and have no recording
    # assignment behind them, so they are filtered out of the mapping.
    task_to_assignment = {
        task.id: task.input_values['assignment_id']
        for task in toloka_client.get_tasks(pool_id=verification_pool.id)
        if not task.known_solutions
    }
    open_op = toloka_client.open_pool(verification_pool.id)
    toloka_client.wait_operation(open_op)
    print(f'Opened pool:\n',
          f'https://{toloka_domain}/requester/project/{verification_pool.project_id}/pool/{verification_pool.id}',
          sep='')
    return task_to_assignment
def set_answers_status(verification_results, task_to_assignment, links, confidence_lvl=0.6):
    """Accept or reject recording assignments from aggregated verification
    verdicts; return `links` updated with the URLs of accepted records."""
    print('Started adding results to recording tasks')
    for verdict in tqdm(verification_results):
        if verdict.task_id not in task_to_assignment:
            continue  # verdict does not belong to this batch of tasks
        # look up the inputs of the aggregated verification task
        task_inputs = toloka_client.get_task(verdict.task_id).input_values
        assignment_id = task_inputs['assignment_id']
        is_accepted = (verdict.output_values['is_correct'] == 'yes'
                       and verdict.confidence >= confidence_lvl)
        try:
            if is_accepted:
                toloka_client.accept_assignment(assignment_id,
                                                'Well done!')
                links[assignment_id] = task_inputs['audio_record']  # saving urls for getting results later
            else:
                toloka_client.reject_assignment(assignment_id,
                                                'Record is incorrect. Check instructions for more details.')
        except Exception:
            pass  # Already processed this assignment
        task_to_assignment.pop(verdict.task_id, None)
    print('Finished adding results to recording tasks')
    return links
# -
# Now we can run the continuous pipeline.
# +
# Run the pipeline
links = {}  # assignment id -> accepted record URL; filled by set_answers_status
# Opening the trainings and the first pool of the pipeline — the classification pool
toloka_client.open_pool(classification_training.id)
toloka_client.open_pool(verification_training.id)
toloka_client.open_pool(classification_pool.id)
while True:
    print('\nWaiting for classification pool to close...')
    wait_pool_for_close(classification_pool)
    print(f'Classification pool {classification_pool.id} is finally closed!')
    # Preparing tasks for the recording project
    if recording_pool.is_closed():
        recording_tasks = prepare_recording_tasks(
            recording_pool,
            classification_pool,
            confidence_lvl=0.8
        )
        # Opening the recording pool with the correct texts (from the classification project)
        # We do it once because we were waiting for all input texts from the classification project
        run_recording_pool(recording_pool, recording_tasks)
    # Updating pools info
    recording_pool = toloka_client.get_pool(recording_pool.id)
    verification_pool = toloka_client.get_pool(verification_pool.id)
    # Waiting for any submitted tasks
    print('\nWaiting submitted tasks for recording pool...')
    submitted = wait_pool_for_submit(recording_pool)
    # Make sure all the tasks are done
    if recording_pool.is_closed() and verification_pool.is_closed() and submitted == 0:
        print('All the tasks are done!')
        break
    # Preparing tasks for the verification project
    verification_tasks = prepare_verification_tasks(verification_pool)
    # Adding tasks to the verification pool and opening it
    task_to_assignment = run_verification_pool(verification_pool, verification_tasks)
    print('\nWaiting for verification pool to close')
    wait_pool_for_close(verification_pool)
    print(f'Verification pool {verification_pool.id} is finally closed!')
    # Getting the aggregated verification results
    verification_results = aggregate_pool_results(verification_pool)
    # Rejecting/accepting submitted records (in the recording project) based on the aggregation
    links = set_answers_status(verification_results,
                               task_to_assignment,
                               links,
                               confidence_lvl=0.6)
    print(f'Results received at {datetime.datetime.now()}')
# -
# P.S.: It is often more profitable to use a high confidence level in the initial project and a bit lower confidence level in the verification project than to do the opposite, since this will save money: we will be less likely to let incorrect texts dive deep into our pipeline. However, the confidence level directly affects the quality of the resulting dataset, so making it too low is not advisable at all.
# ### Getting the results
# You can download data from web-version or use code snippet below.
def get_recording_results(recording_pool, links=None, download_records=False, path=f'./results/{LANGUAGE}_records'):
    """Collect the ACCEPTED recording assignments into a DataFrame.

    Args:
        recording_pool: Pool whose accepted assignments to collect.
        links: Mapping of assignment id -> record URL (required when
            download_records is False; filled by set_answers_status).
        download_records: If True, download each record into `path` and put
            the local file path in the 'audio_record' column; otherwise put
            the storage URL from `links` there.
        path: Directory for downloaded records (created if missing).

    Returns:
        pandas.DataFrame with columns 'text_id', 'text', 'audio_record'.

    Raises:
        ValueError: If download_records is False and `links` is not given.
    """
    request = toloka.search_requests.AssignmentSearchRequest(
        status=toloka.assignment.Assignment.ACCEPTED,  # Only take completed tasks that have been accepted
        pool_id=recording_pool.id,
    )
    dataset = {
        'text_id': [],
        'text': [],
        'audio_record': []
    }
    if download_records:
        # creating directories in path
        try:
            os.makedirs(path)
            print(f'Created directories in path {path}')
        except FileExistsError:
            print(f'Using already existing directory {path}')
        abs_path = os.path.abspath(path)
    # If download_records=False the 'audio_record' column contains the direct
    # URLs used in the verification project; if True it contains local paths.
    for assignment in toloka_client.get_assignments(request):
        for task, solution in zip(assignment.tasks, assignment.solutions):
            text_id = task.input_values.get('text_id', '')
            text = task.input_values['text']
            dataset['text_id'].append(text_id)
            dataset['text'].append(text)
            if not download_records:
                if links is None:
                    # ValueError (was a generic Exception): still caught by
                    # any `except Exception` a caller may have.
                    raise ValueError('"links" param must be specified with flag "download_records=False"')
                # NOTE(review): links is keyed per assignment; this assumes one
                # task per assignment (the recording pool mixes 1 task per page).
                record_url = links[assignment.id]
                dataset['audio_record'].append(record_url)
            else:
                attachment = toloka_client.get_attachment(solution.output_values['audio_record'])
                ext = os.path.splitext(attachment.name)[-1]
                if ext == '':
                    ext = '.m4a'  # extension by default
                # downloading the record to disk
                filepath = os.path.join(abs_path, f"{attachment.id}{ext}")
                with open(filepath, "wb+") as f:
                    toloka_client.download_attachment(attachment.id, f)
                print(f'Downloaded record: {filepath}')
                dataset['audio_record'].append(filepath)
    print('Finished getting results.')
    # converting to a pandas DataFrame (for comfortable .tsv export)
    return pd.DataFrame.from_dict(dataset)
# +
# dataset = get_recording_results(recording_pool, links, download_records=False)
# You can change the downloading mode by using the commented call above instead
dataset = get_recording_results(recording_pool, download_records=True)
# -
PATH_TO_SAVE = f'./results/{LANGUAGE}_dataset.tsv'
dataset.to_csv(PATH_TO_SAVE, sep='\t', index=False)
# ### Cleaning up the storage
# In this section we will clean our storage from the junk records we used in pipeline before.
# This function deletes files from storage
# You can configure what kind of records you want to delete from your bucket in storage
def cleanup_storage(bucket_name,
                    links,
                    recording_pool=None,
                    delete_rejected=True,
                    delete_accepted=False,
                    delete_training=False):
    """Delete junk audio records from the S3-compatible storage bucket.

    Args:
        bucket_name: Name of the bucket to clean up.
        links: Mapping of assignment id -> direct record URL for ACCEPTED
            records; used to tell accepted records apart from rejected ones
            by filename.
        recording_pool: Toloka recording pool whose uploaded records should
            be cleaned. Required when delete_rejected or delete_accepted.
        delete_rejected: Delete records rejected during verification.
        delete_accepted: Delete records accepted during verification.
        delete_training: Delete verification training records.

    Raises:
        Exception: If pool records must be deleted but recording_pool is None.

    NOTE(review): relies on module-level globals aws_secret_access_key,
    aws_access_key and LANGUAGE defined earlier in the notebook.
    """
    session = boto3.session.Session(
        region_name="us-east-1",
        aws_secret_access_key=aws_secret_access_key,
        aws_access_key_id=aws_access_key
    )
    s3 = session.client(
        service_name="s3",
        endpoint_url="https://storage.yandexcloud.net",
        aws_secret_access_key=aws_secret_access_key,
        aws_access_key_id=aws_access_key
    )
    # Pool records live under
    # https://storage.yandexcloud.net/<BUCKET>/<LANGUAGE>/recording-pool-<ID>/...
    if delete_rejected or delete_accepted:
        if recording_pool is None:
            raise Exception("While deleting ACCEPTED and REJECTED recording pool must be defined")
        # Filenames of ACCEPTED records, extracted from their direct URLs.
        acc_files = {os.path.split(url)[-1] for url in links.values()}
        prefix = f'{LANGUAGE}/recording-pool-{recording_pool.id}/'
        # BUG FIX: this previously listed objects from the global
        # RECORDS_BUCKET_NAME instead of the bucket_name argument, so cleaning
        # any other bucket silently used the wrong object listing.
        objects = s3.list_objects(Bucket=bucket_name, Prefix=prefix)
        for record in objects.get('Contents', []):
            filename = os.path.split(record['Key'])[-1]
            is_accepted = filename in acc_files
            # Delete a record when its verdict matches the requested flags.
            if (is_accepted and delete_accepted) or (not is_accepted and delete_rejected):
                s3.delete_object(Bucket=bucket_name, Key=prefix + filename)
                print(f"Deleted .../{bucket_name}/{prefix + filename}")
        # Remove the directory marker only once everything inside is gone.
        if delete_accepted and delete_rejected:
            s3.delete_object(Bucket=bucket_name, Key=prefix)
            print(f"Deleted .../{bucket_name}/{prefix}")
    # Verification training records live under
    # https://storage.yandexcloud.net/<BUCKET>/<LANGUAGE>/verification_training/...
    if delete_training:
        prefix = f'{LANGUAGE}/verification_training/'
        objects = s3.list_objects(Bucket=bucket_name, Prefix=prefix)
        for record in objects.get('Contents', []):
            filename = os.path.split(record['Key'])[-1]
            s3.delete_object(Bucket=bucket_name, Key=prefix + filename)
            print(f"Deleted .../{bucket_name}/{prefix + filename}")
        # Remove the directory marker after its contents are deleted.
        s3.delete_object(Bucket=bucket_name, Key=prefix)
        print(f"Deleted .../{bucket_name}/{prefix}")
# cleaning up our storage from rejected and training records
# (accepted records are kept: delete_accepted=False)
cleanup_storage(
    RECORDS_BUCKET_NAME,
    links,
    recording_pool=recording_pool,
    delete_rejected=True,
    delete_accepted=False,
    delete_training=True
)
|
examples/speech_recognition/ASR_pipeline.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Praxis-QR/BDSN/blob/main/Pipeline_1_Customer_Conversion.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="NC3PUiZCnAjb"
# #Predicting Customer Conversion with Logistic Regression
# + [markdown] id="blYZy6RCDeX2"
# #Install
# + colab={"base_uri": "https://localhost:8080/"} id="NFD28JjJQS66" outputId="ca15cc30-574a-4bd0-b5b6-6dcd23748b8b"
# !pip3 -q install pyspark
from pyspark.sql import SparkSession
spark = SparkSession.builder.appName('Praxis').getOrCreate()
# + [markdown] id="OAyNYK99ESFe"
# #Load Data
# + id="tIXoEpOEopWV"
# !wget -O Data_Set.csv -q https://raw.githubusercontent.com/Praxis-QR/BDSN/main/Documents/CustomerConversion_GG.csv
# + id="izY801pZKkgP"
# Load and Read the dataset
dfCusCon=spark.read.csv('Data_Set.csv',inferSchema=True,header=True)
# + id="xog4TsCmmEEY" colab={"base_uri": "https://localhost:8080/"} outputId="89659e0b-1e6f-4c6c-92bd-7f35591a8ae7"
# Chech the datatypes of the inputs
dfCusCon.printSchema()
# + [markdown] id="pf0VZsPxLCM1"
# # Basic EDA :
#
# To understand the data set and gather informations about the data
# + colab={"base_uri": "https://localhost:8080/"} id="oywDvXbEK0ht" outputId="be2c9c60-6f69-4fbc-bc81-56c7ab987824"
# Check the shape of the dataset
print((dfCusCon.count(), len(dfCusCon.columns)))
# + colab={"base_uri": "https://localhost:8080/"} id="PAobh46Brcq_" outputId="f4664f2f-5237-43ed-e326-9fcbaa741f8c"
# Look at the dataset using SPARK
dfCusCon.show(5)
# + [markdown] id="RwILHVrOrOgn"
# Country and Platform are categorical and would have to be converted to numerical values
# + id="HzvHXBUvtMmj" colab={"base_uri": "https://localhost:8080/"} outputId="bc79a29e-c220-4c7b-f85c-95ed7ad7ecb8"
# Check the statistical measures of the dataset
dfCusCon.describe().show()
# + [markdown] id="PjQK0lcEuMyu"
# The average age of visitors is close to 28 years, and they viewed around 9 web pages during the website visit.
# + id="vXYZ6MrktxZo" colab={"base_uri": "https://localhost:8080/"} outputId="8c5ca22c-d369-4173-c5ad-fbcce6759021"
# Check individual columns using group By function to understand the data in deeply.
dfCusCon.groupBy('Country').count().show()
# + [markdown] id="THYNVtUmQQI6"
# Maximum number of visitors are from Indonesia.
# + colab={"base_uri": "https://localhost:8080/", "height": 406} id="26A6yHVnGVsk" outputId="da10985c-1118-4e31-f74f-02289a8ef945"
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
new_df = dfCusCon.toPandas()
plt.figure(figsize=(10,6))
sns.barplot(y = new_df.groupby('Platform').count()['Web_pages_viewed'], x = new_df.reset_index().groupby('Platform').count()['Web_pages_viewed'].index)
# + colab={"base_uri": "https://localhost:8080/", "height": 406} id="Le_qJQwLJROP" outputId="dca5212a-be38-4617-8e32-f68d41a9e3ee"
plt.figure(figsize=(10,6))
sns.barplot(y = new_df.groupby('Age').count()['Web_pages_viewed'], x = new_df.reset_index().groupby('Age').count()['Web_pages_viewed'].index)
# + id="ZRoG8VLxvfjk" colab={"base_uri": "https://localhost:8080/"} outputId="d61fbd23-17a0-4605-b759-aec89f20b100"
dfCusCon.groupBy('Platform').count().show()
# + [markdown] id="Qd8BhEjzvprL"
# Total numbers of Yahoo Platform users are the highest.
# + id="F1spStlmv7SO" colab={"base_uri": "https://localhost:8080/"} outputId="4e017047-b4c2-4d46-9b3c-1adc890012d1"
dfCusCon.groupBy('Status').count().show()
# + [markdown] id="0cMFAxnNwK-g"
# Equal number of users who are converted and non-converted.
# + id="DAOYb97mwSTs" colab={"base_uri": "https://localhost:8080/"} outputId="1bfade76-7f25-43cf-8efc-0f4c3701e6d5"
dfCusCon.groupBy('Country').mean().show()
# + [markdown] id="VRQWF758wpND"
# The average number of web_page_viewed is highest in Malaysia and lowest in Brazil.
# + id="2zBKdlONxbK4" colab={"base_uri": "https://localhost:8080/"} outputId="222aaf27-eb76-42d6-9ec6-de915c4a5372"
dfCusCon.groupBy('Platform').mean().show()
# + [markdown] id="MrWo0WEFxw5s"
# The average number of web_page_viewed is highest for Google Platform.
# + id="5jouQLANyMbF" colab={"base_uri": "https://localhost:8080/"} outputId="36dfe634-86ef-415e-bd94-aca7e2c33e42"
dfCusCon.groupBy('Status').mean().show()
# + [markdown] id="SFISnHQ_y7j9"
# There is a strong connection between the conversion
# status and the number of Web_pages_viewed along with repeat_Visitor.
# + [markdown] id="JIypSydfRPsf"
# # Feature Engineering :
#
# Converting the Data for ML purposes
# + id="VNXOfJw2Wspi"
from pyspark.ml.feature import StringIndexer
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.feature import OneHotEncoder
from pyspark.ml.classification import LogisticRegression
# + [markdown] id="-ZuPM-knvmW_"
# ##Platform Data
# + id="JdQWTT9hWwPZ" colab={"base_uri": "https://localhost:8080/"} outputId="6934247a-b813-4369-d479-82d6bb3c26b6"
#Platform_indexer = StringIndexer(inputCol="Platform", outputCol="Platform_Num").fit(df)
Platform_indexer = StringIndexer(inputCol="Platform", outputCol="Platform_Num")
dfWork = Platform_indexer.fit(dfCusCon).transform(dfCusCon)
#dfWork.show(5,False)
dfWork.show(5)
# + id="l7htzpkLXFwT" colab={"base_uri": "https://localhost:8080/"} outputId="8a32008b-7fcf-4a6e-edf0-736bbe484fa7"
dfWork.groupBy('Platform').count().orderBy('count', ascending=False).show(5)
# + id="Upeyy31DXZyI" colab={"base_uri": "https://localhost:8080/"} outputId="58388aeb-3b20-4617-ed74-6e7ad430e44a"
dfWork.groupBy('Platform_Num').count().orderBy('count', ascending=False).show(5)
# + id="22Cqupu8fhct" colab={"base_uri": "https://localhost:8080/"} outputId="ba1c64d8-26f9-45f7-f715-21f2510e3633"
Platform_encoder = OneHotEncoder(inputCol="Platform_Num", outputCol="Platforms_Vector")
Platform_encoder.setDropLast(False) #setDropLast : https://stackoverflow.com/questions/39500213/why-does-sparks-onehotencoder-drop-the-last-category-by-default
#ohe = Platform_encoder.fit(df)
dfWork = Platform_encoder.fit(dfWork).transform(dfWork)
dfWork.show(5)
# + id="u_AGe31ugM9H" colab={"base_uri": "https://localhost:8080/"} outputId="68d84ad4-cd87-4946-f578-fa80982ad920"
dfWork.groupBy('Platforms_Vector').count().orderBy('count', ascending=False).show(5,False)
# + [markdown] id="hewbXQxJgk7-"
# ##Country Data
# + id="OGuzRv7Sght0" colab={"base_uri": "https://localhost:8080/"} outputId="f06eeb03-9d12-4763-beb6-9c8fbad97222"
#Country_indexer = StringIndexer(inputCol="Country", outputCol="Country_Num").fit(df)
Country_indexer = StringIndexer(inputCol="Country", outputCol="Country_Num")
dfWork = Country_indexer.fit(dfWork).transform(dfWork)
dfWork.show(5)
# + id="0qhkGMe-hbsQ" colab={"base_uri": "https://localhost:8080/"} outputId="8d3b5a34-1c5b-4104-d5fb-89a2d9d69251"
dfWork.groupBy('Country').count().orderBy('count', ascending = False).show(5,False)
# + id="G9b3mfSmhnEE" colab={"base_uri": "https://localhost:8080/"} outputId="7b6a9106-4bdc-431a-ab64-7094a19ac35e"
dfWork.groupBy('Country_Num').count().orderBy('count', ascending = False).show(5,False)
# + id="CkqKDQfFhxVS"
Country_encoder =OneHotEncoder(inputCol="Country_Num", outputCol="Country_Vector")
Country_encoder.setDropLast(False)
#ohhe = Country_encoder.fit(df)
dfWork = Country_encoder.fit(dfWork).transform(dfWork)
# + id="9Zlvm4AMiO-d" colab={"base_uri": "https://localhost:8080/"} outputId="b6a125c6-967f-4ce7-e317-1afd46a3d0ca"
dfWork.select(['Country','Country_Num','Country_Vector']).show(5)
# + id="nCCQDvNsib8b" colab={"base_uri": "https://localhost:8080/"} outputId="55484258-71b9-48e7-f03e-eb613fd930b4"
dfWork.groupBy('Country_Vector').count().orderBy('count', ascending = False).show(5,False)
# + [markdown] id="nmrtLzZkj2o6"
# Categoricals have been converted into Numericals.<br>
# Now to Create the Feature Vector
# + id="N3IxBwCgir11" colab={"base_uri": "https://localhost:8080/"} outputId="0de95e28-5abe-4027-ae89-6892f47ec457"
# Now it is needed to assemble all of the input columns into a single vector that would act as the input feature for the Logistic Regression model.
Feature_assembler = VectorAssembler(inputCols=['Platforms_Vector','Country_Vector','Age', 'Repeat_Visitor',
'Web_pages_viewed'], outputCol="features")
dfWork = Feature_assembler.transform(dfWork)
dfWork.show(5)
# + id="nQ4YQpXpk0he" colab={"base_uri": "https://localhost:8080/"} outputId="4ff57bec-d4a6-4bcc-e751-10f6cbbd14bc"
dfWork[['features','Status']].show(5,False)
# + id="BbV6JWyllFAV" colab={"base_uri": "https://localhost:8080/"} outputId="ed2d6ac2-dacc-448d-9815-7a40f78b9701"
dfWork.printSchema()
# + [markdown] id="LJKdwEj-iqEF"
# One extra column named features, which is nothing but a combination of all the input features represented as a Single Dense Vector.
# + [markdown] id="Epg5Dqebk1oE"
# ## Reducing the DataSet
# + id="y7L2FMC6mtac" colab={"base_uri": "https://localhost:8080/"} outputId="0aeb13ab-92a0-4457-a8bf-07d5036977d9"
# Now select only features column as input and the Status column as output for training the logistic regression model.
#model_df=df.select(['features','Status'])
dfWork = dfWork.select(['features','Status'])
dfWork.show(10, False)
# + [markdown] id="Om4xUysYnLPX"
# ## Splitting the Dataset :
#
# Split the dataset into a training and test dataset in order to train and evaluate the performance of the logistic regression model. I split it in a 75/25 ratio and train our model on 75% of the dataset.
# + id="liGRm91ToXqh" colab={"base_uri": "https://localhost:8080/"} outputId="83b2c92c-ad01-46d4-85aa-2571d75f7818"
dfTrain, dfTest = dfWork.randomSplit([0.75,0.25])
print( dfTrain.count(), dfTest.count())
# + id="gID8e-TPpVbq" colab={"base_uri": "https://localhost:8080/"} outputId="37e055e3-bfce-4726-8e1f-36e2144aaa66"
dfTrain.groupBy('Status').count().show()
# + [markdown] id="HL3z8iLNpd2o"
# This ensures we have a balance set of the target class (Status) into the
# training and test set.
# + id="_B1sJxkcplLY" colab={"base_uri": "https://localhost:8080/"} outputId="a6da8000-4959-4eb6-cfcd-9cbd0ea8a7e4"
dfTest.groupBy('Status').count().show()
# + [markdown] id="x1_LxSM3qtAg"
# ## Build and Train Logistic Regression Model:
#
# I build and train the logistic regression model using features as the input column and status as the output column.
# + id="_grEDfF_rGTY"
#LogReg =LogisticRegression(labelCol='Status').fit(training_df)
LogReg =LogisticRegression(labelCol='Status')
# + [markdown] id="LQvYEk0SrN8m"
# ## Training Results:
#
# We can access the predictions made by the model using the evaluate function in SPARK that executes all the steps in an optimized way. It gives another Dataframe that contains four columns in total, including prediction and probability.
# + id="CwrHtslXrwCy" colab={"base_uri": "https://localhost:8080/"} outputId="a74c3ffc-da3d-42a7-ed64-c564440c08ba"
TrainResults =LogReg.fit(dfTrain).evaluate(dfTrain).predictions
# + colab={"base_uri": "https://localhost:8080/"} id="TFk1HoQKnMAY" outputId="b54ea376-88c3-4bd4-a173-d8496cf43daf"
TrainResults.count()
# + id="SgpNwTdGr0gz" colab={"base_uri": "https://localhost:8080/"} outputId="1bcf8b35-289a-459c-f198-1342001493fa"
TrainResults.filter(TrainResults['Status']==1).filter(TrainResults['prediction']==1).select(['Status','prediction','probability']).show(10,False)
# + [markdown] id="b_xIoX3msEq_"
# ## Evaluate Logistic Regression Model on Test Data
#
# Now check the performance of the
# model on test data. So the evaluate function is used, to make predictions on the test data.
# + id="7btBzbrnsnqv" colab={"base_uri": "https://localhost:8080/"} outputId="5597d51c-338d-49fc-c0c2-fff780bce7b2"
#results=log_reg.evaluate(test_df).predictions
TestResults = LogReg.fit(dfTrain).evaluate(dfTest).predictions
# + id="53xz0FvmtPIr" colab={"base_uri": "https://localhost:8080/"} outputId="8fbe102a-f8fe-4d1f-8b8a-729a2221df84"
TestResults.printSchema()
# + id="CRxI7GOmtTc9" colab={"base_uri": "https://localhost:8080/"} outputId="cbebf48f-15a8-47a1-ed9f-7ad3a97673d6"
TestResults.select(['Status','prediction']).show(10,False)
# + [markdown] id="1wBoVMmItfa4"
# ### Confusion Matrix:
#
# As this is a classification problem, I use a confusion matrix to
# gauge the performance of the model.
# + id="5DmAVXZ4tzJL"
tp = TestResults[(TestResults.Status == 1) & (TestResults.prediction == 1)].count()
tn = TestResults[(TestResults.Status == 0) & (TestResults.prediction == 0)].count()
fp = TestResults[(TestResults.Status == 0) & (TestResults.prediction == 1)].count()
fn = TestResults[(TestResults.Status == 1) & (TestResults.prediction == 0)].count()
# + [markdown] id="PQQ7jTPhuF2c"
# ### Accuracy:
#
# It is the most basic metric for evaluating any classifier.
# + id="yrdfrdg6uDxu" colab={"base_uri": "https://localhost:8080/"} outputId="25586be7-bc16-4cb9-b8a4-f678911fbf68"
accuracy=float((tp+tn) /(TestResults.count()))
print('Accuracy = ',accuracy)
# + [markdown] id="44E1sVq9un4J"
# ### Recall:
#
# Recall rate shows how much of the positive class cases we are able to predict correctly out of the total positive class observations.
# + id="AcwUhz9quyi1" colab={"base_uri": "https://localhost:8080/"} outputId="53dd3ba8-cf3c-4ab4-837e-61e688892d7f"
recall = float(tp)/(tp + fn)
print('Recall = ',recall)
# + [markdown] id="lVAJJ_Tbu_OL"
# ### Precision:
#
# Precision rate talks about the number of true positives predicted correctly out of all the predicted positives observations.
# + id="GuGVa4mRvJEv" colab={"base_uri": "https://localhost:8080/"} outputId="da120e8d-449e-442a-f849-1676155d4138"
precision = float(tp) / (tp + fp)
print('Precision = ',precision)
# + [markdown] id="VS1RlYijpoJB"
# #Pipeline
# + id="iDF0rK2FtJof"
from pyspark.ml import Pipeline
# + colab={"base_uri": "https://localhost:8080/"} id="L--LkCOgqfOd" outputId="49f1fb74-20d1-4e9d-cd17-f58c63339ef5"
dfCusCon.show(5,False)
# + colab={"base_uri": "https://localhost:8080/"} id="RTtRgsobr9NL" outputId="dd24acad-8731-45d2-b2b5-a8bbc66a66ee"
dfTrainP, dfTestP = dfCusCon.randomSplit([0.75,0.25])
print( dfTrainP.count(), dfTestP.count())
# + id="n0IymtVKquxx"
Platform_indexer = StringIndexer(inputCol="Platform", outputCol="Platform_Num")
Platform_encoder = OneHotEncoder(inputCol="Platform_Num", outputCol="Platforms_Vector")
Platform_encoder.setDropLast(False)
Country_indexer = StringIndexer(inputCol="Country", outputCol="Country_Num")
Country_encoder = OneHotEncoder(inputCol="Country_Num", outputCol="Country_Vector")
Country_encoder.setDropLast(False)
Feature_assembler = VectorAssembler(inputCols=['Platforms_Vector','Country_Vector','Age', 'Repeat_Visitor','Web_pages_viewed'], outputCol="features")
LogReg =LogisticRegression(labelCol='Status',featuresCol='features')
# + id="JClbKsgxsUc_"
mlPipeLine = Pipeline(stages=[Platform_indexer,Platform_encoder, Country_indexer, Country_encoder, Feature_assembler,LogReg])
# training model pipeline with data
mlModel = mlPipeLine.fit(dfTrainP)
TestResults = mlModel.transform(dfTestP)
# + colab={"base_uri": "https://localhost:8080/"} id="kc-UPa10tWue" outputId="8c0e8727-c3fe-44f4-f4ff-03d4c7c02e67"
TestResults.printSchema()
# + colab={"base_uri": "https://localhost:8080/"} id="CIhtvU0vt9os" outputId="866aad97-6725-4b0d-a492-b2aa8a38763e"
TestResults.select(['Status','prediction']).show(10,False)
# + [markdown] id="0pGyk8mPuJEK"
# ## Confusion Matrix
# + id="Txw6_YY4uNVQ"
tp = TestResults[(TestResults.Status == 1) & (TestResults.prediction == 1)].count()
tn = TestResults[(TestResults.Status == 0) & (TestResults.prediction == 0)].count()
fp = TestResults[(TestResults.Status == 0) & (TestResults.prediction == 1)].count()
fn = TestResults[(TestResults.Status == 1) & (TestResults.prediction == 0)].count()
# + [markdown] id="Td4cEU1ruYwW"
# ## Accuracy
# + colab={"base_uri": "https://localhost:8080/"} id="322wRZLgubKx" outputId="f985e526-81fb-4636-b204-18cefe272ee2"
accuracy_P=float((tp+tn) /(TestResults.count()))
print('Accuracy with Pipeline = ',accuracy_P)
# + [markdown] id="jIn90q89urRr"
# ## Recall
# + colab={"base_uri": "https://localhost:8080/"} id="hkesaacduv41" outputId="5ae60282-dbf3-476b-9207-8ca04d401cb9"
recall_P = float(tp)/(tp + fn)
print('Recall with Pipeline = ',recall_P)
# + [markdown] id="hRjFwRebwq5A"
# ##Precision
# + colab={"base_uri": "https://localhost:8080/"} id="aXv0piVQwtH5" outputId="239acc9a-85df-474d-f6a1-ee146d87592b"
precisionP = float(tp) / (tp + fp)
print('Precision with Pipeline = ',precisionP)
# + [markdown] id="YxgYBq3YxIp1"
# #Comparison
# + colab={"base_uri": "https://localhost:8080/"} id="UsNWohoVxOWf" outputId="ec7314fa-3226-41e5-df7a-ef2b1da9db69"
print('Accuracy :', round(accuracy_P,2), ' (with Pipeline) ', round(accuracy,2),' (without)')
print('Recall :', round(recall_P,2), ' (with Pipeline) ', round(recall,2),' (without)')
print('Precision :', round(precisionP,2), ' (with Pipeline) ', round(precision,2),' (without)')
|
Pipeline_1_Customer_Conversion.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Notebook 1: Why is Machine Learning difficult?
# ## Overview
#
# In this notebook, we will get our hands dirty trying to gain intuition about why machine learning is difficult.
#
# Our task is going to be a simple one, fitting data with polynomials of different order. Formally, this goes under the name of polynomial regression. Here we will do a series of exercises that are intended to give the reader intuition about the major challenges that any machine learning algorithm faces.
#
# ## Learning Goal
#
# We will explore how our ability to predict depends on the number of data points we have, the "noise" in the data, and our knowledge about relevant features. The goal is to build intuition about why prediction is difficult and discuss general strategies for overcoming these difficulties.
#
#
# ## The Prediction Problem
#
# Consider a probabilistic process that gives rise to labeled data $(x,y)$. The data is generated by drawing samples from the equation
# $$
# y_i= f(x_i) + \eta_i,
# $$
# where $f(x_i)$ is some fixed, but (possibly unknown) function, and $\eta_i$ is a Gaussian, uncorrelated noise variable such that
# $$
# \langle \eta_i \rangle=0 \\
# \langle \eta_i \eta_j \rangle = \delta_{ij} \sigma
# $$
# We will refer to the $f(x_i)$ as the **true features** used to generate the data.
#
# To make predictions, we will consider a family of functions $g_\alpha(x;\theta_\alpha)$ that depend on some parameters $\theta_\alpha$. These functions represent the **model class** that we are using to try to model the data and make predictions. The $g_\alpha(x;\theta_\alpha)$ encode the class of **features** we are using to represent the data.
#
# To learn the parameters $\boldsymbol{\theta}$, we will train our models on a **training data set** and then test the effectiveness of the model on a <i>different</i> dataset, the **test data set**. The reason we must divide our data into a training and test dataset is that the point of machine learning is to make accurate predictions about new data we have not seen. As we will see below, models that give the best fit to the training data do not necessarily make the best predictions on the test data. This will be a running theme that we will encounter repeatedly in machine learning.
#
#
# For the remainder of the notebook, we will focus on polynomial regression. Our task is to model the data with polynomials and make predictions about the new data that we have not seen.
# We will consider two qualitatively distinct situations:
# <ul>
# <li> In the first case, the process that generates the underlying data is in the model class we are using to make predictions. For polynomial regression, this means that the functions $f(x_i)$ are themselves polynomials.
# <li>In the second case, our data lies outside our model class. In the case of polynomial regression, this could correspond to the case where the $f(x_i)$ is a 10-th order polynomial but $g_\alpha(x;\theta_\alpha)$ are polynomials of order 1 or 3.
# </ul>
#
# In the exercises and discussion we consider 3 model classes:
# <ul>
# <li> the case where the $g_\alpha(x;\theta_\alpha)$ are all polynomials up to order 1 (linear models),
# <li> the case where the $g_\alpha(x;\theta_\alpha)$ are all polynomials up to order 3,
# <li> the case where the $g_\alpha(x;\theta_\alpha)$ are all polynomials up to order 10.
# </ul>
#
# To measure our ability to predict, we will learn our parameters by fitting our training dataset and then making predictions on our test data set. One common measure of predictive performance of our algorithm is to compare the predictions,$\{y_j^\mathrm{pred}\}$, to the true values $\{y_j\}$. A commonly employed measure for this is the sum of the mean square-error (MSE) on the test set:
# $$
# MSE= \frac{1}{N_\mathrm{test}}\sum_{j=1}^{N_\mathrm{test}} (y_j^\mathrm{pred}-y_j)^2
# $$
# We will return to this in later notebooks. For now, we will try to get a qualitative picture by examining plots on test and training data.
#
# ## Fitting vs. predicting when the data is in the model class
#
#
# We start by considering the case:
# $$
# f(x)=2x.
# $$
# Then the data is clearly generated by a model that is contained within all three model classes we are using to make predictions (linear models, third order polynomials, and tenth order polynomials).
#
#
# Run the code for the following cases:
# <ul>
# <li> For $f(x)=2x$, $N_{\mathrm{train}}=10$ and $\sigma=0$ (noiseless case), train the three classes of models (linear, third-order polynomial, and tenth order polynomial) for a training set when $x_i \in [0,1]$. Make graphs comparing fits for different order of polynomials. Which model fits the data the best?
# <li> Do you think that the data that has the least error on the training set will also make the best predictions? Why or why not? Can you try to discuss and formalize your intuition? What can go right and what can go wrong?
# <li>Check your answer by seeing how well your fits predict newly generated test data (including on data outside the range you fit on, for example $x \in [0,1.2]$) using the code below. How well do you do on points in the range of $x$ where you trained the model? How about points outside the original training data set?
# <li>Repeat the exercises above for $f(x)=2x$, $N_{\mathrm{train}}=10$, and $\sigma=1$. What changes?
# <li>Repeat the exercises above for $f(x)=2x$, $N_{\mathrm{train}}=100$, and $\sigma=1$. What changes?
# <li> Summarize what you have learned about the relationship between model complexity (number of parameters), goodness of fit on training data, and the ability to predict well.
# </ul>
#
#
# ## Fitting vs. predicting when the data is not in the model class
# Thus far, we have considered the case where the data is generated using a model contained in the model class. Now consider $f(x)=2x-10x^5+15x^{10}$. *Notice that for the linear and third-order polynomial the true model $f(x)$ is not contained in the model class $g_\alpha(x)$* .
#
# <ul>
# <li> Repeat the exercises above fitting and predicting for $f(x)=2x-10x^5+15x^{10}$ for $N_{\mathrm{train}}=10,100$ and $\sigma=0,1$. Record your observations.
# <li> Do better fits lead to better predictions?
# <li> What is the relationship between the true model for generating the data and the model class that has the most predictive power? How is this related to the model complexity? How does this depend on the number of data points $N_{\mathrm{train}}$ and $\sigma$?
# <li> Summarize what you think you learned about the relationship of knowing the true model class and predictive power.
#
# # Training the models:
# +
import numpy as np
# %matplotlib inline
from sklearn import datasets, linear_model
from sklearn.preprocessing import PolynomialFeatures
from matplotlib import pyplot as plt, rcParams
def fcn_linear(x):
    """Linear target function f(x) = 2x (works on scalars and arrays)."""
    return x * 2
def fcn_10(x):
    """Tenth-order polynomial target: f(x) = 2x - 10x^5 + 15x^10."""
    fifth = x ** 5
    # Factor the two high-order terms through x^5 to avoid recomputing powers.
    return 2 * x + fifth * (15 * fifth - 10)
class GenerateData():
    """Sample labeled data y = fcn(x) + Gaussian noise on an even x grid."""

    def __init__(self, fcn):
        # Ground-truth function used to generate the labels.
        self.fcn = fcn

    def y(self, min_data, max_data, N_data, sigma_data):
        """Return (x, y): N_data evenly spaced points and their noisy labels."""
        grid = np.linspace(min_data, max_data, N_data)
        noise = sigma_data * np.random.randn(N_data)
        return grid, self.fcn(grid) + noise
class PolynomialFit():
    """Least-squares polynomial regression of a chosen degree.

    Thin wrapper around sklearn's PolynomialFeatures + LinearRegression that
    accepts 1-D x arrays for both fitting and prediction.
    """

    def __init__(self, x, y, degree=1):
        self.poly = PolynomialFeatures(degree=degree)
        # sklearn expects a 2-D design matrix, so lift x to shape (N, 1).
        design = self.poly.fit_transform(x[:, np.newaxis])
        self.sol = linear_model.LinearRegression()
        self.sol.fit(design, y)

    def predict(self, xplot):
        """Evaluate the fitted polynomial at the 1-D array of points xplot."""
        design = self.poly.fit_transform(xplot[:, np.newaxis])
        return self.sol.predict(design)
# +
# The Training Data
N_train = 100
sigma_train = 1.0
# BUG FIX: min_train/max_train previously held swapped values
# (min_train = 0.95 > max_train = 0.05), so linspace built the training grid
# in descending order, contrary to the variable names.
min_train = 0.05
max_train = 0.95
function = fcn_linear
data_function = GenerateData(fcn_linear)
# Draw N_train noisy samples of f(x) = 2x on [min_train, max_train].
xtrain, ytrain = data_function.y(min_train, max_train, N_train, sigma_train)
# +
#This is Python Notebook to walk through polynomial regression examples
#We will use this to think about regression
# %matplotlib inline
# Linear Regression : create linear regression object
clf = linear_model.LinearRegression()
clf.fit(xtrain[:, np.newaxis], ytrain) # sklearn requires design matrix (N_train, N_features). reshape x (N_train, 1)
# Polynomial Regression: fit polynomials of increasing order to the same data.
poly3 = PolynomialFit(xtrain, ytrain, 3)
poly7 = PolynomialFit(xtrain, ytrain, 7)
poly10 = PolynomialFit(xtrain, ytrain, 10)
# BUG FIX: xplot was used below but never defined anywhere before this cell
# (NameError). Build the dense grid used for drawing the fitted curves.
xplot = np.linspace(min_train, max_train, 200)
fig = plt.figure(figsize=(8, 6))
plt.plot(xtrain, ytrain, "o", ms=8, alpha=0.5, label='Training')
plt.plot(xplot, clf.predict(xplot[:, np.newaxis]), label='Linear')
plt.plot(xplot, poly3.predict(xplot), label='Poly 3')
plt.plot(xplot, poly7.predict(xplot), label='Poly 7')
plt.plot(xplot, poly10.predict(xplot), label='Poly 10')
plt.legend(loc='best')
plt.ylim([-7,7])
plt.xlabel("x")
plt.ylabel("y")
Title="N=%i, $\sigma=%.2f$"%(N_train,sigma_train)
plt.title(Title+" (train)")
# Linear Filename
filename_train="train-linear_N=%i_noise=%.2f.pdf"%(N_train, sigma_train)
# Tenth Order Filename
#filename_train="train-o10_N=%i_noise=%.2f.pdf"%(N_train, sigma_train)
# Saving figure and showing results
#plt.savefig(filename_train)
plt.grid()
plt.show()
# -
# -
# # Testing the fitted models
# +
# Number of test data
N_test = 1000
sigma_test = 1.0
# Test interval extends past the training range to probe extrapolation.
min_test = 0.00
max_test = 1.05
# Generate random grid points (x) in the interval [0, max_x]:
# Note some points will be drawn outside the training interval
xtest, ytest = data_function.y(min_test, max_test, N_test, sigma_test)
# + format="tab"
# Generate Test Data
# %matplotlib inline
# Make design matrices for prediction
x_plot=np.linspace(min_test,max_test, 200)
plt.plot(xtest, ytest, 'o', ms=10, alpha=0.5, label='test data')
plt.plot(x_plot, clf.predict(x_plot[:, np.newaxis]), label='Linear')
plt.plot(x_plot, poly3.predict(x_plot), label='Poly 3')
plt.plot(x_plot, poly7.predict(x_plot), label='Poly 7')
plt.plot(x_plot, poly10.predict(x_plot), label='Poly 10')
plt.legend(loc='best')
plt.xlabel('x')
plt.ylabel('y')
plt.legend(loc='best')
Title="N=%i, $\sigma=%.2f$"%(N_test,sigma_test)
plt.title(Title+" (pred.)")
plt.tight_layout()
plt.ylim((-6,12))
# Linear Filename
filename_test="pred-linear_N=%i_noise=%.2f.pdf"%(N_test, sigma_test)
# Tenth Order Filename
#filename_test=Title+"pred-o10.pdf"
# Saving figure and showing results
plt.savefig(filename_test)
plt.grid()
plt.show()
# -
|
jupyter_notebooks/notebooks/NB1_CII-ML_is_difficult.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="eyUmQ24P-f3y"
# # An Investigation of Political Partisanship Across Time using Data Science
#
# By <NAME> and <NAME>
# + [markdown] id="urA9MNBS_FEE"
# ## Introduction and Motivation
#
#
#
#
#
#
#
#
# + [markdown] id="VyBcNm1xonbX"
# Politics has been at the forefront of many people’s minds in recent years, especially during this year’s pandemic. Due to political news and activism being spread or discussed in virtually every social media platform, more and more people are forming opinions about recent events. Opposing views have always brought conflict, but in the digitalization age of today, this has become more pronounced.
#
# This year’s presidential election created international interest and discussion, leading to a record voter turnout. In contrast, congressional elections were discussed little. In fact, although virtually every American citizen knows who the current president of the United States is, many do not know their representatives in the two chambers of the Legislative Branch: the Senate and the House of Representatives. These representatives are responsible for the creation of the laws of the United States of America. If congress cannot come to an agreement, the government will grind to a halt, regardless of the divisiveness of the American people.
#
# What happens when congress can’t agree? The consequences range depending on the importance of the bill, but disagreements in recent years have led to the [shutdown](http://www.crfb.org/papers/qa-everything-you-should-know-about-government-shutdowns) of the federal government and the absence of stimulus packages to the American people during this pandemic.
#
# Why can’t agreements be reached? The modern American political system is, for all intents and purposes, a [two-party system](https://www.history.com/topics/us-presidents/america-101-why-do-we-have-a-two-party-system-video) that the Democratic and Republican parties have dominated. In recent years, it seems that representatives increasingly vote according to the will of the parties; thus, if there is no clear majority in a chamber of Congress, it becomes hard to pass legislature.
#
# This paper attempts to determine if political partisanship has increased by gathering congressional voting records after 1940 and seeing if an increasing number of congressmen have voted together along party lines. Individual features of each vote are also included such as the type of motion, the representativeness of the decision, and the title of the vote. This project follows the Data Science steps of
# 1. Data Collection
# 2. Data Processing
# 3. Exploratory Analysis & Data Visualization
# 4. Analysis, Hypothesis Testing, & Machine Learning
# 5. Insight & Policy Decision
#
# By the end, we create a model that shows the change in political partisanship over recent years and can predict the political partisanship of future congressional votes.
# + [markdown] id="ylcHZcHpof4a"
# ## Table of Contents
# + [markdown] id="q4IyFWlBokaR"
#
#
# 1. Part 1: Data Collection
# >* Identify a raw data source
# >* Scrape the data
#
# 2. Part 2: Data Processing
# >* Part 2a: Data Pre-processing
# >>* Convert the individual arrays of information into two pandas dataframes
# >>* Clean the scraped data
# >>* Batch Download
# >* Part 2b: Data Post-processing
# >>* Batch Uploads
# >>* Delete attribute that gets added during csv conversion
# >>* Batch Stitching
# >>* Final Cleaning
# >>* Final Cleaning
# >>* Final Post-Processing
#
# **Checkpoint: Use our data to avoid the scraping steps above**
#
# 3. Part 3: Exploratory Analysis & Data Visualization
# >* Senate Representation and PercentYea Analysis
# >* Partisanship in Congress
# >>* Partisanship in the Senate
# >>* Partisanship in the House
# 4. Part 4: Analysis, Hypothesis Testing and Machine Learning
# >* The Necessity of Averaging
# >* Senate: Multilinear Linear Regression
# >* House: Multilinear Linear Regression
# >* Senate Ridge Regression
# >* House Ridge Regression
# >* Senate Nearest Neighbor
# >* House Nearest Neighbor
# 5. Part 5: Insight, Policy Decision, and Importance
# >* Summary
# >* Financial Importance
# >* Social Importance
# >* The future of this project
#
# + [markdown] id="8-PZh_aG_Vgt"
# # Part 1: Data Collection: Identify a raw data source
# + [markdown] id="HbbDexBzu_Xq"
# ## Identify a raw data source
#
# Data science requires data. In an ideal world, you get the data directly from the source. In this case, the closest thing to the source is government websites such as congress.gov, senate.gov, or house.gov. At a glance, this data would appear easy to scrape from (example: https://clerk.house.gov/Votes/2020235). However, this website does not appear to have easily accessible votes from the mid-1940’s. Instead, we turn to a website called [GovTrack.us](https://www.govtrack.us/), which is a reputable source that appears to have done data collection from websites in the government domain, as well as other public data sources such as https://voteview.com/. In this website, the voting records of from the 1st congress in 1789 are viewable in a format that can be automatically scraped.
#
# Clearly, the US was very different in the 18th and 19th centuries. For one, there were a lot more political parties, including the Federalist party which is no longer influential today. Therefore, we narrowed the scope of our project to start at the beginning of 1941. The year was chosen due to party realignment reasons detailed here (https://history.house.gov/Exhibitions-and-Publications/BAIC/Historical-Essays/Keeping-the-Faith/Party-Realignment--New-Deal/) and also because this is a breakpoint in how these webpages were labeled in the GovTrack.us website, and we wanted to make scraping as streamlined as possible.
# + [markdown] id="I98VdTKnzkH4"
# ## Scrape the Data
# + [markdown] id="kQc4vZbc_941"
# ### Import dependencies and libraries for project
#
#
# + id="LHAvwBdb_9GN"
import numpy as np
import pandas as pd
import requests
from bs4 import BeautifulSoup
import re
from urllib.request import urlopen, Request
import matplotlib.pyplot as plt
from urllib.error import HTTPError
from google.colab import files
from time import sleep
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials
import math
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from numpy.polynomial.polynomial import polyfit
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn import neighbors
from math import sqrt
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import Ridge
from sklearn.model_selection import RepeatedKFold
from numpy import mean
from numpy import std
from numpy import absolute
from pandas import read_csv
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
# + [markdown] id="1EtTo5mOAQFK"
# ### Set up preliminary datastructures
#
# Although the url naming convention for votes was fairly consistent, there was a few years that did not follow the same pattern. They are specified in the dictionary called houseFirstBill
# + id="V98UEnMyAuO1"
# Browser-style request header; sent with every GovTrack request so the
# https connection is accepted (user-agent string preserved verbatim).
hdr = {'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',}

# First House vote number to try for the years whose vote numbering does not
# start at 1 — lets the scraper jump straight to a valid URL instead of
# walking through hundreds of 404s. Any year not listed here starts at 1.
houseFirstBill = {
    1972: 321,
    1974: 542,
    1976: 613,
    1978: 707,
    1980: 673,
    1982: 355,
    1984: 499,
    1986: 440,
    1988: 489,
}
# + [markdown] id="iONdNpCI1BFg"
# ### Scrape the data
#
# The scrape method goes to each vote webpage starting in either the house or the senate. Parameters:
#
#
# * chamber - specify which chamber is scraped
# * congressNumStart - the first congress to scrape. Ensure it lines up with year
# * congressNumEnd - specifies the last congress to scrape
# * year - the start year to scrape. Ensure it lines up with congressNumStart
# * interval- how many votes in a year are scraped in a row. An interval of 1 would mean that every vote is scraped, an interval two means that every other vote is scraped, and so on
#
# **Explanation of the scrape**
# Each vote takes a couple seconds to scrape over my internet connection, so from start to finish the scraping process would have taken multiple days to complete if executed all at once.
#
# How to scrape all the house data:
# * scrape('House',11,116, 1941, 1)
#
# How to scrape all the senate data:
# * scrape('Senate',11,116, 1941, 1)
#
# During the scraping process, because we used Google Colab and due to the large number of votes that are available to gather, code execution would frequently stop and all the data scraped would be lost.
#
# As this happened multiple times over the 2 week scraping process, I realized that the execution would stop more frequently in the later end of the scrape. Therefore, I did a preliminary scrape for both chambers to get as much of the data as I could. Then, using the parameters of the scrape function, I got the data decade by decade. In the post processing step later on in this project, the data from the numerous individual scrapes are combined.
#
# How to scrape the senate data from the 1980's:
# * scrape('House',96,101, 1979, 1)
#
# If you are looking to test this scrape yourself, you might find that getting every vote even from a decade takes some time. However, if you choose a smaller time frame AND a bigger interval, you can get a sample of votes quickly.
#
# How to scrape every 20th vote from the 1980's
# * scrape('House',96,101, 1979, 20)
#
# Important notes:
# 1. When specifying the start year, ensure it aligns with the FIRST year for that congress. For example, the 101st congress lasts from 1979 to 1980. Please choose 1979 as the start year, even if you only want the 80's.
# 2. How do you know which congresses go which years, and so forth? Go to https://www.govtrack.us/congress/votes and look at the dropdown filter on the left side of the page.
# 3. When scraping a subset of votes, hereby called a **"batch"**, run the *entirety* of the following cell so that the previous batch's results are cleared from the arrays.
# 4. Don't be alarmed if you see errors while scraping, 404's are expected as part of the scraping process and 502s are handled with a constant retry policy that is simplistic, un-optimized, yet effective. For example: running the above scrape yields:
#
# >>```
# starting House : 96 year 1979 vote# 1
# HTTP Error 404: Not Found https://www.govtrack.us/congress/votes/96-1979/h681
# starting House : 96 year 1980 vote# 673
# HTTP Error 404: Not Found https://www.govtrack.us/congress/votes/96-1980/h1293
# ```
#
# >>This is fine, it's simply telling you that it tried to scrape votes from 1 to 681, before realizing that there are no more votes for that year. Then it tells you which vote it will start off on in the next year.
# + id="deCVXhlnA5o3" colab={"base_uri": "https://localhost:8080/"} outputId="d0d2d8ce-8289-41a1-f354-fae756ef952c"
voteID = 0  # sequential id shared by congressTable.VoteID and voteTable.VID
CID = 0     # sequential id; incremented once per scraped vote (see NOTE below)
# Accumulators for the congress (per-vote summary) table
CIDs = []
CongressNames = []
Dates = []
CongressYears = []
SeqVoteIDs = []
Titles = []
Motions = []
Representations = []
PercentYea = []
URLS = []
Chambers = []
# Accumulators for the vote (per-action detail) table
VoteIDs = []
TypesofVotes = []
PercentsofVote = []
NumbersofVotes = []
Pubs = []
Dems = []

def scrape(chamber, congressNumStart, congressNumEnd, year, interval):
    """Scrape GovTrack vote pages into the module-level accumulator lists.

    Parameters:
        chamber          -- 'House' or 'Senate' (first letter builds the URL).
        congressNumStart -- first congress number to scrape (inclusive).
        congressNumEnd   -- last congress number to scrape (inclusive).
        year             -- first calendar year of congressNumStart's term.
        interval         -- scrape every `interval`-th vote (1 = every vote).

    Each congress spans two calendar years; for every year, vote URLs are
    tried sequentially until a 404 after at least one hit (end of year) or
    until vote number 750 with no hit at all.

    NOTE(review): CID is incremented per *vote*, not per (congress, year)
    pair as its description suggests — preserved as-is because the stitched
    tables were built with this numbering.
    """
    global CID
    global voteID
    year -= 1           # pre-decrement so the += 1 in the loop starts at `year`
    congressNumEnd += 1  # make range() inclusive of the last congress
    for congressNum in range(congressNumStart, congressNumEnd):
        for i in range(2):  # each congress covers two calendar years
            foundFirst = False
            year += 1
            # first vote number to try for this chamber/year
            if chamber == 'House':
                voteNum = houseFirstBill.get(year, 1)
            else:
                voteNum = 1
            print("starting ", chamber, ": ", congressNum, "year ", year, "vote#", voteNum)
            # walk vote numbers until the year's votes are exhausted
            while True:
                url = 'https://www.govtrack.us/congress/votes/' + str(congressNum) + '-' + str(year) + '/' + str(chamber[0]).lower() + str(voteNum)
                req = Request(url, headers=hdr)
                try:
                    response = urlopen(req).read()
                    soup = BeautifulSoup(response, 'lxml')
                    foundFirst = True
                    URLS.append(url)
                    Titles.append(soup.h1.text)
                    # the date lives in a div identified only by its inline style
                    date = soup.find("div", {"style": "margin: 2px 0 16px 0; font-size: 12px; font-weight: normal; color: black; line-height: 125%; padding-bottom: 6px; border-bottom: 1px solid #CCA;"}).text
                    Dates.append(date)
                    Chambers.append(chamber)
                    # motion / requirement paragraph (may be absent)
                    motion = soup.find('p', {"style": "margin: 1em 0 0 0; font: 12px/20px serif;"})
                    if motion is None:
                        Motions.append("unknown")
                    else:
                        Motions.append(motion.text)
                    # population-representation percentage (may be absent)
                    representation = soup.find('p', {"style": "margin: 0; font: 12px/20px serif;"})
                    if representation is None:
                        Representations.append("unknown")
                    else:
                        # FIX: raw string — '\d' is an invalid escape in a plain literal
                        Representations.append(re.findall(r'\d+', representation.text)[0])
                    # the tally is the second-to-last table on the page
                    table = soup.find_all('table')[-2]
                    df1 = pd.read_html(str(table), flavor='html5lib')[0]
                    # congress-table columns
                    CIDs.append(CID)
                    CongressNames.append(congressNum)
                    CongressYears.append(year)
                    SeqVoteIDs.append(voteID)
                    if df1['Unnamed: 0'].size > 0 and df1.loc[0, 'Unnamed: 0'] == 'Yea':
                        PercentYea.append(str(df1.loc[0, 'All Votes'])[:-1])  # strip trailing '%'
                    else:
                        PercentYea.append('unknown')
                    # vote-table columns: one row per action (Yea/Nay/...)
                    for index, row in df1.iterrows():
                        VoteIDs.append(voteID)
                        TypesofVotes.append(df1.loc[index, 'Unnamed: 0'])
                        NumbersofVotes.append(df1.loc[index, 'All Votes.1'])
                        Pubs.append(df1.loc[index, 'Republicans'])
                        Dems.append(df1.loc[index, 'Democrats'])
                    voteID += 1
                    voteNum += interval
                    CID += 1
                except HTTPError as err:
                    print(err, url)
                    if err.code == 404:
                        if foundFirst:
                            break  # past the last vote of this year
                        elif voteNum > 750:
                            break  # give up: nothing found for this year
                        else:
                            voteNum += interval  # leading gap; keep probing
                    elif err.code == 502:
                        sleep(2)  # transient gateway error: retry same vote
#!!!change the line below to scrape a batch of your choosing!!!
scrape('House',96,97, 1979, 30)
# + [markdown] id="Mv7liJslAsre"
# # Part 2: Data Processing
# + [markdown] id="7-ul0pgLBeFU"
# ## Part 2a: Data Pre-processing
#
# At this point, you have successfully scraped a batch of votes from a single chamber into arrays. These arrays are then put in pandas dataframes before being cleaned. How should this information be organized? The solution we devised was a dual table/dataframe system.
#
# The first table would list each vote scraped: a single vote would have a single entry. Attributes:
# * CID - the unique identifier for a year, congress combination
# * CName - the name of the congress, represented numerically, ex(101)
# * Date - the date that the vote occurred
# * Chamber - the chamber of the vote
# * VoteID - the unique identifier for a vote.
# * Motions - The motion specified for each vote.
# * PercentYea - the percent of votes that supported the motion
# * Representation - " The Yea votes represented 47% of the country’s population by apportioning each state’s population to its voting senators." - GovTrack.us
# * URL - the URL that each vote can be viewed. Acts as a good manual "sanity check" to ensure that the entries have the correct data.
# * VoteTitle - the title of the vote
#
# The second table goes into detail for each vote. Each entry lists a type of action for each vote and who voted for that action. Attributes:
# * VID - the vote ID that identifies each vote. Each vote should have 1+ rows for the 1+ actions
# * Type - the type of action that can be done on each vote. There will always be a Yea action for each vote. There will almost always be a Nay action, and occasionally actions for Present and Not Voting.
# * Number - how many total people took that action
# * Republicans - how many republicans took that action
# * Democrats - how many democrats took that action
# * Others - although we confined the range of years so that Democrats and Republicans would be the major parties in play, the website occasionally lists the votes of Independents, Progressives, or unknowns that add a couple votes to the total. We are combining these three groups because they are not guaranteed to be present in every congress.
#
# Note: the current code has each batch's CID and VoteID start at 0. We then "fix" these values in post processing so they are unique for the combined data. If you want to avoid that step, you can add an extra parameter to the scrape function that specifies the initial value that CID and VoteID should have.
#
# + [markdown] id="eYERr1t8BnxM"
# ### Convert the individual arrays of information into the two dataframes
#
# Dataframes called
# * congressTable
# * voteTable
#
#
# + id="NcB4UHPtBl5o"
# Per-vote summary table. 'VoteTitle' is not declared here but assigned
# below, so it ends up as the last column, after the ones listed.
congressTable = pd.DataFrame(columns=["CID", "CName", "Date", "Chamber", "VoteID", 'Motions', 'PercentYea','Representation', "URL"])
# Per-action detail table. 'Others' is declared here but only filled in the
# cleaning cell below (Number - Republicans - Democrats); until then it is NaN.
voteTable = pd.DataFrame(columns=["VID", "Type", "Number", "Republicans", "Democrats", "Others"])
# Populate the summary table from the scrape accumulators (one entry per vote)
congressTable['CID'] = CIDs
congressTable['CName'] = CongressNames
congressTable['Date'] = Dates
congressTable['Chamber'] = Chambers
congressTable['VoteID'] = SeqVoteIDs
congressTable['VoteTitle'] = Titles
congressTable['Motions'] = Motions
congressTable['PercentYea'] = PercentYea
congressTable['Representation'] = Representations
congressTable['URL'] = URLS
# Populate the detail table (one entry per vote *action*, keyed by VID)
voteTable['VID'] = VoteIDs
voteTable['Type'] = TypesofVotes
voteTable['Number'] = NumbersofVotes
voteTable['Democrats'] = Dems
voteTable['Republicans'] = Pubs
# + [markdown] id="TQLfyM9JB2bh"
# ### Clean the scraped data
#
# The initial scrapes take a while, so virtually no cleaning takes place during the scraping process so it can complete as quickly as possible. The majority of the cleaning of the data takes place here, mostly doing things like trimming html tags
# + id="PmBtnH8GB3gN"
#calc others (progressives, independents, unknowns): whatever part of the
# total tally is not accounted for by the two major parties
voteTable['Others'] = voteTable['Number'] - voteTable['Republicans'] - voteTable['Democrats']
# perform mapping from month abbreviation to number
# Month abbreviation (as shown on GovTrack) -> calendar month number
m = {
    'Jan': 1,
    'Feb': 2,
    'Mar': 3,
    'Apr': 4,
    'May': 5,
    'Jun': 6,
    'Jul': 7,
    'Aug': 8,
    'Sep': 9,
    'Oct': 10,
    'Nov': 11,
    'Dec': 12
}

def convertToDT(string):
    """Convert a raw scraped date string into a pandas Timestamp.

    Assumes the GovTrack format "Mon D, YYYY" (e.g. "Jan 23, 1979"): the
    first number found is the day, the second the year, and the first
    capitalised word the month abbreviation.
    """
    # FIX: raw strings — '\d' is an invalid escape sequence in a plain
    # literal (SyntaxWarning on Python >= 3.12); semantics are unchanged.
    match = re.findall(r'\d+', string)
    day = match[0]
    year = match[1]
    month = re.findall(r'[A-Z][a-z]+', string)[0]
    return pd.to_datetime(str(m[month]) + "/" + str(day) + "/" + str(year))
# Row-by-row cleanup of the scraped congress table.
for index, row in congressTable.iterrows():
    # convert raw date text to a pandas Timestamp
    congressTable.loc[index, 'Date'] = convertToDT(str(congressTable.loc[index, 'Date']))
    # split raw 'Motions' text ("<motion>.<requirement>") into two columns.
    # FIX: raw string — '\s' is an invalid escape in a plain literal
    # (SyntaxWarning on Python >= 3.12); the regex itself is unchanged.
    string1 = re.sub(r'\n|\s', '', congressTable.loc[index, 'Motions'])
    matches = string1.split('.')
    if matches[1] == 'unknownRequired':
        matches[1] = 'unknown'
    congressTable.loc[index, 'Motion'] = matches[0]
    congressTable.loc[index, 'RequirementsToPass'] = matches[1]
    # strip embedded newlines left over from the html
    congressTable.loc[index, 'Representation'] = re.sub(r'\n', '', congressTable.loc[index, 'Representation'])
    congressTable.loc[index, 'VoteTitle'] = re.sub(r'\n', '', congressTable.loc[index, 'VoteTitle'])
    # 'na' means no percent-yea tally was published; treat as 0
    if(congressTable.loc[index, 'PercentYea'] == 'na'):
        congressTable.loc[index, 'PercentYea'] = 0
# raw 'Motions' has been split into 'Motion'/'RequirementsToPass'; drop it
congressTable.drop(columns=['Motions'], inplace=True)
# + [markdown] id="fvCpW0cbB72J"
# ### Batch Download
#
# At this stage, you have a batch of cleaned vote data in two dataframes. Although there is no foreign key relationship, the tables are linked by the VoteID. For the purposes of this project, download the two csvs, knowing that you HAVE to preserve the voteID link; the voteIDs can change later, but an entry in the congressTable needs to map to the same set of entries in the voteTable.
# + id="5yY8chHuB9q3" colab={"base_uri": "https://localhost:8080/", "height": 17} outputId="213ce6e2-9c35-4c5f-e4d7-90dc30fcaef8"
# Persist both tables and push them to the browser (Colab-only `files`).
# Note: to_csv writes the row index too, which is why an 'Unnamed: 0'
# column has to be dropped after re-uploading in the post-processing step.
voteTable.to_csv('voteTable.csv')
congressTable.to_csv('congressTable.csv')
files.download('voteTable.csv')
files.download('congressTable.csv')
# + [markdown] id="J_WoEiHRuUmF"
# ## Part 2b: Data Post-processing
#
# If you have a number of batch csv files for the senate and house, we need to re-upload them and join them. The exact code below will depend on the how many batches you have, this is what we did for our batches.
# + [markdown] id="XVKmUJ6HEkN6"
# ### Batch Uploads
#
# This code is specific for Google Collab execution
# + id="-sdc4J1VEiPc"
# Authenticate this Colab session so PyDrive can fetch the shared csv batches
# for anyone who does not have them locally.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)

def _load_drive_csv(share_link, local_name):
    """Download one publicly shared Drive file and return it as a DataFrame.

    The file id is the second-to-last path component of a standard
    "https://drive.google.com/file/d/<id>/view?usp=sharing" link.
    Replaces the 22 copy-pasted id/CreateFile/GetContentFile/read_csv blocks.
    """
    file_id = share_link.split("/")[-2]
    drive.CreateFile({'id': file_id}).GetContentFile(local_name)
    return pd.read_csv(local_name)

# senate batch share links
linkCT03A = "https://drive.google.com/file/d/1_8QrsbNSGfQpekb0XH9N3uwSK2ON0vXf/view?usp=sharing"
linkCT03B = "https://drive.google.com/file/d/1PW_ZDISK_pTYXvGL74_dE-0__BLqDMWQ/view?usp=sharing"
linkVT03A = "https://drive.google.com/file/d/1-LcPvUfYNPsWByHzchiAlBOftIlp__eV/view?usp=sharing"
linkVT03B = "https://drive.google.com/file/d/1hyHekveQCgVC9_vqyHP5ku9Nolrqvd_T/view?usp=sharing"
linkS1990 = "https://drive.google.com/file/d/188IA0q6s6mdpWKcQHRrxlw1VjSBXQXNb/view?usp=sharing"
linkS2000 = "https://drive.google.com/file/d/1WJWfcKg_R27j2jtxM1iVSWcU9Rhtp7kE/view?usp=sharing"
linkS2010 = "https://drive.google.com/file/d/13xTwibyBr3xCyrJNctFJC_reCgBww7HC/view?usp=sharing"
linkSV1990 = "https://drive.google.com/file/d/14otYJU6hkob_vrGR_lzozEGGPBGo45Xe/view?usp=sharing"
linkSV2000 = "https://drive.google.com/file/d/1BXTbhVCKE5UJdoMqMbkz5uWmY23bD8r8/view?usp=sharing"
linkSV2010 = "https://drive.google.com/file/d/1BtaXljJ_6Hr1WK2H1pET71rdE2vcPh2v/view?usp=sharing"
# house batch share links
linkHT03A = "https://drive.google.com/file/d/1VhJl_MAZ-qY3Q-UfKHgHi0sVAWdrdfYI/view?usp=sharing"
linkHVT03A = "https://drive.google.com/file/d/1HSbymM3QzshxHxO4W2uxfpvJleP1bQgm/view?usp=sharing"
linkHT1970 = "https://drive.google.com/file/d/1T3vJsVjSPBBlr8NYk4aL-AQfXHSJp8Dk/view?usp=sharing"
linkHT1980 = "https://drive.google.com/file/d/10a5YAj0cBS_eU6k-1eBKVjq-GjBymBtg/view?usp=sharing"
linkHT1990 = "https://drive.google.com/file/d/17KKG-kmfVbDh5aU4eaqymTzaH5Vt7dhj/view?usp=sharing"
linkHT2000 = "https://drive.google.com/file/d/1oDYT6JObLeP2ckuVTwKr6qVYW0_DfDDv/view?usp=sharing"
linkHT2010 = "https://drive.google.com/file/d/1CGOQLuNf_a6NupPdxI51SYyecoB7w68B/view?usp=sharing"
linkHVT1970 = "https://drive.google.com/file/d/1zhNFLkQO_wtgoPUFGxXGNpCa-6ObdpAC/view?usp=sharing"
linkHVT1980 = "https://drive.google.com/file/d/11qI7YlATHsFi9zNEAgEMaUVdYfRns_cr/view?usp=sharing"
linkHVT1990 = "https://drive.google.com/file/d/1cU5QTCOp7qamXYE29p1O4IFBCsoAB6Cs/view?usp=sharing"
linkHVT2000 = "https://drive.google.com/file/d/1diV1rEC3xLo5jbnSU6soK-rnZcwuVYUr/view?usp=sharing"
linkHVT2010 = "https://drive.google.com/file/d/1DbT7Ksf6_cWC4VtCwqGtbfen1NnGct0E/view?usp=sharing"

# senate data
inputedCongressTablea = _load_drive_csv(linkCT03A, 'senateTable03a.csv')
inputedCongressTableb = _load_drive_csv(linkCT03B, 'senateTable03b.csv')
inputedVoteTablea = _load_drive_csv(linkVT03A, 'senateVoteTable03a.csv')
inputedVoteTableb = _load_drive_csv(linkVT03B, 'senateVoteTable03b.csv')
ist1990 = _load_drive_csv(linkS1990, 'senDec1990.csv')
ist2000 = _load_drive_csv(linkS2000, 'senDec2000.csv')
ist2010 = _load_drive_csv(linkS2010, 'senDec2010.csv')
isvt1990 = _load_drive_csv(linkSV1990, 'senVDec1990.csv')
isvt2000 = _load_drive_csv(linkSV2000, 'senVDec2000.csv')
isvt2010 = _load_drive_csv(linkSV2010, 'senVDec2010.csv')
# house data
inputedHouseTablea = _load_drive_csv(linkHT03A, 'houseTable03a.csv')
inputedhouseVoteTablea = _load_drive_csv(linkHVT03A, 'houseVoteTable03a.csv')
iht1970 = _load_drive_csv(linkHT1970, 'houDec1970.csv')
iht1980 = _load_drive_csv(linkHT1980, 'houDec1980.csv')
iht1990 = _load_drive_csv(linkHT1990, 'houDec1990.csv')
iht2000 = _load_drive_csv(linkHT2000, 'houDec2000.csv')
iht2010 = _load_drive_csv(linkHT2010, 'houDec2010.csv')
ihvt1970 = _load_drive_csv(linkHVT1970, 'houVDec1970.csv')
ihvt1980 = _load_drive_csv(linkHVT1980, 'houVDec1980.csv')
ihvt1990 = _load_drive_csv(linkHVT1990, 'houVDec1990.csv')
ihvt2000 = _load_drive_csv(linkHVT2000, 'houVDec2000.csv')
ihvt2010 = _load_drive_csv(linkHVT2010, 'houVDec2010.csv')
# + [markdown] id="V4d0-LOSFGm8"
# ### Delete attribute that gets added during csv conversion
# + id="qISGwULaFWm_"
# Strip the 'Unnamed: 0' artifact column that the to_csv/read_csv round-trip
# adds to every senate batch (it is the saved row index).
for _frame in (
    ist1990, ist2000, ist2010,
    isvt1990, isvt2000, isvt2010,
    inputedCongressTablea, inputedCongressTableb,
    inputedVoteTablea, inputedVoteTableb,
):
    del _frame['Unnamed: 0']
# + id="VXwFNQ_bW-w0"
# Strip the same csv round-trip index column from every house batch.
for _frame in (
    iht1970, iht1980, iht1990, iht2000, iht2010,
    ihvt1970, ihvt1980, ihvt1990, ihvt2000, ihvt2010,
    inputedHouseTablea, inputedhouseVoteTablea,
):
    del _frame['Unnamed: 0']
# + [markdown] id="9nr9V7RhFcQz"
# ### Batch Stitching
# The next set of steps is to take the different senate/house tables and senate/house vote tables and to stitch them all together into a singular senate/house table and singular senate/house vote table.
#
# Again, the most important thing is to preserve the link between the voteIDs of the two tables, although we also ensure that there are no duplicate CIDs.
# + id="0zXIyCJr49x4"
# Senate table b overlaps the per-decade batches that are stitched on below,
# so drop its rows from 1990 onward. Row 8763 is where 1990 starts
# (cross-referenced manually against the csv).
inputedCongressTableb = inputedCongressTableb[:8763] #only keep stuff before 1990 because everything after will be added by decade
# + id="1zpDIMuYdHTr"
# Merge senate congress tables a + b (they were separated due to size
# issues). Offset b's ids so numbering continues where table a stops.
inputedCongressTableb['CID'] = inputedCongressTableb['CID'] + len(inputedCongressTablea['CID'])
inputedCongressTableb['VoteID'] = inputedCongressTableb['VoteID'] + len(inputedCongressTablea['VoteID'])
frames = [inputedCongressTablea, inputedCongressTableb]
inputedCongressTable = pd.concat(frames, sort=False)
inputedCongressTable = inputedCongressTable.reset_index()
del inputedCongressTable['index']
# BUG FIX: DataFrame.astype returns a *new* frame; the original code called it
# and discarded the result, so none of the conversions ever took effect.
# Assign the result back so the id columns really become ints and the
# percentage columns really become floats for the later arithmetic/ML cells.
inputedCongressTable = inputedCongressTable.astype({'CID': 'int32', 'CName': 'int32', 'VoteID': 'int32'})
# 'unknown' placeholders become NaN so the columns can be numeric
# (np.nan instead of np.NaN: the latter alias was removed in numpy 2.0)
inputedCongressTable['PercentYea'] = inputedCongressTable['PercentYea'].replace("unknown", np.nan)
inputedCongressTable['Representation'] = inputedCongressTable['Representation'].replace("unknown", np.nan)
inputedCongressTable = inputedCongressTable.astype({'PercentYea': 'float', 'Representation': 'float'})
# + id="VlpIc-yeXl_0"
def fix(smaller, addingTo):
    """Append a decade batch onto the running congress table.

    Re-numbers the batch's CID (positional column 0) and VoteID (positional
    column 4) so they continue directly after the last ids already present
    in *addingTo*. Mutates *smaller* in place and returns the combined
    table with a fresh 0..n-1 index.
    """
    # last used id + 1 gives the first free id for the incoming batch
    smaller['CID'] = smaller['CID'] + addingTo.iloc[-1, 0] + 1
    smaller['VoteID'] = smaller['VoteID'] + addingTo.iloc[-1, 4] + 1
    combined = pd.concat([addingTo, smaller], sort=False)
    return combined.reset_index(drop=True)
# + id="h2o3tYH7XfQV"
# Stitch each senate decade batch onto the combined congress table;
# fix() renumbers CID/VoteID so the ids stay unique across batches.
inputedCongressTable = fix(ist1990, inputedCongressTable)
inputedCongressTable = fix(ist2000, inputedCongressTable)
inputedCongressTable = fix(ist2010, inputedCongressTable)
# + id="LBciszBObS7s"
# As with the congress table, drop vote-table b's rows from 1991 onward so
# the per-decade batches can supply them instead.
inputedVoteTableb = inputedVoteTableb[:25517] # needed to find in the csv file b where 1991 first started and drop that to use the proper decades table
#this was cross referenced via VID in the corresponding senateTable data
# + id="uIESLti8dSi1"
# Merge senate vote tables a + b. Table b's VIDs restart at 0, so shift
# them by the number of votes in table a (one congress-table row per vote)
# before concatenating, then rebuild a clean 0..n-1 index.
inputedVoteTableb['VID'] = inputedVoteTableb['VID'] + len(inputedCongressTablea['CID'])
merged_frames = [inputedVoteTablea, inputedVoteTableb]
inputedVoteTable = pd.concat(merged_frames, sort=False)
inputedVoteTable = inputedVoteTable.reset_index()
del inputedVoteTable['index']
# + id="qIxXefU2ZRru"
def fixVT(smaller, addingTo):
    """Append a decade batch of vote-detail rows onto the running table.

    Shifts the batch's VID (positional column 0) so numbering continues
    directly after the last VID already present in *addingTo*. Mutates
    *smaller* in place and returns the combined table with a fresh index.
    """
    # last used VID + 1 gives the first free id for the incoming batch
    smaller['VID'] = smaller['VID'] + addingTo.iloc[-1, 0] + 1
    combined = pd.concat([addingTo, smaller], sort=False)
    return combined.reset_index(drop=True)
# + id="K-cnxrd_Zt2e"
# Stitch the remaining senate vote-table decades onto the running table.
for _votes in (isvt1990, isvt2000, isvt2010):
    inputedVoteTable = fixVT(_votes, inputedVoteTable)
# + id="84DgI8NJWput"
# Stitch the House congress tables decade by decade, starting from the 1960s base.
inputedHouseTable = fix(iht1970, inputedHouseTablea)
for _decade in (iht1980, iht1990, iht2000, iht2010):
    inputedHouseTable = fix(_decade, inputedHouseTable)
# + id="FIKON4MJY73Q"
# Same for the House vote tables.
inputedHouseVoteTable = fixVT(ihvt1970, inputedhouseVoteTablea)
for _votes in (ihvt1980, ihvt1990, ihvt2000, ihvt2010):
    inputedHouseVoteTable = fixVT(_votes, inputedHouseVoteTable)
# + [markdown] id="GLbLh5xhwCfo"
# ### Final Cleaning
#
# Ensure dates are in date-time format so we can easily reference months/years later in later ML sections.
# + id="YR9Cj59_-k9R"
#Change to date time
# Parse the scraped ISO date strings so .dt / .year accessors work below.
inputedHouseTable['Date'] = pd.to_datetime(inputedHouseTable['Date'], format='%Y-%m-%d')
# + id="L9oumBwjdVdy"
#Changing the date from string to date time
inputedCongressTable['Date'] = pd.to_datetime(inputedCongressTable['Date'], format='%Y-%m-%d')
# + [markdown] id="up6R9NT4wU36"
# ### Final Post-Processing
#
# The more variables or attributes that are provided, the more options that you have when building machine learning models. We wanted to incorporate three more attributes to each vote that are not explicitly generated in the scrape that might aid in building a comprehensive model.
#
# 1. 4Code - This attribute represents the years that a president has been in office, per term. For example, Obama was elected in 2008, so 2008 has a 4code of 0. 2009, 2010, and 2011 have the codes 1,2, and 3 respectively. He was re-elected in 2012, so 2012 has a 4code of of 0. This pattern repeats. Because presidents have four year terms, this is essentially year % 4
# 2. 8Code - This attribut represents the years that a president has been in office, over both terms. As with code4, 2008 has a 4code of 0. 2009, 2010, and 2011 still have the codes 1,2, and 3 respectively. However, 2012 has a code8 == 4 and 2013, 2014, 2015 have code8's of 5, 6, and 7 respectively.
# 3. Month - the month that the vote occured.
# + id="wj3DF3a-qoVl"
#8Code maps a calendar year to the number of years since the start of the
#sitting president's current tenure.  The reset years below come from
#election results and must be maintained by hand (as the original noted).
_TERM_STARTS = (1940, 1948, 1952, 1960, 1968, 1976, 1980, 1988,
                1992, 2000, 2008, 2016, 2020)
CodeConverter = {}
for _start, _stop in zip(_TERM_STARTS, _TERM_STARTS[1:] + (2021,)):
    for _yr in range(_start, _stop):
        CodeConverter[_yr] = _yr - _start
# + id="lt07eVhskoIU"
#adding the code converter digits to the respective spots in the dataframe to be used for ML later!
# PERFORMANCE FIX: the original iterrows loop wrote one cell at a time via
# .loc, which is O(n) Python-level writes.  Vectorised version below produces
# the same values; .astype(float) preserves the float dtype the incremental
# scalar assignments produced.
_years = inputedCongressTable['Date'].dt.year
inputedCongressTable['4Code'] = (_years % 4).astype(float)
inputedCongressTable['8Code'] = _years.map(CodeConverter).astype(float)
inputedCongressTable['Month'] = inputedCongressTable['Date'].dt.month.astype(float)
# + id="ZguIwIwxWrRQ"
# Vectorised equivalent of the original iterrows loop (same values, same
# float dtype, far faster) -- adds 4Code/8Code/Month to the House table.
_h_years = inputedHouseTable['Date'].dt.year
inputedHouseTable['4Code'] = (_h_years % 4).astype(float)
inputedHouseTable['8Code'] = _h_years.map(CodeConverter).astype(float)
inputedHouseTable['Month'] = inputedHouseTable['Date'].dt.month.astype(float)
# + [markdown] id="FA1RfIlguuhL"
# ### Download finished data
#
# At this point, we recommend downloading the final tables so you have a saved copy of them and can follow the steps of everyone else who is coming in at this point.
# + id="yTJP4orfu6mO" colab={"base_uri": "https://localhost:8080/", "height": 17} outputId="956983ec-6f5f-4e3f-db03-7b2ba437bfac"
# Persist the four finished tables and trigger browser downloads
# (Google Colab `files` API -- side effects only, no return values used).
inputedCongressTable.to_csv('senateCongressTable.csv')
inputedHouseTable.to_csv('houseCongressTable.csv')
inputedVoteTable.to_csv('senateVoteTable.csv')
inputedHouseVoteTable.to_csv('houseVoteTable.csv')
files.download('senateCongressTable.csv')
files.download('houseCongressTable.csv')
files.download('senateVoteTable.csv')
files.download('houseVoteTable.csv')
# + [markdown] id="LC3m3dpeNNtQ"
# # Checkpoint: Use our data to avoid scraping
#
# At this point, we have finalized 4 tables that hold all the raw data needed for the rest of the project. We recommend that people interested in recreating this project begin code execution at this point, before we begin further analysis.
# + [markdown] id="gsdbT5YywzFB"
# ### Read in CSVs
# + id="mSvp4AqhN3mA"
# Reload the checkpointed tables (paths assume Google Colab's /content).
inputedCongressTable = pd.read_csv('/content/senateCongressTable.csv')
inputedHouseTable = pd.read_csv('/content/houseCongressTable.csv')
inputedVoteTable = pd.read_csv('/content/senateVoteTable.csv')
inputedHouseVoteTable = pd.read_csv('/content/houseVoteTable.csv')
# + [markdown] id="MSKX0m_5wwQ9"
# ### Clean dataframes
#
# Same methods as before
# + id="fDtzIBQtw7cM"
# Drop the index column that to_csv wrote out as 'Unnamed: 0'.
del inputedCongressTable['Unnamed: 0']
del inputedHouseTable['Unnamed: 0']
del inputedVoteTable['Unnamed: 0']
del inputedHouseVoteTable['Unnamed: 0']
# + [markdown] id="VzEBcna33GZh"
# You are now ready to continue on to part 3
# + [markdown] id="mm0WsqC9L1ig"
# # Part 3: Exploratory Analysis & Data Visualization
# + [markdown] id="jR5E7CmFOPg9"
# ### Senate Representation and PercentYea Analysis
#
# The Senate tables have the attribute Representation that was scraped into the congressTable dataframe. This attribute indicates what percent of the population is represented by the verdict of the motion: either Nay or Yea. The verdict can be determined by looking at the PercentYea attribute: percentYea values over 50 are Yea.
# + id="WHxr3OME8K1z" colab={"base_uri": "https://localhost:8080/"} outputId="8336eeec-f6ae-4622-8590-0988b0ecbc8c"
# Handle the scraper's "unknown" placeholders: convert to NaN, then to float,
# so the means below are numeric.
inputedCongressTable['PercentYea'] = (
    inputedCongressTable['PercentYea'].replace("unknown", np.NaN).astype(float)
)
inputedCongressTable['Representation'] = (
    inputedCongressTable['Representation'].replace("unknown", np.NaN).astype(float)
)
print("Percent of America represented by the winning vote on average over the entire dataset: " + str(inputedCongressTable['Representation'].mean()))
print("Percent of Senators that voted Yea on average over the entire dataset: " + str(inputedCongressTable['PercentYea'].mean()))
# + [markdown] id="YaAAuXay8T_g"
# On average 71% of Americans are represented by the winning vote in the senate and 62% of the time a senator will vote yes.
#
# This may seem rather odd, how can senators represent almost 3/4 of the country on average from 1941 - 2020? It is important to remember that during this time period there were periods of unity where large chunks of the country would be in agreement for a particular bill. The media tends to skew how we see partisanship. This is not trying to explain away partisanship, because that does exists and that will be seen! However, it is important to note that throughout US history congress has not always been this divided, at least by this metric.
#
# The high senate Yea vote can be partially explained by what was explained previously. The more recent years could still contribute to this trend. Let us take a look at that directly! What does Percent Yea over time look like?
# + id="ThpvQfnC8X5M" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="45f83637-3665-4cc1-cef1-b0f3e2706d31"
# Mean PercentYea per congress (grouped by congress name), scattered over time.
grouped = inputedCongressTable.groupby('CName');
toPlot = grouped['PercentYea'].agg(np.mean);
keys = grouped.groups.keys();
plt.scatter(keys, toPlot);
plt.title('Percent Voting Yea in the Senate')
plt.xlabel('Congress Number')
plt.ylabel('Percent Yea')
plt.show()
# + [markdown] id="C5AffbmK8bZx"
# When we group by individual congress ID over time it seems as if senators were voting on average more for yes! Could this indicate more unity across the parties?
#
# While this is unlikely, it cannot be ruled out. One possible explanation would be that only bills that are "certain" to pass will be brought to the floor in order to avoid wasting time. Most deals are done behind closed doors away from the public. See this example for how Democrats managed to push through [Health Care Reform](http://www.digitaljournal.com/article/285771).
# While that is only one example the HuffPost made a link [here](https://www.huffpost.com/entry/backroom-deals-a-bipartis_b_442330) about these examples throughout history!
#
# + id="u7BZvnsc8eTb" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="740864d5-5f59-4b56-b3c2-7a72a56d941b"
# Mean Representation per congress -- same grouping as the PercentYea plot.
grouped = inputedCongressTable.groupby('CName');
toPlot = grouped['Representation'].agg(np.mean);
keys = grouped.groups.keys();
plt.scatter(keys, toPlot);
plt.title('Representation Percent in the Senate')
plt.xlabel('Congress ID')
plt.ylabel('Representation Percentage')
plt.show()
# + [markdown] id="ZXodBvHO8gVn"
# Going back to representation of the people we see an interesting inverted U. This is the first strong example of partisanship. Why? Well the senate is equally weighted among all states (Two senators each), as a result a smaller state like Wyoming will have equal reprsenation with a behemoth like New York, this was well documented by [The Guardian](https://www.theguardian.com/us-news/2018/nov/08/democrats-republicans-senate-majority-minority-rule).
#
# Because of this we can see a reversion to lower representation percentage as republicans only represent smaller parts of the country but have equal and at times even larger say in the vote in the senate
#
#
# + [markdown] id="84lG4YyLRlqz"
# ## Partisanship in Congress
#
# Let us now switch our focus to seeing the difference between Republicans and Democrats when voting over time.
# + [markdown] id="tgGjBeZvTB-U"
# ### Partisanship in the Senate
# + [markdown] id="rWlBdtp-j-nw"
# We need a way to define a vote ID to a "partisanship score" meaning, "was a specific vote particularly partisan". We will define this as the number of republicans that voted along party lines, vs the number of democrats that voted across party lines. The formulas are listed below the cell!
# + id="RP1ZP7wafLgD"
# Partisanship scoring for every senate vote.  For each vote ID, look up its
# Yea/Nay party tallies in the vote table and compute:
#   repConsensus = |repYea - repNay| / total republican votes
#   demConsensus = |demYea - demNay| / total democrat votes
#   cooperation  = |repYea/repTotal - demYea/demTotal|   (partisanship proxy)
# Columns are read positionally: 1 = verdict, 3 = Republicans, 4 = Democrats.
repConsensusList = []
demConsensusList = []
cooperationList = []
for i in range(len(inputedCongressTable['VoteID'])):
    temp = inputedVoteTable[inputedVoteTable['VID'] == i]
    repSum = temp['Republicans'].sum()
    demSum = temp['Democrats'].sum()
    if len(temp) > 1 and temp.iloc[0][1] == 'Yea' and temp.iloc[1][1] == 'Nay':
        # Both a Yea row and a Nay row exist: read tallies directly.
        repY = temp.iloc[0][3]
        repN = temp.iloc[1][3]
        demY = temp.iloc[0][4]
        demN = temp.iloc[1][4]
    else:
        # Only one row: infer the Nay counts from the party totals.
        repY = temp.iloc[0][3]
        repN = repSum - repY
        demY = temp.iloc[0][4]
        # BUG FIX: the original computed `demN = repSum - repY`, i.e. the
        # democrat nays came from the REPUBLICAN totals.
        demN = demSum - demY
    repConsensusList.append(abs(repY - repN) / repSum)
    demConsensusList.append(abs(demY - demN) / demSum)
    cooperationList.append(abs((repY / repSum) - (demY / demSum)))
# + [markdown] id="SjMC-390ks7k"
# demC will be the consensus among democrats on some bill computed as:
# |Democrat Yea Vote - Democrat Nay Vote| / Total Democrat votes
# This is calculated exactly the same for republicans
# Cooperation the inverse of partisanship is calculated as
# | (Republican Yea/Republican Total) - (Democrat Yea / Democrat Total) |
# + id="YBZ1IoHnfqUi"
# Work on a copy so the raw congress table stays untouched, then insert the
# three computed series as columns at position 11.  Each insert pushes the
# earlier ones right, so the final column order is Partisanship, demC, repC.
ict = inputedCongressTable.copy()
rCLA = np.array(repConsensusList)
dCLA = np.array(demConsensusList)
cCLA = np.array(cooperationList)
for _col, _vals in (("repC", rCLA), ("demC", dCLA), ("Partisanship", cCLA)):
    ict.insert(11, _col, _vals)
# + [markdown] id="Ap9CYSWmlAOO"
# After having the respective demC and repC values we can being analyzing it. Let us take a look at the data given if we can plot a regression directly.
# + id="NSyXoC9Vo21t" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="9e562262-6a9c-42d6-f5b4-13a6f6d6903a"
# Raw per-vote democratic consensus over time -- intentionally dense;
# motivates the yearly averaging that follows.
plt.scatter(ict['Date'], ict['demC']);
plt.title('Democratic Cooperation over the Years')
plt.xlabel('Year')
plt.ylabel('Democratic Cooperation Percentage')
plt.show()
# + [markdown] id="za9D_ti06cax"
# This graph is too hectic to decipher anything, so let us average on the years for both democrats and republicans to see if a trend exists.
# + id="84An8O1AhOBF"
#method that averages a dataframe column per calendar year
def avgDataOverYears(home, columnName, start=1941, stop=2020):
    """Return the per-year mean of ``columnName`` for years in [start, stop).

    Parameters
    ----------
    home : DataFrame with an integer 'year' column.
    columnName : name of the column to average.
    start, stop : half-open year range.  The defaults (1941, 2020) preserve
        the original hard-coded 79-entry span; pass other values when new
        decades of tables come in.

    Years with no matching rows yield NaN (mean of an empty selection),
    exactly as the original did.
    """
    toReturn = []
    for yr in range(start, stop):
        toReturn.append(home.loc[home['year'] == yr, columnName].mean())
    return toReturn
# + id="MpAn8W_IZjy6"
tempDf = ict.copy() #make another copy to not affect the previous
tempDf['year'] = pd.DatetimeIndex(tempDf['Date']).year #isolate year as a variable to allow for use in regression as a variable
# + id="elDM_I5TcqBV"
# Yearly means of the three partisanship measures (lists of 79 values, 1941-2019).
demCAvgRunning = avgDataOverYears(tempDf, 'demC')
repCAvgRunning = avgDataOverYears(tempDf, 'repC')
coopAvgRunning = avgDataOverYears(tempDf, 'Partisanship')
# + id="4JfovpfL7WOt" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="deec8647-e8b8-405b-dd38-84ec328f2d81"
rCAR = np.array(repCAvgRunning)
years = [i for i in range(1941, 2020, 1)]
year = range(1941, 2020, 1)
# numpy.polyfit returns coefficients highest-degree first: (slope, intercept).
# BUG FIX: the original unpacked them as `b, m`, so the plotted line was
# intercept*x + slope and the "Slope" label actually showed the intercept.
m, b = polyfit(years, rCAR, 1)
plt.plot(year, m * np.asarray(year) + b, label=("Slope = " + str(m)))
plt.scatter(year, rCAR)
plt.title('Republican Consensus over the Years')
plt.xlabel('Year')
plt.ylabel('Percent in Agreement')
plt.legend(loc="upper left")
plt.show()
# + id="O9o5Zixeurm_" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="25a250a3-e7b0-447d-df5c-444648a41da7"
dCAR = np.array(demCAvgRunning)
years = [i for i in range(1941, 2020, 1)]
year = range(1941, 2020, 1)
# BUG FIX: polyfit returns (slope, intercept); original unpacked as `b, m`,
# swapping the coefficients in both the plotted line and the label.
m, b = polyfit(years, dCAR, 1)
plt.plot(year, m * np.asarray(year) + b, label=("Slope = " + str(m)))
plt.scatter(year, dCAR)
plt.title('Democratic Consensus over the Years')
plt.xlabel('Year')
plt.ylabel('Percent in Agreement')
plt.legend(loc="upper left")
plt.show()
# + [markdown] id="-k1OW41Rmj14"
# **Observations**
# + [markdown] id="Cp3xhpL47afh"
# Strong correlation for both parties to have more in-group consensus over time. We do see slight drops from the 60-80's and then a rapid increase thereafter!
#
# This is supported by [Time](https://time.com/2862299/how-the-united-states-is-growing-more-partisan-in-10-charts/), which presented work by the Pew Research Center. The graph titled Polarization and Presidential Approval shows smaller differences between Republicans and Democrats during the 60-80's.
#
# Small drops are seen every four/eight years. That is of much interest and will be explored in the Machine Learning section.
#
# It can be noted that democratic consensus has increased a lot more to catch up, and even surpass, average republican consensus. If both republican/democratic in-group cooperativity has increased partisanship must have also increased, right?
#
# + id="Aa7FtJEJjTAt" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="6d3ab601-ae5f-4f6c-a5c1-4f677597914c"
cCar = np.array(coopAvgRunning)
years = [i for i in range(1941, 2020, 1)]
year = range(1941, 2020, 1)
# BUG FIX: polyfit returns (slope, intercept); original unpacked as `b, m`,
# swapping the coefficients in both the plotted line and the label.
m, b = polyfit(years, cCar, 1)
plt.plot(year, m * np.asarray(year) + b, label=("Slope = " + str(m)))
plt.scatter(year, cCar)
plt.title('Partisanship over the Years')
plt.xlabel('Year')
plt.ylabel('Percent in Agreement')
plt.legend(loc="upper left")
plt.show()
# + [markdown] id="ufgPxKN8kHF0"
# Yes, that is exactly what we see the Partisanship graph mirrors the Democratic and Republican division.
# + [markdown] id="2yPlZPrS5CE1"
# ### Partisanship in the House
#
# Let us now do the exact same thing with the House!
# + id="tiPH28zA0zB3" colab={"base_uri": "https://localhost:8080/"} outputId="e0919bf3-7182-4f83-ba89-ba8012f388c2"
# Handle the scraper's "unknown" placeholders in the House table: convert to
# NaN, then to float, so the mean below is numeric.
inputedHouseTable['PercentYea'] = (
    inputedHouseTable['PercentYea'].replace("unknown", np.NaN).astype(float)
)
inputedHouseTable['Representation'] = (
    inputedHouseTable['Representation'].replace("unknown", np.NaN).astype(float)
)
print("No representation data available")
print("Percent of house members that voted Yea on average over the entire dataset: " + str(inputedHouseTable['PercentYea'].mean()))
# + [markdown] id="acjH50_LZ7WK"
# We see in the House that, on average over the years, congressmen vote Yea about 70% of the time. This is about 8% higher than Senators. There is no current research on why this is; it remains to be investigated in the future.
# + id="dOE6MiqL1m1U" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="2aab37c6-8f48-4835-e87e-b9efe149bfe6"
# Mean PercentYea per House congress -- mirrors the senate plot above.
grouped = inputedHouseTable.groupby('CName');
toPlot = grouped['PercentYea'].agg(np.mean);
keys = grouped.groups.keys();
plt.scatter(keys, toPlot);
plt.title('Percent Voting Yea in the House')
plt.xlabel('Congress ID')
plt.ylabel('Percent Yea')
plt.show()
# + [markdown] id="nPsn4DJ9R72S"
# The house is voting Yea much more than the senate as seen previously. It also experiences the four/eight year swings!
# + id="R7QDNeOC2Mf3"
# Partisanship scoring for every House vote -- same formulas as the senate
# loop (columns positionally: 1 = verdict, 3 = Republicans, 4 = Democrats).
repConsensusList = []
demConsensusList = []
cooperationList = []
for i in range(len(inputedHouseTable['VoteID'])):
    temp = inputedHouseVoteTable[inputedHouseVoteTable['VID'] == i]
    repSum = temp['Republicans'].sum()
    demSum = temp['Democrats'].sum()
    if len(temp) > 1 and temp.iloc[0][1] == 'Yea' and temp.iloc[1][1] == 'Nay':
        # Both a Yea row and a Nay row exist: read tallies directly.
        repY = temp.iloc[0][3]
        repN = temp.iloc[1][3]
        demY = temp.iloc[0][4]
        demN = temp.iloc[1][4]
    else:
        # Only one row: infer the Nay counts from the party totals.
        repY = temp.iloc[0][3]
        repN = repSum - repY
        demY = temp.iloc[0][4]
        # BUG FIX: the original computed `demN = repSum - repY` (republican
        # totals) instead of the democrat totals.
        demN = demSum - demY
    repConsensusList.append(abs(repY - repN) / repSum)
    demConsensusList.append(abs(demY - demN) / demSum)
    cooperationList.append(abs((repY / repSum) - (demY / demSum)))
# + id="A-fP9dRA4pJA"
# Same treatment for the House: copy the raw table, then insert the computed
# series at position 11 (final column order: Partisanship, demC, repC).
ictH = inputedHouseTable.copy()
rCLA = np.array(repConsensusList)
dCLA = np.array(demConsensusList)
cCLA = np.array(cooperationList)
for _col, _vals in (("repC", rCLA), ("demC", dCLA), ("Partisanship", cCLA)):
    ictH.insert(11, _col, _vals)
# + id="vclt6Aggaj9I" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="362387bb-5882-4863-b3a6-cb123d2957ac"
# Raw per-vote REPUBLICAN consensus in the House over time.
# BUG FIX: the cell plots repC with a Republican ylabel, but the title said
# "Democratic Cooperation" -- corrected to match the data.
plt.scatter(ictH['Date'], ictH['repC']);
plt.title('Republican Cooperation over the Years')
plt.xlabel('Year')
plt.ylabel('Republican Cooperation Percentage')
plt.show()
# + [markdown] id="xVkFhT0oKPkE"
# Again this is way too hectic, so we will average over the year.
# + id="-JJWeRlQe7cY"
tempDf = ictH.copy() #make another copy to not affect the previous
tempDf['year'] = pd.DatetimeIndex(tempDf['Date']).year #isolate year as a variable to allow for use in regression as a variable
# + id="E2TAoMm1c4UY"
# Yearly means for the House (NOTE: overwrites the senate lists of the same name).
demCAvgRunning = avgDataOverYears(tempDf, 'demC')
repCAvgRunning = avgDataOverYears(tempDf, 'repC')
coopAvgRunning = avgDataOverYears(tempDf, 'Partisanship')
# + id="qe9wM7_-fCjO" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="3dd2ab2f-adb4-45ea-96a6-597cb1db58cb"
rCAR = np.array(repCAvgRunning)
years = [i for i in range(1941, 2020, 1)]
year = range(1941, 2020, 1)
# BUG FIX: polyfit returns (slope, intercept); original unpacked as `b, m`,
# swapping the coefficients in both the plotted line and the label.
m, b = polyfit(years, rCAR, 1)
plt.plot(year, m * np.asarray(year) + b, label=("Slope = " + str(m)))
plt.scatter(year, rCAR)
plt.title('Republican Consensus over the Years')
plt.xlabel('Year')
plt.ylabel('Percent in Agreement')
plt.legend(loc="upper left")
plt.show()
# + id="P0e-xM8EfCjQ" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="21e39fb4-3b25-4512-aefc-156efe8124bf"
dCAR = np.array(demCAvgRunning)
years = [i for i in range(1941, 2020, 1)]
year = range(1941, 2020, 1)
# BUG FIX: polyfit returns (slope, intercept); original unpacked as `b, m`,
# swapping the coefficients in both the plotted line and the label.
m, b = polyfit(years, dCAR, 1)
plt.plot(year, m * np.asarray(year) + b, label=("Slope = " + str(m)))
plt.scatter(year, dCAR)
plt.title('Democratic Consensus over the Years')
plt.xlabel('Year')
plt.ylabel('Percent in Agreement')
plt.legend(loc="upper left")
plt.show()
# + [markdown] id="zasSBkn4nFBt"
# **Observations**
# + [markdown] id="1T1PSWaWTi--"
# Similarly to the Senate we see broad increase from both parties. However it is important to note that there is much less cooperation from the democrats in the House. On average they only have 60% cooperation in 2020 based on the regression line, whereas the republicans have about 82% cooperation!
#
# The divide in the democratic party has been [documented](https://time.com/4951191/divided-democratic-party-debates-its-future/) previously, starting as early as 2017. One reason for why this was not seen in the Senate is that the Senate requires increased cooperation among Demoracts to fight the Republican majority.
#
# + id="DkPE-gWJdAe1" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="9689080c-316f-4c91-90df-9d959b06555c"
cCar = np.array(coopAvgRunning)
years = [i for i in range(1941, 2020, 1)]
year = range(1941, 2020, 1)
# BUG FIX: polyfit returns (slope, intercept); original unpacked as `b, m`,
# swapping the coefficients in both the plotted line and the label.
m, b = polyfit(years, cCar, 1)
plt.plot(year, m * np.asarray(year) + b, label=("Slope = " + str(m)))
plt.scatter(year, cCar)
plt.title('Partisanship over the Years')
plt.xlabel('Year')
plt.ylabel('Percent in Agreement')
plt.legend(loc="upper left")
plt.show()
# + [markdown] id="RvE9Nc3dUSTg"
# As expected we see an increase in partisanship over the years. This research supports work done by [Pew](https://www.pewresearch.org/fact-tank/2014/06/12/polarized-politics-in-congress-began-in-the-1970s-and-has-been-getting-worse-ever-since/) that the current Congress polarization began in the 1970's. Both the Senate and House Partisanship graphs show a hard turn upward in 1970.
#
# The other notiable kink in the graph 2001. That can be explained by unity following the 9/11 terrorist attacks.
# + [markdown] id="GLvmAqd8G5Tc"
# # Part 4: Analysis, Hypothesis Testing and Machine Learning
# + [markdown] id="FrpvRi9TT4WE"
# ## The Necessity of Averaging
#
# As we saw above, there are too many points in a single year to do any predicitve measurements, the scatter is too great. However if averaged over every year general trends over time can be extracted, and that is the goal in this section.
# + id="F8x4JeFqaWU3"
#Need to isolate year
# pML: senate copy; pMLH: house copy.  tempDf already carries 'year', so
# re-deriving it for pMLH is a harmless no-op.
pML = ict.copy()
pMLH = tempDf.copy()
pML['year'] = pd.DatetimeIndex(pML['Date']).year
pMLH['year'] = pd.DatetimeIndex(pMLH['Date']).year
# + id="7eWaTZf1X8km"
#making a dataframe for average data on year for senate
# Each call returns one mean per year (79 entries, 1941-2019).
demCAvgRunning = avgDataOverYears(pML, 'demC')
repCAvgRunning = avgDataOverYears(pML, 'repC')
pY = avgDataOverYears(pML, 'PercentYea')
Rep = avgDataOverYears(pML, 'Representation')
Partisanship = avgDataOverYears(pML, 'Partisanship')
fourCode = avgDataOverYears(pML, '4Code')
eightCode = avgDataOverYears(pML, '8Code')
Month = avgDataOverYears(pML, 'Month')
# + id="Q0Fm9kaxiA6W"
# Assemble the senate per-year averages into one frame (one row per year).
data = {
    'DemC': demCAvgRunning,
    'RepC': repCAvgRunning,
    'percentYea': pY,
    'Rep': Rep,
    'Partisanship': Partisanship,
    'Year': list(range(1941, 2020)),
    '4Code': fourCode,
    '8Code': eightCode,
    'Month': Month,
}
df = pd.DataFrame(data)
# + id="7sdtnaodlEIb"
#making a dataframe for average data on year for house
# Same per-year averaging as the senate, on the House copy.
demCAvgRunning = avgDataOverYears(pMLH, 'demC')
repCAvgRunning = avgDataOverYears(pMLH, 'repC')
pY = avgDataOverYears(pMLH, 'PercentYea')
Rep = avgDataOverYears(pMLH, 'Representation')
Partisanship = avgDataOverYears(pMLH, 'Partisanship')
fourCode = avgDataOverYears(pMLH, '4Code')
eightCode = avgDataOverYears(pMLH, '8Code')
Month = avgDataOverYears(pMLH, 'Month')
# + id="9Up8G3-TYlwo"
# Assemble the House per-year averages into one frame (one row per year).
data = {
    'DemC': demCAvgRunning,
    'RepC': repCAvgRunning,
    'percentYea': pY,
    'Rep': Rep,
    'Partisanship': Partisanship,
    'Year': list(range(1941, 2020)),
    '4Code': fourCode,
    '8Code': eightCode,
    'Month': Month,
}
dfH = pd.DataFrame(data)
# + [markdown] id="9hx8nt79U67l"
# We now want to see if the partisanship that was documented through single linear regressions in our previous graphs can be supported by multiple variables!
# + [markdown] id="DoIWX3V8o9JU"
# ## Senate: Multilinear Linear Regression
#
# DemC + RepC + Percent Yea = Partisanship?
# + [markdown] id="ZJFdRzVYV2mC"
# A multilinear linear regression was chosen as the first analysis because it would be expected that with democratic and republican cooperation increasing partisanship should also increase. Percent Yea was also tested to see if it was a reliable source for prediciton. It could have gone one of two ways:
# 1. Senators agree more because they only vote on what is expected
# 2. Partisanship always gets in the way.
#
# The results seem to indicate a mixture as it was an inconsequential value.
# + id="6v2L3RhboeHr" colab={"base_uri": "https://localhost:8080/", "height": 248} outputId="afb68a1a-bacc-4fce-9b2b-007042401f97"
# 3D scatter of the three candidate predictors (DemC, RepC, percentYea).
ax = plt.axes(projection ="3d")
# Creating plot
ax.scatter3D(df['DemC'], df['RepC'], df['percentYea'], color = "green")
plt.title("simple 3D scatter plot")
# show plot
plt.show()
# + [markdown] id="xMkhNvz9VDUF"
# There is possible correlation in this graph; let's dive further.
# + id="GH_YoBMrVDZY" colab={"base_uri": "https://localhost:8080/"} outputId="bb0bafa6-543b-4f02-cbc6-3e334dc5ca71"
#Make numpy array which will be our feature vector
# PERFORMANCE FIX: the original grew the matrix one row at a time with
# np.append (quadratic, plus an unused tempArr).  Build the whole
# [1, DemC, RepC, percentYea] design matrix in one shot -- same values,
# same float64 dtype.
endArr = np.column_stack([
    np.ones(len(df)),
    df.iloc[:, 0],   # DemC
    df.iloc[:, 1],   # RepC
    df.iloc[:, 2],   # percentYea
])
regression_model = LinearRegression()
x = np.array(endArr)
regression_model.fit(x, df['Partisanship'])
print('Slope:' ,regression_model.coef_)
# + id="QKli-0DAHtS1" colab={"base_uri": "https://localhost:8080/"} outputId="c0bfd359-d91b-4a9b-8356-98531e2754ec"
#Randomly picks training and test data
# 80/20 split; no fixed random_state, so results vary run to run.
X_train, X_test, y_train, y_test = train_test_split(x, df['Partisanship'], test_size=0.2)
regression_model.fit(X_train, y_train)
# 10-fold cross-validation on the training split (scores are R^2 values).
scores = cross_val_score(regression_model, X_train, y_train, cv=10)
print("Scores:", scores)
print("Mean:", scores.mean())
print("Standard deviation:", scores.std())
# + [markdown] id="v6L2-DDIVhxE"
# We can see if over a 10 K Fold split there is strong predictive qualities
# + id="d_Me21yQ7yjI" colab={"base_uri": "https://localhost:8080/"} outputId="d769b6d7-2a3d-4d32-e812-9649e125c9eb"
# Held-out R^2 of the fitted model.
regression_model.score(X_test, y_test)
# + [markdown] id="zU5LfDiFpSYc"
# A linear regression multivariate model looking at Democratic Coopertion, Republican Cooperation and Percent of Senators voting yea CAN predict Partisanship over time. However, it is important to note that based on the given slopes percent yea cannot be a reliable source.
# The model works around 93% accuracy which is extremely reliable.
# + [markdown] id="jhPe9GzWZBEp"
# ## House: Multilinear Linear Regression
#
# DemC + RepC + Percent Yea = Partisanship?
# + id="9Okbp2QtZBEq" colab={"base_uri": "https://localhost:8080/", "height": 248} outputId="eb5e521f-e650-4635-f6ae-221202143378"
# 3D scatter of the House predictors (DemC, RepC, percentYea).
ax = plt.axes(projection ="3d")
# Creating plot
ax.scatter3D(dfH['DemC'], dfH['RepC'], dfH['percentYea'], color = "green")
plt.title("simple 3D scatter plot")
# show plot
plt.show()
# + [markdown] id="9uKewF46ZBEq"
# There is possible correlation in this graph; let's dive further.
# + id="s2gnQsQdZBEq" colab={"base_uri": "https://localhost:8080/"} outputId="e48a1bdc-74a8-4bcc-84d8-f12b9aaca694"
#Make numpy array which will be our feature vector
# PERFORMANCE FIX: vectorised design-matrix construction replacing the
# quadratic np.append row loop (and the unused tempArr).
endArr = np.column_stack([
    np.ones(len(dfH)),
    dfH.iloc[:, 0],   # DemC
    dfH.iloc[:, 1],   # RepC
    dfH.iloc[:, 2],   # percentYea
])
regression_model = LinearRegression()
x = np.array(endArr)
regression_model.fit(x, dfH['Partisanship'])
print('Slope:' ,regression_model.coef_)
# + id="Dehm050NZBEr" colab={"base_uri": "https://localhost:8080/"} outputId="db19cdee-b818-40a1-cfea-f01ed7373489"
#Randomly picks training and test data
# 80/20 split; no fixed random_state, so results vary run to run.
X_train, X_test, y_train, y_test = train_test_split(x, dfH['Partisanship'], test_size=0.2)
regression_model.fit(X_train, y_train)
# 10-fold cross-validation on the training split (scores are R^2 values).
scores = cross_val_score(regression_model, X_train, y_train, cv=10)
print("Scores:", scores)
print("Mean:", scores.mean())
print("Standard deviation:", scores.std())
# + [markdown] id="qibFkRWtZBEr"
# We can see if over a 10 K Fold split there is strong predictive qualities
# + id="MW-YGFqZZBEr" colab={"base_uri": "https://localhost:8080/"} outputId="23c778aa-e732-47d1-e000-40043f8e5e2e"
# Held-out R^2 of the fitted model.
regression_model.score(X_test, y_test)
# + [markdown] id="lLY5b4v9ZBEr"
# A linear regression multivariate model looking at Democratic Coopertion, Republican Cooperation and Percent of representatives voting yea CAN predict Partisanship over time. However, it is important to note that based on the given slopes percent yea cannot be a reliable source.
# The model works around 92% accuracy which is reliable, but slightly lower than the same model for the Senate.
# + [markdown] id="8Fuxc7n8plQT"
# ## Senate Ridge Regression
# + [markdown] id="gaaie7ITZz6v"
# A Ridge regression was chosen because we believed their might be some correlation between predictor variables, and the ridge regression allows for those to be considered in the model.
# + id="JRFu72-P77Gw"
#Make numpy array which will be our feature vector
# PERFORMANCE FIX: vectorised design-matrix construction replacing the
# quadratic np.append row loop (and the unused tempArr).
endArr = np.column_stack([
    np.ones(len(df)),
    df.iloc[:, 0],   # DemC
    df.iloc[:, 1],   # RepC
    df.iloc[:, 2],   # percentYea
])
# NOTE(review): this LinearRegression instance is never used in this cell;
# kept only to preserve the module-level name as before.
regression_model = LinearRegression()
x = np.array(endArr)
X_train, X_test, y_train, y_test = train_test_split(x, df['Partisanship'], test_size=0.2)
# define model
model = Ridge(alpha=1.0)
# define model evaluation method
cv = RepeatedKFold(n_splits=10, n_repeats=3, random_state=1)
model.fit(X_train, y_train)
# evaluate model
scores = cross_val_score(model, X_train, y_train, cv=cv, n_jobs=-1)
# + id="gLirsUz6p3xK" colab={"base_uri": "https://localhost:8080/"} outputId="14b5fdb4-c5c6-42c5-e79d-9843d7f7171c"
# Cross-validation R^2 scores for the ridge model.
print("Scores:", scores)
print("Mean:", scores.mean())
print("Standard deviation:", scores.std())
# + id="VwO9mfUQ8uNG" colab={"base_uri": "https://localhost:8080/"} outputId="a4a0d8e0-257c-4b3b-f110-66f630077a92"
# Held-out R^2 of the ridge model.
model.score(X_test, y_test)
# + [markdown] id="W84Jh5jZaNXx"
# The ridge regression model did not perform as well on the data as the linear model did, so we can reject the null hypothesis that there was correlation between the predictor values.
# + [markdown] id="24C-J4mEacEu"
# ## House Ridge Regression
# + [markdown] id="i2g6XfjxacEv"
# A Ridge regression was chosen because we believed their might be some correlation between predictor variables, and the ridge regression allows for those to be considered in the model.
# + id="G7ZnmwxcacEv"
#Make numpy array which will be our feature vector: an intercept column plus
# the first three predictor columns of dfH.
# Build the matrix in one pass; growing an array with np.append inside a
# loop is O(n^2) and the seed rows / tempArr / unused LinearRegression were
# dead weight.
x = np.array([[1, dfH.iloc[i][0], dfH.iloc[i][1], dfH.iloc[i][2]] for i in range(len(dfH))])
# Hold out 20% of the rows for the final test score.
X_train, X_test, y_train, y_test = train_test_split(x, dfH['Partisanship'], test_size=0.2)
# define model
model = Ridge(alpha=1.0)
# define model evaluation method: 10-fold CV repeated 3 times
cv = RepeatedKFold(n_splits=10, n_repeats=3, random_state=1)
model.fit(X_train, y_train)
# evaluate model on the training folds
scores = cross_val_score(model, X_train, y_train, cv=cv, n_jobs=-1)
# + id="VlpuErjfacEw" colab={"base_uri": "https://localhost:8080/"} outputId="71a04f3c-8951-423d-9d0f-8f707da3a56d"
# Report the repeated-CV fold scores for the House ridge model.
print("Scores:", scores)
print("Mean:", scores.mean())
print("Standard deviation:", scores.std())
# + id="8YT124g4acEz" colab={"base_uri": "https://localhost:8080/"} outputId="dd127f47-b4f5-40ed-b318-b44f8ec09c9e"
# R^2 of the fitted ridge model on the held-out test split.
model.score(X_test, y_test)
# + [markdown] id="VXqJ022tacE0"
# The ridge regression model did not perform well on the data at all. We can reject the null hypothesis that there was correlation between the predictor variables.
# + [markdown] id="PahxyebdmH4p"
# ## Senate Nearest Neighbor
#
# Can we use the Nearest Neighbor algorithm to predict partisanship swings during election cycles? Our intuition was that congress would be more divided during election years (4code ==0 or 8code == 0) and less divided during the middle of a term. We found that the 8code is a better predictor, so that is used below.
# + id="VN1ej7hClxwo" colab={"base_uri": "https://localhost:8080/", "height": 248} outputId="0126859b-cc8b-4d1a-b64c-7820974e3cc3"
# 3-D scatter of month vs. year vs. the 8-year election-cycle code (Senate).
axes3d = plt.axes(projection="3d")
axes3d.scatter3D(df['Month'], df['Year'], df['8Code'], color="green")
plt.title("simple 3D scatter plot")
# Render the figure.
plt.show()
# + id="Qf_Lpp5olxwp"
#Make numpy array which will be our feature vector: an intercept column plus
# df columns 8, 5 and 7, built in one pass instead of growing the array
# with np.append (O(n^2)); the seed rows and unused tempArr are gone.
x = np.array([[1, df.iloc[i][8], df.iloc[i][5], df.iloc[i][7]] for i in range(len(df))])
#shuffle first (train_test_split shuffles by default), keeping 10% for test
X_train, X_test, y_train, y_test = train_test_split(x, df['Partisanship'], test_size=0.1)
# + id="pjRy-fyilttL"
rmse_val = []  # RMSE for each candidate k (index 0 <-> k=1)
# Iterate k = 1..40 directly instead of mutating the loop variable (K = K+1).
for k in range(1, 41):
    model = neighbors.KNeighborsRegressor(n_neighbors=k, weights='distance', metric='manhattan')
    model.fit(X_train, y_train)                     # fit the model
    pred = model.predict(X_test)                    # make prediction on test set
    error = sqrt(mean_squared_error(y_test, pred))  # calculate rmse
    rmse_val.append(error)                          # store rmse values
# + id="zzCAWVhwtFK1" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="325c5a61-eaaa-4542-92ee-58207b2174f2"
# Plot RMSE against k to locate the elbow.
curve = pd.DataFrame(rmse_val) #elbow curve
curve.plot()
# + id="7VCeQhsus1Ez"
# k = 22 chosen from the elbow curve above.
model = neighbors.KNeighborsRegressor(n_neighbors = 22, weights = 'distance', metric='manhattan')
# + id="HDKeylSrtPaw" colab={"base_uri": "https://localhost:8080/"} outputId="26789343-03f2-4fef-bb2f-3e77f3626593"
model.fit(X_train, y_train)
# + id="EzeIvYvytTV4" colab={"base_uri": "https://localhost:8080/"} outputId="8a3a4278-feb2-4b0c-f887-4c1976c47ae1"
# R^2 of the final KNN model on the held-out test split.
model.score(X_test, y_test)
# + [markdown] id="jFtjzxODbIvu"
# Our intuition that there is an increase in partisanship around election years is supported by the KNearestNeighbor algorithm, given the clusters that occur in the 3D space around the years in question.
# + [markdown] id="ydplZ8oRbgCl"
# ## House Nearest Neighbor
#
# Similarly, can we use the Nearest Neighbor algorithm to predict partisanship swings during election cycles in the House?
# + id="8u_6Pks_bgCm" colab={"base_uri": "https://localhost:8080/", "height": 248} outputId="6bf69618-2ae6-4610-9265-61b171348b33"
# 3-D scatter of month vs. year vs. the 8-year election-cycle code (House).
axes3d = plt.axes(projection="3d")
axes3d.scatter3D(dfH['Month'], dfH['Year'], dfH['8Code'], color="green")
plt.title("simple 3D scatter plot")
# Render the figure.
plt.show()
# + id="hqPCuG9TbgCp"
#Make numpy array which will be our feature vector: an intercept column plus
# dfH columns 8, 5 and 7, built in one pass instead of growing the array
# with np.append (O(n^2)); the seed rows and unused tempArr are gone.
x = np.array([[1, dfH.iloc[i][8], dfH.iloc[i][5], dfH.iloc[i][7]] for i in range(len(dfH))])
#shuffle first (train_test_split shuffles by default), keeping 10% for test
X_train, X_test, y_train, y_test = train_test_split(x, dfH['Partisanship'], test_size=0.1)
# + id="eab9SCn4bgCq"
rmse_val = []  # RMSE for each candidate k (index 0 <-> k=1)
# Iterate k = 1..40 directly instead of mutating the loop variable (K = K+1).
for k in range(1, 41):
    model = neighbors.KNeighborsRegressor(n_neighbors=k, weights='distance', metric='manhattan')
    model.fit(X_train, y_train)                     # fit the model
    pred = model.predict(X_test)                    # make prediction on test set
    error = sqrt(mean_squared_error(y_test, pred))  # calculate rmse
    rmse_val.append(error)                          # store rmse values
# + id="I7wAYb7GbgCq" colab={"base_uri": "https://localhost:8080/", "height": 286} outputId="21011ed6-a45e-4d66-a0b8-f48636e54838"
# Plot RMSE against k to locate the elbow.
curve = pd.DataFrame(rmse_val) #elbow curve
curve.plot()
# + id="VevQGdSSbgCr"
# k = 9 chosen from the elbow curve above.
model = neighbors.KNeighborsRegressor(n_neighbors = 9, weights = 'distance', metric='manhattan')
# + id="d2nrRB93bgCr" colab={"base_uri": "https://localhost:8080/"} outputId="901f90e2-ae5e-4410-cb94-b7bf7bb8ccec"
model.fit(X_train, y_train)
# + id="b4EstBSJbgCr" colab={"base_uri": "https://localhost:8080/"} outputId="2b5c0a94-efbb-4d50-a354-5c095a5284ce"
# R^2 of the final KNN model on the held-out test split.
model.score(X_test, y_test)
# + [markdown] id="Ttehre-ibgCr"
# The same four/eight year uptick in partisanship exists in the House!
# + [markdown] id="frgya0Q4HARu"
# # Part 5: Insight, Policy Decision, and Importance
# + [markdown] id="jLW6jt9gHJbP"
# ## Summary
#
# Overall, it is clear that congressional partisanship, at least how we defined it for the purposes of this essay has increased over the last 80 years. Both parties have grown more partisan at around the same rate, and this behavior exists in both houses. Using this scraped data, we are able to build models using machine learning that can predict partisanship with considerable accuracy
# + [markdown] id="b2o_FlzWf7yV"
# ## Financial Importance
#
# Being able to predict political partisanship allows us to predict the passage of legislature, an ability that has limitless political potential. To give a recent example, in times of economic depression such as the one caused by the COVID pandemic, there have been numerous efforts to pass legislature to provide stimulus packages to American people and businessess. When this occurs, there is a considerable economic uptick. Stock brokers make billions of dollars by predicting such an uptick; accordingly, models such as these have significant importance for quants, as this [article](https://medium.com/@bradfordcross/machine-learning-vs-quants-the-advantages-of-machine-learning-in-finance-f6e8d19ebf9f) details
# + [markdown] id="-H8Mx74khSqD"
# ## Social Importance
#
# At times, political polarization appears to be an insurmountable problem. How can the country unite in times of hardship? The first step in moving forward is recognizing historical patterns; in this case, our project details the patterns of partisanship.
#
# In the age of digitalization where echo chambers dictate much of what people hear and therefore the opinions they form, this problem is made more challenging. But machine learning algorithms and the data visualization of political partisanship may be the perfect method of penetrating digital echo chambers and act as a wake up call to the American people. If we can realize the trajectory of where we are as a society and make a renewed push to find common ground, we may be able to work together to benefit everyone. This is not a partisan report, both parties are responsible for where we are today, and both have the power to shape the future of our country.
# + [markdown] id="gBAL5Tspjq8-"
# ## The future of this project
#
# There are two main ways to take this project to the next level. First, as this paper has distinguished multiple times, there is a difference between the partisanship of the people of the US and the congressional representatives of the US. This project deals with the latter, but what of the former? Did one come before, or perhaps cause the other? To determine this, additional data sources are needed to measure citizen partisanship. However, due to digitalization revolutionizing information distribution, it will be difficult to standardize this change over time.
#
# Second, we did not use the title or the type of motion in this project at all, despite scraping it when available. An important future step for this project would be applying natural language processing to the text associated with each vote to put them in different categories. Our intuition is that certain types of votes invoke greater partisanship. An obvious recent example would be the nomination of Supreme Court Justices.
|
PoliticalPartisanshipInCongress_notebook.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # PyCity Schools Analysis
#
# * As a whole, schools with higher budgets, did not yield better test results. By contrast, schools with higher spending per student actually ($645-$675) underperformed compared to schools with smaller budgets (<$585 per student).
#
# * As a whole, smaller and medium sized schools dramatically out-performed large sized schools on passing math performances (89-91% passing vs 67%).
#
# * As a whole, charter schools out-performed the public district schools across all metrics. However, more analysis will be required to glean if the effect is due to school practices or the fact that charter schools tend to serve smaller student populations per school.
# ---
# +
# Dependencies and Setup
import pandas as pd
import numpy as np

# Files to load. Kept in *_path names so the path strings are not shadowed
# by the DataFrames read below (the original reused the same names).
school_data_path = "Resources/schools_complete.csv"
student_data_path = "Resources/students_complete.csv"

# Read School and Student Data File and store into Pandas Data Frames
school_data = pd.read_csv(school_data_path)
student_data = pd.read_csv(student_data_path)

# Combine the data into a single dataset; "school_name" only needs to be
# listed once as the join key (the original repeated it).
schl_data = pd.merge(student_data, school_data, how="left", on="school_name")
schl_data.head()
# -
# ## District Summary
#
# * Calculate the total number of schools
#
# * Calculate the total number of students
#
# * Calculate the total budget
#
# * Calculate the average math score
#
# * Calculate the average reading score
#
# * Calculate the overall passing rate (overall average score), i.e. (avg. math score + avg. reading score)/2
#
# * Calculate the percentage of students with a passing math score (70 or greater)
#
# * Calculate the percentage of students with a passing reading score (70 or greater)
#
# * Create a dataframe to hold the above results
#
# * Optional: give the displayed data cleaner formatting
# +
# total number of distinct schools in the district
total_dist_schools = schl_data['School ID'].nunique()
# total number of distinct students
total_dist_students = schl_data['Student ID'].nunique()
# total budget: each student row carries its school's budget and size
# (merged in from the schools table), so budget/size summed over every
# student row reconstructs the sum of the per-school budgets.
total_dist_budget = (schl_data['budget']/schl_data['size']).sum()
# average math score across all students
avg_dist_math = schl_data.math_score.mean()
# average reading score across all students
avg_dist_reading = schl_data.reading_score.mean()
# % of students passing math (score of 70 or greater)
dist_pass_math = ((schl_data['math_score'] >= 70).sum() / total_dist_students) * 100
# % of students passing reading (score of 70 or greater)
dist_pass_reading = ((schl_data['reading_score'] >= 70).sum() / total_dist_students) * 100
# overall passing rate: mean of the two subject passing percentages
dist_overall_pass = (dist_pass_math + dist_pass_reading) / 2

# One-row summary table for the whole district.
dist_summary = pd.DataFrame({'Total Schools': [total_dist_schools],
                             'Total Students': [total_dist_students],
                             'Total Budget': [total_dist_budget],
                             'Average Math Score': [avg_dist_math],
                             'Average Reading Score': [avg_dist_reading],
                             '% Passing Math': [dist_pass_math],
                             '% Passing Reading': [dist_pass_reading],
                             '% Overall Passing Rate': [dist_overall_pass]})
dist_summary
# -
# ## School Summary
# * Create an overview table that summarizes key metrics about each school, including:
# * School Name
# * School Type
# * Total Students
# * Total School Budget
# * Per Student Budget
# * Average Math Score
# * Average Reading Score
# * % Passing Math
# * % Passing Reading
# * Overall Passing Rate (Average of the above two)
#
# * Create a dataframe to hold the above results
# +
# set up: reduce to one row per school, indexed by school name
schl_summary = schl_data
unique_schl = schl_summary.drop_duplicates(subset = 'School ID', keep = 'first')
index_schl = unique_schl.set_index(['school_name'])
# NOTE(review): index_schl derives from schl_data without .copy(); the column
# assignments below may raise SettingWithCopyWarning — confirm intended.
# school type
index_schl['School Type'] = index_schl['type']
# total students
index_schl['Total Students'] = index_schl['size']
# total school budget
index_schl['School Budget'] = index_schl['budget']
# per student budget
index_schl['Budget Per Student'] = (index_schl['budget'])/(index_schl['size'])
# avg math score: groupby yields a Series indexed by school_name, so the
# assignment aligns on index_schl's school_name index.
index_schl['Average Math Score'] = schl_summary.groupby(['school_name']).math_score.mean()
# avg reading score (same index alignment)
index_schl['Average Reading Score'] = schl_summary.groupby(['school_name']).reading_score.mean()
# % passing math: per-school count of students scoring >= 70
num_math = schl_summary[schl_summary['math_score'] >= 70]
math_schl = num_math.groupby(['school_name']).count()['Student ID']
index_schl['Percent Passing Math'] = (math_schl/(index_schl['size'])) * 100
# % passing reading
num_reading = schl_summary[schl_summary['reading_score'] >= 70]
reading_schl = num_reading.groupby(['school_name']).count()['Student ID']
index_schl['Percent Passing Reading'] = (reading_schl/(index_schl['size'])) * 100
# overall passing: mean of the two subject passing percentages
index_schl['Overall Passing Rate'] = (((math_schl/(index_schl['size'])) * 100)+((reading_schl/(index_schl['size'])) * 100)) / 2

# Keep only the presentation columns for the summary table.
schl_summ = index_schl[['School Type', 'Total Students', 'School Budget', 'Budget Per Student', 'Average Math Score', 'Average Reading Score', 'Percent Passing Math', 'Percent Passing Reading', 'Overall Passing Rate']]
schl_summ
# -
# ## Top Performing Schools (By Passing Rate)
# * Sort and display the top five schools in overall passing rate
# Five best schools by overall passing rate (descending sort, top of list).
schl_summ.sort_values('Overall Passing Rate', ascending=False).head(5)
# ## Bottom Performing Schools (By Passing Rate)
# * Sort and display the five worst-performing schools
# Five worst schools by overall passing rate (ascending sort, top of list).
schl_summ.sort_values('Overall Passing Rate', ascending=True).head(5)
# ## Math Scores by Grade
# * Create a table that lists the average Math Score for students of each grade level (9th, 10th, 11th, 12th) at each school.
#
# * Create a pandas series for each grade. Hint: use a conditional statement.
#
# * Group each series by school
#
# * Combine the series into a dataframe
#
# * Optional: give the displayed data cleaner formatting
# +
# Average math score per school, one column per grade level.
# The four copy-pasted filter/groupby cells collapse into one dict
# comprehension; the intermediate frames (nine..twelve) were unused
# elsewhere (the reading cell rebuilds its own).
math_grades = pd.DataFrame({
    grade: schl_data.loc[schl_data['grade'] == grade]
                    .groupby('school_name')['math_score'].mean()
    for grade in ['9th', '10th', '11th', '12th']
})
math_grades
# -
# ## Reading Score by Grade
# * Perform the same operations as above for reading scores
# +
# Average reading score per school, one column per grade level.
# Same consolidation as the math-by-grade cell: one dict comprehension
# replaces the four copy-pasted filter/groupby blocks.
reading_grades = pd.DataFrame({
    grade: schl_data.loc[schl_data['grade'] == grade]
                    .groupby('school_name')['reading_score'].mean()
    for grade in ['9th', '10th', '11th', '12th']
})
reading_grades
# -
# ## Scores by School Spending
# * Create a table that breaks down school performances based on average Spending Ranges (Per Student). Use 4 reasonable bins to group school spending. Include in the table each of the following:
# * Average Math Score
# * Average Reading Score
# * % Passing Math
# * % Passing Reading
# * Overall Passing Rate (Average of the above two)
# Sample bins. Feel free to create your own bins.
spending_bins = [0, 585, 615, 645, 675]
# Labels pair positionally with the cut intervals (0,585], (585,615],
# (615,645], (645,675].
# NOTE(review): "$586-615" and "$645-675" don't exactly match the bin edges
# ((585,615] and (645,675]) — cosmetic label drift; confirm intended ranges.
group_names = ["<$585", "$586-615", "$616-645", "$645-675"]
# Average each performance metric within each per-student-spending bucket.
score_spending = schl_summ[['Average Math Score', 'Average Reading Score', "Percent Passing Math", "Percent Passing Reading", 'Overall Passing Rate']].groupby(pd.cut(schl_summ["Budget Per Student"], bins = spending_bins, labels = group_names)).mean()
score_spending
# ## Scores by School Size
# * Perform the same operations as above, based on school size.
# Bucket schools by enrollment and average the performance metrics per bucket.
size_bins = [0, 1000, 2000, 5000]
size_names = ["Small (<1000)", "Medium (1000-2000)", "Large (2000-5000)"]
size_bucket = pd.cut(schl_summ['Total Students'], bins=size_bins, labels=size_names)
metric_cols = ['Average Math Score', 'Average Reading Score',
               'Percent Passing Math', 'Percent Passing Reading',
               'Overall Passing Rate']
score_size = schl_summ[metric_cols].groupby(size_bucket).mean()
score_size
# ## Scores by School Type
# * Perform the same operations as above, based on school type.
# Recode school type numerically so pd.cut can bucket it:
# Charter -> 1 -> interval (0, 1]; District -> 2 -> interval (1, 2].
type_bins = [0, 1, 2]
# Labels pair positionally with the cut intervals, so Charter must come
# first. The original ['District', 'Charter'] ordering attached each
# group's averages to the WRONG type label.
type_names = ['Charter', 'District']
# Work on a copy so the numeric recode does not overwrite the
# 'School Type' strings in the shared schl_summ summary table.
schl_typ = schl_summ.copy()
schl_typ['School Type'] = schl_typ['School Type'].replace({'Charter': 1, 'District': 2})
score_type = schl_typ[['Average Math Score', 'Average Reading Score', 'Percent Passing Math', 'Percent Passing Reading', 'Overall Passing Rate']].groupby(pd.cut(schl_typ['School Type'], bins = type_bins, labels = type_names)).mean()
score_type
|
PyCitySchools/PyCitySchools.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Lets filter some data from home-assistant, in this case the outside temperatures recorded by dark-sky
#
# https://pythonprogramming.net/rolling-statistics-data-analysis-python-pandas-tutorial/
#
# https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.rolling.html
# ls
import pandas as pd
import matplotlib.pyplot as plt
import scipy.signal
# %matplotlib inline
# Load the recorded outside temperatures; first CSV column is the timestamp.
df = pd.read_csv('darksky_temperature.csv', index_col=0)
df.index = pd.to_datetime(df.index) # Convert index to timestamp
df.columns = ['raw']
df.head()
# Smooth the raw series with a Savitzky-Golay filter
# (window of 201 samples, quadratic polynomial).
df['savgol'] = scipy.signal.savgol_filter(x=df['raw'].values, window_length=201, polyorder=2)
# Lets look at a noisy few days
# +
# Compare raw, rolling-mean and Savitzky-Golay smoothing over two noisy days.
start = '2018-2-16'
end = '2018-2-17'
opacity = 0.99
plt.figure(figsize=(20,22))
plt.plot(df['raw'].loc[start:end], 'r', alpha=opacity, label='raw');
# pd.rolling_mean() was deprecated in pandas 0.18 and removed in 0.23;
# the .rolling() accessor computes the same 100-sample moving average.
plt.plot(df['raw'].loc[start:end].rolling(100).mean(), 'b--', label='rolling_mean');
plt.plot(df['savgol'].loc[start:end], 'g', label='savgol');
plt.legend();
# -
|
Home-assistant/filtering-ha-data/Filtering home-assistant data.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Mikaner/reinforcement/blob/main/PrioritizedExperienceReplay.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="3_RZmOP3Ys8G" colab={"base_uri": "https://localhost:8080/"} outputId="dc74b73a-617a-4425-a7b3-87461d6be2a4"
# !apt-get -qq -y install libcusparse8.0 libnvrtc8.0 libnvtoolsext1 > /dev/null
# !ln -snf /usr/lib/x86_64-linux-gnu/libnvrtc-builtins.so.8.0 /usr/lib/x86_64-linux-gnu/libnvrtc-builtins.so
# !apt -qq install xvfb freeglut3-dev ffmpeg> /dev/null
# !pip -q install gym
# !pip -q install JSAnimation
# !pip -q install pyglet
# !pip -q install pyopengl
# !pip -q install pyvirtualdisplay
# + id="vx2W14wg_w9s"
# Start a virtual X display so gym can render frames on a headless Colab VM.
from pyvirtualdisplay import Display
display = Display(visible=0, size=(1024, 768))
display.start()
import os
# Point X clients at the virtual display just created.
os.environ["DISPLAY"] = f":{display.display}"
# https://github.com/ponty/PyVirtualDisplay/issues/54
# + id="S6gigdRH_11H"
# 動画の描画関数の宣言
# 参考URL: http://nbviewer.jupyter.org/github/patrickmineault/xcorr-notebooks/blob/master/Render%20OpenAI%20gym%20as%20GIF.ipynb
from JSAnimation.IPython_display import display_animation
from matplotlib import animation
#from IPython.display import display
from IPython.display import HTML
def make_anim(frames):
    """Build a matplotlib FuncAnimation that plays `frames` (RGB arrays).

    The figure is sized so one frame pixel maps to one figure point at 72 dpi.
    """
    plt.figure(figsize=(frames[0].shape[1]/72.0, frames[0].shape[0]/72.0),
               dpi=72)
    patch = plt.imshow(frames[0])
    plt.axis('off')

    def animate(i):
        # Swap the displayed image in place for frame i.
        patch.set_data(frames[i])

    anim = animation.FuncAnimation(plt.gcf(), animate, frames=len(frames),
                                   interval=50)
    return anim
def save_frames_as_gif(frames):
    """Save `frames` to 'movie_cartpole_DQN.mp4' and return the animation as
    embeddable JavaScript HTML (the "gif" name is historical — no gif and no
    inline display happen here).
    """
    anim = make_anim(frames)
    anim.save('movie_cartpole_DQN.mp4')
    #display(display_animation(anim, default_mode='loop'))
    return anim.to_jshtml()
# + id="ea7sU7mMf4rx"
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
import gym
# + id="Utq10Ue4AvaV"
# One environment step as stored in replay memory.
from collections import namedtuple

Transition = namedtuple(
    'Transition', ['state', 'action', 'next_state', 'reward'])
# + id="QDVlVO79BLtr"
ENV = 'CartPole-v0'  # Gym environment id
GAMMA = 0.99         # reward discount factor
MAX_STEPS = 200      # maximum steps per episode
NUM_EPISODES = 500   # number of training episodes
# + id="9k9_Z1koBUvM"
# Fixed-capacity ring buffer of experience tuples, enabling minibatch learning.
class ReplayMemory:
    """Cyclic buffer of Transition tuples for experience replay."""

    def __init__(self, CAPACITY):
        self.capacity = CAPACITY  # maximum number of stored transitions
        self.memory = []          # backing list, grows until full
        self.index = 0            # next slot to (over)write

    def push(self, state, action, state_next, reward):
        """Store one (state, action, state_next, reward) transition."""
        if len(self.memory) < self.capacity:
            # Still filling up: extend the list so the target slot exists.
            self.memory.append(None)
        # Pack the fields into a named Transition and write the slot.
        self.memory[self.index] = Transition(state, action, state_next, reward)
        # Advance the cursor, wrapping around to overwrite the oldest entry.
        self.index = (self.index + 1) % self.capacity

    def sample(self, batch_size):
        """Return batch_size transitions drawn uniformly without replacement."""
        return random.sample(self.memory, batch_size)

    def __len__(self):
        """Number of transitions currently held."""
        return len(self.memory)
# + id="5l3WYTZWFU60"
TD_ERROR_EPSILON = 0.0001  # floor added to |TD error| so every entry keeps a nonzero pick probability


class TDerrorMemory:
    """Holds one TD error per stored transition and samples indexes by priority."""

    def __init__(self, CAPACITY):
        self.capacity = CAPACITY  # maximum buffer length
        self.memory = []          # stored TD errors
        self.index = 0            # next write position

    def push(self, td_error):
        """Append a TD error, overwriting the oldest once the buffer is full."""
        if len(self.memory) < self.capacity:
            self.memory.append(None)
        self.memory[self.index] = td_error
        self.index = (self.index + 1) % self.capacity

    def __len__(self):
        """Current number of stored TD errors."""
        return len(self.memory)

    def get_prioritized_indexes(self, batch_size):
        """Draw batch_size indexes with probability ~ |TD error| + epsilon."""
        # Total sampling mass: sum of |errors| plus an epsilon per entry.
        total_mass = np.sum(np.absolute(self.memory))
        total_mass += TD_ERROR_EPSILON * len(self.memory)

        # Sorted uniform draws over [0, total_mass): a "skewer" through the
        # cumulative per-entry mass distribution.
        picks = np.sort(np.random.uniform(0, total_mass, batch_size))

        indexes = []
        cursor = 0
        accumulated = 0
        for pick in picks:
            # Walk forward until the cumulative mass covers this draw.
            while accumulated < pick:
                accumulated += (
                    abs(self.memory[cursor]) + TD_ERROR_EPSILON)
                cursor += 1
            # Guard against floating-point overshoot past the last entry.
            if cursor >= len(self.memory):
                cursor = len(self.memory) - 1
            indexes.append(cursor)
        return indexes

    def update_td_error(self, updated_td_errors):
        """Replace the stored TD errors wholesale."""
        self.memory = updated_td_errors
# + id="QDHEqUOZ44tk"
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
    """Three-layer fully connected Q-network: n_in -> n_mid -> n_mid -> n_out."""

    def __init__(self, n_in, n_mid, n_out):
        super(Net, self).__init__()
        self.fc1 = nn.Linear(n_in, n_mid)
        self.fc2 = nn.Linear(n_mid, n_mid)
        self.fc3 = nn.Linear(n_mid, n_out)

    def forward(self, x):
        """Map a batch of states to Q-values; ReLU between hidden layers."""
        hidden = F.relu(self.fc1(x))
        hidden = F.relu(self.fc2(hidden))
        return self.fc3(hidden)
# + id="9S5orRy_LT17"
# エージェントが持つ脳となるクラス。DQNを実行する。
# Q関数をディープラーニングのネットワークをクラスとして定義
import random
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
BATCH_SIZE = 32    # minibatch size for replay updates
CAPACITY = 10000   # maximum number of transitions kept in replay memory
class Brain:
    """DQN "brain": owns the Q-networks, replay memory and TD-error memory.

    Combines Double DQN target computation (main net selects the next
    action, target net evaluates it) with Prioritized Experience Replay
    (after episode 30, minibatches are drawn by TD-error priority).
    """

    def __init__(self, num_states, num_actions):
        self.num_actions = num_actions  # number of discrete actions (2 for CartPole: push left/right)

        # Replay memory holding past transitions.
        self.memory = ReplayMemory(CAPACITY)

        # Build the main and target Q-networks (one hidden width of 32).
        n_in, n_mid, n_out = num_states, 32, num_actions
        self.main_q_network = Net(n_in, n_mid, n_out)
        self.target_q_network = Net(n_in, n_mid, n_out)
        print(self.main_q_network)

        self.optimizer = optim.Adam(self.main_q_network.parameters(), lr=0.0001)

        # One TD error per stored transition, used for prioritized sampling.
        self.td_error_memory = TDerrorMemory(CAPACITY)

    def replay(self, episode):
        '''Learn the network parameters via Experience Replay.'''
        # -------------------------------------------------
        # 1. Check the memory size
        # -------------------------------------------------
        # 1.1 Do nothing while the memory holds less than one minibatch.
        if len(self.memory) < BATCH_SIZE:
            return
        # -------------------------------------------------
        # 2. Build the minibatch (prioritized after episode 30)
        # -------------------------------------------------
        self.batch, self.state_batch, self.action_batch, self.reward_batch, self.non_final_next_states = self.make_minibatch(episode)
        # -------------------------------------------------
        # 3. Compute the teaching signal Q(s_t, a_t)
        # -------------------------------------------------
        self.expected_state_action_values = self.get_expected_state_action_values()
        # -------------------------------------------------
        # 4. Update the main network weights
        # -------------------------------------------------
        self.update_main_q_network()

    def decide_action(self, state, episode):
        '''Choose an action for the current state (epsilon-greedy).'''
        # Epsilon decays with the episode number so that the greedy action
        # gradually dominates exploration.
        epsilon = 0.5 * (1 / (episode + 1))

        if epsilon <= np.random.uniform(0, 1):
            self.main_q_network.eval()  # switch the network to inference mode
            with torch.no_grad():
                action = self.main_q_network(state).max(1)[1].view(1, 1)
            # max(1)[1] takes the index of the maximum network output
            # (argmax over actions); .view(1, 1) reshapes the size-1
            # LongTensor to 1x1.
        else:
            # Return a random action (0 or 1).
            action = torch.LongTensor([[random.randrange(self.num_actions)]])
            # action has shape 1x1 (LongTensor).
        return action

    def make_minibatch(self, episode):
        '''Assemble a minibatch of stored transitions.'''
        if episode < 30:
            # Early on: uniform sampling from replay memory.
            transitions = self.memory.sample(BATCH_SIZE)
        else:
            # Later: sample indexes by TD-error priority.
            indexes = self.td_error_memory.get_prioritized_indexes(BATCH_SIZE)
            transitions = [self.memory.memory[n] for n in indexes]

        # Transpose the list of Transitions into one Transition of
        # batched fields (states together, actions together, ...).
        batch = Transition(*zip(*transitions))

        state_batch = torch.cat(batch.state)
        action_batch = torch.cat(batch.action)
        reward_batch = torch.cat(batch.reward)
        # Next states of transitions that did not end the episode.
        non_final_next_states = torch.cat([s for s in batch.next_state if s is not None])
        return batch, state_batch, action_batch, reward_batch, non_final_next_states

    def get_expected_state_action_values(self):
        '''Compute the Double-DQN teaching signal for the current minibatch.'''
        self.main_q_network.eval()
        self.target_q_network.eval()

        # Q(s_t, a_t) predicted by the main network for the taken actions.
        self.state_action_values = self.main_q_network(self.state_batch).gather(1, self.action_batch)

        # Index mask of transitions whose episode had not ended
        # (i.e. next_state exists).
        non_final_mask = torch.ByteTensor(tuple(map(lambda s: s is not None, self.batch.next_state)))
        # ByteTensor: unsigned 8-bit integers; True/False become 1/0.

        next_state_values = torch.zeros(BATCH_SIZE)

        a_m = torch.zeros(BATCH_SIZE).type(torch.LongTensor)

        # Double DQN: the MAIN network picks the best next action a_m;
        # the trailing [1] returns the index (argmax) of that action.
        a_m[non_final_mask] = self.main_q_network(self.non_final_next_states).detach().max(1)[1]

        # Keep only transitions with a next state; reshape size 32 to 32x1.
        a_m_non_final_next_states = a_m[non_final_mask].view(-1, 1)

        # ...and the TARGET network evaluates Q(next_state, a_m).
        # detach() stops gradients; squeeze() turns [minibatch x 1] into [minibatch].
        next_state_values[non_final_mask] = self.target_q_network(self.non_final_next_states).gather(1, a_m_non_final_next_states).detach().squeeze()

        # Q-learning target: r + gamma * Q_target(s', a_m).
        expected_state_action_values = self.reward_batch + GAMMA * next_state_values

        return expected_state_action_values

    def update_main_q_network(self):
        '''One gradient step on the Huber loss between predicted and target Q.'''
        self.main_q_network.train()
        loss = F.smooth_l1_loss(self.state_action_values, self.expected_state_action_values.unsqueeze(1))
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()

    def update_target_q_network(self):
        '''Copy the main network weights into the target network.'''
        self.target_q_network.load_state_dict(self.main_q_network.state_dict())

    def update_td_error_memory(self):
        '''Recompute the TD error of every transition stored in memory.'''
        # Switch both networks to inference mode.
        self.main_q_network.eval()
        self.target_q_network.eval()

        # Build one batch out of the whole replay memory.
        trainsitions = self.memory.memory
        batch = Transition(*zip(*trainsitions))

        state_batch = torch.cat(batch.state)
        action_batch = torch.cat(batch.action)
        reward_batch = torch.cat(batch.reward)
        non_final_next_states = torch.cat([s for s in batch.next_state if s is not None])

        # Q(s_t, a_t) from the main network.
        state_action_values = self.main_q_network(state_batch).gather(1, action_batch)

        # Index mask of transitions that still have a next state.
        non_final_mask = torch.ByteTensor(tuple(map(lambda s: s is not None, batch.next_state)))

        # Zero-initialised; length equals the whole memory, not BATCH_SIZE.
        next_state_values = torch.zeros(len(self.memory))
        a_m = torch.zeros(len(self.memory)).type(torch.LongTensor)

        # Double DQN again: the main network picks the best next action a_m;
        # the trailing [1] returns its index.
        a_m[non_final_mask] = self.main_q_network(non_final_next_states).detach().max(1)[1]

        # Keep only entries with a next state; reshape size 32 to 32x1.
        a_m_non_final_next_states = a_m[non_final_mask].view(-1, 1)

        # Target network evaluates Q(next_state, a_m).
        # detach() stops gradients; squeeze() turns [minibatch x 1] into [minibatch].
        next_state_values[non_final_mask] = self.target_q_network(non_final_next_states).gather(1, a_m_non_final_next_states).detach().squeeze()

        # TD error = (r + gamma * Q_target(s', a_m)) - Q_main(s, a).
        td_errors = (reward_batch + GAMMA * next_state_values) - state_action_values.squeeze()

        # Store as a plain Python list (detach -> NumPy -> list).
        self.td_error_memory.memory = td_errors.detach().numpy().tolist()
# + id="SvoCVmTk8sNW"
class Agent:
    """Thin facade between the environment loop and the Brain."""

    def __init__(self, num_states, num_actions):
        '''Create the brain for the given state/action sizes.'''
        self.brain = Brain(num_states, num_actions)
        # The brain decides actions and learns from replayed experience.

    def update_q_function(self, episode):
        '''Run one experience-replay update of the Q-function.'''
        self.brain.replay(episode)

    def get_action(self, state, episode):
        '''Pick an action for the state (epsilon-greedy via the brain).'''
        action = self.brain.decide_action(state, episode)
        return action

    def memorize(self, state, action, state_next, reward):
        '''Store one (state, action, state_next, reward) transition in replay memory.'''
        self.brain.memory.push(state, action, state_next, reward)

    def update_target_q_function(self):
        '''Sync the target Q-network with the main one.'''
        self.brain.update_target_q_network()

    def memorize_td_error(self, td_error):
        '''Store one TD error in the TD-error memory.'''
        self.brain.td_error_memory.push(td_error)

    def update_td_error_memory(self):
        '''Recompute every stored TD error.'''
        self.brain.update_td_error_memory()
# + id="bitNEWgSy4dd"
class Environment:
    """Owns the Gym CartPole environment and the Agent, and runs training."""

    def __init__(self):
        self.env = gym.make(ENV)  # create the task environment
        self.num_states = self.env.observation_space.shape[0]
        # State vector size and number of actions of the task.
        self.num_actions = self.env.action_space.n  # 2 for CartPole (push left/right)
        # The agent acting inside this environment.
        self.agent = Agent(self.num_states, self.num_actions)

    def run(self):
        '''Train for NUM_EPISODES episodes; return the final episode's animation.'''
        episode_10_list = np.zeros(10)  # steps survived in the last 10 episodes (rolling average for logging)
        complete_episodes = 0  # count of consecutive episodes lasting >= 195 steps
        episode_final = False  # True once training succeeded; the next episode gets recorded
        frames = []  # rendered images of the final episode

        for episode in range(NUM_EPISODES):
            observation = self.env.reset()  # reset the environment

            state = observation  # use the raw observation directly as the state
            state = torch.from_numpy(state).type(torch.FloatTensor)  # numpy -> torch tensor
            # Reshape the size-4 FloatTensor into a 1x4 batch.
            state = torch.unsqueeze(state, 0)

            for step in range(MAX_STEPS):
                if episode_final or episode==NUM_EPISODES-1:  # record frames only for the last episode
                    frames.append(self.env.render(mode='rgb_array'))

                action = self.agent.get_action(state, episode)  # choose an action

                # Execute a_t to obtain s_{t+1} and the done flag;
                # .item() unwraps the 1x1 tensor to a plain int.
                observation_next, _, done, _ = self.env.step(action.item())  # reward and info are unused (_)

                # Assign the reward, evaluate episode end, and set state_next.
                if done:  # done: 200 steps elapsed, or the pole tilted past the limit
                    state_next = None

                    # Append this episode's step count to the rolling window.
                    episode_10_list = np.hstack((episode_10_list[1:], step + 1))

                    if step < 195:
                        reward = torch.FloatTensor([-1.0])  # fell over early: penalty of -1
                        complete_episodes = 0  # reset the success streak
                    else:
                        reward = torch.FloatTensor([1.0])  # stayed upright to the end: reward +1
                        complete_episodes += 1
                else:
                    reward = torch.FloatTensor([0.0])  # no reward mid-episode
                    state_next = observation_next  # the raw observation becomes the state
                    state_next = torch.from_numpy(state_next).type(torch.FloatTensor)  # numpy -> torch tensor
                    # Reshape the size-4 tensor into a 1x4 batch so it matches
                    # `state` (networks expect a leading batch dimension).
                    state_next = torch.unsqueeze(state_next, 0)

                # Store the transition in replay memory...
                self.agent.memorize(state, action, state_next, reward)

                # ...and a placeholder TD error (recomputed at episode end).
                self.agent.memorize_td_error(0)

                # Update the Q-function with Prioritized Experience Replay.
                self.agent.update_q_function(episode)

                # Advance the observation.
                state = state_next

                # End-of-episode bookkeeping.
                if done:
                    print('%d Episode: Finished after %d steps : 10試行の平均step数 = %.1lf' % (episode, step + 1, episode_10_list.mean()))

                    # Refresh every stored TD error.
                    self.agent.update_td_error_memory()

                    # Added for DDQN:
                    # copy main -> target network every 2 episodes.
                    if(episode % 2) == 0:
                        self.agent.update_target_q_function()
                    break

            if episode_final or episode==NUM_EPISODES-1:
                # Save the recorded episode and return its animation.
                anim = save_frames_as_gif(frames)
                return anim

            if complete_episodes >= 10:
                print('Ten consective success !!')
                episode_final = True
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="aCEOWpYePqWx" outputId="ef2bd2be-0e73-4c81-9986-add75342cd93"
# Train the agent and capture the final episode as an animation.
cartpole_env = Environment()
anim = cartpole_env.run()
# + colab={"base_uri": "https://localhost:8080/", "height": 497} id="bo8FYCObX5EA" outputId="95fbfe51-0245-4313-9a7f-1082172148da"
# Display the saved animation inline (IPython.display.HTML).
HTML(anim)
|
PrioritizedExperienceReplay.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Shifted Sphere Function with D=50
# Import libraries
import numpy as np
import pandas as pd
from scipy.optimize import minimize
import matplotlib.pyplot as plt
import time
# -
# Read the benchmark's shift data from csv
# (presumably the shift vector o of the CEC shifted-sphere benchmark — TODO confirm)
raw_data = pd.read_csv("./Shifted_Sphere.csv")
shifted_sphere = raw_data["val"].tolist()
print(shifted_sphere)
print(type(shifted_sphere))
# Parameters
Dim = 50  # problem dimensionality D
bias = -450  # f_bias of the shifted sphere function (F1)
lower_bound = -100  # per-coordinate lower bound of the search space
upper_bound = 100  # per-coordinate upper bound of the search space
list_result = []  # fitness recorded at each optimizer iteration (filled by the callback)
# Last parameters
def func(a, data=shifted_sphere, dim=Dim, f_bias=bias):
    """Shifted Sphere benchmark (F1): sum_i (a[i] - data[i])**2 + f_bias.

    Parameters
    ----------
    a : sequence of float
        Candidate solution of length >= dim.
    data : sequence of float
        Shift values subtracted coordinate-wise (defaults to the CSV data).
    dim : int
        Number of coordinates to sum over.
    f_bias : float
        Constant bias added to the sum.

    Returns
    -------
    float
        The shifted-sphere fitness value.
    """
    F = 0
    # was `range(dim - 1)`: that silently dropped the last coordinate of the sum
    for i in range(dim):
        # was `x[i]`: `x` is undefined in this scope (NameError); the argument is `a`
        z = a[i] - data[i]
        F += z**2
    res = F + f_bias
    return res
def solution(ak):
    """Callback for scipy.optimize.minimize: record the fitness at iterate ak.

    Appends func(ak) to the module-level `list_result` history and returns it.
    """
    # was `function(ak)`: `function` is undefined (NameError); the objective is `func`
    res = func(ak)
    list_result.append(res)
    # was `return sol_res`: `sol_res` is undefined (NameError); return the computed value
    return res
# random uniform distribution
def distr(dim, lower_bound, upper_bound):
    """Draw a random starting point uniformly from the search box.

    Returns a length-`dim` numpy array with each coordinate sampled
    uniformly from [lower_bound, upper_bound].
    """
    low = lower_bound * np.ones(dim)
    high = upper_bound * np.ones(dim)
    return np.random.uniform(low.min(), high.max(), dim)
# Solution
def result(dimension, lower_bound, upper_bound):
    """Minimize the shifted sphere from a random start inside the bounds.

    Parameters
    ----------
    dimension : int
        Problem dimensionality.
    lower_bound, upper_bound : float
        Per-coordinate search-space bounds.

    Returns
    -------
    (scipy.optimize.OptimizeResult, list of float)
        The optimizer result and the per-iteration fitness history.
    """
    # Kept for backward compatibility: later script cells read the module-level
    # `solution.x`. NOTE(review): rebinding shadows the callback of the same
    # name, so `result` can only be called once per session.
    global solution
    # was `x0 = result(...)`: infinite recursion; the random start comes from distr()
    x0 = distr(dimension, lower_bound, upper_bound)
    # `solution` still names the logging callback at call time; it is rebound
    # to the OptimizeResult only after minimize() returns.
    # was method='BFGS' with bounds=(lb, ub): BFGS cannot handle bounds and the
    # bounds format was wrong; L-BFGS-B honors a per-coordinate (min, max) list.
    # `sol_set` (objective and callback) was undefined: use func / solution.
    solution = minimize(func, x0,
                        bounds=[(lower_bound, upper_bound)] * dimension,
                        method='L-BFGS-B', callback=solution)
    return solution, list_result
# plot curve
def plot(solution):
    """Draw the convergence curve: recorded fitness values over time."""
    plt.figure(figsize=(16, 13))
    plt.plot(solution)
    plt.title("Shifted Sphere Function with D=50", fontsize=12)
    plt.xlabel("Time", fontsize=10)
    plt.ylabel("Fitness", fontsize=10)
    plt.show()
# Run the optimization and measure wall-clock time.
# (Originally the timers bracketed nothing and the optimizer was never invoked,
# so `sol`, `sol_list` and `plot_fitness` below were all undefined.)
timer1 = time.time()
sol, fitness_history = result(Dim, lower_bound, upper_bound)
# compute computational time
timer2 = time.time()
comp_time = timer2 - timer1
# Print solutions
print("Function: Shifted Sphere Function (F1)\n")
print("02. Parameters:")
print("\nDimension:", Dim)
print("\nSearch space: [", lower_bound, ",", upper_bound, "]")
print("\nBias:", bias)
print("\n03. Final results:")
sol_df = pd.DataFrame(sol.x, columns=[''])
sol_df.to_csv("Shifted_Sphere_50.csv", sep=",")
print("\n - Solutions:", sol_df)
print("\n - Fitness:", round(sol.fun, 2))
print("\nNumber of function evaluations:", sol.nfev)
print("\nStopping criterion:", sol.nit, "iterations")
print("\nComputational time:", round(comp_time, 2), "seconds\n")
print("==========================================================================")
# Plot the convergence curve (was `plot_fitness(sol_list)`: both names undefined)
plot(fitness_history)
|
1-Shifted_Sphere/Shifted_Sphere_50.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import pandas as pd
from lets_plot import *
LetsPlot.setup_html()
# -
# Load the MPG dataset
mpg = pd.read_csv ("https://raw.githubusercontent.com/JetBrains/lets-plot-docs/master/data/mpg.csv")
mpg.head ()
# Default legend: engine displacement vs highway MPG, colored by manufacturer
p = ggplot(mpg, aes('displ', 'hwy', color='manufacturer')) + geom_point(size=5) + ggsize(500,250)
p
# Lay out the legend in two columns
p + scale_color_discrete(guide=guide_legend(ncol=2)) + ggtitle('Two columns legend')
# Same two columns, but fill the entries by rows
p + scale_color_discrete(guide=guide_legend(ncol=2, byrow=True)) \
    + ggtitle('Two columns legend filled by rows')
# Adjust legend and axis:
# - five rows legend
# - draw legend below
# - remove axis
p + scale_color_discrete(guide=guide_legend(nrow=5)) \
    + theme_minimal() + theme(legend_position='bottom') \
    + ggtitle('Five rows legend and below') \
    + ggsize(500,400)
# Map color to city MPG and shape to drive-train
p1 = ggplot(mpg, aes(x='displ', y='hwy')) + geom_point(aes(color='cty', shape='drv'), size=5) + ggsize(700, 350)
p1
# Change legend position and orientation
p1 + theme(legend_position=[1, 1], legend_justification=[1, 1], legend_direction='horizontal')
# Adjust colorbar size
# Change order in drive-train legend
# Set labels to legends and axis
# Remove vertical axis line
p1 + theme(legend_position=[1, 1], legend_justification=[1, 1], legend_direction='horizontal') \
    + scale_color_continuous(name='City MPG', low='dark_blue', high='light_blue', guide=guide_colorbar(barheight=10, barwidth=300)) \
    + scale_shape(name='Drive-train', breaks=['f', 'r', '4'], labels=['front', 'rear', '4X4']) \
    + xlab('Engine displacement (L)') + ylab('Highway MPG')
|
source/examples/cookbook/legend_and_axis.ipynb
|