code stringlengths 2.5k 150k | kind stringclasses 1 value |
|---|---|
<a href="https://colab.research.google.com/github/mancunian1792/causal_scene_generation/blob/master/causal_model/game_characters/GameCharacter_ImageClassification.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
from google.colab import drive
drive.mount("/content/gdrive", force_remount=True)
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.utils import to_categorical
from keras.preprocessing import image
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from keras.utils import to_categorical
from tqdm import tqdm
from skimage.transform import rotate
from skimage.util import random_noise
from skimage.filters import gaussian
# Paths into the mounted Google Drive for the game-character dataset.
root_path = 'gdrive/My Drive/causal_scene_generation/game_characters/'
train_path = root_path + 'train/'
test_path = root_path + 'test/'
train_images = train_path + 'images/'  # training PNGs
test_images = test_path + 'images/'    # test PNGs
train_csv = train_path + 'train.csv'   # CSV: 'filename' column + label columns
test_csv = test_path + 'test.csv'
def preprocess(imgPath, filePath):
    """Load every image listed in the CSV at ``filePath`` from ``imgPath``.

    Returns (features, target): ``features`` is an array of shape
    (n, 400, 400, 3) with pixel values scaled to [0, 1]; ``target`` is
    the CSV frame with the 'filename' column dropped (the label columns).
    """
    frame = pd.read_csv(filePath)
    loaded = []
    # Transform each image in the imgPath and add it to the input array
    for name in tqdm(frame["filename"]):
        pic = image.load_img(imgPath + name + ".png",
                             target_size=(400, 400, 3), grayscale=False)
        loaded.append(image.img_to_array(pic) / 255)
    # Get the labels for each
    return np.array(loaded), frame.drop(["filename"], axis=1)
def augmentData(features, target):
    """Expand the dataset 5x per image: original, 45-degree rotation,
    horizontal flip, vertical flip, and a Gaussian-noised copy, with the
    matching label row repeated once per variant."""
    aug_x = []
    aug_y = []
    for i in tqdm(range(features.shape[0])):
        img = features[i]
        aug_x.extend([
            img,
            rotate(img, angle=45, mode='wrap'),
            np.fliplr(img),
            np.flipud(img),
            random_noise(img, var=0.2 ** 2),
        ])
        aug_y.extend([target.iloc[i, :]] * 5)
    return np.asarray(aug_x), pd.DataFrame(aug_y, columns=target.columns)
# Build the training set from disk and expand it with augmentations.
x_train, y_train = preprocess(train_images, train_csv)
x_train_augment, y_train_augment = augmentData(x_train, y_train)
del x_train, y_train  # free the unaugmented copies (Colab RAM is tight)
x_test, y_test = preprocess(test_images, test_csv)
# Hold out 20% of the test set as a validation split.
x_test, x_validate, y_test, y_validate = train_test_split(x_test, y_test, random_state = 3000, test_size = 0.2)
plt.imshow(x_validate[2])
# Size of vector is 64 * 64 * 3 -> resize ((64 *64*3), 1)
# (/255 )
# Convert to grayscale.->
# The output shape
op_shape = y_train_augment.shape[1]  # one sigmoid unit per label column
# Four Conv/MaxPool/Dropout stages followed by two dense layers; sigmoid
# output with binary_crossentropy treats each label column as an
# independent binary target (multi-label classification).
model = Sequential()
model.add(Conv2D(filters=16, kernel_size=(5, 5), activation="relu", input_shape=(400,400,3)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=32, kernel_size=(10, 10), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=64, kernel_size=(10, 10), activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=64, kernel_size=(5, 5), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(op_shape, activation='sigmoid'))
model.summary()
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# NOTE(review): validation_data is the held-out test split here, while
# x_validate is only used for prediction below — confirm that is intended.
model.fit(x_train_augment, y_train_augment, epochs=10, validation_data=(x_test, y_test), batch_size=64)
model.save(root_path+"model-both-images.hdf5")
prediction = model.predict(x_validate)
prediction[0]
del x_train_augment, y_train_augment, x_test, y_test
```
### Attempt 2 - Image Classification
This time, I am splitting the images and modifying the labels. The image classifier will try to predict the entity (actor/reactor), character (satyr/golem), type (1/2/3), entity_doing (action/reaction), and entity_doing_type (Idle/Attacking/Hurt/Die/Walking/Taunt).
```
# Modify the labels (Do - encoding)
splits_path = root_path + 'splits/'
splits_images = splits_path + 'images/'
splits_dataset = splits_path + 'split_dataset.csv'
df = pd.read_csv(splits_dataset)
# Raw string: '\d' in a plain literal is an invalid escape sequence
# (SyntaxWarning in recent Python). Keeps only the digits of 'type'.
df["type"] = df.type.str.extract(r'(\d+)')
images = df["img_name"]
target = df.drop(["img_name"], axis=1)
# One-hot (dummy) encode every remaining categorical label column.
target = pd.get_dummies(target)
def processSplitImages(imgPath, filenames):
    """Load each named PNG from ``imgPath`` as a 400x400 RGB array in [0, 1]."""
    arrays = []
    for name in tqdm(filenames):
        loaded = image.load_img(imgPath + name + ".png",
                                target_size=(400, 400, 3), grayscale=False)
        arrays.append(image.img_to_array(loaded) / 255)
    return np.array(arrays)
img_features = processSplitImages(splits_images, images)
# Split into train and test . And then augment the train data.
features_train, features_test, target_train, target_test = train_test_split(img_features, target, stratify=target, test_size=0.2)
del img_features, target  # free memory before building the model
# Augmenting train data -> Not able to allocate enough RAM
#feature_train_augmented, target_augmented = augmentData(features_train, target_train)
op_shape = target_train.shape[1]  # one sigmoid unit per dummy-encoded label
# Same four-stage CNN as attempt 1, now trained on the split images.
model = Sequential()
model.add(Conv2D(filters=16, kernel_size=(5, 5), activation="relu", input_shape=(400,400,3)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=32, kernel_size=(10, 10), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=64, kernel_size=(10, 10), activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(filters=64, kernel_size=(5, 5), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(op_shape, activation='sigmoid'))
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
from keras.callbacks import ModelCheckpoint
# NOTE(review): without save_best_only=True this writes weights after
# every epoch (file name carries epoch and val_accuracy) — confirm intended.
filepath=root_path + "weights-{epoch:02d}-{val_accuracy:.3f}.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy',
                             verbose=1, mode='max')
callbacks_list = [checkpoint]
model.fit(features_train, target_train, epochs=10, validation_data=(features_test, target_test), batch_size=64, callbacks=callbacks_list)
```
| github_jupyter |
```
%reload_ext autoreload
%autoreload 2
from fastai.tabular import *
```
# Rossmann
## Data preparation
To create the feature-engineered train_clean and test_clean from the Kaggle competition data, run `rossman_data_clean.ipynb`. One important step that deals with time series is this:
```python
add_datepart(train, "Date", drop=False)
add_datepart(test, "Date", drop=False)
```
```
path = Config().data_path()/'rossmann'  # fastai-managed data directory
train_df = pd.read_pickle(path/'train_clean')  # produced by rossman_data_clean.ipynb
train_df.head().T
n = len(train_df); n  # total number of training rows
```
### Experimenting with a sample
```
# Draw 2000 random rows; first 1000 act as a small train set, next 1000 as test.
idx = np.random.permutation(range(n))[:2000]
idx.sort()
small_train_df = train_df.iloc[idx[:1000]]
small_test_df = train_df.iloc[idx[1000:]]
small_cont_vars = ['CompetitionDistance', 'Mean_Humidity']
small_cat_vars = ['Store', 'DayOfWeek', 'PromoInterval']
small_train_df = small_train_df[small_cat_vars + small_cont_vars + ['Sales']]
small_test_df = small_test_df[small_cat_vars + small_cont_vars + ['Sales']]
small_train_df.head()
small_test_df.head()
# Categorify converts the categorical columns to pandas categoricals in
# place; test=True reuses the category mappings learned on train.
categorify = Categorify(small_cat_vars, small_cont_vars)
categorify(small_train_df)
categorify(small_test_df, test=True)
small_test_df.head()
small_train_df.PromoInterval.cat.categories
small_train_df['PromoInterval'].cat.codes[:5]
# FillMissing imputes continuous NaNs and adds *_na indicator columns
# (see the CompetitionDistance_na filter below).
fill_missing = FillMissing(small_cat_vars, small_cont_vars)
fill_missing(small_train_df)
fill_missing(small_test_df, test=True)
small_train_df[small_train_df['CompetitionDistance_na'] == True]
```
### Preparing full data set
```
train_df = pd.read_pickle(path/'train_clean')
test_df = pd.read_pickle(path/'test_clean')
len(train_df),len(test_df)
procs=[FillMissing, Categorify, Normalize]  # preprocessing run by the TabularList
cat_vars = ['Store', 'DayOfWeek', 'Year', 'Month', 'Day', 'StateHoliday', 'CompetitionMonthsOpen',
    'Promo2Weeks', 'StoreType', 'Assortment', 'PromoInterval', 'CompetitionOpenSinceYear', 'Promo2SinceYear',
    'State', 'Week', 'Events', 'Promo_fw', 'Promo_bw', 'StateHoliday_fw', 'StateHoliday_bw',
    'SchoolHoliday_fw', 'SchoolHoliday_bw']
cont_vars = ['CompetitionDistance', 'Max_TemperatureC', 'Mean_TemperatureC', 'Min_TemperatureC',
   'Max_Humidity', 'Mean_Humidity', 'Min_Humidity', 'Max_Wind_SpeedKm_h',
   'Mean_Wind_SpeedKm_h', 'CloudCover', 'trend', 'trend_DE',
   'AfterStateHoliday', 'BeforeStateHoliday', 'Promo', 'SchoolHoliday']
dep_var = 'Sales'
df = train_df[cat_vars + cont_vars + [dep_var,'Date']].copy()
test_df['Date'].min(), test_df['Date'].max()
# Pick a validation prefix whose length matches the test period by date.
# NOTE(review): this indexing assumes a particular (descending-date) row
# order in train_clean — confirm against rossman_data_clean.ipynb.
cut = train_df['Date'][(train_df['Date'] == train_df['Date'][len(test_df)])].index.max()
cut
valid_idx = range(cut)
df[dep_var].head()
# label_cls=FloatList with log=True -> regression target is log(Sales).
data = (TabularList.from_df(df, path=path, cat_names=cat_vars, cont_names=cont_vars, procs=procs,)
        .split_by_idx(valid_idx)
        .label_from_df(cols=dep_var, label_cls=FloatList, log=True)
        .add_test(TabularList.from_df(test_df, path=path, cat_names=cat_vars, cont_names=cont_vars))
        .databunch())
doc(FloatList)
```
## Model
```
# Cap predicted log-sales at log(1.2 * max observed sales); the learner
# squashes outputs into y_range.
max_log_y = np.log(np.max(train_df['Sales'])*1.2)
y_range = torch.tensor([0, max_log_y], device=defaults.device)
learn = tabular_learner(data, layers=[1000,500], ps=[0.001,0.01], emb_drop=0.04,
                        y_range=y_range, metrics=exp_rmspe)
learn.model
len(data.train_ds.cont_names)
# LR-range test, then three rounds of one-cycle training.
learn.lr_find()
learn.recorder.plot()
learn.fit_one_cycle(5, 1e-3, wd=0.2)
learn.save('1')
learn.recorder.plot_losses(last=-1)
learn.load('1');
learn.fit_one_cycle(5, 3e-4)
learn.fit_one_cycle(5, 3e-4)
```
(10th place in the competition was 0.108)
```
# Predictions are in log space (label_from_df(..., log=True)); exponentiate
# back to sales before writing the Kaggle submission file.
test_preds=learn.get_preds(DatasetType.Test)
test_df["Sales"]=np.exp(test_preds[0].data).numpy().T[0]
test_df[["Id","Sales"]]=test_df[["Id","Sales"]].astype("int")
test_df[["Id","Sales"]].to_csv("rossmann_submission.csv",index=False)
```
| github_jupyter |
```
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib
# NOTE(review): the 'seaborn' style name is deprecated in newer matplotlib
# releases — confirm the pinned version.
matplotlib.pyplot.style.use('seaborn')
matplotlib.rcParams['figure.figsize'] = (15, 5)
%matplotlib inline
import math
from IPython.core.interactiveshell import InteractiveShell
# Echo every expression in a cell, not just the last one.
InteractiveShell.ast_node_interactivity = "all"
```
## $a^{n} \cdot a^{m} = a^{n+m}$
## $\frac{a^{n}}{a^{m}} = a^{n-m}$
## $\left(a^{n} \right)^{m} = a^{n\cdot m}$
## $a^{x} \cdot b^{x} = \left(a \cdot b \right)^{x}$
## $\frac{a^{x}}{b^{x}} = \left(\frac{a}{b} \right)^{x}$
$a^{-n} = \frac{1}{a^{n}}$
$a^{-1} = \frac{1}{a}$
---
## $\sqrt[n]{a^{k}} = \left(\sqrt[n]{a} \right)^{k}$
## $\left(a^{k} \right)^{\frac{1}{n}} = \left(a^{\frac{1}{n}} \right)^{k} = a^{\frac{k}{n}}$
## $\sqrt[n]{a} = \sqrt[n \cdot k]{a^{k}}$
## $a^{\frac{1}{n}} = \left(a^{k} \right)^{\frac{1}{n \cdot k}} = a^{\frac{k}{n \cdot k}}$
## $\sqrt[n]{\sqrt[k]{a}} = \sqrt[n \cdot k]{a}$
## $\left( a^{\frac{1}{k}} \right)^{\frac{1}{n}} = a^{\frac{1}{k} \cdot \frac{1}{n}} = a^{\frac{1}{k \cdot n}}$
#### $\sqrt[n]{ab} = \sqrt[n]{a} \cdot \sqrt[n]{b}$
#### $ab^{\frac{1}{n}} = a^{\frac{1}{n}} \cdot b^{\frac{1}{n}}$
#### $\sqrt[n]{\frac{a}{b}} = \frac{\sqrt[n]{a}}{\sqrt[n]{b}}$
#### $\left(\frac{a}{b} \right)^{\frac{1}{x}} = \frac{a^{\frac{1}{x}}}{b^{\frac{1}{x}}}$
---
## $\log_{a}b = C \Leftrightarrow a^{C}=b$
## $\log_{a}bc = \log_{a}b + \log_{a}c$
## $\log_{a}\frac{b}{c} = \log_{a}b - \log_{a}c$
## $\log_{a^{k}}b^{m} = \frac{m}{k}\log_{a}b$
## $a^{\log_{a}b} = b$
## $\log_{a}b = \frac{\log_{x}b}{\log_{x}a}$
# Examples:
$\sqrt[4]{x} + \sqrt{x} = 6$
$x^{\frac{1}{4}} + x^{\frac{1}{2}} = 6$
$a = x^{\frac{1}{2}}$
$a^{\frac{1}{2}} + a = 6$
$a^{\frac{1}{2}} = 6 - a$
$a = (6-a)^{2}$
---
$2\sqrt{8b^3} \cdot 9\sqrt{18b}$
$2\sqrt{8b^3} \cdot 9\sqrt{18b} = 18\sqrt{2^3b^3 \cdot 18b}$
$2\sqrt{8b^3} \cdot 9\sqrt{18b} = 18\sqrt{2^3b^3 \cdot 18b} = 18\sqrt{2^3b^3 \cdot 2 \cdot 3^2b}$
$2\sqrt{8b^3} \cdot 9\sqrt{18b} = 18\sqrt{2^3b^3 \cdot 18b} = 18\sqrt{2^3b^3 \cdot 2 \cdot 3^2b} =
18\sqrt{2^4 b^4 \cdot 3^2}$
$18\sqrt{2^4 b^4 \cdot 3^2} = 18 \cdot 2^2 \cdot 3 b^2 = 216b^2$
# Log Equations
$\log_{2} (x+2) = 3$
$x+2 = 2^3$
$x = 6$
---
$\log_{9} (3^x) = 15$
$3^x = 9^{15}$
$3^x = 3^{2 \cdot 15}$
$x = 30$
---
$\log_{x} (36) = 2$
$36 = x^2$
$\pm \sqrt{36} = x$
$x = \pm 6$
$\log_{x}$: $x > 0$
---
$\log_{9} x = \frac{1}{2}$
$x = 9^{\frac{1}{2}}$
$x = \sqrt{9}$
$x = 3$
---
$\log_{5} 25 = 2x$
$25 = 5^{2x}$
$5^2 = 5^{2x}$
$2 = 2x$
---
$\log_{3} (3^{2x} - 3^x + 1) = x$
$3^{2x} - 3^x + 1 = 3^x$
$(3^{x})^2 - 2(3^x) + 1 = 0$
$a = 3^{x}$
$a^2 - 2a + 1 = 0$
$a + b = -2$
$ab = 1$
$(-1,-1)$
$(a-1)^2 = 0$
$a = 1$
$1 = 3^{x}$
$x=0$
---
$\log_{16} (3x + 1) = 2$
$\frac{1}{4} \log_{2} (3x + 1) = 2$
$\frac{4}{4} \log_{2} (3x + 1) = 2 \cdot 4$
$3x + 1 = 2^8$
$x = 85$
---
$\log_{5} x + \log_{3} x = 0$
$\frac{\log_{x} x}{\log_{x} 5} + \frac{\log_{x} x}{\log_{x} 3} = 0$
$\frac{1}{\log_{x} 5} + \frac{1}{\log_{x} 3} = 0$
$\frac{1}{\log_{x} 5} = -\frac{1}{\log_{x} 3}$
$(\frac{1}{\log_{x} 5})^{-1} = (\frac{-1}{\log_{x} 3})^{-1}$
${\log_{x} 5} = -\log_{x} 3$
${\log_{x} 5} + \log_{x} 3 = 0$
${\log_{x} 15} = 0$
$15 = x^0$ has no solution, since $x^0 = 1 \neq 15$ for every valid base; but the change of base above assumed $x \neq 1$, so we check the excluded value $x = 1$ in the original equation:
$\log_{5} 1 + \log_{3} 1 = 0$
$0 + 0 = 0$
---
$\frac{\log_{10} 8x}{\log_{10} |7x+3|} = 1$
$\log_{10} 8x = \log_{10} |7x+3|$
$10^{\log_{10} 8x} = 10^{\log_{10} |7x+3|}$
$8x = 7x+3$
$x = 3$
---
$\log_{5} x - \log_{25} x + \log_{\sqrt{5}} x = -5$
$\frac{2}{2}\log_{5} x - \frac{1}{2}\log_{5} x + \frac{2}{2}\log_{\sqrt{5}} x = -5$
$\log_{5} x \cdot(\frac{2}{2} - \frac{1}{2}) + 2\log_{5} x = -5$
$\frac{1}{2}\log_{5} x + 2\log_{5} x = -5$
$\log_{5} x \cdot (\frac{1}{2} + 2) = -5$
$2.5\log_{5} x = -5$
$\log_{5} x = -2$
$5^{\log_{5} x} = 5^{-2}$
$x = \frac{1}{5^2}$
| github_jupyter |
Code testing for https://github.com/pymc-devs/pymc3/pull/2986
```
import numpy as np
import pymc3 as pm
import pymc3.distributions.transforms as tr
import theano.tensor as tt
from theano.scan_module import until
import theano
import matplotlib.pylab as plt
import seaborn as sns
%matplotlib inline
```
# Polar transformation
```
# Polar to Cartesian
def backward(y):
    """Map polar coordinates y = [r, theta] to Cartesian [x0, x1]."""
    cart = tt.zeros(y.shape)
    cart = tt.inc_subtensor(cart[0], y[0] * tt.cos(y[1]))
    cart = tt.inc_subtensor(cart[1], y[0] * tt.sin(y[1]))
    return cart
def forward(x):
    """Map Cartesian x = [x0, x1] to polar y = [r, theta].

    The original used Python ``if`` on symbolic tensors (``y[0] != 0``,
    ``x[1] < 0``); that does not build a symbolic branch, and ``theta``
    was left unbound on the r == 0 path. ``tt.switch`` makes the
    branching symbolic and total.
    """
    y = tt.zeros(x.shape)
    r = tt.sqrt(tt.square(x[0]) + tt.square(x[1]))
    y = tt.inc_subtensor(y[0], r)
    # arccos gives theta in [0, pi]; negate when x1 < 0 to cover (-pi, 0).
    theta = tt.switch(tt.lt(x[1], 0),
                      -tt.arccos(x[0] / r),
                      tt.arccos(x[0] / r))
    # Define theta = 0 at the origin instead of leaving it undefined.
    theta = tt.switch(tt.neq(r, 0), theta, 0.)
    y = tt.inc_subtensor(y[1], theta)
    return y
# Symbolic polar input with a test value for shape/test-value inference.
y = tt.vector('polar')
y.tag.test_value=np.asarray([1., np.pi/2])
f_inv = backward(y)
# Jacobian of backward(): one gradient row per output component via scan.
J, _ = theano.scan(lambda i, f, x: tt.grad(f[i], x),
                   sequences=tt.arange(f_inv.shape[0]),
                   non_sequences=[f_inv, y])
Jacob_f1 = theano.function([y], J)
Jacob_f1(np.asarray([1., np.pi/2]))
# Same Jacobian via the pymc3 helper, for comparison and timing.
J2 = pm.theanof.jacobian(f_inv, [y])
Jacob_f2 = theano.function([y], J2)
Jacob_f2(np.asarray([1., np.pi/2]))
%timeit Jacob_f1(np.asarray([1., np.pi/2]))
%timeit Jacob_f2(np.asarray([1., np.pi/2]))
class VectorTransform(tr.Transform):
    """Transform base class whose log-|Jacobian determinant| is computed
    numerically: build the Jacobian of backward() row by row with
    theano.scan, then take log|det J|."""
    def jacobian_det(self, x):
        f_inv = self.backward(x)
        J, _ = theano.scan(lambda i, f, x: tt.grad(f[i], x),
                           sequences=tt.arange(f_inv.shape[0]),
                           non_sequences=[f_inv, x])
        return tt.log(tt.abs_(tt.nlinalg.det(J)))
class Nealfun(VectorTransform):
    """Neal's-funnel style reparameterization: component 0 carries the
    (scaled) log-width, the remaining components are scaled by
    exp(y[0]/2). forward and backward are mutual inverses."""
    name = "Neal_funnel"

    def backward(self, y):
        # Funnel space -> standardized space.
        x = tt.zeros(y.shape)
        x = tt.inc_subtensor(x[0], y[0] / 3.)
        x = tt.inc_subtensor(x[1:], y[1:] / tt.exp(y[0] / 2))
        return x

    def forward(self, x):
        # Standardized space -> funnel space (inverse of backward).
        y = tt.zeros(x.shape)
        y = tt.inc_subtensor(y[0], x[0] * 3.)
        y = tt.inc_subtensor(y[1:], tt.exp(x[0] * 3. / 2) * x[1:])
        return y
# 101-dimensional funnel example: compare scan-based vs pm.theanof Jacobians.
y = tt.vector('y')
y.tag.test_value = np.zeros(101)
nealfun = Nealfun()
f_inv = nealfun.backward(y)
J1, _ = theano.scan(lambda i, f, x: tt.grad(f[i], x),
                    sequences=tt.arange(f_inv.shape[0]),
                    non_sequences=[f_inv, y])
Jacob_f1 = theano.function([y], J1)
J2 = pm.theanof.jacobian(f_inv, [y])
Jacob_f2 = theano.function([y], J2)
%timeit Jacob_f1(np.zeros(101))
%timeit Jacob_f2(np.zeros(101))
```
# Copulas
Background reading http://twiecki.github.io/blog/2018/05/03/copulas/
More information https://github.com/tensorflow/probability/blob/master/tensorflow_probability/examples/jupyter_notebooks/Gaussian_Copula.ipynb
```
import scipy.stats as st
norm = st.norm()  # frozen standard normal shared by the helpers below

def norm_cdf(x):
    """Standard normal CDF of ``x``.

    The original returned the undefined name ``x_unif`` (a NameError at
    call time); it clearly meant the CDF of its argument.
    """
    return norm.cdf(x)
def copulas_forward_func(nsample, cov, marg1_ppf, marg2_ppf):
    """Sample a Gaussian copula: draw correlated bivariate normals, map
    them to uniforms with the normal CDF, then to the two target
    marginals through their PPFs.

    Returns (x_trans, x_unif, x): marginal-space, uniform-space, and
    normal-space samples, respectively.
    """
    gaussian = st.multivariate_normal(mean=[0, 0], cov=cov)
    # Generate random samples from multivariate normal with correlation .5
    draws = gaussian.rvs(nsample)
    uniforms = norm.cdf(draws)
    transformed = np.vstack([marg1_ppf(uniforms[:, 0]),
                             marg2_ppf(uniforms[:, 1])]).T
    return transformed, uniforms, draws
# Gaussian copula with correlation 0.725; Gumbel and Beta(10, 2) marginals.
cov = np.asarray([[1., 0.725], [0.725, 1.]])
marg1_ppf = st.gumbel_r().ppf
marg2_ppf = st.beta(a=10, b=2).ppf
x_trans, x_unif, x = copulas_forward_func(10000, cov, marg1_ppf, marg2_ppf)
# Plots: normal space, uniform (copula) space, marginal space.
sns.jointplot(x[:, 0], x[:, 1], kind='kde', stat_func=None)
sns.jointplot(x_unif[:, 0], x_unif[:, 1], kind='hex',
              stat_func=None, joint_kws=dict(gridsize=50))
sns.jointplot(x_trans[:, 0], x_trans[:, 1], kind='kde',
              stat_func=None, xlim=(-2, 6), ylim=(.6, 1.0),)
plt.tight_layout()
# Sanity-check the first marginal against the Gumbel pdf.
xrange = np.linspace(-2, 6, 200)
plt.hist(x_trans[:, 0], xrange, density='pdf')
plt.plot(xrange, st.gumbel_r.pdf(xrange));
def gumbel_cdf(value, mu, beta):
    """Gumbel (right-skew) CDF: exp(-exp(-(value - mu) / beta))."""
    z = (value - mu) / beta
    return tt.exp(-tt.exp(-z))
```
Beta CDF
```
from theano.scan_module import until
max_iter=200
value_, a, b = x_trans[:, 1], 10., 2.
value = theano.shared(np.reshape(value_, (1,len(value_))))
EPS = 3.0e-7  # convergence tolerance for the continued fraction
qab = a + b
qap = a + 1.0
qam = a - 1.0
def _step(i, az, bm, am, bz):
    # One iteration of the incomplete-beta continued fraction.
    tem = i + i
    d = i * (b - i) * value / ((qam + tem) * (a + tem))
    # NOTE(review): `=-` parses as `d = -(...)`, discarding the d computed
    # on the previous line. The classic betacf recurrence applies the even
    # and odd terms in two separate update steps; this looks like a
    # transcription bug (cf. "does not quite work" later in the notebook).
    d =- (a + i) * i * value / ((qap + tem) * (a + tem))
    ap = az + d * am
    bp = bz + d * bm
    app = ap + d * az
    bpp = bp + d * bz
    aold = az
    am = ap / bpp
    bm = bp / bpp
    az = app / bpp
    bz = tt.ones_like(bz)
    # Stop scanning once az has converged (per-element tests are summed
    # into the scalar `until` condition).
    return (az, bm, am, bz), until(tt.sum(tt.lt(tt.abs_(az - aold), (EPS * tt.abs_(az)))))
(az, bm, am, bz), _ = theano.scan(_step,
                                  sequences=[tt.arange(1, max_iter)],
                                  outputs_info=[tt.ones_like(value),
                                                tt.ones_like(value),
                                                tt.ones_like(value),
                                                1. - qab * value / qap])
def cont_fraction_beta(value_, a, b, max_iter=500):
    '''Evaluates the continued fraction form of the incomplete Beta function.
    Derived from implementation by Ali Shoaib (https://goo.gl/HxjIJx).

    Returns the last scan state (the converged continued-fraction value),
    evaluated elementwise over value_.
    '''
    EPS = 1.0e-20
    qab = a + b
    qap = a + 1.0
    qam = a - 1.0
    value = theano.shared(value_)
    def _step(i, az, bm, am, bz):
        tem = i + i
        d = i * (b - i) * value / ((qam + tem) * (a + tem))
        # NOTE(review): as in the cell above, this overwrites the d just
        # computed; the standard betacf recurrence uses both the even and
        # odd terms in separate updates. Suspected transcription bug.
        d = - (a + i) * i * value / ((qap + tem) * (a + tem))
        ap = az + d * am
        bp = bz + d * bm
        app = ap + d * az
        bpp = bp + d * bz
        aold = az
        am = ap / bpp
        bm = bp / bpp
        az = app / bpp
        bz = tt.ones_like(bz)
        return (az, bm, am, bz), until(tt.sum(tt.lt(tt.abs_(az - aold), (EPS * tt.abs_(az)))))
    (az, bm, am, bz), _ = theano.scan(_step,
                                      sequences=[tt.arange(1, max_iter)],
                                      outputs_info=[tt.ones_like(value),
                                                    tt.ones_like(value),
                                                    tt.ones_like(value),
                                                    1. - qab * value / qap])
    return az[-1]
def beta_cdf(value, a, b):
    """Regularized incomplete beta I_value(a, b) via the continued
    fraction, using the symmetry relation for value above the cutoff
    (a+1)/(a+b+2) for numerical stability (Numerical Recipes betai)."""
    # log of the prefactor: value^a * (1-value)^b / B(a, b)
    log_beta = tt.gammaln(a+b) - tt.gammaln(a) - tt.gammaln(b)
    log_beta += a * tt.log(value) + b * tt.log(1 - value)
    cdf = tt.switch(
        tt.lt(value, (a + 1) / (a + b + 2)),
        tt.exp(log_beta) * cont_fraction_beta(value, a, b) / a,
        1. - tt.exp(log_beta) * cont_fraction_beta(1. - value, b, a) / b
    )
    return cdf
def normal_ppf(value):
    """Inverse standard-normal CDF, expressed through erfcinv."""
    root2 = np.sqrt(2.)
    return -root2 * tt.erfcinv(2. * value)
# Map the sampled marginals through the hand-rolled CDFs (no normal_ppf
# here — this checks the CDF outputs directly).
functmp = theano.function([],
                          tt.stack([gumbel_cdf(x_trans[:, 0], 0., 1.),
                                    beta_cdf(x_trans[:, 1], 10., 2.)]).T
                          )
x_ = functmp()
x_
x_unif
np.sum(~np.isfinite(x_))  # count non-finite outputs from the beta CDF
with pm.Model() as model:
    # r∼Uniform(−1,1)
    r = pm.Uniform('r',lower=-1, upper=1)
    cov = pm.Deterministic('cov',
                           tt.stacklists([[1., r],
                                          [r, 1.]]))
    a = pm.HalfNormal('alpha', 5., testval=10.)
    b = pm.HalfNormal('beta', 2.5, testval=2.)
    loc = pm.Normal('loc', 0., 5., testval=0.)
    scale = pm.HalfNormal('scale', 2.5, testval=1.)
    # Transform observed marginals into copula (normal) space, then place
    # a correlated MvNormal likelihood on them plus explicit marginals.
    tr_func = normal_ppf(
        tt.stack([gumbel_cdf(x_trans[:, 0], loc, scale),
                  beta_cdf(x_trans[:, 1], a, b)]).T
    )
    pm.MvNormal('obs', np.zeros(2), cov=cov, observed=tr_func)
    pm.Gumbel('marg0', loc, scale, observed=x_trans[:, 0])
    pm.Beta('marg1', a, b, observed=x_trans[:, 1])
```
The beta CDF does not quite work - use another distribution instead
```
from scipy.special import logit
xrange = np.linspace(0, 1, 200)
plt.hist(x_trans[:, 1], xrange, density='pdf')
# Logit-normal pdf: normal pdf of logit(x) times the Jacobian 1/(x(1-x)).
logitnormpdf = st.norm.pdf(logit(xrange), loc=1.725, scale=.8) * 1/(xrange * (1-xrange))
plt.plot(xrange, logitnormpdf);
def logitnorm_cdf(value, mu, sd):
    """Logit-normal CDF: Phi((logit(value) - mu) / sd), written via erf."""
    z = (pm.math.logit(value) - mu) / (np.sqrt(2) * sd)
    return .5 + .5 * tt.erf(z)
# Transform the observed marginals to normal space with fixed parameters
# and eyeball the result.
tr_func = normal_ppf(
    tt.stack([gumbel_cdf(x_trans[:, 0], 0., 1.),
              logitnorm_cdf(x_trans[:, 1], 1.725, .8)]).T
)
functmp = theano.function([], tr_func)
x_ = functmp()
sns.jointplot(x_[:, 0], x_[:, 1], kind='kde', stat_func=None);
np.sum(~np.isfinite(x_[:, 1]))  # the logit-normal CDF should be finite everywhere
# Copula model with logit-normal second marginal; MAP fit and overlay.
with pm.Model() as model:
    # r∼Uniform(−1,1)
    r = pm.Uniform('r',lower=-1, upper=1)
    cov = pm.Deterministic('cov',
                           tt.stacklists([[1., r],
                                          [r, 1.]]))
    loc = pm.Normal('loc', 0., 5., testval=0.)
    scale = pm.HalfNormal('scale', 2.5, testval=1.)
    mu = pm.Normal('mu', 1., 1., testval=1.725)
    sd = pm.HalfNormal('sd', .5, testval=.8)
    # Correlated likelihood in normal space plus explicit marginal terms.
    tr_func = normal_ppf(
        tt.stack([gumbel_cdf(x_trans[:, 0], loc, scale),
                  logitnorm_cdf(x_trans[:, 1], mu, sd)]).T
    )
    pm.MvNormal('obs', np.zeros(2), cov=cov, observed=tr_func)
    pm.Gumbel('marg0', loc, scale, observed=x_trans[:, 0])
    pm.LogitNormal('marg1', mu, sd, observed=x_trans[:, 1])
with model:
    map1 = pm.find_MAP()
map1
# Overlay the MAP-fit marginal densities on the sampled histograms.
_, ax = plt.subplots(1, 2, figsize=(10, 3))
x0 = np.linspace(-2, 6, 200)
ax[0].hist(x_trans[:, 0], x0, density='pdf')
ax[0].plot(x0, st.gumbel_r.pdf(x0, loc=map1['loc'], scale=map1['scale']))
x1 = np.linspace(0, 1, 200)
ax[1].hist(x_trans[:, 1], x1, density='pdf')
logitnormpdf = st.norm.pdf(logit(x1), loc=map1['mu'], scale=map1['sd']) * 1/(x1 * (1-x1))
ax[1].plot(x1, logitnormpdf);
# Marginals-only model, for comparison against the joint copula fit.
with pm.Model() as model_marg:
    loc = pm.Normal('loc', 0., 5., testval=0.)
    scale = pm.HalfNormal('scale', 2.5, testval=1.)
    mu = pm.Normal('mu', 1., 1., testval=1.725)
    sd = pm.HalfNormal('sd', .5, testval=.8)
    pm.Gumbel('marg0', loc, scale, observed=x_trans[:, 0])
    pm.LogitNormal('marg1', mu, sd, observed=x_trans[:, 1])
    map_ = pm.find_MAP()
map_
# Overlay the MAP-fit marginals.
_, ax = plt.subplots(1, 2, figsize=(10, 3))
x0 = np.linspace(-2, 6, 200)
ax[0].hist(x_trans[:, 0], x0, density='pdf')
ax[0].plot(x0, st.gumbel_r.pdf(x0, loc=map_['loc'], scale=map_['scale']))
x1 = np.linspace(0, 1, 200)
ax[1].hist(x_trans[:, 1], x1, density='pdf')
logitnormpdf = st.norm.pdf(logit(x1), loc=map_['mu'], scale=map_['sd']) * 1/(x1 * (1-x1))
ax[1].plot(x1, logitnormpdf);
from pymc3.theanof import gradient
def jacobian_det(f_inv_x, x):
    """Elementwise log|d f_inv_x / d x| for an elementwise transform
    (gradient of the sum reshaped back to x gives the per-element
    derivative). Used as a pm.Potential change-of-variables correction."""
    grad = tt.reshape(gradient(tt.sum(f_inv_x), [x]), x.shape)
    return tt.log(tt.abs_(grad))
# Shared containers for the observed data so the Potentials can reference
# the same symbolic inputs as the transformed likelihood.
xt_0 = theano.shared(x_trans[:, 0])
xt_1 = theano.shared(x_trans[:, 1])
with pm.Model() as model2:
    # r∼Uniform(−1,1)
    r = pm.Uniform('r',lower=-1, upper=1)
    cov = pm.Deterministic('cov',
                           tt.stacklists([[1., r],
                                          [r, 1.]]))
    loc = pm.Normal('loc', 0., 5., testval=0.)
    scale = pm.HalfNormal('scale', 2.5, testval=1.)
    mu = pm.Normal('mu', 1., .5, testval=1.725)
    sd = pm.HalfNormal('sd', .5, testval=.8)
    tr_func = normal_ppf(
        tt.stack([gumbel_cdf(xt_0, loc, scale),
                  logitnorm_cdf(xt_1, mu, sd)]).T
    )
    pm.MvNormal('obs', np.zeros(2), cov=cov, observed=tr_func)
    # Jacobian corrections replace the explicit marginal likelihoods used
    # in the previous model.
    pm.Potential('jacob_det0', jacobian_det(normal_ppf(gumbel_cdf(xt_0, loc, scale)), xt_0))
    pm.Potential('jacob_det1', jacobian_det(normal_ppf(logitnorm_cdf(xt_1, mu, sd)), xt_1))
    map_ = pm.find_MAP()
# Overlay the MAP-fit marginals.
_, ax = plt.subplots(1, 2, figsize=(10, 3))
x0 = np.linspace(-2, 6, 200)
ax[0].hist(x_trans[:, 0], x0, density='pdf')
ax[0].plot(x0, st.gumbel_r.pdf(x0, loc=map_['loc'], scale=map_['scale']))
x1 = np.linspace(0, 1, 200)
ax[1].hist(x_trans[:, 1], x1, density='pdf')
logitnormpdf = st.norm.pdf(logit(x1), loc=map_['mu'], scale=map_['sd']) * 1/(x1 * (1-x1))
ax[1].plot(x1, logitnormpdf);
```
Kumaraswamy distribution
```
from scipy.special import logit
xrange = np.linspace(0, 1, 200)
plt.hist(x_trans[:, 1], xrange, density='pdf')
# Kumaraswamy pdf a*b*x^(a-1)*(1-x^a)^(b-1): a beta-like distribution
# whose CDF has a closed form (used instead of the broken beta CDF above).
Kumaraswamypdf = lambda x, a, b: a*b*np.power(x, a-1)*np.power(1-np.power(x, a), b-1)
plt.plot(xrange, Kumaraswamypdf(xrange, 8, 2));
def Kumaraswamy_cdf(value, a, b):
    """Kumaraswamy CDF: 1 - (1 - value**a)**b (closed form)."""
    inner = 1 - tt.pow(value, a)
    return 1 - tt.pow(inner, b)
# Check the transform to normal space with fixed parameters.
tr_func = normal_ppf(
    tt.stack([gumbel_cdf(x_trans[:, 0], 0., 1.),
              Kumaraswamy_cdf(x_trans[:, 1], 8, 2)]).T
)
functmp = theano.function([], tr_func)
x_ = functmp()
sns.jointplot(x_[:, 0], x_[:, 1], kind='kde', stat_func=None);
np.sum(~np.isfinite(x_[:, 1]))
# Marginals-only MAP fit with a Kumaraswamy second marginal.
with pm.Model() as model_marg:
    a = pm.HalfNormal('alpha', 5., testval=10.)
    b = pm.HalfNormal('beta', 2.5, testval=2.)
    loc = pm.Normal('loc', 0., 5., testval=0.)
    scale = pm.HalfNormal('scale', 2.5, testval=1.)
    pm.Gumbel('marg0', loc, scale, observed=x_trans[:, 0])
    pm.Kumaraswamy('marg1', a, b, observed=x_trans[:, 1])
    map_ = pm.find_MAP()
# Overlay the MAP-fit marginals.
_, ax = plt.subplots(1, 2, figsize=(10, 3))
x0 = np.linspace(-2, 6, 200)
ax[0].hist(x_trans[:, 0], x0, density='pdf')
ax[0].plot(x0, st.gumbel_r.pdf(x0, loc=map_['loc'], scale=map_['scale']))
x1 = np.linspace(0, 1, 200)
ax[1].hist(x_trans[:, 1], x1, density='pdf')
ax[1].plot(x1, Kumaraswamypdf(x1, map_['alpha'], map_['beta']));
# Full copula model: Jacobian potentials plus Kumaraswamy marginal;
# MAP fit, then full MCMC.
with pm.Model() as model2:
    # r∼Uniform(−1,1)
    r = pm.Uniform('r',lower=-1, upper=1)
    cov = pm.Deterministic('cov',
                           tt.stacklists([[1., r],
                                          [r, 1.]]))
    a = pm.HalfNormal('alpha', 5.)
    b = pm.HalfNormal('beta', 2.5)
    loc = pm.Normal('loc', 0., 5.)
    scale = pm.HalfNormal('scale', 2.5)
    tr_func = normal_ppf(
        tt.stack([gumbel_cdf(xt_0, loc, scale),
                  Kumaraswamy_cdf(xt_1, a, b)]).T
    )
    pm.MvNormal('obs', np.zeros(2), cov=cov, observed=tr_func)
    pm.Potential('jacob_det0', jacobian_det(normal_ppf(gumbel_cdf(xt_0, loc, scale)), xt_0))
    pm.Potential('jacob_det1', jacobian_det(normal_ppf(Kumaraswamy_cdf(xt_1, a, b)), xt_1))
    map_ = pm.find_MAP()
# Overlay the MAP-fit marginals.
_, ax = plt.subplots(1, 2, figsize=(10, 3))
x0 = np.linspace(-2, 6, 200)
ax[0].hist(x_trans[:, 0], x0, density='pdf')
ax[0].plot(x0, st.gumbel_r.pdf(x0, loc=map_['loc'], scale=map_['scale']))
x1 = np.linspace(0, 1, 200)
ax[1].hist(x_trans[:, 1], x1, density='pdf')
ax[1].plot(x1, Kumaraswamypdf(x1, map_['alpha'], map_['beta']));
map_
# Full MCMC fit, then overlay the posterior-mean marginals.
with model2:
    trace = pm.sample()
_, ax = plt.subplots(1, 2, figsize=(10, 3))
x0 = np.linspace(-2, 6, 200)
ax[0].hist(x_trans[:, 0], x0, density='pdf')
ax[0].plot(x0, st.gumbel_r.pdf(x0, loc=trace['loc'].mean(), scale=trace['scale'].mean()))
x1 = np.linspace(0, 1, 200)
ax[1].hist(x_trans[:, 1], x1, density='pdf')
ax[1].plot(x1, Kumaraswamypdf(x1, trace['alpha'].mean(), trace['beta'].mean()));
```
| github_jupyter |
# Mean Normalization
In machine learning we use large amounts of data to train our models. Some machine learning algorithms may require that the data is *normalized* in order to work correctly. The idea of normalization, also known as *feature scaling*, is to ensure that all the data is on a similar scale, *i.e.* that all the data takes on a similar range of values. For example, we might have a dataset that has values between 0 and 5,000. By normalizing the data we can make the range of values be between 0 and 1.
In this lab you will be performing a particular form of feature scaling known as *mean normalization*. Mean normalization will not only scale the data but will also ensure your data has zero mean.
# To Do:
You will start by importing NumPy and creating a rank 2 ndarray of random integers between 0 and 5,000 (inclusive) with 1000 rows and 20 columns. This array will simulate a dataset with a wide range of values. Fill in the code below
```
# import NumPy into Python
# Create a 1000 x 20 ndarray with random integers in the half-open interval [0, 5001).
X =
# print the shape of X
```
Now that you created the array we will mean normalize it. We will perform mean normalization using the following equation:
$\mbox{Norm_Col}_i = \frac{\mbox{Col}_i - \mu_i}{\sigma_i}$
where $\mbox{Col}_i$ is the $i$th column of $X$, $\mu_i$ is average of the values in the $i$th column of $X$, and $\sigma_i$ is the standard deviation of the values in the $i$th column of $X$. In other words, mean normalization is performed by subtracting from each column of $X$ the average of its values, and then by dividing by the standard deviation of its values. In the space below, you will first calculate the average and standard deviation of each column of $X$.
```
# Average of the values in each column of X
ave_cols =
# Standard Deviation of the values in each column of X
std_cols =
```
If you have done the above calculations correctly, then `ave_cols` and `std_cols` should both be vectors with shape `(20,)` since $X$ has 20 columns. You can verify this by filling in the code below:
```
# Print the shape of ave_cols
# Print the shape of std_cols
```
You can now take advantage of Broadcasting to calculate the mean normalized version of $X$ in just one line of code using the equation above. Fill in the code below
```
# Mean normalize X
X_norm =
```
If you have performed the mean normalization correctly, then the average of all the elements in $X_{\tiny{\mbox{norm}}}$ should be close to zero. You can verify this by filling in the code below:
```
# Print the average of all the values of X_norm
# Print the minimum value of each column of X_norm
# Print the maximum value of each column of X_norm
```
You should note that since $X$ was created using random integers, the above values will vary.
# Data Separation
After the data has been mean normalized, it is customary in machine learning to split our dataset into three sets:
1. A Training Set
2. A Cross Validation Set
3. A Test Set
The dataset is usually divided such that the Training Set contains 60% of the data, the Cross Validation Set contains 20% of the data, and the Test Set contains 20% of the data.
In this part of the lab you will separate `X_norm` into a Training Set, Cross Validation Set, and a Test Set. Each data set will contain rows of `X_norm` chosen at random, making sure that we don't pick the same row twice. This will guarantee that all the rows of `X_norm` are chosen and randomly distributed among the three new sets.
You will start by creating a rank 1 ndarray that contains a random permutation of the row indices of `X_norm`. You can do this by using the `np.random.permutation()` function. The `np.random.permutation(N)` function creates a random permutation of integers from 0 to `N - 1`. Let's see an example:
```
# We create a random permutation of integers 0 to 4
np.random.permutation(5)
```
# To Do
In the space below create a rank 1 ndarray that contains a random permutation of the row indices of `X_norm`. You can do this in one line of code by extracting the number of rows of `X_norm` using the `shape` attribute and then passing it to the `np.random.permutation()` function. Remember the `shape` attribute returns a tuple with two numbers in the form `(rows,columns)`.
```
# Create a rank 1 ndarray that contains a random permutation of the row indices of `X_norm`
row_indices =
```
Now you can create the three datasets using the `row_indices` ndarray to select the rows that will go into each dataset. Remember that the Training Set contains 60% of the data, the Cross Validation Set contains 20% of the data, and the Test Set contains 20% of the data. Each set requires just one line of code to create. Fill in the code below.
```
# Make any necessary calculations.
# You can save your calculations into variables to use later.
# Create a Training Set
X_train =
# Create a Cross Validation Set
X_crossVal =
# Create a Test Set
X_test =
```
If you performed the above calculations correctly, then `X_train` should have 600 rows and 20 columns, `X_crossVal` should have 200 rows and 20 columns, and `X_test` should have 200 rows and 20 columns. You can verify this by filling in the code below:
```
# Print the shape of X_train
# Print the shape of X_crossVal
# Print the shape of X_test
```
| github_jupyter |
In this notebook we will use the boundary exploration algorithm to fully explore the parameter space of a generic Markov chain.
Last updated by: Jonathan Liu, 10/22/2020
```
#Import necessary packages
%matplotlib inline
import numpy as np
from scipy.spatial import ConvexHull
import matplotlib.pyplot as plt
import scipy.special as sps
from IPython.core.debugger import set_trace
from numba import njit, prange
import numba as numba
from datetime import date
import time as Time
#Set number of threads
numba.set_num_threads(28)
#Simulation for calculating onset times for a generic Markov chain
#This function is now deprecated since the Gillespie algorithm is much more accurate and efficient
def CalculatetOn_GenericMarkovChain(time,dt,Q,n,N_cells):
    #Calculates the onset time for a linear Markov chain with forward and backward rates.
    #The transition rate can be time-varying, but is the same
    #global rate for each transition. The model assumes n states, beginning
    #in the 1st state. Using finite timesteps and a Markov chain formalism, it
    #simulates N_cells realizations of the overall time it takes to reach the
    #nth state.
    # Inputs:
    # time: simulation time vector
    # dt: simulation timestep
    # Q: 3D transition rate matrix, where q_kji is the transition rate at time k from state i to j for i =/= j and
    # q_kii is the sum of transition rates out of state i
    # n: number of states
    # N_cells: number of cells to simulate
    # Outputs:
    # t_on: time to reach the final state for each cell (length = N_cells)
    #NOTE(review): first-order scheme; assumes -Q[k,i,i]*dt <= 1 for every k,i,
    #otherwise np.random.binomial below raises on an invalid probability — confirm
    #the caller's dt is small enough for the rates used.
    ## Setup variables
    t_on = np.empty(N_cells) #Time to transition to final ON state for each cell
    t_on[:] = np.nan #nan marks cells that never reach the final state within `time`
    state = np.zeros(N_cells, dtype=int) #State vector describing current state of each cell
    finished_states = np.zeros(N_cells, dtype=int) #Vector storing finished statuses of each cell
    ## Run simulation
    #Loop over time
    #q = waitbar(0,'Running simulation...')
    for i in range(len(time)):
        if np.sum(finished_states) == N_cells: #If all cells have turned on, stop the simulation
            #print('Halting simulation since all cells have turned on.')
            break
        #Simulate binomial random variable to see if each cell has transitioned
        #If the input transition rate is a nan, this will manifest as never
        #transitioning.
        #Find indices of cells that have not finished yet
        #(each j below is a length-1 array; numpy indexing accepts it directly)
        incompleteCells = np.transpose(np.where(finished_states != 1))
        #Loop over cells
        for j in incompleteCells:
            #The probability that a state i switches is given by -Q_ii * dt
            p = -Q[i,state[j],state[j]] * dt #Probability of transition at this timestep for this cell
            transitioned = np.random.binomial(1,p,1) #Binary transition decision for this cell
            #The state to transition to is given by the ratio of the individual rates in the column j over the total rate -Q_ii
            if transitioned == 1:
                Q_temp = np.copy(Q) #Temporary matrix where we will remove Q_ii for this cell and state
                Q_temp[i,state[j],state[j]] = 0
                pState = np.squeeze(Q_temp[i,:,state[j]]/-Q[i,state[j],state[j]])
                #print(Q[i,:,:])
                newState = np.random.choice(n, 1, p=pState)
                #print('cell ' + str(j) + ' transitioned from state ' + str(state[j]) + \
                #      ' to state ' + str(newState) + 'at time ' + str(time[i]))
                state[j] = newState
                #Record the time if it transitioned to the new state
                if newState == n-1:
                    t_on[j] = time[i]
        #See if any states have reached the ON state
        finished_states[state == n-1] = 1
    return t_on
#Function to generate a random transition matrix for a generic Markov chain with n states, and an irreversible
#transition into the final state.
#Inputs:
# n: number of states
# k_min: minimum transition rate
# k_max: maximum transition rate
#pseudocode
#generate 2D matrix based on n
#loop over each index, if indices are connected by 1 then generate a value (except final state)
#Calculate diagonal elements from summing columns to zero
#
def MakeRandomTransitionMatrix(n, k_min, k_max):
    """Build a random n x n rate matrix for a nearest-neighbor Markov chain.

    Off-diagonal entries Q[row, col] with |row - col| == 1 and col < n-1 are
    drawn uniformly from [k_min, k_max]. The last column is left at zero so
    state n-1 is absorbing. Each non-absorbing diagonal is then set to minus
    its column sum, making every non-final column sum to zero.
    """
    Q = np.zeros((n, n))
    #Populate nearest-neighbor rates (final column stays zero: absorbing state)
    for row in range(n):
        for col in range(n - 1):
            if abs(row - col) == 1:
                Q[row, col] = np.random.uniform(k_min, k_max)
    #Diagonal of each non-absorbing column balances the outflow
    for s in range(n - 1):
        Q[s, s] = -np.sum(Q[:, s])
    return Q
#Function to mutate a transition matrix for a generic Markov chain with an irreversible transition
#into the final absorbing state. For each off-diagonal element in Q, the function picks a random
#number between [1-s,1+s] and multiplies it with the element.
#Inputs:
# Q: n x n transition matrix, where n is the number of states
# s: mutation factor (q_ij -> q_ij * uniform(1-s, 1+s))
#Returns:
# A NEW mutated matrix; the input Q is left untouched.
#BUG FIX: previously the input array was modified in place. Callers pass views
#of stored boundary matrices (e.g. np.squeeze of a row of Q_values_total), so
#in-place mutation silently corrupted saved results. We now mutate a copy.
def MutateTransitionMatrix(Q,s):
    Q = np.array(Q, copy=True)
    #Loop over transition matrix indices
    for i in range(Q.shape[0]):
        for j in range(Q.shape[1]):
            if i != j:
                #Scale each existing rate (zero entries stay zero)
                Q[i,j] = Q[i,j] * np.random.uniform(1-s,1+s)
            else:
                #Clear the diagonal before recomputing it below
                Q[i,j] = 0
    #Recalculate the diagonal entries (final column stays zero: absorbing state)
    for i in range(Q.shape[0]-1):
        Q[i,i] = -np.sum(Q[:,i])
    return Q
#Simulation for calculating onset times for a generic Markov chain using Gillespie algorithm
#Using vectorized formulation for faster speed
def CalculatetOn_GenericMarkovChainGillespie(Q,n,N_cells):
    #Calculates the onset time for a linear Markov chain with forward and backward rates.
    #The model assumes n states, beginning in the 1st state. Using the Gillespie
    #algorithm and a Markov chain formalism, it simulates N_cells realizations of
    #the overall time it takes to reach the nth (absorbing) state.
    #This version only works with steady (time-independent) transition rates; see
    #CalculatetOn_GenericMarkovChainGillespieTime for time-varying rates.
    # Inputs:
    # Q: transition rate matrix, where q_ji is the transition rate from state i to j for i =/= j and
    # q_ii is the sum of transition rates out of state i
    # n: number of states
    # N_cells: number of cells to simulate
    # Outputs:
    # t_on: time to reach the final state for each cell (length = N_cells)
    ## Setup variables
    t_on = np.zeros(N_cells) #Time to transition to final ON state for each cell
    state = np.zeros(N_cells, dtype=int) #State vector describing current state of each cell
    ## Run simulation
    # We simulate waiting times for each transition for each cell and stop once
    # every cell has reached the final state n-1.
    #Work on a private copy and zero its diagonal: self transitions don't count,
    #and a zero rate yields an infinite waiting time below, so it is never chosen.
    #BUG FIX: previously the caller's matrix was modified in place, clobbering Q
    #for any later reuse (the same Q is passed to other simulators downstream).
    Q = np.array(Q, copy=True)
    for i in range(n):
        Q[i,i] = 0
    #Transition rates out of each cell's current state (one row per cell)
    Q_states = np.zeros((N_cells,n))
    while np.sum(state) < (n-1)*N_cells:
        Q_states = np.transpose(Q[:,state])
        #Generate random numbers in [0,1] for each cell
        randNums = np.random.random(Q_states.shape)
        #Exponential waiting time for each candidate transition
        #(suppress the divide-by-zero warning produced by zero rates)
        with np.errstate(divide='ignore'):
            tau = (1/Q_states) * np.log(1/randNums)
        #The shortest waiting time determines each cell's next state
        tau_min = np.amin(tau, axis=1)
        newState = np.argmin(tau, axis=1)
        #An all-infinite row means no transitions are possible: the cell is in
        #the absorbing final state, so pin it there with zero extra waiting time
        newState[tau_min==np.inf] = n-1
        tau_min[tau_min==np.inf] = 0
        #Update the state and accumulate the waiting time
        state = newState
        t_on += tau_min
    return t_on
#Simulation for calculating onset times for a generic Markov chain using Gillespie algorithm
#Using vectorized formulation for faster speed
def CalculatetOn_GenericMarkovChainGillespieTime(Q,n,t_d,N_cells):
    #Calculates the onset time for a linear Markov chain with forward and backward rates.
    #The transition rate can be time-varying, but is the same
    #global rate for each transition. The model assumes n states, beginning
    #in the 1st state. Using the Gillespie algorithm and a Markov chain formalism, it
    #simulates N_cells realizations of the overall time it takes to reach the
    #nth state.
    #This considers time-dependent transition rates parameterized by a diffusion timescale t_d.
    #The time-dependent rate has the form r ~ (1 - exp(-t/t_d)). For now, we assume only the forwards
    #rates have the time-dependent profile, and that backwards rates are time-independent.
    # Inputs:
    # Q: 2D (n x n) matrix of peak transition rates; the time dependence is applied
    #    analytically below (despite the legacy "3D" description in older comments)
    # n: number of states
    # t_d: diffusion timescale of time-dependent transition rate
    # N_cells: number of cells to simulate
    # Outputs:
    # t_on: time to reach the final state for each cell (length = N_cells)
    #NOTE(review): this zeroes Q's diagonal in place, mutating the caller's array —
    #confirm callers do not rely on the original diagonal afterwards.
    ## Setup variables
    t_on = np.zeros(N_cells) #Time to transition to final ON state for each cell
    time = np.zeros(N_cells) #Vector of current time for each cell
    state = np.zeros(N_cells, dtype=int) #State vector describing current state of each cell
    ## Run simulation
    # We will simulate waiting times for each transition for each cell and stop once each cell has
    # reached the final state
    #Zero the diagonal entries in the transition matrix since self transitions don't count
    #(a zero rate yields an infinite waiting time below, so it is never selected)
    for i in range(n):
        Q[i,i] = 0
    #Define the diffusion timescale matrix t_d (finite for forwards rates, effectively 0 for backwards rates)
    t_d_mat = np.zeros((n,n))
    t_d_mat[:,:] = 0.00000001 #Non forwards transitions are essentially 0 diffusive timescale
    for i in range(n):
        for j in range(n-1):
            #Forwards rates
            if i == j + 1:
                t_d_mat[i,j] = t_d
    #Construct the transition vector out of each cell's current state
    Q_states = np.zeros((N_cells,n))
    #Construct the diffusion timescale vector for each cell
    t_d_states = np.zeros((N_cells,n))
    while np.sum(state) < (n-1)*N_cells:
        Q_states = np.transpose(Q[:,state])
        t_d_states = np.transpose(t_d_mat[:,state])
        #Construct the current time vector for each cell
        time_states = np.transpose(np.tile(time,(n,1)))
        #Generate random numbers in [0,1] for each cell
        randNums = np.random.random(Q_states.shape)
        #Calculate waiting times for each entry in the transition matrix
        #Make sure to suppress divide by zero warning
        #For the exponential profile, this uses the lambertw/productlog function. The steady-state
        #case corresponds to t_d -> 0.
        #(The waiting time tau inverts the integrated hazard
        # int_t^{t+tau} k*(1 - exp(-t'/t_d)) dt' = log(1/randNum),
        # whose closed form involves the Lambert W function.)
        with np.errstate(divide='ignore', invalid='ignore'):
            #Temp variables for readability
            a = 1/Q_states * np.log(1/randNums)
            b = -np.exp(-(a + t_d_states * np.exp(-time_states/t_d_states) + time_states)/t_d_states)
            tau = np.real(t_d_states * sps.lambertw(b) + a + t_d_states *\
                          np.exp(-time_states / t_d_states))
        #Find the shortest waiting time to figure out which state we transitioned to for each cell
        tau_min = np.amin(tau, axis=1)
        newState = np.argmin(tau, axis=1)
        #Replace infinities with zero, corresponding to having reached the final state
        newState[tau_min==np.inf] = n-1
        tau_min[tau_min==np.inf] = 0
        #Update the state and add the waiting time to the overall waiting time
        state = newState
        t_on += tau_min
        time += tau_min
    return t_on
#Verify time-dependent Gillespie algorithm with naive algorithm
n = 4
k_min = 0.5
k_max = 5
Q = MakeRandomTransitionMatrix(n,k_min,k_max)
N_cells = 1000
t_d = 7
dt = 0.1
time = np.arange(0,20,dt)
#Construct the time-dependent transition matrix for the naive simulation:
#forward rates ramp as (1 - exp(-t/t_d)); backward rates are constant.
Q_timedep = np.zeros((len(time),n,n))
for i in range(len(time)):
    for j in range(n):
        for k in range(n):
            if j == k + 1:
                #Forward rate with diffusive ramp-up
                Q_timedep[i,j,k] = Q[j,k] * (1 - np.exp(-time[i]/t_d))
            elif j == k:
                Q_timedep[i,j,k] = 0
            else:
                Q_timedep[i,j,k] = Q[j,k]
#Fix the diagonal entries so each column sums to zero at every timepoint
for i in range(len(time)):
    for j in range(n):
        Q_timedep[i,j,j] = -np.sum(Q_timedep[i,:,j])
#Run all three simulators on the same rate matrix
#NOTE(review): the Gillespie calls zero Q's diagonal in place; Q_timedep was
#built beforehand, so the naive run below is unaffected by that mutation.
t_on_static = CalculatetOn_GenericMarkovChainGillespie(Q,n,N_cells)
t_on_timedep = CalculatetOn_GenericMarkovChainGillespieTime(Q,n,t_d,N_cells)
t_on_naive = CalculatetOn_GenericMarkovChain(time,dt,Q_timedep,n,N_cells)
#Plot distribution: Gillespie time-dependent and naive time-dependent should agree
bins = np.arange(0,15,1)
plt.figure()
plt.hist(t_on_static,bins=bins,label='static',alpha=0.5)
plt.hist(t_on_timedep,bins=bins,label='Gillespie time-dependent',alpha=0.5)
plt.hist(t_on_naive,bins=bins,label='naive time-dependent',alpha=0.5)
plt.xlabel('onset time')
plt.legend()
```
# Parameter sweeping of Markov chain model
We're going to test the n=3 model, which has 3 free parameters, using the ideal limit of equal, irreversible rates, and considering small deviations in the form of backward rates.
```
# Parameter sweep over transition rate and backward-rate fraction for the n=3 chain
n = 3
k_min = 0.1
k_max = 5.1
k_step = 0.1
k_range = np.arange(k_min,k_max,k_step)
N_cells = 10000
dt = 0.1
time = np.arange(0,50,dt)
#Small deviation from ideal Gamma limit
k_backFrac = np.arange(0,1,0.25) #Fraction backwards/forwards transition
means_dev = np.zeros((len(k_range),len(k_backFrac)))
CV2s_dev = np.zeros((len(k_range),len(k_backFrac)))
for i in range(len(k_range)):
    for j in range(len(k_backFrac)):
        #3-state chain: forward rate k, backward rate k*backFrac, absorbing final state
        Q = np.array([[-k_range[i],k_range[i]*k_backFrac[j],0],\
                     [k_range[i],-(k_range[i]+k_range[i]*k_backFrac[j]),0],\
                     [0,k_range[i],0]])
        t_on = CalculatetOn_GenericMarkovChainGillespie(Q,n,N_cells)
        #CV2 = variance / mean^2 (squared coefficient of variation)
        means_dev[i,j] = np.mean(t_on)
        CV2s_dev[i,j] = np.var(t_on) / np.mean(t_on)**2
#Plot results against the irreversible (Gamma-distribution) limit:
#mean = (n-1)/k, CV2 = 1/(n-1)
plt.figure()
plt.plot((n-1)/k_range,(1/(n-1))*np.ones(k_range.shape),'k--',label='Gamma limit')
for i in range(len(k_backFrac)):
    plt.plot(means_dev[:,i],CV2s_dev[:,i],'.',label='backFrac = ' + str(k_backFrac[i]))
plt.xlabel('mean')
plt.ylabel('CV^2')
plt.legend()
```
It looks like the effect of increasing the backwards rates is to increase noise independent of mean, as expected.
```
#Sanity checks on scipy's Lambert W at/near zero (used in the time-dependent sampler)
print(sps.lambertw(0))
print(sps.lambertw(0.00001))
print(sps.lambertw(np.exp(-0)))
#Boundary exploration algorithm
#This function explores the 2D mean-CV2 space for a given model by following the same
#procedure as Eck et. al. 2020 (eLife). Briefly, it lays down a random parameter set, then
#slices the space into vertical and horizontal slices, finding the extremal points in each slice.
#It then mutates the parameter values for these points and recalculates the parameter space.
#This iterates until the total number of iterations has passed. The function keeps track
#of the boundary parameter values, as well as the means, CV2s, and overall parameter space areas.
#The models are generic Markov chains with nearest-neighbor transitions, and an irreversible transition
#into the final state.
#For now we are only considering the steady state.
#
#FIXES relative to the previous revision:
# 1) The four extremal-point branches used "<=" bounds, so with newpoints=1 the
#    max-mean branch (p==3) was captured by the min-mean branch and, worse, its
#    np.intersect1d result was never assigned to `index`. This biased exploration
#    toward the upper-left of the (mean, CV2) plane.
# 2) Extremal selection used np.where on a subset array, producing subset-relative
#    indices that were then applied to the full arrays. Now maps argmin/argmax
#    back through the candidate indices.
# 3) The parameter-bounds check reused loop variables i and j, clobbering the
#    slice and iteration counters; renamed to avoid shadowing.
# 4) The final boundary plot indexed the already-reduced arrays with stale hull
#    vertices (possible IndexError); the hull is now recomputed for the plot.
def BoundaryExploration(n_initial,iterations,slices,n_states,s,k_start,k_min,k_max,mean_max,\
                       N_cells,plots=True):
    #Inputs:
    # n_initial: number of initial points
    # iterations: number of iterations
    # slices: number of horizontal or vertical slices
    # n_states: number of Markov chain states
    # s: mutation width (each rate is scaled by a uniform factor in [1-s,1+s])
    # k_start: starting mean value for transition rates
    # k_min: minimum transition rate
    # k_max: maximum transition rate
    # mean_max: maximum bound on mean onset time
    # N_cells: number of cells in each simulation
    # plots: Boolean to display plots (True by default)
    #Returns:
    # (mean_onset_total, CV2_onset_total, Q_values_total, area_total): the retained
    # boundary means, CV2s, transition matrices, and convex-hull area per iteration.
    #Generate initial set of simulated data with random parameters.
    #Initialize with nan arrays so failed/skipped simulations can be dropped later.
    mean_onset_total = np.zeros(n_initial) #Mean t_on values
    CV2_onset_total = np.zeros(n_initial) #CV2 of t_on values
    Q_values_total = np.zeros((n_initial,n_states,n_states)) #Transition matrices
    mean_onset_total[:] = np.nan
    CV2_onset_total[:] = np.nan
    Q_values_total[:,:,:] = np.nan
    area_total = [] #Area of boundary after each iteration
    #(prange degrades to a plain range outside of an njit context)
    for i in prange(n_initial):
        print('Initializing point ' + str(i+1) + ' of ' + str(n_initial), end='\r')
        #Initialize input parameters randomly around k_start with 50% width
        k_min_start = k_start * 0.5
        k_max_start = k_start * 1.5
        Q = MakeRandomTransitionMatrix(n_states,k_min_start,k_max_start)
        #Simulate the data with these parameters.
        t_on = CalculatetOn_GenericMarkovChainGillespie(Q,n_states,N_cells)
        #If there aren't at least 100 samples, skip this simulation
        if np.sum(np.invert(np.isnan(t_on))) < 100:
            continue
        #Calculate mean and CV2
        mean_onset = np.nanmean(t_on)
        CV2_onset = np.nanvar(t_on)/np.nanmean(t_on)**2
        #If mean is outside bounds, skip
        if mean_onset > mean_max:
            continue
        #Save the results
        mean_onset_total[i] = mean_onset
        CV2_onset_total[i] = CV2_onset
        Q_values_total[i,:,:] = Q
    #Calculate the boundary
    #Remove entries from skipped simulations
    noNanInd = np.invert(np.isnan(mean_onset_total))
    mean_onset_total = mean_onset_total[noNanInd]
    CV2_onset_total = CV2_onset_total[noNanInd]
    Q_values_total = Q_values_total[noNanInd,:,:]
    #Convex hull of the initial cloud (requires >= 3 non-collinear points)
    points = np.transpose(np.array([mean_onset_total,CV2_onset_total]))
    hull = ConvexHull(points)
    area_total.append(hull.area) #Save the area of the convex hull
    #Plot initial results
    if plots:
        plt.close('all')
        plt.figure('boundaryExploration')
        plt.plot(mean_onset_total,CV2_onset_total,'b.',label='initial set')
        plt.xlabel('mean onset time')
        plt.ylabel('CV2 onset time')
        plt.title('Boundary exploration')
        plt.legend()
        plt.figure('areaGrowth')
        plt.plot(area_total,'r.-',label='boundary area')
        plt.legend()
    #Generate new parameter values using the boundary. Divide the current region
    #into horizontal and vertical slices; within each slice find the extremal
    #points in mean/CV2 and mutate their parameters so the boundary is pushed
    #outward without biasing toward regions of higher density.
    print('')
    for j in range(iterations):
        print('Iteration ' + str(j+1) + ' of ' + str(iterations), end='\r')
        #Calculate slices in mean onset time
        min_mean_onset = mean_onset_total.min()
        max_mean_onset = mean_onset_total.max()
        mean_onset_slice_values = np.linspace(min_mean_onset,max_mean_onset,slices+1) #slice edges
        #Continue to next iteration if this set has no range in the mean onset
        if len(mean_onset_slice_values) == 0:
            continue
        #Calculate slices in CV2 onset time
        min_CV2_onset = CV2_onset_total.min()
        max_CV2_onset = CV2_onset_total.max()
        CV2_onset_slice_values = np.linspace(min_CV2_onset,max_CV2_onset,slices+1) #slice edges
        #Continue to next iteration if this set has no range in the CV2 onset
        if len(CV2_onset_slice_values) == 0:
            continue
        #Loop through slices; each slice contributes up to 4*newpoints mutated
        #points: {min,max} CV2 within a mean slice and {min,max} mean within a
        #CV2 slice.
        newpoints = 1 #Number of new points for each extremal point
        mean_onset_slice = np.zeros((slices,newpoints*4))
        CV2_onset_slice = np.zeros((slices,newpoints*4))
        Q_values_slice = np.zeros((slices,newpoints*4,n_states,n_states))
        mean_onset_slice[:,:] = np.nan
        CV2_onset_slice[:,:] = np.nan
        Q_values_slice[:,:,:,:] = np.nan
        for i in prange(slices):
            #Per-slice accumulators
            mean_onset_p = np.zeros(newpoints*4)
            CV2_onset_p = np.zeros(newpoints*4)
            Q_values_p = np.zeros((newpoints*4,n_states,n_states))
            mean_onset_p[:] = np.nan
            CV2_onset_p[:] = np.nan
            Q_values_p[:] = np.nan
            for p in range(newpoints*4):
                #Select the seed point for this extremal case (strict "<" bounds
                #so all four cases actually run)
                if p < newpoints:
                    candidates = np.intersect1d(np.where(mean_onset_total >= mean_onset_slice_values[i]),\
                                                np.where(mean_onset_total <= mean_onset_slice_values[i+1]))
                    if len(candidates) == 0:
                        continue
                    index = candidates[np.argmin(CV2_onset_total[candidates])] #Minimum CV2 in mean slice
                elif p < newpoints*2:
                    candidates = np.intersect1d(np.where(mean_onset_total >= mean_onset_slice_values[i]),\
                                                np.where(mean_onset_total <= mean_onset_slice_values[i+1]))
                    if len(candidates) == 0:
                        continue
                    index = candidates[np.argmax(CV2_onset_total[candidates])] #Maximum CV2 in mean slice
                elif p < newpoints*3:
                    candidates = np.intersect1d(np.where(CV2_onset_total >= CV2_onset_slice_values[i]),\
                                                np.where(CV2_onset_total <= CV2_onset_slice_values[i+1]))
                    if len(candidates) == 0:
                        continue
                    index = candidates[np.argmin(mean_onset_total[candidates])] #Minimum mean in CV2 slice
                else:
                    candidates = np.intersect1d(np.where(CV2_onset_total >= CV2_onset_slice_values[i]),\
                                                np.where(CV2_onset_total <= CV2_onset_slice_values[i+1]))
                    if len(candidates) == 0:
                        continue
                    index = candidates[np.argmax(mean_onset_total[candidates])] #Maximum mean in CV2 slice
                #Generate a mutated parameter set from the seed point.
                #Copy so the stored matrix is never modified in place.
                Q = MutateTransitionMatrix(np.copy(Q_values_total[index,:,:]),s)
                #Check that the mutated rates lie within parameter bounds.
                #JL 10/22/2020: Update this to repeat simulations until we get something within bounds
                outOfBounds = False
                for a in range(n_states):
                    for b in range(n_states):
                        if a != b and Q[a,b] != 0 and (Q[a,b] < k_min or Q[a,b] > k_max):
                            outOfBounds = True
                if outOfBounds:
                    continue
                #Simulate the data with these parameters.
                t_on = CalculatetOn_GenericMarkovChainGillespie(Q,n_states,N_cells)
                #If there aren't at least 100 samples, skip this simulation
                if np.sum(np.invert(np.isnan(t_on))) < 100:
                    continue
                #Calculate mean and CV2
                mean_onset = np.nanmean(t_on)
                CV2_onset = np.nanvar(t_on)/np.nanmean(t_on)**2
                #If mean onset time is outside bounds, skip
                if mean_onset > mean_max:
                    continue
                #Save the data for each new point, for this slice
                mean_onset_p[p] = mean_onset
                CV2_onset_p[p] = CV2_onset
                Q_values_p[p,:,:] = Q
            #Save each slice's data into the whole iteration result.
            mean_onset_slice[i,:] = mean_onset_p
            CV2_onset_slice[i,:] = CV2_onset_p
            Q_values_slice[i,:,:,:] = Q_values_p
        #Append this iteration's successful points to the running totals.
        for u in range(slices):
            for y in range(newpoints*4):
                #Check if this simulation resulted in anything and skip otherwise
                if np.isnan(mean_onset_slice[u,y]):
                    continue
                mean_onset_total = np.concatenate((mean_onset_total,\
                                                np.array([mean_onset_slice[u,y]])),axis=0)
                CV2_onset_total = np.concatenate((CV2_onset_total,\
                                                np.array([CV2_onset_slice[u,y]])),axis=0)
                Q_values_total = np.concatenate((Q_values_total,np.reshape(Q_values_slice[u,y,:,:]\
                                                ,(1,n_states,n_states))),axis=0)
        #Recompute the convex hull and keep only the boundary points
        points = np.transpose(np.array([mean_onset_total,CV2_onset_total]))
        hull = ConvexHull(points)
        area_total.append(hull.area) #Save the area of the convex hull
        vertices = hull.vertices #Indices of the points on the convex hull
        mean_onset_total = mean_onset_total[vertices]
        CV2_onset_total = CV2_onset_total[vertices]
        Q_values_total = Q_values_total[vertices]
        #Plot results
        if plots:
            plt.figure('boundaryExploration')
            plt.plot(mean_onset_total,CV2_onset_total,'k.')
            plt.figure('areaGrowth')
            plt.plot(area_total,'r.-',label='boundary area')
    #Plot final results. Recompute the hull on the current (possibly reduced)
    #arrays so the vertex indices are valid.
    if plots:
        final_hull = ConvexHull(np.transpose(np.array([mean_onset_total,CV2_onset_total])))
        final_vertices = final_hull.vertices
        plt.figure('boundaryExploration')
        plt.plot(mean_onset_total[final_vertices],CV2_onset_total[final_vertices],'r.-',label='final boundary')
        plt.xlim(0,mean_max)
        #Plot the Gamma distribution limit: mean = (n-1)/k, CV2 = 1/(n-1)
        k_Gamma = np.linspace(k_min,k_max,50)
        plt.plot((n_states-1)/k_Gamma,(1/(n_states-1))*np.ones(k_Gamma.shape),'k--',label='Gamma distribution')
        plt.legend()
    #Save data
    #Store relevant information in numpy savefile (currently disabled)
    #Saving: mean, CV2, area, Q, k_min, k_max, n_sites
    filename = 'ParameterExplorationResults/' + str(date.today()) + '_n=' + str(n_states) + \
                '_k_min=' + str(k_min) + '_k_max=' + str(k_max)
    #np.savez(filename, mean_onset = mean_onset_total, CV2_onset = CV2_onset_total,\
    #         area = area_total, Q = Q_values_total)
    return mean_onset_total, CV2_onset_total, Q_values_total, area_total
#Testing the boundary exploration
n_initial = 50 #initial random parameter sets
iterations = 10 #boundary-pushing iterations
slices = 10 #horizontal/vertical slices per iteration
n_states = 3 #Markov chain states
s = 0.5 #mutation width
k_start = 2 #center of initial rate distribution
k_min = 0 #rate lower bound
k_max = 4 #rate upper bound
mean_max = 10 #discard points with mean onset above this
N_cells = 50000 #cells per simulation
BoundaryExploration(n_initial,iterations,slices,n_states,s,k_start,k_min,k_max,mean_max,N_cells)
```
Interestingly, it looks like the boundary algorithm is having trouble exploring the parameter space. We know from sensitivity explorations that the general model should be able to smoothly explore the area around the Gamma distribution limit, but the exploration algorithm seems to be restricted to the upper left quadrant.
Testing the algorithm with a super naive function (e.g. treating the diagonals of a 2x2 matrix as x and y coordinates) indicates the exploration algorithm itself works. So the issue is something with how the parameter space of the Markov chain model gets explored. My hunch is that the stochastic nature of the simulation is interfering with determining the "smoothness" of the underlying feature space.
| github_jupyter |
```
import pandas as pd
import os
import hashlib
import requests
from bs4 import BeautifulSoup
from bs4.element import Comment
import urllib.parse
from tqdm.notebook import tqdm
import random
from multiprocessing import Pool
import spacy
import numpy as np
#Load the industry category and salary-industry lookup tables
industries = pd.read_csv("industry_categories.csv")
industries.head()
salary_industries = pd.read_csv("Salary-Industries.csv")
salary_industries.head()
#SECURITY(review): hard-coded API key committed to source control.
#Revoke this key and load it from an environment variable or secrets store.
GOOGLE_API_KEY = 'AIzaSyCqd-BAzUsp6a2ICBETWebYYwoA3d3EeWk'
class KeyValueCache:
    """Tiny file-backed key/value store.

    Each value is written to <data_dir>/<md5(key)>.html, so any string key
    maps to a stable filename. Used to cache fetched HTML between runs.
    """
    def __init__(self, data_dir):
        # Create the backing directory on first use
        self.data_dir = data_dir
        if not os.path.isdir(self.data_dir):
            os.mkdir(self.data_dir)

    def hash_of(self, key):
        """Hex md5 digest of the key (stable, filesystem-safe filename stem)."""
        return hashlib.md5(key.encode('utf-8')).hexdigest()

    def file_for(self, key):
        """Full path of the file that holds the key's value."""
        return os.path.join(self.data_dir, self.hash_of(key) + '.html')

    def contains(self, key):
        """Checks if there is content for the key"""
        return os.path.isfile(self.file_for(key))

    def get(self, key):
        """Returns the value of the key"""
        # BUG FIX: read as UTF-8 explicitly; previously the platform default
        # encoding was used, which breaks on non-ASCII pages cached under a
        # different locale.
        with open(self.file_for(key), encoding='utf-8') as f:
            return f.read()

    def put(self, key, value):
        """Stores value at the key and returns it"""
        # Write as UTF-8 to match get() above
        with open(self.file_for(key), 'w', encoding='utf-8') as f:
            f.write(value)
        return value
#Shared on-disk cache for every HTTP fetch in this notebook
cache = KeyValueCache(os.path.join('.', '.cache'))
# Usage examples (note: hash_of expects str, not bytes):
# print(cache.hash_of(b'abc'))
# print(cache.file_for(b'abc'))
# print(cache.contains(b'abc'))
# print(cache.put(b'abc', 'abc value'))
# print(cache.get(b'abc'))
# print(cache.contains(b'abc'))
```
requests quickstart: https://requests.kennethreitz.org/en/master/user/quickstart/
```
#Proxies considered reliable enough for scraping; one is picked per request
static_proxies = pd.read_csv("utils/trusted_proxies.csv")['proxy'].to_list()
def request_proxy(url):
    #url is currently unused; kept for symmetry with request_user_agent
    proxies = static_proxies
    return random.choice(proxies)
def request_user_agent(url):
    """Return a User-Agent header value for the given request.

    A pool is kept so agents can be rotated per request; only one desktop
    Chrome agent is currently enabled (a larger set of mobile/desktop agents
    was previously listed here but is disabled). The url argument is unused
    for now, mirroring request_proxy's signature.
    """
    agents = [
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36'
    ]
    return random.choice(agents)
def http_get(url):
    """Perform one GET through a random proxy with a spoofed User-Agent.

    Propagates whatever requests raises (timeouts, proxy errors); callers
    are expected to handle retries.
    """
    headers = {
        'User-Agent': request_user_agent(url),
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=1',
        'Accept-Encoding': 'identity;q=1'
    }
    #Use the same proxy for both schemes of this request
    proxy = request_proxy(url)
    proxies = {
        "http": proxy,
        "https": proxy
    }
    response = requests.get(url, headers=headers, proxies=proxies, timeout=10, allow_redirects=True)
    # handle HTTPSConnectionPool, ProxyError to retry with a different proxy
    return response
def cached_get(url, cache):
    """gets the content of a url either from cache or by making HTTP request

    Returns the page text, or None on any failure. Failures are NOT cached,
    so the URL will be retried on the next call.
    """
    if cache.contains(url):
        #print("Cached: {}".format(url))
        return cache.get(url)
    else:
        #print("GET: {}".format(url))
        try:
            response = http_get(url)
            #NOTE(review): a Response is truthy for any status < 400 (e.g. 204),
            #not only 200 as the comment below suggests — confirm intent
            if response: # check for 200
                return cache.put(url, response.text)
            else:
                raise Exception(response.status_code)
        except Exception as err:
            #Best-effort scraping: report and return None rather than crash
            print("ERROR: {}".format(err))
            return None
#Warm the cache with an example SERP fetch (result reassigned below)
serp = cached_get('https://www.google.com/search?q=Tech%20-%20IT%20department%20of%20national%20insurance%20company', cache)
def extract_links(serp):
    """Extract the external result links from a Google SERP HTML page.

    Handles both the desktop layout (#rso ... div.g) and the lightweight
    layout (#main div.ZINbbc).  Google redirect hrefs ("/url?q=...") are
    unwrapped to the target URL.
    """
    def external_url_from_href(href):
        # Google wraps external results as /url?q=<target>&...; unwrap those.
        if not href.startswith('/url'):
            return href
        query = urllib.parse.urlparse('https://www.google.com' + href).query
        return urllib.parse.parse_qs(query).get('q')[0]

    soup = BeautifulSoup(serp, 'html.parser')
    hrefs = []
    desktop_blocks = soup.select('#rso div.bkWMgd div.g:not(.g-blk):not(#ZpxfC):not(.gws-trips__outer-card):not(.knavi)')
    for block in desktop_blocks:
        for anchor in block.select('a:not(.fl):not(.ab_button)'):
            hrefs.append(external_url_from_href(anchor.get('href')))
    for anchor in soup.select('#main div.ZINbbc div.kCrYT > a'):
        hrefs.append(external_url_from_href(anchor.get('href')))
    return hrefs
# Driver cell: fetch one sample SERP and sanity-check link extraction on it.
serp = cached_get('https://www.google.com/search?q=Auto%20rental', cache)
#print(cache.file_for('https://www.google.com/search?q=Auto%20rental'))
# serp = cached_get('https://www.google.com/search?q=Accounting', cache)
extract_links(serp)
def splitup_to_queries(items, separator = None):
    """Yield (original_item, query) pairs for every item.

    When *separator* is given, each item is split into stripped chunks and
    each chunk becomes a query; multi-chunk items additionally yield the
    chunks rejoined with single spaces (unstripped, as split).
    """
    for item in items:
        if separator:
            parts = item.split(separator)
        else:
            parts = [item]
        yield from ((item, part.strip()) for part in parts)
        if len(parts) > 1:
            yield (item, " ".join(parts))
def queries_to_links(items):
    """For each (industry, query) pair, fetch the SERP through the cache and
    yield (industry, search_url, extracted_links); misses are skipped."""
    for industry, query in items:
        search_url = "https://www.google.com/search?q={}".format(urllib.parse.quote(query))
        page = cached_get(search_url, cache)
        if page:
            yield (industry, search_url, extract_links(page))
def download_links(items):
    """Download the first three links of each SERP through the cache,
    yielding (industry, 'success'|'error', url) per attempted URL."""
    for industry, _search_url, links in items:
        for url in links[:3]:
            status = 'success' if cached_get(url, cache) else 'error'
            yield (industry, status, url)
def first_n_links(items, n=3):
    """Yield the first *n* result links of every SERP tuple.

    Args:
        items: iterable of (industry, search_url, links) tuples.
        n: how many leading links to take per SERP (default 3, the value
           previously hard-coded).
    """
    for industry, search_url, links in items:
        for url in links[:n]:
            yield url
def download_url(items):
    """Download every URL in *items* through the cache using a worker pool.

    Returns a list of (url, 'success'|'error') tuples.
    """
    c = cache
    def download(u):
        # BUG FIX: the body previously referenced the undefined outer name
        # `url` instead of the parameter `u`, raising NameError per task.
        result = 'success' if cached_get(u, c) else 'error'
        return (u, result)
    # NOTE(review): a nested function is not picklable by multiprocessing on
    # spawn-based platforms; move `download` to module level if this fails.
    with Pool(processes=10) as pool:
        # imap is lazy; materialize so the downloads actually run before the
        # pool is torn down (the original `resutls` was never consumed).
        return list(pool.imap(download, items, chunksize=8))
def download_data_for_list(list_to_download, separator=None):
    """Download the top links for every query derived from *list_to_download*,
    showing running success/error counts on a tqdm progress bar."""
    it = download_links(queries_to_links(splitup_to_queries(list_to_download, separator=separator)))
    successes = 0
    errors = 0
    progress = tqdm(it, desc='Downloading', miniters=1)
    for industry, status, url in progress:
        if status == 'error':
            errors = errors + 1
            print("ERROR: {}".format(url))
        else:
            successes = successes + 1
        # BUG FIX: the postfix label was misspelled 'insudtry'.
        progress.set_postfix(industry=industry, successes=successes, errors=errors)
def download_serps_for_list(list_to_download, separator=None):
    """Fetch the SERP for every derived query, printing any query that
    returned fewer than five links (a sign of blocking or layout changes)."""
    pipeline = queries_to_links(splitup_to_queries(list_to_download, separator=separator))
    for _industry, search_url, urls in tqdm(pipeline, desc='Downloading', miniters=1):
        if len(urls) < 5:
            print("{} {}".format(len(urls), search_url))
def download_pages_for_list_multiprocess(list_to_download, separator=None):
    """Download the top result pages for every derived query with a pool of
    four workers.  Returns a list of (url, 'success'|'error') tuples."""
    urls = first_n_links(queries_to_links(splitup_to_queries(list_to_download, separator=separator)))
    def download(url):
        print(url)
        page = cached_get(url, cache)
        result = 'success' if page else 'error'
        return (url, result)
    # NOTE(review): nested functions are not picklable on spawn-based
    # platforms; hoist `download` to module level if the pool fails.
    with Pool(processes=4) as pool:
        # BUG FIX: imap_unordered is lazy and the (misspelled) `resutls` was
        # never consumed, so nothing downloaded; materialize and return it.
        return list(pool.imap_unordered(download, list(urls), chunksize=4))
#download_data_for_list(industries['industry'].to_list(), separator='/')
# Driver cell: download SERPs for every deduplicated, sorted industry label.
download_serps_for_list(salary_industries['Industry Ref'].dropna().sort_values().to_list())
def industry_to_queries(industry, separator = None):
    """Return the list of search queries derived from one industry label:
    each stripped chunk, plus (for multi-chunk labels) the chunks rejoined
    with single spaces."""
    if separator:
        chunks = industry.split(separator)
    else:
        chunks = [industry]
    queries = [chunk.strip() for chunk in chunks]
    if len(chunks) > 1:
        queries.append(" ".join(chunks))
    return queries
def queries_to_links(items):
    # NOTE(review): exact duplicate of the earlier `queries_to_links`
    # definition; this redefinition shadows it with identical behavior and
    # could be removed.
    for industry, query in items:
        search_url = "https://www.google.com/search?q={}".format(urllib.parse.quote(query))
        serp = cached_get(search_url, cache)
        if not serp:
            continue
        links = extract_links(serp)
        yield (industry, search_url, links)
def create_industry_term_url_map(items, separator=None):
    """Build a DataFrame mapping industry -> derived search term -> result URL.

    SERPs that could not be fetched are silently skipped.
    """
    records = []
    for industry in items:
        for term in industry_to_queries(industry, separator=separator):
            search_url = "https://www.google.com/search?q={}".format(urllib.parse.quote(term))
            page = cached_get(search_url, cache)
            if not page:
                continue
            records.extend((industry, term, link) for link in extract_links(page))
    return pd.DataFrame.from_records(records, columns=['industry', 'term', 'url'])
# industry_targets_urls = create_industry_term_url_map(industries['industry'].to_list(), separator='/')
# industry_targets_urls.to_csv("./utils/industry_targets_urls.csv", index=False)
#industry_inputs_urls = create_industry_term_url_map(salary_industries['Industry Ref'].dropna().sort_values().to_list())
#industry_inputs_urls.to_csv("./utils/industry_inputs_urls.csv", index=False)
# Reload the previously scraped industry -> term -> URL maps from disk.
industry_targets_urls = pd.read_csv("./utils/industry_targets_urls.csv")
industry_inputs_urls = pd.read_csv("./utils/industry_inputs_urls.csv")
industry_inputs_urls.head()
import re
def tag_visible(element):
    """Return False for soup text nodes that would not render on the page
    (script/style/metadata children and HTML comments)."""
    hidden_parents = ('style', 'script', 'head', 'title', 'meta', '[document]')
    if element.parent.name in hidden_parents:
        return False
    #if any(c.contains('hidden') for c in element.parent['class']):
    #    return False
    return not isinstance(element, Comment)
def text_from_html(body):
    """Extract the visible, whitespace-normalized text of an HTML document.

    Only text runs of more than five space-separated tokens are kept, joined
    by single spaces.  Returns '' when parsing fails for any reason.
    """
    try:
        soup = BeautifulSoup(body, 'html.parser')
        texts = soup.find_all(text=True)
        visible_texts = filter(tag_visible, texts)
        visible_texts = map(lambda s: s.encode('utf-8', 'ignore').decode('utf-8'), visible_texts)
        visible_texts = map(lambda s: re.sub(r'\s+', ' ', s).strip(), visible_texts)
        visible_texts = filter(lambda s: len(s) > 0, visible_texts)
        visible_texts = filter(lambda s: len(s.split(' ')) > 5, visible_texts)
        return ' '.join(visible_texts)
    except Exception:
        # BUG FIX: was a bare `except:` which also swallowed SystemExit and
        # KeyboardInterrupt; Exception keeps the best-effort '' fallback
        # without masking interpreter-level signals.
        return ''
# Debug cell: show where a sample page landed in the local cache.
print(cache.file_for('https://www.rightway.com/used-vehicles/'))
#page = cached_get('https://www.britannica.com/topic/finance', cache)
#text_from_html(page)
def extract_and_combine_text_from_urls(urls):
    """Concatenate the visible text of every already-cached page in *urls*."""
    texts = []
    for url in urls:
        if cache.contains(url):
            texts.append(text_from_html(cache.get(url)))
    return " ".join(texts)
def is_downloaded(row):
    """Return True when the page for row['url'] is already in the cache."""
    return cache.contains(row['url'])
def file_for_url(row):
    """Return the cache file path for row['url'], or None when not cached."""
    url = row['url']
    return cache.file_for(url) if cache.contains(url) else None
def extract_text(row):
    """Return the visible text of the cached page for row['url'], else None."""
    url = row['url']
    if not cache.contains(url):
        return None
    return text_from_html(cache.get(url))
def create_url_text_file(input_file, out_file):
    """Annotate the industry/term/url table with cache status, cache file
    path, and extracted page text, persist it to *out_file*, and return it."""
    df = pd.read_csv(input_file)
    df['is_downloaded'] = df.apply(is_downloaded, axis=1)
    df['file'] = df.apply(file_for_url, axis=1)
    df['text'] = df.apply(extract_text, axis=1)
    df.to_csv(out_file, index=False)
    return df
# Driver cells: build the url->text tables for both industry lists.
industry_targets_url_text = create_url_text_file("./utils/industry_targets_urls.csv", "./utils/industry_targets_url_text.csv")
industry_targets_url_text.head()
industry_inputs_url_text = create_url_text_file("./utils/industry_inputs_urls.csv", "./utils/industry_inputs_url_text.csv")
industry_inputs_url_text.head()
# List the URLs still missing from the cache.
industry_inputs_url_text['url'][industry_inputs_url_text['is_downloaded'] == False]
def combine_texts(series):
    """Join every value of *series* into one space-separated string,
    stringifying non-string values (NaN becomes the literal 'nan')."""
    return " ".join(str(value) for value in series.values)
def create_text_file(input_file, out_file):
    """Collapse the per-URL text table to one combined text blob per
    (industry, term) pair, persist it to *out_file*, and return it."""
    df = pd.read_csv(input_file)
    df = df.groupby(['industry', 'term']).aggregate({'text': combine_texts}).reset_index()
    df.to_csv(out_file, index=False)
    return df
# Driver cells: build the combined per-term text files and inspect gaps.
industry_targets_text = create_text_file("./utils/industry_targets_url_text.csv", "./utils/industry_targets_text.csv")
industry_targets_text.head()
industry_inputs_text = create_text_file("./utils/industry_inputs_url_text.csv", "./utils/industry_inputs_text.csv")
industry_inputs_text.head()
#industry_inputs_url_text['text'].apply(lambda r: len(r) if r else 0)
industry_inputs_url_text['file'].dropna()
# Rank each term's links in their original SERP order (1 = top result).
industry_inputs_url_text['link_rank'] = (industry_inputs_url_text.groupby('term').cumcount()+1)
# Not-yet-downloaded URLs, top-ranked first, for retrying.
prioritized_urls = (
    industry_inputs_url_text.loc[:,['term', 'url', 'link_rank', 'is_downloaded']]
    .query('is_downloaded == False')
    .sort_values(['link_rank', 'term'], ascending=[True, False])
    #['url']
)
# Terms whose top-ranked link is still missing, and all their pending URLs.
l = prioritized_urls.query('link_rank == 1')['term'].to_list()
prioritized_urls[prioritized_urls['term'].isin(l)]['url']
# Terms for which not a single page was downloaded.
industry_inputs_url_text.groupby('term').aggregate({ 'is_downloaded': lambda g: any(g)}).query('is_downloaded == False')
def download(url):
    """Fetch *url* through the module-level cache.

    Returns ('success', url) when content was obtained, ('error', url) otherwise.
    """
    status = 'success' if cached_get(url, cache) else 'error'
    return (status, url)
def download_all(urls):
    """Download all *urls* in parallel; returns a list of (status, url) pairs.

    BUG FIX: the original mapped the undefined name `func`; it should map the
    module-level `download` helper.
    """
    with Pool(10) as pool:
        return pool.map(download, urls)
def first_few_url_for_each_term(term_url_df, n):
    """Yield up to *n* URLs (in row order) for each distinct term in the
    DataFrame, iterating terms in groupby (sorted) order."""
    for _term, group in term_url_df.groupby('term'):
        yield from group['url'].to_list()[:n]
# Retry-download every pending URL for terms whose top link is missing.
l = [download(url) for url in tqdm(list(prioritized_urls[prioritized_urls['term'].isin(l)]['url']))]
# Check that an unpaired surrogate is dropped by errors='ignore' and that the
# cleaned string round-trips through a pandas CSV write.
s = 'asd\ud800sdf'.encode('utf-8', 'ignore').decode('utf-8')
print(s)
srs = pd.Series()
srs.loc[ 0 ] = s
srs.to_csv('testcase.csv')
```
| github_jupyter |
```
from nltk.corpus import stopwords
from nltk.cluster.util import cosine_distance
import numpy as np
import networkx as nx
from nltk.corpus import stopwords
from nltk.cluster.util import cosine_distance
import numpy as np
import networkx as nx
def read_article(file_name):
    """Read *file_name* and return its first line as tokenized sentences.

    The first line is split on ". " into sentences and each sentence is split
    on single spaces into words.  The final sentence is discarded (it holds
    the trailing fragment after the last ". ", including the newline).
    """
    # BUG FIX: the file handle was opened and never closed; use a context
    # manager so it is released even on error.
    with open(file_name, "r") as fh:
        filedata = fh.readlines()
    article = filedata[0].split(". ")
    sentences = []
    for sentence in article:
        print(sentence)
        # NOTE(review): str.replace treats "[^a-zA-Z]" as a literal substring,
        # not a regex, so this is effectively a no-op; re.sub was likely
        # intended.  Kept as-is to preserve behavior.
        sentences.append(sentence.replace("[^a-zA-Z]", " ").split(" "))
    sentences.pop()
    return sentences
def sentence_similarity(sent1, sent2, stopwords=None):
    """Return the cosine similarity (1 - cosine distance) between two
    tokenized sentences, counting lower-cased words and skipping stopwords."""
    if stopwords is None:
        stopwords = []
    words1 = [w.lower() for w in sent1]
    words2 = [w.lower() for w in sent2]
    vocabulary = list(set(words1 + words2))
    vec1 = [0] * len(vocabulary)
    vec2 = [0] * len(vocabulary)
    for word in words1:
        if word not in stopwords:
            vec1[vocabulary.index(word)] += 1
    for word in words2:
        if word not in stopwords:
            vec2[vocabulary.index(word)] += 1
    # NOTE(review): if either vector is all zeros, cosine_distance is
    # undefined (NaN) — unchanged from the original behavior.
    return 1 - cosine_distance(vec1, vec2)
def build_similarity_matrix(sentences, stop_words):
    """Return an NxN matrix of pairwise sentence similarities (diagonal 0)."""
    n = len(sentences)
    similarity_matrix = np.zeros((n, n))
    for i in range(n):
        for j in range(n):
            if i == j:
                continue  # a sentence is not compared against itself
            similarity_matrix[i][j] = sentence_similarity(sentences[i], sentences[j], stop_words)
    return similarity_matrix
def generate_summary(file_name, top_n=5):
    """Print an extractive summary of *file_name* using TextRank.

    Sentences are ranked by PageRank over their pairwise cosine-similarity
    graph and the *top_n* highest-scoring sentences are printed.
    """
    stop_words = stopwords.words('english')
    summarize_text = []
    # Step 1 - Read the text and split it into sentences
    sentences = read_article(file_name)
    # Step 2 - Generate the similarity matrix across sentences
    sentence_similarity_martix = build_similarity_matrix(sentences, stop_words)
    # Step 3 - Rank sentences via PageRank on the similarity graph
    sentence_similarity_graph = nx.from_numpy_array(sentence_similarity_martix)
    scores = nx.pagerank(sentence_similarity_graph)
    # Step 4 - Sort by score and pick the top sentences
    ranked_sentence = sorted(((scores[i],s) for i,s in enumerate(sentences)), reverse=True)
    print("Indexes of top ranked_sentence order are ", ranked_sentence)
    # BUG FIX: clamp to the number of available sentences; a top_n larger
    # than the article previously raised IndexError.
    for i in range(min(top_n, len(ranked_sentence))):
        summarize_text.append(" ".join(ranked_sentence[i][1]))
    # Step 5 - Output the summarized text
    print("Summarize Text: \n", ". ".join(summarize_text))
# Driver cell: summarize the sample article with the top 10 ranked sentences.
generate_summary( "butt.txt", 10)
```
| github_jupyter |
## Dependencies
```
import json, warnings, shutil, glob
from jigsaw_utility_scripts import *
from scripts_step_lr_schedulers import *
from transformers import TFXLMRobertaModel, XLMRobertaConfig
from tensorflow.keras.models import Model
from tensorflow.keras import optimizers, metrics, losses, layers
SEED = 0
seed_everything(SEED)
warnings.filterwarnings("ignore")
```
## TPU configuration
```
strategy, tpu = set_up_strategy()
print("REPLICAS: ", strategy.num_replicas_in_sync)
AUTO = tf.data.experimental.AUTOTUNE
```
# Load data
```
database_base_path = '/kaggle/input/jigsaw-data-split-roberta-192-ratio-2-upper/'
k_fold = pd.read_csv(database_base_path + '5-fold.csv')
valid_df = pd.read_csv("/kaggle/input/jigsaw-multilingual-toxic-comment-classification/validation.csv",
usecols=['comment_text', 'toxic', 'lang'])
print('Train samples: %d' % len(k_fold))
display(k_fold.head())
print('Validation samples: %d' % len(valid_df))
display(valid_df.head())
base_data_path = 'fold_3/'
fold_n = 3
# Unzip files
!tar -xvf /kaggle/input/jigsaw-data-split-roberta-192-ratio-2-upper/fold_3.tar.gz
```
# Model parameters
```
base_path = '/kaggle/input/jigsaw-transformers/XLM-RoBERTa/'
config = {
"MAX_LEN": 192,
"BATCH_SIZE": 128,
"EPOCHS": 4,
"LEARNING_RATE": 1e-5,
"ES_PATIENCE": None,
"base_model_path": base_path + 'tf-xlm-roberta-large-tf_model.h5',
"config_path": base_path + 'xlm-roberta-large-config.json'
}
with open('config.json', 'w') as json_file:
json.dump(json.loads(json.dumps(config)), json_file)
```
## Learning rate schedule
```
lr_min = 1e-7
lr_start = 1e-7
lr_max = config['LEARNING_RATE']
step_size = len(k_fold[k_fold[f'fold_{fold_n}'] == 'train']) // config['BATCH_SIZE']
total_steps = config['EPOCHS'] * step_size
hold_max_steps = 0
warmup_steps = step_size * 1
decay = .9997
rng = [i for i in range(0, total_steps, config['BATCH_SIZE'])]
y = [exponential_schedule_with_warmup(tf.cast(x, tf.float32), warmup_steps, hold_max_steps,
lr_start, lr_max, lr_min, decay) for x in rng]
sns.set(style="whitegrid")
fig, ax = plt.subplots(figsize=(20, 6))
plt.plot(rng, y)
print("Learning rate schedule: {:.3g} to {:.3g} to {:.3g}".format(y[0], max(y), y[-1]))
```
# Model
```
module_config = XLMRobertaConfig.from_pretrained(config['config_path'], output_hidden_states=False)
def model_fn(MAX_LEN):
    """Build the XLM-RoBERTa binary classifier.

    Two int32 inputs of length MAX_LEN (token ids and attention mask) feed the
    pretrained encoder; the [CLS] token embedding goes through a single
    sigmoid unit.  Uses the module-level `config` and `module_config`.
    """
    input_ids = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name='input_ids')
    attention_mask = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name='attention_mask')
    base_model = TFXLMRobertaModel.from_pretrained(config['base_model_path'], config=module_config)
    last_hidden_state, _ = base_model({'input_ids': input_ids, 'attention_mask': attention_mask})
    # First position of the sequence is the <s> ([CLS]) token embedding.
    cls_token = last_hidden_state[:, 0, :]
    output = layers.Dense(1, activation='sigmoid', name='output')(cls_token)
    model = Model(inputs=[input_ids, attention_mask], outputs=output)
    return model
```
# Train
```
# Load data
x_train = np.load(base_data_path + 'x_train.npy')
# NOTE(review): labels are reshaped with x_train.shape[1], implying the token
# arrays are stored (features, samples) — confirm against the split script.
y_train = np.load(base_data_path + 'y_train_int.npy').reshape(x_train.shape[1], 1).astype(np.float32)
x_valid_ml = np.load(database_base_path + 'x_valid.npy')
y_valid_ml = np.load(database_base_path + 'y_valid.npy').reshape(x_valid_ml.shape[1], 1).astype(np.float32)
#################### ADD TAIL ####################
# Augment with the "tail" encodings of each comment; labels are duplicated.
x_train = np.hstack([x_train, np.load(base_data_path + 'x_train_tail.npy')])
y_train = np.vstack([y_train, y_train])
step_size = x_train.shape[1] // config['BATCH_SIZE']
valid_step_size = x_valid_ml.shape[1] // config['BATCH_SIZE']
# Build TF datasets
train_dist_ds = strategy.experimental_distribute_dataset(get_training_dataset(x_train, y_train, config['BATCH_SIZE'], AUTO, seed=SEED))
valid_dist_ds = strategy.experimental_distribute_dataset(get_validation_dataset(x_valid_ml, y_valid_ml, config['BATCH_SIZE'], AUTO, repeated=True, seed=SEED))
train_data_iter = iter(train_dist_ds)
valid_data_iter = iter(valid_dist_ds)
# Step functions
@tf.function
def train_step(data_iter):
    """Run `step_size` distributed training steps (one epoch).

    Closes over the module-level model, optimizer, loss_fn, metrics and
    strategy; updates train_auc and train_loss in place.
    """
    def train_step_fn(x, y):
        with tf.GradientTape() as tape:
            probabilities = model(x, training=True)
            loss = loss_fn(y, probabilities)
        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
        train_auc.update_state(y, probabilities)
        train_loss.update_state(loss)
    # tf.range keeps the loop inside the compiled graph (one launch per epoch).
    for _ in tf.range(step_size):
        strategy.experimental_run_v2(train_step_fn, next(data_iter))
@tf.function
def valid_step(data_iter):
    """Run `valid_step_size` distributed evaluation steps.

    Updates the module-level valid_auc and valid_loss metrics in place.
    """
    def valid_step_fn(x, y):
        probabilities = model(x, training=False)
        loss = loss_fn(y, probabilities)
        valid_auc.update_state(y, probabilities)
        valid_loss.update_state(loss)
    for _ in tf.range(valid_step_size):
        strategy.experimental_run_v2(valid_step_fn, next(data_iter))
# Train model
with strategy.scope():
model = model_fn(config['MAX_LEN'])
optimizer = optimizers.Adam(learning_rate=lambda:
exponential_schedule_with_warmup(tf.cast(optimizer.iterations, tf.float32),
warmup_steps, hold_max_steps, lr_start,
lr_max, lr_min, decay))
loss_fn = losses.binary_crossentropy
train_auc = metrics.AUC()
valid_auc = metrics.AUC()
train_loss = metrics.Sum()
valid_loss = metrics.Sum()
metrics_dict = {'loss': train_loss, 'auc': train_auc,
'val_loss': valid_loss, 'val_auc': valid_auc}
history = custom_fit(model, metrics_dict, train_step, valid_step, train_data_iter, valid_data_iter,
step_size, valid_step_size, config['BATCH_SIZE'], config['EPOCHS'],
config['ES_PATIENCE'], save_last=False)
# model.save_weights('model.h5')
# Make predictions
x_train = np.load(base_data_path + 'x_train.npy')
x_valid = np.load(base_data_path + 'x_valid.npy')
x_valid_ml_eval = np.load(database_base_path + 'x_valid.npy')
train_preds = model.predict(get_test_dataset(x_train, config['BATCH_SIZE'], AUTO))
valid_preds = model.predict(get_test_dataset(x_valid, config['BATCH_SIZE'], AUTO))
valid_ml_preds = model.predict(get_test_dataset(x_valid_ml_eval, config['BATCH_SIZE'], AUTO))
k_fold.loc[k_fold[f'fold_{fold_n}'] == 'train', f'pred_{fold_n}'] = np.round(train_preds)
k_fold.loc[k_fold[f'fold_{fold_n}'] == 'validation', f'pred_{fold_n}'] = np.round(valid_preds)
valid_df[f'pred_{fold_n}'] = valid_ml_preds
# Fine-tune on validation set
#################### ADD TAIL ####################
x_valid_ml_tail = np.hstack([x_valid_ml, np.load(database_base_path + 'x_valid_tail.npy')])
y_valid_ml_tail = np.vstack([y_valid_ml, y_valid_ml])
valid_step_size_tail = x_valid_ml_tail.shape[1] // config['BATCH_SIZE']
# Build TF datasets
train_ml_dist_ds = strategy.experimental_distribute_dataset(get_training_dataset(x_valid_ml_tail, y_valid_ml_tail,
config['BATCH_SIZE'], AUTO, seed=SEED))
train_ml_data_iter = iter(train_ml_dist_ds)
history_ml = custom_fit(model, metrics_dict, train_step, valid_step, train_ml_data_iter, valid_data_iter,
valid_step_size_tail, valid_step_size, config['BATCH_SIZE'], 1,
config['ES_PATIENCE'], save_last=False)
# Join history
for key in history_ml.keys():
history[key] += history_ml[key]
model.save_weights('model_ml.h5')
# Make predictions
valid_ml_preds = model.predict(get_test_dataset(x_valid_ml_eval, config['BATCH_SIZE'], AUTO))
valid_df[f'pred_ml_{fold_n}'] = valid_ml_preds
### Delete data dir
shutil.rmtree(base_data_path)
```
## Model loss graph
```
plot_metrics(history)
```
# Model evaluation
```
display(evaluate_model_single_fold(k_fold, fold_n, label_col='toxic_int').style.applymap(color_map))
```
# Confusion matrix
```
train_set = k_fold[k_fold[f'fold_{fold_n}'] == 'train']
validation_set = k_fold[k_fold[f'fold_{fold_n}'] == 'validation']
plot_confusion_matrix(train_set['toxic_int'], train_set[f'pred_{fold_n}'],
validation_set['toxic_int'], validation_set[f'pred_{fold_n}'])
```
# Model evaluation by language
```
display(evaluate_model_single_fold_lang(valid_df, fold_n).style.applymap(color_map))
# ML fine-tunned preds
display(evaluate_model_single_fold_lang(valid_df, fold_n, pred_col='pred_ml').style.applymap(color_map))
```
# Visualize predictions
```
pd.set_option('max_colwidth', 120)
print('English validation set')
display(k_fold[['comment_text', 'toxic'] + [c for c in k_fold.columns if c.startswith('pred')]].head(10))
print('Multilingual validation set')
display(valid_df[['comment_text', 'toxic'] + [c for c in valid_df.columns if c.startswith('pred')]].head(10))
```
# Test set predictions
```
x_test = np.load(database_base_path + 'x_test.npy')
test_preds = model.predict(get_test_dataset(x_test, config['BATCH_SIZE'], AUTO))
submission = pd.read_csv('/kaggle/input/jigsaw-multilingual-toxic-comment-classification/sample_submission.csv')
submission['toxic'] = test_preds
submission.to_csv('submission.csv', index=False)
display(submission.describe())
display(submission.head(10))
```
| github_jupyter |
# Amazon SageMaker Model Monitor
This notebook shows how to:
* Host a machine learning model in Amazon SageMaker and capture inference requests, results, and metadata
* Analyze a training dataset to generate baseline constraints
* Monitor a live endpoint for violations against constraints
---
## Background
Amazon SageMaker provides every developer and data scientist with the ability to build, train, and deploy machine learning models quickly. Amazon SageMaker is a fully-managed service that encompasses the entire machine learning workflow. You can label and prepare your data, choose an algorithm, train a model, and then tune and optimize it for deployment. You can deploy your models to production with Amazon SageMaker to make predictions at lower cost than was previously possible.
In addition, Amazon SageMaker enables you to capture the input, output and metadata for invocations of the models that you deploy. It also enables you to analyze the data and monitor its quality. In this notebook, you learn how Amazon SageMaker enables these capabilities.
---
## Setup
To get started, make sure you have these prerequisites completed.
* Specify an AWS Region to host your model.
* An IAM role ARN exists that is used to give Amazon SageMaker access to your data in Amazon Simple Storage Service (Amazon S3). See the documentation for how to fine tune the permissions needed.
* Create an S3 bucket used to store the data used to train your model, any additional model data, and the data captured from model invocations. For demonstration purposes, you are using the same bucket for these. In reality, you might want to separate them with different security policies.
```
import boto3
import os
import sagemaker
from sagemaker import get_execution_role
region = boto3.Session().region_name
role = get_execution_role()
sess = sagemaker.session.Session()
bucket = sess.default_bucket()
prefix = 'tf-2-workflow'
s3_capture_upload_path = 's3://{}/{}/monitoring/datacapture'.format(bucket, prefix)
reports_prefix = '{}/reports'.format(prefix)
s3_report_path = 's3://{}/{}'.format(bucket,reports_prefix)
print("Capture path: {}".format(s3_capture_upload_path))
print("Report path: {}".format(s3_report_path))
```
# PART A: Capturing real-time inference data from Amazon SageMaker endpoints
Create an endpoint to showcase the data capture capability in action.
### Deploy the model to Amazon SageMaker
Start with deploying the trained TensorFlow model from lab 03.
```
import boto3
def get_latest_training_job_name(base_job_name):
    """Return the most recent completed SageMaker training job whose name
    contains *base_job_name*.

    Raises Exception when no completed job matches.
    """
    client = boto3.client('sagemaker')
    response = client.list_training_jobs(NameContains=base_job_name, SortBy='CreationTime',
                                         SortOrder='Descending', StatusEquals='Completed')
    summaries = response['TrainingJobSummaries']
    if not summaries:
        raise Exception('Training job not found.')
    # Results are sorted newest-first, so the first summary is the latest job.
    return summaries[0]['TrainingJobName']
def get_training_job_s3_model_artifacts(job_name):
    """Return the S3 URI of the model artifacts produced by *job_name*."""
    client = boto3.client('sagemaker')
    description = client.describe_training_job(TrainingJobName=job_name)
    return description['ModelArtifacts']['S3ModelArtifacts']
latest_training_job_name = get_latest_training_job_name('tf-2-workflow')
print(latest_training_job_name)
model_path = get_training_job_s3_model_artifacts(latest_training_job_name)
print(model_path)
```
Here, you create the model object with the image and model data.
```
from sagemaker.tensorflow.model import TensorFlowModel
tensorflow_model = TensorFlowModel(
model_data = model_path,
role = role,
framework_version = '2.3.1'
)
from time import gmtime, strftime
from sagemaker.model_monitor import DataCaptureConfig
endpoint_name = 'tf-2-workflow-endpoint-' + strftime("%Y-%m-%d-%H-%M-%S", gmtime())
print(endpoint_name)
predictor = tensorflow_model.deploy(
initial_instance_count=1,
instance_type='ml.m5.xlarge',
endpoint_name=endpoint_name,
data_capture_config=DataCaptureConfig(
enable_capture=True,
sampling_percentage=100,
destination_s3_uri=s3_capture_upload_path
)
)
```
### Prepare dataset
Next, we'll import the dataset. The dataset itself is small and relatively issue-free. For example, there are no missing values, a common problem for many other datasets. Accordingly, preprocessing just involves normalizing the data.
```
import numpy as np
from tensorflow.python.keras.datasets import boston_housing
from sklearn.preprocessing import StandardScaler
(x_train, y_train), (x_test, y_test) = boston_housing.load_data()
scaler = StandardScaler()
# Fit on the training split only, then apply to both splits, so test-set
# statistics never leak into preprocessing.
scaler.fit(x_train)
x_train = scaler.transform(x_train)
x_test = scaler.transform(x_test)
```
## Invoke the deployed model
You can now send data to this endpoint to get inferences in real time. Because you enabled the data capture in the previous steps, the request and response payload, along with some additional metadata, is saved in the Amazon Simple Storage Service (Amazon S3) location you have specified in the DataCaptureConfig.
This step invokes the endpoint with included sample data for about 3 minutes. Data is captured based on the sampling percentage specified and the capture continues until the data capture option is turned off.
```
%%time
import time
print("Sending test traffic to the endpoint {}. \nPlease wait...".format(endpoint_name))
flat_list =[]
for item in x_test:
result = predictor.predict(item)['predictions']
flat_list.append(float('%.1f'%(np.array(result))))
time.sleep(1.8)
print("Done!")
print('predictions: \t{}'.format(np.array(flat_list)))
```
## View captured data
Now list the data capture files stored in Amazon S3. You should expect to see different files from different time periods organized based on the hour in which the invocation occurred. The format of the Amazon S3 path is:
`s3://{destination-bucket-prefix}/{endpoint-name}/{variant-name}/yyyy/mm/dd/hh/filename.jsonl`
<b>Note that the delivery of capture data to Amazon S3 can take a couple of minutes, so the next cell might fail at first. If this happens, please retry after a minute.</b>
```
s3_client = boto3.Session().client('s3')
result = s3_client.list_objects(Bucket=bucket, Prefix='tf-2-workflow/monitoring/datacapture/')
capture_files = [capture_file.get("Key") for capture_file in result.get('Contents')]
print("Found Capture Files:")
print("\n ".join(capture_files))
```
Next, view the contents of a single capture file. Here you should see all the data captured in an Amazon SageMaker specific JSON-line formatted file. Take a quick peek at the first few lines in the captured file.
```
def get_obj_body(obj_key):
    """Fetch *obj_key* from the notebook's S3 bucket and decode it as UTF-8."""
    return s3_client.get_object(Bucket=bucket, Key=obj_key).get('Body').read().decode("utf-8")
capture_file = get_obj_body(capture_files[-1])
print(capture_file[:2000])
```
Finally, the contents of a single line is present below in a formatted JSON file so that you can observe a little better.
```
import json
print(json.dumps(json.loads(capture_file.split('\n')[0]), indent=2))
```
As you can see, each inference request is captured in one line in the jsonl file. The line contains both the input and output merged together. In the example, you provided the ContentType as `text/csv` which is reflected in the `observedContentType` value. Also, you expose the encoding that you used to encode the input and output payloads in the capture format with the `encoding` value.
To recap, you observed how you can enable capturing the input or output payloads to an endpoint with a new parameter. You have also observed what the captured format looks like in Amazon S3. Next, continue to explore how Amazon SageMaker helps with monitoring the data collected in Amazon S3.
# PART B: Model Monitor - Baselining and continuous monitoring
In addition to collecting the data, Amazon SageMaker provides the capability for you to monitor and evaluate the data observed by the endpoints. For this:
1. Create a baseline with which you compare the realtime traffic.
1. Once a baseline is ready, set up a schedule to continuously evaluate and compare against the baseline.
## 1. Constraint suggestion with baseline/training dataset
The training dataset with which you trained the model is usually a good baseline dataset. Note that the training dataset data schema and the inference dataset schema should exactly match (i.e. the number and order of the features).
From the training dataset you can ask Amazon SageMaker to suggest a set of baseline `constraints` and generate descriptive `statistics` to explore the data. For this example, upload the training dataset that was used to train the pre-trained model included in this example. If you already have it in Amazon S3, you can directly point to it.
### Prepare training dataset with headers
```
import pandas as pd
dt = pd.DataFrame(data = x_train,
columns = ["CRIM", "ZN", "INDUS", "CHAS","NOX","RM","AGE","DIS","RAD","TAX","PTRATIO","B","LSTAT"])
dt.to_csv("training-dataset-with-header.csv", index = False)
# copy over the training dataset to Amazon S3 (if you already have it in Amazon S3, you could reuse it)
baseline_prefix = prefix + '/baselining'
baseline_data_prefix = baseline_prefix + '/data'
baseline_results_prefix = baseline_prefix + '/results'
baseline_data_uri = 's3://{}/{}'.format(bucket,baseline_data_prefix)
baseline_results_uri = 's3://{}/{}'.format(bucket, baseline_results_prefix)
print('Baseline data uri: {}'.format(baseline_data_uri))
print('Baseline results uri: {}'.format(baseline_results_uri))
training_data_file = open("training-dataset-with-header.csv", 'rb')
s3_key = os.path.join(baseline_prefix, 'data', 'training-dataset-with-header.csv')
boto3.Session().resource('s3').Bucket(bucket).Object(s3_key).upload_fileobj(training_data_file)
```
### Create a baselining job with training dataset
Now that you have the training data ready in Amazon S3, start a job to `suggest` constraints. `DefaultModelMonitor.suggest_baseline(..)` starts a `ProcessingJob` using an Amazon SageMaker provided Model Monitor container to generate the constraints.
```
from sagemaker.model_monitor import DefaultModelMonitor
from sagemaker.model_monitor.dataset_format import DatasetFormat
my_default_monitor = DefaultModelMonitor(
role=role,
instance_count=1,
instance_type='ml.m5.xlarge',
volume_size_in_gb=20,
max_runtime_in_seconds=3600,
)
my_default_monitor.suggest_baseline(
baseline_dataset=baseline_data_uri+'/training-dataset-with-header.csv',
dataset_format=DatasetFormat.csv(header=True),
output_s3_uri=baseline_results_uri,
wait=True
)
```
### Explore the generated constraints and statistics
```
s3_client = boto3.Session().client('s3')
result = s3_client.list_objects(Bucket=bucket, Prefix=baseline_results_prefix)
report_files = [report_file.get("Key") for report_file in result.get('Contents')]
print("Found Files:")
print("\n ".join(report_files))
import pandas as pd
baseline_job = my_default_monitor.latest_baselining_job
schema_df = pd.io.json.json_normalize(baseline_job.baseline_statistics().body_dict["features"])
schema_df.head(10)
constraints_df = pd.io.json.json_normalize(baseline_job.suggested_constraints().body_dict["features"])
constraints_df.head(10)
```
## 2. Analyzing collected data for data quality issues
### Create a schedule
You can create a model monitoring schedule for the endpoint created earlier. Use the baseline resources (constraints and statistics) to compare against the realtime traffic.
From the analysis above, you saw how the captured data is saved - that is the standard input and output format for Tensorflow models. But Model Monitor is framework-agnostic, and expects a specific format [explained in the docs](https://docs.aws.amazon.com/sagemaker/latest/dg/model-monitor-pre-and-post-processing.html#model-monitor-pre-processing-script):
- Input
- Flattened JSON `{"feature0": <value>, "feature1": <value>...}`
- Tabular `"<value>, <value>..."`
- Output:
- Flattened JSON `{"prediction0": <value>, "prediction1": <value>...}`
- Tabular `"<value>, <value>..."`
We need to transform the input records to comply with this requirement. Model Monitor offers _pre-processing scripts_ in Python to transform the input. The cell below has the script that will work for our case.
```
%%writefile preprocessing.py
import json
def preprocess_handler(inference_record):
    """Model Monitor record preprocessor.

    Flattens one captured inference record into the flat-JSON format Model
    Monitor expects: {"feature0": <value>, ...} for the request body merged
    with {"prediction0": <value>} for the response.
    """
    input_data = json.loads(inference_record.endpoint_input.data)
    input_data = {f"feature{i}": val for i, val in enumerate(input_data)}
    output_data = json.loads(inference_record.endpoint_output.data)["predictions"][0][0]
    output_data = {"prediction0": output_data}
    # Bug fix: the original returned only the input features and silently
    # dropped output_data, so the prediction was never monitored. Merge both.
    return {**input_data, **output_data}
```
We'll upload this script to an s3 destination and pass it as the `record_preprocessor_script` parameter to the `create_monitoring_schedule` call.
```
# Stage the preprocessing script in S3 so the monitoring jobs can fetch it.
script_s3_dest_path = f"s3://{bucket}/{prefix}/artifacts/modelmonitor"
script_s3_dest = sagemaker.s3.S3Uploader.upload("preprocessing.py", script_s3_dest_path)
print(script_s3_dest)
from sagemaker.model_monitor import CronExpressionGenerator
from time import gmtime, strftime
# Timestamped name keeps repeated runs of this notebook from colliding.
mon_schedule_name = 'DEMO-tf-2-workflow-model-monitor-schedule-' + strftime("%Y-%m-%d-%H-%M-%S", gmtime())
# Hourly schedule comparing captured endpoint traffic (transformed by the
# preprocessor above) against the baseline statistics and constraints.
my_default_monitor.create_monitoring_schedule(
    monitor_schedule_name=mon_schedule_name,
    endpoint_input=predictor.endpoint,
    record_preprocessor_script=script_s3_dest,
    output_s3_uri=s3_report_path,
    statistics=my_default_monitor.baseline_statistics(),
    constraints=my_default_monitor.suggested_constraints(),
    schedule_cron_expression=CronExpressionGenerator.hourly(),
    enable_cloudwatch_metrics=True,
)
```
### Generating violations artificially
In order to get some result relevant to monitoring analysis, you can try and generate artificially some inferences with feature values causing specific violations, and then invoke the endpoint with this data
Looking at our RM and AGE features:
- RM - average number of rooms per dwelling
- AGE - proportion of owner-occupied units built prior to 1940
Let's simulate a situation where the average number of rooms is 0, and proportion of owner-occupied units built is 1000.
```
df_with_violations = pd.read_csv("training-dataset-with-header.csv")
df_with_violations["RM"] = 0
df_with_violations["AGE"] = 1000
df_with_violations
```
### Start generating some artificial traffic
The cell below starts a thread to send some traffic to the endpoint. Note that you need to stop the kernel to terminate this thread. If there is no traffic, the monitoring jobs are marked as `Failed` since there is no data to process.
```
from threading import Thread
from time import sleep
import time
def invoke_endpoint():
    """Send every row of the violation-laden dataset to the endpoint, one per second."""
    for item in df_with_violations.to_numpy():
        result = predictor.predict(item)['predictions']
        time.sleep(1)
def invoke_endpoint_forever():
    """Loop invoke_endpoint() until the kernel is stopped."""
    while True:
        invoke_endpoint()
# Background thread with no stop flag: only stopping the kernel terminates it.
thread = Thread(target = invoke_endpoint_forever)
thread.start()
# Note that you need to stop the kernel to stop the invocations
```
### Describe and inspect the schedule
Once you describe, observe that the MonitoringScheduleStatus changes to Scheduled.
```
desc_schedule_result = my_default_monitor.describe_schedule()
print('Schedule status: {}'.format(desc_schedule_result['MonitoringScheduleStatus']))
```
### List executions
The schedule starts jobs at the previously specified intervals. Here, you list the latest five executions. Note that if you are kicking this off after creating the hourly schedule, you might find the executions empty. You might have to wait until you cross the hour boundary (in UTC) to see executions kick off. The code below has the logic for waiting.
Note: Even for an hourly schedule, Amazon SageMaker has a buffer period of 20 minutes to schedule your execution. You might see your execution start in anywhere from zero to ~20 minutes from the hour boundary. This is expected and done for load balancing in the backend.
```
mon_executions = my_default_monitor.list_executions()
print("We created a hourly schedule above and it will kick off executions ON the hour (plus 0 - 20 min buffer.\nWe will have to wait till we hit the hour...")
# Poll once a minute until the first scheduled execution appears.
while len(mon_executions) == 0:
    print("Waiting for the 1st execution to happen...")
    time.sleep(60)
    mon_executions = my_default_monitor.list_executions()
```
### Inspect a specific execution (latest execution)
In the previous cell, you picked up the latest completed or failed scheduled execution. Here are the possible terminal states and what each of them mean:
* Completed - This means the monitoring execution completed and no issues were found in the violations report.
* CompletedWithViolations - This means the execution completed, but constraint violations were detected.
* Failed - The monitoring execution failed, maybe due to a client error (perhaps incorrect role permissions) or infrastructure issues. Further examination of FailureReason and ExitMessage is necessary to identify what exactly happened.
* Stopped - job exceeded max runtime or was manually stopped.
```
latest_execution = mon_executions[-1] # latest execution's index is -1, second to last is -2 and so on..
#time.sleep(60)
latest_execution.wait(logs=False)
print("Latest execution status: {}".format(latest_execution.describe()['ProcessingJobStatus']))
print("Latest execution result: {}".format(latest_execution.describe()['ExitMessage']))
latest_job = latest_execution.describe()
# Warn (but don't abort) when the processing job did not complete cleanly.
if (latest_job['ProcessingJobStatus'] != 'Completed'):
    print("====STOP==== \n No completed executions to inspect further. Please wait till an execution completes or investigate previously reported failures.")
# S3 destination where this execution wrote its monitoring report.
report_uri=latest_execution.output.destination
print('Report Uri: {}'.format(report_uri))
```
### List the generated reports
```
from urllib.parse import urlparse
# Split the s3://bucket/key report URI into its bucket and key parts.
s3uri = urlparse(report_uri)
report_bucket = s3uri.netloc
report_key = s3uri.path.lstrip('/')
print('Report bucket: {}'.format(report_bucket))
print('Report key: {}'.format(report_key))
s3_client = boto3.Session().client('s3')
result = s3_client.list_objects(Bucket=report_bucket, Prefix=report_key)
report_files = [report_file.get("Key") for report_file in result.get('Contents')]
print("Found Report Files:")
print("\n ".join(report_files))
```
### Violations report
If there are any violations compared to the baseline, they will be listed here.
```
violations = my_default_monitor.latest_monitoring_constraint_violations()
# max_colwidth=-1 is deprecated; None is the documented way to disable
# column-width truncation.
pd.set_option('display.max_colwidth', None)
# pd.io.json.json_normalize is deprecated/removed; pd.json_normalize is
# the supported equivalent.
constraints_df = pd.json_normalize(violations.body_dict["violations"])
constraints_df.head(10)
```
## Delete the resources
You can keep your endpoint running to continue capturing data. If you do not plan to collect more data or use this endpoint further, you should delete the endpoint to avoid incurring additional charges. Note that deleting your endpoint does not delete the data that was captured during the model invocations. That data persists in Amazon S3 until you delete it yourself.
But before that, you need to delete the schedule first.
```
my_default_monitor.delete_monitoring_schedule()
time.sleep(120) # actually wait for the deletion
predictor.delete_endpoint()
```
| github_jupyter |
### Importing the required libraries ###
```
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
%matplotlib inline
import seaborn as sns
import re
import zipfile
```
### UNZIP files ###
```
# Will unzip the files so that you can see them..
with zipfile.ZipFile("/kaggle/input/jigsaw-toxic-comment-classification-challenge/train.csv.zip","r") as z:
z.extractall(".")
import os
for dirname, _, filenames in os.walk('/kaggle/working/'):
for filename in filenames:
print(os.path.join(dirname, filename))
```
### Reading the Train File ###
```
# prepare text samples and their labels
print('Loading in comments...')
data = pd.read_csv("/kaggle/working/train.csv")
print(data.head())
# Feature information
data.columns
# Data dimensions (rows, columns)
data.shape
# The six toxicity labels; a comment can carry any subset of them.
cols_target = ['obscene','insult','toxic','severe_toxic','identity_hate','threat']
# Check for missing comment text
print(data["comment_text"].isna().sum())
# dropna
# check missing values in numeric columns
data.describe()
# Comments carrying none of the six labels ("good" comments).
unlabelled_in_all = data[(data['toxic']!=1) & (data['severe_toxic']!=1) &
(data['obscene']!=1) & (data['threat']!=1) &
(data['insult']!=1) & (data['identity_hate']!=1)]
print('Percentage of unlabelled comments or good comments is ', len(unlabelled_in_all)/len(data)*100)
# Comments flagged with *all* six labels simultaneously.
labelled_in_all = data[(data['toxic']==1) & (data['severe_toxic']==1) &
(data['obscene']==1) & (data['threat']==1) &
(data['insult']==1) & (data['identity_hate']==1)]
print('Percentage of comments which is present in all categories is ', len(labelled_in_all)/len(data)*100)
# let's see the total rows in train, test data and the numbers for the various categories
print('Total rows in train is {}'.format(len(data)))
print(data[cols_target].sum())
```
Next, let's examine the correlations among the target variables.
```
target_data = data[cols_target]
colormap = plt.cm.plasma
plt.figure(figsize=(7,7))
plt.title('Correlation of features & targets',y=1.05,size=14)
sns.heatmap(target_data.astype(float).corr(),linewidths=0.1,vmax=1.0,square=True,cmap=colormap,
linecolor='white',annot=True)
```
Indeed, it looks like some of the labels are more highly correlated with each other, e.g. insult-obscene has the highest correlation at 0.74, followed by toxic-obscene and toxic-insult.
### Now this kind of problem is ###
1) Multi class problem and not Binary
2) Also all classes are not independent but rather dependent or correlated
3) A comment can belong to multiple classes at the same time for e.g. comment can be toxic and insulting at the same time
Let us simplify the problem by first classifying the comments as "block" vs "allow"
```
# Collapse the six labels into one binary target: block (any label set)
# vs allow (no labels set).
data['block'] =data[cols_target].sum(axis =1)
print(data['block'].value_counts())
# Any positive label count means the comment should be blocked.
data['block'] = data['block'] > 0
data['block'] = data['block'].astype(int)
print(data['block'].value_counts())
# count plot of the block/allow class balance
sns.set()
sns.countplot(x="block" , data = data )
plt.show()
# Event Rate: share of comments labelled "block"
print("Percentage Event Rate : " , round(100*data['block'].sum()/data.shape[0],2) , "%")
```
### Let us focus on comments ###
```
# Let's look at the character length for the rows in the training data and record these
data['char_length'] = data['comment_text'].apply(lambda x: len(str(x)))
# look at the histogram plot for text length
sns.set()
data['char_length'].hist()
plt.show()
```
Most of the text length are within 500 characters, with some up to 5,000 characters long.
### Clean the Comments Text ###
```
def clean_text(text):
    """Normalize a raw comment for vectorization.

    Lowercases, expands common English contractions, replaces every
    non-word character with a space, collapses runs of whitespace, and
    strips leading/trailing spaces. Returns the cleaned string.
    """
    text = text.lower()
    # Expand contractions before stripping punctuation so the
    # apostrophe-based patterns can still match.
    text = re.sub(r"what's", "what is ", text)
    text = re.sub(r"\'s", " ", text)
    text = re.sub(r"\'ve", " have ", text)
    text = re.sub(r"can't", "cannot ", text)
    text = re.sub(r"n't", " not ", text)
    text = re.sub(r"i'm", "i am ", text)
    text = re.sub(r"\'re", " are ", text)
    text = re.sub(r"\'d", " would ", text)
    text = re.sub(r"\'ll", " will ", text)
    text = re.sub(r"\'scuse", " excuse ", text)
    # Raw strings: '\W'/'\s' in plain strings are invalid escapes and raise
    # DeprecationWarning (SyntaxWarning/Error in newer Pythons).
    text = re.sub(r'\W', ' ', text)
    text = re.sub(r'\s+', ' ', text)
    text = text.strip(' ')
    return text
%%time
# clean the comment_text in train_df [Thanks to Pulkit Jha for the useful pointer.]
data['comment_text'] = data['comment_text'].map(lambda com : clean_text(com))
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(data['comment_text'], data['block'], test_size=0.2, random_state=42)
print(X_train.shape, X_test.shape)
print(y_train.shape, y_test.shape)
# import and instantiate TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
vect = TfidfVectorizer(max_features = 10000, stop_words='english')
#vect = TfidfVectorizer(stop_words='english')
print(vect)
%%time
# learn the vocabulary in the training data, then use it to create a document-term matrix
X_train_dtm = vect.fit_transform(X_train)
# examine the document-term matrix created from X_train
X_train_dtm
X_train_dtm.shape
100*2792162/ (127656*10000)
%%time
# transform the test data using the earlier fitted vocabulary, into a document-term matrix
X_test_dtm = vect.transform(X_test)
# examine the document-term matrix from X_test
X_test_dtm
```
## Lets us build a binary classifier using Logistic Regression ##
```
# import and instantiate the Logistic Regression model
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
logreg = LogisticRegression(C=1, max_iter = 2000)
# train the model using X_train_dtm & y_train
logreg.fit(X_train_dtm, y_train)
# compute the training accuracy
y_pred_train = logreg.predict(X_train_dtm)
print('Training accuracy is {}'.format(accuracy_score(y_train, y_pred_train)))
# compute the predicted probabilities for X_test_dtm
y_pred_test = logreg.predict(X_test_dtm)
print('Test accuracy is {}'.format(accuracy_score(y_test,y_pred_test)))
print(confusion_matrix(y_test,y_pred_test))
#28507 -> comments are good and predeicted as good
#2014 -> comments are block and predicted as block
#164 -> comments are good but predicted as block
#1230 -> comments are block but predicted as good
(28507 + 2014)/(28507+2014+164+1230)
import sklearn.metrics as metrics
# calculate the fpr and tpr for all thresholds of the classification
probs = logreg.predict_proba(X_test_dtm)
preds = probs[:,1]
fpr, tpr, threshold = metrics.roc_curve(y_test, preds)
roc_auc = metrics.auc(fpr, tpr)
# method I: plt
import matplotlib.pyplot as plt
plt.title('Receiver Operating Characteristic')
plt.plot(fpr, tpr, 'b', label = 'AUC = %0.2f' % roc_auc)
plt.legend(loc = 'lower right')
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.show()
```
# Welcome to the curse of Accuracy, F1(help) to the rescue #
```
from sklearn.metrics import f1_score
print("F1 score on Test data : " ,f1_score(y_test,y_pred_test))
```
### In case of Class Imbalance - we use F1 score as a general measure for the model performance ###
Depending on the Business case - we need to fine tune the model
There is a Precision vs Recall Trade off
If you want to capture all toxic tweets - then some of the good tweets will be misclassified as bad tweets
```
y_pred_test = logreg.predict_proba(X_test_dtm)[:,1]
#print(y_pred_test)
y_pred_test = y_pred_test >= 0.2 # by default it is 0.5
y_pred_test = y_pred_test.astype(int)
print('Test accuracy is {}'.format(accuracy_score(y_test,y_pred_test)))
print(confusion_matrix(y_test,y_pred_test))
print("F1 score on Test data : " ,f1_score(y_test,y_pred_test))
```
# Let us use a tree base model #
```
%%time
from sklearn.metrics import f1_score
from sklearn.tree import DecisionTreeClassifier
dt_clf = DecisionTreeClassifier()
# train the model using X_train_dtm & y_train
dt_clf.fit(X_train_dtm, y_train)
# compute the training accuracy
y_pred_train = dt_clf.predict(X_train_dtm)
print('Training accuracy is {}'.format(accuracy_score(y_train, y_pred_train)))
# compute the predicted probabilities for X_test_dtm
y_pred_test = dt_clf.predict(X_test_dtm)
print('Test accuracy is {}'.format(accuracy_score(y_test,y_pred_test)))
print(confusion_matrix(y_test,y_pred_test))
print("F1 score on Test data : " ,f1_score(y_test,y_pred_test))
```
### Lets us try an Ensemble of Trees ###
```
%%time
from sklearn.metrics import f1_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
rf_clf = RandomForestClassifier()
# train the model using X_train_dtm & y_train
rf_clf.fit(X_train_dtm, y_train)
# compute the training accuracy
y_pred_train = rf_clf.predict(X_train_dtm)
print('Training accuracy is {}'.format(accuracy_score(y_train, y_pred_train)))
# compute the predicted probabilities for X_test_dtm
y_pred_test = rf_clf.predict(X_test_dtm)
print('Test accuracy is {}'.format(accuracy_score(y_test,y_pred_test)))
print(confusion_matrix(y_test,y_pred_test))
print("F1 score on Test data : " ,f1_score(y_test,y_pred_test))
# Fine Tuning Random Forest
y_pred_test = rf_clf.predict_proba(X_test_dtm)[:,1]
y_pred_test = y_pred_test >= 0.05 # by default it is 0.5
y_pred_test = y_pred_test.astype(int)
print('Test accuracy is {}'.format(accuracy_score(y_test,y_pred_test)))
print(confusion_matrix(y_test,y_pred_test))
print("F1 score on Test data : " ,f1_score(y_test,y_pred_test))
%%time
from sklearn.metrics import f1_score
from sklearn.linear_model import PassiveAggressiveClassifier
pa_clf = PassiveAggressiveClassifier()
# train the model using X_train_dtm & y_train
pa_clf.fit(X_train_dtm, y_train)
# compute the training accuracy
y_pred_train = pa_clf.predict(X_train_dtm)
print('Training accuracy is {}'.format(accuracy_score(y_train, y_pred_train)))
# compute the predicted probabilities for X_test_dtm
y_pred_test = pa_clf.predict(X_test_dtm)
print('Test accuracy is {}'.format(accuracy_score(y_test,y_pred_test)))
print(confusion_matrix(y_test,y_pred_test))
print("F1 score on Test data : " ,f1_score(y_test,y_pred_test))
```
### Passive Aggressive Classifier does not support prediction probabilities - so the decision threshold can't be fine-tuned ###
```
%%time
from sklearn.metrics import f1_score
import xgboost
xgb = xgboost.XGBClassifier()
# train the model using X_train_dtm & y_train
xgb.fit(X_train_dtm, y_train)
# compute the training accuracy
y_pred_train = xgb.predict(X_train_dtm)
print('Training accuracy is {}'.format(accuracy_score(y_train, y_pred_train)))
# compute the predicted probabilities for X_test_dtm
y_pred_test = xgb.predict(X_test_dtm)
print('Test accuracy is {}'.format(accuracy_score(y_test,y_pred_test)))
print(confusion_matrix(y_test,y_pred_test))
print("F1 score on Test data : " ,f1_score(y_test,y_pred_test))
# Fine Tuning XGBOOST
y_pred_test = xgb.predict_proba(X_test_dtm)[:,1]
y_pred_test = y_pred_test >= 0.06 # by default it is 0.5
y_pred_test = y_pred_test.astype(int)
print('Test accuracy is {}'.format(accuracy_score(y_test,y_pred_test)))
print(confusion_matrix(y_test,y_pred_test))
print("F1 score on Test data : " ,f1_score(y_test,y_pred_test))
```
### Advance Models - LightGBM ###
```
import lightgbm
# LightGBM training parameters: binary objective with AUC as the eval
# metric; is_unbalance reweights the minority ("block") class.
parameters = {
'application': 'binary',
'objective': 'binary',
'metric': 'auc',
'is_unbalance': 'true',
'boosting': 'gbdt',
'num_leaves': 31,
'feature_fraction': 0.5,
'bagging_fraction': 0.5,
'bagging_freq': 20,
'learning_rate': 0.05,
'verbose': 0
}
train_data = lightgbm.Dataset(X_train_dtm, label=y_train)
test_data = lightgbm.Dataset(X_test_dtm, label=y_test)
# NOTE(review): early_stopping_rounds as a train() kwarg was removed in
# recent LightGBM versions (use callbacks=[lightgbm.early_stopping(10)])
# -- confirm the installed version.
clf = lightgbm.train(parameters,
train_data,
valid_sets=test_data,
num_boost_round=500,
early_stopping_rounds=10)
# Fine Tuning LIGHT GBM: lower the decision threshold to trade precision
# for recall on the minority class.
y_pred_test = clf.predict(X_test_dtm)
y_pred_test = y_pred_test >= 0.35 # by default it is 0.5
y_pred_test = y_pred_test.astype(int)
print('Test accuracy is {}'.format(accuracy_score(y_test,y_pred_test)))
print(confusion_matrix(y_test,y_pred_test))
print("F1 score on Test data : " ,f1_score(y_test,y_pred_test))
```
## Model Explanation ##
```
import eli5
eli5.show_weights(logreg,vec = vect, top = 15) # logistic regression
# will give you top 15 features or words which makes a comment toxic
eli5.show_weights(xgb,vec = vect,top = 15) # XGBoost
# will give you top 15 features or words which makes a comment toxic
```
## Tweets Explanation ##
```
X_test.iloc[718]
eli5.show_prediction(logreg, vec = vect, doc = X_test.iloc[718])
```
| github_jupyter |
```
from google.colab import drive
drive.mount('/content/drive')
import numpy as np
import pandas as pd
train_srp53 = pd.read_csv('/content/drive/MyDrive/Molecular Exploration/Data/sr-p53.smiles',
sep='\t',
names=['smiles', 'id', 'target'])
train_srp53.head()
len(train_srp53)
sum(train_srp53.target)
!pip install -q SmilesPE
```
### Tokenization of string compounds with SmilesPE (Byte pair encoding library with built-in tokenizers)
```
from SmilesPE.pretokenizer import atomwise_tokenizer
smi = 'CC[N+](C)(C)Cc1ccccc1Br'
toks = atomwise_tokenizer(smi)
print(toks)
```
***example of pretrained SMILES byte-pair encoding***
```
import requests
file_url = 'https://raw.githubusercontent.com/XinhaoLi74/SmilesPE/master/SPE_ChEMBL.txt'
r = requests.get(file_url, stream = True)
with open('/content/drive/MyDrive/Molecular Exploration/Data/BPE_codes.txt', 'wb') as file:
for block in r.iter_content(chunk_size = 1024):
if block:
file.write(block)
import codecs
from SmilesPE.tokenizer import *
spe_vob= codecs.open('/content/drive/MyDrive/DATA_2040/Molecular Exploration/Data/BPE_codes.txt')
spe = SPE_Tokenizer(spe_vob)
smi = 'CC[N+](C)(C)Cc1ccccc1Br'
bpe_encoding = spe.tokenize(smi)
# should get >>> 'CC [N+](C) (C)C c1ccccc1 Br'
```
*The output of the byte-pair encoding is a space-separated string of tokens, each token being a string. The example output below would be the input sequence to a model.*
```
bpe_encoding.split(' ')
```
### Looking at the byte-pair encoding alphabet across the whole (~8000 large) dataset
```
# initialize the pretrained BP encoder
spe = SPE_Tokenizer(spe_vob)
# initialize empty vocabulary set
alphabet = set()
# traverse through data, adding each molecule's byte-pair tokens to the vocabulary
for smi in train_srp53.smiles:
    bpe_encoding = spe.tokenize(smi)
    # tokenizer output is a space-separated token string
    tkns = set(bpe_encoding.split(' '))
    alphabet = alphabet.union(tkns)
```
***The alphabet for this training set is 1096 elements -- the whole alphabet used to train this BP encoder is ~3000 ==> what do we do to prepare for getting test samples with tokens unseen in the training set?***
```
len(alphabet)
from matplotlib import pyplot as plt
def smiles_to_token(row):
    """Atomwise-tokenize the 'smiles' column of a DataFrame row."""
    return atomwise_tokenizer(row['smiles'])
# Add a per-molecule token list column.
train_srp53['tokens'] = train_srp53.apply(lambda row: smiles_to_token(row), axis=1)
train_srp53.head()
# Build the atomwise-token vocabulary over the whole training set.
vocab = set()
for smi in train_srp53.smiles:
    tok = atomwise_tokenizer(smi)
    tokens = set(tok)
    vocab = vocab.union(tokens)
def CountFrequency(my_list):
# Creating an empty dictionary
freq = {}
for item in my_list:
if item in freq:
freq[item] += 1
else:
freq[item] = 1
return freq
# token_appears_once: number of molecules each token appears in (document frequency)
token_appears_once = {}
# token_freq: total occurrences of each token across all molecules
token_freq = {}
# token_prop: per-molecule proportion of each vocabulary token
token_prop = {k:[] for k in vocab}
# smile_lengths: token count per molecule
smile_lengths = []
for i, row in train_srp53.iterrows():
    token_dict = CountFrequency(row['tokens'])
    smile_lengths.append(len(row['tokens']))
    for token, count in token_dict.items():
        # one increment per molecule regardless of count -> document frequency
        if token in token_appears_once.keys():
            token_appears_once[token] += 1
        else:
            token_appears_once[token] = 1
        # accumulate raw occurrence counts
        if token in token_freq.keys():
            token_freq[token] += count
        else:
            token_freq[token] = count
    # proportion of this molecule's tokens made up by each vocab token
    for tok in token_prop.keys():
        token_prop[tok].append(row['tokens'].count(tok) / len(row['tokens']))
print(token_appears_once['N'])
print(token_freq['N'])
print(len(token_prop['N']))
print(len(smile_lengths))
```
## EDA Plots
```
import plotly.express as px
from heapq import nlargest
def dict_to_df(d, N):
    """Return a DataFrame of the N largest-valued entries of dict *d*.

    Columns are 'Token' (the key) and 'Count' (its value), ordered from
    largest to smallest value.
    """
    top_tokens = nlargest(N, d, key=d.get)
    return pd.DataFrame({
        'Token': top_tokens,
        'Count': [d[tok] for tok in top_tokens],
    })
token_appears_df = dict_to_df(token_appears_once, 30)
fig = px.bar(token_appears_df, x='Token', y='Count')
fig.show()
token_freq_df = dict_to_df(token_freq, 30)
fig = px.bar(token_freq_df, x='Token', y='Count', log_y=True)
fig.show()
fig = px.histogram(pd.DataFrame(smile_lengths, columns=['Lengths']), x = 'Lengths')
fig.show()
prop_df = dict_to_df(token_prop, len(vocab))
prop_df['Average'] = prop_df.apply(lambda row: np.mean(row.Count), axis=1)
prop_df_sorted = prop_df.sort_values(by=['Average'], ascending=False)
# fig = px.box(prop_df_sorted.head(10), x='Token', y='Count')
# fig.show()
# prop_dict = {'C': token_prop['C'], '(':token_prop['(']}
# # prop_df = pd.DataFrame([token_prop['C'], token_prop['('], token_prop[')']], columns=['Carbon', '(', ')'])
# prop_df = pd.DataFrame(prop_dict)
# fig = px.box(prop_df, y=)
# fig.show()
```
| github_jupyter |
# Plagiarism Detection Model
Now that you've created training and test data, you are ready to define and train a model. Your goal in this notebook, will be to train a binary classification model that learns to label an answer file as either plagiarized or not, based on the features you provide the model.
This task will be broken down into a few discrete steps:
* Upload your data to S3.
* Define a binary classification model and a training script.
* Train your model and deploy it.
* Evaluate your deployed classifier and answer some questions about your approach.
To complete this notebook, you'll have to complete all given exercises and answer all the questions in this notebook.
> All your tasks will be clearly labeled **EXERCISE** and questions as **QUESTION**.
It will be up to you to explore different classification models and decide on a model that gives you the best performance for this dataset.
---
## Load Data to S3
In the last notebook, you should have created two files: a `training.csv` and `test.csv` file with the features and class labels for the given corpus of plagiarized/non-plagiarized text data.
>The below cells load in some AWS SageMaker libraries and creates a default bucket. After creating this bucket, you can upload your locally stored data to S3.
Save your train and test `.csv` feature files, locally. To do this you can run the second notebook "2_Plagiarism_Feature_Engineering" in SageMaker or you can manually upload your files to this notebook using the upload icon in Jupyter Lab. Then you can upload local files to S3 by using `sagemaker_session.upload_data` and pointing directly to where the training data is saved.
```
import pandas as pd
import boto3
import sagemaker
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
# session and role
sagemaker_session = sagemaker.Session()
role = sagemaker.get_execution_role()
# create an S3 bucket
bucket = sagemaker_session.default_bucket()
```
## EXERCISE: Upload your training data to S3
Specify the `data_dir` where you've saved your `train.csv` file. Decide on a descriptive `prefix` that defines where your data will be uploaded in the default S3 bucket. Finally, create a pointer to your training data by calling `sagemaker_session.upload_data` and passing in the required parameters. It may help to look at the [Session documentation](https://sagemaker.readthedocs.io/en/stable/session.html#sagemaker.session.Session.upload_data) or previous SageMaker code examples.
You are expected to upload your entire directory. Later, the training script will only access the `train.csv` file.
```
# should be the name of directory you created to save your features data
data_dir = 'plagiarism_data'
# set prefix, a descriptive name for a directory
prefix = 'sagemaker/plagiarism-data'
# upload all data to S3
input_data = sagemaker_session.upload_data(path=data_dir, bucket=bucket, key_prefix=prefix)
print(input_data)
```
### Test cell
Test that your data has been successfully uploaded. The below cell prints out the items in your S3 bucket and will throw an error if it is empty. You should see the contents of your `data_dir` and perhaps some checkpoints. If you see any other files listed, then you may have some old model files that you can delete via the S3 console (though, additional files shouldn't affect the performance of model developed in this notebook).
```
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
# confirm that data is in S3 bucket
empty_check = []
for obj in boto3.resource('s3').Bucket(bucket).objects.all():
empty_check.append(obj.key)
print(obj.key)
assert len(empty_check) !=0, 'S3 bucket is empty.'
print('Test passed!')
```
---
# Modeling
Now that you've uploaded your training data, it's time to define and train a model!
The type of model you create is up to you. For a binary classification task, you can choose to go one of three routes:
* Use a built-in classification algorithm, like LinearLearner.
* Define a custom Scikit-learn classifier, a comparison of models can be found [here](https://scikit-learn.org/stable/auto_examples/classification/plot_classifier_comparison.html).
* Define a custom PyTorch neural network classifier.
It will be up to you to test out a variety of models and choose the best one. Your project will be graded on the accuracy of your final model.
---
## EXERCISE: Complete a training script
To implement a custom classifier, you'll need to complete a `train.py` script. You've been given the folders `source_sklearn` and `source_pytorch` which hold starting code for a custom Scikit-learn model and a PyTorch model, respectively. Each directory has a `train.py` training script. To complete this project **you only need to complete one of these scripts**; the script that is responsible for training your final model.
A typical training script:
* Loads training data from a specified directory
* Parses any training & model hyperparameters (ex. nodes in a neural network, training epochs, etc.)
* Instantiates a model of your design, with any specified hyperparams
* Trains that model
* Finally, saves the model so that it can be hosted/deployed, later
### Defining and training a model
Much of the training script code is provided for you. Almost all of your work will be done in the `if __name__ == '__main__':` section. To complete a `train.py` file, you will:
1. Import any extra libraries you need
2. Define any additional model training hyperparameters using `parser.add_argument`
2. Define a model in the `if __name__ == '__main__':` section
3. Train the model in that same section
Below, you can use `!pygmentize` to display an existing `train.py` file. Read through the code; all of your tasks are marked with `TODO` comments.
**Note: If you choose to create a custom PyTorch model, you will be responsible for defining the model in the `model.py` file,** and a `predict.py` file is provided. If you choose to use Scikit-learn, you only need a `train.py` file; you may import a classifier from the `sklearn` library.
```
# directory can be changed to: source_sklearn or source_pytorch
!pygmentize source_sklearn/train.py
```
### Provided code
If you read the code above, you can see that the starter code includes a few things:
* Model loading (`model_fn`) and saving code
* Getting SageMaker's default hyperparameters
* Loading the training data by name, `train.csv` and extracting the features and labels, `train_x`, and `train_y`
If you'd like to read more about model saving with [joblib for sklearn](https://scikit-learn.org/stable/modules/model_persistence.html) or with [torch.save](https://pytorch.org/tutorials/beginner/saving_loading_models.html), click on the provided links.
---
# Create an Estimator
When a custom model is constructed in SageMaker, an entry point must be specified. This is the Python file which will be executed when the model is trained; the `train.py` function you specified above. To run a custom training script in SageMaker, construct an estimator, and fill in the appropriate constructor arguments:
* **entry_point**: The path to the Python script SageMaker runs for training and prediction.
* **source_dir**: The path to the training script directory `source_sklearn` OR `source_pytorch`.
* **role**: Role ARN, which was specified, above.
* **train_instance_count**: The number of training instances (should be left at 1).
* **train_instance_type**: The type of SageMaker instance for training. Note: Because Scikit-learn does not natively support GPU training, Sagemaker Scikit-learn does not currently support training on GPU instance types.
* **sagemaker_session**: The session used to train on Sagemaker.
* **hyperparameters** (optional): A dictionary `{'name':value, ..}` passed to the train function as hyperparameters.
Note: For a PyTorch model, there is another optional argument **framework_version**, which you can set to the latest version of PyTorch, `1.0`.
## EXERCISE: Define a Scikit-learn or PyTorch estimator
To import your desired estimator, use one of the following lines:
```
from sagemaker.sklearn.estimator import SKLearn
```
```
from sagemaker.pytorch import PyTorch
```
```
# your import and estimator code, here
# import a PyTorch wrapper
from sagemaker.pytorch import PyTorch
# specify an output path for model artifacts
output_path = f"s3://{bucket}/{prefix}"
# instantiate a pytorch estimator
# NOTE(review): train_instance_count/train_instance_type are SageMaker SDK
# v1 parameter names; SDK v2 renamed them to instance_count/instance_type
# -- confirm the SDK version in use.
estimator = PyTorch(
    entry_point="train.py",
    source_dir="source_pytorch",
    role=role,
    framework_version="1.0",
    train_instance_count=1,
    train_instance_type="ml.c4.xlarge",
    output_path=output_path,
    sagemaker_session=sagemaker_session,
    hyperparameters={
        "input_features": 2,
        "hidden_dim": 20,
        "output_dim": 1,
        "epochs": 160
    })
```
## EXERCISE: Train the estimator
Train your estimator on the training data stored in S3. This should create a training job that you can monitor in your SageMaker console.
```
train_data_path = input_data + "/train.csv"
print(train_data_path)
%%time
# Train your estimator on S3 training data
estimator.fit({'train': train_data_path})
```
## EXERCISE: Deploy the trained model
After training, deploy your model to create a `predictor`. If you're using a PyTorch model, you'll need to create a trained `PyTorchModel` that accepts the trained `<model>.model_data` as an input parameter and points to the provided `source_pytorch/predict.py` file as an entry point.
To deploy a trained model, you'll use `<model>.deploy`, which takes in two arguments:
* **initial_instance_count**: The number of deployed instances (1).
* **instance_type**: The type of SageMaker instance for deployment.
Note: If you run into an instance error, it may be because you chose the wrong training or deployment instance_type. It may help to refer to your previous exercise code to see which types of instances we used.
```
%%time
# uncomment, if needed
from sagemaker.pytorch import PyTorchModel
model = PyTorchModel(
entry_point="predict.py",
role=role,
framework_version="1.0",
model_data=estimator.model_data,
source_dir="source_pytorch"
)
# deploy your model to create a predictor
predictor = model.deploy(initial_instance_count=1, instance_type="ml.t2.medium")
```
---
# Evaluating Your Model
Once your model is deployed, you can see how it performs when applied to our test data.
The provided cell below, reads in the test data, assuming it is stored locally in `data_dir` and named `test.csv`. The labels and features are extracted from the `.csv` file.
```
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
import os
# read in test data, assuming it is stored locally
test_data = pd.read_csv(os.path.join(data_dir, "test.csv"), header=None, names=None)
# labels are in the first column
test_y = test_data.iloc[:,0]
test_x = test_data.iloc[:,1:]
```
## EXERCISE: Determine the accuracy of your model
Use your deployed `predictor` to generate predicted, class labels for the test data. Compare those to the *true* labels, `test_y`, and calculate the accuracy as a value between 0 and 1.0 that indicates the fraction of test data that your model classified correctly. You may use [sklearn.metrics](https://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics) for this calculation.
**To pass this project, your model should get at least 90% test accuracy.**
```
# First: generate predicted, class labels
test_y_preds = predictor.predict(test_x)
"""
DON'T MODIFY ANYTHING IN THIS CELL THAT IS BELOW THIS LINE
"""
# test that your model generates the correct number of labels
assert len(test_y_preds)==len(test_y), 'Unexpected number of predictions.'
print('Test passed!')
# Second: calculate the test accuracy
from sklearn.metrics import accuracy_score
accuracy = accuracy_score(test_y, test_y_preds)
print(accuracy)
## print out the array of predicted and true labels, if you want
print('\nPredicted class labels: ')
print(test_y_preds)
print('\nTrue class labels: ')
print(test_y.values)
```
### Question 1: How many false positives and false negatives did your model produce, if any? And why do you think this is?
**Answer**:
```
# code to evaluate the endpoint on test data
# returns a variety of model metrics
def evaluate(test_preds, test_labels, verbose=True):
    """Evaluate binary-classification predictions against true labels.

    Args:
        test_preds: array-like of raw model outputs (probabilities or labels).
        test_labels: array-like of true 0/1 labels.
        verbose: if True, print a confusion table and the metrics.

    Returns:
        dict with confusion-matrix counts and Recall/Precision/Accuracy.
    """
    # Round raw outputs to hard 0/1 labels and drop singleton dimensions.
    preds = np.squeeze(np.round(test_preds))
    # Complements computed once and reused for all four confusion cells.
    pos = test_labels
    neg = 1 - test_labels
    tp = np.logical_and(pos, preds).sum()
    fp = np.logical_and(neg, preds).sum()
    tn = np.logical_and(neg, 1 - preds).sum()
    fn = np.logical_and(pos, 1 - preds).sum()
    # Standard binary classification metrics.
    recall = tp / (tp + fn)
    precision = tp / (tp + fp)
    accuracy = (tp + tn) / (tp + fp + tn + fn)
    if verbose:
        print(pd.crosstab(test_labels, preds, rownames=['actuals'], colnames=['predictions']))
        print("\n{:<11} {:.3f}".format('Recall:', recall))
        print("{:<11} {:.3f}".format('Precision:', precision))
        print("{:<11} {:.3f}".format('Accuracy:', accuracy))
        print()
    return {'TP': tp, 'FP': fp, 'FN': fn, 'TN': tn,
            'Precision': precision, 'Recall': recall, 'Accuracy': accuracy}
metrics = evaluate(test_y_preds, test_y.values, True)
```
The model produced 1 false positive and 0 false negatives, which is a pretty good result. Possible reasons for the remaining error: 1. the sample is small; 2. the features are limited and do not capture many characteristics of the text.
### Question 2: How did you decide on the type of model to use?
**Answer**:
The basic model of sklearn and pytorch are all linear model. The problem is linear inseparable. Thus, deep learning is better for this problem because the pytorch model stacks two layer linear models.
----
## EXERCISE: Clean up Resources
After you're done evaluating your model, **delete your model endpoint**. You can do this with a call to `.delete_endpoint()`. You need to show, in this notebook, that the endpoint was deleted. Any other resources, you may delete from the AWS console, and you will find more instructions on cleaning up all your resources, below.
```
# uncomment and fill in the line below!
predictor.delete_endpoint()
```
### Deleting S3 bucket
When you are *completely* done with training and testing models, you can also delete your entire S3 bucket. If you do this before you are done training your model, you'll have to recreate your S3 bucket and upload your training data again.
```
# deleting bucket, uncomment lines below
# bucket_to_delete = boto3.resource('s3').Bucket(bucket)
# bucket_to_delete.objects.all().delete()
```
### Deleting all your models and instances
When you are _completely_ done with this project and do **not** ever want to revisit this notebook, you can choose to delete all of your SageMaker notebook instances and models by following [these instructions](https://docs.aws.amazon.com/sagemaker/latest/dg/ex1-cleanup.html). Before you delete this notebook instance, I recommend at least downloading a copy and saving it, locally.
---
## Further Directions
There are many ways to improve or add on to this project to expand your learning or make this more of a unique project for you. A few ideas are listed below:
* Train a classifier to predict the *category* (1-3) of plagiarism and not just plagiarized (1) or not (0).
* Utilize a different and larger dataset to see if this model can be extended to other types of plagiarism.
* Use language or character-level analysis to find different (and more) similarity features.
* Write a complete pipeline function that accepts a source text and submitted text file, and classifies the submitted text as plagiarized or not.
* Use API Gateway and a lambda function to deploy your model to a web application.
These are all just options for extending your work. If you've completed all the exercises in this notebook, you've completed a real-world application, and can proceed to submit your project. Great job!
| github_jupyter |
```
# 任意选一个你喜欢的整数,这能帮你得到稳定的结果
seed = 2333 # todo
```
# 欢迎来到线性回归项目
若项目中的题目有困难没完成也没关系,我们鼓励你带着问题提交项目,评审人会给予你诸多帮助。
所有选做题都可以不做,不影响项目通过。如果你做了,那么项目评审会帮你批改,也会因为选做部分做错而判定为不通过。
其中非代码题可以提交手写后扫描的 pdf 文件,或使用 Latex 在文档中直接回答。
# 1 矩阵运算
## 1.1 创建一个 4*4 的单位矩阵
```
# 这个项目设计来帮你熟悉 python list 和线性代数
# 你不能调用任何NumPy以及相关的科学计算库来完成作业
# 本项目要求矩阵统一使用二维列表表示,如下:
A = [[1,2,3],
[2,3,3],
[1,2,5]]
B = [[1,2,3,5],
[2,3,3,5],
[1,2,5,1]]
# 向量也用二维列表表示
C = [[1],
[2],
[3]]
#TODO 创建一个 4*4 单位矩阵
I = [[1,0,0,0],
[0,1,0,0],
[0,0,1,0],
[0,0,0,1]]
```
## 1.2 返回矩阵的行数和列数
```
# TODO 返回矩阵的行数和列数
def shape(M):
    """Return (row count, column count) of a matrix given as a 2-D list."""
    rows = len(M)
    cols = len(M[0])
    return rows, cols
# 运行以下代码测试你的 shape 函数
%run -i -e test.py LinearRegressionTestCase.test_shape
```
## 1.3 每个元素四舍五入到特定小数数位
```
# TODO 每个元素四舍五入到特定小数数位
# 直接修改参数矩阵,无返回值
def matxRound(M, decPts=4):
    """Round every entry of M in place to decPts decimal places (no return)."""
    for r, row in enumerate(M):
        for c, value in enumerate(row):
            M[r][c] = round(value, decPts)
# 运行以下代码测试你的 matxRound 函数
%run -i -e test.py LinearRegressionTestCase.test_matxRound
```
## 1.4 计算矩阵的转置
```
# TODO 计算矩阵的转置
def transpose(M):
    """Return the transpose of matrix M as a new 2-D list.

    zip(*M) yields the columns of M as tuples; converting each to a list
    builds the transposed rows in one pass, replacing the original
    preallocate-then-fill double loop.
    """
    return [list(col) for col in zip(*M)]
# 运行以下代码测试你的 transpose 函数
%run -i -e test.py LinearRegressionTestCase.test_transpose
```
## 1.5 计算矩阵乘法 AB
```
# TODO 计算矩阵乘法 AB,如果无法相乘则raise ValueError
def dot(a, b):
    """Return the dot product of two equal-length sequences."""
    total = 0
    for x, y in zip(a, b):
        total += x * y
    return total
def matxMultiply(A, B):
    """Return the matrix product AB of two 2-D list matrices.

    Raises:
        ValueError: if the column count of A differs from the row count of B.
    """
    n, m = len(A), len(A[0])
    p, q = len(B), len(B[0])
    if m != p:
        raise ValueError("dimension of matrices don't match")
    # Transpose B once so each column is available as a plain row; the
    # original re-extracted the column of B for every row of A (O(n*q*p)
    # extra list builds).
    Bt = [list(col) for col in zip(*B)]
    return [[sum(x * y for x, y in zip(rowA, colB)) for colB in Bt]
            for rowA in A]
# 运行以下代码测试你的 matxMultiply 函数
%run -i -e test.py LinearRegressionTestCase.test_matxMultiply
```
---
# 2 Gaussian Jordan 消元法
## 2.1 构造增广矩阵
$ A = \begin{bmatrix}
a_{11} & a_{12} & ... & a_{1n}\\
a_{21} & a_{22} & ... & a_{2n}\\
a_{31} & a_{32} & ... & a_{3n}\\
... & ... & ... & ...\\
a_{n1} & a_{n2} & ... & a_{nn}\\
\end{bmatrix} , b = \begin{bmatrix}
b_{1} \\
b_{2} \\
b_{3} \\
... \\
b_{n} \\
\end{bmatrix}$
返回 $ Ab = \begin{bmatrix}
a_{11} & a_{12} & ... & a_{1n} & b_{1}\\
a_{21} & a_{22} & ... & a_{2n} & b_{2}\\
a_{31} & a_{32} & ... & a_{3n} & b_{3}\\
... & ... & ... & ...& ...\\
a_{n1} & a_{n2} & ... & a_{nn} & b_{n} \end{bmatrix}$
```
# TODO 构造增广矩阵,假设A,b行数相同
def augmentMatrix(A, b):
    """Return the augmented matrix [A|b]; A and b have the same row count."""
    augmented = []
    for rowA, rowb in zip(A, b):
        augmented.append(rowA + rowb)
    return augmented
# 运行以下代码测试你的 augmentMatrix 函数
%run -i -e test.py LinearRegressionTestCase.test_augmentMatrix
```
## 2.2 初等行变换
- 交换两行
- 把某行乘以一个非零常数
- 把某行加上另一行的若干倍:
```
# TODO r1 <---> r2
# 直接修改参数矩阵,无返回值
def swapRows(M, r1, r2):
    """Swap rows r1 and r2 of M in place (no return)."""
    if r1 == r2:
        return
    M[r1], M[r2] = M[r2], M[r1]
# 运行以下代码测试你的 swapRows 函数
%run -i -e test.py LinearRegressionTestCase.test_swapRows
## TODO r1 <--- r1 * scale
# scale为0是非法输入,要求 raise ValueError
# 直接修改参数矩阵,无返回值
def scaleRow(M, r, scale):
    """Multiply row r of M in place by a nonzero constant.

    Raises:
        ValueError: if scale is zero (an illegal row operation here).
    """
    if scale == 0:
        raise ValueError('cannot scale a matrix by zero')
    for col, value in enumerate(M[r]):
        M[r][col] = value * scale
# 运行以下代码测试你的 scaleRow 函数
%run -i -e test.py LinearRegressionTestCase.test_scaleRow
# TODO r1 <--- r1 + r2*scale
# 直接修改参数矩阵,无返回值
def addScaledRow(M, r1, r2, scale):
    """In place, add scale * (row r2) to row r1 of M (no return).

    Raises:
        ValueError: if scale is zero, matching scaleRow's contract.
    """
    if scale == 0:
        raise ValueError('cannot scale a matrix by zero')
    for col, value in enumerate(M[r2]):
        M[r1][col] += scale * value
# 运行以下代码测试你的 addScaledRow 函数
%run -i -e test.py LinearRegressionTestCase.test_addScaledRow
```
## 2.3 Gaussian Jordan 消元法求解 Ax = b
### 2.3.1 算法
步骤1 检查A,b是否行数相同
步骤2 构造增广矩阵Ab
步骤3 逐列转换Ab为化简行阶梯形矩阵 [中文维基链接](https://zh.wikipedia.org/wiki/%E9%98%B6%E6%A2%AF%E5%BD%A2%E7%9F%A9%E9%98%B5#.E5.8C.96.E7.AE.80.E5.90.8E.E7.9A.84-.7Bzh-hans:.E8.A1.8C.3B_zh-hant:.E5.88.97.3B.7D-.E9.98.B6.E6.A2.AF.E5.BD.A2.E7.9F.A9.E9.98.B5)
对于Ab的每一列(最后一列除外)
当前列为列c
寻找列c中 对角线以及对角线以下所有元素(行 c~N)的绝对值的最大值
如果绝对值最大值为0
那么A为奇异矩阵,返回None (你可以在选做问题2.4中证明为什么这里A一定是奇异矩阵)
否则
使用第一个行变换,将绝对值最大值所在行交换到对角线元素所在行(行c)
使用第二个行变换,将列c的对角线元素缩放为1
多次使用第三个行变换,将列c的其他元素消为0
步骤4 返回Ab的最后一列
**注:** 我们并没有按照常规方法先把矩阵转化为行阶梯形矩阵,再转换为化简行阶梯形矩阵,而是一步到位。如果你熟悉常规方法的话,可以思考一下两者的等价性。
### 2.3.2 算法推演
为了充分了解Gaussian Jordan消元法的计算流程,请根据Gaussian Jordan消元法,分别手动推演矩阵A为***可逆矩阵***,矩阵A为***奇异矩阵***两种情况。
#### 推演示例
$Ab = \begin{bmatrix}
-7 & 5 & -1 & 1\\
1 & -3 & -8 & 1\\
-10 & -2 & 9 & 1\end{bmatrix}$
$ --> $
$\begin{bmatrix}
1 & \frac{1}{5} & -\frac{9}{10} & -\frac{1}{10}\\
0 & -\frac{16}{5} & -\frac{71}{10} & \frac{11}{10}\\
0 & \frac{32}{5} & -\frac{73}{10} & \frac{3}{10}\end{bmatrix}$
$ --> $
$\begin{bmatrix}
1 & 0 & -\frac{43}{64} & -\frac{7}{64}\\
0 & 1 & -\frac{73}{64} & \frac{3}{64}\\
0 & 0 & -\frac{43}{4} & \frac{5}{4}\end{bmatrix}$
$ --> $
$\begin{bmatrix}
1 & 0 & 0 & -\frac{3}{16}\\
0 & 1 & 0 & -\frac{59}{688}\\
0 & 0 & 1 & -\frac{5}{43}\end{bmatrix}$
#### 推演有以下要求:
1. 展示每一列的消元结果, 比如3*3的矩阵, 需要写三步
2. 用分数来表示
3. 分数不能再约分
4. 我们已经给出了latex的语法,你只要把零改成你要的数字(或分数)即可
5. 检查你的答案, 可以用[这个](http://www.math.odu.edu/~bogacki/cgi-bin/lat.cgi?c=sys), 或者后面通过单元测试后的`gj_Solve`
_你可以用python的 [fractions](https://docs.python.org/2/library/fractions.html) 模块辅助你的约分_
#### 以下开始你的尝试吧!
```
# 不要修改这里!
from helper import *
A = generateMatrix(3,seed,singular=False)
b = np.ones(shape=(3,1),dtype=int) # it doesn't matter
Ab = augmentMatrix(A.tolist(),b.tolist()) # 请确保你的增广矩阵已经写好了
printInMatrixFormat(Ab,padding=3,truncating=0)
```
请按照算法的步骤3,逐步推演***可逆矩阵***的变换。
在下面列出每一次循环体执行之后的增广矩阵。
要求:
1. 做分数运算
2. 使用`\frac{n}{m}`来渲染分数,如下:
- $\frac{n}{m}$
- $-\frac{a}{b}$
$ Ab = \begin{bmatrix}
-10 & 9 & 5 & 1 \\
-4 & 3 & -4 & 1 \\
-2 & 3 & 5 & 1 \end{bmatrix}$
$ --> \begin{bmatrix}
1 & -\frac{9}{10} & -\frac{5}{10} & -\frac{1}{10} \\
0 & -\frac{3}{5} & -6 & \frac{3}{5} \\
0 & \frac{6}{5} & 4 & \frac{4}{5} \end{bmatrix}$
$ --> \begin{bmatrix}
1 & 0 & \frac{5}{2} & \frac{1}{2} \\
0 & 1 & \frac{10}{3} & \frac{2}{3} \\
0 & 0 & -4 & 1 \end{bmatrix}$
$ --> \begin{bmatrix}
1 & 0 & 0 & \frac{9}{8} \\
0 & 1 & 0 & \frac{3}{2} \\
0 & 0 & 1 & -\frac{1}{4} \end{bmatrix}$
```
# 不要修改这里!
A = generateMatrix(3,seed,singular=True)
b = np.ones(shape=(3,1),dtype=int)
Ab = augmentMatrix(A.tolist(),b.tolist()) # 请确保你的增广矩阵已经写好了
printInMatrixFormat(Ab,padding=3,truncating=0)
```
请按照算法的步骤3,逐步推演***奇异矩阵***的变换。
在下面列出每一次循环体执行之后的增广矩阵。
要求:
1. 做分数运算
2. 使用`\frac{n}{m}`来渲染分数,如下:
- $\frac{n}{m}$
- $-\frac{a}{b}$
$ Ab = \begin{bmatrix}
6 & 1 & 9 & 1 \\
0 & 1 & 1 & 1 \\
-6 & 1 & -7 & 1 \end{bmatrix}$
$ --> \begin{bmatrix}
1 & \frac{1}{6} & \frac{3}{2} & \frac{1}{6} \\
0 & 1 & 1 & 1 \\
0 & 2 & 2 & 2 \end{bmatrix}$
$ --> \begin{bmatrix}
1 & 0 & \frac{4}{3} & 0 \\
0 & 1 & 1 & 0 \\
0 & 0 & 0 & 0 \end{bmatrix}$
### 2.3.3 实现 Gaussian Jordan 消元法
```
# TODO 实现 Gaussian Jordan 方法求解 Ax = b
""" Gaussian Jordan 方法求解 Ax = b.
参数
A: 方阵
b: 列向量
decPts: 四舍五入位数,默认为4
epsilon: 判读是否为0的阈值,默认 1.0e-16
返回列向量 x 使得 Ax = b
返回None,如果 A,b 高度不同
返回None,如果 A 为奇异矩阵
"""
from pprint import pprint
def gj_Solve(A, b, decPts=4, epsilon=1.0e-16):
    """Solve Ax = b by Gauss-Jordan elimination with partial pivoting.

    Returns the solution x as a column vector (list of 1-element lists),
    or None if A and b have different heights or A is singular.
    NOTE(review): decPts is accepted per the spec above but is not used
    in this implementation — results are not rounded.
    """
    size = len(A)
    if size != len(b):
        return None
    # Work on the augmented matrix [A|b] so b is transformed alongside A.
    C = augmentMatrix(A, b)
    for c in range(size):
        # Partial pivoting: largest |entry| on or below the diagonal in column c.
        absCol = [abs(row[c]) for row in C[c:]]
        maxNum = max(absCol)
        if maxNum < epsilon: # singular matrix: column c has no usable pivot
            return None
        # Swap the pivot row into the diagonal position (index is relative
        # to the C[c:] slice, hence the "+ c").
        swapRows(C, absCol.index(maxNum) + c, c)
        # Normalize the pivot entry to exactly 1.
        scaleRow(C, c, 1 / C[c][c])
        for r in range(size):
            # Eliminate column c from every other row; rows whose entry is
            # already ~0 are skipped (also avoids scale==0 in addScaledRow).
            if r == c or abs(C[r][c]) < epsilon:
                continue
            addScaledRow(C, r, c, -C[r][c])
    # After full reduction A becomes the identity, so the last column is x.
    solution = [[row[-1]] for row in C]
    return solution
# 运行以下代码测试你的 gj_Solve 函数
%run -i -e test.py LinearRegressionTestCase.test_gj_Solve
```
###### (选做) 2.4 算法正确判断了奇异矩阵:
在算法的步骤3 中,如果发现某一列对角线和对角线以下所有元素都为0,那么则断定这个矩阵为奇异矩阵。
我们用正式的语言描述这个命题,并证明为真。
证明下面的命题:
**如果方阵 A 可以被分为4个部分: **
$ A = \begin{bmatrix}
I & X \\
Z & Y \\
\end{bmatrix} , \text{其中 I 为单位矩阵,Z 为全0矩阵,Y 的第一列全0}$,
**那么A为奇异矩阵。**
提示:从多种角度都可以完成证明
- 考虑矩阵 Y 和 矩阵 A 的秩
- 考虑矩阵 Y 和 矩阵 A 的行列式
- 考虑矩阵 A 的某一列是其他列的线性组合
**TODO** 证明:
由于 $A,\ I$ 为方阵,则 $Y$ 为方阵,易知,$X$ 的行数与 $Z$ 的列数一致。
设 $n$ 为 $I$ 的大小,并设 $x_1,\ x_2,\dots,\ x_n$ 为矩阵 $X$ 的第一列的元素;设 $A_i$ 为矩阵 $A$ 的第 $i$ 列。
由于 $Y$ 的第一列均为 0,那么 $A_{n+1}$($Y$ 所在的这一列)可以被表示为
$$ A_{n+1} = x_1 A_1 + x_2 A_2 + \cdots + x_n A_n = \sum_{i=1}^n x_i A_i $$
因此 $A_{n+1}$ 这一列并不是线性独立的(是 $A$ 前 $n$ 列的线性组合),又因 $A$ 为方阵,则得出 $A$ 不满秩,即 $A$ 是奇异矩阵,证毕。
# 3 线性回归
## 3.1 随机生成样本点
```
# 不要修改这里!
# 运行一次就够了!
from helper import *
from matplotlib import pyplot as plt
%matplotlib inline
X,Y = generatePoints(seed,num=100)
## 可视化
plt.xlim((-5,5))
plt.xlabel('x',fontsize=18)
plt.ylabel('y',fontsize=18)
plt.scatter(X,Y,c='b')
plt.show()
```
## 3.2 拟合一条直线
### 3.2.1 猜测一条直线
```
#TODO 请选择最适合的直线 y = mx + b
m1 = 2/5
b1 = 13.8
# 不要修改这里!
plt.xlim((-5,5))
x_vals = plt.axes().get_xlim()
y_vals = [m1*x+b1 for x in x_vals]
plt.plot(x_vals, y_vals, '-', color='r')
plt.xlabel('x',fontsize=18)
plt.ylabel('y',fontsize=18)
plt.scatter(X,Y,c='b')
plt.show()
```
### 3.2.2 计算平均平方误差 (MSE)
我们要编程计算所选直线的平均平方误差(MSE), 即数据集中每个点到直线的Y方向距离的平方的平均数,表达式如下:
$$
MSE = \frac{1}{n}\sum_{i=1}^{n}{(y_i - mx_i - b)^2}
$$
```
# TODO 实现以下函数并输出所选直线的MSE
def calculateMSE(X,Y,m,b):
    """Mean squared vertical error of the line y = m*x + b over points (X, Y)."""
    squared_errors = [(y - m * x - b) ** 2 for x, y in zip(X, Y)]
    return sum(squared_errors) / len(X)
print(calculateMSE(X,Y,m1,b1))
```
### 3.2.3 调整参数 $m, b$ 来获得最小的平方平均误差
你可以调整3.2.1中的参数 $m1,b1$ 让蓝点均匀覆盖在红线周围,然后微调 $m1, b1$ 让MSE最小。
## 3.3 (选做) 找到参数 $m, b$ 使得平方平均误差最小
**这一部分需要简单的微积分知识( $ (x^2)' = 2x $ )。因为这是一个线性代数项目,所以设为选做。**
刚刚我们手动调节参数,尝试找到最小的平方平均误差。下面我们要精确得求解 $m, b$ 使得平方平均误差最小。
定义目标函数 $E$ 为
$$
E = \frac{1}{2}\sum_{i=1}^{n}{(y_i - mx_i - b)^2}
$$
因为 $E = \frac{n}{2}MSE$, 所以 $E$ 取到最小值时,$MSE$ 也取到最小值。要找到 $E$ 的最小值,即要找到 $m, b$ 使得 $E$ 相对于 $m$, $E$ 相对于 $b$ 的偏导数等于0.
因此我们要解下面的方程组。
$$
\begin{cases}
\displaystyle
\frac{\partial E}{\partial m} =0 \\
\\
\displaystyle
\frac{\partial E}{\partial b} =0 \\
\end{cases}
$$
### 3.3.1 计算目标函数相对于参数的导数
首先我们计算两个式子左边的值
证明/计算:
$$
\frac{\partial E}{\partial m} = \sum_{i=1}^{n}{-x_i(y_i - mx_i - b)}
$$
$$
\frac{\partial E}{\partial b} = \sum_{i=1}^{n}{-(y_i - mx_i - b)}
$$
**TODO** 证明:
设 $u_i = (y_i - mx_i - b)$,我们可以重写目标函数 $E$ 为
$$ E = \frac{1}{2} \sum_{i=1}^n u_i^2 $$
设 $E_i = \frac{1}{2} u_i^2$,我们可以再重写目标函数 $E$ 为(trivially)
$$ E = \sum_{i=1}^n E_i$$
则,我们有
$$ \frac{\partial E}{\partial m} = \sum_{i=1}^n \frac{\partial E_i}{\partial m} \quad\textrm{和}\quad \frac{\partial E}{\partial b} = \sum_{i=1}^n \frac{\partial E_i}{\partial b} $$
我们先求 $E_i$ 关于 $m$ 的偏导数,即 $\frac{\partial E_i}{\partial m}$,根据链式法则,我们有,对于任意的 $i \in \{1,2,\dots,n\}$
$$ \frac{\partial E_i}{\partial m} = \frac{\mathrm{d} E_i}{\mathrm{d} u_i} \cdot \frac{\partial u_i}{\partial m} $$
在 $u_i$ 中,只有一项 $-mx_i$ 是和 $m$ 相关的,因此 $\frac{\partial u_i}{\partial m} = -x_i$,即
$$ \frac{\partial E_i}{\partial m} = u_i\cdot (-x_i) = -x_i(y_i - mx_i - b) $$
即
$$ \frac{\partial E}{\partial m} = \sum_{i=1}^n -x_i(y_i - mx_i - b) $$
同理,我们可得
$$ \frac{\partial E_i}{\partial b} = \frac{\partial E_i}{\partial u_i}\cdot\frac{\partial u_i}{\partial b} = u_i\cdot (-1) = -(y_i - mx_i - b) $$
即
$$ \frac{\partial E}{\partial b} = \sum_{i=1}^n -(y_i - mx_i - b) $$
证毕。
### 3.3.2 实例推演
现在我们有了一个二元二次方程组
$$
\begin{cases}
\displaystyle
\sum_{i=1}^{n}{-x_i(y_i - mx_i - b)} =0 \\
\displaystyle
\sum_{i=1}^{n}{-(y_i - mx_i - b)} =0 \\
\end{cases}
$$
为了加强理解,我们用一个实际例子演练。
我们要用三个点 $(1,1),\ (2,2),\ (3,2)$ 来拟合一条直线 $y = mx + b$, 请写出
- 目标函数 $E$,
- 二元二次方程组,
- 并求解最优参数 $m, b$
**TODO** 写出目标函数,方程组和最优参数
1. 目标函数 $E$
\begin{align}
E &= \frac{1}{2}\sum_{i=1}^n(y_i - mx_i - b)^2 \\
&= \frac{1}{2}\left((1 - m - b)^2 + (2 - 2m - b)^2 + (2 - 3m - b)^2\right) \\
&= \frac{1}{2}\left(14m^2 - 22m + 12mb - 10b + 3b^2 + 9\right) \\
&= 7m^2 - 11m + 6mb - 5b + \frac{3}{2}b^2 + \frac{9}{2}
\end{align}
2. 二元二次方程组
$$\begin{cases}
\displaystyle \sum_{i=1}^n -x_i(y_i - mx_i - b) = 0 \\
\displaystyle \sum_{i=1}^n -(y_i - mx_i - b) = 0 \\
\end{cases} \; \Rightarrow \;
\begin{cases}
\displaystyle -(1 - m - b) - 2(2 - 2m - b) - 3(2 - 3m - b) = 0 \\
\displaystyle -(1 - m - b) - (2 - 2m - b) - (2 - 3m - b) = 0 \\
\end{cases} \; \Rightarrow \;
\begin{cases}
\displaystyle -11 + 14m + 6b = 0 \\
\displaystyle -5 + 6m + 3b = 0 \\
\end{cases}$$
3. 求解最优参数 $m,\ b$
\begin{cases}
-11 + 14m + 6b = 0 \\
-5 + 6m + 3b = 0 \\
\end{cases}
$$ (1) - 2 \times (2) $$
$$ \Downarrow $$
\begin{cases}
\displaystyle 2m - 1 = 0 \quad \\
\displaystyle -5 + 6m + 3b = 0 \quad \\
\end{cases}
$$ \therefore\ m = \frac{1}{2} $$
$$ \therefore\ b = \frac{5 - 6m}{3} = \frac{2}{3} $$
$$\therefore\begin{cases}
\displaystyle m = \frac{1}{2}\\
\displaystyle b = \frac{2}{3}\\
\end{cases}$$
### 3.3.3 将方程组写成矩阵形式
我们的二元二次方程组可以用更简洁的矩阵形式表达,将方程组写成矩阵形式更有利于我们使用 Gaussian Jordan 消元法求解。
请证明
$$
\begin{bmatrix}
\frac{\partial E}{\partial m} \\
\frac{\partial E}{\partial b}
\end{bmatrix} = X^TXh - X^TY
$$
其中向量 $Y$, 矩阵 $X$ 和 向量 $h$ 分别为 :
$$
Y = \begin{bmatrix}
y_1 \\
y_2 \\
... \\
y_n
\end{bmatrix}
,
X = \begin{bmatrix}
x_1 & 1 \\
x_2 & 1\\
... & ...\\
x_n & 1 \\
\end{bmatrix},
h = \begin{bmatrix}
m \\
b \\
\end{bmatrix}
$$
**TODO** 证明:
首先,
$$ X^\top X = \begin{bmatrix}
\displaystyle \sum_{i=1}^n x_i^2 & \displaystyle \sum_{i=1}^n x_i \\
\displaystyle \sum_{i=1}^n x_i & \displaystyle \sum_{i=1}^n 1
\end{bmatrix}$$
因此,
$$ X^\top Xh = \begin{bmatrix}
\displaystyle \sum_{i=1}^n mx_i^2 + \sum_{i=1}^n bx_i \\
\displaystyle \sum_{i=1}^n mx_i + \sum_{i=1}^n b
\end{bmatrix}$$
另外,
$$ X^\top Y = \begin{bmatrix}
\displaystyle \sum_{i=1}^n x_i y_i \\
\displaystyle \sum_{i=1}^n y_i
\end{bmatrix}$$
因此,
$$ X^\top Xh - X^\top Y = \begin{bmatrix}
\displaystyle \sum_{i=1}^n mx_i^2 + \sum_{i=1}^n bx_i - \sum_{i=1}^n x_i y_i \\
\displaystyle \sum_{i=1}^n mx_i + \sum_{i=1}^n b - \sum_{i=1}^n y_i
\end{bmatrix} = \begin{bmatrix}
\displaystyle \sum_{i=1}^n (mx_i^2 + bx_i - x_i y_i) \\
\displaystyle \sum_{i=1}^n (mx_i + b - y_i)
\end{bmatrix} = \begin{bmatrix}
\displaystyle \sum_{i=1}^n -x_i(y_i - mx_i - b) \\
\displaystyle \sum_{i=1}^n -(y_i - mx_i - b)
\end{bmatrix} = \begin{bmatrix}
\displaystyle \frac{\partial E}{\partial m} \\
\displaystyle \frac{\partial E}{\partial b}
\end{bmatrix}$$
证毕。
至此我们知道,通过求解方程 $X^TXh = X^TY$ 来找到最优参数。这个方程十分重要,他有一个名字叫做 **Normal Equation**,也有直观的几何意义。你可以在 [子空间投影](http://open.163.com/movie/2010/11/J/U/M6V0BQC4M_M6V2AJLJU.html) 和 [投影矩阵与最小二乘](http://open.163.com/movie/2010/11/P/U/M6V0BQC4M_M6V2AOJPU.html) 看到更多关于这个方程的内容。
### 3.4 求解 $X^TXh = X^TY$
在3.3 中,我们知道线性回归问题等价于求解 $X^TXh = X^TY$ (如果你选择不做3.3,就勇敢的相信吧,哈哈)
```
# TODO 实现线性回归
'''
参数:X, Y 存储着一一对应的横坐标与纵坐标的两个一维数组
返回:m,b 浮点数
'''
def linearRegression(X,Y):
    """Fit y = m*x + b by solving the normal equation X^T X h = X^T Y.

    X and Y are 1-D sequences of corresponding x/y coordinates; returns
    the slope m and intercept b as floats. Avoids shadowing the input
    names with the intermediate matrix forms.
    """
    design = [[x, 1] for x in X]    # design matrix: [x_i, 1] rows
    targets = [[y] for y in Y]      # column vector of y values
    lhs = matxMultiply(transpose(design), design)    # X^T X
    rhs = matxMultiply(transpose(design), targets)   # X^T Y
    h = gj_Solve(lhs, rhs)
    return h[0][0], h[1][0]
m2,b2 = linearRegression(X,Y)
assert isinstance(m2,float),"m is not a float"
assert isinstance(b2,float),"b is not a float"
print(m2,b2)
```
你求得的回归结果是什么?
请使用运行以下代码将它画出来。
```
# 请不要修改下面的代码
x1,x2 = -5,5
y1,y2 = x1*m2+b2, x2*m2+b2
plt.xlim((-5,5))
plt.xlabel('x',fontsize=18)
plt.ylabel('y',fontsize=18)
plt.scatter(X,Y,c='b')
plt.plot((x1,x2),(y1,y2),'r')
plt.title('y = {m:.4f}x + {b:.4f}'.format(m=m2,b=b2))
plt.show()
```
你求得的回归结果对当前数据集的MSE是多少?
```
print(calculateMSE(X,Y,m2,b2))
```
| github_jupyter |
# Batch Processing!
#### A notebook to show some of the capabilities available through the pCrunch package
This is certainly not an exhaustive look at everything that the pCrunch module can do, but should hopefully provide some insight.
...or, maybe I'm just procrastinating doing more useful work.
```
# Python Modules and instantiation
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import time
import os
# %matplotlib widget
# ROSCO toolbox modules
from ROSCO_toolbox import utilities as rosco_utilities
# WISDEM modules
from wisdem.aeroelasticse.Util import FileTools
# Batch Analysis tools
from pCrunch import Processing, Analysis
from pCrunch import pdTools
# Instantiate fast_IO
fast_io = rosco_utilities.FAST_IO()
fast_pl = rosco_utilities.FAST_Plots()
import importlib
Processing = importlib.reload(Processing)
Analysis = importlib.reload(Analysis)
```
## Define file paths and filenames
I'm loading a case matrix that is output when using wisdem.aeroelasticse.CaseGen_General to run a series of batch runs to initialize the output files here.
Note that this isn't necessary, just my workflow in this notebook.
```
# point to some file paths
outfile_base = '/Users/nabbas/Documents/Projects/ROSCO_dev/DLC_Analysis/DLC_Outputs/5MW_Land_DLC11/'
fname_case_matrix = os.path.join(outfile_base,'case_matrix.yaml')
# Load case matrix into datafraome
case_matrix = FileTools.load_yaml(fname_case_matrix, package=1)
cm = pd.DataFrame(case_matrix)
# pull wind speed values from InflowWind filenames
windspeeds, seed, IECtype, cmw = Processing.get_windspeeds(cm, return_df=True)
cmw.head()
```
#### Comparison cases
I'm comparing two different controllers here, so I'm going to define two lists of output filenames, each corresponding to the output files from each controller
```
# Define controllers we care to separate things by
controllers = list(set(cmw[('ServoDyn', 'DLL_FileName')]))
controllers
# Parse find outfiles names
outfiles = []
for cont in controllers:
case_names = cmw[cmw[('ServoDyn','DLL_FileName')]==cont]['Case_Name']
outnames = list( outfile_base + case_names + '.outb' )
outfiles.append(outnames)
```
### outfiles
In the end, we just need a list of OpenFAST output files. Here, we have a structure that looks something like `[[], []]`. This could be extended any amount like `[[],[],...,[], []]`, or just be one list of strings `[]`.
## Now we can do some processing!
First, let's load the FAST_Processing class and initialize some parameters.
```
fp = Processing.FAST_Processing()
fp.OpenFAST_outfile_list = outfiles
fp.dataset_names = ['DLC1.1', 'DLC1.3']
fp.to = 30
fp.parallel_analysis = True
fp.save_LoadRanking = False
fp.save_SummaryStats = False
fp.verbose=True
# # Can defined specific variables for load ranking if desired
# fp.ranking_vars = [["RotSpeed"],
# ["OoPDefl1", "OoPDefl2", "OoPDefl3"],
# ['RootMxc1', 'RootMxc2', 'RootMxc3'],
# ['TwrBsFyt'],
# ]
```
#### The fast way to compare things.
We could now collect all of the summary stats and load rankings using:
```
stats,load_rankings = fp.batch_processing()
```
In `fp.batch_processing()` most of the analysis is done for any structure of data. I'm going to step through things a bit more piecewise in this notebook, however.
NOTE: The goal in `batch_processing` is to have a "do anything" script. It is a work in progress, but getting there...
```
# stats,load_rankings = fp.batch_processing()
```
## Design Comparisons
We can use fp.design_comparison to compare multiple sets of runs (like we are in this case...). This will generate summary stats and load rankings, running in parrallel when it can and is told to. `fp.batch_processing()` functionally does the same thing if we give it an outfile matrix with equal size lists. We'll show the design comparison here to show a break down
```
stats, load_ranking = fp.design_comparison(outfiles)
```
#### Breaking it down further...
`fp.batch_processing()` calls `Analysis.Loads_Analysls.full_loads_analysis()` to load openfast data, generate stats, and calculate load rankings. Because we defined `fp.parallel_analysis=True` this process was parallelized. This helps for speed and memory reasons, because now every openfast run is not saved. `fp.batch_processing()` then takes all of the output data and parses it back together.
Separately, we call call `Analysis.Loads_Analysls.full_loads_analysis()` with `return_FastData=True` and all of the fast data will be returned. Because we are comparing data though, we'll stick with the design comparison tools.
#### Loading data
We can also just load previously parsed data if we ran `FAST_Processing` with the `save_LoadRankings` and `save_SummaryStates` flags as True.
```
# Or load stats and load rankings
root = '/Users/nabbas/Documents/Projects/ROSCO_dev/DLC_Analysis/DLC_Outputs/5MW_Land_DLC11/stats/'
lrfile = [root+'dataset0_LoadRanking.yaml', root+'dataset1_LoadRanking.yaml']
sfile = [root+'dataset0_stats.yaml', root+'dataset1_stats.yaml']
fname_case_matrix = root+'../case_matrix.yaml'
stats = [FileTools.load_yaml(sf, package=1) for sf in sfile]
load_rankings = [FileTools.load_yaml(lf, package=1) for lf in lrfile]
case_matrix = FileTools.load_yaml(fname_case_matrix, package=1)
cm = pd.DataFrame(case_matrix)
```
### We can look at our data a bit further with pandas dataframes
The data here is just for a few runs for simplicity. Usually you'd do this for a LOT more cases...
```
stats_df = pdTools.dict2df(stats, names=['ROSCO', 'Legacy'])
stats_df.head()
```
### Load Ranking
Lets re-run the load ranking for the sake of example. We'll have to load the analysis tools, and then run the load ranking for the stats we just found
```
fa = Analysis.Loads_Analysis()
fa.t0 = 30
fa.verbose = False
```
Define the ranking variables and statiscits of interest. Note that `len(ranking_vars) == len(ranking_stats)`! We can pass this a list of stats (multiple runs), a dictionary with one run of stats, or a pandas dataframe with the requisite stats. If the inner list contains multiple OpenFAST channels, the load_rankings function will find the min/max/mean of the collection of the channels (e.g., max out of plane tip deflection of all three blades).
We'll also output a dictionary and a pandas DataFrame from `fa.load_ranking()`
```
fa.ranking_vars = [['TwrBsFxt'], ['OoPDefl1', 'OoPDefl2', 'OoPDefl3']]
fa.ranking_stats = ['max', 'min']
load_ranking, load_ranking_df = fa.load_ranking(stats_df, get_df=True)
load_ranking_df.head()
```
This is organized for each iteration of `[ranking_vars, ranking_stats]`. The stats are ordered accordingly, and `(stat)_case_idx` refers to the case name index of each load.
## Wind speed related analysis
We often want to make sense of some batch output data with data binned by windspeed. We can leverage the case-matrix from our output data to figure out the input wind speeds. Of course, `('InflowWind', 'Filename')` must exist in the case matrix. Lets load the wind speeds, save them, and append them to the case matrix as `('InflowWind', 'WindSpeed')`.
```
windspeed, seed, IECtype, cmw = Processing.get_windspeeds(cm, return_df=True)
cmw
```
### AEP
Now that we know the wind speeds that we were operating at, we can find the AEP. We define the turbine class here, and the cumulative distribution or probability density function
for the Weibull distribution per IEC 61400 is generated. We can then calculate the AEP.
If we first want to verify the PDF, we initialize the `power_production` function, define the turbine class, and can plot a PDF (or CDF) for a given range of wind speeds:
```
pp = Analysis.Power_Production()
pp.turbine_class = 2
Vrange = np.arange(2,26) # Range of wind speeds being considered
weib_prob = pp.prob_WindDist(Vrange,disttype='pdf')
plt.close('all')
plt.plot(Vrange, weib_prob)
plt.grid(True)
plt.xlabel("Wind Speed m/s")
plt.ylabel('Probability')
plt.title('Probability Density Function \n IEC Class 2 Wind Speeds ')
plt.show()
```
To get the AEP, we need to provide the wind speeds that the simulations were run for, and the corresponding average power results. Internally, in power_production.AEP, the mean power for a given average wind sped is multiplied times the wind speed's probability, then extrapolated to represent yearly production.
Note: this might throw a python warning due to some poor pandas indexing practices - to be cleaned up eventually!
To get the AEP for each, the process is simple:
```
AEP = pp.AEP(stats, windspeeds)
print('AEP = {}'.format(AEP))
```
##### About the wind speed warning:
Here, we get a warning about the input windspeed array. This is because we passed the complete array output from Processing.get_windspeeds to the AEP function. The input windspeeds to power_production.AEP must satisfy either of the following two conditions:
- each wind speed value corresponds to each each statistic value, so `len(windspeeds) = len(stats_df)`
- each wind speed value corresponds to each run in the case matrix, so `len(windspeeds) = len(cm)`
If the second of these conditions is satisfied, it is assumed that each dataset has the same wind speeds corresponding to each run. So, in this case, the wind speeds corresponding to DLC_1.1 and DLC_1.3 should be the same.
## Plotting
Finally, we can make some plots. There are a few tools we have at our disposal here. First, we can look at more plots that show our design performance as a function of wind speed. Notably, we can pass the stats dictionary or dataframe to these statistics-related scripts.
Currently, `an_plts.stat_curve()` can plot a "statistics curve" for of two types, a bar or a line graph.
A bar graph is useful to compare design cases easily:
```
plt.close()
an_plts = Analysis.wsPlotting()
an_plts.stat_curve(windspeed, stats, 'TwrBsFxt', 'bar', names=['ROSCO', 'Legacy'])
plt.show()
```
A line graph can be useful to show turbulent wind curves. Here we show the means with a first level of errorbars corresponding to standard deviations, and a second level showing minimums and maximums.
```
an_plts.stat_curve(windspeed, stats, 'GenPwr', 'line', stat_idx=0, names=['ROSCO'])
plt.show()
```
### Load Ranking (soon)
We can plot the load rankings...
... pulling this into `Analysis.py` is in progress.
First, we define how we will classify our comparisons. Most commonly this would be `('IEC','DLC')`, but I'm comparing controllers here. The `classifier_type` functionally refers to the channel of the case matrix to separate the data by, and the `classifier_names` are simply labels for the classifiers.
```
# Define a classification channel from the case-matrix
classifier_type = ('ServoDyn', 'DLL_FileName')
classifier_names = ['ROSCO', 'legacy']
# Plot load rankings
fig_list, ax_list = an_plts.plot_load_ranking(load_ranking, cm, classifier_type, classifier_names=classifier_names, n_rankings=10, caseidx_labels=True)
# modify axis labels
for ax in ax_list:
ax.set_xlabel('Controller [-]', fontsize=10, fontweight='bold')
plt.show()
```
### Time domain plotting
We can also look at our data from the time domain results.
We can compare any number of channels using the ROSCO toolbox plotting tools. First we'll load two cases to plot together, then plot the time histories.
```
# Load some time domain cases
filenames = [outfiles[0][70], outfiles[1][70]] # select the 70th run from each dataset
fast_data = fast_io.load_FAST_out(filenames, tmin=30)
# Change names so the legends make sense
fast_data[0]['meta']['name'] = 'ROSCO'
fast_data[1]['meta']['name'] = 'Legacy'
# Define the plots we want to make (can be as many or as few channels and plots as you would like...)
cases = {'Baseline': ['Wind1VelX', 'GenPwr', 'BldPitch1', 'GenTq', 'RotSpeed'],
'Blade' : ['OoPDefl1', 'RootMyb1']}
# plot
fast_pl.plot_fast_out(cases, fast_data)
plt.show()
```
### Spectral Analysis
We can additionally do some frequency domain analysis. Here, `spec_cases` is defined by `(channel, run)` where the run index corresponds to the desired plotting index in the loaded fast data.
```
spec_cases = [('RootMyb1', 0), ('TwrBsFxt', 1)]
twrfreq = .0716
twrfreq_label = ['Tower']
fig, ax = fast_pl.plot_spectral(fast_data, spec_cases,
show_RtSpeed=True, RtSpeed_idx=[0],
add_freqs=[twrfreq], add_freq_labels=twrfreq_label,
averaging='Welch')
ax.set_title('DLC_1.1')
plt.show()
```
### Other fun plots
Finally, we can plot the data distribution of any channels from our fast output data
```
channels = ['GenPwr']
caseid = [0,1]
an_plts.distribution(fast_data, channels, caseid, names=['ROSCO', 'Legacy'])
plt.show()
```
## In conclusion...
If you made it this far, thanks for reading...
There are a number of smaller subfunctionalities that are also available within these tools shown above. Perhaps most importantly, everything is fairly modular - the hope being that these can provide some high-level tools that everyone can assimilate into their own workflows without too much disruption.
Please add, contribute, fix, etc... That would be great for everyone involved!
| github_jupyter |
<a href="https://colab.research.google.com/github/laicheil/force2019/blob/master/tf_keras_test.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
!pip3 install --upgrade laicheil.force2019==0.post0.dev6
from laicheil.force2019 import something
something()
from tensorflow.python.client import device_lib
import numpy as np
device_lib.list_local_devices()
```
Upload the data files
```
# Upload the zipped training data through the Colab file picker, then
# unpack it into a local directory.
from google.colab import files
uploaded = files.upload()
#for fn in uploaded.keys():
#  print('User uploaded file "{name}" with length {length} bytes'.format(
#      name=fn, length=len(uploaded[fn])))
!mkdir hackathon_training_data
!unzip hackathon_training_data.zip -d hackathon_training_data
!ls hackathon_training_data/
import os
import json
data_path = 'hackathon_training_data'
list_of_files = os.listdir(data_path)
num_of_files = len(list_of_files)
# Use the first file to discover the (H, W) shape shared by every sample;
# a trailing channel axis of 1 is appended (single-channel images).
first_file_path = os.path.join(data_path, list_of_files[0])
#print (first_file_path)
with open(first_file_path,'r') as read_file:
    shape_of_files = (num_of_files,) + np.asarray(json.load(read_file)).shape + (1, )
#print (shape_of_files)
data = np.zeros((shape_of_files))
labels = np.zeros(num_of_files)
labels_ce = np.zeros((num_of_files,2))
# Label convention (from the filename check below): files named 'good*' get
# class 1, everything else class 0. labels_ce is the one-hot counterpart
# used with categorical cross-entropy.
for i, filename in enumerate(os.listdir(data_path)):
    full_path = os.path.join(data_path,filename)
    labels[i] = int(filename.startswith('good'))
    labels_ce[i, int(filename.startswith('good'))] = 1
    with open(full_path,'r') as read_file:
        data[i, :, :, 0] = np.asarray(json.load(read_file))
print('labels shape', labels.shape)
print('labels for CE shape', labels_ce.shape)
print('data shape', data.shape)
print(labels)
print(os.listdir(data_path))
```
Image data generators for the inputs
```
from tensorflow.keras.preprocessing import image
from sklearn.model_selection import train_test_split
# Generator that applies dataset-wide normalization plus random flips/rotations.
datagen = image.ImageDataGenerator(
    featurewise_center=True,
    featurewise_std_normalization=True,
    vertical_flip=True,
    horizontal_flip=True,
    rotation_range=90)
# Fit the feature-wise statistics (mean/std) on the full data array.
datagen.fit(data)
# Split for the binary-label variant.
train_samples, validation_samples, train_labels, validation_labels = train_test_split(data, labels, test_size=.334)
train_generator = datagen.flow(train_samples, train_labels, batch_size=32)
validation_generator = datagen.flow(validation_samples, validation_labels, batch_size=32)
# Split for the one-hot (categorical cross-entropy) variant.
train_samples_ce, validation_samples_ce, train_labels_ce, validation_labels_ce = train_test_split(data, labels_ce, test_size=.334)
train_ce_generator = datagen.flow(train_samples_ce, train_labels_ce, batch_size=32)
validation_ce_generator = datagen.flow(validation_samples_ce, validation_labels_ce, batch_size=32)
# BUG FIX: the original cell ended with the truncated expression
# `test_ce_genera`, which raises NameError at run time and leaves
# `test_ce_generator` (used by the k-fold cell below) undefined.
# Build it from the held-out one-hot split.
test_ce_generator = datagen.flow(validation_samples_ce, validation_labels_ce, batch_size=32)
```
[link text](https://)Loading the ResNet50 model from the tensorflow-keras library
```
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.applications.resnet50 import preprocess_input, decode_predictions
import tensorflow as tf
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
#config.gpu_options.per_process_gpu_memory_fraction = 0.33
from tensorflow.python.keras import backend as K
#with tf.device('/device:GPU:0'):
K.set_session (tf.Session (config = config))
print('DONE LOADING MODEL')
```
Callbacks
```
import datetime
# Timestamp used to make checkpoint filenames unique per run.
now = datetime.datetime.now ()
date_str = now.strftime('%Y%m%d%H%M')
checkpoint_init_name = 'init_chkpnt_'+date_str+'.hdf5'
from tensorflow.python.keras.callbacks import CSVLogger, EarlyStopping, ModelCheckpoint
# Stop after 9 epochs without val_acc improvement; keep only the best weights.
callbacks = [
    EarlyStopping (monitor='val_acc', patience=9, verbose=1),
    ModelCheckpoint(checkpoint_init_name, monitor='val_acc', save_best_only=True, save_weights_only=True, verbose=1)
]
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input,Lambda, Dense, Flatten
from tensorflow.image import grayscale_to_rgb
## inputs
inputs = Input (shape=data.shape[1:])#samples.shape[1:]
#
## from grayscale to RGB, backbone needs 3-channel input
# NOTE(review): the original comment said "Xception" but the backbone built
# below is ResNet50; both require 3 channels, which the Lambda provides.
x = Lambda (lambda x: grayscale_to_rgb (x), name='grayscale_to_rgb') (inputs)
base_model = ResNet50(weights='imagenet', input_tensor=x,include_top=False)
# Classification head: flatten backbone features -> 1000 -> 100 -> 2 softmax.
output = Flatten()(base_model.output)
output = Dense(1000, activation='relu')(output)
output = Dense(100, activation='relu')(output)
output = Dense(2, activation='softmax')(output)
## The model
num_layers = len(base_model.layers)
#for i, layer in enumerate (base_model.layers):
#  layer.trainable = i < 8 or i > num_layers-8
model = Model (inputs=inputs, outputs=output)
model.compile(optimizer='nadam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
print(model.output_shape)
```
Train
```
model.fit_generator(train_ce_generator, steps_per_epoch=int(train_samples.shape[0]), epochs=100,validation_data=validation_ce_generator)
evaluation = model.evaluate_generator(validation_ce_generator)
print(evaluation)
```
Train using kfold
```
from sklearn.model_selection import KFold
k_checkpoint_basename = 'CHK_' + date_str + '_K'
kf = KFold(shuffle=True, n_splits=5)
last_good_model_weights = ''
# BUG FIX: folds_map was used inside the loop without ever being initialized,
# raising NameError on the first fold.
folds_map = {}
k = 0
for train_index, test_index in kf.split(data, labels_ce):
    print('At fold K=', k, ' with ', len(train_index), ' samples out of total ', data.shape[0])
    kf_filepath = k_checkpoint_basename + str(k) + '.hdf5'
    # Point the ModelCheckpoint callback (last in the callbacks list) at this fold's file.
    callbacks[-1].filepath = kf_filepath
    history = model.fit_generator(generator=datagen.flow(data[train_index], labels_ce[train_index], batch_size=16),
                                  validation_data=datagen.flow(data[test_index], labels_ce[test_index], batch_size=16),
                                  steps_per_epoch=int(data.shape[0] / 4),
                                  epochs=2,
                                  callbacks=callbacks)
    if os.path.isfile(kf_filepath):
        #model.load_weights (kf_filepath) #Load best
        last_good_model_weights = kf_filepath
    if os.path.isfile(last_good_model_weights):
        model.load_weights(last_good_model_weights)
    # NOTE(review): test_ce_generator must be defined by the data-generator
    # cell above; the original notebook left that definition truncated.
    evaluation = model.evaluate_generator(test_ce_generator)
    # BUG FIX: evaluate_generator returns [loss, metrics...]; multiplying that
    # list by 100. raised TypeError, and the metric is cross-entropy/accuracy,
    # not mean squared error as the original message claimed.
    print('Evaluation on test data for k =', k, 'is:', evaluation)
    folds_map[k] = {
        'evaluation': evaluation,
        'history': history,
        'filepath': kf_filepath}
    k += 1
evaluation = model.evaluate_generator(validation_ce_generator)
predict = model.predict_generator(validation_ce_generator)
print(evaluation)
print(predict)
```
| github_jupyter |
# MXNet with DALI - ResNet 50 example
## Overview
This example shows how to use DALI pipelines with Apache MXNet.
## ResNet 50 pipeline
Let us first define a few global constants.
```
from __future__ import print_function
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import nvidia.dali.types as types
N = 8 # number of GPUs
batch_size = 128 # batch size per GPU
db_folder = "/data/imagenet/train-480-val-256-recordio/"
```
### The training pipeline
The training pipeline consists of the following steps:
* Data is first read from MXNet's recordIO file (the reader op is given a name `Reader` for later use)
* Then, images are decoded using nvJPEG
* RGB images are then randomly cropped and resized to the final size of (224, 224) pixels
* Finally, the batch is transposed from NHWC layout to NCHW layout, normalized and randomly mirrored.
`DALIClassificationIterator`, which we will use for interfacing with MXNet in this example, requires outputs of the pipeline to follow (image, label) structure.
```
class HybridTrainPipe(Pipeline):
    # DALI training pipeline: RecordIO read -> mixed-backend JPEG decode with
    # fused random crop -> resize to 224x224 -> normalize/mirror into NCHW floats.
    def __init__(self, batch_size, num_threads, device_id, num_gpus):
        # Per-GPU seed: deterministic but decorrelated across devices.
        super(HybridTrainPipe, self).__init__(batch_size, num_threads, device_id, seed = 12 + device_id)
        # Sharded reader over MXNet RecordIO files; shuffled for training.
        self.input = ops.MXNetReader(path = [db_folder+"train.rec"], index_path=[db_folder+"train.idx"],
                                     random_shuffle = True, shard_id = device_id, num_shards = num_gpus,
                                     pad_last_batch=True)
        # "mixed" device = CPU parse + GPU (nvJPEG) decode, with the random
        # area/aspect crop fused into the decode step.
        self.decode = ops.ImageDecoderRandomCrop(device = "mixed",
                                                 output_type = types.RGB,
                                                 random_aspect_ratio = [0.8, 1.25],
                                                 random_area = [0.1, 1.0],
                                                 num_attempts = 100)
        self.resize = ops.Resize(device = "gpu", resize_x = 224, resize_y = 224)
        # Normalize with ImageNet statistics (scaled to 0-255 input) and emit NCHW.
        self.cmnp = ops.CropMirrorNormalize(device = "gpu",
                                            dtype = types.FLOAT,
                                            output_layout = types.NCHW,
                                            crop = (224, 224),
                                            mean = [0.485 * 255,0.456 * 255,0.406 * 255],
                                            std = [0.229 * 255,0.224 * 255,0.225 * 255])
        # One coin flip per sample drives the random horizontal mirror.
        self.coin = ops.CoinFlip(probability = 0.5)

    def define_graph(self):
        rng = self.coin()
        # The reader is named "Reader" so its epoch size can be queried later.
        self.jpegs, self.labels = self.input(name = "Reader")
        images = self.decode(self.jpegs)
        images = self.resize(images)
        output = self.cmnp(images, mirror = rng)
        return [output, self.labels]
```
### The validation pipeline
The validation pipeline is similar to the training pipeline, but omits the random resized crop and random mirroring steps, as well as shuffling the data coming from the reader.
```
class HybridValPipe(Pipeline):
    # DALI validation pipeline: same as HybridTrainPipe but without shuffling,
    # random cropping, or mirroring — deterministic center-crop evaluation path.
    def __init__(self, batch_size, num_threads, device_id, num_gpus):
        super(HybridValPipe, self).__init__(batch_size, num_threads, device_id, seed = 12 + device_id)
        # Sharded, *unshuffled* reader over the validation RecordIO files.
        self.input = ops.MXNetReader(path = [db_folder+"val.rec"], index_path=[db_folder+"val.idx"],
                                     random_shuffle = False, shard_id = device_id, num_shards = num_gpus,
                                     pad_last_batch=True)
        # Plain decode (no random crop) on the mixed CPU/GPU backend.
        self.decode = ops.ImageDecoder(device = "mixed", output_type = types.RGB)
        # Fixed 224x224 crop + ImageNet normalization, NCHW output.
        self.cmnp = ops.CropMirrorNormalize(device = "gpu",
                                            dtype = types.FLOAT,
                                            output_layout = types.NCHW,
                                            crop = (224, 224),
                                            mean = [0.485 * 255,0.456 * 255,0.406 * 255],
                                            std = [0.229 * 255,0.224 * 255,0.225 * 255])

    def define_graph(self):
        # Reader named "Reader" so epoch_size("Reader") works on this pipeline too.
        self.jpegs, self.labels = self.input(name = "Reader")
        images = self.decode(self.jpegs)
        output = self.cmnp(images)
        return [output, self.labels]
trainpipes = [HybridTrainPipe(batch_size=batch_size, num_threads=2, device_id = i, num_gpus = N) for i in range(N)]
valpipes = [HybridValPipe(batch_size=batch_size, num_threads=2, device_id = i, num_gpus = N) for i in range(N)]
```
### Using the MXNet plugin
MXNet data iterators need to know what is the size of the dataset. Since DALI pipelines may consist of multiple readers, potentially with differently sized datasets, we need to specify the reader which we ask for the epoch size. That is why we gave a name to readers in both training and validation pipelines.
In order to get the epoch size out of the reader, we need to build one of the training and one of the validation pipelines.
```
trainpipes[0].build()
valpipes[0].build()
print("Training pipeline epoch size: {}".format(trainpipes[0].epoch_size("Reader")))
print("Validation pipeline epoch size: {}".format(valpipes[0].epoch_size("Reader")))
```
Now we can make MXNet iterators out of our pipelines, using `DALIClassificationIterator` class.
```
from nvidia.dali.plugin.mxnet import DALIClassificationIterator
dali_train_iter = DALIClassificationIterator(trainpipes, reader_name="Reader", fill_last_batch=False)
dali_val_iter = DALIClassificationIterator(valpipes, reader_name="Reader", fill_last_batch=False)
```
## Training with MXNet
Once we have MXNet data iterators from `DALIClassificationIterator`, we can use them instead of MXNet's `mx.io.ImageRecordIter`. Here we show a modified `train_imagenet.py` example that uses our DALI pipelines.
```
import os.path
import argparse
import logging
logging.basicConfig(level=logging.DEBUG)
from resnetn.common import find_mxnet, data, fit
import mxnet as mx
# Build a comma-separated GPU id string, e.g. "0, 1, 2, ..." for N GPUs.
gpus_string = "".join(str(list(range(N)))).replace('[','').replace(']','')
# Command-line-style overrides fed to the standard train_imagenet arg parser.
# NOTE: values given here take precedence over parser.set_defaults below
# (e.g. dtype ends up float16, not the float32 default).
s = ['--gpu', gpus_string,
     '--batch-size', str(batch_size * N),
     '--num-epochs', '1',
     '--data-train', '/data/imagenet/train-480-val-256-recordio/train.rec',
     '--data-val', '/data/imagenet/train-480-val-256-recordio/val.rec',
     '--disp-batches', '100',
     '--network', 'resnet-v1',
     '--num-layers', '50',
     '--data-nthreads', '40',
     '--min-random-scale', '0.533',
     '--max-random-shear-ratio', '0',
     '--max-random-rotate-angle', '0',
     '--max-random-h', '0',
     '--max-random-l', '0',
     '--max-random-s', '0',
     '--dtype', 'float16']
# parse args
parser = argparse.ArgumentParser(description="train imagenet-1k",
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
fit.add_fit_args(parser)
data.add_data_args(parser)
data.add_data_aug_args(parser)
# use a large aug level
data.set_data_aug_level(parser, 3)
parser.set_defaults(
    # network
    network = 'resnet',
    num_layers = 50,
    # data
    num_classes = 1000,
    num_examples = 1281167,
    image_shape = '3,224,224',
    min_random_scale = 1, # if input image has min size k, suggest to use
                          # 256.0/x, e.g. 0.533 for 480
    # train
    num_epochs = 80,
    lr_step_epochs = '30,60',
    dtype = 'float32'
)
args = parser.parse_args(s)
# load network
from importlib import import_module
net = import_module('resnetn.symbols.'+args.network)
sym = net.get_symbol(1000, 50, "3,224,224", dtype='float16')
def get_dali_iter(args, kv=None):
    # Ignore the record-file args and hand fit() the DALI iterators built above.
    return (dali_train_iter, dali_val_iter)
# train
#fit.fit(args, sym, data.get_rec_iter)
fit.fit(args, sym, get_dali_iter)
```
| github_jupyter |
#manipulate_regonline_output
This notebook reads the RegOnline output into a pandas DataFrame and reworks it to have each row contain the attendee, the Doppler Primer Session, the Monday Breakout session, and the Tuesday breakout session in each row.
```
import re
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import matplotlib
#%matplotlib inline
```
### Read the RegOnline output into a pandas DataFrame
```
df = pd.read_excel('/Users/matt/projects/EPRV/data/AttendeeReportCrop_20150703.xls', encoding='utf-8')
df.columns
df.loc[36:37]
```
### Extract the Sunday Sessions
RegOnline outputs multiple entries for each person, and each entry differs by the `AgendaItem`. `AgendaItem`s exist for all sessions happening on all days. In this section, we extract the sessions happening on Sunday, which are all prefixed by "Doppler Primer: ".
```
sundf = df[df['AgendaItem'].str.contains('Doppler Primer:')].copy()
len(sundf)
```
Let's create two new columns in our DataFrame: the `Primer`, and the `PrimerID`. The `Primer` column will contain the name of the Doppler Primer session (minus the `Doppler Primer: ` prefix), and the `PrimerID` will be a session identifier that will later be used in plotting.
```
sundf['PrimerID'] = 0
sundf['Primer'] = [re.search(r'(.*):\s(.*)$', item).group(2) for item in sundf['AgendaItem']]
sundf[['AgendaItem', 'Primer']].head(3)
sundf['Primer'].unique()
```
Now loop through the five unique sessions, updating the `PrimerID` column for each participant:
```
dopID = 0
for agItem in sundf['Primer'].unique():
sundf.loc[sundf['Primer'] == agItem, 'PrimerID'] = dopID
dopID += 1
```
Create an abbreviated code for each session. This will be added to the nametag to spark conversation among participants.
```
sun_ses = ['IC', 'DC', 'SM', 'SA', 'NA']
```
A quick preview of the first few rows to see the result:
```
sundf[['AgendaItem', 'Primer', 'PrimerID']].head(4)
```
### Extract the Monday Sessions
Now to do the same for the Monday sessions.
```
mondf = df[df['AgendaItem'].str.contains('Monday Break-out:')].copy()
len(mondf)
mondf['MonID'] = 0
mondf['Monday'] = [re.search(r'(.*):\s(.*)$', item).group(2) for item in mondf['AgendaItem']]
mondf['Monday'].unique()
monID = 0
for agItem in mondf['Monday'].unique():
mondf.loc[mondf['Monday'] == agItem, 'MonID'] = monID
monID += 1
mondf['Monday'].unique()
mon_ses = ['FS', 'NA', 'TC', 'BC', 'FC']
mondf[['AgendaItem', 'Monday', 'MonID']].head(4)
```
### Extract Tuesday Sessions
```
tuedf = df[df['AgendaItem'].str.contains('Tuesday Break-out:')].copy()
len(tuedf)
tuedf['TueID'] = 0
tuedf['Tuesday'] = [re.search(r'(.*):\s(.*)$', item).group(2) for item in tuedf['AgendaItem']]
tuedf['Tuesday'].unique()
tuesID = 0
for agItem in tuedf['Tuesday'].unique():
tuedf.loc[tuedf['Tuesday'] == agItem, 'TueID'] = tuesID
tuesID += 1
tuedf['Tuesday'].unique()
tue_ses = ['ST', 'DC', 'LB', 'PS', 'NA']
tuedf[['AgendaItem', 'Tuesday', 'TueID']].head(4)
```
### Combine the DataFrames
We only need to join on one field. However, pandas does something weird, where it creates multiple `GroupId_x` columns when joining multiple times. The simple solution is just to join on multiple columns since we know they're all consistent.
```
fulldf = df[['RegId', 'GroupId', 'FirstName', 'LastName', 'Company']]
print(len(fulldf))
fulldf = fulldf.drop_duplicates()
print(len(fulldf))
print(len(sundf))
print(len(mondf))
print(len(tuedf))
fulldf.columns
sundf.columns
newdf = pd.merge(fulldf, sundf, on=['RegId', 'GroupId', 'FirstName', 'LastName', 'Company'], how='left')
print(len(newdf))
newdf = pd.merge(newdf, mondf, on=['RegId', 'GroupId', 'FirstName', 'LastName', 'Company'], how='left')
print(len(newdf))
newdf = pd.merge(newdf, tuedf, on=['RegId', 'GroupId', 'FirstName', 'LastName', 'Company'], how='left')
print(len(newdf))
newdf.head(5)
newdf.columns
```
Now create a new DataFrame that is a subset of the `newdf` with only the columns of interest. Also, make sure the DataFrame is sorted by lastname, the index is reset, and it's a copy of `newdf` instead of a pointer to `newdf`.
```
# BUG FIX: `DataFrame.sort` was deprecated in pandas 0.17 and removed in
# 0.20; `sort_values` is the supported equivalent. reset_index keeps the old
# index as an 'index' column, matching the original behavior.
finaldf = newdf[['FirstName', 'LastName', 'Company', 'Primer', 'PrimerID', 'Monday', 'MonID', 'Tuesday', 'TueID']].sort_values('LastName').reset_index().copy()
finaldf.head(5)
len(finaldf)
finaldf.columns
```
Now replace all empty cells for "Company" to a very general location:
```
# Replace missing institutions with a catch-all value. `fillna` is the
# vectorized pandas equivalent of the original list comprehension
# ('Earth' if pd.isnull(c) else c for each Company entry).
finaldf.Company = finaldf.Company.fillna('Earth')
```
Replace NaNs for PrimerID with the "Not Attending" ID:
```
finaldf.PrimerID = [4 if pd.isnull(primerid_el) else primerid_el for primerid_el in finaldf.PrimerID]
```
Check for NaNs in the Monday ID:
```
len(finaldf[pd.isnull(finaldf['MonID'])])
```
Replace NaNs for the MonID with the "Not Attending" ID:
```
finaldf.MonID = [4 if pd.isnull(monid_el) else monid_el for monid_el in finaldf.MonID]
len(finaldf[pd.isnull(finaldf['MonID'])])
```
Replace NaNs for the TueID with the "Not Attending" ID:
```
len(finaldf[pd.isnull(finaldf['TueID'])])
finaldf.TueID = [4 if pd.isnull(tueid_el) else tueid_el for tueid_el in finaldf.TueID]
len(finaldf[pd.isnull(finaldf['TueID'])])
```
Test out the wrap-around text for institute for participants that have long institution names. This regular expression will look for institutions (or Companies, as RegOnline refers to them), and find items that have a '/', and if no '/', either a '-', ',', or 'at' in the text. If so, add a newline character to make the text wrap around to the next line.
We'll first test the output on a participant's institution that contains both a '/' and a '-':
```
p = re.compile ('(/|^(?!.*/).*-|^(?!.*/).*,|^(?!.*/).*\sat\s)')
p.subn(r'\1\n', finaldf.loc[2].Company)[0]
```
And test a cell that is long, contains `at`, but `at` is part of a longer word:
```
p.subn(r'\1\n', finaldf.loc[53].Company)[0]
```
And a quick test on a few more institutions:
```
[p.sub(r'\1\n', company_el) if len(company_el) > 30 else company_el for company_el in finaldf.head(5).Company.values]
```
Now update the full `Company` column of the DataFrame:
```
finaldf.Company = [p.sub(r'\1\n', company_el) if len(company_el) > 30 else company_el for company_el in finaldf.Company.values]
```
## Plot Labels
Now that we have our DataFrame cleaned up the way we want it we can print the data to the Avery 5392 format. This format contains 6 4"x3" nametags per sheet.
```
png = mpimg.imread('/Users/matt/projects/EPRV/images/NameTag2.png')
png.shape
import matplotlib.font_manager as mfm
fontpaths = fontpaths=['/System/Library/Fonts/',
'/Library/Fonts',
'/Library/Fonts/Microsoft',
'/usr/X11/lib/X11/fonts',
'/opt/X11/share/fonts',
'/Users/matt/Library/Fonts']
blaa = mfm.findSystemFonts(fontpaths=fontpaths)
colors = ['#FFE2A9', '#4BA4D8', '#768085', '#BF5338', '#335B8F']
colors2 = ['#335B8F', '#BF5338', '#768085', '#4BA4D8', '#FFE2A9']
colors3 = ['#4BA4D8', '#FFE2A9', '#BF5338', '#768085', '#335B8F']
circ_ypos = 775
name_dict = {'family': 'YaleNew-Roman',
'color': '#D6E8E1',
'weight': 'bold',
'size': 28
}
company_dict = {'family': 'YaleNew-Roman',
'color': '#D6E8E1',
'weight': 'bold',
'size': 16
}
circle_dict = {'family': 'YaleNew-Roman',
'color': '#1D2523',
'weight': 'normal',
'size': 20
}
def change_name_size(name, name_dict):
    """Choose a font size for *name* based on its length.

    Mutates name_dict['size'] in place and returns the same dict so the
    result can be passed straight to ax.text(fontdict=...).
    """
    length = len(name)
    # Longer names get progressively smaller fonts so they fit the tag.
    if length < 16:
        size = 28
    elif length < 19:
        size = 24
    elif length < 24:
        size = 20
    elif length < 30:
        size = 17
    else:
        size = 16
    name_dict['size'] = size
    return name_dict
def change_company_size(company, company_dict):
    """Choose a font size for the institution line of a nametag.

    Mutates company_dict['size'] in place and returns the same dict.
    Pre-wrapped names (containing '\\n') get slightly more generous length
    thresholds, since each physical line is shorter than len(company)
    suggests.
    """
    # BUG FIX: the original counted newlines in finaldf.loc[0].Company —
    # always the *first* participant's institution — instead of the company
    # string actually being rendered. Count them in the argument itself.
    newlines = company.count('\n')
    length = len(company)
    if newlines == 0:
        if length < 15:
            company_dict['size'] = 18
        elif length < 30:
            company_dict['size'] = 14
        elif length < 40:
            company_dict['size'] = 12
        elif length < 50:
            company_dict['size'] = 10
        else:
            company_dict['size'] = 8
    else:
        if length < 15:
            company_dict['size'] = 18
        elif length < 40:
            company_dict['size'] = 14
        elif length < 50:
            company_dict['size'] = 12
        else:
            company_dict['size'] = 10
    return company_dict
# The HP Color LaserJet CP4020 offsets things by 1/16th of an inch left-to-right.
# This fudge factor should fix that:
hrz_fdg = 1. / 16./ 8.5
# Positions of the six labels on the 8.5"x11" sheet, as figure fractions:
# [left, bottom] per label plus a shared width/height.
leftarr = np.array([0.0294, 0.5, 0.0294, 0.5, 0.0294, 0.5]) + hrz_fdg
bottomarr = [0.091, 0.091, 0.364, 0.364, 0.637, 0.637]
width = 0.4706
height = 0.273
# loop through the total number of pages (6 nametags per page):
for page in range(int(np.ceil((len(finaldf))/6.))):
    print('Now on page: {}'.format(page))
    fig = plt.figure(figsize=(8.5, 11))
    for indx in range(6):
        # add an if statement to handle the last page if there are less than
        # six participants remaining:
        if ((page*6 + indx) < len(finaldf)):
            rect = [leftarr[indx], bottomarr[indx], width, height]
            ax = fig.add_axes(rect)
            ax.imshow(png)
            ax.get_xaxis().set_visible(False)
            ax.get_yaxis().set_visible(False)
            print(u'Now making name tag for: {} {}'.format(finaldf.loc[page*6 + indx].FirstName, finaldf.loc[page*6 + indx].LastName))
            #add name text (font size scales with name length):
            name = finaldf.loc[page*6 + indx].FirstName + ' ' + finaldf.loc[page*6 + indx].LastName
            this_name_dict = change_name_size(name, name_dict)
            ax.text(600, 500, name, fontdict=this_name_dict, horizontalalignment='center')
            #add company text:
            company = finaldf.loc[page*6 + indx].Company
            this_co_dict = change_company_size(company, company_dict)
            ax.text(600, 625, company, fontdict=this_co_dict, horizontalalignment='center')
            #add circles for sessions: one colored badge per day, labeled
            #with the session's two-letter code (sun_ses/mon_ses/tue_ses):
            circ1 = plt.Circle((750, circ_ypos), 70, color=colors[int(finaldf.loc[page*6 + indx].PrimerID)])
            fig.gca().add_artist(circ1)
            ax.text(750, circ_ypos + 27.5, sun_ses[int(finaldf.loc[page*6 + indx].PrimerID)], fontdict=circle_dict, horizontalalignment='center')
            circ2 = plt.Circle((925, circ_ypos), 70, color=colors2[int(finaldf.loc[page*6 + indx].MonID)])
            fig.gca().add_artist(circ2)
            ax.text(925, circ_ypos + 27.5, mon_ses[int(finaldf.loc[page*6 + indx].MonID)], fontdict=circle_dict, horizontalalignment='center')
            circ3 = plt.Circle((1100, circ_ypos), 70, color=colors3[int(finaldf.loc[page*6 + indx].TueID)])
            fig.gca().add_artist(circ3)
            ax.text(1100, circ_ypos + 27.5, tue_ses[int(finaldf.loc[page*6 + indx].TueID)], fontdict=circle_dict, horizontalalignment='center')
    # NOTE(review): figures are saved but never closed; with many pages this
    # accumulates open figures in memory.
    plt.savefig('../nametags/nameTags_bold_p'+str(page)+'.png', dpi=300)
finaldf.columns
finaldf.FirstName.values
finaldf.LastName.values
hrz_fdg = 1. / 16./ 8.5
leftarr = np.array([0.0294, 0.5, 0.0294, 0.5, 0.0294, 0.5])
leftarr + hrz_fdg
```
| github_jupyter |
# <center>Using Optimization in Hyperparameter settings in Deep Learning</center>
<center>by Cecilie Dura André</center>
<img src="https://blog.ml.cmu.edu/wp-content/uploads/2018/12/heatmap.001-min.jpeg" width="90%">
<p style="text-align: right;">Image from: https://blog.ml.cmu.edu/2018/12/12/massively-parallel-hyperparameter-optimization/</p>
---
In Deep Learning, people often have to explore network structure, regularization, and optimization to get the best model. Thus, automated hyperparameter optimization (HPO) is needed, and it has been shown that solutions tailored to a problem lead to state-of-the-art model performance (Feurer and Hutter 2019). It also leads to fair comparisons of the model with different hyperparameters, and thus improves the reproducibility of studies (J Bergstra, Yamins, and Cox).
The most basic HPO method is called Grid search or full factorial design. Here the user selects a couple of given values for each of the hyperparameters. Then, a grid search can be used to evaluate the Cartesian product. This requires a lot of computational memory and this model also suffers from the curse of dimensionality, when the number of hyperparameters becomes too big (“Design and Analysis of Experiments by Douglas Montgomery: A Supplement for Using JMP” 2013).
As an alternative to grid search, there is also something called random search (James Bergstra and Bengio). Here a selected finite search space is given and the model can randomly select values for the hyperparameters within the search space. This method is preferred over grid search if one of the hyperparameters is more important than the others (Hutter, Hoos, and Leyton-Brown). This method will often, given enough time, find the optimum, but it takes longer than guided search methods.
Population-based methods, e.g. genetic algorithms, evolutionary algorithms, and evolutionary strategies, can also be used. Here a set of configurations is maintained. Small changes and different combinations are used to find a better configuration.
Bayesian optimization is the preferred method for HPO in tuning deep neural networks. By using Bayesian optimization in deep learning state-of-the-art results have been seen in image classification (Snoek, Larochelle, and Adams), (Snoek et al. 2015). Bayesian optimization is an iterative model, which first calculates the probabilistic surrogate model fitted to all observations. An acquisition function then determines different points, trade-offs, e.g.
---
# References
Bergstra, James, and Yoshua Bengio. “Random Search for Hyper-Parameter Optimization,” 25.
Bergstra, J, D Yamins, and D D Cox. “Making a Science of Model Search: Hyperparameter Optimization in Hundreds of Dimensions for Vision Architectures,” 9. “Design and Analysis of Experiments by Douglas Montgomery: A Supplement for Using JMP.” 2013, 26.
Feurer, Matthias, and Frank Hutter. 2019. “Hyperparameter Optimization.” In Automated Machine Learning, edited by Frank Hutter, Lars Kotthoff, and Joaquin Vanschoren, 3–33. Cham: Springer International Publishing. http://link.springer.com/10.1007/978-3-030-05318-5_1.
Hutter, Frank, Holger Hoos, and Kevin Leyton-Brown. “An Efficient Approach for Assessing Hyperparameter Importance,” 9.
Snoek, Jasper, Hugo Larochelle, and Ryan P Adams. “Practical Bayesian Optimization of Machine Learning Algorithms,” 9.
Snoek, Jasper, Oren Rippel, Kevin Swersky, Ryan Kiros, Nadathur Satish, Narayanan Sundaram, Md Mostofa Ali Patwary,
Prabhat, and Ryan P. Adams. 2015. “Scalable Bayesian Optimization Using Deep Neural Networks.” arXiv:1502.05700 [stat], July. http://arxiv.org/abs/1502.05700.
| github_jupyter |
```
from __future__ import print_function
import argparse
import os
import random
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from IPython.display import HTML
from matplotlib import cm
import torchvision
import torchvision.datasets as datasets
# Set random seed for reproducibility
manualseed = 43
random.seed(manualseed)
torch.manual_seed(manualseed)
from google.colab import drive
drive.mount('/content/gdrive')
!unzip /content/gdrive/MyDrive/cifar50.zip -d /content/gdrive/MyDrive/CIFAR50
!unzip /content/gdrive/MyDrive/cifar50_imbalance_0.02.zip -d /content/gdrive/MyDrive/CIFAR50_imbalance_0.02
```
Load and print label files to know the format of the provided labels.
- `cifar50_train.json`
- `cifar50_imbalance_0.02_train.json`
```
# TODO: load json files and print.
import json
import os
import pandas as pd
os.chdir("/content/gdrive/MyDrive/CIFAR50")
f = open('cifar50_train.json','r')
data = json.loads(f.read())
df = pd.json_normalize(data['annotations'])
print(df)
os.chdir("/content/gdrive/MyDrive/CIFAR50_imbalance_0.02")
f = open('cifar50_imbalance_0.02_train.json','r')
data = json.load(f)
df_imbalance = pd.json_normalize(data['annotations'])
print(df_imbalance)
```
Show some images with labels (class names) from dataset.
```
# Root directory for dataset
dataroot = "/content/gdrive/MyDrive/CIFAR50"
# Number of workers for dataloader
workers = 2
# Batch size during training
batch_size = 128
# Spatial size of training images. All images will be resized to this
# size using a transformer.
image_size = 64
# Learning rate for optimizers
lr = 0.0001
# Beta1 hyperparam for Adam optimizers
beta1 = 0.5
directory = "/content/gdrive/MyDrive/CIFAR50/images"
classes = [ f.name for f in os.scandir(directory) if f.is_dir() ]
print(classes)
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
class CIFAR50(Dataset):
    # Image dataset driven by a JSON annotation file: each entry of the
    # file's 'annotations' list holds an image path ('fpath') and an
    # integer label ('category_id').
    def __init__(self, json_file,root_dir,transform=None,train=True):
        '''
        Reads json file, adds them to annotations.

        json_file: path to the JSON annotation file.
        root_dir: image root directory. NOTE(review): stored but never used;
            __getitem__ reads the annotation's fpath directly — confirm intent.
        transform: optional callable applied to each loaded image.
        train: unused flag kept for API symmetry with torchvision datasets.
        '''
        with open (json_file, mode='r') as f:
            json_dump=f.read()
        json_fomatted=json.loads(json_dump)
        self.annotations= json_fomatted['annotations']
        self.root_dir=root_dir
        self.transform=transform

    def __len__(self):
        # One sample per annotation entry.
        return (len(self.annotations))

    def __getitem__(self,index):
        '''
        Returns an image and label based on annotations
        '''
        # Annotation paths use Windows-style separators; normalize to '/'.
        img_path=os.path.join(self.annotations[index]['fpath'].replace('\\','/'))
        image_raw=cv2.imread(img_path)
        # cv2 loads BGR; reversing the channel axis yields RGB.
        image=image_raw[:,:,::-1].copy()
        y_label=torch.tensor(int(self.annotations[index]['category_id']))
        if self.transform:
            image=self.transform(image)
        return (image,y_label)
transform_cifar = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
train_dataset = CIFAR50("/content/gdrive/MyDrive/CIFAR50/cifar50_train.json", "/content/gdrive/MyDrive/CIFAR50/images", transform = transform_cifar, train = True)
test_dataset = CIFAR50("/content/gdrive/MyDrive/CIFAR50/cifar50_test.json", "/content/gdrive/MyDrive/CIFAR50/images", transform = transform_cifar, train = False)
print("Print the training dataset before augmentation:\n ", train_dataset)
print("Print the testing dataset:\n ", test_dataset)
%cd '/content/gdrive/MyDrive'
import json
json_dump=""
with open ("/content/gdrive/MyDrive/CIFAR50/cifar50_train.json", mode='r') as f:
json_dump=f.read()
json_fomatted=json.loads(json_dump)
json_fomatted['annotations'][:5]
import matplotlib.pyplot as plt
import cv2
json_fomatted['annotations'][0]
fig,ax=plt.subplots(1,5)
for i in range(1,6):
img_path=json_fomatted['annotations'][i]['fpath'].replace('\\','/')
print(img_path)
img=cv2.imread("/content/gdrive/MyDrive/"+img_path)
ax[i-1].imshow(img[:,:,::-1])
ax[i-1].text(y=-8,x=0,s=json_fomatted['annotations'][i]['category'],color="g")
ax[i-1].axis('off')
fig.show()
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size= 128, shuffle=True, sampler=None,
batch_sampler=None, num_workers=0, collate_fn=None,
pin_memory=False, drop_last=False, timeout=0,
worker_init_fn=None, prefetch_factor=2,
persistent_workers=False)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size,
shuffle=True, num_workers=workers)
print(train_loader)
print(test_loader)
# Decide which device we want to run on
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Fetch one batch and display the first image with its numeric label.
iterator = iter(train_loader)
# BUG FIX: `iterator.next()` is not available on Python-3 / newer DataLoader
# iterators; the builtin next() is the supported way to advance an iterator.
images, label = next(iterator)
# permute CHW -> HWC for matplotlib.
plt.imshow(images[0].permute(1, 2, 0))
# plt.title(label_mapping[int(label[0])])
plt.title(int(label[0]))
# Print the tensor shapes of one batch, then stop.
for X, y in train_loader:
    print("Shape of X [N, C, H, W]: ", X.shape)
    print("Shape of y: ", y.shape, y.dtype)
    break
```
## Train CNNs.
#### Use the CNN in HW2 to train the model on the balanced CIFAR50 dataset.
Train the CNN on the balanced CIFAR50 training set. Evaluate and report the classification accuracies on the testing set.
Note: You can use any network configurations you implemented in HW2.
```
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
    """Small LeNet-style CNN: two conv+pool stages followed by three
    fully-connected layers producing 100-way logits."""

    def __init__(self):
        super().__init__()
        # Feature extractor: 3 -> 6 -> 16 channels, 5x5 kernels, 2x2 pooling.
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        # Classifier head: 16*5*5 = 400 flattened features -> 120 -> 84 -> 100.
        self.fc1 = nn.Linear(400, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 100)

    def forward(self, x):
        # Two conv/ReLU/pool stages shrink a 32x32 input to 16 x 5 x 5.
        features = self.pool(F.relu(self.conv1(x)))
        features = self.pool(F.relu(self.conv2(features)))
        flat = torch.flatten(features, 1)
        hidden = F.relu(self.fc1(flat))
        hidden = F.relu(self.fc2(hidden))
        # Raw logits; CrossEntropyLoss applies softmax internally.
        return self.fc3(hidden)
net = Net().to(device)
import torch.optim as optim
# Standard classification setup: cross-entropy loss, SGD with momentum.
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
for epoch in range(20):
    running_loss = 0.0
    for i, data in enumerate(train_loader, 0):
        inputs, labels = data
        inputs, labels = inputs.to(device), labels.to(device)
        # Clear gradients accumulated by the previous step.
        optimizer.zero_grad()
        outputs = net(inputs)
        outputs = outputs.to(device)  # no-op: the output already lives on `device`
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        if i % 2000 == 1999:  # print every 2000 mini-batches
            print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss/2000))
            running_loss = 0.0
print('Finished Training')
n_correct=0
n_samples=0
# Test-set accuracy; no gradients needed for evaluation.
# NOTE(review): net.eval() is never called (harmless here since this Net
# has no dropout/batchnorm layers, but worth confirming).
with torch.no_grad():
    for data in test_loader:
        inputs_test, labels_test = data
        inputs_test, labels_test = inputs_test.to(device), labels_test.to(device)
        outputs_test = net(inputs_test)
        outputs_test = outputs_test.to(device)  # no-op, kept from original
        # Predicted class = index of the largest logit.
        _, predicted = torch.max(outputs_test.data, 1)
        n_samples+=labels_test.size(0)
        n_correct+=(predicted==labels_test).sum().item()
print('Test accuracy:', (n_correct/n_samples*100))
```
#### Use the same CNN in HW2 to train the model on the imbalanced CIFAR50 dataset.
Train the CNN on the imbalanced CIFAR50 training set. Evaluate and report the classification accuracies on the testing set.
```
# Root directory for dataset
dataroot = "/content/gdrive/MyDrive/data/cifar50_imbalance_0.02"
# Number of workers for dataloader
workers = 2
# Batch size during training
batch_size = 128
# Spatial size of training images. All images will be resized to this
# size using a transformer.
image_size = 64
# Learning rate for optimizers
lr = 0.0002
# Beta1 hyperparam for Adam optimizers
beta1 = 0.5
# Each class has its own sub-directory under images/; the folder names
# serve as the class names.
directory = "/content/gdrive/MyDrive/data/cifar50_imbalance_0.02/images"
classes = [ f.name for f in os.scandir(directory) if f.is_dir() ]
print(classes)
# Map PIL images to tensors scaled to [-1, 1] per channel.
transform_cifar = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
# CIFAR50 is a project-defined Dataset class (defined elsewhere in the notebook).
train_dataset_imbalanced = CIFAR50("/content/gdrive/MyDrive/data/cifar50_imbalance_0.02/cifar50_imbalance_0.02_train.json", "/content/gdrive/MyDrive/data/cifar50_imbalance_0.02/images", transform = transform_cifar, train = True)
test_dataset_imbalanced = CIFAR50("/content/gdrive/MyDrive/data/cifar50_imbalance_0.02/cifar50_imbalance_0.02_test.json", "/content/gdrive/MyDrive/data/cifar50_imbalance_0.02/images", transform = transform_cifar, train = False)
print("Print the training dataset before augmentation:\n ", train_dataset_imbalanced)
print("Print the testing dataset:\n ", test_dataset_imbalanced)
import json
# Read and parse the imbalanced-split training annotations.
json_dump=""
with open ("/content/gdrive/MyDrive/data/cifar50_imbalance_0.02/cifar50_imbalance_0.02_train.json", mode='r') as f:
    json_dump=f.read()
json_fomatted=json.loads(json_dump)
json_fomatted['annotations'][:5]
import matplotlib.pyplot as plt
import cv2
json_fomatted['annotations'][0]
# Show five annotated images with their category labels.
fig,ax=plt.subplots(1,5)
for i in range(1,6):
    # Normalise Windows path separators stored in the JSON.
    img_path=json_fomatted['annotations'][i]['fpath'].replace('\\','/')
    img=cv2.imread("/content/gdrive/MyDrive/"+img_path)
    # cv2 loads BGR; reverse the channel axis to display as RGB.
    ax[i-1].imshow(img[:,:,::-1])
    ax[i-1].text(y=-8,x=0,s=json_fomatted['annotations'][i]['category'],color="Green")
    ax[i-1].axis('off')
fig.show()
# DataLoaders for the imbalanced split. As with the balanced loaders, all
# explicitly listed keywords were defaults, and prefetch_factor is only
# legal with num_workers > 0, so the argument list is reduced.
train_loader_imbalanced = torch.utils.data.DataLoader(train_dataset_imbalanced,
                                                      batch_size=128,
                                                      shuffle=True,
                                                      num_workers=0)
test_loader_imbalanced = torch.utils.data.DataLoader(test_dataset_imbalanced,
                                                     batch_size=128,
                                                     shuffle=True,
                                                     num_workers=0)
print(train_loader_imbalanced)
print(test_loader_imbalanced)
# Preview one training sample. DataLoader iterators have no .next()
# method in modern PyTorch; use the builtin next().
iterator = iter(train_loader_imbalanced)
images, label = next(iterator)
plt.imshow(images[0].permute(1, 2, 0))  # CHW -> HWC for matplotlib
# plt.title(label_mapping[int(label[0])])
plt.title(int(label[0]))
# Sanity-check tensor shapes for one batch.
for X, y in train_loader_imbalanced:
    print("Shape of X [N, C, H, W]: ", X.shape)
    print("Shape of y: ", y.shape, y.dtype)
    break
# NOTE(review): this loop continues training the SAME net/optimizer that
# was already fit on the balanced set; a fresh Net() would normally be
# created for an independent imbalanced baseline — confirm intent.
for epoch in range(20):
    running_loss = 0.0
    for i, data in enumerate(train_loader_imbalanced, 0):
        inputs, labels = data
        inputs, labels = inputs.to(device), labels.to(device)
        optimizer.zero_grad()  # clear gradients from the previous step
        outputs = net(inputs)
        outputs = outputs.to(device)  # no-op: output already on `device`
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        if i % 2000 == 1999:  # print every 2000 mini-batches
            print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss/2000))
            running_loss = 0.0
print('Finished Training')
n_correct=0
n_samples=0
# Test-set accuracy; gradients are not needed for evaluation.
with torch.no_grad():
    for data in test_loader_imbalanced:
        inputs_test, labels_test = data
        inputs_test, labels_test = inputs_test.to(device), labels_test.to(device)
        outputs_test = net(inputs_test)
        outputs_test = outputs_test.to(device)  # no-op, kept from original
        _, predicted = torch.max(outputs_test.data, 1)  # class with max logit
        n_samples+=labels_test.size(0)
        n_correct+=(predicted==labels_test).sum().item()
print('Test accuracy:', (n_correct/n_samples*100))
```
## Implement Tricks for LTR
Before starting this question, please read the paper for this homework: Bag of tricks for long-tailed visual recognition with deep convolutional neural networks.
According to this paper, select at least **three** tricks to implement on the imbalanced CIFAR50 training.
```
# TODO: trick 1 implementation
import glob
import pickle
class CIFAR_Dataset(Dataset):
    """Dataset wrapper applying mixup augmentation (trick 1).

    Items are returned as (image_tensor, one_hot_label); roughly one in
    five training items is blended with a second, randomly chosen item
    using a Beta(0.2, 0.2) mixing coefficient.
    """

    # Class-level defaults kept from the original code; __init__ sets the
    # real instance attributes.
    data_dir = "/content/gdrive/MyDrive/data/cifar50_imbalance_0.02"
    train = True
    transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
    path = "/content/gdrive/MyDrive/data/cifar50_imbalance_0.02/images/*/train*.jpg"

    # CIFAR50 has 50 categories; the original hard-coded 10, which raises
    # an IndexError for any target >= 10.
    NUM_CLASSES = 50

    def __init__(self, data_dir = "/content/gdrive/MyDrive/data/cifar50_imbalance_0.02/", train = True, transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])):
        self.data_dir = data_dir
        self.train = train
        self.transform = transform
        self.data = []
        self.targets = []
        path = "/content/gdrive/MyDrive/data/cifar50_imbalance_0.02/images/*/train*.jpg"
        # Loading all the data depending on whether the dataset is training or testing
        if self.train:
            # Bug fixes vs. the original: (a) the pickle.load line was
            # commented out, leaving `entry` undefined (NameError); (b) an
            # inner `for i in range(len(classes))` loop re-read the same
            # file once per class, duplicating the data — both removed.
            # FIXME(review): the glob matches .jpg image files, which are
            # NOT pickled CIFAR batches; loading them with pickle will
            # still fail. Confirm the intended on-disk format.
            for filename in glob.glob(path):
                with open(filename, 'rb') as f:
                    entry = pickle.load(f, encoding='latin1')
                    self.data.append(entry['data'])
                    self.targets.extend(entry['labels'])
        else:
            with open(data_dir + 'test_batch', 'rb') as f:
                entry = pickle.load(f, encoding='latin1')
                self.data.append(entry['data'])
                self.targets.extend(entry['labels'])
        # Reshape it and turn it into the HWC format which PyTorch takes in the images
        # Original CIFAR format can be seen via its official page
        self.data = np.vstack(self.data).reshape(-1, 3, 32, 32)
        self.data = self.data.transpose((0, 2, 3, 1))

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        # Create a one-hot label over the 50 CIFAR50 classes.
        label = torch.zeros(self.NUM_CLASSES)
        label[self.targets[idx]] = 1.
        # Bug fix: the original called the *global* name `transform`
        # instead of the instance's self.transform.
        if self.transform:
            image = self.transform(self.data[idx])
        # If data is for training, perform mixup on roughly 1 of every 5 images.
        if self.train and idx > 0 and idx % 5 == 0:
            # Choose another image/label randomly
            mixup_idx = random.randint(0, len(self.data)-1)
            mixup_label = torch.zeros(self.NUM_CLASSES)
            # Bug fix: the original wrote into `label` here, leaving
            # mixup_label all zeros and corrupting the primary label.
            mixup_label[self.targets[mixup_idx]] = 1.
            if self.transform:
                mixup_image = self.transform(self.data[mixup_idx])
            # Sample the mixing coefficient from Beta(alpha, alpha) and
            # blend both image and label accordingly.
            alpha = 0.2
            lam = np.random.beta(alpha, alpha)
            image = lam * image + (1 - lam) * mixup_image
            label = lam * label + (1 - lam) * mixup_label
        return image, label
net = Net().to(device)
optimizer = torch.optim.Adam(net.parameters(), lr=0.0002)
criterion = nn.CrossEntropyLoss()
best_Acc = 0
# NOTE(review): this "trick 1" loop still iterates train_loader_imbalanced,
# so the mixup CIFAR_Dataset defined above is never actually used — verify.
for epoch in range(20):
    net.train()
    # We train and visualize the loss every 100 iterations
    for idx, (imgs, labels) in enumerate(train_loader_imbalanced):
        imgs = imgs.to(device)
        labels = labels.to(device)
        preds = net(imgs)
        loss = criterion(preds, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if idx%100 == 0:
            print("Epoch {} Iteration {}, Current Loss: {}".format(epoch, idx, loss))
    # We evaluate the network after every epoch based on test set accuracy
    net.eval()
    with torch.no_grad():
        total = 0
        numCorrect = 0
        for (imgs, labels) in test_loader_imbalanced:
            imgs = imgs.to(device)
            labels = labels.to(device)
            preds = net(imgs)
            # NOTE(review): the earlier eval loops compare predictions to
            # labels directly, implying integer class labels. If so,
            # torch.argmax(labels, dim=-1) reduces over the batch and this
            # accuracy is wrong; it should be
            # `(torch.argmax(preds, dim=-1) == labels)`. Confirm the label
            # format produced by test_loader_imbalanced.
            numCorrect += (torch.argmax(preds, dim=-1) == torch.argmax(labels, dim=-1)).float().sum()
            total += len(imgs)
        acc = (numCorrect/total)*100
        print("Current image classification accuracy at epoch {}: {}".format(epoch, acc))
        if acc > best_Acc:
            best_Acc = acc
# TODO: trick 2 implementation
def make_weights_for_balanced_classes(images, nclasses):
    """Compute per-sample weights that balance class frequencies.

    Args:
        images: sequence of (item, class_index) pairs, as produced by an
            ImageFolder-style dataset.
        nclasses: total number of classes.

    Returns:
        A list of one float weight per sample (N / count_of_its_class),
        suitable for torch.utils.data.WeightedRandomSampler.
    """
    count = [0] * nclasses
    for item in images:
        count[item[1]] += 1
    weight_per_class = [0.] * nclasses
    N = float(sum(count))
    for i in range(nclasses):
        # Guard against classes with no samples: the original divided by
        # zero here; such classes get weight 0 (they are never drawn).
        weight_per_class[i] = N / float(count[i]) if count[i] > 0 else 0.
    weight = [0] * len(images)
    for idx, val in enumerate(images):
        weight[idx] = weight_per_class[val[1]]
    return weight
net = Net().to(device)
optimizer = torch.optim.Adam(net.parameters(), lr=0.0002)
criterion = nn.CrossEntropyLoss()
best_Acc = 0
# NOTE(review): make_weights_for_balanced_classes is defined above but its
# weights are never passed to a WeightedRandomSampler / DataLoader, so
# "trick 2" (re-sampling) is not actually applied in this loop — verify.
for epoch in range(20):
    net.train()
    # We train and visualize the loss every 100 iterations
    for idx, (imgs, labels) in enumerate(train_loader_imbalanced):
        imgs = imgs.to(device)
        labels = labels.to(device)
        preds = net(imgs)
        loss = criterion(preds, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if idx%100 == 0:
            print("Epoch {} Iteration {}, Current Loss: {}".format(epoch, idx, loss))
    # We evaluate the network after every epoch based on test set accuracy
    net.eval()
    with torch.no_grad():
        total = 0
        numCorrect = 0
        for (imgs, labels) in test_loader_imbalanced:
            imgs = imgs.to(device)
            labels = labels.to(device)
            preds = net(imgs)
            # NOTE(review): argmax over integer labels is likely wrong here
            # (see the identical note on the trick-1 loop).
            numCorrect += (torch.argmax(preds, dim=-1) == torch.argmax(labels, dim=-1)).float().sum()
            total += len(imgs)
        acc = (numCorrect/total)*100
        print("Current image classification accuracy at epoch {}: {}".format(epoch, acc))
        if acc > best_Acc:
            best_Acc = acc
# TODO: trick 3 implementation
class_weight=[]
for root, subdir,files in os.walk(directory):
if len(files)>0:
class_weight.append(1/len(files))
class_weight = torch.FloatTensor(class_weight).to(device)
loss_fn = nn.CrossEntropyLoss(weight=class_weight)
net = Net().to(device)
optimizer = torch.optim.Adam(net.parameters(), lr=0.0002)
criterion = nn.CrossEntropyLoss()
best_Acc = 0
for epoch in range(20):
net.train()
# We train and visualize the loss every 100 iterations
for idx, (imgs, labels) in enumerate(train_loader_imbalanced):
imgs = imgs.to(device)
labels = labels.to(device)
preds = net(imgs)
loss = criterion(preds, labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if idx%100 == 0:
print("Epoch {} Iteration {}, Current Loss: {}".format(epoch, idx, loss))
# We evaluate the network after every epoch based on test set accuracy
net.eval()
with torch.no_grad():
total = 0
numCorrect = 0
for (imgs, labels) in test_loader_imbalanced:
imgs = imgs.to(device)
labels = labels.to(device)
preds = net(imgs)
numCorrect += (torch.argmax(preds, dim=-1) == torch.argmax(labels, dim=-1)).float().sum()
total += len(imgs)
acc = (numCorrect/total)*100
print("Current image classification accuracy at epoch {}: {}".format(epoch, acc))
if acc > best_Acc:
best_Acc = acc
```
Evaluate and report the classification performance on CIFAR50 testing set.
| github_jupyter |
# Deep learning for computer vision
This notebook will teach you to build and train convolutional networks for image recognition. Brace yourselves.
# CIFAR dataset
This week, we shall focus on the image recognition problem on cifar10 dataset
* 60k images of shape 3x32x32
* 10 different classes: planes, dogs, cats, trucks, etc.
<img src="cifar10.jpg" style="width:80%">
```
import numpy as np
from cifar import load_cifar10
# load_cifar10 comes from the course's local cifar.py helper module.
X_train,y_train,X_val,y_val,X_test,y_test = load_cifar10("cifar_data")
class_names = np.array(['airplane','automobile ','bird ','cat ','deer ','dog ','frog ','horse ','ship ','truck'])
print (X_train.shape,y_train.shape)
import matplotlib.pyplot as plt
%matplotlib inline
# Show the first 12 training images; data is CHW, imshow wants HWC.
plt.figure(figsize=[12,10])
for i in range(12):
    plt.subplot(3,4,i+1)
    plt.xlabel(class_names[y_train[i]])
    plt.imshow(np.transpose(X_train[i],[1,2,0]))
```
# Building a network
Simple neural networks with layers applied on top of one another can be implemented as `torch.nn.Sequential` - just add a list of pre-built modules and let it train.
```
import torch, torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
# a special module that converts [batch, channel, w, h] to [batch, units]
class Flatten(nn.Module):
    """Collapse a [batch, channel, w, h] tensor into [batch, units]."""

    def forward(self, input):
        batch_size = input.size(0)
        # One row per sample; -1 lets view infer the flattened width.
        return input.view(batch_size, -1)
```
Let's start with a dense network for our baseline:
```
# Baseline dense network: flatten 3x32x32 images -> 64 hidden units -> 10 logits.
model = nn.Sequential()
# reshape from "images" to flat vectors
model.add_module('flatten', Flatten())
# dense "head"
model.add_module('dense1', nn.Linear(3 * 32 * 32, 64))
model.add_module('dense1_relu', nn.ReLU())
model.add_module('dense2_logits', nn.Linear(64, 10)) # logits for 10 classes
```
As in our basic tutorial, we train our model with negative log-likelihood aka crossentropy.
```
def compute_loss(X_batch, y_batch):
    """Mean cross-entropy of the global `model` on one minibatch.

    Accepts numpy arrays (or anything tensor-convertible) and returns a
    scalar loss tensor.
    """
    # torch.as_tensor replaces the deprecated autograd.Variable wrapper
    # (Variable has been a no-op alias for Tensor since PyTorch 0.4).
    X_batch = torch.as_tensor(X_batch, dtype=torch.float32)
    y_batch = torch.as_tensor(y_batch, dtype=torch.int64)
    logits = model(X_batch)
    # F.cross_entropy already returns the batch mean, so the original
    # trailing .mean() was a no-op and is dropped.
    return F.cross_entropy(logits, y_batch)

# example
compute_loss(X_train[:5], y_train[:5])
```
### Training on minibatches
* We got 40k images, that's way too many for a full-batch SGD. Let's train on minibatches instead
* Below is a function that splits the training sample into minibatches
```
# An auxilary function that returns mini-batches for neural network training
def iterate_minibatches(X, y, batchsize):
    """Yield (X_batch, y_batch) pairs covering X and y exactly once,
    in a freshly shuffled order; the final batch may be smaller."""
    order = np.random.permutation(len(X))
    for start in range(0, len(order), batchsize):
        chunk = order[start:start + batchsize]
        yield X[chunk], y[chunk]
opt = torch.optim.SGD(model.parameters(), lr=0.01)
train_loss = []
val_accuracy = []
import time
num_epochs = 100 # total amount of full passes over training data
batch_size = 50  # number of samples processed in one SGD iteration
for epoch in range(num_epochs):
    # In each epoch, we do a full pass over the training data:
    start_time = time.time()
    model.train(True) # enable dropout / batch_norm training behavior
    for X_batch, y_batch in iterate_minibatches(X_train, y_train, batch_size):
        # train on batch
        loss = compute_loss(X_batch, y_batch)
        loss.backward()
        opt.step()
        opt.zero_grad()  # clear grads so the next backward() starts fresh
        train_loss.append(loss.data.numpy())
    # And a full pass over the validation data:
    model.train(False) # disable dropout / use averages for batch_norm
    for X_batch, y_batch in iterate_minibatches(X_val, y_val, batch_size):
        logits = model(Variable(torch.FloatTensor(X_batch)))
        # Predicted class per sample = index of the max logit.
        y_pred = logits.max(1)[1].data.numpy()
        val_accuracy.append(np.mean(y_batch == y_pred))
    # Then we print the results for this epoch:
    # (the slices below average over the batches of the last epoch only)
    print("Epoch {} of {} took {:.3f}s".format(
        epoch + 1, num_epochs, time.time() - start_time))
    print(" training loss (in-iteration): \t{:.6f}".format(
        np.mean(train_loss[-len(X_train) // batch_size :])))
    print(" validation accuracy: \t\t\t{:.2f} %".format(
        np.mean(val_accuracy[-len(X_val) // batch_size :]) * 100))
```
Don't wait for full 100 epochs. You can interrupt training after 5-20 epochs once validation accuracy stops going up.
```
```
```
```
```
```
```
```
```
```
### Final test
```
model.train(False) # disable dropout / use averages for batch_norm
test_batch_acc = []
# Accumulate per-batch accuracy over the whole test set.
for X_batch, y_batch in iterate_minibatches(X_test, y_test, 500):
    logits = model(Variable(torch.FloatTensor(X_batch)))
    y_pred = logits.max(1)[1].data.numpy()  # predicted class per sample
    test_batch_acc.append(np.mean(y_batch == y_pred))

test_accuracy = np.mean(test_batch_acc)

print("Final results:")
print(" test accuracy:\t\t{:.2f} %".format(
    test_accuracy * 100))

# Typos fixed in the user-facing messages below
# ("than" -> "then", "instructons" -> "instructions").
if test_accuracy * 100 > 95:
    print("Double-check, then consider applying for NIPS'17. SRSly.")
elif test_accuracy * 100 > 90:
    print("U'r freakin' amazin'!")
elif test_accuracy * 100 > 80:
    print("Achievement unlocked: 110lvl Warlock!")
elif test_accuracy * 100 > 70:
    print("Achievement unlocked: 80lvl Warlock!")
elif test_accuracy * 100 > 60:
    print("Achievement unlocked: 70lvl Warlock!")
elif test_accuracy * 100 > 50:
    print("Achievement unlocked: 60lvl Warlock!")
else:
    print("We need more magic! Follow instructions below")
```
## Task I: small convolution net
### First step
Let's create a mini-convolutional network with roughly such architecture:
* Input layer
* 3x3 convolution with 10 filters and _ReLU_ activation
* 2x2 pooling (or set previous convolution stride to 3)
* Flatten
* Dense layer with 100 neurons and _ReLU_ activation
* 10% dropout
* Output dense layer.
__Convolutional layers__ in torch are just like all other layers, but with a specific set of parameters:
__`...`__
__`model.add_module('conv1', nn.Conv2d(in_channels=3, out_channels=10, kernel_size=3)) # convolution`__
__`model.add_module('pool1', nn.MaxPool2d(2)) # max pooling 2x2`__
__`...`__
Once you're done (and compute_loss no longer raises errors), train it with __Adam__ optimizer with default params (feel free to modify the code above).
If everything is right, you should get at least __50%__ validation accuracy.
```
```
```
```
```
```
```
```
```
```
__Hint:__ If you don't want to compute shapes by hand, just plug in any shape (e.g. 1 unit) and run compute_loss. You will see something like this:
__`RuntimeError: size mismatch, m1: [5 x 1960], m2: [1 x 64] at /some/long/path/to/torch/operation`__
See the __1960__ there? That's your actual input shape.
## Task 2: adding normalization
* Add batch norm (with default params) between convolution and ReLU
* nn.BatchNorm*d (1d for dense, 2d for conv)
* usually better to put them after linear/conv but before nonlinearity
* Re-train the network with the same optimizer, it should get at least 60% validation accuracy at peak.
```
```
```
```
```
```
```
```
```
```
```
```
```
```
## Task 3: Data Augmentation
There's a powerful torch tool for image preprocessing useful to do data preprocessing and augmentation.
Here's how it works: we define a pipeline that
* makes random crops of data (augmentation)
* randomly flips image horizontally (augmentation)
* then normalizes it (preprocessing)
```
from torchvision import transforms
# Per-channel CIFAR10 statistics used for normalization.
means = np.array((0.4914, 0.4822, 0.4465))
stds = np.array((0.2023, 0.1994, 0.2010))
# Augmentation pipeline: random crop + rotation + horizontal flip, then
# tensor conversion and normalization.
transform_augment = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomRotation([-30, 30]),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize(means, stds),
])
from torchvision.datasets import CIFAR10
# NOTE: despite the name, train_loader here is a Dataset; the DataLoader
# below wraps it.
train_loader = CIFAR10("./cifar_data/", train=True, transform=transform_augment)
train_batch_gen = torch.utils.data.DataLoader(train_loader,
                                              batch_size=32,
                                              shuffle=True,
                                              num_workers=1)
# Visualize the first augmented batch; the raise stops after one batch
# (this cell is a demo, not a training loop).
for (x_batch, y_batch) in train_batch_gen:
    print('X:', type(x_batch), x_batch.shape)
    print('y:', type(y_batch), y_batch.shape)
    for i, img in enumerate(x_batch.numpy()[:8]):
        plt.subplot(2, 4, i+1)
        # De-normalize for display (CHW -> HWC, then undo Normalize).
        plt.imshow(img.transpose([1,2,0]) * stds + means )
    raise NotImplementedError("Plese use this code in your training loop")
# TODO use this in your training loop
```
When testing, we don't need random crops, just normalize with same statistics.
```
# Test-time preprocessing: no random augmentation, just tensor conversion
# and the same normalization statistics as training.
transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(means, stds),
])
# TODO (student exercise): build a CIFAR10 test dataset with
# transform_test and wrap it in a DataLoader.
test_loader = <YOUR CODE>
```
# Homework 2.2: The Quest For A Better Network
In this assignment you will build a monster network to solve CIFAR10 image classification.
This notebook is intended as a sequel to seminar 3, please give it a try if you haven't done so yet.
(please read it at least diagonally)
* The ultimate quest is to create a network that has as high __accuracy__ as you can push it.
* There is a __mini-report__ at the end that you will have to fill in. We recommend reading it first and filling it while you iterate.
## Grading
* starting at zero points
* +20% for describing your iteration path in a report below.
* +20% for building a network that gets above 20% accuracy
* +10% for beating each of these milestones on __TEST__ dataset:
* 50% (50% points)
* 60% (60% points)
* 65% (70% points)
* 70% (80% points)
* 75% (90% points)
* 80% (full points)
## Restrictions
* Please do NOT use pre-trained networks for this assignment until you reach 80%.
* In other words, base milestones must be beaten without pre-trained nets (and such net must be present in the e-mail). After that, you can use whatever you want.
* you __can__ use validation data for training, but you __can't__ do anything with test data apart from running the evaluation procedure.
## Tips on what can be done:
* __Network size__
* MOAR neurons,
* MOAR layers, ([torch.nn docs](http://pytorch.org/docs/master/nn.html))
* Nonlinearities in the hidden layers
* tanh, relu, leaky relu, etc
   * Larger networks may take more epochs to train, so don't discard your net just because it didn't beat the baseline in 5 epochs.
* Ph'nglui mglw'nafh Cthulhu R'lyeh wgah'nagl fhtagn!
### The main rule of prototyping: one change at a time
* By now you probably have several ideas on what to change. By all means, try them out! But there's a catch: __never test several new things at once__.
### Optimization
* Training for 100 epochs regardless of anything is probably a bad idea.
* Some networks converge over 5 epochs, others - over 500.
* Way to go: stop when validation score is 10 iterations past maximum
* You should certainly use adaptive optimizers
* rmsprop, nesterov_momentum, adam, adagrad and so on.
* Converge faster and sometimes reach better optima
* It might make sense to tweak learning rate/momentum, other learning parameters, batch size and number of epochs
* __BatchNormalization__ (nn.BatchNorm2d) for the win!
* Sometimes more batch normalization is better.
* __Regularize__ to prevent overfitting
* Add some L2 weight norm to the loss function, PyTorch will do the rest
* Can be done manually or like [this](https://discuss.pytorch.org/t/simple-l2-regularization/139/2).
* Dropout (`nn.Dropout`) - to prevent overfitting
* Don't overdo it. Check if it actually makes your network better
### Convolution architectures
* This task __can__ be solved by a sequence of convolutions and poolings with batch_norm and ReLU seasoning, but you shouldn't necessarily stop there.
* [Inception family](https://hacktilldawn.com/2016/09/25/inception-modules-explained-and-implemented/), [ResNet family](https://towardsdatascience.com/an-overview-of-resnet-and-its-variants-5281e2f56035?gi=9018057983ca), [Densely-connected convolutions (exotic)](https://arxiv.org/abs/1608.06993), [Capsule networks (exotic)](https://arxiv.org/abs/1710.09829)
* Please do try a few simple architectures before you go for resnet-152.
* Warning! Training convolutional networks can take long without GPU. That's okay.
   * If you are CPU-only, we still recommend that you try a simple convolutional architecture
* a perfect option is if you can set it up to run at nighttime and check it up at the morning.
* Make reasonable layer size estimates. A 128-neuron first convolution is likely an overkill.
* __To reduce computation__ time by a factor in exchange for some accuracy drop, try using __stride__ parameter. A stride=2 convolution should take roughly 1/4 of the default (stride=1) one.
### Data augmentation
* getting 5x as large dataset for free is a great
* Zoom-in+slice = move
* Rotate+zoom(to remove black stripes)
   * Add Noise (Gaussian or Bernoulli)
* Simple way to do that (if you have PIL/Image):
   * ```from scipy.ndimage import rotate, zoom``` (the old `scipy.misc.imrotate`/`imresize` were removed from SciPy)
* and a few slicing
 * Other cool libraries: cv2, skimage, PIL/Pillow
* A more advanced way is to use torchvision transforms:
```
# Example torchvision augmentation pipeline for CIFAR10 training data:
# random crop + horizontal flip, then tensor conversion and per-channel
# normalization with the standard CIFAR10 statistics.
transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
trainset = torchvision.datasets.CIFAR10(root=path_to_cifar_like_in_seminar, train=True, download=True, transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=2)
```
* Or use this tool from Keras (requires theano/tensorflow): [tutorial](https://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html), [docs](https://keras.io/preprocessing/image/)
* Stay realistic. There's usually no point in flipping dogs upside down as that is not the way you usually see them.
```
```
```
```
```
```
```
```
There is a template for your solution below that you can opt to use or throw away and write it your way.
| github_jupyter |
# 基本程序设计
- 一切代码输入,请使用英文输入法
```
print('hello word')
print('hello word')
# Bug fix: `print 'hello'` is Python 2 statement syntax and raises a
# SyntaxError under Python 3; print must be called as a function.
print('hello')
```
## 编写一个简单的程序
- 圆公式面积: area = radius \* radius \* 3.1415
```
# Area of a circle: area = radius * radius * pi.
radius = 1
area = radius * radius * 3.1415
print(area)
radius = 1.0
area = radius * radius * 3.14 # assign the right-hand result to `area`
# A variable must be initialised before use!
# radius and area are both variables.
# (the first radius above was an int; this one is a float)
print(area)
```
### 在Python里面不需要定义数据的类型
## 控制台的读取与输入
- input 输入进去的是字符串
- eval
```
input('请输入半径:')
name=input('请输入名字:')
print(name,'666666')
input('请输入半径:')
# input() always returns a string; convert before doing arithmetic.
radius = input('请输入半径') # input() returns a string
radius = float(radius)
area = radius * radius * 3.14
print('面积为:',area)
# eval() parses the typed text as a Python expression (int, float, ...).
radius = eval(input('请输入半径'))
print(radius)
print(type(radius))
area = radius * radius * 3.1415
print(area)
# Triangle area from height (gao) and base (di).
gao = eval(input('请输入高'))
di = eval(input ('请输入底'))
area = gao*di*0.5
print(area)
# Lottery demo: an even random number wins.
import random
number = random.randint(0,10)
print(number)
if (number % 2) == 0:
    print('中奖')
else:
    print('谢谢惠顾')
```
- 在jupyter用shift + tab 键可以跳出解释文档
## 变量命名的规范
- 由字母、数字、下划线构成
- 不能以数字开头 \*
- 标识符不能是关键词(实际上是可以强制改变的,但是对于代码规范而言是极其不适合)
- 可以是任意长度
- 驼峰式命名
## 变量、赋值语句和赋值表达式
- 变量: 通俗理解为可以变化的量
- x = 2 \* x + 1 在数学中是一个方程,而在语言中它是一个表达式
- test = test + 1 \* 变量在赋值之前必须有值
## 同时赋值
var1, var2,var3... = exp1,exp2,exp3...
## 定义常量
- 常量:表示一种定值标识符,适合于多次使用的场景。比如PI
- 注意:在其他低级语言中如果定义了常量,那么,该常量是不可以被改变的,但是在Python中一切皆对象,常量也是可以被改变的
## 数值数据类型和运算符
- 在Python中有两种数值类型(int 和 float)适用于加减乘除、模、幂次
<img src = "../Photo/01.jpg"></img>
## 运算符 /、//、**
```
input()  # read one line from the console; the value is discarded
```
## 运算符 %
```
25/4  # true division -> 6.25
25 //4  # floor division -> 6
# Parity check with the remainder operator.
number=int(input('输入一个数'))
if number%2==0:
    print('偶数')
else:
    print('奇数')
# Split a number of seconds into minutes and remaining seconds.
seconds = eval(input('seconds:>>'))
mins = seconds // 60
seconds = seconds % 60
print(mins,"分",seconds,"秒")
# Day-of-week arithmetic modulo 7.
week = eval(input('week:'))
plus_day = eval(input('days:'))
res = (week + plus_day) % 7
print(res)
```
## 科学计数法
- 1.234e+2
- 1.234e-2
```
import numpy as up
# NOTE(review): the alias is presumably a typo for `np`; `up` is never
# used below. x, y, a, b, c are also undefined in this cell, so it raises
# a NameError as written — it only transcribes a formula from the lesson.
(3+4*x)/5 - (10*(y-5)*(a+b+c)/x) + 9*((4/x)+(9+x)/y)
# graph
part_1=(3+4*x)/5
prat_2=(10*(y-5)*(a+b+c)/x)
prat_3=9*((4/x)+(9+x)/y)
```
## 计算表达式和运算优先级
<img src = "../Photo/02.png"></img>
<img src = "../Photo/03.png"></img>
## 增强型赋值运算
<img src = "../Photo/04.png"></img>
## 类型转换
- float -> int
- 四舍五入 round
```
a = 1
float(a) # explicit conversion: int -> float
round(1.498895,3) # the second argument is the number of decimal places kept
```
## EP:
- 如果一个年营业税为0.06%,那么对于197.55e+2的年收入,需要交税为多少?(结果保留2为小数)
- 必须使用科学计数法
```
# Tax on an annual income of 197.55e+2 at a 0.06% rate, rounded to 2 dp.
round((197.55e+2)*0.06/100,2)
# Loan calculator: given the loan amount, derive the monthly payment via
# the annuity formula  payment = P*i / (1 - 1/(1+i)**n).
贷款数 = eval(input('输入贷款数'))
月利率 = 0.01
年限 = 5
# Bug fix: the original read `1-1(1+月利率)**(...)`, which calls the
# integer 1 (a TypeError) — the intended expression divides by (1+i)**n.
月供 = (贷款数 * 月利率)/ (1 - 1/(1 + 月利率)**(年限 * 12))
```
# Project
- 用Python写一个贷款计算器程序:输入的是月供(monthlyPayment) 输出的是总还款数(totalpayment)

# Homework
- 1
<img src="../Photo/06.png"></img>
```
import math
# Celsius -> Fahrenheit: F = C * 9/5 + 32.
a=float(input('输入摄氏度'))
b=a*9/5+32
# NOTE(review): the result `b` is never printed.
```
- 2
<img src="../Photo/07.png"></img>
```
import math
# Cylinder: base area = pi * r^2, volume = base area * height.
radius= eval(input('输入半径:'))
area=radius * radius * math.pi
print('底面积:',area)
length=eval(input('输入高:'))
volume=area * length
print('体积为:',volume)
```
- 3
<img src="../Photo/08.png"></img>
```
# Feet to metres, using 1 ft = 0.305 m (the factor given in the exercise).
feet=eval(input('输入英尺'))
m=feet*0.305
print('%0.1f英尺为%0.1f米' %(feet,m))
```
- 6
<img src="../Photo/12.png"></img>
```
# Average acceleration: a = (v1 - v0) / t.
v0=eval(input('输入初始速度:'))
v1=eval(input('输入末速度:'))
t=eval(input('输入时间:'))
a=(v1-v0)/t
print('加速度;',a)
# Energy to heat water: Q = mass * (T_final - T_initial) * 4184 J/(kg*K).
m=eval(input('输入水量'))
initialtemperature=eval(input('输入初始温度:'))
finaltemperature=eval(input('输入最终温度:'))
q=m*(finaltemperature - initialtemperature) * 4184
print('能量为:',q)
```
- 7 进阶
<img src="../Photo/13.png"></img>
```
# Six monthly deposits with monthly compounding at a 5% annual rate:
# each month the running balance plus a fresh deposit is multiplied by
# the monthly growth factor (1 + 0.05/12).
money=eval(input('存到银行金额:'))
yuelilv=1+(0.05/12)
money1=money*yuelilv
money2=(money1+money)*yuelilv
money3=(money2+money)*yuelilv
money4=(money3+money)*yuelilv
money5=(money4+money)*yuelilv
money6=(money5+money)*yuelilv
print(round(money6,2))
# Monthly interest on a balance: balance * (annual rate / 1200).
balance=eval(input('输入差额'))
interestrate=eval(input('输入年利率'))
lixi=balance * (interestrate/1200)
print('利息为:',lixi)
```
- 8 进阶
<img src="../Photo/14.png"></img>
```
number=eval(input('输入0-1000之间的整数:'))
# Sum of the ones, tens and hundreds digits.
# Renamed from `sum`, which shadowed the builtin of the same name.
# NOTE(review): for the boundary input 1000 this yields 0 — confirm
# whether 1000 should really be accepted.
digit_sum = number%10+(number%100)//10+(number%1000)//100
print(digit_sum)
```
| github_jupyter |
# Loading Image Data
강아지와 고양이를 구분하는 이미지 분류기를 생성하기 위해서는 고양이와 강아지 사진을 모아야 한다. 임의로 수집된 다음과 같은 고양이/강아지 사진을 사용하자.

이 사진을 사용하여 CNN으로 이미지 분류기를 만들기 위해서는 해당 사진을 적절히 전처리하여야 한다.
```
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
import torch
from torchvision import datasets, transforms
```
이미지를 전처리하기 위해 가장 쉽고 편한 방법은 `torchvision`패키지의 `datasets.ImageFolder` 을 사용하는 것이다. ([documentation](http://pytorch.org/docs/master/torchvision/datasets.html#imagefolder)). 일반적으로 `ImageFolder` 의 사용법은 다음과 같다.:
```python
dataset = datasets.ImageFolder('path/to/data', transform=transform)
```
`'path/to/data'` 은 이미지가 있는 디렉토리이다. `transform`은 이미지를 전처리하기 위한 방법이다. [`transforms`](http://pytorch.org/docs/master/torchvision/transforms.html) module은 `torchvision` 패키지의 서브모듈로 다양한 이미지 전처리 메소드를 제공한다.
ImageFolder는 다음과 같은 구조로 구성되어 있어야 한다.:
```
root/dog/xxx.png
root/dog/xxy.png
root/dog/xxz.png
root/cat/123.png
root/cat/nsdf3.png
root/cat/asd932_.png
```
각각의 클래스의 이름으로 된 디렉토리가 있어야 한다. (예를 들면 `cat`, `dog`). 각 이미지의 label은 디렉토리의 이름과 같게 된다. 제공되는 Cat_Dog_data.zip 파일은 미리 train과 test로 나뉘어 있다.
### Transforms
`ImageFolder`로 사진 이미지를 읽어 들일 때 , 이미지 데이터를 신경망에서 처리할 수 있도록 적절하게 전처리 해야 한다. 일단 제각각인 사진의 크기를 같은 사이즈가 되도록 해야한다.
- `transforms.Resize()`
- `transforms.CenterCrop()`
- `transforms.RandomResizedCrop()` 등
`transforms.ToTensor()`로 이미지를 반드시 PyTorch tensors 로 변환해야 한다. 여러가지 변환은 `transforms.Compose()`로 묶어서 처리가 가능하다.
해당 전처리는 순서대로 수행된다.:
```python
transform = transforms.Compose([transforms.Resize(255),
transforms.CenterCrop(224),
transforms.ToTensor()])
```
참고) [documentation](http://pytorch.org/docs/master/torchvision/transforms.html).
### Data Loaders
`ImageFolder` 는 이미지를 전처리하여 데이터 셋으로 만든다. 이렇게 만들어진 이미지 데이터 셋을 [`DataLoader`](http://pytorch.org/docs/master/data.html#torch.utils.data.DataLoader)로 읽어들인다. `DataLoader`로 이미지와 이미지의 label을 읽어 들일 수 있다. shuffle하면 각 epoch 에서 데이터를 읽어 들이기 전에 이미지 데이터를 섞어준다.
```python
dataloader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True)
```
`dataloader` 는 iterator로 `next()`메소드로 for loop를 통해 반복적으로 읽어들인다.
```python
# Looping through it, get a batch on each loop
for images, labels in dataloader:
pass
# Get one batch
images, labels = next(iter(dataloader))
```
>**실습 :** `ImageFolder`로 `Cat_Dog_data/train` 폴더에서 이미지를 읽어 들여보시오. transforms을 정의하고 dataloader로 생성하시오.
```
data_dir = 'Cat_Dog_data/train'
# Resize then centre-crop to 224x224 and convert to a tensor, so every
# image ends up the same size.
transform = transforms.Compose([transforms.Resize(255),
                                transforms.CenterCrop(224),
                                transforms.ToTensor()])
# ImageFolder derives each label from its sub-directory name (cat/, dog/).
dataset = datasets.ImageFolder(data_dir, transform=transform)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=10, shuffle=True)
# Run this to test your data loader
images, labels = next(iter(dataloader))
# Tensors are [C, H, W]; matplotlib expects [H, W, C].
image = images[0].numpy().transpose((1, 2, 0))
plt.imshow(image)
```
## Data Augmentation
이미지를 임의로 회전, 반전, 스케일 변환, crop등을 통해 다양하게 변환시킨다. 이렇게 이미지를 임의로 변형해서 신경망을 훈련하면 이미지 분류의 성능을 더 향상시킬수 있다.
다음과 같이 transform을 수행할 수 있다.:
```python
train_transforms = transforms.Compose([transforms.RandomRotation(30),
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5],
[0.5, 0.5, 0.5])])
```
`transforms.Normalize`로 이미지를 normalize 할 수 있다. means 과 standard deviations을 지정한다.
```input[channel] = (input[channel] - mean[channel]) / std[channel]```
Normalizing 을 하면 신경망의 학습이 더 잘 수행된다.
>**실습 :** train data와 test data에 대해 transforms를 정의한다 (normalization 은 일단 제외).
```
data_dir = 'Cat_Dog_data'
# TODO: Define transforms for the training data and testing data
# Training: random rotation + flip for augmentation, then a 224 crop.
# NOTE(review): train_transforms has no Resize before CenterCrop(224);
# images smaller than 224px would fail — confirm the source image sizes.
train_transforms = transforms.Compose([transforms.RandomRotation(30),
                                       transforms.CenterCrop(224),
                                       transforms.RandomHorizontalFlip(),
                                       transforms.ToTensor()])
# Testing: deterministic resize + centre crop only (no augmentation).
test_transforms = transforms.Compose([transforms.Resize(255),
                                      transforms.CenterCrop(224),
                                      transforms.ToTensor()])
# Pass transforms in here, then run the next cell to see how the transforms look
train_data = datasets.ImageFolder(data_dir + '/train', transform=train_transforms)
test_data = datasets.ImageFolder(data_dir + '/test', transform=test_transforms)
trainloader = torch.utils.data.DataLoader(train_data, batch_size=10, shuffle=True)
testloader = torch.utils.data.DataLoader(test_data, batch_size=10, shuffle=True)
# change this to the trainloader or testloader
# Preview four transformed images with their class names.
class_name=['Cat', 'Dog']
data_iter = iter(trainloader)
images, labels = next(data_iter)
fig, axes = plt.subplots(figsize=(10,4), ncols=4)
for ii in range(4):
    ax = axes[ii]
    image = images[ii].numpy().transpose((1,2,0))  # CHW -> HWC
    ax.set_title(class_name[labels[ii].numpy()])
    ax.imshow(image)
```
transform된 이미지를 확인해 보자.
| github_jupyter |
```
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import pymc3 as pm
from theano import tensor as T
import arviz
import os
import sys
from jupyterthemes import jtplot
jtplot.style(theme="monokai")
os.listdir()
# Load adjusted closes for Cheniere (LNG) and the Dow Jones index, keyed by date.
lng = pd.read_csv("LNG.csv", index_col="Date")[["Adj Close"]]
dji = pd.read_csv("^DJI.csv", index_col="Date")[["Adj Close"]]
lng = lng.rename(columns={"Adj Close":"LNG close"})
dji = dji.rename(columns={"Adj Close":"DJI close"})
# Daily log returns: log(P_t) - log(P_{t-1}); first row becomes NaN and is dropped.
lng["LNG log return"] = np.log(lng["LNG close"]) - np.log(lng["LNG close"].shift(1))
dji["DJI log return"] = np.log(dji["DJI close"]) - np.log(dji["DJI close"].shift(1))
lng = lng.dropna()
dji = dji.dropna()
# Inner merge on the date index aligns the two series to common trading days.
df = pd.merge(lng, dji, left_index=True, right_index=True)
df.head()
plt.figure(figsize=(15,10))
plt.scatter(df["DJI log return"], df["LNG log return"], alpha=0.5)
plt.title("Log LNG returns vs. log DJI returns")
plt.xlabel("DJI log return")
plt.ylabel("LNG log return")
plt.grid()
plt.show()
# Classical beta estimate: beta = cov(market, asset) / var(market).
stacked = np.vstack([df["DJI log return"].values, df["LNG log return"].values])
sample_cov = np.cov(stacked)[0][1]
mkt_port_var = np.var(df["DJI log return"].values)
sample_beta = sample_cov/mkt_port_var
print("The sample estimate of beta is {}".format(sample_beta))
```
# MCMC beta estimation, using the multivariate normal model for log returns
```
# Observation matrix, one row per day: column 0 = DJI log return, column 1 = LNG log return.
data = np.column_stack((df["DJI log return"].values, df["LNG log return"]))
num_samps = 50000
with pm.Model() as model:
    '''
    The code for this model is adapted from Austin Rochford's blog post, available here: https://austinrochford.com/posts/2015-09-16-mvn-pymc3-lkj.html
    '''
    # Per-asset scale parameters; log-normal keeps them strictly positive.
    sigma = pm.Lognormal('sigma', np.zeros(2), np.ones(2), shape=2)
    nu = pm.Uniform("nu", 0, 5)
    # LKJ prior over correlations; C_triu holds the strictly-upper-triangular entry.
    C_triu = pm.LKJCorr('C_triu', nu, 2)
    # Expand the triangular entries into a full 2x2 correlation matrix with unit diagonal.
    C = pm.Deterministic('C', T.fill_diagonal(C_triu[np.zeros((2, 2), dtype=np.int64)], 1.))
    sigma_diag = pm.Deterministic('sigma_mat', T.nlinalg.diag(sigma))
    # Covariance = diag(sigma) @ C @ diag(sigma); tau is its inverse (precision matrix).
    cov = pm.Deterministic('cov', T.nlinalg.matrix_dot(sigma_diag, C, sigma_diag))
    tau = pm.Deterministic('tau', T.nlinalg.matrix_inverse(cov))
    # MvNormal parameterized here with tau (precision), not cov.
    mu = pm.MvNormal('mu', 0, tau, shape=2)
    x_ = pm.MvNormal('x', mu, tau, observed=data)
    step = pm.Metropolis()
    trace_ = pm.sample(num_samps, step)
# Discard burn-in before summarizing.
nburn = 5000
trace = trace_[nburn:]
pm.traceplot(trace)
# Compute matrix inverse directly
a11 = trace["cov"][:, 0, 0]
a12 = trace["cov"][:, 0, 1]
a21 = trace["cov"][:, 1, 0]
a22 = trace["cov"][:, 1, 1]
# 2x2 inverse via the adjugate: inv = 1/det * [[a22, -a12], [-a21, a11]], vectorized over draws.
temp_matrices = np.array([[a22, -a12],[-a21, a11]])
prefactor = 1.0/(a11*a22 - a12*a21)
inv_matrices = prefactor*temp_matrices
# NOTE(review): beta is built from entries of inv(cov) (the precision matrix),
# not cov itself -- confirm this is the intended estimator.
mkt_vars = inv_matrices[0,0,:]
dji_lng_covs = inv_matrices[0,1,:]
betas = dji_lng_covs/mkt_vars
plt.figure(figsize=(15,10))
plt.hist(betas, bins=50)
plt.title("Posterior Samples of the Beta of LNG, using Multivariate Gaussian Model")
plt.savefig("lng_beta_normal.png")
plt.show()
print(np.array(betas).std())
print(np.array(betas).mean())
```
# MCMC beta estimation, using the multivariate student-t model for log returns
```
# Same observation matrix as the Gaussian model: DJI in column 0, LNG in column 1.
data = np.column_stack((df["DJI log return"].values, df["LNG log return"]))
num_samps = 50000
with pm.Model() as model:
    '''
    The code for this model is adapted from Austin Rochford's blog post, available here: https://austinrochford.com/posts/2015-09-16-mvn-pymc3-lkj.html
    '''
    # Scale / correlation construction identical to the MvNormal model above.
    sigma = pm.Lognormal('sigma', np.zeros(2), np.ones(2), shape=2)
    nu = pm.Uniform("nu", 0, 5)
    C_triu = pm.LKJCorr('C_triu', nu, 2)
    C = pm.Deterministic('C', T.fill_diagonal(C_triu[np.zeros((2, 2), dtype=np.int64)], 1.))
    sigma_diag = pm.Deterministic('sigma_mat', T.nlinalg.diag(sigma))
    cov = pm.Deterministic('cov', T.nlinalg.matrix_dot(sigma_diag, C, sigma_diag))
    tau = pm.Deterministic('tau', T.nlinalg.matrix_inverse(cov))
    # Degrees of freedom for the student-t, shifted so nu3 > 2 (finite variance).
    nu2 = pm.HalfNormal("nu2", sigma=1)
    nu3 = 2.01 + nu2 # We assume support is roughly > 2
    # NOTE(review): Sigma= is passed tau (the precision); confirm the intended
    # parameterization of MvStudentT here.
    mu = pm.MvStudentT('mu', nu=nu3, Sigma=tau, mu=0, shape=2)
    x_ = pm.MvStudentT('x', nu=nu3, Sigma=tau, mu=mu, observed=data)
    step = pm.Metropolis()
    trace_ = pm.sample(num_samps, step)
# Discard burn-in before summarizing.
nburn = 5000
trace2 = trace_[nburn:]
pm.traceplot(trace2)
# Compute matrix inverse directly
a11 = trace2["cov"][:, 0, 0]
a12 = trace2["cov"][:, 0, 1]
a21 = trace2["cov"][:, 1, 0]
a22 = trace2["cov"][:, 1, 1]
# 2x2 inverse via the adjugate, vectorized over posterior draws.
temp_matrices = np.array([[a22, -a12],[-a21, a11]])
prefactor = 1.0/(a11*a22 - a12*a21)
inv_matrices = prefactor*temp_matrices
mkt_vars = inv_matrices[0,0,:]
dji_lng_covs = inv_matrices[0,1,:]
betas = dji_lng_covs/mkt_vars
plt.figure(figsize=(15,10))
plt.hist(betas, bins=50)
plt.title("Posterior Samples of the Beta of LNG, using Multivariate Student T model")
plt.savefig("lng_beta_student_t.png")
plt.show()
print(np.array(betas).std())
print(np.array(betas).mean())
```
| github_jupyter |
# Load MXNet model
In this tutorial, you learn how to load an existing MXNet model and use it to run a prediction task.
## Preparation
This tutorial requires the installation of Java Kernel. For more information on installing the Java Kernel, see the [README](https://github.com/awslabs/djl/blob/master/jupyter/README.md) to install Java Kernel.
```
%mavenRepo snapshots https://oss.sonatype.org/content/repositories/snapshots/
%maven ai.djl:api:0.3.0-SNAPSHOT
%maven ai.djl:repository:0.3.0-SNAPSHOT
%maven ai.djl:model-zoo:0.3.0-SNAPSHOT
%maven ai.djl.mxnet:mxnet-engine:0.3.0-SNAPSHOT
%maven ai.djl.mxnet:mxnet-model-zoo:0.3.0-SNAPSHOT
%maven org.slf4j:slf4j-api:1.7.26
%maven org.slf4j:slf4j-simple:1.7.26
%maven net.java.dev.jna:jna:5.3.0
// See https://github.com/awslabs/djl/blob/master/mxnet/mxnet-engine/README.md
// for more MXNet library selection options
%maven ai.djl.mxnet:mxnet-native-auto:1.6.0-SNAPSHOT
import java.awt.image.*;
import java.nio.file.*;
import java.util.*;
import java.util.stream.*;
import ai.djl.*;
import ai.djl.inference.*;
import ai.djl.ndarray.*;
import ai.djl.ndarray.index.*;
import ai.djl.modality.*;
import ai.djl.modality.cv.*;
import ai.djl.modality.cv.util.*;
import ai.djl.modality.cv.transform.*;
import ai.djl.mxnet.zoo.*;
import ai.djl.translate.*;
import ai.djl.training.util.*;
import ai.djl.util.*;
import ai.djl.basicmodelzoo.cv.classification.*;
```
## Step 1: Prepare your MXNet model
This tutorial assumes that you have a MXNet model trained using Python. A MXNet symbolic model usually contains the following files:
* Symbol file: {MODEL_NAME}-symbol.json - a json file that contains network information about the model
* Parameters file: {MODEL_NAME}-{EPOCH}.params - a binary file that stores the parameter weight and bias
* Synset file: synset.txt - an optional text file that stores classification classes labels
This tutorial uses a pre-trained MXNet `resnet18_v1` model.
We use [DownloadUtils.java] for downloading files from internet.
```
%load DownloadUtils.java
// Fetch the pre-trained resnet18_v1 artifacts (symbol graph, weights, class labels) into build/resnet.
DownloadUtils.download("https://mlrepo.djl.ai/model/cv/image_classification/ai/djl/mxnet/resnet/0.0.1/resnet18_v1-symbol.json", "build/resnet/resnet18_v1-symbol.json", new ProgressBar());
DownloadUtils.download("https://mlrepo.djl.ai/model/cv/image_classification/ai/djl/mxnet/resnet/0.0.1/resnet18_v1-0000.params.gz", "build/resnet/resnet18_v1-0000.params", new ProgressBar());
DownloadUtils.download("https://mlrepo.djl.ai/model/cv/image_classification/ai/djl/mxnet/synset.txt", "build/resnet/synset.txt", new ProgressBar());
```
## Step 2: Load your model
```
// Point DJL at the downloaded directory and load the symbolic model by its file-name prefix.
Path modelDir = Paths.get("build/resnet");
Model model = Model.newInstance();
model.load(modelDir, "resnet18_v1");
```
## Step 3: Create a `Translator`
```
// Preprocessing pipeline: center-crop, resize to the 224x224 network input, convert to tensor.
Pipeline pipeline = new Pipeline();
pipeline.add(new CenterCrop()).add(new Resize(224, 224)).add(new ToTensor());
// The translator maps BufferedImage -> model input and model output -> Classifications,
// using synset.txt for the class labels.
Translator<BufferedImage, Classifications> translator = ImageClassificationTranslator.builder()
                .setPipeline(pipeline)
                .setSynsetArtifactName("synset.txt")
                .build();
```
## Step 4: Load image for classification
```
// Load the test image; the bare `img` on the last line renders it in the notebook.
var img = BufferedImageUtils.fromUrl("https://djl-ai.s3.amazonaws.com/resources/images/kitten.jpg");
img
```
## Step 5: Run inference
```
// A Predictor binds the model to the translator for inference.
Predictor<BufferedImage, Classifications> predictor = model.newPredictor(translator);
Classifications classifications = predictor.predict(img);
classifications
```
## Summary
Now, you can load any MXNet symbolic model and run inference.
| github_jupyter |
## Aligning rasters: A step-by-step breakdown
This notebook aligns input rasters with a base reference raster. The implicit purpose, reflected in the datasets used here, is to align rasters so that raster math operations can be performed between them.
```
import os, sys
import re
import pprint
# from pprint import pprint
import numpy as np
import rasterio
from rasterio import features, transform
from rasterio.mask import mask
from rasterio.transform import Affine
from rasterio.warp import calculate_default_transform, reproject, Resampling
import pandas as pd
import geopandas as gpd
import shapely
from shapely.geometry import shape, box, Polygon
```
### Setup
Directories
```
# Directory layout for geodata inputs/outputs used throughout the workflow.
geo_dir = r'P:\PAK\GEO'
data_dir = r'../../data'
rast_dir = r'rast_inputs'
vect_in_dir = r'vect_inputs'
vect_out_dir = r'vect_out'
rds_dir = r'roads'
dest_dir = r'destinations'
speed_dir = r'speed'
fric_dir = r'friction'
acc_dir = r'access'
```
Projections
```
# Target projection (UTM zone 42N); dcrs_int extracts the bare EPSG code as an int.
dest_crs = 'EPSG:32642'
dcrs_int = int(re.findall('[0-9]+',dest_crs)[0])
```
### Load and process raster to points
Load in the base raster we are using as a template so we can match up exactly to its grid and cell size
```
# Base/reference population raster (HRSL) whose grid we want to match.
rast_pth = os.path.join(geo_dir,r'Population/HRSL/kp_general_v15.tif')
import rasterio
from rasterio import features
with rasterio.open(rast_pth, 'r') as src1:
    rast = src1.read(1).astype(np.float32)
# populate geoms list: one polygon per contiguous region of equal cell value,
# carrying the cell value as property 'POP'
# NOTE(review): src1.transform is referenced after the `with` closes the dataset;
# confirm the intended scoping.
results = (
    {'properties': {'POP': v}, 'geometry': s}
    for i, (s, v)
    in enumerate(
        rasterio.features.shapes(rast, transform=src1.transform)))
geoms = list(results)
# convert to GDF, clean up, and dissolve
poly = gpd.GeoDataFrame.from_features(geoms)
pts = poly.copy()
# Collapse each polygon to its centroid: one point per raster region.
pts.geometry = pts.geometry.centroid
pts.dtypes
```
#### Set up dask cluster (if this is a lot points)
```
import dask
import coiled
from dask.distributed import Client, LocalCluster, Lock
from dask.utils import SerializableLock
import dask.dataframe as dd
from dask_control import *
# Local cluster: 2 worker processes x 4 threads each (helper from dask_control).
client=get_dask_client(cluster_type='local',n_workers=2,processes=True,threads_per_worker=4)
client
```
#### Load in points data
```
# Load in points if needing to redo for some reason
pts = dd.read_csv(os.path.join(geo_dir,'Population/HRSL/pak_general_v15_pts.csv'),
na_values = ' ',
blocksize='100mb'
)
# pts = gpd.GeoDataFrame(pts, geometry = gpd.points_from_xy(x=pts.lon_4326,y=pts.lat_4326)).set_crs("EPSG:4326")
```
#### Clip to desired extent
Load in KP as clipping object
```
# Load the admin-1 boundaries and keep only the Khyber Pakhtunkhwa polygon.
kp = gpd.read_file(os.path.join(geo_dir,'Boundaries/OCHA/pak_admbnda_adm1_ocha_pco_gaul_20181218.shp'))
kp = kp[kp['ADM1_EN'] == 'Khyber Pakhtunkhwa']
kp = kp.to_crs(dest_crs)
# Buffer the polygon by 20km so we take in nearby markets and roads that may be used
# kp.geometry = kp.buffer(20000)
# Back to WGS84 to match the lon/lat columns used by clip_pts below.
kp = kp.to_crs(4326)
# Commented-out alternative: an in-memory (non-dask) clip of the full points set.
# pts = pts.to_crs(4326)
# pts['lon_4326'] = pts.geometry.x
# pts['lat_4326'] = pts.geometry.y
# pts = pts.to_crs(32642)
# pts['lon_32642'] = pts.geometry.x
# pts['lat_32642'] = pts.geometry.y
# kp_pts = gpd.clip(pts,kp)
def clip_pts(df, polys):
    """Clip one partition of points to the supplied polygons (in EPSG:4326).

    Parameters
    ----------
    df : pandas.DataFrame
        Partition with `lon_4326` / `lat_4326` coordinate columns.
    polys : geopandas.GeoDataFrame or dask Future resolving to one
        Clip geometry, already in EPSG:4326.

    Returns
    -------
    pandas.DataFrame
        Rows of `df` whose points fall inside `polys`, geometry column dropped.
    """
    # Join using 4326
    # When the polygons arrive as a dask Future (from client.scatter), resolve first.
    if not isinstance(polys, gpd.GeoDataFrame):
        polys = polys.result()
    # Convert the partition to a GeoDataFrame of points.
    gdf = gpd.GeoDataFrame(
        df,
        geometry=gpd.points_from_xy(df.lon_4326, df.lat_4326)
    ).set_crs("EPSG:4326")
    # Clip by extent
    gdf = gpd.clip(gdf, polys)
    # Return a plain DataFrame so dask needs no geopandas metadata downstream.
    return pd.DataFrame(gdf.drop('geometry', axis=1))
# Broadcast adm3
# Ship one copy of the clip polygon to every worker instead of per-task pickling.
kp_dist = client.scatter(kp, broadcast=True)
# Distributed clip
# clip_pts runs once per partition; kp_dist resolves to the GeoDataFrame inside it.
kp_pts = pts.map_partitions(clip_pts, kp_dist)
len(kp_pts)
kp_pts
kp_pts.dtypes
kp_pts.head()
```
#### Export
```
# pts.drop('geometry',axis=1).to_csv(os.path.join(geo_dir,'Population/HRSL/pak_general_v15_pts.csv'))
kp_pts.drop('geometry',axis=1).to_csv(os.path.join(geo_dir,'Population/HRSL/kp_general_v15_pts.csv'))
# NOTE(review): clip_pts returns a plain DataFrame with the geometry already dropped,
# so the drop/to_crs calls here look inconsistent with the dask branch above --
# verify which branch of the workflow these export lines belong to.
pts.to_crs(4326).to_file(os.path.join(geo_dir,'Population/HRSL/pak_general_v15_pts.gpkg'),layer="pak_general_v15_4326",driver='GPKG')
pts.to_crs(dcrs_int).to_file(os.path.join(geo_dir,'Population/HRSL/pak_general_v15_pts.gpkg'),layer=f"pak_general_v15_{dcrs_int}",driver='GPKG')
kp_pts.to_crs(4326).to_file(os.path.join(geo_dir,'Population/HRSL/kp_hrsl_v15_pts.gpkg'),layer="kp_general_v15_4326",driver='GPKG')
kp_pts.to_crs(dcrs_int).to_file(os.path.join(geo_dir,'Population/HRSL/kp_hrsl_v15_pts.gpkg'),layer=f"kp_general_v15_{dcrs_int}",driver='GPKG')
```
| github_jupyter |
# Introduction to Deep Learning with PyTorch
In this notebook, you'll get introduced to [PyTorch](http://pytorch.org/), a framework for building and training neural networks. PyTorch in a lot of ways behaves like the arrays you love from Numpy. These Numpy arrays, after all, are just tensors. PyTorch takes these tensors and makes it simple to move them to GPUs for the faster processing needed when training neural networks. It also provides a module that automatically calculates gradients (for backpropagation!) and another module specifically for building neural networks. All together, PyTorch ends up being more coherent with Python and the Numpy/Scipy stack compared to TensorFlow and other frameworks.
## Neural Networks
Deep Learning is based on artificial neural networks which have been around in some form since the late 1950s. The networks are built from individual parts approximating neurons, typically called units or simply "neurons." Each unit has some number of weighted inputs. These weighted inputs are summed together (a linear combination) then passed through an activation function to get the unit's output.
<img src="assets/simple_neuron.png" width=400px>
Mathematically this looks like:
$$
\begin{align}
y &= f(w_1 x_1 + w_2 x_2 + b) \\
y &= f\left(\sum_i w_i x_i +b \right)
\end{align}
$$
With vectors this is the dot/inner product of two vectors:
$$
h = \begin{bmatrix}
x_1 \, x_2 \cdots x_n
\end{bmatrix}
\cdot
\begin{bmatrix}
w_1 \\
w_2 \\
\vdots \\
w_n
\end{bmatrix}
$$
## Tensors
It turns out neural network computations are just a bunch of linear algebra operations on *tensors*, a generalization of matrices. A vector is a 1-dimensional tensor, a matrix is a 2-dimensional tensor, an array with three indices is a 3-dimensional tensor (RGB color images for example). The fundamental data structure for neural networks are tensors and PyTorch (as well as pretty much every other deep learning framework) is built around tensors.
<img src="assets/tensor_examples.svg" width=600px>
With the basics covered, it's time to explore how we can use PyTorch to build a simple neural network.
```
# First, import PyTorch
import torch
def activation(x):
    """Sigmoid activation: maps each element of x into (0, 1).

    Arguments
    ---------
    x: torch.Tensor
    """
    # sigmoid(x) = 1 / (1 + e^(-x)), written as the reciprocal of the denominator
    denom = 1 + torch.exp(-x)
    return torch.reciprocal(denom)
### Generate some data
torch.manual_seed(7) # Set the random seed so things are predictable
# Features are 5 random normal variables (shape (1, 5): one sample, five features)
features = torch.randn((1, 5))
# True weights for our data, random normal variables again
weights = torch.randn_like(features)
# and a true bias term
bias = torch.randn((1, 1))
```
Above I generated data we can use to get the output of our simple network. This is all just random for now, going forward we'll start using normal data. Going through each relevant line:
`features = torch.randn((1, 5))` creates a tensor with shape `(1, 5)`, one row and five columns, that contains values randomly distributed according to the normal distribution with a mean of zero and standard deviation of one.
`weights = torch.randn_like(features)` creates another tensor with the same shape as `features`, again containing values from a normal distribution.
Finally, `bias = torch.randn((1, 1))` creates a single value from a normal distribution.
PyTorch tensors can be added, multiplied, subtracted, etc, just like Numpy arrays. In general, you'll use PyTorch tensors pretty much the same way you'd use Numpy arrays. They come with some nice benefits though such as GPU acceleration which we'll get to later. For now, use the generated data to calculate the output of this simple single layer network.
> **Exercise**: Calculate the output of the network with input features `features`, weights `weights`, and bias `bias`. Similar to Numpy, PyTorch has a [`torch.sum()`](https://pytorch.org/docs/stable/torch.html#torch.sum) function, as well as a `.sum()` method on tensors, for taking sums. Use the function `activation` defined above as the activation function.
```
## Calculate the output of this network using the weights and bias tensors
# Output of the network (prediction) formula: y = sigmoid(sum(w_i * x_i) + b)
output = activation(torch.sum(features * weights) + bias)
output
```
You can do the multiplication and sum in the same operation using a matrix multiplication. In general, you'll want to use matrix multiplications since they are more efficient and accelerated using modern libraries and high-performance computing on GPUs.
Here, we want to do a matrix multiplication of the features and the weights. For this we can use [`torch.mm()`](https://pytorch.org/docs/stable/torch.html#torch.mm) or [`torch.matmul()`](https://pytorch.org/docs/stable/torch.html#torch.matmul) which is somewhat more complicated and supports broadcasting. If we try to do it with `features` and `weights` as they are, we'll get an error
```python
>> torch.mm(features, weights)
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-13-15d592eb5279> in <module>()
----> 1 torch.mm(features, weights)
RuntimeError: size mismatch, m1: [1 x 5], m2: [1 x 5] at /Users/soumith/minicondabuild3/conda-bld/pytorch_1524590658547/work/aten/src/TH/generic/THTensorMath.c:2033
```
As you're building neural networks in any framework, you'll see this often. Really often. What's happening here is our tensors aren't the correct shapes to perform a matrix multiplication. Remember that for matrix multiplications, the number of columns in the first tensor must equal to the number of rows in the second column. Both `features` and `weights` have the same shape, `(1, 5)`. This means we need to change the shape of `weights` to get the matrix multiplication to work.
**Note:** To see the shape of a tensor called `tensor`, use `tensor.shape`. If you're building neural networks, you'll be using this method often.
There are a few options here: [`weights.reshape()`](https://pytorch.org/docs/stable/tensors.html#torch.Tensor.reshape), [`weights.resize_()`](https://pytorch.org/docs/stable/tensors.html#torch.Tensor.resize_), and [`weights.view()`](https://pytorch.org/docs/stable/tensors.html#torch.Tensor.view).
* `weights.reshape(a, b)` will return a new tensor with the same data as `weights` with size `(a, b)` sometimes, and sometimes a clone, as in it copies the data to another part of memory.
* `weights.resize_(a, b)` returns the same tensor with a different shape. However, if the new shape results in fewer elements than the original tensor, some elements will be removed from the tensor (but not from memory). If the new shape results in more elements than the original tensor, new elements will be uninitialized in memory. Here I should note that the underscore at the end of the method denotes that this method is performed **in-place**. Here is a great forum thread to [read more about in-place operations](https://discuss.pytorch.org/t/what-is-in-place-operation/16244) in PyTorch.
* `weights.view(a, b)` will return a new tensor with the same data as `weights` with size `(a, b)`.
I usually use `.view()`, but any of the three methods will work for this. So, now we can reshape `weights` to have five rows and one column with something like `weights.view(5, 1)`.
> **Exercise**: Calculate the output of our little network using matrix multiplication.
```
## Calculate the output of this network using matrix multiplication
# weights.view(5, 1) reshapes (1, 5) -> (5, 1) so torch.mm((1,5), (5,1)) gives (1,1).
y = activation(torch.mm(features, weights.view(5, 1)) + bias)
y
```
### Stack them up!
That's how you can calculate the output for a single neuron. The real power of this algorithm happens when you start stacking these individual units into layers and stacks of layers, into a network of neurons. The output of one layer of neurons becomes the input for the next layer. With multiple input units and output units, we now need to express the weights as a matrix.
<img src='assets/multilayer_diagram_weights.png' width=450px>
The first layer shown on the bottom here are the inputs, understandably called the **input layer**. The middle layer is called the **hidden layer**, and the final layer (on the right) is the **output layer**. We can express this network mathematically with matrices again and use matrix multiplication to get linear combinations for each unit in one operation. For example, the hidden layer ($h_1$ and $h_2$ here) can be calculated
$$
\vec{h} = [h_1 \, h_2] =
\begin{bmatrix}
x_1 \, x_2 \cdots \, x_n
\end{bmatrix}
\cdot
\begin{bmatrix}
w_{11} & w_{12} \\
w_{21} &w_{22} \\
\vdots &\vdots \\
w_{n1} &w_{n2}
\end{bmatrix}
$$
The output for this small network is found by treating the hidden layer as inputs for the output unit. The network output is expressed simply
$$
y = f_2 \! \left(\, f_1 \! \left(\vec{x} \, \mathbf{W_1}\right) \mathbf{W_2} \right)
$$
```
### Generate some data
torch.manual_seed(7) # Set the random seed so things are predictable
# Features are 3 random normal variables
features = torch.randn((1, 3))
# Define the size of each layer in our network
n_input = features.shape[1] # Number of input units, must match number of input features
n_hidden = 2 # Number of hidden units
n_output = 1 # Number of output units
# Weights for inputs to hidden layer, shape (n_input, n_hidden)
W1 = torch.randn(n_input, n_hidden)
# Weights for hidden layer to output layer, shape (n_hidden, n_output)
W2 = torch.randn(n_hidden, n_output)
# and bias terms for hidden and output layers
B1 = torch.randn((1, n_hidden))
B2 = torch.randn((1, n_output))
```
> **Exercise:** Calculate the output for this multi-layer network using the weights `W1` & `W2`, and the biases, `B1` & `B2`.
```
## Output for multilayer network.
# Hidden: (1,3) @ (3,2) + (1,2) -> (1,2); output: (1,2) @ (2,1) + (1,1) -> (1,1).
hidden_layer = activation(torch.mm(features, W1) + B1)
output_layer = activation(torch.mm(hidden_layer, W2) + B2)
output_layer
```
If you did this correctly, you should see the output `tensor([[ 0.3171]])`.
The number of hidden units is a parameter of the network, often called a **hyperparameter** to differentiate it from the weights and biases parameters. As you'll see later when we discuss training a neural network, the more hidden units a network has, and the more layers, the better able it is to learn from data and make accurate predictions.
## Numpy to Torch and back
Special bonus section! PyTorch has a great feature for converting between Numpy arrays and Torch tensors. To create a tensor from a Numpy array, use `torch.from_numpy()`. To convert a tensor to a Numpy array, use the `.numpy()` method.
```
import numpy as np
a = np.random.rand(4,3)
a
# torch.from_numpy shares memory with the numpy array (no copy is made).
b = torch.from_numpy(a)
b
b.numpy()
```
The memory is shared between the Numpy array and Torch tensor, so if you change the values in-place of one object, the other will change as well.
```
# Multiply PyTorch Tensor by 2, in place (trailing underscore = in-place op)
b.mul_(2)
# Numpy array matches new values from Tensor, because the memory is shared
a
```
| github_jupyter |
```
%matplotlib inline
from matplotlib import style
style.use('fivethirtyeight')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy import stats
from scipy.stats import ttest_ind, ttest_ind_from_stats
import datetime as dt
from datetime import datetime,timedelta
from itertools import chain
```
# Reflect Tables into SQLAlchemy ORM
```
# Python SQL toolkit and Object Relational Mapper
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func, inspect
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
# reflect an existing database into a new model
base = automap_base()
# reflect the tables
base.prepare(engine, reflect=True)
# We can view all of the classes that automap found
base.classes.keys()
# Save references to each table (mapped classes generated by automap)
measurement = base.classes.measurement
station = base.classes.station
# check columns in the measurement table
inspector = inspect(engine)
columns = inspector.get_columns("measurement")
for column in columns:
    print(column["name"], column["type"])
# check columns in the station table
inspector = inspect(engine)
columns = inspector.get_columns("station")
for column in columns:
    print(column["name"], column["type"])
```
# Exploratory Climate Analysis
```
# Create our session (link) from Python to the DB
session = Session(engine)
# Calculate the date 1 year ago from the last data point in the database
# need last year before precip data
last_date = (engine.execute("SELECT date FROM measurement ORDER BY date DESC").first())[0]
print(last_date)
# Dates are stored as 'YYYY-MM-DD' strings; parse and step back 365 days.
year, month, day = map(int, last_date.split("-"))
year_ago = dt.datetime(year, month, day) - timedelta(365)
# year_ago = year_ago.strftime("%Y-%m-%d")
print(year_ago)
# Design a query to retrieve the last 12 months of precipitation data and plot the results
last_year_precip = session.query(measurement.date, measurement.prcp).filter(measurement.date >= year_ago).all()
# print(last_year_precip)
# Perform a query to retrieve the data and precipitation scores
date = [row[0] for row in last_year_precip]
precip = [row[1] for row in last_year_precip]
# Save the query results as a Pandas DataFrame and set the index to the date column
climate_df = pd.DataFrame({"Date" : date,
                           "Precipitation" : precip}).set_index("Date")
# climate_df
# Sort the dataframe by date
climate_df = climate_df.sort_values("Date")
# climate_df.head()
# Use Pandas Plotting with Matplotlib to plot the data
climate_df.plot(figsize = (10,5))
plt.xlabel("Date")
# plt.tick_params(
#     axis="x",
#     which="both",
#     labelbottom=False)
plt.legend(loc = "best")
plt.show()
# Use Pandas to calculate the summary statistics for the precipitation data
climate_df.describe()
# Design a query to show how many stations are available in this dataset?
session.query(func.count(station.name)).all()
# What are the most active stations? (i.e. what stations have the most rows)?
# List the stations and the counts in descending order.
engine.execute("SELECT station, count(station) AS count FROM measurement GROUP BY station ORDER BY count desc").fetchall()
# Using the station id from the previous query, calculate the lowest temperature recorded,
# highest temperature recorded, and average temperature of the most active station?
engine.execute("SELECT min(tobs), max(tobs), avg(tobs) FROM measurement WHERE station = 'USC00519281'").fetchall()
# Choose the station with the highest number of temperature observations.
# Query the last 12 months of temperature observation data for this station and plot the results as a histogram
data = engine.execute("SELECT tobs FROM measurement WHERE date >= '2016-8-23' AND station = 'USC00519281'").fetchall()
data = [row[0] for row in data]
hist_data = pd.DataFrame({"tobs": data})
hist_data.head()
hist = hist_data.hist(bins = 12, figsize = (10, 5))
plt.ylabel("Frequency")
plt.xlabel("Temperature")
plt.title("")
plt.legend(["tobs"])
plt.show()
```
## Bonus Challenge Assignment
```
# Average June Temps vs Average December Temps
june = "06"
june_temp = session.query(measurement.tobs).filter(func.strftime("%m", measurement.date) == june).all()
# BUG FIX: December is month "12"; the original set dec = "06", so the t-test
# compared June against itself (guaranteeing a meaningless p-value of ~1).
dec = "12"
dec_temp = session.query(measurement.tobs).filter(func.strftime("%m", measurement.date) == dec).all()
# Welch's t-test (unequal variances) on the two monthly temperature samples
stats.ttest_ind(june_temp, dec_temp, equal_var=False)
# This function called `calc_temps` will accept start date and end date in the format '%Y-%m-%d'
# and return the minimum, average, and maximum temperatures for that range of dates
def calc_temps(start_date, end_date):
    """TMIN, TAVG, and TMAX for a list of dates.

    Args:
        start_date (string): A date string in the format %Y-%m-%d
        end_date (string): A date string in the format %Y-%m-%d

    Returns:
        TMIN, TAVE, and TMAX
    """
    # Aggregate columns computed in a single SELECT over the date window.
    aggregates = [
        func.min(measurement.tobs),
        func.avg(measurement.tobs),
        func.max(measurement.tobs),
    ]
    query = session.query(*aggregates)
    query = query.filter(measurement.date >= start_date)
    query = query.filter(measurement.date <= end_date)
    return query.all()
# function usage example
print(calc_temps('2012-02-28', '2012-03-05'))
# Use your previous function `calc_temps` to calculate the tmin, tavg, and tmax
# for your trip using the previous year's data for those same dates.
my_trip = (calc_temps("2017-08-10", "2017-08-20"))
print(my_trip)
my_trip_df = pd.DataFrame(my_trip, columns = ["min", "avg", "max"])
my_trip_df
# Plot the results from your previous query as a bar chart.
# Use "Trip Avg Temp" as your Title
# Use the average temperature for the y value
# Use the peak-to-peak (tmax-tmin) value as the y error bar (yerr)
error = [my_trip_df["max"] - my_trip_df["min"]]
my_trip_df.plot(kind="bar", y="avg", yerr=error, title = "Trip Avg Temp", color="blue", figsize= (6,4), legend="")
plt.ylabel("Temp (F)")
# hide the x tick label (there is only a single bar)
plt.tick_params(
    axis="x",
    which="both",
    labelbottom=False)
# Calculate the total amount of rainfall per weather station for your trip dates using the previous year's matching dates.
# Sort this in descending order by precipitation amount and list the station, name, latitude, longitude, and elevation
engine.execute("SELECT measurement.station, name, latitude, longitude, elevation, sum(prcp) AS total_rainfall \
FROM measurement \
JOIN station ON measurement.station == station.station \
WHERE date BETWEEN '2017-08-10' AND '2017-08-20' \
GROUP BY measurement.station ORDER BY total_rainfall DESC").fetchall()
# Create a query that will calculate the daily normals
# (i.e. the averages for tmin, tmax, and tavg for all historic data matching a specific month and day)
def daily_normals(date):
    """Daily Normals.

    Args:
        date (str): A date string in the format '%m-%d'

    Returns:
        A list of tuples containing the daily normals, tmin, tavg, and tmax
    """
    # Aggregate min/avg/max over every row whose month-day matches `date`.
    stat_cols = (
        func.min(measurement.tobs),
        func.avg(measurement.tobs),
        func.max(measurement.tobs),
    )
    matches_day = func.strftime("%m-%d", measurement.date) == date
    return session.query(*stat_cols).filter(matches_day).all()
daily_normals("01-01")
# calculate the daily normals for your trip
# Set the start and end date of the trip
start_date = "2017-08-10"
end_date ="2017-08-20"
# Use the start and end date to create a range of dates
vacay_dates = pd.date_range(start_date, end_date).strftime("%Y-%m-%d")
# vacay_dates
# Strip off the year and save a list of %m-%d strings
vacay_dates = pd.date_range(start_date, end_date).strftime("%m-%d")
vacay_dates
# Loop through the list of %m-%d strings and calculate the normals for each date
# push each tuple of calculations into a list called `normals`
normals = []
for date in vacay_dates:
    normal = daily_normals(date)
    normals.append(normal)
normals
# Load the previous query results into a Pandas DataFrame and
# flatten the list of one-row result lists into a flat list of tuples
new_list = [x for x in chain.from_iterable(normals)]
# new_list
my_trip_df = pd.DataFrame(new_list, columns = ["tmin", "tavg", "tmax"])
# my_trip_df
# add the `trip_dates` range as the `date` index
my_trip_df["date"] = vacay_dates
my_trip_df = my_trip_df.set_index("date")
my_trip_df
# Plot the daily normals as an area plot with `stacked=False`
my_trip_df.plot(kind = "area", stacked=False, figsize = (10,5))
plt.xticks(rotation= 45)
```
| github_jupyter |
```
import numpy as np
import cv2
import matplotlib.pyplot as plt
import math
def rgb2hsi(rgb):
    """Convert an RGB image (uint8) to HSI, pixel by pixel.

    H is in radians [0, 2*pi); S and I are normalized to [0, 1].
    """
    # split channels
    R,G,B= cv2.split(rgb)
    # normalize channels to [0, 1]
    R =R/255
    G =G/255
    B =B/255
    # image dimensions
    x=R.shape[0]
    y=R.shape[1]
    # allocate working / output arrays
    r=np.empty([x,y])
    g=np.empty([x,y])
    b=np.empty([x,y])
    H=np.empty([x,y])
    S=np.empty([x,y])
    I=np.empty([x,y])
    # iterate over every pixel
    for i in range(0, x):
        for j in range(0,y):
            # chromaticity coordinates r, g, b (each channel / channel sum)
            divisor=R[i,j]+G[i,j]+B[i,j]
            I[i,j]=divisor/3.0
            if (divisor != 0.0):
                r[i,j]=R[i,j]/divisor
                g[i,j]=G[i,j]/divisor
                b[i,j]=B[i,j]/divisor
            # compute H and S
            if (R[i,j]==G[i,j]) and (G[i,j]==B[i,j]):
                # gray pixel: hue is undefined, saturation is zero
                H[i,j]=0
                S[i,j]=0
            else:
                argum=(R[i,j]-G[i,j])*(R[i,j]-G[i,j])+(R[i,j]-B[i,j])*(G[i,j]-B[i,j])
                num=0.5*((R[i,j]-G[i,j]) + (R[i,j]-B[i,j]))
                w=num/math.sqrt(argum)
                # clamp to the acos domain against floating-point round-off
                if (w>1): w=1
                if (w<-1): w=-1
                H[i,j]=math.acos(w)
                # NOTE(review): acos never returns < 0, so this guard looks unreachable;
                # the break would also only exit the inner (column) loop.
                if H[i,j] < 0:
                    print('b')
                    break
                # hue > pi lives in the B-dominant half: reflect around 2*pi
                if B[i,j] > G[i,j]:
                    H[i,j]=2*math.pi-H[i,j]
                # S = 1 - 3 * min(r, g, b)
                if (r[i,j] <= g[i,j]) & (r[i,j] <= b[i,j]):
                    S[i,j]=1-3*r[i,j]
                if (g[i,j] <= r[i,j]) & (g[i,j] <= b[i,j]):
                    S[i,j]=1-3*g[i,j]
                if (b[i,j] <= r[i,j]) & (b[i,j] <= g[i,j]):
                    S[i,j]=1-3*b[i,j]
    #H*=179
    #S*=255
    #I*=255
    hsi=cv2.merge([H,S,I])
    return hsi
def hsi2rgb(hsi):
    """Convert an HSI image back to an RGB uint8 image.

    hsi: H x W x 3 array with channels (H, S, I); H is the hue angle in
        radians, S and I in [0, 1] (values above 1 are clamped).
    returns: H x W x 3 uint8 array, channels in RGB order, values 0-255.
    """
    # Copy the planes: H is shifted in place per sector below, so the
    # caller's array must not be aliased (cv2.split also returned copies).
    H = hsi[:, :, 0].copy()
    S = hsi[:, :, 1].copy()
    I = hsi[:, :, 2].copy()
    x = H.shape[0]
    y = H.shape[1]
    # np.zeros (not np.empty) so a pixel that falls through every branch
    # (e.g. an out-of-range hue) yields black instead of uninitialized memory.
    R = np.zeros([x, y])
    G = np.zeros([x, y])
    B = np.zeros([x, y])
    r = np.zeros([x, y])
    g = np.zeros([x, y])
    b = np.zeros([x, y])
    for i in range(0, x):
        for j in range(0, y):
            # Clamp saturation and intensity to at most 1.
            if S[i, j] > 1:
                S[i, j] = 1
            if I[i, j] > 1:
                I[i, j] = 1
            if S[i, j] == 0:
                # Achromatic pixel: every channel equals the intensity.
                R[i, j] = I[i, j]
                G[i, j] = I[i, j]
                B[i, j] = I[i, j]
            else:
                # Chromaticity of the non-dominant channel in each sector.
                ums = (1 - S[i, j]) / 3
                if (H[i, j] >= 0) and (H[i, j] < np.radians(120)):
                    # RG sector (0-120 degrees).
                    b[i, j] = ums
                    r[i, j] = 1 / 3 * (1 + (S[i, j] * np.cos(H[i, j]) / np.cos(np.radians(60) - H[i, j])))
                    g[i, j] = 1 - r[i, j] - b[i, j]
                elif (H[i, j] >= np.radians(120)) and (H[i, j] < np.radians(240)):
                    # GB sector (120-240 degrees): rotate the hue back into the first sector.
                    H[i, j] -= np.radians(120)
                    r[i, j] = ums
                    g[i, j] = 1 / 3 * (1 + (S[i, j] * np.cos(H[i, j]) / np.cos(np.radians(60) - H[i, j])))
                    b[i, j] = 1 - r[i, j] - g[i, j]
                elif (H[i, j] >= np.radians(240)) and (H[i, j] < np.radians(360)):
                    # BR sector (240-360 degrees).
                    H[i, j] -= np.radians(240)
                    g[i, j] = ums
                    b[i, j] = 1 / 3 * (1 + (S[i, j] * np.cos(H[i, j]) / np.cos(np.radians(60) - H[i, j])))
                    r[i, j] = 1 - g[i, j] - b[i, j]
                else:
                    print("fuera de rango")
                    break
                # Clip rounding noise, then scale chromaticities back to channel values.
                if r[i, j] < 0:
                    r[i, j] = 0
                if g[i, j] < 0:
                    g[i, j] = 0
                if b[i, j] < 0:
                    b[i, j] = 0
                R[i, j] = 3 * I[i, j] * r[i, j]
                G[i, j] = 3 * I[i, j] * g[i, j]
                B[i, j] = 3 * I[i, j] * b[i, j]
                if R[i, j] > 1:
                    R[i, j] = 1
                if G[i, j] > 1:
                    G[i, j] = 1
                if B[i, j] > 1:
                    B[i, j] = 1
    # Reassemble the planes (same as cv2.merge) and scale to 8-bit RGB.
    rgb = np.dstack([R, G, B]) * 255
    return rgb.astype(np.uint8)
# Original image (OpenCV loads BGR; convert to RGB for display/processing).
imagen = cv2.imread("img/5.jpg")
mi_rgb= cv2.cvtColor(imagen, cv2.COLOR_BGR2RGB)
plt.imshow(mi_rgb)
# Perform the conversion
mi_hsi= rgb2hsi(mi_rgb) # RGB to HSI
h,s,i = cv2.split(mi_hsi)
# Recolor: replace hues in the band [640/179, 822/179] rad (~205-263 deg, blue)
# with 30/179 rad (~10 deg, red-orange).
h=np.where((h>=640/179) & (h<=822/179),30/179,h)
mi_hsi=cv2.merge([h,s,i])
n_rgb= hsi2rgb(mi_hsi) # HSI to RGB
plt.imshow(n_rgb)
# Save (convert back to BGR for cv2.imwrite)
final=cv2.cvtColor(n_rgb, cv2.COLOR_RGB2BGR)
cv2.imwrite('result/hsi.png',final)
# Apply the same recoloring to a segment of a video, preserving fps and frame size.
video= cv2.VideoCapture('video/tafirol.mp4')
fps=video.get(cv2.CAP_PROP_FPS)
codec =cv2.VideoWriter_fourcc(*'XVID')
size = (int(video.get(cv2.CAP_PROP_FRAME_WIDTH)),
        int(video.get(cv2.CAP_PROP_FRAME_HEIGHT)))
nuevoVideo= cv2.VideoWriter('result/hsi.mp4',codec,fps,size)
i=0
success, frame2 = video.read()
while success:
  i+=1
  # Recolor only frames 53-223 of the video.
  # NOTE(review): frames outside that range are never written to the output
  # video -- confirm that dropping them (rather than copying) is intended.
  if (i>=53) & (i<224):
    print(i,"trabajando")
    mi_rgb= cv2.cvtColor(frame2, cv2.COLOR_BGR2RGB)
    mi_hsi= rgb2hsi(mi_rgb) # RGB to HSI
    h,s,j = cv2.split(mi_hsi)
    h=np.where((h>=640/179) & (h<=822/179),30/179,h)
    nuevo_hsi= cv2.merge([h,s,j])
    nuevo_rgb= hsi2rgb(nuevo_hsi)
    nuevo_bgr= cv2.cvtColor(nuevo_rgb, cv2.COLOR_RGB2BGR)
    nuevoVideo.write(nuevo_bgr)
  success, frame2 = video.read()
video.release()
nuevoVideo.release()
```
| github_jupyter |
<a href="https://colab.research.google.com/github/andrewm4894/colabs/blob/master/some_json_wrangling.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
import pandas as pd
data = [{"event_date":"20201107","event_timestamp":"1604801718108000","event_name":"session_start","event_params":[{"key":"firebase_event_origin","value":{"string_value":"auto"}},{"key":"ga_session_id","value":{"int_value":"1604801718"}},{"key":"engaged_session_event","value":{"int_value":"1"}},{"key":"session_engaged","value":{"int_value":"1"}},{"key":"ga_session_number","value":{"int_value":"8"}}],"event_previous_timestamp":"1604785674744000","event_bundle_sequence_id":"13","event_server_timestamp_offset":"754702","user_pseudo_id":"cb052c8ce7b261aecf783ce043089fb3","user_properties":[{"key":"ga_session_id","value":{"int_value":"1604801718","set_timestamp_micros":"1604801718108000"}},{"key":"first_open_time","value":{"int_value":"1562979600000","set_timestamp_micros":"1562977643627000"}},{"key":"ga_session_number","value":{"int_value":"8","set_timestamp_micros":"1604801718108000"}}],"user_first_touch_timestamp":"1562977643627000","device":{"category":"mobile","mobile_brand_name":"Google","mobile_model_name":"Pixel","mobile_os_hardware_model":"Pixel","operating_system":"ANDROID","operating_system_version":"10","language":"en-us","is_limited_ad_tracking":"No","time_zone_offset_seconds":"-21600"},"geo":{"continent":"Americas","country":"United States","region":"Texas","city":"Austin","sub_continent":"Northern America","metro":"(not set)"},"app_info":{"id":"org.livingletter.hymnal","version":"1.1.7","firebase_app_id":"1:76837103840:android:e1d753a7fbfeeaac","install_source":"com.android.vending"},"traffic_source":{"medium":"organic","source":"google-play"},"stream_id":"1440534155","platform":"ANDROID","items":[]},
{"event_date":"20201107","event_timestamp":"1604785674744000","event_name":"session_start","event_params":[{"key":"ga_session_number","value":{"int_value":"7"}},{"key":"firebase_event_origin","value":{"string_value":"auto"}},{"key":"session_engaged","value":{"int_value":"1"}},{"key":"engaged_session_event","value":{"int_value":"1"}},{"key":"ga_session_id","value":{"int_value":"1604785674"}}],"event_previous_timestamp":"1604680637311000","event_bundle_sequence_id":"12","event_server_timestamp_offset":"718754","user_pseudo_id":"cb052c8ce7b261aecf783ce043089fb3","user_properties":[{"key":"ga_session_number","value":{"int_value":"7","set_timestamp_micros":"1604785674744000"}},{"key":"ga_session_id","value":{"int_value":"1604785674","set_timestamp_micros":"1604785674744000"}},{"key":"first_open_time","value":{"int_value":"1562979600000","set_timestamp_micros":"1562977643627000"}}],"user_first_touch_timestamp":"1562977643627000","device":{"category":"mobile","mobile_brand_name":"Google","mobile_model_name":"Pixel","mobile_os_hardware_model":"Pixel","operating_system":"ANDROID","operating_system_version":"10","language":"en-us","is_limited_ad_tracking":"No","time_zone_offset_seconds":"-21600"},"geo":{"continent":"Americas","country":"United States","region":"Texas","city":"Austin","sub_continent":"Northern America","metro":"(not set)"},"app_info":{"id":"org.livingletter.hymnal","version":"1.1.7","firebase_app_id":"1:76837103840:android:e1d753a7fbfeeaac","install_source":"com.android.vending"},"traffic_source":{"medium":"organic","source":"google-play"},"stream_id":"1440534155","platform":"ANDROID","items":[]}]
# Flatten each event's `event_params` list into one row per parameter,
# carrying selected event-level fields along as metadata columns.
df = pd.json_normalize(
    data,
    record_path='event_params',
    record_prefix='event_params_',
    meta=['event_date', 'event_timestamp', 'event_name','device']
)
# `device` is still a nested dict after the first pass: normalize it into
# its own `device_*` columns, attach them, and drop the original column.
df_device = pd.json_normalize(df["device"]).add_prefix('device_')
df = pd.concat([df,df_device], axis=1)
del df['device']
display(df)
```
| github_jupyter |
## Analyze A/B Test Results
This project will assure you have mastered the subjects covered in the statistics lessons. The hope is to have this project be as comprehensive of these topics as possible. Good luck!
## Table of Contents
- [Introduction](#intro)
- [Part I - Probability](#probability)
- [Part II - A/B Test](#ab_test)
- [Part III - Regression](#regression)
<a id='intro'></a>
### Introduction
A/B tests are very commonly performed by data analysts and data scientists. It is important that you get some practice working with the difficulties of these experiments.
For this project, you will be working to understand the results of an A/B test run by an e-commerce website. Your goal is to work through this notebook to help the company understand if they should implement the new page, keep the old page, or perhaps run the experiment longer to make their decision.
**As you work through this notebook, follow along in the classroom and answer the corresponding quiz questions associated with each question.** The labels for each classroom concept are provided for each question. This will assure you are on the right track as you work through the project, and you can feel more confident in your final submission meeting the criteria. As a final check, assure you meet all the criteria on the [RUBRIC](https://review.udacity.com/#!/projects/37e27304-ad47-4eb0-a1ab-8c12f60e43d0/rubric).
<a id='probability'></a>
#### Part I - Probability
To get started, let's import our libraries.
```
import pandas as pd
import numpy as np
import random
import matplotlib.pyplot as plt
%matplotlib inline
#We are setting the seed to assure you get the same answers on quizzes as we set up
random.seed(42)
```
`1.` Now, read in the `ab_data.csv` data. Store it in `df`. **Use your dataframe to answer the questions in Quiz 1 of the classroom.**
a. Read in the dataset and take a look at the top few rows here:
b. Use the below cell to find the number of rows in the dataset.
c. The number of unique users in the dataset.
d. The proportion of users converted.
e. The number of times the `new_page` and `treatment` don't line up.
f. Do any of the rows have missing values?
`2.` For the rows where **treatment** is not aligned with **new_page** or **control** is not aligned with **old_page**, we cannot be sure if this row truly received the new or old page. Use **Quiz 2** in the classroom to provide how we should handle these rows.
a. Now use the answer to the quiz to create a new dataset that meets the specifications from the quiz. Store your new dataframe in **df2**.
```
# Double Check all of the correct rows were removed - this should be 0
# Every row must have (group == 'treatment') agree with (landing_page == 'new_page');
# this counts the rows where the two conditions disagree.
df2[((df2['group'] == 'treatment') == (df2['landing_page'] == 'new_page')) == False].shape[0]
```
`3.` Use **df2** and the cells below to answer questions for **Quiz3** in the classroom.
a. How many unique **user_id**s are in **df2**?
b. There is one **user_id** repeated in **df2**. What is it?
c. What is the row information for the repeat **user_id**?
d. Remove **one** of the rows with a duplicate **user_id**, but keep your dataframe as **df2**.
`4.` Use **df2** in the below cells to answer the quiz questions related to **Quiz 4** in the classroom.
a. What is the probability of an individual converting regardless of the page they receive?
b. Given that an individual was in the `control` group, what is the probability they converted?
c. Given that an individual was in the `treatment` group, what is the probability they converted?
d. What is the probability that an individual received the new page?
e. Consider your results from a. through d. above, and explain below whether you think there is sufficient evidence to say that the new treatment page leads to more conversions.
**Your answer goes here.**
<a id='ab_test'></a>
### Part II - A/B Test
Notice that because of the time stamp associated with each event, you could technically run a hypothesis test continuously as each observation was observed.
However, then the hard question is do you stop as soon as one page is considered significantly better than another or does it need to happen consistently for a certain amount of time? How long do you run to render a decision that neither page is better than another?
These questions are the difficult parts associated with A/B tests in general.
`1.` For now, consider you need to make the decision just based on all the data provided. If you want to assume that the old page is better unless the new page proves to be definitely better at a Type I error rate of 5%, what should your null and alternative hypotheses be? You can state your hypothesis in terms of words or in terms of **$p_{old}$** and **$p_{new}$**, which are the converted rates for the old and new pages.
**Put your answer here.**
`2.` Assume under the null hypothesis, $p_{new}$ and $p_{old}$ both have "true" success rates equal to the **converted** success rate regardless of page - that is $p_{new}$ and $p_{old}$ are equal. Furthermore, assume they are equal to the **converted** rate in **ab_data.csv** regardless of the page. <br><br>
Use a sample size for each page equal to the ones in **ab_data.csv**. <br><br>
Perform the sampling distribution for the difference in **converted** between the two pages over 10,000 iterations of calculating an estimate from the null. <br><br>
Use the cells below to provide the necessary parts of this simulation. If this doesn't make complete sense right now, don't worry - you are going to work through the problems below to complete this problem. You can use **Quiz 5** in the classroom to make sure you are on the right track.<br><br>
a. What is the **convert rate** for $p_{new}$ under the null?
b. What is the **convert rate** for $p_{old}$ under the null? <br><br>
c. What is $n_{new}$?
d. What is $n_{old}$?
e. Simulate $n_{new}$ transactions with a convert rate of $p_{new}$ under the null. Store these $n_{new}$ 1's and 0's in **new_page_converted**.
f. Simulate $n_{old}$ transactions with a convert rate of $p_{old}$ under the null. Store these $n_{old}$ 1's and 0's in **old_page_converted**.
g. Find $p_{new}$ - $p_{old}$ for your simulated values from part (e) and (f).
h. Simulate 10,000 $p_{new}$ - $p_{old}$ values using this same process similarly to the one you calculated in parts **a. through g.** above. Store all 10,000 values in a numpy array called **p_diffs**.
i. Plot a histogram of the **p_diffs**. Does this plot look like what you expected? Use the matching problem in the classroom to assure you fully understand what was computed here.
j. What proportion of the **p_diffs** are greater than the actual difference observed in **ab_data.csv**?
k. In words, explain what you just computed in part **j.** What is this value called in scientific studies? What does this value mean in terms of whether or not there is a difference between the new and old pages?
**Put your answer here.**
l. We could also use a built-in to achieve similar results. Though using the built-in might be easier to code, the above portions are a walkthrough of the ideas that are critical to correctly thinking about statistical significance. Fill in the below to calculate the number of conversions for each page, as well as the number of individuals who received each page. Let `n_old` and `n_new` refer the the number of rows associated with the old page and new pages, respectively.
```
import statsmodels.api as sm
convert_old =
convert_new =
n_old =
n_new =
```
m. Now use `stats.proportions_ztest` to compute your test statistic and p-value. [Here](http://knowledgetack.com/python/statsmodels/proportions_ztest/) is a helpful link on using the built in.
n. What do the z-score and p-value you computed in the previous question mean for the conversion rates of the old and new pages? Do they agree with the findings in parts **j.** and **k.**?
**Put your answer here.**
<a id='regression'></a>
### Part III - A regression approach
`1.` In this final part, you will see that the result you achieved in the previous A/B test can also be achieved by performing regression.<br><br>
a. Since each row is either a conversion or no conversion, what type of regression should you be performing in this case?
**Put your answer here.**
b. The goal is to use **statsmodels** to fit the regression model you specified in part **a.** to see if there is a significant difference in conversion based on which page a customer receives. However, you first need to create a column for the intercept, and create a dummy variable column for which page each user received. Add an **intercept** column, as well as an **ab_page** column, which is 1 when an individual receives the **treatment** and 0 if **control**.
c. Use **statsmodels** to import your regression model. Instantiate the model, and fit the model using the two columns you created in part **b.** to predict whether or not an individual converts.
d. Provide the summary of your model below, and use it as necessary to answer the following questions.
e. What is the p-value associated with **ab_page**? Why does it differ from the value you found in **Part II**?<br><br> **Hint**: What are the null and alternative hypotheses associated with your regression model, and how do they compare to the null and alternative hypotheses in the **Part II**?
**Put your answer here.**
f. Now, you are considering other things that might influence whether or not an individual converts. Discuss why it is a good idea to consider other factors to add into your regression model. Are there any disadvantages to adding additional terms into your regression model?
**Put your answer here.**
g. Now along with testing if the conversion rate changes for different pages, also add an effect based on which country a user lives. You will need to read in the **countries.csv** dataset and merge together your datasets on the appropriate rows. [Here](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.join.html) are the docs for joining tables.
Does it appear that country had an impact on conversion? Don't forget to create dummy variables for these country columns - **Hint: You will need two columns for the three dummy variables.** Provide the statistical output as well as a written response to answer this question.
```
countries_df = pd.read_csv('./countries.csv')
# Inner join on user_id keeps only users present in both tables.
df_new = countries_df.set_index('user_id').join(df2.set_index('user_id'), how='inner')
### Create the necessary dummy variables
```
h. Though you have now looked at the individual factors of country and page on conversion, we would now like to look at an interaction between page and country to see if there significant effects on conversion. Create the necessary additional columns, and fit the new model.
Provide the summary results, and your conclusions based on the results.
```
### Fit Your Linear Model And Obtain the Results
```
<a id='conclusions'></a>
## Conclusions
Congratulations on completing the project!
### Gather Submission Materials
Once you are satisfied with the status of your Notebook, you should save it in a format that will make it easy for others to read. You can use the __File -> Download as -> HTML (.html)__ menu to save your notebook as an .html file. If you are working locally and get an error about "No module name", then open a terminal and try installing the missing module using `pip install <module_name>` (don't include the "<" or ">" or any words following a period in the module name).
You will submit both your original Notebook and an HTML or PDF copy of the Notebook for review. There is no need for you to include any data files with your submission. If you made reference to other websites, books, and other resources to help you in solving tasks in the project, make sure that you document them. It is recommended that you either add a "Resources" section in a Markdown cell at the end of the Notebook report, or you can include a `readme.txt` file documenting your sources.
### Submit the Project
When you're ready, click on the "Submit Project" button to go to the project submission page. You can submit your files as a .zip archive or you can link to a GitHub repository containing your project files. If you go with GitHub, note that your submission will be a snapshot of the linked repository at time of submission. It is recommended that you keep each project in a separate repository to avoid any potential confusion: if a reviewer gets multiple folders representing multiple projects, there might be confusion regarding what project is to be evaluated.
It can take us up to a week to grade the project, but in most cases it is much faster. You will get an email once your submission has been reviewed. If you are having any problems submitting your project or wish to check on the status of your submission, please email us at dataanalyst-project@udacity.com. In the meantime, you should feel free to continue on with your learning journey by beginning the next module in the program.
| github_jupyter |
# Modeling and Simulation in Python
Chapter 18
Copyright 2017 Allen Downey
License: [Creative Commons Attribution 4.0 International](https://creativecommons.org/licenses/by/4.0)
```
# Configure Jupyter so figures appear in the notebook
%matplotlib inline
# Configure Jupyter to display the assigned value after an assignment
%config InteractiveShell.ast_node_interactivity='last_expr_or_assign'
# import functions from the modsim.py module
from modsim import *
```
### Code from the previous chapter
Read the data.
```
# Load the glucose/insulin measurements indexed by time (minutes);
# the trailing semicolon suppresses the notebook's automatic echo.
data = pd.read_csv('data/glucose_insulin.csv', index_col='time');
```
Interpolate the insulin data.
```
# Interpolate the insulin measurements so I(t) can be evaluated at any time t.
I = interpolate(data.insulin)
```
Initialize the parameters
```
# Hand-picked initial parameter values for the glucose minimal model
# (refined by the optimization further down).
G0 = 290
k1 = 0.03
k2 = 0.02
k3 = 1e-05
```
To estimate basal levels, we'll use the concentrations at `t=0`.
```
# Basal (baseline) concentrations, taken from the measurements at t=0.
Gb = data.glucose[0]
Ib = data.insulin[0]
```
Create the initial conditions.
```
# Initial state: glucose at G0, the second state variable X at zero.
init = State(G=G0, X=0)
```
Make the `System` object.
```
# Simulation time span comes from the first/last labels of the data index.
t_0 = get_first_label(data)
t_end = get_last_label(data)
# dt=2: advance the simulation in 2-minute steps.
system = System(init=init,
                k1=k1, k2=k2, k3=k3,
                I=I, Gb=Gb, Ib=Ib,
                t_0=t_0, t_end=t_end, dt=2)
def update_func(state, t, system):
    """Updates the glucose minimal model by one time step (Euler).

    state: State object with variables G and X
    t: time in min
    system: System object
    returns: State object advanced by system.dt
    """
    G, X = state
    # unpack makes the System attributes (k1, k2, k3, I, Gb, Ib, dt)
    # available as local names below.
    unpack(system)
    # Minimal-model derivatives: G decays toward basal Gb and is removed
    # in proportion to X*G; X is driven by insulin above basal Ib.
    dGdt = -k1 * (G - Gb) - X*G
    dXdt = k3 * (I(t) - Ib) - k2 * X
    # Forward Euler step.
    G += dGdt * dt
    X += dXdt * dt
    return State(G=G, X=X)
def run_simulation(system, update_func):
    """Runs a simulation of the system.

    system: System object (provides init, t_0, t_end, dt via unpack)
    update_func: function that updates state
    returns: TimeFrame with one row per time step
    """
    unpack(system)
    # One column per state variable; the first row is the initial condition.
    frame = TimeFrame(columns=init.index)
    frame.row[t_0] = init
    ts = linrange(t_0, t_end, dt)
    # Each new row is computed from the previous one.
    for t in ts:
        frame.row[t+dt] = update_func(frame.row[t], t, system)
    return frame
%time results = run_simulation(system, update_func);
```
### Numerical solution
In the previous chapter, we approximated the differential equations with difference equations, and solved them using `run_simulation`.
In this chapter, we solve the differential equation numerically using `run_ode_solver`, which is a wrapper for the SciPy ODE solver.
Instead of an update function, we provide a slope function that evaluates the right-hand side of the differential equations. We don't have to do the update part; the solver does it for us.
```
def slope_func(state, t, system):
    """Computes derivatives of the glucose minimal model.

    Same right-hand side as update_func, but returns the derivatives for
    the ODE solver instead of stepping the state itself.

    state: State object with variables G and X
    t: time in min
    system: System object
    returns: derivatives of G and X
    """
    G, X = state
    # unpack makes the System attributes (k1, k2, k3, I, Gb, Ib)
    # available as local names below.
    unpack(system)
    dGdt = -k1 * (G - Gb) - X*G
    dXdt = k3 * (I(t) - Ib) - k2 * X
    return dGdt, dXdt
```
We can test the slope function with the initial conditions.
```
slope_func(init, 0, system)
```
Here's how we run the ODE solver.
```
%time results2, details = run_ode_solver(system, slope_func, t_eval=data.index);
```
`details` is a `ModSimSeries` object with information about how the solver worked.
```
details
```
`results` is a `TimeFrame` with one row for each time step and one column for each state variable:
```
results2
```
Plotting the results from `run_simulation` and `run_ode_solver`, we can see that they are not very different.
```
plot(results.G, 'g-')
plot(results2.G, 'b-')
plot(data.glucose, 'bo')
```
The differences in `G` are less than 1%.
```
diff = results.G - results2.G
percent_diff = diff / results2.G * 100
percent_diff.dropna()
```
### Optimization
Now let's find the parameters that yield the best fit for the data.
We'll use these values as an initial estimate and iteratively improve them.
```
# Starting guess for the optimizer; same values as the hand-tuned run above.
params = Params(G0 = 290,
                k1 = 0.03,
                k2 = 0.02,
                k3 = 1e-05)
```
`make_system` takes the parameters and actual data and returns a `System` object.
```
def make_system(params, data):
    """Build a System object from model parameters and measured data.

    params: sequence of G0, k1, k2, k3
    data: DataFrame with `glucose` and `insulin` columns
    returns: System object
    """
    G0, k1, k2, k3 = params
    # Basal levels come from the first measurement (t=0).
    basal_glucose = data.glucose[0]
    basal_insulin = data.insulin[0]
    return System(G0=G0, k1=k1, k2=k2, k3=k3,
                  init=State(G=G0, X=0),
                  Gb=basal_glucose, Ib=basal_insulin,
                  t_0=get_first_label(data),
                  t_end=get_last_label(data))
system = make_system(params, data)
```
`error_func` takes the parameters and actual data, makes a `System` object, and runs `odeint`, then compares the results to the data. It returns an array of errors.
```
def error_func(params, data):
    """Compute the array of residuals to be minimized.

    params: sequence of parameters
    data: DataFrame of values to be matched
    returns: array of errors (simulated glucose minus measured glucose)
    """
    # Echo the candidate parameters so the optimizer's progress is visible.
    print(params)
    # Build a System from the candidate parameters and solve the ODE,
    # sampling the solution at the measurement times.
    candidate = make_system(params, data)
    results, details = run_ode_solver(candidate, slope_func, t_eval=data.index)
    # Residuals between the model output and the observations.
    return results.G - data.glucose
```
When we call `error_func`, we provide a sequence of parameters as a single object.
Here's how that works:
```
error_func(params, data)
```
`fit_leastsq` is a wrapper for `scipy.optimize.leastsq`
Here's how we call it.
```
best_params, fit_details = fit_leastsq(error_func, params, data)
```
The first return value is a `Params` object with the best parameters:
```
best_params
```
The second return value is a `ModSimSeries` object with information about the results.
```
fit_details
fit_details
```
Now that we have `best_params`, we can use it to make a `System` object and run it.
```
system = make_system(best_params, data)
results, details = run_ode_solver(system, slope_func, t_eval=data.index)
details.message
```
Here are the results, along with the data. The first few points of the model don't fit the data, but we don't expect them to.
```
plot(results.G, label='simulation')
plot(data.glucose, 'bo', label='glucose data')
decorate(xlabel='Time (min)',
ylabel='Concentration (mg/dL)')
savefig('figs/chap08-fig04.pdf')
```
### Interpreting parameters
Based on the parameters of the model, we can estimate glucose effectiveness and insulin sensitivity.
```
def indices(params):
    """Compute glucose effectiveness and insulin sensitivity.

    Glucose effectiveness S_G is k1; insulin sensitivity S_I is k3/k2.
    (The stale `data` parameter in the old docstring was removed: this
    function takes only `params`.)

    params: sequence of G0, k1, k2, k3
    returns: State object containing S_G and S_I
    """
    G0, k1, k2, k3 = params
    return State(S_G=k1, S_I=k3/k2)
```
Here are the results.
```
indices(best_params)
```
### Under the hood
Here's the source code for `run_ode_solver` and `fit_leastsq`, if you'd like to know how they work.
```
%psource run_ode_solver
%psource fit_leastsq
```
## Exercises
**Exercise:** Since we don't expect the first few points to agree, it's probably better not to make them part of the optimization process. We can ignore them by leaving them out of the `Series` returned by `error_func`. Modify the last line of `error_func` to return `errors.loc[8:]`, which includes only the elements of the `Series` from `t=8` and up.
Does that improve the quality of the fit? Does it change the best parameters by much?
Note: You can read more about this use of `loc` [in the Pandas documentation](https://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-integer).
**Exercise:** How sensitive are the results to the starting guess for the parameters. If you try different values for the starting guess, do we get the same values for the best parameters?
**Related reading:** You might be interested in this article about [people making a DIY artificial pancreas](https://www.bloomberg.com/news/features/2018-08-08/the-250-biohack-that-s-revolutionizing-life-with-diabetes).
| github_jupyter |
```
%load_ext autoreload
%autoreload 2
import ambry
# Open the default ambry library and the geo-schemas bundle.
l = ambry.get_library()
b = l.bundle('d04w001') # Geoschemas
# Map each summary-level code to its human-readable description.
sumlevels_p = l.partition('census.gov-acs_geofile-schemas-2009e-sumlevels')
sumlevels = {}
for row in sumlevels_p.stream(as_dict=True):
    sumlevels[row['sumlevel']] = row['description']
from collections import defaultdict, Counter
from geoid import base62_encode
# For each partition (one per summary level), infer where each column's
# value appears inside the geoid string and derive per-field widths.
collector = {}
geoids = {}
descriptions = {}
for p in b.partitions:
    #print "=====", p.identity.name
    l = {}
    # Columns after the first six (excluding name/geoid/memi) are treated
    # as geoid components: track (position Counter, max width) for each.
    for i, c in enumerate(p.table.columns):
        if i > 5 and c.name not in ('name','geoid', 'memi'):
            l[c.name] = [Counter(), 0]
            descriptions[c.name] = c.description
    # Sample at most 500 rows per partition.
    for i, row in enumerate(p.stream(as_dict=True)):
        if i >= 500:
            break
        geoid = row['geoid']
        for k in l:
            v = row[k]
            if not str(v).strip():
                continue
            try:
                # The index is not guaranteed to be found in the right position; it could be at the start of the
                # geoid, so we keep track of the most common place it is found
                idx = geoid.index(str(v))
                size = len(str(v))
                # Keep track of the right end position, not the start, since the end pos is independent of the length
                l[k][0][idx+size] += 1
                l[k][1] = max(l[k][1], size)
            except ValueError:
                pass
    # Turn the tallies into (field, start, size) tuples ordered by start position.
    ordered = []
    for k, v in l.items():
        most = v[0].most_common(1)
        if most:
            size = v[1]
            start = most[0][0] - size
            ordered.append((k, start, size))
    ordered = sorted(ordered, key = lambda r: r[1])
    #for e in ordered:
    #    print " ", e, len(base62_encode(10**e[2]))
    geoids[int(p.grain)] = ordered
    for e in ordered:
        collector[e[0]]=(e[2],len(base62_encode(10**e[2])) )
# Print out the lengths array
out = []
for k, v in collector.items():
    out.append('\'{}\': {}, # {}'.format(k, v[0], descriptions[k]))
print '\n'.join(sorted(out))
# Print, per summary level, the ordered list of field names.
for sl in sorted(geoids):
    ordered = geoids[sl]
    print str(sl)+':', str([ str(e[0]) for e in ordered ])+',', "#", sumlevels[sl]
from geoid import names, segments
# Invert names (value -> key) to detect summary levels that already have a name.
names_map = {v:k for k, v in names.items()}
seen = set()
for k, v in segments.items():
    if k in names_map:
        pass
    else:
        # Build an identifier by joining the segment parts with underscores
        # and lower-casing the first character.
        name = '_'.join( e for e in v)
        name = name[0].lower() + name[1:]
        # Disambiguate duplicates by appending the summary-level code.
        if name in seen:
            name += str(k)
        seen.add(name)
        print "'{}': {},".format(name, k)
%load_ext autoreload
%autoreload 2
from geoid.acs import AcsGeoid
# Sanity check: every geoid in the first 500 rows of each partition must
# parse as a valid ACS geoid; re-raise on the first failure.
for p in b.partitions:
    for i, row in enumerate(p.stream(as_dict=True)):
        if i >= 500:
            break
        geoid = row['geoid']
        try:
            AcsGeoid.parse(geoid)
        except Exception as e:
            print geoid, e
            raise
```
| github_jupyter |
```
import sys
from pathlib import Path
sys.path.append(str(Path.cwd().parent.parent))
import numpy as np
from kymatio.scattering2d.core.scattering2d import scattering2d
import matplotlib.pyplot as plt
import torch
import torchvision
from kymatio import Scattering2D
from PIL import Image
from IPython.display import display
from torchvision.transforms import *
#img = Image.open('/NOBACKUP/gauthiers/KTH/sample_a/wood/54a-scale_10_im_10_col.png')
# Load a sample image and resize it to 128x128 for the experiments below.
img = Image.open('/NOBACKUP/gauthiers/chest_xrays_preprocess/train/positive/MIDRC-RICORD-1C-SITE2-000216-21074-0.png')
rsz_transf = torchvision.transforms.Resize((128,128))
img = rsz_transf(img)
display(img)
```
Rotation
```
# RandomRotation normally samples an angle from [-degrees, degrees];
# overwriting .degrees with [45,45] pins the rotation to exactly 45 degrees.
transformation = torchvision.transforms.RandomRotation(degrees = 45)
transformation.degrees = [45,45]
img_rot2 = transformation(img)
display(img_rot2)
```
Blur
```
# Gaussian blur with a 3x3 kernel.
transformation = torchvision.transforms.GaussianBlur(3)
img_blur = transformation(img)
display(img_blur)
```
Perspective
```
# Random perspective warp with default parameters.
transformation = torchvision.transforms.RandomPerspective()
img_rdmPersp = transformation(img)
display(img_rdmPersp)
# p=1 makes the transform always fire; distortion_scale is then
# overridden to 0.9 for a much stronger warp.
transforms = torchvision.transforms.RandomPerspective(distortion_scale=0.5,p=1)
transforms.distortion_scale = 0.9
img_1 = transforms(img)
display(img_1)
# Pure shear (degrees=0 disables the rotation component).
transforms = torchvision.transforms.RandomAffine(degrees = 0, shear=90)
img_2 = transforms(img)
display(img_2)
```
À la Mallat
```
# Select GPU when available...
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# NOTE(review): ...but this line overrides the selection above and forces CPU.
device = torch.device('cpu')
import time
t0 = time.time()
# Function \tau in Mallat's. Deform the index u. The function is chosen arbitrary as an example.
tau = lambda u : (0.5*u[0]+0.3*u[1]**2,0.3*u[1])
# Deform the index u for all u of the image.
# NOTE(review): len(grid) is used for both axes, so this assumes a square grid.
tau_mat = lambda grid : torch.tensor([[tau(grid[i,j,:]) for j in range(len(grid))] for i in range(len(grid))],device = device)
# Vectorized variant of tau for a batch of points shaped (N, 2).
tauV = lambda u : torch.stack([0.5*u[:,0]+0.3*u[:,1]**2,0.3*u[:,1]]).T
# Deforms the image given a function \tau.
def diffeo(img,tau):
    """Apply the deformation x(u - tau(u)) to a PIL image; returns a PIL image."""
    # Image to tensor
    transf = torchvision.transforms.ToTensor()
    img = transf(img).unsqueeze(0).to(device)
    # Number of pixels. Suppose square image.
    dim = img.shape[-1]
    # Create a (dim x dim) matrix of 2d vectors. Each vector represents the normalized position in the grid.
    # Normalized means (-1,-1) is top left and (1,1) is bottom right.
    grid = torch.tensor([[[x,y] for x in torch.linspace(-1,1,dim,device = device)] for y in torch.linspace(-1,1,dim,device = device)],device = device)
    # Apply u-tau(u) in Mallat's.
    grid_transf = (grid - tau_mat(grid)).unsqueeze(0)
    # Apply x(u-tau(u)) by interpolating the image at the index points given by grid_transf.
    # NOTE(review): grid_sample is called without align_corners; recent PyTorch
    # versions warn and default to align_corners=False -- confirm that is the
    # intended sampling convention.
    img_transf = torch.nn.functional.grid_sample(img,grid_transf).squeeze(0)
    # Tensor to image
    transf = torchvision.transforms.ToPILImage()
    return transf(img_transf)
# Calculate the deformation size : sup |J_{tau}(u)| over u.
def deformation_size(tau):
# Set a precision. This is arbitrary.
precision = 128
# Create a (flatten) grid of points between (-1,-1) and (1,1). This is the same grid as in the previous
# function (but flatten), but it feels arbitrary also.
points = [torch.tensor([x,y],device = device) for x in torch.linspace(-1,1,precision,device = device) for y in torch.linspace(-1,1,precision,device = device)]
# Evaluate the Jacobian of tau in each of those points. Returns a tensor of precision^2 x 2 x 2, i.e.
# for each point in points the 2 x 2 jacobian. Is it necessary to compute on all points, or only on the
# boundary would be sufficient?
t1 = time.time()
jac = torch.stack(list(map(lambda point : torch.stack(torch.autograd.functional.jacobian(tau,point)), points)))
print("grad calc +", (time.time()-t1))
# Find the norm of those jacobians.
norm_jac = torch.linalg.matrix_norm(jac,ord=2,dim=(1, 2))
# Return the Jacobian with the biggest norm.
return torch.max(norm_jac)
# Apply the deformation to the demo image and measure its size.
img_diffeo = diffeo(img,tau)
display(img_diffeo)
deformation_size(tau)
print("full notebook +", (time.time()-t0))
# Sanity check: tau also accepts a (64, 2) batch because u[0]/u[1] then index rows.
tau(torch.randn((64,2)))
# --- Experiments comparing ways to batch per-point Jacobians into one tensor ---
points = [torch.tensor([0.,0.]),torch.tensor([1.,2.])]
# jacobian() on a tuple-valued tau returns a tuple of (2,) rows; stack -> (2, 2).
jac = torch.autograd.functional.jacobian(tau,points[0])
jac2 = torch.stack(jac)
jac = torch.autograd.functional.jacobian(tau,points[1])
jac3 = torch.stack(jac)
n = 0
# Variant 1: unsqueeze each 2x2 Jacobian and cat along a new leading dim -> (2, 2, 2).
jac4 = torch.cat([jac2.unsqueeze(n),jac3.unsqueeze(n)],dim = n)
print(jac2)
print(jac3)
print(jac4)
print(jac4.shape)
# Variant 2: same result in a single comprehension.
jac5 = torch.cat([torch.stack(torch.autograd.functional.jacobian(tau,point)).unsqueeze(0) for point in points], dim = 0)
print(jac5)
points = [torch.tensor([0.,0.]),torch.tensor([1.,2.])]
# Variant 3: stack a mapped list — the form used inside deformation_size.
jac = torch.stack(list(map(lambda point : torch.stack(torch.autograd.functional.jacobian(tau,point)), points)))
print(jac)
print(jac.shape)
points = [torch.tensor([0.,0.]),torch.tensor([1.,2.])]
# Variant 4: build along dim=2 instead, giving shape (2, 2, n_points).
jac = torch.cat([torch.cat([x.unsqueeze(1) for x in torch.autograd.functional.jacobian(tau,point)],dim =1).unsqueeze(2) for point in points],dim = 2)
print(jac)
print(jac.shape)
# --- Gallery of example deformations (each cell redefines tau, then warps img) ---
# Diagonal scaling: u -> (1-eps) * u, a uniform contraction toward the center.
eps = 0.3
tau = lambda u : (eps*u[0],eps*u[1])
display(diffeo(img,tau))
# Swapped coordinates: displacement mixes the two axes (shear-like).
eps = 0.3
tau = lambda u : (eps*u[1],eps*u[0])
display(diffeo(img,tau))
# Rank-1 displacement: both components share the same u0+u1 term.
eps = 0.3
tau = lambda u : (eps*(u[0]+u[1]),eps*(u[0]+u[1]))
display(diffeo(img,tau))
# Orthogonal-matrix-style linear map (sum / difference of coordinates).
eps = 0.3
tau = lambda u : (eps*(u[0]+u[1]),eps*(u[0]-u[1]))
display(diffeo(img,tau))
# Quadratic (conformal-looking) map: components of eps*(u0+i*u1)^2.
eps = 0.3
tau = lambda u : (eps*(u[0]**2+u[1]**2),eps*(2*u[0]*u[1]))
display(diffeo(img,tau))
# Same quadratic map with the second component negated (complex conjugate).
eps = 0.3
tau = lambda u : (eps*(u[0]**2+u[1]**2),-eps*(2*u[0]*u[1]))
display(diffeo(img,tau))
# Exponential warp; tau(0) = 0 so the center stays fixed.
eps = 0.3
tau = lambda u : (torch.exp(eps*u[0])-1,torch.exp(eps*u[1])-1)
display(diffeo(img,tau))
```
| github_jupyter |
# Basic training functionality
```
from fastai.basic_train import *
from fastai.gen_doc.nbdoc import *
from fastai.vision import *
from fastai.distributed import *
```
[`basic_train`](/basic_train.html#basic_train) wraps together the data (in a [`DataBunch`](/basic_data.html#DataBunch) object) with a pytorch model to define a [`Learner`](/basic_train.html#Learner) object. This is where the basic training loop is defined for the [`fit`](/basic_train.html#fit) function. The [`Learner`](/basic_train.html#Learner) object is the entry point of most of the [`Callback`](/callback.html#Callback) functions that will customize this training loop in different ways (and made available through the [`train`](/train.html#train) module), notably:
- [`Learner.lr_find`](/train.html#lr_find) will launch an LR range test that will help you select a good learning rate
- [`Learner.fit_one_cycle`](/train.html#fit_one_cycle) will launch a training using the 1cycle policy, to help you train your model fast.
- [`Learner.to_fp16`](/train.html#to_fp16) will convert your model in half precision and help you launch a training in mixed precision.
```
show_doc(Learner, title_level=2)
```
The main purpose of [`Learner`](/basic_train.html#Learner) is to train `model` using [`Learner.fit`](/basic_train.html#Learner.fit). After every epoch, all *metrics* will be printed, and will also be available to callbacks.
The default weight decay will be `wd`, which will be handled using the method from [Fixing Weight Decay Regularization in Adam](https://arxiv.org/abs/1711.05101) if `true_wd` is set (otherwise it's L2 regularization). If `bn_wd` is False then weight decay will be removed from batchnorm layers, as recommended in [Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour](https://arxiv.org/abs/1706.02677). You can ensure that batchnorm layer learnable params are trained even for frozen layer groups, by enabling `train_bn`.
To use [discriminative layer training](#Discriminative-layer-training) pass an [`nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) for each layer group to be optimized with different settings.
Any model files created will be saved in `path`/`model_dir`.
You can pass a list of [`callbacks`](/callbacks.html#callbacks) that you have already created, or (more commonly) simply pass a list of callback functions to `callback_fns` and each function will be called (passing `self`) on object initialization, with the results stored as callback objects. For a walk-through, see the [training overview](/training.html) page. You may also want to use an `application` to fit your model, e.g. using the [`create_cnn`](/vision.learner.html#create_cnn) method:
```
# Download (and cache) the 3-vs-7 MNIST sample, build a DataBunch from its
# folder layout, wrap a ResNet-18 CNN learner, and train for one epoch.
path = untar_data(URLs.MNIST_SAMPLE)
data = ImageDataBunch.from_folder(path)
learn = create_cnn(data, models.resnet18, metrics=accuracy)
learn.fit(1)
```
### Model fitting methods
```
show_doc(Learner.fit)
```
Uses [discriminative layer training](#Discriminative-layer-training) if multiple learning rates or weight decay values are passed. To control training behaviour, use the [`callback`](/callback.html#callback) system or one or more of the pre-defined [`callbacks`](/callbacks.html#callbacks).
```
show_doc(Learner.fit_one_cycle)
```
Uses the [`OneCycleScheduler`](/callbacks.one_cycle.html#OneCycleScheduler) callback.
```
show_doc(Learner.lr_find)
```
Runs the learning rate finder defined in [`LRFinder`](/callbacks.lr_finder.html#LRFinder), as discussed in [Cyclical Learning Rates for Training Neural Networks](https://arxiv.org/abs/1506.01186).
### See results
```
show_doc(Learner.get_preds)
show_doc(Learner.validate)
show_doc(Learner.show_results)
show_doc(Learner.predict)
show_doc(Learner.pred_batch)
show_doc(Learner.interpret, full_name='interpret')
jekyll_note('This function only works in the vision application.')
```
### Model summary
```
show_doc(Learner.summary)
```
### Test time augmentation
```
show_doc(Learner.TTA, full_name = 'TTA')
```
Applies Test Time Augmentation to `learn` on the dataset `ds_type`. We take the average of our regular predictions (with a weight `beta`) with the average of predictions obtained through augmented versions of the training set (with a weight `1-beta`). The transforms decided for the training set are applied with a few changes: `scale` controls the scale for zoom (which isn't random), and the cropping isn't random, but we make sure to get the four corners of the image. Flipping isn't random either, but is applied once on each of those corner images (so that makes 8 augmented versions total).
### Gradient clipping
```
show_doc(Learner.clip_grad)
```
### Mixed precision training
```
show_doc(Learner.to_fp16)
```
Uses the [`MixedPrecision`](/callbacks.fp16.html#MixedPrecision) callback to train in mixed precision (i.e. forward and backward passes using fp16, with weight updates using fp32), using all [NVIDIA recommendations](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html) for ensuring speed and accuracy.
```
show_doc(Learner.to_fp32)
```
### Distributed training
```
show_doc(Learner.distributed, full_name='distributed')
```
### Discriminative layer training
When fitting a model you can pass a list of learning rates (and/or weight decay amounts), which will apply a different rate to each *layer group* (i.e. the parameters of each module in `self.layer_groups`). See the [Universal Language Model Fine-tuning for Text Classification](https://arxiv.org/abs/1801.06146) paper for details and experimental results in NLP (we also frequently use them successfully in computer vision, but have not published a paper on this topic yet). When working with a [`Learner`](/basic_train.html#Learner) on which you've called `split`, you can set hyperparameters in four ways:
1. `param = [val1, val2 ..., valn]` (n = number of layer groups)
2. `param = val`
3. `param = slice(start,end)`
4. `param = slice(end)`
If we choose to set it in way 1, we must specify a number of values exactly equal to the number of layer groups. If we choose to set it in way 2, the chosen value will be repeated for all layer groups. See [`Learner.lr_range`](/basic_train.html#Learner.lr_range) for an explanation of the `slice` syntax used in ways 3 and 4.
Here's an example of how to use discriminative learning rates (note that you don't actually need to manually call [`Learner.split`](/basic_train.html#Learner.split) in this case, since fastai uses this exact function as the default split for `resnet18`; this is just to show how to customize it):
```
# creates 3 layer groups
learn.split(lambda m: (m[0][6], m[1]))
# only randomly initialized head now trainable
learn.freeze()
learn.fit_one_cycle(1)
# all layers now trainable
learn.unfreeze()
# optionally, separate LR and WD for each group
learn.fit_one_cycle(1, max_lr=(1e-4, 1e-3, 1e-2), wd=(1e-4,1e-4,1e-1))
show_doc(Learner.lr_range)
```
Rather than manually setting an LR for every group, it's often easier to use [`Learner.lr_range`](/basic_train.html#Learner.lr_range). This is a convenience method that returns one learning rate for each layer group. If you pass `slice(start,end)` then the first group's learning rate is `start`, the last is `end`, and the remaining are evenly geometrically spaced.
If you pass just `slice(end)` then the last group's learning rate is `end`, and all the other groups are `end/10`. For instance (for our learner that has 3 layer groups):
```
learn.lr_range(slice(1e-5,1e-3)), learn.lr_range(slice(1e-3))
show_doc(Learner.unfreeze)
```
Sets every layer group to *trainable* (i.e. `requires_grad=True`).
```
show_doc(Learner.freeze)
```
Sets every layer group except the last to *untrainable* (i.e. `requires_grad=False`).
```
show_doc(Learner.freeze_to)
show_doc(Learner.split)
```
A convenience method that sets `layer_groups` based on the result of [`split_model`](/torch_core.html#split_model). If `split_on` is a function, it calls that function and passes the result to [`split_model`](/torch_core.html#split_model) (see above for example).
### Saving and loading models
Simply call [`Learner.save`](/basic_train.html#Learner.save) and [`Learner.load`](/basic_train.html#Learner.load) to save and load models. Only the parameters are saved, not the actual architecture (so you'll need to create your model in the same way before loading weights back in). Models are saved to the `path`/`model_dir` directory.
```
show_doc(Learner.load)
show_doc(Learner.save)
```
### Deploying your model
When you are ready to put your model in production, export the minimal state of your [`Learner`](/basic_train.html#Learner) with
```
show_doc(Learner.export)
```
Then you can load it with the following function.
```
show_doc(load_learner)
```
You can find more information and multiple examples in [this tutorial](/tutorial.inference.html)
### Other methods
```
show_doc(Learner.init)
```
Initializes all weights (except batchnorm) using function `init`, which will often be from PyTorch's [`nn.init`](https://pytorch.org/docs/stable/nn.html#torch-nn-init) module.
```
show_doc(Learner.mixup)
```
Uses [`MixUpCallback`](/callbacks.mixup.html#MixUpCallback).
```
show_doc(Learner.backward)
show_doc(Learner.create_opt)
```
You generally won't need to call this yourself - it's used to create the [`optim`](https://pytorch.org/docs/stable/optim.html#module-torch.optim) optimizer before fitting the model.
```
show_doc(Learner.dl)
show_doc(Recorder, title_level=2)
```
A [`Learner`](/basic_train.html#Learner) creates a [`Recorder`](/basic_train.html#Recorder) object automatically - you do not need to explicitly pass it to `callback_fns` - because other callbacks rely on it being available. It stores the smoothed loss, hyperparameter values, and metrics for each batch, and provides plotting methods for each. Note that [`Learner`](/basic_train.html#Learner) automatically sets an attribute with the snake-cased name of each callback, so you can access this through `Learner.recorder`, as shown below.
### Plotting methods
```
show_doc(Recorder.plot)
```
This is mainly used with the learning rate finder, since it shows a scatterplot of loss vs learning rate.
```
learn = create_cnn(data, models.resnet18, metrics=accuracy)
learn.lr_find()
learn.recorder.plot()
show_doc(Recorder.plot_losses)
```
Note that validation losses are only calculated once per epoch, whereas training losses are calculated after every batch.
```
learn.fit_one_cycle(2)
learn.recorder.plot_losses()
show_doc(Recorder.plot_lr)
learn.recorder.plot_lr(show_moms=True)
show_doc(Recorder.plot_metrics)
```
Note that metrics are only collected at the end of each epoch, so you'll need to train at least two epochs to have anything to show here.
```
learn.recorder.plot_metrics()
```
### Callback methods
You don't call these yourself - they're called by fastai's [`Callback`](/callback.html#Callback) system automatically to enable the class's functionality.
```
show_doc(Recorder.on_backward_begin)
show_doc(Recorder.on_batch_begin)
show_doc(Recorder.on_epoch_end)
show_doc(Recorder.on_train_begin)
```
### Inner functions
The following functions are used along the way by the [`Recorder`](/basic_train.html#Recorder) or can be called by other callbacks.
```
show_doc(Recorder.add_metrics)
show_doc(Recorder.add_metric_names)
show_doc(Recorder.format_stats)
```
## Module functions
Generally you'll want to use a [`Learner`](/basic_train.html#Learner) to train your model, since they provide a lot of functionality and make things easier. However, for ultimate flexibility, you can call the same underlying functions that [`Learner`](/basic_train.html#Learner) calls behind the scenes:
```
show_doc(fit)
```
Note that you have to create the `Optimizer` yourself if you call this function, whereas [`Learn.fit`](/basic_train.html#fit) creates it for you automatically.
```
show_doc(train_epoch)
```
You won't generally need to call this yourself - it's what [`fit`](/basic_train.html#fit) calls for each epoch.
```
show_doc(validate)
```
This is what [`fit`](/basic_train.html#fit) calls after each epoch. You can call it if you want to run inference on a [`DataLoader`](https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader) manually.
```
show_doc(get_preds)
show_doc(loss_batch)
```
You won't generally need to call this yourself - it's what [`fit`](/basic_train.html#fit) and [`validate`](/basic_train.html#validate) call for each batch. It only does a backward pass if you set `opt`.
## Other classes
```
show_doc(LearnerCallback, title_level=3)
show_doc(RecordOnCPU, title_level=3)
```
## Undocumented Methods - Methods moved below this line will intentionally be hidden
```
show_doc(Learner.tta_only)
show_doc(Learner.TTA)
show_doc(RecordOnCPU.on_batch_begin)
```
## New Methods - Please document or move to the undocumented section
| github_jupyter |
```
# Load the whole book into a single string.
filename = 'metamorphosis_clean.txt'
# A context manager guarantees the handle is closed even if read() raises,
# and an explicit encoding avoids depending on the platform default.
with open(filename, 'rt', encoding='utf-8') as file:
    text = file.read()
```
### split by whitespace
```
# Load the text, then tokenize naively on any run of whitespace.
filename = 'metamorphosis_clean.txt'
with open(filename, 'rt', encoding='utf-8') as file:
    text = file.read()
# split into words by white space
words = text.split()
print(words[:100])
```
### convert to list of words and save again
Another approach might be to use the regex model (re) and split the document into words by selecting for strings of alphanumeric characters (a-z, A-Z, 0-9 and ‘_’).
```
# Load the text, then split on runs of non-word characters, i.e. keep only
# strings of alphanumerics and underscore (a-z, A-Z, 0-9, '_').
filename = 'metamorphosis_clean.txt'
with open(filename, 'rt', encoding='utf-8') as file:
    text = file.read()
# split based on words only
import re
words = re.split(r'\W+', text)
print(words[:100])
```
### Split by whitespace and remove punctuation
```
# Split on whitespace, then strip punctuation from each token with a single
# C-level translate pass (one maketrans table reused for every word).
import string
print(string.punctuation)
filename = 'metamorphosis_clean.txt'
with open(filename, 'rt', encoding='utf-8') as file:
    text = file.read()
# split into words by white space
words = text.split()
# remove punctuation from each word (string was already imported above;
# the original cell imported it twice)
table = str.maketrans('', '', string.punctuation)
stripped = [w.translate(table) for w in words]
print(stripped[:100])
```
### normalizing case (like lower, here)
```
# Load the text, split on whitespace, and normalize every token to lower case.
filename = 'metamorphosis_clean.txt'
with open(filename, 'rt', encoding='utf-8') as file:
    text = file.read()
# split into words by white space
words = text.split()
# convert to lower case
words = [word.lower() for word in words]
print(words[:100])
```
## NLTK installed
### split into sentences
```
# Load the text and split it into sentences with NLTK's Punkt tokenizer.
filename = 'metamorphosis_clean.txt'
with open(filename, 'rt', encoding='utf-8') as file:
    text = file.read()
# split into sentences
# then save each sentence to file, one per line
from nltk import sent_tokenize
sentences = sent_tokenize(text)
print(sentences[0])
```
### split into words
```
# load data
# NOTE(review): this cell reuses `text` loaded by an earlier cell — it does not
# re-read the file itself despite the "load data" comment.
# split into words with NLTK's tokenizer (handles punctuation, contractions, etc.)
from nltk.tokenize import word_tokenize
tokens = word_tokenize(text)
print(tokens[:100])
```
### Filter out punctuation
```
# load data
# NOTE(review): relies on `text` from an earlier cell, like the cell above.
# split into words
from nltk.tokenize import word_tokenize
tokens = word_tokenize(text)
# remove all tokens that are not alphabetic (drops punctuation AND numbers)
words = [word for word in tokens if word.isalpha()]
print(words[:100])
```
### filter out stop words (and pipeline)
```
# Full cleaning pipeline: tokenize -> lowercase -> strip punctuation ->
# keep alphabetic tokens -> drop English stop words.
# A stop word is a commonly used word (such as "the", "a", "an", "in").
filename = 'metamorphosis_clean.txt'
with open(filename, 'rt', encoding='utf-8') as file:
    text = file.read()
# split into words
from nltk.tokenize import word_tokenize
tokens = word_tokenize(text)
# convert to lower case
tokens = [w.lower() for w in tokens]
# remove punctuation from each word
import string
table = str.maketrans('', '', string.punctuation)
stripped = [w.translate(table) for w in tokens]
# remove remaining tokens that are not alphabetic
words = [word for word in stripped if word.isalpha()]
# filter out stop words — a set gives O(1) membership tests
from nltk.corpus import stopwords
stop_words = set(stopwords.words('english'))
words = [w for w in words if not w in stop_words]
print(words[:100])
```
### stem words
Stemming refers to the process of reducing each word to its root or base.
For example “fishing,” “fished,” “fisher” all reduce to the stem “fish.”
```
# Load the text, tokenize it, and reduce each token to its Porter stem
# (e.g. "fishing", "fished", "fisher" -> "fish").
filename = 'metamorphosis_clean.txt'
with open(filename, 'rt', encoding='utf-8') as file:
    text = file.read()
# split into words
from nltk.tokenize import word_tokenize
tokens = word_tokenize(text)
# stemming of words
from nltk.stem.porter import PorterStemmer
porter = PorterStemmer()
stemmed = [porter.stem(word) for word in tokens]
print(stemmed[:100])
```
Here is a short list of additional considerations when cleaning text:
Handling large documents and large collections of text documents that do not fit into memory.
Extracting text from markup like HTML, PDF, or other structured document formats.
Transliteration of characters from other languages into English.
Decoding Unicode characters into a normalized form, such as UTF8.
Handling of domain specific words, phrases, and acronyms.
Handling or removing numbers, such as dates and amounts.
Locating and correcting common typos and misspellings.
…
| github_jupyter |
```
import datetime as dt
import pandas as pd
# Lookup table mapping each of the 265 NY LocationIDs to its borough.
df = pd.read_csv("taxi_zone_lookup.csv")
df
# Map each borough (plus 'Unknown') to the list of its LocationIDs.
dfdict = {'EWR': [], 'Queens': [], 'Bronx': [], 'Manhattan': [], 'Staten Island': [], 'Brooklyn': [], 'Unknown': []}
# Iterate the two columns directly instead of positional indexing in
# `for i in range(len(df))` — same result, idiomatic and faster.
for borough, loc_id in zip(df["Borough"], df["LocationID"]):
    dfdict[borough].append(loc_id)
del df
# JANUARY
# Read only the three columns we need; parse both datetime columns up front.
date_columns = ['tpep_pickup_datetime', 'tpep_dropoff_datetime']
df1 = pd.read_csv('yellow_tripdata_2018-01.csv', usecols = ['tpep_pickup_datetime', 'tpep_dropoff_datetime', 'PULocationID'], parse_dates = date_columns)
df1
type(df1.tpep_pickup_datetime[0]) # verify the transformation for pickup datetime
type(df1.tpep_dropoff_datetime[0]) # verify the transformation for dropoff datetime
# Keep only trips whose pickup AND dropoff both fall in January 2018
# (the raw TLC files contain stray rows from other months/years).
keep = df1[df1['tpep_pickup_datetime'].dt.year == 2018]
del df1
keep = keep[keep['tpep_pickup_datetime'].dt.month == 1]
keep = keep[keep['tpep_dropoff_datetime'].dt.year == 2018]
keep = keep[keep['tpep_dropoff_datetime'].dt.month == 1]
# keep_loc: trip counts per pickup location; keep: trip counts per day of month.
date_time_col = keep['tpep_pickup_datetime']
location_col = keep['PULocationID']
keep_loc = location_col.groupby(location_col).count()
keep = date_time_col.groupby(date_time_col.dt.day).count()
keep
# Mean number of daily trips in January.
mean1 = keep.mean()
mean1
del keep
# Total number of trips for each borough.
borough_count = {'EWR': 0, 'Queens': 0, 'Bronx': 0, 'Manhattan': 0, 'Staten Island': 0, 'Brooklyn': 0, 'Unknown': 0}
# BUG FIX: the original loop was `for i in range(1, len(keep_loc))`, which
# silently skipped the first location's count. Iterating the Series' own
# (LocationID, count) pairs covers every entry and avoids the
# label-vs-position ambiguity of `keep_loc[LocationID]`.
for LocationID, count in keep_loc.items():
    for borough in dfdict:
        if LocationID in dfdict[borough]:
            borough_count[borough] = borough_count[borough] + int(count)
borough_count
del keep_loc
# Convert totals to daily means (January has 31 days).
for borough in borough_count:
    borough_count[borough] = borough_count[borough]/31
borough_count
# Convert list of boroughs and list of daily means to DataFrame.
df_borough = pd.DataFrame(list(borough_count.keys()), columns = ["Borough"])
df_means = pd.DataFrame(list(borough_count.values()), columns = ["Daily mean"])
merge = df_borough.join(df_means)
merge
# Bar chart of mean daily trips per borough.
barplot = merge.plot(x="Borough", y="Daily mean", kind='bar', figsize = (15, 6), fontsize = 12)
# FEBRUARY — same pipeline as January (see that cell for details).
date_columns = ['tpep_pickup_datetime', 'tpep_dropoff_datetime']
df2 = pd.read_csv('yellow_tripdata_2018-02.csv', usecols = ['tpep_pickup_datetime', 'tpep_dropoff_datetime', 'PULocationID'], parse_dates = date_columns)
df2
keep2 = df2[df2['tpep_pickup_datetime'].dt.year == 2018]
del df2
keep2 = keep2[keep2['tpep_pickup_datetime'].dt.month == 2]
keep2 = keep2[keep2['tpep_dropoff_datetime'].dt.year == 2018]
keep2 = keep2[keep2['tpep_dropoff_datetime'].dt.month == 2]
date_time_col = keep2['tpep_pickup_datetime']
location_col = keep2['PULocationID']
keep_loc = location_col.groupby(location_col).count()
keep2 = date_time_col.groupby(date_time_col.dt.day).count()
keep2
mean2 = keep2.mean()
mean2
del keep2
borough_count = {'EWR': 0, 'Queens': 0, 'Bronx': 0, 'Manhattan': 0, 'Staten Island': 0, 'Brooklyn': 0, 'Unknown': 0}
# BUG FIX: original `range(1, len(keep_loc))` dropped the first location's count.
for LocationID, count in keep_loc.items():
    for borough in dfdict:
        if LocationID in dfdict[borough]:
            borough_count[borough] = borough_count[borough] + int(count)
borough_count
del keep_loc
# February 2018 has 28 days.
for borough in borough_count:
    borough_count[borough] = borough_count[borough]/28
borough_count
df_borough = pd.DataFrame(list(borough_count.keys()), columns = ["Borough"])
df_means = pd.DataFrame(list(borough_count.values()), columns = ["Daily mean"])
merge = df_borough.join(df_means)
merge
# Plot an histogram
barplot = merge.plot(x="Borough", y="Daily mean", kind='bar', figsize = (15, 6), fontsize = 12)
# MARCH — same pipeline as January (see that cell for details).
date_columns = ['tpep_pickup_datetime', 'tpep_dropoff_datetime']
df3 = pd.read_csv('yellow_tripdata_2018-03.csv', usecols = ['tpep_pickup_datetime', 'tpep_dropoff_datetime', 'PULocationID'], parse_dates = date_columns)
df3
keep3 = df3[df3['tpep_pickup_datetime'].dt.year == 2018]
del df3
keep3 = keep3[keep3['tpep_pickup_datetime'].dt.month == 3]
keep3 = keep3[keep3['tpep_dropoff_datetime'].dt.year == 2018]
keep3 = keep3[keep3['tpep_dropoff_datetime'].dt.month == 3]
date_time_col = keep3['tpep_pickup_datetime']
location_col = keep3['PULocationID']
keep_loc = location_col.groupby(location_col).count()
keep3 = date_time_col.groupby(date_time_col.dt.day).count()
keep3
mean3 = keep3.mean()
mean3
del keep3
borough_count = {'EWR': 0, 'Queens': 0, 'Bronx': 0, 'Manhattan': 0, 'Staten Island': 0, 'Brooklyn': 0, 'Unknown': 0}
# BUG FIX: original `range(1, len(keep_loc))` dropped the first location's count.
for LocationID, count in keep_loc.items():
    for borough in dfdict:
        if LocationID in dfdict[borough]:
            borough_count[borough] = borough_count[borough] + int(count)
borough_count
del keep_loc
# March has 31 days.
for borough in borough_count:
    borough_count[borough] = borough_count[borough]/31
borough_count
df_borough = pd.DataFrame(list(borough_count.keys()), columns = ["Borough"])
df_means = pd.DataFrame(list(borough_count.values()), columns = ["Daily mean"])
merge = df_borough.join(df_means)
merge
# Plot an histogram
barplot = merge.plot(x="Borough", y="Daily mean", kind='bar', figsize = (15, 6), fontsize = 12)
# APRIL — same pipeline as January (see that cell for details).
date_columns = ['tpep_pickup_datetime', 'tpep_dropoff_datetime']
df4 = pd.read_csv('yellow_tripdata_2018-04.csv', usecols = ['tpep_pickup_datetime', 'tpep_dropoff_datetime', 'PULocationID'], parse_dates = date_columns)
df4
keep4 = df4[df4['tpep_pickup_datetime'].dt.year == 2018]
del df4
keep4 = keep4[keep4['tpep_pickup_datetime'].dt.month == 4]
keep4 = keep4[keep4['tpep_dropoff_datetime'].dt.year == 2018]
keep4 = keep4[keep4['tpep_dropoff_datetime'].dt.month == 4]
date_time_col = keep4['tpep_pickup_datetime']
location_col = keep4['PULocationID']
keep_loc = location_col.groupby(location_col).count()
keep4 = date_time_col.groupby(date_time_col.dt.day).count()
keep4
mean4 = keep4.mean()
mean4
del keep4
borough_count = {'EWR': 0, 'Queens': 0, 'Bronx': 0, 'Manhattan': 0, 'Staten Island': 0, 'Brooklyn': 0, 'Unknown': 0}
# BUG FIX: original `range(1, len(keep_loc))` dropped the first location's count.
for LocationID, count in keep_loc.items():
    for borough in dfdict:
        if LocationID in dfdict[borough]:
            borough_count[borough] = borough_count[borough] + int(count)
borough_count
del keep_loc
# April has 30 days.
for borough in borough_count:
    borough_count[borough] = borough_count[borough]/30
borough_count
df_borough = pd.DataFrame(list(borough_count.keys()), columns = ["Borough"])
df_means = pd.DataFrame(list(borough_count.values()), columns = ["Daily mean"])
merge = df_borough.join(df_means)
merge
# Plot an histogram
barplot = merge.plot(x="Borough", y="Daily mean", kind='bar', figsize = (15, 6), fontsize = 12)
# MAY — same pipeline as January (see that cell for details).
date_columns = ['tpep_pickup_datetime', 'tpep_dropoff_datetime']
df5 = pd.read_csv('yellow_tripdata_2018-05.csv', usecols = ['tpep_pickup_datetime', 'tpep_dropoff_datetime', 'PULocationID'], parse_dates = date_columns)
df5
keep5 = df5[df5['tpep_pickup_datetime'].dt.year == 2018]
del df5
keep5 = keep5[keep5['tpep_pickup_datetime'].dt.month == 5]
keep5 = keep5[keep5['tpep_dropoff_datetime'].dt.year == 2018]
keep5 = keep5[keep5['tpep_dropoff_datetime'].dt.month == 5]
date_time_col = keep5['tpep_pickup_datetime']
location_col = keep5['PULocationID']
keep_loc = location_col.groupby(location_col).count()
keep5 = date_time_col.groupby(date_time_col.dt.day).count()
keep5
mean5 = keep5.mean()
mean5
del keep5
borough_count = {'EWR': 0, 'Queens': 0, 'Bronx': 0, 'Manhattan': 0, 'Staten Island': 0, 'Brooklyn': 0, 'Unknown': 0}
# BUG FIX: original `range(1, len(keep_loc))` dropped the first location's count.
for LocationID, count in keep_loc.items():
    for borough in dfdict:
        if LocationID in dfdict[borough]:
            borough_count[borough] = borough_count[borough] + int(count)
borough_count
del keep_loc
# May has 31 days.
for borough in borough_count:
    borough_count[borough] = borough_count[borough]/31
borough_count
df_borough = pd.DataFrame(list(borough_count.keys()), columns = ["Borough"])
df_means = pd.DataFrame(list(borough_count.values()), columns = ["Daily mean"])
merge = df_borough.join(df_means)
merge
# Plot an histogram
barplot = merge.plot(x="Borough", y="Daily mean", kind='bar', figsize = (15, 6), fontsize = 12)
# JUNE — same pipeline as January (see that cell for details).
date_columns = ['tpep_pickup_datetime', 'tpep_dropoff_datetime']
df6 = pd.read_csv('yellow_tripdata_2018-06.csv', usecols = ['tpep_pickup_datetime', 'tpep_dropoff_datetime', 'PULocationID'], parse_dates = date_columns)
df6
keep6 = df6[df6['tpep_pickup_datetime'].dt.year == 2018]
del df6
keep6 = keep6[keep6['tpep_pickup_datetime'].dt.month == 6]
keep6 = keep6[keep6['tpep_dropoff_datetime'].dt.year == 2018]
keep6 = keep6[keep6['tpep_dropoff_datetime'].dt.month == 6]
date_time_col = keep6['tpep_pickup_datetime']
location_col = keep6['PULocationID']
keep_loc = location_col.groupby(location_col).count()
keep6 = date_time_col.groupby(date_time_col.dt.day).count()
keep6
mean6 = keep6.mean()
mean6
del keep6
borough_count = {'EWR': 0, 'Queens': 0, 'Bronx': 0, 'Manhattan': 0, 'Staten Island': 0, 'Brooklyn': 0, 'Unknown': 0}
# BUG FIX: original `range(1, len(keep_loc))` dropped the first location's count.
for LocationID, count in keep_loc.items():
    for borough in dfdict:
        if LocationID in dfdict[borough]:
            borough_count[borough] = borough_count[borough] + int(count)
borough_count
del keep_loc
# June has 30 days.
for borough in borough_count:
    borough_count[borough] = borough_count[borough]/30
borough_count
df_borough = pd.DataFrame(list(borough_count.keys()), columns = ["Borough"])
df_means = pd.DataFrame(list(borough_count.values()), columns = ["Daily mean"])
merge = df_borough.join(df_means)
merge
# Plot an histogram
barplot = merge.plot(x="Borough", y="Daily mean", kind='bar', figsize = (15, 6), fontsize = 12)
# OVERALL NY ANALYSIS
# Collect the six monthly city-wide daily means computed above.
l1 = [mean1, mean2, mean3, mean4, mean5, mean6]
l1
df_mean = pd.DataFrame(l1, columns = ['daily average'])
df_mean
l2 = ['january', 'february', 'march', 'april', 'may', 'june']
df_month = pd.DataFrame(l2, columns = ['month'])
df_month
# Side-by-side join works because both frames share the same default 0..5 index.
merge = df_month.join(df_mean)
merge
# Bar chart: average daily trips per month, January through June 2018.
barplot = merge.plot(x = 'month', y = 'daily average', kind = 'bar', figsize = (10, 6), fontsize = 12)
```
| github_jupyter |
# Todoist Data Analysis
This notebook processed the downloaded history of your todoist tasks. See [todoist_downloader.ipynb](https://github.com/markwk/qs_ledger/blob/master/todoist/todoist_downloader.ipynb) to export and download your task history from Todoist.
---
```
from datetime import date, datetime as dt, timedelta as td
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
%matplotlib inline
# supress warnings
import warnings
warnings.filterwarnings('ignore')
```
---
# General Data Analysis of Todoist Tasks
```
# import raw data (as exported by todoist_downloader.ipynb)
# NOTE(review): "todost" in the filenames looks like a typo, but it must match
# whatever the downloader actually wrote — confirm before renaming.
raw_tasks = pd.read_csv("data/todost-raw-tasks-completed.csv")
len(raw_tasks)
# import processed data (cleaned/augmented version of the raw export)
tasks = pd.read_csv("data/todost-tasks-completed.csv")
len(tasks)
```
----
### Simple Data Analysis: Completed Tasks Per Year
```
# Count completed tasks per calendar year.
year_data = tasks['year'].value_counts().sort_index()
# Chart Yearly Tasks Count
dataset = year_data
chart_title = 'Number of Tasks Completed Per Year'
plt.style.use('seaborn-darkgrid')
ax = dataset.plot.bar(figsize=(14, 5), rot=0, legend=False)
ax.set_ylabel('Tasks Completed')
ax.set_xlabel('')
ax.set_title(chart_title)
plt.show()
```
### Simple Data Analysis: Completed Tasks Per Month
```
# simple breakdown by month
totals_by_month = tasks['month'].value_counts().sort_index()
# Chart Monthly Tasks Count, restricted to the most recent 24 months
dataset = totals_by_month.tail(24)
chart_title = 'Monthly Number of Tasks Completed (Last 24 Months)'
plt.style.use('seaborn-darkgrid')
ax = dataset.plot.bar(figsize=(14, 5), rot=90, colormap='spring', stacked=True, legend=False)
ax.set_ylabel('Tasks Completed')
ax.set_xlabel('')
ax.set_title(chart_title)
plt.show()
```
------
### Simple Data Analysis: Completed Tasks by Day of Week
```
# Count completed tasks per day of week ('dow' column from the processed export).
totals_dow = tasks['dow'].value_counts().sort_index()
dataset = totals_dow
chart_title = 'Completed Tasks by Day of Week'
plt.style.use('seaborn-darkgrid')
ax = dataset.plot.bar(figsize=(14, 5), rot=0, colormap='autumn', stacked=True, legend=False)
ax.set_ylabel('# Completed')
ax.set_xlabel('')
ax.set_title(chart_title)
plt.show()
```
-----
### Simple Data Analysis: Completed Tasks by Hour of the Day
```
# Count completed tasks per hour of the day.
hour_counts = tasks['hour'].value_counts().sort_index()
ax = hour_counts.plot(kind='line', figsize=[10, 4], linewidth=4, alpha=1, marker='o', color='#6684c1',
                      markeredgecolor='#6684c1', markerfacecolor='w', markersize=8, markeredgewidth=2)
# Label each tick as 'HH:00'.
xlabels = hour_counts.index.map(lambda x: '{:02}:00'.format(x))
ax.set_xticks(range(len(xlabels)))
ax.set_xticklabels(xlabels, rotation=45, rotation_mode='anchor', ha='right')
# NOTE(review): ticks are positional (0..n-1) while the limits use hour values;
# these only line up when the hours present are contiguous from 0 — confirm.
ax.set_xlim((hour_counts.index[0], hour_counts.index[-1]))
ax.yaxis.grid(True)
# Leave some headroom above the tallest point.
hour_max = hour_counts.max()
ax.set_ylim((0, hour_max+20))
ax.set_ylabel('Number of Tasks')
ax.set_xlabel('', )
ax.set_title('Number of Tasks Completed per hour of the day', )
plt.show()
```
----
## Daily Count of Tasks Completed
```
# Count completed tasks per calendar date (used again in the summaries below).
daily_counts = tasks['date'].value_counts().sort_index()
# Plot only the most recent 30 days.
dataset = daily_counts.tail(30)
chart_title = 'Number of Tasks Completed per Day'
n_groups = len(dataset)
index = np.arange(n_groups)
ax = dataset.plot(kind='line', figsize=[12, 5], linewidth=4, alpha=1, marker='o', color='#6684c1',
                  markeredgecolor='#6684c1', markerfacecolor='w', markersize=8, markeredgewidth=2)
ax.yaxis.grid(True)
ax.xaxis.grid(True)
ax.set_xticks(index)
ax.set_ylabel('Tasks Completed Count')
# ax.set_xlabel('')
# One tick per day, labeled with the date string, rotated for readability.
plt.xticks(index, dataset.index, rotation=90)
ax.set_title(chart_title)
plt.show()
# Export the full (untruncated) daily series for downstream notebooks.
daily_counts.to_csv("data/todoist-daily-completed.csv", index=True)
```
-----
### Projects Breakdown
```
# Optionally pass a list of projects to exclude
exclude_proj = ['Project1', 'Project2']
tasks_data = tasks[~tasks.project_name.isin(exclude_proj)]
project_counts = tasks_data['project_name'].value_counts().sort_values(ascending=False)
# Chart Project Tasks
dataset = project_counts.sort_values(ascending=True).tail(15)
chart_title = 'Project Tasks Breakdown'
plt.style.use('seaborn-darkgrid')
ax = dataset.plot.barh(y='Hours', figsize=(8, 8), colormap='plasma', legend=False)
ax.set_ylabel('')
ax.set_xlabel('Task #')
ax.set_title(chart_title)
plt.show()
```
-----
## General Summary of Todoist Tasks
```
# Life-time Project Time Summary
print('====== Todoist Lifetime Summary ====== ')
print('Total Tasks Completed: {:,}'.format(len(tasks)))
daily_average = round(daily_counts.mean(),1)
print('Daily Task Average: {:,}'.format(daily_average))
print(' ')
print('Top 5 Days with Most Tasks Completed:')
for i, v in daily_counts.sort_values(ascending=False).head(5).items():
print(v, 'tasks on ', i)
```
------
# Year in Review
```
# Set Year
target_year = 2018
```
### Year: Top Projects
```
def yearly_top_projects_chart(year, exclude_projects=None):
    """Horizontal bar chart of the 10 projects with the most tasks completed in `year`.

    Args:
        year: calendar year used to filter the notebook-level `tasks` DataFrame.
        exclude_projects: optional list of project names to leave out.
    """
    year_data = tasks[tasks['year'] == year]
    # Optionally drop excluded projects.
    # BUG FIX: the original built the exclusion mask from the full `tasks`
    # frame (`~tasks.project_name...`) instead of the already-filtered
    # `year_data`, and computed `project_counts` twice. Also replaced the
    # mutable default argument (`=[]`) with None.
    if exclude_projects:
        year_data = year_data[~year_data.project_name.isin(exclude_projects)]
    project_counts = year_data['project_name'].value_counts().sort_values(ascending=False)
    # Chart Project Tasks (ascending so the biggest bar appears at the top).
    dataset = project_counts.sort_values(ascending=True).tail(10)
    chart_title = '{} Project Tasks Breakdown'.format(year)
    plt.style.use('seaborn-darkgrid')
    ax = dataset.plot.barh(y='Hours', figsize=(8, 8), colormap='plasma', legend=False)
    ax.set_ylabel('')
    ax.set_xlabel('Task #')
    ax.set_title(chart_title)
    plt.show()
# yearly_top_projects_chart(year=target_year, exclude_projects=['ProjectName', 'ProjectName2'])
yearly_top_projects_chart(year=target_year)
```
### Year: Day of Week Comparison
```
def yearly_dow_chart(year):
    """Bar chart of tasks completed per day of week for the given year.

    NOTE(review): assumes the year has completions on all 7 weekdays and that
    the sorted `dow` index is Monday-first; otherwise the label assignment
    misaligns — confirm against the processed export.
    """
    year_data = tasks[tasks['year'] == year]
    yearly_dow = year_data['dow'].value_counts().sort_index()
    days_of_week_list = ['Mon', 'Tues', 'Wed', 'Thurs', 'Friday', 'Sat', 'Sun']
    yearly_dow.index = days_of_week_list
    chart_title = '{} Tasks Completed by Day of Week | Yearly Total: {:,}'.format(year, yearly_dow.sum())
    plt.style.use('seaborn-darkgrid')
    ax = yearly_dow.plot.bar(stacked=True, rot=0, figsize=(12,4))
    ax.set_xlabel('')
    # BUG FIX: the y axis plots task counts, not hours; the original label
    # ('Hours') was copy-pasted from a time-tracking chart.
    ax.set_ylabel('Tasks Completed')
    ax.set_title(chart_title)
    plt.show()
yearly_dow_chart(year=target_year)
```
### Year: Monthly Tasks Completed Chart
```
def yearly_months_chart(year):
    """Bar chart of tasks completed per month for the given year.

    Relabels the sorted month index with short month names (assumes every
    month of the year appears in the data).
    """
    month_labels = ['Jan', 'Feb', 'March', 'April', 'May', 'June', 'July',
                    'Aug', 'Sept', 'Oct', 'Nov', 'Dec']
    monthly_counts = tasks[tasks['year'] == year]['month'].value_counts().sort_index()
    monthly_counts.index = month_labels
    plt.style.use('seaborn-darkgrid')
    axes = monthly_counts.plot.bar(figsize=(14, 5), rot=0, colormap='spring',
                                   stacked=True, legend=False)
    axes.set_title('Monthly Number of Tasks Completed')
    axes.set_xlabel('')
    axes.set_ylabel('Tasks Completed')
    plt.show()
yearly_months_chart(year=target_year)
```
#### Year: Tasks Heat Map
```
# Helper Function to Create Heat Map from Data
# Adapted from https://stackoverflow.com/questions/32485907/matplotlib-and-numpy-create-a-calendar-heatmap
# Row labels for the heatmap. Sunday-first, matching the Sunday alignment
# performed in date_heatmap below.
DAYS = ['Sun.', 'Mon.', 'Tues.', 'Wed.', 'Thurs.', 'Fri.', 'Sat.']
MONTHS = ['Jan.', 'Feb.', 'Mar.', 'Apr.', 'May', 'June', 'July', 'Aug.', 'Sept.', 'Oct.', 'Nov.', 'Dec.']
def date_heatmap(series, start=None, end=None, mean=False, ax=None, **kwargs):
    '''Plot a calendar heatmap given a datetime series.
    Arguments:
        series (pd.Series):
            A series of numeric values with a datetime index. Values occurring
            on the same day are combined by sum.
        start (Any):
            The first day to be considered in the plot. The value can be
            anything accepted by :func:`pandas.to_datetime`. The default is the
            earliest date in the data.
        end (Any):
            The last day to be considered in the plot. The value can be
            anything accepted by :func:`pandas.to_datetime`. The default is the
            latest date in the data.
        mean (bool):
            Combine values occurring on the same day by mean instead of sum.
        ax (matplotlib.Axes or None):
            The axes on which to draw the heatmap. The default is the current
            axes in the :module:`~matplotlib.pyplot` API.
        **kwargs:
            Forwarded to :meth:`~matplotlib.Axes.pcolormesh` for drawing the
            heatmap.
    Returns:
        matplotlib.collections.Axes:
            The axes on which the heatmap was drawn. This is set as the current
            axes in the `~matplotlib.pyplot` API.
    '''
    # Combine values occurring on the same day.
    dates = series.index.floor('D')
    group = series.groupby(dates)
    series = group.mean() if mean else group.sum()
    # Parse start/end, defaulting to the min/max of the index.
    start = pd.to_datetime(start or series.index.min())
    end = pd.to_datetime(end or series.index.max())
    # We use [start, end) as a half-open interval below.
    end += np.timedelta64(1, 'D')
    # Get the previous/following Sunday to start/end, so every column of the
    # heatmap is a full Sunday-to-Saturday week.
    # Pandas and numpy day-of-week conventions are Monday=0 and Sunday=6.
    start_sun = start - np.timedelta64((start.dayofweek + 1) % 7, 'D')
    end_sun = end + np.timedelta64(7 - end.dayofweek - 1, 'D')
    # Create the heatmap and track ticks. Shape is (7 weekdays, num_weeks).
    num_weeks = (end_sun - start_sun).days // 7
    heatmap = np.zeros((7, num_weeks))
    ticks = {}  # week number -> month name
    for week in range(num_weeks):
        for day in range(7):
            date = start_sun + np.timedelta64(7 * week + day, 'D')
            # Tick the first of each month; add the year on Jan 1.
            if date.day == 1:
                ticks[week] = MONTHS[date.month - 1]
            if date.dayofyear == 1:
                ticks[week] += f'\n{date.year}'
            # Only fill cells inside [start, end); padding days stay 0.
            if start <= date < end:
                heatmap[day, week] = series.get(date, 0)
    # Get the coordinates, offset by 0.5 to align the ticks.
    y = np.arange(8) - 0.5
    x = np.arange(num_weeks + 1) - 0.5
    # Plot the heatmap. Prefer pcolormesh over imshow so that the figure can be
    # vectorized when saved to a compatible format. We must invert the axis for
    # pcolormesh, but not for imshow, so that it reads top-bottom, left-right.
    ax = ax or plt.gca()
    mesh = ax.pcolormesh(x, y, heatmap, **kwargs)
    ax.invert_yaxis()
    # Set the ticks.
    ax.set_xticks(list(ticks.keys()))
    ax.set_xticklabels(list(ticks.values()))
    ax.set_yticks(np.arange(7))
    ax.set_yticklabels(DAYS)
    # Set the current image and axes in the pyplot API.
    plt.sca(ax)
    plt.sci(mesh)
    return ax
def year_heat_chart(year):
    """Calendar heatmap of daily completed-task counts for `year`.

    Builds a full-calendar daily series (missing days count as 0) and renders
    it with `date_heatmap` using a purple colormap.
    """
    # Filter by Year
    year_data = tasks[(tasks['year'] == year)]
    # daily count
    year_dates_data = year_data['date'].value_counts().reset_index()
    year_dates_data.columns = ['date', 'count']
    year_dates_data['date'] = pd.to_datetime(year_dates_data['date'])
    # Generate all dates in that year so zero-task days still render.
    first_date = str(year)+'-01-01'
    last_date = str(year)+'-12-31'
    all_dates = pd.date_range(start=first_date, end=last_date)
    all_dates = pd.DataFrame(all_dates, columns=['date'])
    # combine actual counts by date with the full calendar
    year_data = pd.merge(left=all_dates, right=year_dates_data,
                         left_on="date", right_on="date", how="outer")
    year_data['count'].fillna(0, inplace=True)
    year_data = year_data.set_index(pd.DatetimeIndex(year_data['date']))
    max_daily_count = round(year_data['count'].max(),2)
    # key stat and title
    total_tasks = round(year_data['count'].sum())
    chart_title = '{} Todoist Tasks Heatmap | Total Tasks: {:,}'.format(year, total_tasks)
    # set chart data
    data = year_data['count']
    data.index = year_data.index
    # plot data
    figsize = plt.figaspect(7 / 56)
    fig = plt.figure(figsize=figsize)
    ax = date_heatmap(data, edgecolor='black')
    max_count = int(round(data.max(),0))
    # BUG FIX: when max_count < 6 the original tick step rounded to 0 and
    # range(0, max_count, 0) raised ValueError; clamp the step to at least 1.
    steps = max(1, int(round(max_count / 6, 0)))
    plt.colorbar(ticks=range(0, max_count, steps), pad=0.02)
    cmap = mpl.cm.get_cmap('Purples', max_daily_count)
    plt.set_cmap(cmap)
    plt.clim(0, max_daily_count)
    ax.set_aspect('equal')
    ax.set_title(chart_title)
    plt.show()
year_heat_chart(year=target_year)
# compare previous year:
year_heat_chart(year=2017)
```
### Yearly Summary
```
def yearly_summary(year):
    """Print a text summary of Todoist activity for one year.

    Reports the total and daily-average task counts, the top projects, a
    monthly breakdown, and the five busiest days, all computed from the
    notebook-level `tasks` DataFrame.
    """
    subset = tasks[(tasks['year'] == year)]
    per_day = subset['date'].value_counts().sort_index()
    per_month = subset['month'].value_counts().sort_index()
    per_project = subset['project_name'].value_counts()

    print(f'====== {year} Todoist Summary ======')
    print(f'Total Tasks Completed: {len(subset):,}')
    print(f'Daily Task Average: {round(per_day.mean(), 1):,}')
    print(' ')

    print('=== Top Projects ===')
    for name, count in per_project.sort_values(ascending=False).head(7).items():
        print("* ", count, 'tasks on ', name)
    print(' ')

    print('=== Monthly Breakdown ===')
    print(f'Monthly Task Average: {round(per_month.mean(), 1):,}')
    print('> Top 3 Months:')
    for month, count in per_month.sort_values(ascending=False).head(3).items():
        print("* ", count, 'tasks on ', month)
    print('> Bottom 3 Months:')
    for month, count in per_month.sort_values(ascending=True).head(3).items():
        print("* ", count, 'tasks on ', month)
    print(' ')

    print('Top 5 Days with Most Tasks Completed:')
    for day, count in per_day.sort_values(ascending=False).head(5).items():
        print("* ", count, 'tasks on ', day)
yearly_summary(year=target_year)
```
| github_jupyter |
# Training and Evaluating Machine Learning Models in cuML
This notebook explores several basic machine learning estimators in cuML, demonstrating how to train them and evaluate them with built-in metrics functions. All of the models are trained on synthetic data, generated by cuML's dataset utilities.
1. Random Forest Classifier
2. UMAP
3. DBSCAN
4. Linear Regression
[](https://colab.research.google.com/github/rapidsai/cuml/blob/tree/branch-0.14/docs/source/estimator_intro.ipynb)
## Classification
### Random Forest Classification and Accuracy metrics
The Random Forest algorithm classification model builds several decision trees, and aggregates each of their outputs to make a prediction. For more information on cuML's implementation of the Random Forest Classification model please refer to :
https://docs.rapids.ai/api/cuml/stable/api.html#cuml.ensemble.RandomForestClassifier
Accuracy score is the ratio of correct predictions to the total number of predictions. It is used to measure the performance of classification models.
For more information on the accuracy score metric please refer to: https://en.wikipedia.org/wiki/Accuracy_and_precision
For more information on cuML's implementation of accuracy score metrics please refer to: https://docs.rapids.ai/api/cuml/stable/api.html#cuml.metrics.accuracy.accuracy_score
The cell below shows an end to end pipeline of the Random Forest Classification model. Here the dataset was generated by using sklearn's make_blobs dataset. The generated dataset was used to train and run predict on the model. Random forest's performance is evaluated and then compared between the values obtained from the cuML and sklearn accuracy metrics.
```
import cuml
import cupy as cp
import numpy as np
from cuml.datasets import make_blobs
from cuml.ensemble import RandomForestClassifier as curfc
from cuml.preprocessing.model_selection import train_test_split
from sklearn.metrics import accuracy_score
# Synthetic blob dataset parameters.
n_samples = 1000
n_features = 10
n_info = 7  # NOTE(review): unused below (make_blobs has no informative-features arg) — likely a leftover
X_blobs, y_blobs = make_blobs(n_samples=n_samples, cluster_std=0.1,
                              n_features=n_features, random_state=0,
                              dtype=np.float32)
# 80/20 train/test split, performed on the GPU.
X_blobs_train, X_blobs_test, y_blobs_train, y_blobs_test = train_test_split(X_blobs,
                                                                            y_blobs, train_size=0.8,
                                                                            random_state=10)
# NOTE(review): split_algo / min_rows_per_node are legacy parameters in older
# cuml releases — confirm against the installed cuml version.
cuml_class_model = curfc(max_features=1.0, n_bins=8, max_depth=10,
                         split_algo=0, min_rows_per_node=2,
                         n_estimators=30)
cuml_class_model.fit(X_blobs_train, y_blobs_train)
cu_preds = cuml_class_model.predict(X_blobs_test)
cu_accuracy = cuml.metrics.accuracy_score(y_blobs_test, cu_preds)
# convert cupy test labels to numpy since sklearn's accuracy_score function
# does not accept cupy input
y_blobs_test = cp.asnumpy(y_blobs_test)
sk_accuracy = accuracy_score(y_blobs_test, cp.asnumpy(cu_preds))
print("cuml's accuracy score : ", cu_accuracy)
print("sklearn's accuracy score : ", sk_accuracy)
```
## Clustering
### UMAP and Trustworthiness metrics
UMAP is a dimensionality reduction algorithm which performs non-linear dimension reduction. It can also be used for visualization.
For additional information on the UMAP model please refer to the documentation on https://docs.rapids.ai/api/cuml/stable/api.html#cuml.UMAP
Trustworthiness is a measure of the extent to which the local structure is retained in the embedding of the model. Therefore, if a sample predicted by the model lies within the unexpected region of the nearest neighbors, then those samples would be penalized. For more information on the trustworthiness metric please refer to: https://scikit-learn.org/dev/modules/generated/sklearn.manifold.t_sne.trustworthiness.html
the documentation for cuML's implementation of the trustworthiness metric is: https://docs.rapids.ai/api/cuml/stable/api.html#cuml.metrics.trustworthiness.trustworthiness
The cell below shows an end to end pipeline of UMAP model. Here, the blobs dataset is created by cuml's equivalent of make_blobs function to be used as the input. The output of UMAP's fit_transform is evaluated using the trustworthiness function. The values obtained by sklearn and cuml's trustworthiness are compared below.
```
import cuml
import cupy as cp
import numpy as np
from cuml.datasets import make_blobs
from cuml.manifold.umap import UMAP as cuUMAP
from sklearn.manifold import trustworthiness
# Generate a datasets with 8 "blobs" of grouped-together points so we have an interesting structure to test DBSCAN clustering and UMAP
n_samples = 2**10
n_features = 100
centers = round(n_samples*0.4)  # NOTE(review): computed but never passed to make_blobs — confirm intent
X_blobs, y_blobs = make_blobs(n_samples=n_samples, cluster_std=0.1,
                              n_features=n_features, random_state=0,
                              dtype=np.float32)
# Reduce to 2D on the GPU and score how well local structure is preserved.
X_embedded = cuUMAP(n_neighbors=10).fit_transform(X_blobs)
cu_score = cuml.metrics.trustworthiness(X_blobs, X_embedded)
# convert cupy test labels to numpy since sklearn's trustworthiness function
# does not accept cupy input
X_blobs = cp.asnumpy(X_blobs)
sk_score = trustworthiness(X_blobs, cp.asnumpy(X_embedded))
print(" cuml's trustworthiness score : ", cu_score)
print(" sklearn's trustworthiness score : ", sk_score)
```
### DBSCAN and Adjusted Random Index
DBSCAN is a popular and a powerful clustering algorithm. For additional information on the DBSCAN model please refer to the documentation on https://docs.rapids.ai/api/cuml/stable/api.html#cuml.DBSCAN
We create the blobs dataset using the cuml equivalent of make_blobs function.
Adjusted random index is a metric which is used to measure the similarity between two data clusters, and it is adjusted to take into consideration the chance grouping of elements.
For more information on Adjusted random index please refer to: https://en.wikipedia.org/wiki/Rand_index
The cell below shows an end to end model of DBSCAN. The output of DBSCAN's fit_predict is evaluated using the Adjusted Random Index function. The values obtained by sklearn and cuml's adjusted random metric are compared below.
```
import numpy as np
import cuml
from cuml.datasets import make_blobs
from cuml import DBSCAN as cumlDBSCAN
from sklearn.metrics import adjusted_rand_score
# NOTE(review): this cell uses `cp` (cupy) below but does not import it itself;
# it relies on the earlier cells having run first — confirm, or add the import.
n_samples = 2**10
n_features = 100
centers = round(n_samples*0.4)  # NOTE(review): computed but never passed to make_blobs
X_blobs, y_blobs = make_blobs(n_samples=n_samples, cluster_std=0.01,
                              n_features=n_features, random_state=0,
                              dtype=np.float32)
# Cluster on the GPU and compare the labeling against ground truth with ARI.
cuml_dbscan = cumlDBSCAN(eps=3, min_samples=2)
cu_y_pred = cuml_dbscan.fit_predict(X_blobs)
cu_adjusted_rand_index = cuml.metrics.cluster.adjusted_rand_score(y_blobs, cu_y_pred)
# convert cupy test labels to numpy since sklearn's adjusted_rand_score function
# does not accept cupy input
y_blobs = cp.asnumpy(y_blobs)
sk_adjusted_rand_index = adjusted_rand_score(y_blobs, cp.asnumpy(cu_y_pred))
print(" cuml's adjusted random index score : ", cu_adjusted_rand_index)
print(" sklearn's adjusted random index score : ", sk_adjusted_rand_index)
```
## Regression
### Linear regression and R^2 score
Linear Regression is a simple machine learning model where the response y is modelled by a linear combination of the predictors in X.
R^2 score is also known as the coefficient of determination. It is used as a metric for scoring regression models. It scores the output of the model based on the proportion of total variation of the model.
For more information on the R^2 score metrics please refer to: https://en.wikipedia.org/wiki/Coefficient_of_determination
For more information on cuML's implementation of the r2 score metrics please refer to : https://docs.rapids.ai/api/cuml/stable/api.html#cuml.metrics.regression.r2_score
The cell below uses the Linear Regression model to compare the results between cuML and sklearn trustworthiness metric. For more information on cuML's implementation of the Linear Regression model please refer to :
https://docs.rapids.ai/api/cuml/stable/api.html#linear-regression
```
import numpy as np
import cuml
from cuml.datasets import make_regression
from cuml.linear_model import LinearRegression as culr
from cuml.preprocessing.model_selection import train_test_split
from sklearn.metrics import r2_score
# Synthetic regression dataset parameters.
n_samples = 2**10
n_features = 100
n_info = 70  # number of informative features
X_reg, y_reg = make_regression(n_samples=n_samples, n_features=n_features,
                               n_informative=n_info, random_state=123, dtype=np.float32)
# using cuML's train_test_split function to divide the dataset into training and testing splits
X_reg_train, X_reg_test, y_reg_train, y_reg_test = train_test_split(X_reg,
                                                                    y_reg, train_size=0.8,
                                                                    random_state=10)
# Closed-form (eigendecomposition) solver with feature normalization.
cuml_reg_model = culr(fit_intercept=True,
                      normalize=True,
                      algorithm='eig')
cuml_reg_model.fit(X_reg_train,y_reg_train)
cu_preds = cuml_reg_model.predict(X_reg_test)
# Compare cuML's R^2 against sklearn's on the same predictions.
cu_r2 = cuml.metrics.r2_score(y_reg_test, cu_preds)
sk_r2 = r2_score(y_reg_test, cu_preds)
print("cuml's r2 score : ", cu_r2)
print("sklearn's r2 score : ", sk_r2)
```
| github_jupyter |
```
%matplotlib inline
import json
import os
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from functools import reduce
from matplotlib.ticker import FuncFormatter
# Screen name of the ego account the analysis centers on (James Damore).
DAMORE = '@Fired4Truth'
# Directory of cleaned follower dumps, one JSON list of IDs per account.
DATA_DIR = os.path.join("data", "clean")
sns.set_palette(sns.xkcd_palette(["windows blue", "amber", "greyish"]))
# Unpack the palette: default bar color, highlighted bar color, annotation color.
NORMAL, HIGHLIGHT, TEXT = sns.color_palette()
# Axis tick formatters: thousands separators and whole-number percentages.
THOUSANDS_FORMATTER = FuncFormatter(lambda x, p: format(int(x), ','))
PERCENT_FORMATTER = FuncFormatter(lambda x, p: "{:3.0f}%".format(x*100))
sns.set_context('poster')
sns.set_style('white')
def mutual_followers(followers, ego, *screen_names):
    """Return the set of IDs that follow `ego` and every account in `screen_names`.

    Arguments:
        followers: mapping of screen name -> set of follower IDs.
        ego: the anchor account; its follower set seeds the intersection.
        *screen_names: zero or more additional accounts to intersect with.
    """
    common = followers[ego]
    for name in screen_names:
        common = followers[name] & common
    return common
test_followers = {'a': {1, 2, 3}, 'b': {2, 3, 4}, 'c': {3, 4, 5}}
assert mutual_followers(test_followers, 'a') == {1, 2, 3}
assert mutual_followers(test_followers, 'a', 'b') == {2, 3}
assert mutual_followers(test_followers, 'a', 'b', 'c') == {3}
def load_followers(data_dir=DATA_DIR):
    """Load follower-ID sets from every ``*.json`` file in `data_dir`.

    Each file ``<name>.json`` is expected to contain a JSON list of follower
    IDs; the result maps ``'@<name>'`` to the set of those IDs.

    BUG FIX: the original accepted `data_dir` but hard-coded DATA_DIR in the
    body, so the parameter was silently ignored.
    """
    followers = {}
    for name in os.listdir(data_dir):
        if name.endswith(".json"):
            with open(os.path.join(data_dir, name)) as fp:
                screen_name = '@' + name.replace(".json", "")
                followers[screen_name] = set(json.load(fp))
    return followers
def create_annotated_df(followers, target=DAMORE, others=None):
    """Build a DataFrame of follower counts and audience overlap with `target`.

    Args:
        followers: mapping of screen name -> set of follower IDs.
        target: account whose audience the overlap is measured against.
        others: optional accounts whose followers every counted ID must also
            follow (conditions the probabilities on following them).

    Returns:
        DataFrame indexed by screen name with columns `n_followers`,
        `mutual_followers`, and `mutual_proportion` (share of `target`'s
        conditioned audience that also follows each account).

    BUG FIX: the original immediately overwrote the `followers` argument with
    `load_followers()`, silently ignoring what the caller passed in.
    """
    others = others or []
    n_followers = {k: len(mutual_followers(followers, k, *others))
                   for k in followers}
    df = pd.DataFrame({'n_followers': pd.Series(n_followers)})
    df['mutual_followers'] = [len(mutual_followers(followers, target, k, *others))
                              for k in df.index]
    df['mutual_proportion'] = df['mutual_followers'] / n_followers[target]
    return df
def sort_and_highlight(df, title, k, highlight_set, formatter):
    """Horizontal bar chart of column `k`, highlighting selected screen names.

    Args:
        df: DataFrame indexed by screen name.
        title: chart title.
        k: name of the column to plot.
        highlight_set: screen names drawn in the highlight color.
        formatter: matplotlib tick formatter for the x axis.

    Returns:
        (fig, ax) tuple for further annotation by the caller.
    """
    fig, ax = plt.subplots()
    df = df.sort_values(by=k)
    # FIX: the original comprehension reused `k` as its loop variable,
    # shadowing the column-name parameter inside the comprehension; use a
    # distinct name for clarity.
    colors = [HIGHLIGHT if name in highlight_set else NORMAL
              for name in df.index]
    # Cheap hack. Couldn't get pandas do multiple colors: draw the pandas
    # chart for its axis setup, then paint per-color bars over it.
    df[k].plot(kind='barh')
    plt.barh(range(df.shape[0]),
             df[k],
             height=0.6,
             color=colors)
    plt.title(title)
    ax.xaxis.set_major_formatter(formatter)
    sns.despine()
    return fig, ax
# Load follower sets and compute overlap with the ego account.
followers = load_followers()
df = create_annotated_df(followers)
df
# Raw follower counts, with the ego account highlighted.
sort_and_highlight(df,
                   'Total Twitter Followers',
                   'n_followers',
                   {DAMORE},
                   THOUSANDS_FORMATTER);
# Share of the ego's followers that also follow each account.
sort_and_highlight(df,
                   'P[ Follows @ScreenName | Follows James Damore ]',
                   'mutual_proportion',
                   {DAMORE}, PERCENT_FORMATTER);
# Same analysis conditioned on also following @TechCrunch.
followers = load_followers()
sub_df = create_annotated_df(followers, target='@Fired4Truth', others=['@TechCrunch'])
sub_df
sort_and_highlight(sub_df,
                   'P[ Follows @ScreenName | Follows Damore, Follows TechCrunch ]',
                   'mutual_proportion',
                   {DAMORE, '@TechCrunch'},
                   PERCENT_FORMATTER);
# Highlight two specific accounts and annotate their absolute follower counts.
fig, ax = sort_and_highlight(sub_df,
                             'P[ Follows @ScreenName | Follows James Damore, Follows TechCrunch ]',
                             'mutual_proportion',
                             {'@travisk', '@sherylsandberg'},
                             PERCENT_FORMATTER)
ax.annotate('206,588 followers', xy=(0.15, 4), xytext=(0.35, 4),
            verticalalignment='center',
            arrowprops=dict(facecolor=TEXT, edgecolor=TEXT, shrink=0.05))
ax.annotate('242,680 followers', xy=(0.07, 1), xytext=(0.35, 1),
            verticalalignment='center',
            arrowprops=dict(facecolor=TEXT, edgecolor=TEXT, shrink=0.05));
# Repeat, highlighting a different pair of accounts.
fig, ax = sort_and_highlight(sub_df,
                             'P[ Follows @ScreenName | Follows James Damore, Follows TechCrunch ]',
                             'mutual_proportion',
                             {'@marissamayer', '@PrisonPlanet'},
                             PERCENT_FORMATTER)
ax.annotate(' 681,727 followers', xy=(0.47, 10), xytext=(0.65, 10),
            verticalalignment='center',
            arrowprops=dict(facecolor=TEXT, edgecolor=TEXT, shrink=0.05))
ax.annotate('1,711,976 followers', xy=(0.2, 7), xytext=(0.65, 7),
            verticalalignment='center',
            arrowprops=dict(facecolor=TEXT, edgecolor=TEXT, shrink=0.05));
```
| github_jupyter |
```
import scanpy as sc
import pandas as pd
import numpy as np
import scipy as sp
from statsmodels.stats.multitest import multipletests
import matplotlib.pyplot as plt
import seaborn as sns
import os
from os.path import join
import time
# Embed TrueType fonts so exported PDF/PS figures remain editable.
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
# scTRS tools
import scdrs.util as util
import scdrs.data_loader as dl
import scdrs.method as md
# autoreload
%load_ext autoreload
%autoreload 2
# Constants
DATA_PATH='/n/holystore01/LABS/price_lab/Users/mjzhang/scDRS_data'
OUT_PATH=DATA_PATH+'/results/fig_simu'
# GS: gene-set names, combining a gene-selection scheme with a set size.
GS_LIST = ['%s_ngene%d'%(prefix, size) for prefix in ['all', 'highmean', 'highvar', 'highbvar']
           for size in [100, 500, 1000]]
temp_dic = {'all': 'random genes', 'highmean': 'random high mean-expr genes',
            'highvar': 'random high variance genes', 'highbvar': 'random overdispersed genes'}
# Human-readable label per gene set, e.g. 'all_ngene100' -> '100 random genes'.
DIC_GS_NAME = {x:x.split('_')[1].replace('ngene','')+' '+temp_dic[x.split('_')[0]]
               for x in GS_LIST}
# DATA_LIST: single-cell datasets to evaluate.
DATA_LIST = ['tms_facs.ncell_10k']
# Results: score-file path template per method ('@d' = dataset, '@g' = gene set).
DIC_RES_PATH = {'sctrs': DATA_PATH+'/simulation_data/score_file/@d.@g',
                'seurat': DATA_PATH+'/simulation_data/score_file/result_scanpy/@d.@g',
                'vision': DATA_PATH+'/simulation_data/score_file/result_vision/@d.@g',
                'vam': DATA_PATH+'/simulation_data/score_file/result_vam/@d.@g.tsv'}
METHOD_LIST = list(DIC_RES_PATH.keys())
# Display name and plot color per method.
DIC_METHOD_NAME = {'sctrs':'scDRS', 'seurat': 'Seurat', 'vision':'Vision', 'vam':'VAM'}
DIC_METHOD_COLOR = {'sctrs':'C0', 'seurat': 'C1', 'vision':'C2', 'vam':'C3'}
# Fall back to the raw method name / next default color cycle entry for any
# method missing from the display dictionaries.
for method in METHOD_LIST:
    if method not in DIC_METHOD_NAME.keys():
        DIC_METHOD_NAME[method] = method
    if method not in DIC_METHOD_COLOR.keys():
        DIC_METHOD_COLOR[method] = 'C%d'%len(DIC_METHOD_COLOR)
# Read results: for every (gene set, dataset, method) combination, load the
# per-cell p-values and record their empirical quantiles at q_list.
import itertools
# 30 log-spaced quantile levels from 1e-3 to 1.
q_list = 10**np.linspace(-3,0,30)
dic_res = {}
for gs,dname,method in itertools.product(GS_LIST, DATA_LIST, METHOD_LIST):
    print(gs,dname,method)
    df_gs = pd.read_csv(DATA_PATH+'/simulation_data/gs_file/%s.gs'%gs, sep='\t', index_col=0)
    # One row per trait; -1 marks quantiles not yet filled (missing files).
    df_res = pd.DataFrame(index=df_gs.index, columns=q_list, data=-1)
    # load scTRS results (one score file per trait; quantiles of 'pval')
    if method=='sctrs':
        for trait in df_gs.index:
            score_file = DIC_RES_PATH[method].replace('@d',dname).replace('@g',gs) + '/%s.score.gz'%trait
            if os.path.exists(score_file):
                temp_df = pd.read_csv(score_file, sep='\t')
                df_res.loc[trait, q_list] = np.quantile(temp_df['pval'], q_list)
            else:
                print('# file missing: ', score_file)
        dic_res['%s:%s:%s'%(dname,gs,method)] = df_res.copy()
    # load vam results (single TSV with one column per trait)
    if method=='vam':
        score_file = DIC_RES_PATH[method].replace('@d',dname).replace('@g',gs)
        if os.path.exists(score_file):
            temp_df = pd.read_csv(score_file, sep='\t')
            temp_df.columns = [x.replace('.','_') for x in temp_df.columns]
            # Drop traits whose p-values are essentially all 1 (degenerate).
            drop_list = temp_df.columns[temp_df.mean(axis=0)>0.99]
            for trait in df_gs.index:
                if trait in drop_list:
                    print('# %s dropped'%trait)
                    continue
                df_res.loc[trait, q_list] = np.quantile(temp_df[trait], q_list)
            # Keep only traits whose quantiles were fully filled in.
            df_res = df_res.loc[(df_res==-1).sum(axis=1)==0]
            dic_res['%s:%s:%s'%(dname,gs,method)] = df_res.copy()
        else:
            print('# file missing: ', score_file)
    # load vision result (one score file per trait; quantiles of 'norm_pval')
    if method=='vision':
        for trait in df_gs.index:
            score_file = DIC_RES_PATH[method].replace('@d',dname).replace('@g',gs) + '/%s.score.gz'%trait
            if os.path.exists(score_file):
                temp_df = pd.read_csv(score_file, sep='\t')
                df_res.loc[trait, q_list] = np.quantile(temp_df['norm_pval'], q_list)
            else:
                print('# file missing: ', score_file)
        dic_res['%s:%s:%s'%(dname,gs,method)] = df_res.copy()
    # load seurat results (one score file per trait; quantiles of 'pval')
    if method=='seurat':
        for trait in df_gs.index:
            score_file = DIC_RES_PATH[method].replace('@d',dname).replace('@g',gs) + '/%s.score.gz'%trait
            if os.path.exists(score_file):
                temp_df = pd.read_csv(score_file, sep='\t')
                df_res.loc[trait, q_list] = np.quantile(temp_df['pval'], q_list)
            else:
                print('# file missing: ', score_file)
        dic_res['%s:%s:%s'%(dname,gs,method)] = df_res.copy()
# Q-Q plot: compare each method's empirical p-value quantiles (mean +- SE
# across traits) against the theoretical uniform quantiles, per gene set.
dname = 'tms_facs.ncell_10k'
plot_list = ['%s:%s'%(dname, x) for x in GS_LIST]
plot_method_list = ['sctrs', 'vision', 'seurat', 'vam']
for plot_name in plot_list:
    dname,gs=plot_name.split(':')
    df_plot_mean = pd.DataFrame(index=q_list, columns=plot_method_list, data=-1)
    df_plot_se = pd.DataFrame(index=q_list, columns=plot_method_list, data=-1)
    for method in plot_method_list:
        res = '%s:%s'%(plot_name,method)
        # Average across traits whose quantiles were fully filled in (no -1).
        temp_df = dic_res[res][q_list].loc[(dic_res[res][q_list]==-1).sum(axis=1)==0]
        df_plot_mean.loc[q_list, method] = temp_df.mean(axis=0)
        df_plot_se.loc[q_list, method] = temp_df.std(axis=0)/np.sqrt(temp_df.shape[0])
    # Clip so -log10 transforms and error bars stay finite (axis caps at 4).
    df_plot_mean = df_plot_mean.clip(lower=1e-4)
    df_plot_se = df_plot_se.clip(lower=1e-10)
    # Compute distance and p-value for deviation from the diagonal
    # (the q=1 row is dropped since it is trivially on the diagonal).
    df_plot_dist = np.absolute(np.log10(df_plot_mean.T)-np.log10(df_plot_mean.index)).T
    df_plot_dist.drop(1, axis=0, inplace=True)
    df_plot_dist = df_plot_dist.max(axis=0)
    # Two-sided normal test of observed-vs-theoretical quantile differences.
    temp_df = np.absolute(df_plot_mean.T-df_plot_mean.index).T / df_plot_se
    df_plot_p = pd.DataFrame(index=df_plot_mean.index, columns=df_plot_mean.columns,
                             data=(1-sp.stats.norm.cdf(temp_df))*2)
    df_plot_p.drop(1, axis=0, inplace=True)
    df_plot_p = df_plot_p.median(axis=0)
    # Plot
    plt.figure(figsize=[4.2,4])
    # 95% CI half-width in -log10 space.
    df_plot_logerr = np.log10(df_plot_mean+1.96*df_plot_se) - np.log10(df_plot_mean)
    for i_method,method in enumerate(plot_method_list):
        plt.errorbar(-np.log10(df_plot_mean.index), -np.log10(df_plot_mean[method]),
                     yerr = df_plot_logerr[method], label=DIC_METHOD_NAME[method],
                     fmt='.', markersize=4, elinewidth=1, color=DIC_METHOD_COLOR[method], zorder=8-i_method)
    # Diagonal = perfect calibration.
    plt.plot([0, 3], [0, 3], linestyle='--', linewidth=1, color='k', zorder=0)
    plt.xlabel('Theoretical -log10(p) quantiles')
    plt.ylabel('Actual -log10(p) quantiles')
    plt.yticks([0,0.5,1,1.5,2,2.5,3,3.5,4],[0,0.5,1,1.5,2,2.5,3,3.5,'>4'])
    plt.grid(linestyle='--', linewidth=0.5)
    if 'all' in gs:
        plt.title('Null simulations (%s)'%DIC_GS_NAME[gs])
    else:
        plt.title('Null simulations\n(%s)'%DIC_GS_NAME[gs])
    plt.legend()
    plt.tight_layout()
    plt.savefig(OUT_PATH+'/%s.%s.svg'%(dname,gs))
    plt.show()
    # Store data for the main figure 'tms_facs.ncell_10k:all_ngene1000'
    if plot_name=='tms_facs.ncell_10k:all_ngene1000':
        SUPP_TAB_PATH='/n/holystore01/LABS/price_lab/Users/mjzhang/scDRS_data/supp_table'
        df_plot_mean.columns = ['%s.mean'%x for x in df_plot_mean]
        df_plot_se.columns = ['%s.se'%x for x in df_plot_se]
        df_out = df_plot_mean.join(df_plot_se)
        df_out.index.name='quantile'
        df_out = df_out[['%s.%s'%(x,y) for x in plot_method_list for y in ['mean', 'se']]]
        df_out.to_csv(SUPP_TAB_PATH+'/supp_tab_fig2a.tsv', sep='\t')
        # Report the maximum upper-CI deviation per method.
        for method in plot_method_list:
            print(method, (np.log10(df_out['%s.mean'%method]+1.96*df_out['%s.se'%method])
                           - np.log10(df_out['%s.mean'%method])).max())
### Cell type-disease association
```
# Load single-cell data
adata = sc.read_h5ad(DATA_PATH+'/simulation_data/single_cell_data/tms_facs.ncell_10k.h5ad')
# Read full score files (per-cell normalized scores + Monte Carlo controls).
dic_res_full = {}
score_file=DATA_PATH+'/simulation_data/score_file/tms_facs.ncell_10k.all_ngene1000'
df_gs = pd.read_csv(DATA_PATH+'/simulation_data/gs_file/all_ngene1000.gs', sep='\t', index_col=0)
for trait in df_gs.index:
    if os.path.exists(score_file+'/%s.full_score.gz'%trait):
        dic_res_full[trait] = pd.read_csv(score_file+'/%s.full_score.gz'%trait, sep='\t', index_col=0)
    else:
        print('# file missing: ', score_file)
# Cell type-disease association: for each (cell type, trait), compare the 95th
# percentile of the observed normalized scores against the same percentile of
# each control replicate; the Monte Carlo p-value is the fraction of control
# replicates at least as extreme (with a +1 pseudocount).
celltype_list = sorted(set(adata.obs['cell_ontology_class']))
trait_list = list(df_gs.index)
df_stats = pd.DataFrame(index=celltype_list, columns=trait_list, dtype=float)
for trait in trait_list:
    for ct in celltype_list:
        cell_list = adata.obs_names[adata.obs['cell_ontology_class']==ct]
        temp_df = dic_res_full[trait].loc[cell_list].copy()
        score_q95 = np.quantile(temp_df['norm_score'], 0.95)
        temp_df = temp_df[[x for x in temp_df.columns if x.startswith('ctrl_norm_score')]]
        v_ctrl_score_q95 = np.quantile(temp_df, 0.95, axis=0)
        df_stats.loc[ct,trait] = ((v_ctrl_score_q95>=score_q95).sum()+1) / (v_ctrl_score_q95.shape[0]+1)
# BH-adjust p-values within each trait (null replicate), then report the false
# discovery proportion: since every gene set is random, all discoveries are
# false, so FDP is 1 whenever anything is discovered and 0 otherwise.
df_stats_fdr = df_stats.copy()
print('# n_celltype=%d, n_rep=%d'%df_stats_fdr.shape)
for col in df_stats_fdr:
    df_stats_fdr[col] = multipletests(df_stats[col], method='fdr_bh')[1]
for alpha in [0.05, 0.1, 0.2]:
    v_fd = (df_stats_fdr<alpha).sum(axis=0)
    v_d = v_fd.clip(lower=1)  # avoid division by zero when nothing is discovered
    v_fdp = v_fd / v_d
    print('# alpha=%0.2f, FDP=%0.3f (SE=%0.3f)'
          %(alpha, v_fdp.mean(), 1.96*v_fdp.std()/np.sqrt(df_stats_fdr.shape[1])))
```
| github_jupyter |
# Probando el ajuste de distribuciones hipotéticas
A veces, el conocimiento específico sugiere fuertes razones que justifiquen alguna suposición; de lo contrario, esto debería probarse de alguna manera. Cuando comprobamos si los datos experimentales se ajustan a una distribución de probabilidad dada, no estamos realmente probando una hipótesis sobre un parámetro o dos; de hecho, estamos ejecutando una prueba no paramétrica.
En esta sección ilustramos tres tipos de enfoque:
- La prueba de chi-cuadrado, que es de propósito general y, en términos generales, verifica el ajuste en términos de histogramas y densidades.
```
import numpy as np
import scipy.stats as st # Librería estadística
import statsmodels.api as sm
import matplotlib.pyplot as plt
```
Una trama Q-Q es generalmente un enfoque más poderoso, en lugar de simplemente usar la técnica común de comparación de histogramas de las dos muestras, pero requiere más habilidad para interpretar. Los diagramas Q-Q se usan comúnmente para comparar un conjunto de datos con un modelo teórico

## Interpretación
- Si las dos distribuciones que se comparan son idénticas, la gráfica Q-Q sigue la línea de 45° $y = x$
- Si las dos distribuciones concuerdan después de transformar linealmente los valores en una de las distribuciones, entonces la gráfica Q – Q sigue alguna línea, pero no necesariamente la línea y = x
- Si la tendencia general de la gráfica Q-Q es más plana que la línea y = x, la distribución representada en el eje horizontal está más dispersa que la distribución representada en el eje vertical.
- A la inversa, si la tendencia general de la gráfica Q-Q es más pronunciada que la línea y = x, la distribución representada en el eje vertical está más dispersa que la distribución representada en el eje horizontal.
> Referencia: https://en.wikipedia.org/wiki/Q%E2%80%93Q_plot
```
# Demo: Q-Q plots for checking distributional assumptions.
# 1) Normal sample vs. its theoretical density, plus a normal Q-Q plot.
mu = 30; sigma = 10
measurements = np.random.normal(loc=mu, scale=sigma, size=100)

# Histogram of the generated sample.
divisiones = 10  # number of histogram bins
plt.hist(measurements, divisiones, density=True)
x = np.arange(0, 60, .1)
y = st.norm.pdf(x, loc=mu, scale=sigma)
plt.plot(x, y, 'r--')
plt.ylabel('Probability')
plt.grid()
plt.show()

# Q-Q plot of the sample against a fitted normal ('s' = standardized line).
# grap2 = st.probplot(measurements, dist="norm", plot=plt)
grap1 = sm.qqplot(measurements, dist='norm', line='s')
plt.grid()
# plt.xlabel('Normal theorical quantiles')
# plt.ylabel('Data theorical quantiles')
plt.show()

######## t-student
nsample = 100
# A t distribution with small degrees of freedom: heavy tails appear as an
# S-shaped deviation from the 45-degree line.
x = st.t.rvs(3, size=nsample)
# res = st.probplot(x, plot=plt)
sm.qqplot(x, dist='norm', line='45')
plt.title('t with small df')
plt.grid()
plt.show()

# A t distribution with larger degrees of freedom: close to normal.
x = st.t.rvs(505, size=nsample)
# res = st.probplot(x, plot=plt)
sm.qqplot(x, dist='norm', line='45')
plt.title('t with larger df')
plt.grid()
plt.show()

# Fit a normal to the current sample and evaluate its pdf on that sample.
# Fix: the original evaluated the pdf at `x1`, which is not defined until
# later in this script (NameError); the sample `x` is the intended input.
y_norm = st.norm(*st.norm.fit(x)).pdf(x)
Q_norm = np.percentile(y_norm, [25, 50, 75])
Q_norm

########### Fitting an exponential
nsample = 500
# Exponential sample with rate lambda = 1 (scipy's scale = 1/lambda).
lamb = 1
x = st.expon.rvs(loc=0, scale=1/lamb, size=nsample)
f, ax = plt.subplots(1, 2, figsize=(12, 4))
sm.qqplot(x, dist='norm', line='s', ax=ax[0])
plt.title('Comparación de cuantiles exponenciales con normales')
plt.ylim([0, 3])
plt.grid()
# plt.show()

# Box plot of the same sample.
plt.subplot(122)
B = plt.boxplot(x)
plt.grid()
plt.show()

# Read the whisker endpoints (Q1/Q3) back from the box-plot artists.
[item.get_ydata()[0] for item in B['whiskers']]
# Alternative way to obtain the quantiles.
Q_exp = np.percentile(x, [25, 50, 75])
print('Quantiles random exponential de la muestra ', Q_exp)

np.random.seed(5555)
# 2) Comparing two data sets with a two-sample Q-Q plot.
x = np.random.normal(loc=8.5, scale=2.5, size=100)
y = np.random.normal(loc=8.0, scale=3.0, size=100)
# Plot the two underlying normal densities.
x1 = np.arange(-1, 20, .1)
y1 = st.norm.pdf(x1, loc=8.5, scale=2.5)
y2 = st.norm.pdf(x1, loc=8.0, scale=3.0)
plt.plot(x1, y1, x1, y2)
# Quantile-quantile comparison of the two samples.
sm.qqplot_2samples(x, y, line='45')
plt.show()
Q2 = np.arange(2, 14, 2)
print('Quantiles x=', np.percentile(x, Q2))
print('Quantiles y=', np.percentile(y, Q2))
np.percentile(x, Q2) - np.percentile(y, Q2)
```
# La prueba chi cuadrado - Usando el histograma
$$\chi^2 = \sum_{j=1}^J{(O_j-E_j)^2\over E_j}$$
El estadístico anterior, tiene (aproximadamente) una distribución de chi-cuadrado. Deberíamos rechazar la hipótesis si $\chi^2$ es demasiado grande, es decir, si $\chi^2>\chi^2_{1-\alpha,m}$ donde:
- $\chi^2_{1-\alpha,m}$ es un cuantil de la distribución de chi-cuadrado.
- $\alpha$ es el nivel de significancia de la prueba.
- m es el número de grados de libertad.
Lo que nos falta aquí es m, que depende del número de parámetros de la distribución que hemos estimado utilizando los datos. Si no se ha estimado ningún parámetro, es decir, si hemos asumido una distribución parametrizada específica antes de observar datos, los grados de libertad son $J - 1$; si tenemos parámetros p estimados, deberíamos usar $J - p - 1$, con $J$ la cantidad de particiones del histograma.
```
# Build a histogram of a normal sample; the bin frequencies are used below
# for a chi-square goodness-of-fit test.
np.random.seed(555)
mu_real = 10; sigma_real = 20
N = 100  # sample size
n1 = np.random.normal(mu_real, sigma_real, N)
J = 90  # number of histogram bins
[freq, x, p] = plt.hist(n1, J, density=True)
plt.show()
# Drop the last bin edge so x has exactly J entries (the left edges).
x = x[:-1]
# Sample mean and standard deviation.
# NOTE(review): these are computed from the bin edges `x`, not from the raw
# sample `n1` — confirm this is intended.
mu = np.mean(x)
sigma = np.std(x)
print('media mu =%f, desviación estándar muestral =%f' % (mu, sigma))
```
### Se desea resolver la siguiente prueba de hipótesis
> $H_0$: la distribución es normal con ´media= $\mu$´ y ´desviación estándar = $\sigma$´
> $H_a$: los datos no se distribuyen normales
```
# Chi-square goodness-of-fit test of the histogram against a fitted normal.
pi = st.norm.pdf(x, loc=mu, scale=sigma)
# Expected value per bin from the theoretical expression.
# NOTE(review): Ei = x*pi multiplies the pdf by the bin edge; expected bin
# frequencies would conventionally be N * pi * bin_width — confirm intent.
Ei = x * pi
# Chi-square statistic computed from the definition sum((O-E)^2/E).
x2 = np.sum(list(map(lambda Ei, obs_i: (obs_i - Ei)**2 / Ei, Ei, freq)))
print('Valor de chi cuadrado teorico = ', x2)
# Chi-square via the statistics library (statistic, p-value).
X2 = st.chisquare(freq, Ei)
print('Valor de chi cuadrado librería = ', X2)
# Degrees of freedom of the statistic.
p = 2  # parameters estimated from the data (mu and sigma)
m = J - p - 1  # degrees of freedom
Chi_est = st.chi2.ppf(q=0.95, df=m)  # critical value at 95%
print('Estadístico de chi_cuadrado = ', Chi_est)
print('Media muestral = ', mu, '\nDesviación estándar muestral = ', sigma)
```
> **Conclusión**: No podemos rechazar la $H_0$; por lo tanto, los datos se distribuyen de forma normal.
# Pruebas de correlación de Pearson
El análisis de correlación juega un papel importante en los modelos de Monte Carlo:
- En el análisis de entrada, debemos verificar si algunas variables están correlacionadas para modelarlas correctamente.
- Al aplicar la reducción de varianza por el método de números complementarios, puede ser importante verificar la fuerza de la correlación entre el estimador de Monte Carlo crudo y la variable de control que consideramos.
Está claro que la magnitud de la correlación se debe comparar con el tamaño de la muestra, y una estrategia simple es probar la hipótesis nula
$$ H_0: \rho_{XY}=0$$
contra la hipótesis alternativa
$$ H_a: \rho_{XY}\neq0$$
Sin embargo, necesitamos una estadística cuya distribución bajo la hipótesis nula sea bastante manejable. Un resultado útil es que, si la muestra es normal, la estadística
$$T=R_{XY}\sqrt{{n-2 \over 1-R_{XY}^2}}$$
se distribuye aproximadamente como una variable t con n - 2 grados de libertad, para una muestra adecuadamente grande. Esto puede ser explotado para llegar a las pruebas de correlación.
Entonces, si tenemos un conjunto de datos ${x_1, ..., x_n}$ que contiene n valores y otro conjunto de datos ${y_1, ..., y_n}$ que contiene n valores, entonces la fórmula para la correlación $R_{XY}$ es:
$$R_{XY}={\frac {\sum _{i=1}^{n}(x_{i}-{\bar {x}})(y_{i}-{\bar {y}})}{{\sqrt {\sum _{i=1}^{n}(x_{i}-{\bar {x}})^{2}}}{\sqrt {\sum _{i=1}^{n}(y_{i}-{\bar {y}})^{2}}}}}$$
Donde:
- n es el tamaño de la muestra
- $x_{i},y_{i}$ son las muestras individuales indexadas con i.
- ${\bar {x}}={\frac {1}{n}}\sum _{i=1}^{n}x_{i}$ la media muestral; y análogamente para $\bar {y}$.
- Puede ser probado que $-1\leq R_{XY} \leq 1$, justo como su contraparte probabilistica $\rho_{XY}$
```
# Pearson correlation test between two samples, with and without a shared
# latent component Z that induces correlation.
np.random.seed(5555)
N = 100
Z = np.random.normal(size=N)
x1 = np.random.normal(10, 5, N)
x2 = np.random.normal(30, 8, N)
# Estimate the Pearson correlation coefficient and its p-value.
corr = st.pearsonr(x1 + 50*Z, x2 + 50*Z)   # strongly correlated through Z
corr2 = st.pearsonr(x1, x2)                # independent samples
Rxy = corr[0]
print('Rxy = ', corr[0], ', p-value = ', corr[1])
print('Rxy2 = ', corr2[0], ',p-value2 = ', corr2[1])
# Manual p-value from the t statistic T = R*sqrt((n-2)/(1-R^2)),
# approximately t-distributed with n-2 degrees of freedom under H0.
T = Rxy * np.sqrt((N - 2) / (1 - Rxy**2))
# Fix: a two-sided p-value is 2*P(t_{n-2} > |T|), i.e. twice the survival
# function. The original used st.t.pdf (the density), which is not a
# probability and does not match pearsonr's reported p-value.
p_val = 2 * st.t.sf(np.abs(T), df=N - 2)
print(p_val)
```
El ` valor-p` indica aproximadamente la **probabilidad de que un sistema no correlacionado produzca conjuntos de datos que tengan una correlación de Pearson** al menos tan extrema como la calculada a partir de estos conjuntos de datos. Los valores p no son completamente confiables, pero probablemente sean razonables para conjuntos de datos mayores de 500 aproximadamente.
```
# Plot the two normal densities used in the correlation example above.
x = np.arange(-10, 60, .1)
y1 = st.norm.pdf(x, 10, 5)
y2 = st.norm.pdf(x, 30, 8)
plt.plot(x, y1, x, y2);
```
# Estimación de parámetros
Ilustremos a través de un ejemplo cómo, usando el método de Monte Carlo, podemos estimar parámetros de interés de alguna distribución en particular.
### Ejemplo
Considere una variable aleatoria $X\sim U[a,b]$. Recordemos que
$$E(X)={a+b\over 2},\quad Var(X)={(b-a)^2\over 12}$$
Claramente, la media de la muestra $\bar X$ y la varianza muestral $S^2$ no nos proporcionan estimaciones directas de los parámetros a y b. Sin embargo, podríamos considerar la siguiente forma de transformar las estadísticas de muestra en estimaciones de parámetros. Si sustituimos $\mu$ y $\sigma^2$ con sus estimaciones, encontramos
$$\begin{split}a+b&=2\bar X \\ -a+b&=2\sqrt 3 S\end{split}$$
Resolviendo este sistemas de ecuaciones obtenemos los siguientes estimados
$$\hat a = \bar X -\sqrt 3 S,\quad \hat b = \bar X+ \sqrt 3 S$$
```
# Worked example: method-of-moments estimation of the endpoints of a
# uniform distribution U[a, b]. Solving a+b = 2*mean and b-a = 2*sqrt(3)*s
# gives a_hat = mean - sqrt(3)*s and b_hat = mean + sqrt(3)*s.
a = 5; b = 10  # true parameters
N = 10;  # sample size
X = np.random.uniform(a, b, N)
media = X.mean()
std = X.std()
# Point estimates derived from the moment equations.
half_width = np.sqrt(3) * std
a_hat = media - half_width
b_hat = media + half_width
print('Estimación de a = ', a_hat)
print('Estimación de b = ', b_hat)
```
# Método de máxima verosimilitud
El método de máxima verosimilitud es un enfoque alternativo para encontrar estimadores de forma sistemática. Imagine que una variable aleatoria X tiene un PDF caracterizado por un único parámetro $\theta$, denotado por $f_x(x;\theta)$. Si extraemos una muestra de n i.i.d. variables de esta distribución, la densidad conjunta es solo el producto de PDF's individuales:
$$f_{X_1,\cdots,X_n}(x_1,\cdots,x_n;\theta)=f_X(x_1;\theta)\cdot f_X(x_2;\theta)\cdots f_X(x_n;\theta)=\prod_{i=1}^{n}f_X(x_i;\theta)$$
Si nosotros estamos interesados en estimar $\theta$ dada una muestra $X_i=x_i,i=1,\cdots,n$, podemos construir la función verosimilitud
$$L(\theta)=L(\theta;x_1,\cdots,x_n)=f_{X_1,\cdots,X_n}(x_1,\cdots,x_n;\theta)$$
Esta notación es usada para enfatizar que la función depende del parámetros desconocido $\theta$, para una muestra de observaciones dada. El acrónimo para referirse a este método es MLE 'maximum-likelihood estimator'
La intuición sugiere que deberíamos seleccionar el parámetro $\theta$ que produzca el mayor valor de la función de probabilidad. Por lo tanto lo que debemos de hacer es encontrar el $\theta$ que maximice la expresión anterior y para ellos podemos hacer uso de la derivada para encontrar los puntos críticos de la función $L(\theta)$. Ilustremos el método con el siguiente ejemplo.
## Ejemplo bernoulli
Supongamos $X_1,\dots,X_n\sim Bernoulli(p)$. La función de densidad correspondiente es $p(x;p)=p^{x}(1-p)^{1-x}$, por lo que:
$$
\mathcal{L}(p)=\prod_{i=1}^n p(x_i;p)=\prod_{i=1}^n p^{x_i}(1-p)^{1-x_i}=p^{\sum x_i}(1-p)^{n-\sum x_i}
$$
denotemos $S=\sum x_i$, entonces
$$
\ell(p)=S \log p + (n-S) \log (1-p)
$$
Encontremos su máxima verosimilitud
La derivada con respecto a $ p$ es:

la cuál se anula en:
$$
\hat p = \frac{\sum x_i}{n}
$$
> Referencias:
> - https://ljk.imag.fr/membres/Bernard.Ycart/emel/cours/ep/node12.html
> - https://tereom.github.io/est-computacional-2018/maxima-verosimilitud.html
```
from scipy import optimize

# Bernoulli MLE demo: S successes out of n trials; the analytic MLE is S/n.
n = 20
S = 12
# Log-likelihood and likelihood as functions of theta.
l = lambda theta: S*np.log(theta) + (n - S) * np.log(1-theta)
L = lambda theta: theta**S * (1-theta)**(n-S)
theta = np.arange(0.001, 1, 0.001)
plt.plot(theta, L(theta), label=r'L($\theta$)')
plt.legend()
plt.show()
plt.plot(theta, l(theta), label=r'l($\theta$)')
plt.legend()
plt.show()
# Numerical maximization of L (minimize -L).
# NOTE(review): the starting point 0 lies on the boundary where L(0)=0 —
# an interior start such as 0.5 would be safer for Nelder-Mead.
max_L = optimize.fmin(lambda x: -L(x), 0, disp=False)
print(r'máximo $\theta$ de L($\theta$)=', max_L)
max_teorico = S/n
print('máximo teorico =', max_teorico)
```
### Ejemplo: MLE para la función de distribución exponencial
La PDF de una V.A exponencial esta dada por:
$$f_X(x;\lambda)=\lambda e^{-\lambda x}$$
Encuentre la función de máxima verosimilitud para demostrar que la condición de optimalidad de primer orden conduce a
$$\hat \lambda = {1\over {1\over n}\sum_{i=1}^nX_i}={1 \over \bar X}$$
### Forma de utilizar este método en python
```
################ Adjusting an exponential distribution
np.random.seed(5555)
# Draw 150 samples from an exponential distribution with lambda = 5
# (scipy parameterizes by scale = 1/lambda).
samp = st.expon.rvs(loc=0, scale=1/5, size=150)
param = st.expon.fit(samp)  # distribution fitting (MLE)
# now, param[0] and param[1]=1/lambda are the location and scale
# of the fitted distribution
x = np.linspace(0, 2, 100)
# fitted distribution
pdf_fitted = st.expon.pdf(x, loc=param[0], scale=param[1])
# original distribution
pdf = st.expon.pdf(x, loc=0, scale=1/5)
# Fix: corrected the misspelled plot title ('Expoential').
plt.title('Exponential distribution')
plt.plot(x, pdf_fitted, 'r-', label='Fitted')
plt.plot(x, pdf, 'b-', label='Original')
plt.legend()
# Fix: the `normed` keyword was removed from matplotlib; use `density`,
# matching the normal-distribution cell of this notebook.
plt.hist(samp, 20, density=True, alpha=.8)
plt.show()
print('Lambda fitted = ', 1/param[1])
```
# <font color = red> Tarea
Demostrar **Teoricamente** usando el MLE, que los estimadores de máxima verosimilitud para los parámetros $\mu$ y $\sigma$ de una distribución normal, estan dados por:
$$\hat \mu = {1\over n}\sum_{i=1}^n x_i,\quad \hat \sigma^2={1\over n}\sum_{i=1}^n (x_i-\hat \mu)^2$$
**Recuerde que:** La distribución normal es
$$f(x\mid \mu ,\sigma ^{2})={\frac {1}{\sqrt {2\pi \sigma ^{2}}}}e^{-{\frac {(x-\mu )^{2}}{2\sigma ^{2}}}}$$
### Forma de utilizar este método en python
```
################ Adjusting a normal distribution
np.random.seed(5555)
# Draw samples from a standard normal distribution.
# NOTE(review): the original comment said 150 samples but size=100 is used.
samp = st.norm.rvs(loc=0, scale=1, size=100)
param = st.norm.fit(samp)  # distribution fitting (MLE)
# now, param[0] and param[1] are the mean and
# the standard deviation of the fitted distribution
x = np.linspace(-5, 5, 100)
# fitted distribution
pdf_fitted = st.norm.pdf(x, loc=param[0], scale=param[1])
# original (true) distribution
pdf = st.norm.pdf(x)
plt.title('Normal distribution')
plt.plot(x, pdf_fitted, 'r-', label='Fitted')
plt.plot(x, pdf, 'b-', label='Original')
plt.legend()
plt.hist(samp, 30, density=1, alpha=.8,)
plt.show()
print('Media encontrada usando MLE=', param[0], ', \nStd encontrada usando MLE=', param[1])
# For the normal distribution the MLE coincides with the sample moments:
sigma_hat = np.std(samp)
mu_hat = np.mean(samp)
print('Media obtenida usando la media muestral=', mu_hat, ', \nStd obtenida usando la std muestral=', sigma_hat)
```
<script>
$(document).ready(function(){
$('div.prompt').hide();
$('div.back-to-top').hide();
$('nav#menubar').hide();
$('.breadcrumb').hide();
$('.hidden-print').hide();
});
</script>
<footer id="attribution" style="float:right; color:#808080; background:#fff;">
Created with Jupyter by Oscar David Jaramillo Zuluaga.
</footer>
| github_jupyter |
# Properties of drugs
Find various properties of the individual drugs
1.) ATC
2.) GO Annotations
3.) Disease
4.) KeGG Pathways
5.) SIDER (known effects)
6.) Offsides (known off-label side effects)
7.) TwoSides
8.) Drug Properties (physico-chemical properties)
9.) Enzymes, Transporters and Carriers
10.) Chemical_Gentic Perturbations (MsigDB)
## 1. ATC
Extract information about the anatomical as well as therapeutic group a drug is associated to using DrugBank as main source
```
import networkx as nx

# Map each CLOUD drug to its first- and second-level ATC codes, using the
# DrugBank network as the annotation source.
# NOTE(review): this cell is Python 2 code (print statements, dict.has_key,
# file.next) and relies on the networkx 1.x `.node` attribute API.
# The ATC classification from drugbank (see python file: 2a_Create_DrugBank_Network.ipynb)
DrugBankInfo = nx.read_gml('../data/Drug_Properties/Drugbank_2018-07-03_CLOUD_Only.gml')
print 'DrugBank Network loaded'

# Create output file
fp_out = open('../results/Drug_Properties/CLOUD_to_ATC.csv','w')
fp_out.write('CLOUD,DrugBankID,First_Level_ATCs,Second_Level_ATCs\n')

# Dictionary containing DrugBank to CLOUD identifier
DrugBank_to_CLOUD = {}

# Parse through all CLOUD drugs and check for ATC code annotation in
# drugbank (use first and second level; third level and below too specific)
fp = open('../data/Drug_Properties/CLOUD_DrugBank_PubChem_Chembl.csv','r')
fp.next()  # skip the header line
for line in fp:
    tmp = line.strip().split(',')
    DrugBank_to_CLOUD[tmp[1]] = tmp[0]
    first_level = set()
    fist_second_level = set()
    if DrugBankInfo.has_node(tmp[1]):
        if DrugBankInfo.node[tmp[1]].has_key('ATCcode'):
            atc_codes = DrugBankInfo.node[tmp[1]]['ATCcode'].split(',')
            if '' in atc_codes:
                atc_codes.remove('')
            for atc in atc_codes:
                atc = atc.strip()
                first_level.add(atc[0])          # anatomical main group (1 letter)
                fist_second_level.add(atc[0:3])  # therapeutic subgroup (3 chars)
    fp_out.write(tmp[0]+','+tmp[1]+','+';'.join(first_level)+','+';'.join(fist_second_level)+'\n')
fp.close()
fp_out.close()
print 'Finished ATC annotations'
```
## 2. GO Annotations
Extract GO annotations from GeneOntology for the targets of the individual drugs. Not only leaf but also upstream term information is collected for the three branches (i) Function, (ii) Component, (iii) Process
```
# Use our inhouse database and the corresponding python file to create the
# upward ontology for every leaf GO term (all get included).
# Download (http://www.geneontology.org/page/downloads)
# NOTE(review): Python 2 code; `gene2terms_addupstream` is a project-local module.
import gene2terms_addupstream as GO

# Include all three GO branches
go_branches = ['Function','Process','Component']

# Find all the targets for the individual cloud drugs
cloud_targets = {}
fp = open('../data/Drug_Properties/CLOUD_All_Targets.csv', 'r')
fp.next()  # skip header
for line in fp:
    tmp = line.strip().split(',')
    cloud_targets[tmp[0]] = tmp[2].split(';')  # targets are ';'-separated
fp.close()

# Contains all CLOUD identifiers (sorted for deterministic output order)
all_clouds = cloud_targets.keys()
all_clouds.sort()

# Walk the GO branches and find GO terms for a specific drug via:
# Drug --> Targets --> Associated GO-Terms (leaf plus upstream terms)
drug_to_GO = {}
for go_branch in go_branches:
    print go_branch
    drug_to_GO[go_branch] = {}
    GO_Association_UP, GO_genes_annotation = GO.getAllGene_Annotation(go_branch)
    for drug in all_clouds:
        drug_to_GO[go_branch][drug] = []
        for target in cloud_targets[drug]:
            drug_to_GO[go_branch][drug].extend(GO_Association_UP[target])
        drug_to_GO[go_branch][drug] = list(set(drug_to_GO[go_branch][drug]))  # deduplicate

# Save CLOUD drug to GO term annotations
fp_out = open('../results/Drug_Properties/CLOUD_to_GOterms.csv','w')
fp_out.write('CLOUD,GO_Function,GO_Process,GO_Component\n')
for cloud in all_clouds:
    fp_out.write(cloud+','+';'.join(drug_to_GO['Function'][cloud])+','+';'.join(drug_to_GO['Process'][cloud])+','+';'.join(drug_to_GO['Component'][cloud])+'\n')
fp_out.close()
print 'Finished GO'
```
## 3. Diseases
Extract Disesase annotations from DiseaseOntology for the targets of the individual drugs. Not only leaf but also upstream term information is collected.
```
# Download from http://www.disgenet.org/web/DisGeNET/menu/downloads and http://disease-ontology.org/downloads/
# Again use inhouse database (manually curated), and corresponding scripts
# Get all cloud drug targets
fp = open('../data/Drug_Properties/CLOUD_All_Targets.csv', 'r')
fp.next()
for line in fp:
tmp = line.strip().split(',')
cloud_targets[tmp[0]] = tmp[2].split(';')
fp.close()
all_clouds = cloud_targets.keys()
all_clouds.sort()
#Extrate the upward disease ontology (find all disease associated leaf plus upwards ontology terms for a specific gene)
Disease_Association_UP,d_diseases_annotation = GO.getAllGene_Disease_Annotation()
all_proteins = Disease_Association_UP.keys()
all_proteins = [int(x) for x in all_proteins]
all_proteins.sort()
fp_out = open('../results/Drug_Properties/Gene_to_Disease.csv','w')
fp_out.write('Gene,Disease_ID\n')
for protein in all_proteins:
fp_out.write(str(protein)+','+';'.join(Disease_Association_UP[str(protein)])+'\n')
fp_out.close()
break
#associated drug with diseaes
drug_to_Diseases = {}
for drug in all_clouds:
drug_to_Diseases[drug] = []
for target in cloud_targets[drug]:
drug_to_Diseases[drug].extend(Disease_Association_UP[target])
drug_to_Diseases[drug] = list(set(drug_to_Diseases[drug]))
fp_out = open('../results/Drug_Properties/CLOUD_to_Disease.csv','w')
fp_out.write('CLOUD,Disease_ID\n')
for cloud in all_clouds:
fp_out.write(cloud+','+';'.join(drug_to_Diseases[cloud])+'\n')
fp_out.close()
print 'Finished Diseases'
```
## 4. KeGG Pathways
Extract information about pathways being annotated to (i) the drug itself, as well as (ii) pathways associated to the target of drugs
```
'''
Extract direct drug <--> pathway annotations
'''
# NOTE(review): Python 2 code; queries KEGG over the network through
# Biopython's REST client, so this cell needs connectivity and is slow.
# Get KeGG pathways via the biopython.KEGG REST
from Bio.KEGG import REST

# Find the KeGG identifiers via the drugbank annotations
DrugBankInfo = nx.read_gml('../data/Drug_Properties/Drugbank_2018-07-03_CLOUD_Only.gml')
print 'DrugBank Network loaded'

# Parse through all CLOUD targets
fp = open('../data/Drug_Properties/CLOUD_DrugBank_PubChem_Chembl.csv','r')
fp.next()  # skip header
drug_to_pathways = {}
all_targeted_Pathways = set()
all_clouds = []
kegg_IDs = {}
# Find the KeGG Drug page and extract PATHWAY information (direct drug to pathway)
for line in fp:
    tmp = line.strip().split(',')
    drug_to_pathways[tmp[0]] = []
    all_clouds.append(tmp[0])
    if DrugBankInfo.has_node(tmp[1]):
        if DrugBankInfo.node[tmp[1]].has_key('KEGGDrug'):
            kegg_ID = DrugBankInfo.node[tmp[1]]['KEGGDrug']
            kegg_IDs[tmp[0]] = kegg_ID
            drug_file = REST.kegg_get(kegg_ID).read()
            for line in drug_file.rstrip().split("\n"):
                section = line[:12].strip() # section names are within 12 columns
                if not section == "":
                    current_section = section
                if current_section == "PATHWAY":
                    tmp2 = line[12:].split('  ')
                    pathwayID = tmp2[0].split('(')[0]
                    drug_to_pathways[tmp[0]].append(pathwayID)
                    all_targeted_Pathways.add(pathwayID)
print 'Number of pathways directed targeted: %d' %len(all_targeted_Pathways)
all_clouds.sort()

'''
Additonally to finding the direct annotations, also find drug <--> targets <--> pathways associated to those target annotations
'''
# Get all targets
cloud_targets = {}
fp = open('../data/Drug_Properties/CLOUD_All_Targets.csv', 'r')
fp.next()  # skip header
for line in fp:
    tmp = line.strip().split(',')
    cloud_targets[tmp[0]] = tmp[2].split(';')
fp.close()

# Find human pathways
human_pathways = REST.kegg_list("pathway", "hsa").read()

# Get all human pathways, and add the dictionary
pathways = {}
for line in human_pathways.rstrip().split("\n"):
    entry, description = line.split("\t")
    pathways[entry] = {'Description' :description, 'IDs':None,'Symbols':None}
print len(pathways)

# Get the genes for pathways and add them to a list
for pathway in pathways.keys():
    pathway_file = REST.kegg_get(pathway).read() # query and read each pathway
    # iterate through each KEGG pathway file, keeping track of which section
    # of the file we're in, only read the gene in each pathway
    current_section = None
    genesSymbols = []
    genesIDs = []
    for line in pathway_file.rstrip().split("\n"):
        section = line[:12].strip() # section names are within 12 columns
        if not section == "":
            current_section = section
        if current_section == "GENE":
            if ';' in line:
                gene_identifiers, gene_description = line[12:].split("; ")
                gene_id, gene_symbol = gene_identifiers.split()
                if not gene_id in genesIDs:
                    genesIDs.append(gene_id)
                    genesSymbols.append(gene_symbol)
    # NOTE(review): this replaces the Description/IDs/Symbols dict created
    # above with the plain gene-ID list — confirm that is intended.
    pathways[pathway] = genesIDs

# Assign pathways to drugs through shared targets
via_target_assigned_Pathways = {}
second_assigned_pathways = set()
for cloud in all_clouds:
    via_target_assigned_Pathways[cloud] = []
    targets = cloud_targets[cloud]
    for p in pathways:
        if len(set(targets).intersection(set(pathways[p]))) > 0:
            via_target_assigned_Pathways[cloud].append(p)
            second_assigned_pathways.add(p)
print 'Number of pathways indirected targeted: %d' %len(second_assigned_pathways)

# Save both the direct and the via-target pathway assignments
fp_out = open('../results/Drug_Properties/CLOUD_to_KeGG_Pathways.csv','w')
fp_out.write('CLOUD,KeGG_DrugID,KeGG_Assigned_Pathways,Via_Target_Assigned\n')
for cloud in all_clouds:
    if kegg_IDs.has_key(cloud):
        fp_out.write(cloud+','+kegg_IDs[cloud]+','+';'.join(drug_to_pathways[cloud])+','+';'.join(via_target_assigned_Pathways[cloud])+'\n')
    else:
        fp_out.write(cloud+',,'+';'.join(drug_to_pathways[cloud])+','+';'.join(via_target_assigned_Pathways[cloud])+'\n')
fp_out.close()
print 'Finished Pathways'
```
## 5. SIDER
Extract information about known adverse reaction of drugs using the Sider database
```
def ATC_To_PubChem(isOffsides = 'None'):
    '''
    Sider offers a direct conversion from ATC code to the internally used
    PubChem ID, which gives better coverage than DrugBank alone.

    Download: http://sideeffects.embl.de/download/ [Nov. 2018] drug_atc.tsv file
    (here named: Pubchem_To_ATC)

    isOffsides -- pass 'offsides' to overwrite the first digit of the PubChem
                  ID with '0', matching the identifier format of the Offsides
                  files.
    Returns (cloud_to_Pubchem, PubChem_to_cloud, found_PubChems); the drug
    keys/values are DrugBank identifiers.
    '''
    # NOTE(review): Python 2 code (dict.has_key, networkx 1.x .node API).
    dic_ATc_To_Pubchem = {}
    fp = open('../data/Drug_Properties/Pubchem_To_ATC.tsv')
    for line in fp:
        tmp = line.strip().split('\t')
        dic_ATc_To_Pubchem[tmp[1]] = tmp[0]
    cloud_drugs = nx.read_gml('../data/Drug_Properties/Drugbank_2018-07-03_CLOUD_Only.gml')
    # Find pubchem identifiers via ATC identifiers (as pubchem identifiers
    # sometimes not unique, neither SID nor CID)
    cloud_to_Pubchem = {}
    PubChem_to_cloud = {}
    found_PubChems = []
    for drugBankID in cloud_drugs.nodes():
        if cloud_drugs.node[drugBankID].has_key('ATCcode'):
            all_codes = [x.strip() for x in cloud_drugs.node[drugBankID]['ATCcode'].split(',') if x != '']
            for code in all_codes:
                if dic_ATc_To_Pubchem.has_key(code):
                    pubChemID = dic_ATc_To_Pubchem[code][3:]  # strip the 3-char 'CID'/'SID' prefix
                    if isOffsides == 'offsides':
                        # Offsides identifiers carry a leading '0' digit.
                        tmp = list(pubChemID)
                        tmp[0] = '0'
                        pubChemID = ''.join(tmp)
                    cloud_to_Pubchem[drugBankID] = pubChemID
                    PubChem_to_cloud[pubChemID] = drugBankID
                    found_PubChems.append(pubChemID)
    return cloud_to_Pubchem, PubChem_to_cloud,found_PubChems
'''
Download SIDER.tsv from http://sideeffects.embl.de/download/ [Nov. 2018]
'''
# Map CLOUD drugs to their known side effects (SIDER), resolving drugs to
# SIDER's PubChem identifiers both via ATC codes and via DrugBank.
# NOTE(review): Python 2 code.
# Get the different identifiers of a drug
DrugBank_To_CLOUD = {}
CLOUD_To_DrugBank = {}
fp = open('../data/Drug_Properties/CLOUD_DrugBank_PubChem_Chembl.csv')
fp.next()  # skip header
all_clouds = []
for line in fp:
    tmp = line.strip().split(',')
    all_clouds.append(tmp[0])
    DrugBank_To_CLOUD[tmp[1]] = tmp[0]
    CLOUD_To_DrugBank[tmp[0]] = tmp[1]
fp.close()
all_clouds.sort()

# Extract pubchem identifier via ATC codes
DrugBank_to_Pubchem_viaATC, PubChem_to_cloud_viaATC,found_PubChems_viaATC = ATC_To_PubChem()

# Further use drugbank to find additional pubchem identifiers for the cloud drugs
cloud_drugs = nx.read_gml('../data/Drug_Properties/Drugbank_2018-07-03_CLOUD_Only.gml')

# Associate cloud with the different pubchem identifiers
pubchemCompound_To_DrugBank = {}
DrugBank_to_PubChem = {}
pubchemCompound = []
pubchemSubstance = []
for node in cloud_drugs.nodes():
    if cloud_drugs.node[node].has_key('PubChemCompound'):
        pubchemCompound.append(cloud_drugs.node[node]['PubChemCompound'])
        pubchemCompound_To_DrugBank[cloud_drugs.node[node]['PubChemCompound']] = node
        DrugBank_to_PubChem[node] = cloud_drugs.node[node]['PubChemCompound']

# Combine both dictionaries together (ATC-derived IDs overwrite on clash)
for key in DrugBank_to_Pubchem_viaATC:
    DrugBank_to_PubChem[key] = DrugBank_to_Pubchem_viaATC[key]

# Check the SIDER database for side effects of a given drug (once via the
# ATC-to-pubchem identifiers; once via drugbank-to-pubchem). Both ID columns
# of the file are tried — presumably the flat and stereo compound IDs; the
# leading 3-char prefix is stripped.
compund_sideEffect = {}
fp = open('../data/Drug_Properties/SIDER.tsv','r')
for line in fp:
    tmp = line.strip().split('\t')
    id1 = tmp[1][3:]
    id2 = tmp[2][3:]
    if id1 in found_PubChems_viaATC:
        if compund_sideEffect.has_key(PubChem_to_cloud_viaATC[id1]):
            compund_sideEffect[PubChem_to_cloud_viaATC[id1]].append(tmp[3])
        else:
            compund_sideEffect[PubChem_to_cloud_viaATC[id1]] = [tmp[3]]
    if id1 in pubchemCompound:
        if compund_sideEffect.has_key(pubchemCompound_To_DrugBank[id1]):
            compund_sideEffect[pubchemCompound_To_DrugBank[id1]].append(tmp[3])
        else:
            compund_sideEffect[pubchemCompound_To_DrugBank[id1]] = [tmp[3]]
    if id2 in found_PubChems_viaATC:
        if compund_sideEffect.has_key(PubChem_to_cloud_viaATC[id2]):
            compund_sideEffect[PubChem_to_cloud_viaATC[id2]].append(tmp[3])
        else:
            compund_sideEffect[PubChem_to_cloud_viaATC[id2]] = [tmp[3]]
    if id2 in pubchemCompound:
        if compund_sideEffect.has_key(pubchemCompound_To_DrugBank[id2]):
            compund_sideEffect[pubchemCompound_To_DrugBank[id2]].append(tmp[3])
        else:
            compund_sideEffect[pubchemCompound_To_DrugBank[id2]] = [tmp[3]]

##
# Save results
##
fp = open('../results/Drug_Properties/CLOUD_to_SIDER.csv','w')
fp.write('CLOUD,PubChem,SIDER_Ids\n')
for key in all_clouds:
    if compund_sideEffect.has_key(CLOUD_To_DrugBank[key]):
        fp.write(key +','+DrugBank_to_PubChem[CLOUD_To_DrugBank[key]]+','+';'.join(list(set(compund_sideEffect[CLOUD_To_DrugBank[key]])))+'\n')
    elif DrugBank_to_PubChem.has_key(CLOUD_To_DrugBank[key]):
        fp.write(key +','+DrugBank_to_PubChem[CLOUD_To_DrugBank[key]]+',' + '\n')
    else:
        fp.write(key + ',,\n')
fp.close()
print 'Finish with SIDER'
```
## 6. Offsides
Extract information about known adverse reactions of drugs using the Offsides database (Tatonetti)
```
'''
Download Offsides.tsv from http://tatonettilab.org/resources/tatonetti-stm.html [Nov. 2018]
'''
# Map CLOUD drugs to their Offsides (off-label) side effects.
# NOTE(review): Python 2 code.
# Get the different identifiers of a drug
DrugBank_To_CLOUD = {}
CLOUD_To_DrugBank = {}
fp = open('../data/Drug_Properties/CLOUD_DrugBank_PubChem_Chembl.csv')
fp.next()  # skip header
for line in fp:
    tmp = line.strip().split(',')
    DrugBank_To_CLOUD[tmp[1]] = tmp[0]
    CLOUD_To_DrugBank[tmp[0]] = tmp[1]
fp.close()

# Extract pubchem identifier via ATC codes ('offsides' zeroes the first digit)
DrugBank_to_Pubchem_viaATC, PubChem_to_cloud_viaATC, found_PubChems_viaATC = ATC_To_PubChem('offsides')

# Further use drugbank to find additional pubchem identifiers for the cloud drugs
cloud_drugs = nx.read_gml('../data/Drug_Properties/Drugbank_2018-07-03_CLOUD_Only.gml')

# Associate cloud with the different pubchem identifiers
# (zfill(9) matches the zero-padded ID format used by the Offsides file)
pubchemCompound_To_DrugBank = {}
DrugBank_to_PubChem = {}
pubchemCompound = []
pubchemSubstance = []
for node in cloud_drugs.nodes():
    if cloud_drugs.node[node].has_key('PubChemCompound'):
        pubchemCompound.append(cloud_drugs.node[node]['PubChemCompound'].zfill(9))
        pubchemCompound_To_DrugBank[cloud_drugs.node[node]['PubChemCompound'].zfill(9)] = node
        DrugBank_to_PubChem[node] = cloud_drugs.node[node]['PubChemCompound'].zfill(9)

# Combine both dictionaries together
for key in DrugBank_to_Pubchem_viaATC:
    DrugBank_to_PubChem[key] = DrugBank_to_Pubchem_viaATC[key]

# Check the OFFSIDES database for side effects of a given drug (once via the
# ATC-to-pubchem identifiers; once via drugbank-to-pubchem)
compund_sideEffect = {}
fp = open('../data/Drug_Properties/Offsides.tsv', 'r')
fp.next()  # skip header
for line in fp:
    tmp = line.strip().split('\t')
    id1 = tmp[0].replace('"','')[3:]   # strip quotes and the 3-char ID prefix
    sideEffect = tmp[2].replace('"','')
    #print id1
    if id1 in found_PubChems_viaATC:
        if compund_sideEffect.has_key(PubChem_to_cloud_viaATC[id1]):
            compund_sideEffect[PubChem_to_cloud_viaATC[id1]].append(sideEffect)
        else:
            compund_sideEffect[PubChem_to_cloud_viaATC[id1]] = [sideEffect]
    # NOTE(review): progress print inside the parsing loop — looks like a
    # debugging leftover; confirm whether it should stay.
    print len(compund_sideEffect.keys())
    # print compund_sideEffect.keys()
    if id1 in pubchemCompound:
        if compund_sideEffect.has_key(pubchemCompound_To_DrugBank[id1]):
            compund_sideEffect[pubchemCompound_To_DrugBank[id1]].append(sideEffect)
        else:
            compund_sideEffect[pubchemCompound_To_DrugBank[id1]] = [sideEffect]
    print len(compund_sideEffect.keys())
    # print compund_sideEffect.keys()

# Save results
fp = open('../results/Drug_Properties/CLOUD_to_Offsides.csv', 'w')
fp.write('CLOUD,PubChem,OFFSIDE_Ids\n')
# NOTE(review): `all_clouds` is defined in the preceding SIDER cell, not
# here — this cell depends on running that cell first.
for key in all_clouds:
    if compund_sideEffect.has_key(CLOUD_To_DrugBank[key]):
        fp.write(key +','+DrugBank_to_PubChem[CLOUD_To_DrugBank[key]]+','+';'.join(list(set(compund_sideEffect[CLOUD_To_DrugBank[key]])))+'\n')
    elif DrugBank_to_PubChem.has_key(CLOUD_To_DrugBank[key]):
        fp.write(key + ',' +DrugBank_to_PubChem[CLOUD_To_DrugBank[key]]+',' + '\n')
    else:
        fp.write(key + ',,\n')
fp.close()
print 'Finish with OFFSIDES'
```
## 7. TwoSides
Extract information about side effects for drug combinations using TwoSides (Tatonetti)
```
'''
Download Offsides.tsv from http://tatonettilab.org/resources/tatonetti-stm.html [Nov. 2018]
'''
#get the different identifiers of a drug
DrugBank_To_CLOUD = {}
CLOUD_To_DrugBank = {}
fp = open('../data/Drug_Properties/CLOUD_DrugBank_PubChem_Chembl.csv')
fp.next()
for line in fp:
tmp = line.strip().split(',')
DrugBank_To_CLOUD[tmp[1]] = tmp[0]
CLOUD_To_DrugBank[tmp[0]] = tmp[1]
fp.close()
# Extract PubChem identifiers via ATC codes.
DrugBank_to_Pubchem_viaATC, PubChem_to_cloud_viaATC, found_PubChems_viaATC = ATC_To_PubChem('offsides')

# Further use DrugBank to find additional PubChem identifiers for the CLOUD drugs.
cloud_drugs = nx.read_gml('../data/Drug_Properties/Drugbank_2018-07-03_CLOUD_Only.gml')
pubchemCompound_To_DrugBank = {}
DrugBank_to_PubChem = {}
pubchemCompound = []
pubchemSubstance = []  # NOTE(review): never filled or read here - kept for compatibility
for node in cloud_drugs.nodes():
    attrs = cloud_drugs.nodes[node]  # G.node was removed in networkx 2.4
    if 'PubChemCompound' in attrs:
        # Zero-pad to 9 digits so the ids match the TwoSides/OFFSIDES format.
        cid = attrs['PubChemCompound'].zfill(9)
        pubchemCompound.append(cid)
        pubchemCompound_To_DrugBank[cid] = node
        DrugBank_to_PubChem[node] = cid

# Combine both dictionaries; ATC-derived ids take precedence.
for key in DrugBank_to_Pubchem_viaATC:
    DrugBank_to_PubChem[key] = DrugBank_to_Pubchem_viaATC[key]
# Check the TwoSides database for side effects of drug *pairs* (ids matched
# once via the ATC->PubChem identifiers and once via DrugBank->PubChem).
TwoSide_Network = nx.Graph()
with open('../data/Drug_Properties/TwoSides.tsv', 'r') as fp:
    next(fp)  # skip header
    for line in fp:
        tmp = line.strip().split('\t')
        id1 = tmp[0][3:]  # strip 'CID' prefix
        id2 = tmp[1][3:]
        sideEffect = tmp[4]
        found_id1 = None
        found_id2 = None
        if id1 in found_PubChems_viaATC:
            found_id1 = PubChem_to_cloud_viaATC[id1]
        elif id1 in pubchemCompound:
            found_id1 = pubchemCompound_To_DrugBank[id1]
        if found_id1 is not None:
            if id2 in found_PubChems_viaATC:
                found_id2 = PubChem_to_cloud_viaATC[id2]
            elif id2 in pubchemCompound:
                found_id2 = pubchemCompound_To_DrugBank[id2]
            if found_id2 is not None:
                if not TwoSide_Network.has_edge(found_id1, found_id2):
                    TwoSide_Network.add_edge(found_id1, found_id2, SideEffect=sideEffect)
                else:
                    # Accumulate additional side effects on the existing edge.
                    TwoSide_Network[found_id1][found_id2]['SideEffect'] = \
                        TwoSide_Network[found_id1][found_id2]['SideEffect'] + ',' + sideEffect
nx.write_gml(TwoSide_Network, '../results/Drug_Properties/TwoSide_CLOUDs.gml')
print('Finish with TwoSides')
```
## 8. Drug Properties
Extract Physicochemical properties of the drugs e.g. Lipinski Rule of 5, LogS, LogP etc. Use DrugBank as main source of information
```
'''
Physicochemical properties (calculated) offered by DrugBank
'''
# Continuous (numeric) physicochemical properties to extract per drug; the
# order defines the column order of the output file.
Continuesfeatures = ['Polarizability', 'logS', 'logP', 'NumberofRings', 'PhysiologicalCharge',
                     'PolarSurfaceAreaPSA', 'pKastrongestbasic', 'pKastrongestacidic',
                     'Refractivity', 'MonoisotopicWeight', 'HBondDonorCount',
                     'RotatableBondCount', 'WaterSolubility']
# Discrete (categorical) properties to extract per drug.
discreteFeatures = ['DrugSubClass', 'DrugClass', 'Family']
# DrugBank file with per-drug physicochemical annotations.
DrugBankInfo = nx.read_gml('../data/Drug_Properties/Drugbank_2018-07-03_CLOUD_Only.gml')
print('DrugBank Network loaded')

# Input mapping file (the original comment said "output file" - it is the input).
# Parse through all CLOUD drugs and collect their physicochemical properties;
# missing values are stored as the string 'None'.
CLOUD_Chemical_properties = {}
all_clouds = []
kegg_IDs = {}
with open('../data/Drug_Properties/CLOUD_DrugBank_PubChem_Chembl.csv', 'r') as fp:
    next(fp)  # skip header
    for line in fp:
        tmp = line.strip().split(',')
        cloud, drugbank_id = tmp[0], tmp[1]
        all_clouds.append(cloud)
        props = {}
        CLOUD_Chemical_properties[cloud] = props
        if DrugBankInfo.has_node(drugbank_id):
            props['DrugBankID'] = drugbank_id
            node_attrs = DrugBankInfo.nodes[drugbank_id]  # G.node removed in networkx 2.4
            for feat in Continuesfeatures:
                props[feat] = str(node_attrs[feat]) if feat in node_attrs else 'None'
            for feat in discreteFeatures:
                props[feat] = str(node_attrs[feat]) if feat in node_attrs else 'None'
        else:
            # Drug not present in DrugBank: everything is unknown.
            props['DrugBankID'] = 'None'
            for feat in Continuesfeatures:
                props[feat] = 'None'
            for feat in discreteFeatures:
                props[feat] = 'None'
##
# Save results
##
# One TSV row per CLOUD drug: id, DrugBank id, then all continuous and
# discrete properties in the list order defined above.
with open('../results/Drug_Properties/CLOUD_to_ChemicalProperties.tsv', 'w') as fp:
    fp.write('CLOUD\tDrugBankID\t')
    fp.write('\t'.join(Continuesfeatures) + '\t' + '\t'.join(discreteFeatures) + '\n')
    for cloud in all_clouds:
        fp.write(cloud + '\t' + CLOUD_Chemical_properties[cloud]['DrugBankID'])
        for feat in Continuesfeatures:
            fp.write('\t' + CLOUD_Chemical_properties[cloud][feat])
        for feat in discreteFeatures:
            fp.write('\t' + CLOUD_Chemical_properties[cloud][feat])
        fp.write('\n')
print('Finish with Chemical Properties')
```
## 9. Targets, Enzymes, Transporters and Carriers
Split the full list of targets into targets, enzymes, transporters and carriers.
Therefore use the DrugBank annotations of what a target, transporter, carrier and enzyme is. Go through all DrugBank targets and take the corresponding annotations.
Then go through the CLOUD targets and assign the targets accordingly. If DrugBank does not show any annotation, the gene is assumed to be a target.
Enzymes: e.g. CYP3A1
Transporter: e.g. MDR5
Carriers: e.g. ALB
```
DrugBankInfo = nx.read_gml('../data/Drug_Properties/Drugbank_2018-07-03.gml')
print('Full DrugBank Network loaded')

annotated_enzyme_symbols = set()
annotated_transporters_symbols = set()
annotated_carriers_symbols = set()

# Go through all DrugBank drugs and collect the gene symbols annotated as
# enzymes, transporters or carriers. Entries are comma-separated strings of
# the form 'SYMBOL_...' - only the symbol before the underscore is kept.
_categories = (
    ('Enzymes', annotated_enzyme_symbols),
    ('Transporters', annotated_transporters_symbols),
    ('Carriers', annotated_carriers_symbols),
)
for drug in list(DrugBankInfo.nodes()):
    attrs = DrugBankInfo.nodes[drug]  # G.node was removed in networkx 2.4
    for attr_name, symbol_set in _categories:
        if attr_name in attrs:
            for entry in attrs[attr_name].strip().split(','):
                if entry != '':
                    symbol_set.add(entry.split('_')[0])

# Print the number of found enzymes, transporters and carriers.
print(len(annotated_enzyme_symbols))
print(len(annotated_transporters_symbols))
print(len(annotated_carriers_symbols))
'''
Parse the enzyme, carriers and transporter SYMBOLS to EntrezIDs using mygeneinfo
'''
import mygene

mg = mygene.MyGeneInfo()

def _symbols_to_entrez(symbols):
    """Query mygene.info for *symbols*; return (symbols, entrez_ids) for hits
    that have an Entrez gene id. The two lists are index-aligned."""
    found_symbols = []
    found_ids = []
    for result in mg.querymany(symbols, scope='symbol', species='human', verbose=False):
        if 'entrezgene' in result:
            found_symbols.append(result['symbol'])
            found_ids.append(str(result['_id']))
    return found_symbols, found_ids

# The same query/filter logic was copy-pasted three times; one helper suffices.
final_annotated_enzyme_symbols, final_annotated_enzyme_IDs = _symbols_to_entrez(annotated_enzyme_symbols)
final_annotated_transporters_symbols, final_annotated_transporters_IDs = _symbols_to_entrez(annotated_transporters_symbols)
final_annotated_carriers_symbols, final_annotated_carriers_IDs = _symbols_to_entrez(annotated_carriers_symbols)

print(len(final_annotated_enzyme_IDs))
print(len(final_annotated_transporters_IDs))
print(len(final_annotated_carriers_IDs))
'''
Create an output file with the various transporters/enzymes/targets etc. being split.
'''
# Get the DrugBank-annotated main targets per CLOUD drug.
cloud_DrugBanktargets = {}
with open('../data/Drug_Properties/CLOUD_DrugBank_Targets_ONLY.csv', 'r') as fp:
    next(fp)  # skip header
    for line in fp:
        tmp = line.strip().split(',')
        cloud_DrugBanktargets[tmp[0]] = tmp[2].split(';')

# Get all targets associated with the individual CLOUDs (including CYPs etc.).
cloud_targets = {}
with open('../data/Drug_Properties/CLOUD_All_Targets.csv', 'r') as fp:
    next(fp)  # skip header
    for line in fp:
        tmp = line.strip().split(',')
        cloud_targets[tmp[0]] = tmp[2].split(';')

# Sorted list of all CLOUD identifiers. (Python 3: dict.keys() returns a view
# with no .sort(); sorted() replaces the old keys()/sort() pair.)
all_clouds = sorted(cloud_targets)
# Create output file listing, per CLOUD drug, its targets split into the four
# DrugBank categories (targets / transporters / enzymes / carriers).
# Sets give O(1) membership tests in the per-target loop below.
enzyme_ids = set(final_annotated_enzyme_IDs)
transporter_ids = set(final_annotated_transporters_IDs)
carrier_ids = set(final_annotated_carriers_IDs)

# Per-drug counts of CLOUD drugs.
targets_number = []
enzymes_number = []
transporters_number = []
carriers_number = []
# Distinct genes per category over all CLOUD drugs.
different_targets = set()
different_enzymes = set()
different_transporters = set()
different_carriers = set()
# Total (non-distinct) number of targets found.
all_targets = 0

with open('../results/Drug_Properties/CLOUD_to_TargetsSplit.csv', 'w') as fp_out:
    fp_out.write('CLOUD,Targets,Transporters,Enzymes,Carriers\n')
    for cloud in all_clouds:
        targets = []
        enzymes = []
        carriers = []
        transporters = []
        for target in cloud_targets[cloud]:
            # A gene explicitly annotated in DrugBank as a target of THIS drug
            # is always a target (CYPs and friends can be main targets too).
            if target in cloud_DrugBanktargets[cloud]:
                targets.append(target)
            else:
                # Otherwise bin it by the DrugBank-wide annotations; a gene may
                # fall into several categories at once.
                binned = False
                if target in enzyme_ids:
                    enzymes.append(target)
                    binned = True
                if target in transporter_ids:
                    transporters.append(target)
                    binned = True
                if target in carrier_ids:
                    carriers.append(target)
                    binned = True
                if not binned:
                    # No annotation at all -> assume it is a target.
                    targets.append(target)
        fp_out.write(cloud + ',' + ';'.join(targets) + ',' + ';'.join(transporters) +
                     ',' + ';'.join(enzymes) + ',' + ';'.join(carriers) + '\n')
        # Accumulate the statistics used by the overview cell below.
        all_targets += len(targets)
        targets_number.append(len(targets))
        enzymes_number.append(len(enzymes))
        transporters_number.append(len(transporters))
        carriers_number.append(len(carriers))
        different_targets.update(targets)
        different_enzymes.update(enzymes)
        different_transporters.update(transporters)
        different_carriers.update(carriers)
'''
CREATE OUTPUT OVERVIEW OVER DRUG TARGETS/ANNOTATIONS
'''
import numpy as np
from matplotlib import pylab as plt

print('Mean number of targets: %.2f' % np.mean(targets_number))
print('Median number of targets: %.2f' % np.median(targets_number))
print('Mean number of enzymes: %.2f' % np.mean(enzymes_number))
print('Mean number of carriers: %.2f' % np.mean(carriers_number))
print('Mean number of transporters: %.2f' % np.mean(transporters_number))
print('Total number of targets: %d' % all_targets)
print('Number of distinct targets: %d' % len(different_targets))
print('Number of distinct enzymes: %d' % len(different_enzymes))
print('Number of distinct carriers: %d' % len(different_carriers))
print('Number of distinct transporters: %d' % len(different_transporters))

# Histogram of per-drug target counts; the dashed line marks the mean.
plt.hist(targets_number, bins=22, color='#40B9D4')
plt.axvline(np.mean(targets_number), ls='--', color='grey')
plt.savefig('../results/Drug_Properties/CLOUD_TargetsFiltered.pdf')
plt.close()
```
## 10. Chemical Genetic perturbations
Use the msigDB Chemical_Genetic_Perturbations set to annotate the CLOUD targets accordingly
```
'''
Download from http://software.broadinstitute.org/gsea/msigdb/collections.jsp#C5 [December 17. 2018]
'''
# Get all CLOUD targets.
cloud_targets = {}
with open('../data/Drug_Properties/CLOUD_All_Targets.csv', 'r') as fp:
    next(fp)  # skip header
    for line in fp:
        tmp = line.strip().split(',')
        cloud_targets[tmp[0]] = tmp[2].split(';')

# Map gene -> list of perturbation names (one gene can belong to several
# perturbations). GMT format: name, description, gene1, gene2, ...
gene_to_perturbation = {}
with open('../data/Drug_Properties/Msig_ChemGen_Perturbation.gmt', 'r') as fp:
    for line in fp:
        tmp = line.strip().split('\t')
        for gene in tmp[2:]:
            gene_to_perturbation.setdefault(gene, []).append(tmp[0])
# Find CLOUD associations via CLOUD -> targets -> perturbations of those targets.
# NOTE(review): 'all_clouds' is defined in the previous notebook section -
# confirm this cell runs after it.
with open('../results/Drug_Properties/CLOUD_to_Perturbations.csv', 'w') as fp_out:
    fp_out.write('CLOUD,Perturbations\n')
    for cloud in all_clouds:
        perturbations = []
        for gene in cloud_targets[cloud]:
            if gene in gene_to_perturbation:
                perturbations.extend(gene_to_perturbation[gene])
        fp_out.write(cloud + ',' + ';'.join(perturbations) + '\n')
```
| github_jupyter |
# Graded Programming Assignment
In this assignment, you will re-use the unsupervised anomaly detection algorithm but turn it into a simpler feed forward neural network for supervised classification.
You are training the neural network from healthy and broken samples and at later stage hook it up to a message queue for real-time anomaly detection.
We've provided a skeleton for you containing all the necessary code but left out some important parts indicated with ### your code here ###
After you’ve completed the implementation please submit it to the autograder
```
!pip install tensorflow==2.2.0rc0
import tensorflow as tf
if not tf.__version__ == '2.2.0-rc0':
print(tf.__version__)
raise ValueError('please upgrade to TensorFlow 2.2.0-rc0, or restart your Kernel (Kernel->Restart & Clear Output)')
```
Now we import all the dependencies
```
import numpy as np
from numpy import concatenate
from matplotlib import pyplot
from pandas import read_csv
from pandas import DataFrame
from pandas import concat
import sklearn
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.layers import LSTM
from tensorflow.keras.callbacks import Callback
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense, Activation
import pickle
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import sys
from queue import Queue
import pandas as pd
import json
%matplotlib inline
```
We grab the files necessary for training. Those are sampled from the lorenz attractor model implemented in NodeRED. Those are two serialized pickle numpy arrays. In case you are interested in how these data have been generated please have a look at the following tutorial. https://developer.ibm.com/tutorials/iot-deep-learning-anomaly-detection-2/
```
!rm watsoniotp.*
!wget https://raw.githubusercontent.com/romeokienzler/developerWorks/master/lorenzattractor/watsoniotp.healthy.phase_aligned.pickle
!wget https://raw.githubusercontent.com/romeokienzler/developerWorks/master/lorenzattractor/watsoniotp.broken.phase_aligned.pickle
!mv watsoniotp.healthy.phase_aligned.pickle watsoniotp.healthy.pickle
!mv watsoniotp.broken.phase_aligned.pickle watsoniotp.broken.pickle
```
De-serialize the numpy array containing the training data
```
# NOTE: pickle.load executes arbitrary code - these files come from the
# tutorial repository downloaded above; never load pickles from untrusted sources.
# encoding='latin1' is needed to read Python-2-era pickles under Python 3.
data_healthy = pickle.load(open('watsoniotp.healthy.pickle', 'rb'), encoding='latin1')
data_broken = pickle.load(open('watsoniotp.broken.pickle', 'rb'), encoding='latin1')
```
Reshape to three columns and 3000 rows. In other words three vibration sensor axes and 3000 samples
Since this data is sampled from the Lorenz Attractor Model, let's plot it with a phase plot to get the typical 2-eyed plot. First for the healthy data
```
# Phase plot of the healthy samples (three sensor axes in 3D).
fig = plt.figure()
# Figure.gca(projection='3d') was deprecated in matplotlib 3.4 and removed in
# 3.6; add_subplot is the supported way to create a 3D axes.
ax = fig.add_subplot(projection='3d')
ax.plot(data_healthy[:,0], data_healthy[:,1], data_healthy[:,2], lw=0.5)
ax.set_xlabel("X Axis")
ax.set_ylabel("Y Axis")
ax.set_zlabel("Z Axis")
ax.set_title("Lorenz Attractor")
```
Then for the broken one
```
# Phase plot of the broken samples (three sensor axes in 3D).
fig = plt.figure()
# Figure.gca(projection='3d') was deprecated in matplotlib 3.4 and removed in
# 3.6; add_subplot is the supported way to create a 3D axes.
ax = fig.add_subplot(projection='3d')
ax.plot(data_broken[:,0], data_broken[:,1], data_broken[:,2], lw=0.5)
ax.set_xlabel("X Axis")
ax.set_ylabel("Y Axis")
ax.set_zlabel("Z Axis")
ax.set_title("Lorenz Attractor")
```
In the previous examples, we fed the raw data into an LSTM. Now we want to use an ordinary feed-forward network. So we need to do some pre-processing of this time series data
A widely-used method in traditional data science and signal processing is called Discrete Fourier Transformation. This algorithm transforms from the time to the frequency domain, or in other words, it returns the frequency spectrum of the signals.
The most widely used implementation of the transformation is called FFT, which stands for Fast Fourier Transformation, let’s run it and see what it returns
```
# FFT per column: transform each sensor axis from time to frequency domain.
# NOTE(review): .real already discards the imaginary (cosine) component here,
# yet the explanatory text below still describes the arrays as complex and the
# plotting cells apply .real again - confirm which version is intended.
data_healthy_fft = np.fft.fft(data_healthy).real
data_broken_fft = np.fft.fft(data_broken).real
```
Let’s first have a look at the shape and contents of the arrays.
```
# One spectrum value per input sample, so the shape matches the input data.
print (data_healthy_fft.shape)
print (data_healthy_fft)
```
First, we notice that the shape is the same as the input data. So if we have 3000 samples, we get back 3000 spectrum values, or in other words 3000 frequency bands with the intensities.
The second thing we notice is that the data type of the array entries is not float anymore, it is complex. So those are not complex numbers, it is just a means for the algorithm the return two different frequency compositions in one go. The real part returns a sine decomposition and the imaginary part a cosine. We will ignore the cosine part in this example since it turns out that the sine part already gives us enough information to implement a good classifier.
But first let’s plot the two arrays to get an idea how a healthy and broken frequency spectrum differ
```
def _plot_spectrum(spectrum):
    """Plot the real FFT spectrum of the three sensor axes (x=blue, y=red, z=green)."""
    fig, ax = plt.subplots(num=None, figsize=(14, 6), dpi=80, facecolor='w', edgecolor='k')
    size = len(spectrum)
    for axis, color in zip(range(3), ('blue', 'red', 'green')):
        ax.plot(range(0, size), spectrum[:, axis].real, '-', color=color, animated = True, linewidth=1)

# The same plotting code was duplicated for both data sets (and the broken
# plot reused len(data_healthy_fft) for its x-range); one helper avoids both.
_plot_spectrum(data_healthy_fft)
_plot_spectrum(data_broken_fft)
```
So, what we've been doing is so called feature transformation step. We’ve transformed the data set in a way that our machine learning algorithm – a deep feed forward neural network implemented as binary classifier – works better. So now let's scale the data to a 0..1
```
def scaleData(data):
    """Min-max normalize every feature column of *data* to the range [0, 1]."""
    return MinMaxScaler(feature_range=(0, 1)).fit_transform(data)
```
And please don’t worry about the warnings. As explained before we don’t need the imaginary part of the FFT
```
# Scale the spectra to [0, 1], then transpose so each row is one sensor axis
# (3 rows) and each column one of the 3000 frequency-band features.
data_healthy_scaled = scaleData(data_healthy_fft)
data_broken_scaled = scaleData(data_broken_fft)
data_healthy_scaled = data_healthy_scaled.T
data_broken_scaled = data_broken_scaled.T
```
Now we reshape again to have three examples (rows) and 3000 features (columns). It's important that you understand this. We have turned our initial data set which contained 3 columns (dimensions) of 3000 samples. Since we applied FFT on each column we've obtained 3000 spectrum values for each of the 3 columns. We are now using each column with the 3000 spectrum values as one row (training example) and each of the 3000 spectrum values becomes a column (or feature) in the training data set
```
# ndarray.reshape returns a NEW array - the original discarded the result,
# making both lines no-ops. Assign the result back so the documented
# (3 examples x 3000 features) shape is actually guaranteed.
data_healthy_scaled = data_healthy_scaled.reshape(3, 3000)
data_broken_scaled = data_broken_scaled.reshape(3, 3000)
```
# Start of Assignment
The first thing we need to do is to install a little helper library for submitting the solutions to the coursera grader:
```
!rm -f rklib.py
!wget https://raw.githubusercontent.com/IBM/coursera/master/rklib.py
```
Please specify the email address you are using with Coursera here:
```
from rklib import submit, submitAll
# Grader credentials: course-wide key and the ids of the three graded parts.
key = "4vkB9vnrEee8zg4u9l99rA"
all_parts = ["O5cR9","0dXlH","ZzEP8"]
# Fill in the e-mail address registered with Coursera (a string literal).
email = #### your code here ###
```
## Task
Given, the explanation above, please fill in the following two constants in order to make the neural network work properly
```
# The training matrix built above has 3 examples (one per vibration-sensor
# axis), each with 3000 FFT frequency-band features - see the reshape cell.
dim = 3000      # number of input features per example
samples = 3     # number of examples per class
```
### Submission
Now it’s time to submit your first solution. Please make sure that the secret variable contains a valid submission token. You can obtain it from the Coursera web page of the course using the grader section of this assignment.
```
part = "O5cR9"
# Obtain the submission token from the grader section of the assignment page.
token = #### your code here ### (have a look here if you need more information on how to obtain the token https://youtu.be/GcDo0Rwe06U?t=276)
parts_data = {}
# Parts not being submitted yet are sent with placeholder / None payloads.
parts_data["0dXlH"] = json.dumps({"number_of_neurons_layer1": 0, "number_of_neurons_layer2": 0, "number_of_neurons_layer3": 0, "number_of_epochs": 0})
parts_data["O5cR9"] = json.dumps({"dim": dim, "samples": samples})
parts_data["ZzEP8"] = None
submitAll(email, token, key, parts_data)
```
To observe how training works we just print the loss during training
```
class LossHistory(Callback):
    """Keras callback that prints and records the training loss after each batch."""

    def on_train_begin(self, logs=None):
        # Reset the loss trace at the start of every training run.
        self.losses = []

    def on_batch_end(self, batch, logs=None):
        # logs=None instead of the original mutable default argument logs={}.
        logs = logs or {}
        sys.stdout.write(str(logs.get('loss')) + str(', '))
        sys.stdout.flush()
        self.losses.append(logs.get('loss'))

lr = LossHistory()
```
## Task
Please fill in the following constants to properly configure the neural network. For some of them you have to find out the precise value, for others you can try and see how the neural network is performing at a later stage. The grader only looks at the values which need to be precise
```
# The output layer must have exactly ONE neuron: the model ends in a sigmoid
# and is trained with binary crossentropy (see the model cell below). The
# hidden-layer sizes and epoch count are tunable hyperparameters.
number_of_neurons_layer1 = 3000
number_of_neurons_layer2 = 100
number_of_neurons_layer3 = 1
number_of_epochs = 1000
```
### Submission
Please submit your constants to the grader
```
# Resubmit parts 1 and 2, now with the real network-configuration values.
parts_data = {}
parts_data["0dXlH"] = json.dumps({"number_of_neurons_layer1": number_of_neurons_layer1, "number_of_neurons_layer2": number_of_neurons_layer2, "number_of_neurons_layer3": number_of_neurons_layer3, "number_of_epochs": number_of_epochs})
parts_data["O5cR9"] = json.dumps({"dim": dim, "samples": samples})
parts_data["ZzEP8"] = None
# Obtain a fresh submission token from the grader section of the assignment page.
token = #### your code here ###
submitAll(email, token, key, parts_data)
```
## Task
Now it’s time to create the model. Please fill in the placeholders. Please note since this is only a toy example, we don't use a separate corpus for training and testing. Just use the same data for fitting and scoring
```
# design network
from tensorflow.keras import optimizers

# Gradient clipping keeps sigmoid/crossentropy training numerically stable.
sgd = optimizers.SGD(lr=0.01, clipnorm=1.)

model = Sequential()
model.add(Dense(number_of_neurons_layer1, input_shape=(dim, ), activation='relu'))
model.add(Dense(number_of_neurons_layer2, activation='relu'))
model.add(Dense(number_of_neurons_layer3, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer=sgd)

def train(data, label):
    """Fit the classifier. Toy example: the same data is reused for validation."""
    model.fit(data, label, epochs=number_of_epochs, batch_size=72,
              validation_data=(data, label), verbose=0, shuffle=True, callbacks=[lr])

def score(data):
    """Return the sigmoid scores for *data* (close to 1 = healthy, 0 = broken)."""
    return model.predict(data)
```
We prepare the training data by concatenating a label “0” for the broken and a label “1” for the healthy data. Finally we union the two data sets together
```
# Label column vectors: 1 = healthy, 0 = broken (three examples of each class).
label_healthy = np.repeat(1, 3).reshape(3, 1)
label_broken = np.repeat(0, 3).reshape(3, 1)
# Append the label as the last column, then stack both classes vertically.
train_healthy = np.hstack((data_healthy_scaled, label_healthy))
train_broken = np.hstack((data_broken_scaled, label_broken))
train_both = np.vstack((train_healthy, train_broken))
```
Let’s have a look at the two training sets for broken and healthy and at the union of them. Note that the last column is the label
```
# Inspect the labelled matrices; the last column is the class label.
pd.DataFrame(train_healthy)
pd.DataFrame(train_broken)
pd.DataFrame(train_both)
```
So those are frequency bands. Notice that although many frequency bands are having nearly the same energy, the neural network algorithm still can work those out which are significantly different.
## Task
Now it’s time to do the training. Please provide the first 3000 columns of the array as the 1st parameter and column number 3000 containing the label as 2nd parameter. Please use the python array slicing syntax to obtain those.
The following link tells you more about the numpy array slicing syntax
https://docs.scipy.org/doc/numpy-1.13.0/reference/arrays.indexing.html
```
# Columns 0..2999 hold the frequency-band features; column 3000 is the label.
features = train_both[:, 0:3000]
labels = train_both[:, 3000]
```
Now it’s time to do the training. You should see the loss trajectory go down, we will also plot it later. Note: We also could use TensorBoard for this but for this simple scenario we skip it. In some rare cases training doesn’t converge simply because random initialization of the weights caused gradient descent to start at a sub-optimal spot on the cost hyperplane. Just recreate the model (the cell which contains *model = Sequential()*) and re-run all subsequent steps and train again
```
# Train on the six labelled spectra; per-batch losses stream via the callback.
train(features,labels)
```
Let's plot the losses
```
# Plot the per-batch loss trajectory recorded by the LossHistory callback.
fig, ax = plt.subplots(num=None, figsize=(14, 6), dpi=80, facecolor='w', edgecolor='k')
size = len(lr.losses)
ax.plot(range(0,size), lr.losses, '-', color='blue', animated = True, linewidth=1)
```
Now let’s examine whether we are getting good results. Note: best practice is to use a training and a test data set for this which we’ve omitted here for simplicity
```
# Scores should be close to 1 for the healthy and close to 0 for the broken data.
score(data_healthy_scaled)
score(data_broken_scaled)
```
### Submission
In case you feel confident that everything works as it should (getting values close to one for the healthy and close to zero for the broken case) you can make sure that the secret variable contains a valid submission token and submit your work to the grader
```
# Final submission: all three parts, including the averaged healthy score.
parts_data = {}
parts_data["0dXlH"] = json.dumps({"number_of_neurons_layer1": number_of_neurons_layer1, "number_of_neurons_layer2": number_of_neurons_layer2, "number_of_neurons_layer3": number_of_neurons_layer3, "number_of_epochs": number_of_epochs})
parts_data["O5cR9"] = json.dumps({"dim": dim, "samples": samples})
# Obtain a fresh submission token from the grader section of the assignment page.
token = #### your code here ###
# Mean healthy score over the three examples, submitted as a string.
prediction = str(np.sum(score(data_healthy_scaled))/3)
myData={'healthy' : prediction}
myData
parts_data["ZzEP8"] = json.dumps(myData)
submitAll(email, token, key, parts_data)
```
| github_jupyter |
# Reproduce Allen smFISH results with Starfish
This notebook walks through a work flow that reproduces the smFISH result for one field of view using the starfish package.
```
from copy import deepcopy
from glob import glob
import json
import os
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy import ndimage as ndi
from scipy import stats
from skimage import (exposure, feature, filters, io, measure,
morphology, restoration, segmentation, transform,
util, img_as_float)
from starfish.io import Stack
from starfish.constants import Indices
# # developer note: for rapid iteration, it may be better to run this cell, download the data once, and load
# # the data from the local disk. If so, uncomment this cell and run this instead of the above.
# !aws s3 sync s3://czi.starfish.data.public/20180606/allen_smFISH ./allen_smFISH
# experiment_json = os.path.abspath("./allen_smFISH/fov_001/experiment.json")
# Remote manifest describing the tiles of this field of view.
# this is a large (1.1GB) FOV, so the download may take some time
experiment_json = 'https://dmf0bdeheu4zf.cloudfront.net/20180606/allen_smFISH/fov_001/experiment.json'
```
Load the Stack object, which while not well-named right now, should be thought of as an access point to an "ImageDataSet". In practice, we expect the Stack object or something similar to it to be an access point for _multiple_ fields of view. In practice, the thing we talk about as a "TileSet" is the `Stack.image` object. The data are currently stored in-memory in a `numpy.ndarray`, and that is where most of our operations are done.
The numpy array can be accessed through Stack.image.numpy\_array (public method, read only) or Stack.image.\_data (read and write)
```
# The codebook maps imaging channels to the genes they encode.
codebook = pd.read_json('https://dmf0bdeheu4zf.cloudfront.net/20180606/allen_smFISH/fov_001/codebook.json')
codebook
```
We're ready now to load the experiment into starfish (This experiment is big, it takes a few minutes):
```
# Load the full experiment into memory; this FOV is large, so it takes a few minutes.
s = Stack()
s.read(experiment_json)
```
All of our implemented operations leverage the `Stack.image.apply` method to apply a single function over each of the tiles or volumes in the FOV, depending on whether the method accepts a 2d or 3d array. Below, we're clipping each image independently at the 10th percentile. I've placed the imports next to the methods so that you can easily locate the code, should you want to look under the hood and understand what parameters have been chosen.
The verbose flag for our apply loops could use a bit more refinement. We should be able to tell it how many images it needs to process from looking at the image stack, but for now it's dumb so just reports the number of tiles or volumes it's processed. This FOV has 102 images over 3 volumes.
```
from starfish.pipeline.filter import Filter
# Clip each image independently at the 10th percentile; filtering is in place.
s_clip = Filter.Clip(p_min=10, p_max=100, verbose=True)
s_clip.filter(s.image)
```
We're still working through the backing of the Stack.image object with the on-disk or on-cloud Tile spec. As a result, most of our methods work in-place. For now, we can hack around this by deepcopying the data before administering the operation. This notebook was developed on a 64gb workstation, so be aware of the memory usage when copying!
```
# filtered_backup = deepcopy(s)
```
If you ever want to visualize the image in the notebook, we've added a widget to do that. The first parameter is an indices dict that specifies which hybridization round, channel, z-slice you want to view. The result is a pageable visualization across that arbitrary set of slices. Below I'm visualizing the first channel, which your codebook tells me is Nmnt.
[N.B. once you click on the slider, you can page with the arrow keys on the keyboard.]
```
# Page through channel 0 (Nmnt per the codebook), then band-pass filter the
# whole stack in place to remove background.
s.image.show_stack({Indices.CH: 0});
s_bandpass = Filter.Bandpass(lshort=0.5, llong=7, threshold=None, truncate=4, verbose=True)
s_bandpass.filter(s.image)
```
For bandpass, there's a point where things get weird, at `c == 0; z <= 14`. In that range the images look mostly like noise. However, _above_ that, they look great + background subtracted! The later stages of the pipeline appear robust to this, though, as no spots are called for the noisy sections.
```
# I wasn't sure if this clipping was supposed to be by volume or tile. I've done tile here, but it can be easily
# switched to volume.
s_clip = Filter.Clip(p_min=10, p_max=100, is_volume=False, verbose=True)
s_clip.filter(s.image)
# Gaussian low-pass across z only; x and y are left untouched.
sigma=(1, 0, 0) # filter only in z, do nothing in x, y
glp = Filter.GaussianLowPass(sigma=sigma, is_volume=True, verbose=True)
glp.filter(s.image)
```
Below, because spot finding is so slow when single-plex, we'll pilot this on a max projection to show that the parameters work. Here's what trackpy.locate, which we wrap, produces for a z-projection of channel 1. To use our plotting methods on z-projections we have to expose some of the starfish internals, which will be improved upon.
```
from showit import image
from trackpy import locate
# grab a section from the tensor.
# Max-project over z, then take hybridization round 0 / channel 1.
ch1 = s.image.max_proj(Indices.Z)[0, 1]
results = locate(ch1, diameter=3, minmass=250, maxsize=3, separation=5, preprocess=False, percentile=10)
# Rename trackpy's output columns to the names starfish's spot plotter expects.
results.columns = ['x', 'y', 'intensity', 'r', 'eccentricity', 'signal', 'raw_mass', 'ep']
# plot the z-projection
f, ax = plt.subplots(figsize=(20, 20))
ax.imshow(ch1, vmin=15, vmax=52, cmap=plt.cm.gray)
# draw called spots on top as red circles
# scale radius plots the red circle at scale_radius * spot radius
s.image._show_spots(results, ax=plt.gca(), scale_radius=7)
```
Below spot finding is on the _volumes_ for each channel. This will take about `11m30s`
```
from starfish.pipeline.features.spots.detector import SpotFinder
# I've guessed at these parameters from the allen_smFISH code, but you might want to tweak these a bit.
# as you can see, this function takes a while. It will be great to parallelize this. That's also coming,
# although we haven't figured out where it fits in the priority list.
# These mirror trackpy.locate's parameters (diameter/minmass/maxsize/separation/percentile).
kwargs = dict(
spot_diameter=3, # must be odd integer
min_mass=300,
max_size=3, # this is max _radius_
separation=5,
noise_size=0.65, # this is not used because preprocess is False
preprocess=False,
percentile=10, # this is irrelevant when min_mass, spot_diameter, and max_size are set properly
verbose=True,
is_volume=True,
)
lmpf = SpotFinder.LocalMaxPeakFinder(**kwargs)
spot_attributes = lmpf.find(s.image)
# save the results to disk as json
# Each result pairs a spot table with its (hyb, ch) indices; output files are keyed by channel.
for attrs, (hyb, ch) in spot_attributes:
attrs.save(f'spot_attributes_c{ch.value}.json')
# # if you want to load them back in the same shape, here's how:
# from starfish.pipeline.features.spot_attributes import SpotAttributes
# spot_attributes = [SpotAttributes.load(attrs) for attrs in glob('spot_attributes_c*.json')]
# this is not a very performant function because of how matplotlib renders circles as individual artists,
# but I think it's useful for debugging the spot detection.
# Note that in places where spots are "missed" it is often because they've been localized to individual
# nearby z-planes, whereas most spots exist across several layers of z.
s.image.show_stack({Indices.CH: 1, Indices.HYB: 0}, show_spots=spot_attributes[1][0], figure_size=(20, 20), p_min=60, p_max=99.9);
```
| github_jupyter |
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
import PIL
import skimage as sk
import random
from PIL import Image
def prepare_dataset(path):
    """Load every image under *path* as a grayscale 224x224 array.

    Each immediate subfolder of *path* is one category; images in folders
    whose path contains 'non' get label 0 (no symptom), all others label 1.
    Returns a tuple (x, y) of numpy arrays.
    """
    samples = []
    labels = []
    for category in os.listdir(path):
        category_dir = os.path.join(path, category)
        for filename in os.listdir(category_dir):
            # Grayscale ('L') and a fixed 224x224 network input size.
            img = Image.open(os.path.join(category_dir, filename)).convert('L')
            img = img.resize((224, 224), Image.ANTIALIAS)
            samples.append(np.asarray(img))
            # Label 0 when there is no symptom, 1 otherwise.
            labels.append(0 if 'non' in category_dir else 1)
    return (np.asarray(samples), np.asarray(labels))
# Load the train/test/validation splits (relative paths — assumes the notebook
# runs from the user's home directory; TODO confirm).
(x_train,y_train) = prepare_dataset('Desktop/dataset/training_set/')
(x_test,y_test) = prepare_dataset('Desktop/dataset/test_set/')
(x_validation,y_validation) = prepare_dataset('Desktop/dataset/validation_set/')
# Shape of the image arrays
print("Shape of x_train {}\nShape of x_test{}\nShape of x_validation{}".format(x_train.shape,x_test.shape,x_validation.shape))
# Show randomly selected images from the train dataset
def random_example(x, y, rows, cols):
    """Show a rows x cols grid of images randomly sampled from x.

    If y is given, each subplot is titled with its tumor label
    (1 -> "Has Tumor", anything else -> "No Tumor").
    """
    f, axarr = plt.subplots(rows, cols)
    # BUG FIX: the original looped hard-coded range(3)/range(2), ignoring the
    # rows/cols parameters it was given.
    for row in range(rows):
        for col in range(cols):
            # BUG FIX: randrange excludes len(x); the original
            # random.randint(0, len(x)) could return len(x) -> IndexError.
            rnd = random.randrange(len(x))
            axarr[row, col].imshow(x[rnd], cmap='gray')
            if y is not None:
                axarr[row, col].set_title("Has Tumor" if y[rnd] == 1 else "No Tumor")
    f.tight_layout(pad=0.9, h_pad=2.0)
    plt.show()
# Preview a few random training images (labels omitted).
random_example(x_train, None, 3, 2)
# Data augmentation via image transformations.
class Augmentation:
    """Simple image transforms for dataset augmentation.

    Every method takes (data, label) and returns a pair
    (augmented_images, augmented_labels) of python lists.
    """

    def __init__(self):
        pass

    def random_rotation(self, data, label):
        """Rotate all images by one random angle drawn from [-25, 25] degrees."""
        # One angle is drawn for the whole batch, matching the shared RNG draw.
        angle = random.uniform(-25, 25)
        rotated = [sk.transform.rotate(img, angle) for img in data]
        kept_labels = [lab for _, lab in zip(data, label)]
        return (rotated, kept_labels)

    def random_noise(self, data, label):
        """Add random noise to every image."""
        noised = [sk.util.random_noise(img) for img in data]
        kept_labels = [lab for _, lab in zip(data, label)]
        return (noised, kept_labels)

    def horizontal_flip(self, data, label):
        """Mirror every image left-right."""
        flipped = [img[:, ::-1] for img in data]
        kept_labels = [lab for _, lab in zip(data, label)]
        return (flipped, kept_labels)

    def vertical_flip(self, data, label):
        """Flip every image with np.flip (reverses all axes)."""
        flipped = [np.flip(img) for img in data]
        kept_labels = [lab for _, lab in zip(data, label)]
        return (flipped, kept_labels)
# Build all augmented variants of the training set, one pass each.
AUG = Augmentation()
(x_noise,y_noise) = AUG.random_noise(x_train,y_train)
(x_h_flipped,y_h_flipped) = AUG.horizontal_flip(x_train,y_train)
(x_v_flipped,y_v_flipped) = AUG.vertical_flip(x_train,y_train)
(x_rotated,y_rotated) = AUG.random_rotation(x_train,y_train)
# Merge the noised / flipped / rotated data (5x the original training-set size)
x_noise = np.asarray(x_noise)
x_h_flipped = np.asarray(x_h_flipped)
x_v_flipped = np.asarray(x_v_flipped)
x_rotated = np.asarray(x_rotated)
x_train = np.concatenate((x_train,x_noise,x_h_flipped,x_v_flipped,x_rotated),axis=0)
#----------------------------------------------------------------------------------------------------------------------------------------------------------------
y_noise = np.asarray(y_noise)
y_h_flipped = np.asarray(y_h_flipped)
y_v_flipped = np.asarray(y_v_flipped)
y_rotated = np.asarray(y_rotated)
y_train = np.concatenate((y_train,y_noise,y_h_flipped,y_v_flipped,y_rotated),axis=0)
random_example(x_train,y_train,3,2)
# Convert to tensors
import torch
x_train = torch.from_numpy(x_train)
x_test = torch.from_numpy(x_test)
y_train = torch.from_numpy(y_train)
y_test = torch.from_numpy(y_test)
# Wrap in TensorDatasets; train batches are shuffled each epoch, test is not.
train = torch.utils.data.TensorDataset(x_train,y_train)
train_loader = torch.utils.data.DataLoader(train,batch_size=4,shuffle=True)
test = torch.utils.data.TensorDataset(x_test,y_test)
test_loader = torch.utils.data.DataLoader(test,batch_size=4,shuffle=False)
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
# Convenience constructors for the convolutions ResNet uses.
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding=1; bias-free because BatchNorm follows."""
    return nn.Conv2d(in_planes, out_planes,
                     kernel_size=3, stride=stride, padding=1, bias=False)
def conv1x1(in_planes, out_planes, stride=1):
    """1x1 convolution (channel projection); bias-free because BatchNorm follows."""
    return nn.Conv2d(in_planes, out_planes,
                     kernel_size=1, stride=stride, bias=False)
# Define the unit block that ResNet is built from.
class BasicBlock(nn.Module):
    """Residual block: two 3x3 conv/BN stages with dropout plus a shortcut.

    The shortcut is the identity unless *downsample* is given, in which case
    it projects the input to the block's output shape.
    """

    expansion = 1  # BasicBlock keeps the channel count (Bottleneck would use 4).

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.drop = nn.Dropout(0.5)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x
        # First stage: conv -> BN -> ReLU -> dropout.
        y = self.drop(self.relu(self.bn1(self.conv1(x))))
        # Second stage: conv -> BN -> dropout (ReLU comes after the addition).
        y = self.drop(self.bn2(self.conv2(y)))
        if self.downsample is not None:
            shortcut = self.downsample(x)
        return self.relu(y + shortcut)
num_classes = 2  # binary classification: tumor / no tumor

# Use ResNet-18 as the model (ResNet(BasicBlock, [2,2,2,2])).
class ResNet(nn.Module):
    """ResNet for single-channel (grayscale) 224x224 inputs.

    *block* is the residual block class (e.g. BasicBlock) and *layers* gives
    the number of blocks in each of the four stages.
    """

    def __init__(self, block, layers, num_classes=num_classes):
        super(ResNet, self).__init__()
        self.inplanes = 64  # stem width, per the original paper
        # 1 input channel because the images are grayscale.
        self.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0], stride=1)
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpooling = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        # Standard He init for convs; BN starts as identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def _make_layer(self, block, planes, num_layers, stride=1):
        """Stack *num_layers* blocks; only the first may downsample the shortcut."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = [block(self.inplanes, planes, stride, downsample)]
        self.inplanes = planes * block.expansion
        # BUG FIX: the original iterated range(1, len(layers)) == range(1, 1),
        # so every stage got exactly one block no matter what num_layers was.
        for _ in range(1, num_layers):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpooling(x)
        x = x.view(x.size(0), -1)  # flatten to (batch, channels)
        x = self.fc(x)
        return x
# resnet 50
# model = ResNet(BottleNeck,[3,4,6,3])
# resnet 18
model = ResNet(BasicBlock,[2,2,2,2],num_classes=2)
# NOTE(review): model.cuda() fails outright without a CUDA device, even though
# the training loop below guards transfers with torch.cuda.is_available().
model.cuda()
device = torch.device("cuda")
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(),0.0001) # 0.0001 is the learning rate
total_step = len(train_loader)
# Histories collected during training/evaluation.
loss_list = []
train_acc = []
test_acc = []
batch_size = 4
for epoch in range(300):
for i,data in enumerate(train_loader,0):
# i --->index , data ----> image
inputs,labels= data
# Reshape to NCHW; the last batch may be smaller than batch_size, in which
# case view() raises and the batch is skipped.
try:
inputs = inputs.view(batch_size,1,224,224)
inputs = inputs.float()
# NOTE(review): bare except hides every error, not just the short final
# batch; prefer `except RuntimeError` (or drop_last=True on the DataLoader).
except:
continue
if torch.cuda.is_available():
inputs, labels = inputs.to(device),labels.to(device, dtype = torch.long)
#zero gradient
optimizer.zero_grad()
# forward
outputs = model(inputs)
# compare outputs with labels to compute the loss
loss = criterion(outputs,labels)
# backward
loss.backward()
# update weights
optimizer.step()
# NOTE(review): len(x_train)/batch_size is a float; this equality only holds
# when the dataset size divides evenly by batch_size — otherwise the accuracy
# bookkeeping below never runs. `i == len(train_loader)-1` would be robust.
if(i==(len(x_train)/batch_size)-1 and epoch%10 == 0):
print("Epoch : {}".format(epoch))
correct = 0
total = 0
# Measure accuracy over the training set (no gradients needed).
with torch.no_grad():
for data in train_loader:
images,labels = data
try:
images = images.view(batch_size,1,224,224)
images = images.float()
except:
continue # skip dataset errors (short final batch)
if torch.cuda.is_available():
images, labels = images.to(device), labels.to(device)
outputs = model(images)
_,predicted = torch.max(outputs.data,1) # returns the index of the max value
total += labels.size(0)
correct += (predicted == labels).sum().item()
train_acc.append((100*correct/total))
loss_list.append(loss.item())
if(epoch % 10 == 0):
print("Accuracy train: ",(100*correct/total))
# Plot the collected loss and training-accuracy histories.
plt.subplot(2, 1, 1)
plt.plot(loss_list)
plt.title("Loss")
plt.subplot(2, 1, 2)
plt.plot(np.array(train_acc)/100,label="Train Accuracy",color='green')
plt.title("Train Accuracy")
plt.tight_layout(pad=0.9,h_pad=2.0)
plt.show()
# Final evaluation on the held-out test set.
correct = 0
total = 0
with torch.no_grad():
for data in test_loader:
images,labels = data
try:
images = images.view(batch_size,1,224,224)
images = images.float()
except:
continue
if torch.cuda.is_available():
images, labels = images.to(device), labels.to(device)
outputs = model(images)
_,predicted = torch.max(outputs.data,1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
test_acc.append((100*correct/total))
# NOTE(review): test_acc holds the *running* accuracy after each batch, so this
# mean over all entries is not the plain test accuracy; the last entry
# (100*correct/total after the loop) is the true figure.
print("Test Accuracy: ",sum(test_acc)/len(test_acc))
# Persist the whole model object (not just the state_dict).
torch.save(model, "brain_resnet18.pt")
# Accuracy is not very high yet
# Additional dataset collection is needed
| github_jupyter |
<a href="https://colab.research.google.com/github/LucyKinyua/Week2_MS/blob/main/Moringa_Data_Science_Prep_W2_Independent_Project_2021_05_Lucy_Kinyua_SQL_Notebook.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Overview
In this part of the assessment, you will act as a Data analyst to answer a research question on the US elections.
First, you will be required to write a Data Report on the process that you will undertake while working on the given research problem. Using the CRISP-DM Methodology, you will document the various stages of the data science lifecycle for the given research problem while also providing your recommendation.
You have been provided with a detailed description of what you will be expected to do below.
Do remember that complex tasks that may seem hard at first can often be broken down into a sequence of simple tasks, and there are workarounds to do what first seems “impossible” with a succession of known operations.
**Problem Description**
You have been asked to help a candidate to become US president. The winner of the election will be the candidate winning the most grand electors.
Grand electors are attributed at the state level: in each of the 51 states, there is a given number of grand electors to win (roughly, but not exactly, proportional to the size of the state) and the presidential candidate receiving the most local votes wins ALL the Grand Electors in that state.
Because the number of grand electors is not exactly proportional to the population, some states can be prioritized to increase the return on investment of the campaign. We assume here there are only 2 candidates, and no history (no trend of certain states to vote for a particular candidate or party). Hence, each vote is equally "expensive" to get, but some states grant more grand elector per capita.
You are provided with 2 tables: one giving the number of Grand Electors per state, the other the population per state.
- Grand Electors by State
- Population by State
You are asked to identify the states that should be prioritized to win the election, with a smart but simple algorithm (brute force computation of all possibilities to find the absolute optimum is not accepted, as it would be too computationally expensive). It is ok not to find the overall optimum, we just want a strategy that makes sense.
(This is of course a very simplistic view of reality, but it is a nice starting point to play with data and analyze possibilities).
First take a few minutes to think about what you need to do. Steps will be suggested hereafter, but a big part of the data scientist's job is to identify the flow of operations when being asked a practical question, so it is important you start exercising on that in addition to programming.
Here is what we are suggesting to do: we will rank states by decreasing number of grand electors per capita. The first states in the list will be the most valuable (you get a large number of grand electors by convincing a small number of people to vote for you). We will target all the states at the top of the list until the cumulative sum (also called running total) of grand electors won is larger than half the total number of Grand Electors in the country.
**Instructions**
To do that, we need (you are allowed to create as many intermediary tables as you want, to keep queries short):
1. To join the 2 tables:
- You notice States are not capitalized the same way in both tables (one is in uppercase letters, the other not), so you will first need to convert all to uppercase, for instance.
- Now you can join the tables on the state key.
2. Your boss wants you to change the name of the "District of Columbia" state to its short version "DC". Please do that.
3. To compute the ratio between the number of grand electors and the population. Please create a new column with that ratio.
4. To order the states by decreasing ratio of Grand Electors per capita. That will make our priority list.
5. To compute the running total of Grand Electors in that sorted list.
- Hint: you can get inspiration from here to compute a running total from here: https://stackoverflow.com/questions/21382766/cumulative-summing-values-in-sqlite
6. Independently, to compute the half of the total of Grand Electors overall (in the whole country):
- This is the threshold we need to reach for winning the presidential election.
7. To filter our sorted list of states in order to keep only the (top) ones enabling us to reach the computed threshold. (the other states can be ignored). That is our target list.
- Hint: You can do that in 2 steps:
- Select all the states for which the running total is below or equal to the threshold.
- Add the first state for which the running total is larger than the threshold.
Can you draw some conclusions from the result? Is it in line with your expectations? How many states do you end up with in the target list? Is it a small or a large number? Do you think it would be a good recommendation to target those states?
# Connecting to database and importing data from CSV files
```
# Loading an sql extension to allow me to work with sql on Colaboratory;
#
%load_ext sql
# Connecting to the in memory sqlite database;
#
%sql sqlite://
# Importing the python csv library to allow me to read csv file(s) that will have uploaded to this environment;
#
import csv
# Importing the pandas library to use for data manipulation in this notebook;
#
import pandas as pd
# Uploading the Grand Electors csv file;
# (index_col=0 uses the first CSV column as the DataFrame index — TODO confirm
# the files actually include an index column, otherwise a data column is lost.)
with open('GrandElectors_by_state.csv','r') as f:
GrandElectors_by_state = pd.read_csv(f, index_col=0, encoding='utf-8')
# PERSIST copies the DataFrame into a SQL table named after the variable.
%sql PERSIST GrandElectors_by_state;
# Previewing the Grand_Electors table;
#
%%sql
SELECT * FROM GrandElectors_by_state
LIMIT 10;
# Uploading the Population csv file;
#
with open('Population_by_state.csv','r') as f:
Population_by_state = pd.read_csv(f, index_col=0, encoding='utf-8')
%sql PERSIST Population_by_state;
# Previewing the Population table;
#
%%sql
SELECT * FROM Population_by_state
LIMIT 10;
```
# Data Preparation
```
# Instruction 1:
# Notice States are not capitalized the same way in both tables
# Converting States in GrandElectors_by_state table to UPPERCASE;
#
%%sql
UPDATE GrandElectors_by_state SET State = upper(State);
SELECT * FROM GrandElectors_by_state;
# Previewing the Grand_Electors table to confirm the update has taken effect;
#
%%sql
SELECT * FROM GrandElectors_by_state LIMIT 10;
# Counting the number of States in the Grand_Electors table;
#
%%sql
SELECT COUNT(State) FROM GrandElectors_by_state;
# Counting the number of States in the Population table;
#
%%sql
SELECT COUNT(State) FROM Population_by_state;
# Notice that the number of States are not the same
# This will be corrected after using the INNER JOIN function
# The records from table one and table two would both be returned,...
# ... but only if the values in column one of table one match the values in column one of table two.
# Any records that do not have matching values would not be returned by an INNER JOIN.
# Joining both tables;
#
%%sql
-- NOTE(review): the subquery wrapper is redundant — the inner SELECT could be
-- the CREATE TABLE ... AS source directly.  The INNER JOIN silently drops any
-- state present in only one of the two tables.
CREATE TABLE if not exists new_table AS SELECT STATE, POPULATION, GRAND_ELECTORS FROM
(SELECT
GrandElectors_by_state.State AS STATE,
Population_by_state.Population AS POPULATION,
GrandElectors_by_state.GrandElectors AS GRAND_ELECTORS
FROM GrandElectors_by_state
INNER JOIN Population_by_state ON GrandElectors_by_state.State = Population_by_state.State);
SELECT * FROM new_table;
# Previewing the new joint table to confirm the update has taken effect;
#
%%sql
SELECT * FROM new_table;
```
# Modelling
```
# Instruction 2:
# Changing the name of the "District of Columbia" State to its short version "DC".
#
%%sql
-- NOTE(review): standard SQL treats double quotes as identifier quoting;
-- SQLite merely falls back to reading "DC" as a string literal here.
-- Single quotes ('DC') would be the portable spelling.
UPDATE new_table
SET STATE = "DC"
WHERE STATE = "DISTRICT OF COLUMBIA";
SELECT * FROM new_table;
# Instruction 3:
# To compute the ratio between the number of grand electors and the population.
# Create a new column with that ratio.
#
# Instruction 4:
# To order the states by decreasing ratio of Grand Electors per capita. That will make our priority list.
#
%%sql
SELECT STATE, POPULATION, GRAND_ELECTORS,
(POPULATION/GRAND_ELECTORS) AS "POPULATION FOR EVERY 1 GRAND ELECTOR"
FROM new_table
ORDER BY "POPULATION FOR EVERY 1 GRAND ELECTOR" DESC;
# Instruction 5:
# To compute the running total of Grand Electors in that sorted list.
#
# NOTE(review): the cells below are scratch work for instruction 5; none of
# them yet produces the running total — see the inline notes.
%%sql
%%sql
-- NOTE(review): duplicated %%sql line above — only one cell magic is allowed.
-- NOTE(review): PARTITION BY STATE makes each partition a single row, so this
-- "running total" is just each state's own elector count.  The window result
-- also reuses the alias "POPULATION FOR EVERY 1 GRAND ELECTOR", and that
-- column does not exist in new_table.
SELECT STATE, POPULATION, GRAND_ELECTORS, "POPULATION FOR EVERY 1 GRAND ELECTOR",
SUM(GRAND_ELECTORS)
OVER (PARTITION BY STATE ORDER BY "POPULATION FOR EVERY 1 GRAND ELECTOR" DESC)
AS "POPULATION FOR EVERY 1 GRAND ELECTOR"
FROM new_table;
%%sql
-- NOTE(review): missing comma after the quoted column name below, before
-- SUM(...) — this query is a syntax error as written.
SELECT
STATE,
POPULATION,
GRAND_ELECTORS,
"POPULATION FOR EVERY 1 GRAND ELECTOR"
SUM (GRAND_ELECTORS) OVER (ORDER BY "POPULATION FOR EVERY 1 GRAND ELECTOR") AS RUNNING_TOTAL
FROM new_table;
%%sql
-- NOTE(review): the queries below are illustrative running-total examples
-- pasted from the StackOverflow reference; the `registration` and `MyTable`
-- tables do not exist in this database, so they will fail if executed.
SELECT country, registration_date,registred_users,
SUM(registred_users)
OVER (PARTITION BY country ORDER BY registration_date)
AS total_users
FROM registration;
SELECT registration_date,registred_users,
SUM(registred_users) OVER (ORDER BY registration_date)
AS total_users
FROM registration;
SELECT
t.Date,
Sum(r.KeyColumn1),
Sum(r.KeyColumn2),
Sum(r.KeyColumn3)
FROM (SELECT DISTINCT Date FROM MyTable) as t
Left Join MyTable as r On (r.Date < t.Date)
Group By t.Date;
%%sql
-- National total of Grand Electors (halved in the next cell for the threshold).
SELECT SUM(GRAND_ELECTORS)
FROM new_table;
# Instruction 6:
# Compute half of the total of Grand Electors overall (in the whole country):
# - This is the threshold we need to reach for winning the presidential election.
#
%%sql
SELECT SUM(GRAND_ELECTORS)
# Instruction 7:
# To filter our sorted list of states in order to keep only the (top) ones enabling us to reach the computed threshold.
# (the other states can be ignored). That is our target list.
# Hint: You can do that in 2 steps:
# - Select all the states for which the running total is below or equal to the threshold.
# - Add the first state for which the running total is larger than the threshold.
#
%%sql
SELECT
FROM new_table;
```
# Evaluation
```
# Test against success criteria
#
```
# Deployment
```
# Conclusion:
# Can you draw some conclusions from the result?
# Is it in line with your expectations?
# How many states do you end up with in the target list?
# Is it a small or a large number?
# Do you think it would be a good recommendation to target those states?
#
```
| github_jupyter |
# Autonomous driving - Car detection
Welcome to your week 3 programming assignment. You will learn about object detection using the very powerful YOLO model. Many of the ideas in this notebook are described in the two YOLO papers: [Redmon et al., 2016](https://arxiv.org/abs/1506.02640) and [Redmon and Farhadi, 2016](https://arxiv.org/abs/1612.08242).
**You will learn to**:
- Use object detection on a car detection dataset
- Deal with bounding boxes
## <font color='darkblue'>Updates</font>
#### If you were working on the notebook before this update...
* The current notebook is version "3a".
* You can find your original work saved in the notebook with the previous version name ("v3")
* To view the file directory, go to the menu "File->Open", and this will open a new tab that shows the file directory.
#### List of updates
* Clarified "YOLO" instructions preceding the code.
* Added details about anchor boxes.
* Added explanation of how score is calculated.
* `yolo_filter_boxes`: added additional hints. Clarify syntax for argmax and max.
* `iou`: clarify instructions for finding the intersection.
* `iou`: give variable names for all 8 box vertices, for clarity. Adds `width` and `height` variables for clarity.
* `iou`: add test cases to check handling of non-intersecting boxes, intersection at vertices, or intersection at edges.
* `yolo_non_max_suppression`: clarify syntax for tf.image.non_max_suppression and keras.gather.
* "convert output of the model to usable bounding box tensors": Provides a link to the definition of `yolo_head`.
* `predict`: hint on calling sess.run.
* Spelling, grammar, wording and formatting updates to improve clarity.
## Import libraries
Run the following cell to load the packages and dependencies that you will find useful as you build the object detector!
```
import argparse
import os
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
import scipy.io
import scipy.misc
import numpy as np
import pandas as pd
import PIL
import tensorflow as tf
from keras import backend as K
from keras.layers import Input, Lambda, Conv2D
from keras.models import load_model, Model
from yolo_utils import read_classes, read_anchors, generate_colors, preprocess_image, draw_boxes, scale_boxes
from yad2k.models.keras_yolo import yolo_head, yolo_boxes_to_corners, preprocess_true_boxes, yolo_loss, yolo_body
%matplotlib inline
```
**Important Note**: As you can see, we import Keras's backend as K. This means that to use a Keras function in this notebook, you will need to write: `K.function(...)`.
## 1 - Problem Statement
You are working on a self-driving car. As a critical component of this project, you'd like to first build a car detection system. To collect data, you've mounted a camera to the hood (meaning the front) of the car, which takes pictures of the road ahead every few seconds while you drive around.
<center>
<video width="400" height="200" src="nb_images/road_video_compressed2.mp4" type="video/mp4" controls>
</video>
</center>
<caption><center> Pictures taken from a car-mounted camera while driving around Silicon Valley. <br> We thank [drive.ai](https://www.drive.ai/) for providing this dataset.
</center></caption>
You've gathered all these images into a folder and have labelled them by drawing bounding boxes around every car you found. Here's an example of what your bounding boxes look like.
<img src="nb_images/box_label.png" style="width:500px;height:250;">
<caption><center> <u> **Figure 1** </u>: **Definition of a box**<br> </center></caption>
If you have 80 classes that you want the object detector to recognize, you can represent the class label $c$ either as an integer from 1 to 80, or as an 80-dimensional vector (with 80 numbers) one component of which is 1 and the rest of which are 0. The video lectures had used the latter representation; in this notebook, we will use both representations, depending on which is more convenient for a particular step.
In this exercise, you will learn how "You Only Look Once" (YOLO) performs object detection, and then apply it to car detection. Because the YOLO model is very computationally expensive to train, we will load pre-trained weights for you to use.
## 2 - YOLO
"You Only Look Once" (YOLO) is a popular algorithm because it achieves high accuracy while also being able to run in real-time. This algorithm "only looks once" at the image in the sense that it requires only one forward propagation pass through the network to make predictions. After non-max suppression, it then outputs recognized objects together with the bounding boxes.
### 2.1 - Model details
#### Inputs and outputs
- The **input** is a batch of images, and each image has the shape (m, 608, 608, 3)
- The **output** is a list of bounding boxes along with the recognized classes. Each bounding box is represented by 6 numbers $(p_c, b_x, b_y, b_h, b_w, c)$ as explained above. If you expand $c$ into an 80-dimensional vector, each bounding box is then represented by 85 numbers.
#### Anchor Boxes
* Anchor boxes are chosen by exploring the training data to choose reasonable height/width ratios that represent the different classes. For this assignment, 5 anchor boxes were chosen for you (to cover the 80 classes), and stored in the file './model_data/yolo_anchors.txt'
* The dimension for anchor boxes is the second to last dimension in the encoding: $(m, n_H,n_W,anchors,classes)$.
* The YOLO architecture is: IMAGE (m, 608, 608, 3) -> DEEP CNN -> ENCODING (m, 19, 19, 5, 85).
#### Encoding
Let's look in greater detail at what this encoding represents.
<img src="nb_images/architecture.png" style="width:700px;height:400;">
<caption><center> <u> **Figure 2** </u>: **Encoding architecture for YOLO**<br> </center></caption>
If the center/midpoint of an object falls into a grid cell, that grid cell is responsible for detecting that object.
Since we are using 5 anchor boxes, each of the 19 x19 cells thus encodes information about 5 boxes. Anchor boxes are defined only by their width and height.
For simplicity, we will flatten the last two dimensions of the shape (19, 19, 5, 85) encoding. So the output of the Deep CNN is (19, 19, 425).
<img src="nb_images/flatten.png" style="width:700px;height:400;">
<caption><center> <u> **Figure 3** </u>: **Flattening the last two dimensions**<br> </center></caption>
#### Class score
Now, for each box (of each cell) we will compute the following element-wise product and extract a probability that the box contains a certain class.
The class score is $score_{c,i} = p_{c} \times c_{i}$: the probability that there is an object $p_{c}$ times the probability that the object is a certain class $c_{i}$.
<img src="nb_images/probability_extraction.png" style="width:700px;height:400;">
<caption><center> <u> **Figure 4** </u>: **Find the class detected by each box**<br> </center></caption>
##### Example of figure 4
* In figure 4, let's say for box 1 (cell 1), the probability that an object exists is $p_{1}=0.60$. So there's a 60% chance that an object exists in box 1 (cell 1).
* The probability that the object is the class "category 3 (a car)" is $c_{3}=0.73$.
* The score for box 1 and for category "3" is $score_{1,3}=0.60 \times 0.73 = 0.44$.
* Let's say we calculate the score for all 80 classes in box 1, and find that the score for the car class (class 3) is the maximum. So we'll assign the score 0.44 and class "3" to this box "1".
#### Visualizing classes
Here's one way to visualize what YOLO is predicting on an image:
- For each of the 19x19 grid cells, find the maximum of the probability scores (taking a max across the 80 classes, one maximum for each of the 5 anchor boxes).
- Color that grid cell according to what object that grid cell considers the most likely.
Doing this results in this picture:
<img src="nb_images/proba_map.png" style="width:300px;height:300;">
<caption><center> <u> **Figure 5** </u>: Each one of the 19x19 grid cells is colored according to which class has the largest predicted probability in that cell.<br> </center></caption>
Note that this visualization isn't a core part of the YOLO algorithm itself for making predictions; it's just a nice way of visualizing an intermediate result of the algorithm.
#### Visualizing bounding boxes
Another way to visualize YOLO's output is to plot the bounding boxes that it outputs. Doing that results in a visualization like this:
<img src="nb_images/anchor_map.png" style="width:200px;height:200;">
<caption><center> <u> **Figure 6** </u>: Each cell gives you 5 boxes. In total, the model predicts: 19x19x5 = 1805 boxes just by looking once at the image (one forward pass through the network)! Different colors denote different classes. <br> </center></caption>
#### Non-Max suppression
In the figure above, we plotted only boxes for which the model had assigned a high probability, but this is still too many boxes. You'd like to reduce the algorithm's output to a much smaller number of detected objects.
To do so, you'll use **non-max suppression**. Specifically, you'll carry out these steps:
- Get rid of boxes with a low score (meaning, the box is not very confident about detecting a class; either due to the low probability of any object, or low probability of this particular class).
- Select only one box when several boxes overlap with each other and detect the same object.
### 2.2 - Filtering with a threshold on class scores
You are going to first apply a filter by thresholding. You would like to get rid of any box for which the class "score" is less than a chosen threshold.
The model gives you a total of 19x19x5x85 numbers, with each box described by 85 numbers. It is convenient to rearrange the (19,19,5,85) (or (19,19,425)) dimensional tensor into the following variables:
- `box_confidence`: tensor of shape $(19 \times 19, 5, 1)$ containing $p_c$ (confidence probability that there's some object) for each of the 5 boxes predicted in each of the 19x19 cells.
- `boxes`: tensor of shape $(19 \times 19, 5, 4)$ containing the midpoint and dimensions $(b_x, b_y, b_h, b_w)$ for each of the 5 boxes in each cell.
- `box_class_probs`: tensor of shape $(19 \times 19, 5, 80)$ containing the "class probabilities" $(c_1, c_2, ... c_{80})$ for each of the 80 classes for each of the 5 boxes per cell.
#### **Exercise**: Implement `yolo_filter_boxes()`.
1. Compute box scores by doing the elementwise product as described in Figure 4 ($p \times c$).
The following code may help you choose the right operator:
```python
a = np.random.randn(19*19, 5, 1)
b = np.random.randn(19*19, 5, 80)
c = a * b # shape of c will be (19*19, 5, 80)
```
This is an example of **broadcasting** (multiplying vectors of different sizes).
2. For each box, find:
- the index of the class with the maximum box score
- the corresponding box score
**Useful references**
* [Keras argmax](https://keras.io/backend/#argmax)
* [Keras max](https://keras.io/backend/#max)
**Additional Hints**
* For the `axis` parameter of `argmax` and `max`, if you want to select the **last** axis, one way to do so is to set `axis=-1`. This is similar to Python array indexing, where you can select the last position of an array using `arrayname[-1]`.
* Applying `max` normally collapses the axis for which the maximum is applied. `keepdims=False` is the default option, and allows that dimension to be removed. We don't need to keep the last dimension after applying the maximum here.
* Even though the documentation shows `keras.backend.argmax`, use `keras.argmax`. Similarly, use `keras.max`.
3. Create a mask by using a threshold. As a reminder: `([0.9, 0.3, 0.4, 0.5, 0.1] < 0.4)` returns: `[False, True, False, False, True]`. The mask should be True for the boxes you want to keep.
4. Use TensorFlow to apply the mask to `box_class_scores`, `boxes` and `box_classes` to filter out the boxes we don't want. You should be left with just the subset of boxes you want to keep.
**Useful reference**:
* [boolean mask](https://www.tensorflow.org/api_docs/python/tf/boolean_mask)
**Additional Hints**:
* For the `tf.boolean_mask`, we can keep the default `axis=None`.
**Reminder**: to call a Keras function, you should use `K.function(...)`.
```
# GRADED FUNCTION: yolo_filter_boxes
def yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold = .6):
"""Filters YOLO boxes by thresholding on object and class confidence.
Arguments:
box_confidence -- tensor of shape (19, 19, 5, 1)
boxes -- tensor of shape (19, 19, 5, 4)
box_class_probs -- tensor of shape (19, 19, 5, 80)
threshold -- real value, if [ highest class probability score < threshold], then get rid of the corresponding box
Returns:
scores -- tensor of shape (None,), containing the class probability score for selected boxes
boxes -- tensor of shape (None, 4), containing (b_x, b_y, b_h, b_w) coordinates of selected boxes
classes -- tensor of shape (None,), containing the index of the class detected by the selected boxes
Note: "None" is here because you don't know the exact number of selected boxes, as it depends on the threshold.
For example, the actual output size of scores would be (10,) if there are 10 boxes.
"""
# Step 1: Compute box scores
# Broadcasting: (19,19,5,1) * (19,19,5,80) -> (19,19,5,80) of p_c * c_i.
### START CODE HERE ### (≈ 1 line)
box_scores = box_confidence * box_class_probs
### END CODE HERE ###
# Step 2: Find the box_classes using the max box_scores, keep track of the corresponding score
# axis=-1 reduces over the 80 classes, leaving one class index and score per box.
### START CODE HERE ### (≈ 2 lines)
box_classes = K.argmax(box_scores, axis=-1)
box_class_scores = K.max(box_scores, axis=-1, keepdims=False)
### END CODE HERE ###
# Step 3: Create a filtering mask based on "box_class_scores" by using "threshold". The mask should have the
# same dimension as box_class_scores, and be True for the boxes you want to keep (with probability >= threshold)
### START CODE HERE ### (≈ 1 line)
filtering_mask = box_class_scores >= threshold
### END CODE HERE ###
# Step 4: Apply the mask to box_class_scores, boxes and box_classes
# boolean_mask flattens the 19x19x5 grid, so each output is a 1-D list of kept boxes.
### START CODE HERE ### (≈ 3 lines)
scores = tf.boolean_mask(box_class_scores, filtering_mask)
boxes = tf.boolean_mask(boxes, filtering_mask)
classes = tf.boolean_mask(box_classes, filtering_mask)
### END CODE HERE ###
return scores, boxes, classes
# Smoke test: run yolo_filter_boxes on random tensors (TensorFlow 1.x graph mode).
# NOTE(review): each .eval() call re-executes the graph, so the printed values
# come from independent evaluations of the seeded random ops.
with tf.Session() as test_a:
    box_confidence = tf.random_normal([19, 19, 5, 1], mean=1, stddev=4, seed = 1)
    boxes = tf.random_normal([19, 19, 5, 4], mean=1, stddev=4, seed = 1)
    box_class_probs = tf.random_normal([19, 19, 5, 80], mean=1, stddev=4, seed = 1)
    scores, boxes, classes = yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold = 0.5)
    print("scores[2] = " + str(scores[2].eval()))
    print("boxes[2] = " + str(boxes[2].eval()))
    print("classes[2] = " + str(classes[2].eval()))
    # Shapes are (?,) / (?, 4) because the kept-box count is graph-dynamic.
    print("scores.shape = " + str(scores.shape))
    print("boxes.shape = " + str(boxes.shape))
    print("classes.shape = " + str(classes.shape))
```
**Expected Output**:
<table>
<tr>
<td>
**scores[2]**
</td>
<td>
10.7506
</td>
</tr>
<tr>
<td>
**boxes[2]**
</td>
<td>
[ 8.42653275 3.27136683 -0.5313437 -4.94137383]
</td>
</tr>
<tr>
<td>
**classes[2]**
</td>
<td>
7
</td>
</tr>
<tr>
<td>
**scores.shape**
</td>
<td>
(?,)
</td>
</tr>
<tr>
<td>
**boxes.shape**
</td>
<td>
(?, 4)
</td>
</tr>
<tr>
<td>
**classes.shape**
</td>
<td>
(?,)
</td>
</tr>
</table>
**Note** In the test for `yolo_filter_boxes`, we're using random numbers to test the function. In real data, the `box_class_probs` would contain non-zero values between 0 and 1 for the probabilities. The box coordinates in `boxes` would also be chosen so that lengths and heights are non-negative.
### 2.3 - Non-max suppression ###
Even after filtering by thresholding over the class scores, you still end up with a lot of overlapping boxes. A second filter for selecting the right boxes is called non-maximum suppression (NMS).
<img src="nb_images/non-max-suppression.png" style="width:500px;height:400;">
<caption><center> <u> **Figure 7** </u>: In this example, the model has predicted 3 cars, but it's actually 3 predictions of the same car. Running non-max suppression (NMS) will select only the most accurate (highest probability) of the 3 boxes. <br> </center></caption>
Non-max suppression uses the very important function called **"Intersection over Union"**, or IoU.
<img src="nb_images/iou.png" style="width:500px;height:400;">
<caption><center> <u> **Figure 8** </u>: Definition of "Intersection over Union". <br> </center></caption>
#### **Exercise**: Implement iou(). Some hints:
- In this code, we use the convention that (0,0) is the top-left corner of an image, (1,0) is the upper-right corner, and (1,1) is the lower-right corner. In other words, the (0,0) origin starts at the top left corner of the image. As x increases, we move to the right. As y increases, we move down.
- For this exercise, we define a box using its two corners: upper left $(x_1, y_1)$ and lower right $(x_2,y_2)$, instead of using the midpoint, height and width. (This makes it a bit easier to calculate the intersection).
- To calculate the area of a rectangle, multiply its height $(y_2 - y_1)$ by its width $(x_2 - x_1)$. (Since $(x_1,y_1)$ is the top left and $x_2,y_2$ are the bottom right, these differences should be non-negative.
- To find the **intersection** of the two boxes $(xi_{1}, yi_{1}, xi_{2}, yi_{2})$:
- Feel free to draw some examples on paper to clarify this conceptually.
- The top left corner of the intersection $(xi_{1}, yi_{1})$ is found by comparing the top left corners $(x_1, y_1)$ of the two boxes and finding a vertex that has an x-coordinate that is closer to the right, and y-coordinate that is closer to the bottom.
- The bottom right corner of the intersection $(xi_{2}, yi_{2})$ is found by comparing the bottom right corners $(x_2,y_2)$ of the two boxes and finding a vertex whose x-coordinate is closer to the left, and the y-coordinate that is closer to the top.
- The two boxes **may have no intersection**. You can detect this if the intersection coordinates you calculate end up being the top right and/or bottom left corners of an intersection box. Another way to think of this is if you calculate the height $(y_2 - y_1)$ or width $(x_2 - x_1)$ and find that at least one of these lengths is negative, then there is no intersection (intersection area is zero).
- The two boxes may intersect at the **edges or vertices**, in which case the intersection area is still zero. This happens when either the height or width (or both) of the calculated intersection is zero.
**Additional Hints**
- `xi1` = **max**imum of the x1 coordinates of the two boxes
- `yi1` = **max**imum of the y1 coordinates of the two boxes
- `xi2` = **min**imum of the x2 coordinates of the two boxes
- `yi2` = **min**imum of the y2 coordinates of the two boxes
- `inter_area` = You can use `max(height, 0)` and `max(width, 0)`
```
# GRADED FUNCTION: iou
def iou(box1, box2):
    """Implement the intersection over union (IoU) between box1 and box2.

    Arguments:
    box1 -- first box, list object with coordinates (box1_x1, box1_y1, box1_x2, box1_y2)
    box2 -- second box, list object with coordinates (box2_x1, box2_y1, box2_x2, box2_y2)

    Returns:
    iou -- float in [0, 1]; 0.0 when the boxes are disjoint or only touch at
           an edge/vertex, and also when the union area is zero (both boxes
           degenerate), instead of raising ZeroDivisionError.
    """
    # Assign variable names to coordinates for clarity
    # (the original assigned these but then kept indexing the tuples directly).
    (box1_x1, box1_y1, box1_x2, box1_y2) = box1
    (box2_x1, box2_y1, box2_x2, box2_y2) = box2

    # Intersection rectangle: max of the top-left corners, min of the
    # bottom-right corners. Width/height are clamped at 0 for disjoint boxes.
    xi1 = max(box1_x1, box2_x1)
    yi1 = max(box1_y1, box2_y1)
    xi2 = min(box1_x2, box2_x2)
    yi2 = min(box1_y2, box2_y2)
    inter_width = max(xi2 - xi1, 0)
    inter_height = max(yi2 - yi1, 0)
    inter_area = inter_width * inter_height

    # Union(A, B) = A + B - Inter(A, B)
    box1_area = (box1_x2 - box1_x1) * (box1_y2 - box1_y1)
    box2_area = (box2_x2 - box2_x1) * (box2_y2 - box2_y1)
    union_area = box1_area + box2_area - inter_area

    # Robustness: two zero-area boxes would otherwise divide by zero.
    if union_area == 0:
        return 0.0
    return inter_area / union_area
# Exercise iou() on the four qualitative cases: overlapping boxes, disjoint
# boxes, boxes touching only at a vertex, and boxes touching only at an edge.
_iou_cases = [
    ("intersecting boxes", (2, 1, 4, 3), (1, 2, 3, 4)),
    ("non-intersecting boxes", (1, 2, 3, 4), (5, 6, 7, 8)),
    ("boxes that only touch at vertices", (1, 1, 2, 2), (2, 2, 3, 3)),
    ("boxes that only touch at edges", (1, 1, 3, 3), (2, 3, 3, 4)),
]
for _label, box1, box2 in _iou_cases:
    print("iou for " + _label + " = " + str(iou(box1, box2)))
```
**Expected Output**:
```
iou for intersecting boxes = 0.14285714285714285
iou for non-intersecting boxes = 0.0
iou for boxes that only touch at vertices = 0.0
iou for boxes that only touch at edges = 0.0
```
#### YOLO non-max suppression
You are now ready to implement non-max suppression. The key steps are:
1. Select the box that has the highest score.
2. Compute the overlap of this box with all other boxes, and remove boxes that overlap significantly (iou >= `iou_threshold`).
3. Go back to step 1 and iterate until there are no more boxes with a lower score than the currently selected box.
This will remove all boxes that have a large overlap with the selected boxes. Only the "best" boxes remain.
**Exercise**: Implement yolo_non_max_suppression() using TensorFlow. TensorFlow has two built-in functions that are used to implement non-max suppression (so you don't actually need to use your `iou()` implementation):
** Reference documentation **
- [tf.image.non_max_suppression()](https://www.tensorflow.org/api_docs/python/tf/image/non_max_suppression)
```
tf.image.non_max_suppression(
boxes,
scores,
max_output_size,
iou_threshold=0.5,
name=None
)
```
Note that in the version of tensorflow used here, there is no parameter `score_threshold` (it's shown in the documentation for the latest version) so trying to set this value will result in an error message: *got an unexpected keyword argument 'score_threshold.*
- [K.gather()](https://www.tensorflow.org/api_docs/python/tf/keras/backend/gather)
Even though the documentation shows `tf.keras.backend.gather()`, you can use `K.gather()` (the notebook imports the Keras backend as `K`).
```
K.gather(
reference,
indices
)
```
```
# GRADED FUNCTION: yolo_non_max_suppression
def yolo_non_max_suppression(scores, boxes, classes, max_boxes = 10, iou_threshold = 0.5):
    """
    Applies Non-max suppression (NMS) to set of boxes.

    Arguments:
    scores -- tensor of shape (None,), output of yolo_filter_boxes()
    boxes -- tensor of shape (None, 4), output of yolo_filter_boxes() that have been scaled to the image size (see later)
    classes -- tensor of shape (None,), output of yolo_filter_boxes()
    max_boxes -- integer, maximum number of predicted boxes you'd like
    iou_threshold -- real value, "intersection over union" threshold used for NMS filtering

    Returns:
    scores -- tensor of shape (, None), predicted score for each box
    boxes -- tensor of shape (4, None), predicted box coordinates
    classes -- tensor of shape (, None), predicted class for each box

    Note: The "None" dimension of the output tensors is at most max_boxes.
    """
    # tensor to be used in tf.image.non_max_suppression()
    max_boxes_tensor = K.variable(max_boxes, dtype='int32')
    K.get_session().run(tf.variables_initializer([max_boxes_tensor]))  # initialize variable max_boxes_tensor
    # BUG FIX: the original passed the raw Python int `max_boxes` here,
    # leaving `max_boxes_tensor` created-and-initialized but unused.
    nms_indices = tf.image.non_max_suppression(boxes = boxes, scores = scores, max_output_size = max_boxes_tensor, iou_threshold = iou_threshold)
    # Keep only the surviving indices in every output tensor.
    scores = K.gather(scores, nms_indices)
    boxes = K.gather(boxes, nms_indices)
    classes = K.gather(classes, nms_indices)
    return scores, boxes, classes
# Smoke test: run NMS on 54 random boxes (TensorFlow 1.x graph mode).
# NOTE(review): `classes` is drawn from a normal distribution, so the printed
# "class" is a float — fine for a shape check, not a real class id.
with tf.Session() as test_b:
    scores = tf.random_normal([54,], mean=1, stddev=4, seed = 1)
    boxes = tf.random_normal([54, 4], mean=1, stddev=4, seed = 1)
    classes = tf.random_normal([54,], mean=1, stddev=4, seed = 1)
    scores, boxes, classes = yolo_non_max_suppression(scores, boxes, classes)
    print("scores[2] = " + str(scores[2].eval()))
    print("boxes[2] = " + str(boxes[2].eval()))
    print("classes[2] = " + str(classes[2].eval()))
    print("scores.shape = " + str(scores.eval().shape))
    print("boxes.shape = " + str(boxes.eval().shape))
    print("classes.shape = " + str(classes.eval().shape))
```
**Expected Output**:
<table>
<tr>
<td>
**scores[2]**
</td>
<td>
6.9384
</td>
</tr>
<tr>
<td>
**boxes[2]**
</td>
<td>
[-5.299932 3.13798141 4.45036697 0.95942086]
</td>
</tr>
<tr>
<td>
**classes[2]**
</td>
<td>
-2.24527
</td>
</tr>
<tr>
<td>
**scores.shape**
</td>
<td>
(10,)
</td>
</tr>
<tr>
<td>
**boxes.shape**
</td>
<td>
(10, 4)
</td>
</tr>
<tr>
<td>
**classes.shape**
</td>
<td>
(10,)
</td>
</tr>
</table>
### 2.4 Wrapping up the filtering
It's time to implement a function taking the output of the deep CNN (the 19x19x5x85 dimensional encoding) and filtering through all the boxes using the functions you've just implemented.
**Exercise**: Implement `yolo_eval()` which takes the output of the YOLO encoding and filters the boxes using score threshold and NMS. There's just one last implementational detail you have to know. There're a few ways of representing boxes, such as via their corners or via their midpoint and height/width. YOLO converts between a few such formats at different times, using the following functions (which we have provided):
```python
boxes = yolo_boxes_to_corners(box_xy, box_wh)
```
which converts the yolo box coordinates (x,y,w,h) to box corners' coordinates (x1, y1, x2, y2) to fit the input of `yolo_filter_boxes`
```python
boxes = scale_boxes(boxes, image_shape)
```
YOLO's network was trained to run on 608x608 images. If you are testing this data on a different size image--for example, the car detection dataset had 720x1280 images--this step rescales the boxes so that they can be plotted on top of the original 720x1280 image.
Don't worry about these two functions; we'll show you where they need to be called.
```
# GRADED FUNCTION: yolo_eval
def yolo_eval(yolo_outputs, image_shape = (720., 1280.), max_boxes=10, score_threshold=.6, iou_threshold=.5):
    """
    Converts the output of YOLO encoding (a lot of boxes) to your predicted boxes along with their scores, box coordinates and classes.

    Arguments:
    yolo_outputs -- output of the encoding model (for image_shape of (608, 608, 3)), contains 4 tensors:
                    box_confidence: tensor of shape (None, 19, 19, 5, 1)
                    box_xy: tensor of shape (None, 19, 19, 5, 2)
                    box_wh: tensor of shape (None, 19, 19, 5, 2)
                    box_class_probs: tensor of shape (None, 19, 19, 5, 80)
    image_shape -- tensor of shape (2,) containing the input shape (float32)
    max_boxes -- integer, maximum number of predicted boxes you'd like
    score_threshold -- real value; boxes whose highest class score is below it are discarded
    iou_threshold -- real value, "intersection over union" threshold used for NMS filtering

    Returns:
    scores -- tensor of shape (None, ), predicted score for each box
    boxes -- tensor of shape (None, 4), predicted box coordinates
    classes -- tensor of shape (None,), predicted class for each box
    """
    # Retrieve outputs of the YOLO model
    box_confidence, box_xy, box_wh, box_class_probs = yolo_outputs
    # Convert (x, y, w, h) boxes to corner coordinates for the filter step.
    boxes = yolo_boxes_to_corners(box_xy, box_wh)
    # BUG FIX: the threshold was hard-coded to 0.5, silently ignoring the
    # score_threshold parameter.
    scores, boxes, classes = yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold = score_threshold)
    # Scale boxes back to the original image shape.
    boxes = scale_boxes(boxes, image_shape)
    # BUG FIX: max_boxes and iou_threshold were never forwarded, so callers
    # always got the NMS defaults.
    scores, boxes, classes = yolo_non_max_suppression(scores, boxes, classes, max_boxes = max_boxes, iou_threshold = iou_threshold)
    return scores, boxes, classes
# End-to-end smoke test of yolo_eval on a random (19, 19, 5, .) encoding
# (TensorFlow 1.x graph mode).
with tf.Session() as test_b:
    yolo_outputs = (tf.random_normal([19, 19, 5, 1], mean=1, stddev=4, seed = 1),
                    tf.random_normal([19, 19, 5, 2], mean=1, stddev=4, seed = 1),
                    tf.random_normal([19, 19, 5, 2], mean=1, stddev=4, seed = 1),
                    tf.random_normal([19, 19, 5, 80], mean=1, stddev=4, seed = 1))
    scores, boxes, classes = yolo_eval(yolo_outputs)
    print("scores[2] = " + str(scores[2].eval()))
    print("boxes[2] = " + str(boxes[2].eval()))
    print("classes[2] = " + str(classes[2].eval()))
    print("scores.shape = " + str(scores.eval().shape))
    print("boxes.shape = " + str(boxes.eval().shape))
    print("classes.shape = " + str(classes.eval().shape))
```
**Expected Output**:
<table>
<tr>
<td>
**scores[2]**
</td>
<td>
138.791
</td>
</tr>
<tr>
<td>
**boxes[2]**
</td>
<td>
[ 1292.32971191 -278.52166748 3876.98925781 -835.56494141]
</td>
</tr>
<tr>
<td>
**classes[2]**
</td>
<td>
54
</td>
</tr>
<tr>
<td>
**scores.shape**
</td>
<td>
(10,)
</td>
</tr>
<tr>
<td>
**boxes.shape**
</td>
<td>
(10, 4)
</td>
</tr>
<tr>
<td>
**classes.shape**
</td>
<td>
(10,)
</td>
</tr>
</table>
## Summary for YOLO:
- Input image (608, 608, 3)
- The input image goes through a CNN, resulting in a (19,19,5,85) dimensional output.
- After flattening the last two dimensions, the output is a volume of shape (19, 19, 425):
- Each cell in a 19x19 grid over the input image gives 425 numbers.
- 425 = 5 x 85 because each cell contains predictions for 5 boxes, corresponding to 5 anchor boxes, as seen in lecture.
- 85 = 5 + 80 where 5 is because $(p_c, b_x, b_y, b_h, b_w)$ has 5 numbers, and 80 is the number of classes we'd like to detect
- You then select only few boxes based on:
- Score-thresholding: throw away boxes that have detected a class with a score less than the threshold
- Non-max suppression: Compute the Intersection over Union and avoid selecting overlapping boxes
- This gives you YOLO's final output.
## 3 - Test YOLO pre-trained model on images
In this part, you are going to use a pre-trained model and test it on the car detection dataset. We'll need a session to execute the computation graph and evaluate the tensors.
```
sess = K.get_session()
```
### 3.1 - Defining classes, anchors and image shape.
* Recall that we are trying to detect 80 classes, and are using 5 anchor boxes.
* We have gathered the information on the 80 classes and 5 boxes in two files "coco_classes.txt" and "yolo_anchors.txt".
* We'll read class names and anchors from text files.
* The car detection dataset has 720x1280 images, which we've pre-processed into 608x608 images.
```
# Load the 80 COCO class names and the 5 anchor-box shapes from disk, and
# record the raw camera image size (height, width) as floats for scale_boxes.
class_names = read_classes("model_data/coco_classes.txt")
anchors = read_anchors("model_data/yolo_anchors.txt")
image_shape = (720., 1280.)
```
### 3.2 - Loading a pre-trained model
* Training a YOLO model takes a very long time and requires a fairly large dataset of labelled bounding boxes for a large range of target classes.
* You are going to load an existing pre-trained Keras YOLO model stored in "yolo.h5".
* These weights come from the official YOLO website, and were converted using a function written by Allan Zelener. References are at the end of this notebook. Technically, these are the parameters from the "YOLOv2" model, but we will simply refer to it as "YOLO" in this notebook.
Run the cell below to load the model from this file.
```
yolo_model = load_model("model_data/yolo.h5")
```
This loads the weights of a trained YOLO model. Here's a summary of the layers your model contains.
```
yolo_model.summary()
```
**Note**: On some computers, you may see a warning message from Keras. Don't worry about it if you do--it is fine.
**Reminder**: this model converts a preprocessed batch of input images (shape: (m, 608, 608, 3)) into a tensor of shape (m, 19, 19, 5, 85) as explained in Figure (2).
### 3.3 - Convert output of the model to usable bounding box tensors
The output of `yolo_model` is a (m, 19, 19, 5, 85) tensor that needs to pass through non-trivial processing and conversion. The following cell does that for you.
If you are curious about how `yolo_head` is implemented, you can find the function definition in the file ['keras_yolo.py'](https://github.com/allanzelener/YAD2K/blob/master/yad2k/models/keras_yolo.py). The file is located in your workspace in this path 'yad2k/models/keras_yolo.py'.
```
yolo_outputs = yolo_head(yolo_model.output, anchors, len(class_names))
```
You added `yolo_outputs` to your graph. This set of 4 tensors is ready to be used as input by your `yolo_eval` function.
### 3.4 - Filtering boxes
`yolo_outputs` gave you all the predicted boxes of `yolo_model` in the correct format. You're now ready to perform filtering and select only the best boxes. Let's now call `yolo_eval`, which you had previously implemented, to do this.
```
scores, boxes, classes = yolo_eval(yolo_outputs, image_shape)
```
### 3.5 - Run the graph on an image
Let the fun begin. You have created a graph that can be summarized as follows:
1. <font color='purple'> yolo_model.input </font> is given to `yolo_model`. The model is used to compute the output <font color='purple'> yolo_model.output </font>
2. <font color='purple'> yolo_model.output </font> is processed by `yolo_head`. It gives you <font color='purple'> yolo_outputs </font>
3. <font color='purple'> yolo_outputs </font> goes through a filtering function, `yolo_eval`. It outputs your predictions: <font color='purple'> scores, boxes, classes </font>
**Exercise**: Implement predict() which runs the graph to test YOLO on an image.
You will need to run a TensorFlow session, to have it compute `scores, boxes, classes`.
The code below also uses the following function:
```python
image, image_data = preprocess_image("images/" + image_file, model_image_size = (608, 608))
```
which outputs:
- image: a python (PIL) representation of your image used for drawing boxes. You won't need to use it.
- image_data: a numpy-array representing the image. This will be the input to the CNN.
**Important note**: when a model uses BatchNorm (as is the case in YOLO), you will need to pass an additional placeholder in the feed_dict {K.learning_phase(): 0}.
#### Hint: Using the TensorFlow Session object
* Recall that above, we called `K.get_session()` and saved the Session object in `sess`.
* To evaluate a list of tensors, we call `sess.run()` like this:
```
sess.run(fetches=[tensor1,tensor2,tensor3],
feed_dict={yolo_model.input: the_input_variable,
K.learning_phase():0
}
```
* Notice that the variables `scores, boxes, classes` are not passed into the `predict` function, but these are global variables that you will use within the `predict` function.
```
def predict(sess, image_file):
    """
    Runs the graph stored in "sess" to predict boxes for "image_file". Prints and plots the predictions.

    Arguments:
    sess -- your tensorflow/Keras session containing the YOLO graph
    image_file -- name of an image stored in the "images" folder.

    Returns:
    out_scores -- tensor of shape (None, ), scores of the predicted boxes
    out_boxes -- tensor of shape (None, 4), coordinates of the predicted boxes
    out_classes -- tensor of shape (None, ), class index of the predicted boxes

    Note: "None" actually represents the number of predicted boxes, it varies between 0 and max_boxes.
    """
    # NOTE(review): relies on notebook-level globals defined in earlier cells:
    # `scores`, `boxes`, `classes`, `yolo_model`, `class_names`. This function
    # is not self-contained.
    # Preprocess your image to the 608x608 input the network was trained on.
    image, image_data = preprocess_image("images/" + image_file, model_image_size = (608, 608))
    # Run the session. K.learning_phase(): 0 is required because the model
    # contains BatchNorm layers and must run in inference mode.
    ### START CODE HERE ### (≈ 1 line)
    out_scores, out_boxes, out_classes = sess.run([scores, boxes, classes], feed_dict={yolo_model.input: image_data, K.learning_phase(): 0})
    ### END CODE HERE ###
    # Print predictions info
    print('Found {} boxes for {}'.format(len(out_boxes), image_file))
    # Generate colors for drawing bounding boxes.
    colors = generate_colors(class_names)
    # Draw bounding boxes on the image file
    draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)
    # Save the predicted bounding box on the image
    image.save(os.path.join("out", image_file), quality=90)
    # Display the results in the notebook.
    # NOTE(review): scipy.misc.imread was removed in SciPy 1.2 — this only
    # works on old SciPy versions; imageio.imread is the modern replacement.
    output_image = scipy.misc.imread(os.path.join("out", image_file))
    imshow(output_image)
    return out_scores, out_boxes, out_classes
```
Run the following cell on the "test.jpg" image to verify that your function is correct.
```
out_scores, out_boxes, out_classes = predict(sess, "test.jpg")
```
**Expected Output**:
<table>
<tr>
<td>
**Found 7 boxes for test.jpg**
</td>
</tr>
<tr>
<td>
**car**
</td>
<td>
0.60 (925, 285) (1045, 374)
</td>
</tr>
<tr>
<td>
**car**
</td>
<td>
0.66 (706, 279) (786, 350)
</td>
</tr>
<tr>
<td>
**bus**
</td>
<td>
0.67 (5, 266) (220, 407)
</td>
</tr>
<tr>
<td>
**car**
</td>
<td>
0.70 (947, 324) (1280, 705)
</td>
</tr>
<tr>
<td>
**car**
</td>
<td>
0.74 (159, 303) (346, 440)
</td>
</tr>
<tr>
<td>
**car**
</td>
<td>
0.80 (761, 282) (942, 412)
</td>
</tr>
<tr>
<td>
**car**
</td>
<td>
0.89 (367, 300) (745, 648)
</td>
</tr>
</table>
The model you've just run is actually able to detect 80 different classes listed in "coco_classes.txt". To test the model on your own images:
1. Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub.
2. Add your image to this Jupyter Notebook's directory, in the "images" folder
3. Write your image's name in the cell above code
4. Run the code and see the output of the algorithm!
If you were to run your session in a for loop over all your images. Here's what you would get:
<center>
<video width="400" height="200" src="nb_images/pred_video_compressed2.mp4" type="video/mp4" controls>
</video>
</center>
<caption><center> Predictions of the YOLO model on pictures taken from a camera while driving around the Silicon Valley <br> Thanks [drive.ai](https://www.drive.ai/) for providing this dataset! </center></caption>
## <font color='darkblue'>What you should remember:
- YOLO is a state-of-the-art object detection model that is fast and accurate
- It runs an input image through a CNN which outputs a 19x19x5x85 dimensional volume.
- The encoding can be seen as a grid where each of the 19x19 cells contains information about 5 boxes.
- You filter through all the boxes using non-max suppression. Specifically:
- Score thresholding on the probability of detecting a class to keep only accurate (high probability) boxes
- Intersection over Union (IoU) thresholding to eliminate overlapping boxes
- Because training a YOLO model from randomly initialized weights is non-trivial and requires a large dataset as well as lot of computation, we used previously trained model parameters in this exercise. If you wish, you can also try fine-tuning the YOLO model with your own dataset, though this would be a fairly non-trivial exercise.
**References**: The ideas presented in this notebook came primarily from the two YOLO papers. The implementation here also took significant inspiration and used many components from Allan Zelener's GitHub repository. The pre-trained weights used in this exercise came from the official YOLO website.
- Joseph Redmon, Santosh Divvala, Ross Girshick, Ali Farhadi - [You Only Look Once: Unified, Real-Time Object Detection](https://arxiv.org/abs/1506.02640) (2015)
- Joseph Redmon, Ali Farhadi - [YOLO9000: Better, Faster, Stronger](https://arxiv.org/abs/1612.08242) (2016)
- Allan Zelener - [YAD2K: Yet Another Darknet 2 Keras](https://github.com/allanzelener/YAD2K)
- The official YOLO website (https://pjreddie.com/darknet/yolo/)
**Car detection dataset**:
<a rel="license" href="http://creativecommons.org/licenses/by/4.0/"><img alt="Creative Commons License" style="border-width:0" src="https://i.creativecommons.org/l/by/4.0/88x31.png" /></a><br /><span xmlns:dct="http://purl.org/dc/terms/" property="dct:title">The Drive.ai Sample Dataset</span> (provided by drive.ai) is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution 4.0 International License</a>. We are grateful to Brody Huval, Chih Hu and Rahul Patel for providing this data.
| github_jupyter |
```
# default_exp helpers
```
# helpers
> this didn't fit anywhere else
```
#export
import numpy as np
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.pyplot as plt
#ToDo: Propagate them through the methods
iters = 10
l2 = 1
n_std = 4
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter
import IPython
def plot_regret(y_optimal_list, y_hat_list):
    """Plot the cumulative regret curve.

    Regret at step t is the cumulative sum of (optimal reward - obtained
    reward) over the first t steps; a flattening curve means the policy
    has converged to the optimal arm.

    Arguments:
    y_optimal_list -- sequence of per-step optimal rewards
    y_hat_list -- sequence of per-step obtained rewards (same length)
    """
    y_optimal_array = np.array(y_optimal_list)
    y_hat_array = np.array(y_hat_list)
    # FIX: removed the unused `regret_list = []` local from the original.
    regret = np.cumsum(y_optimal_array - y_hat_array)
    plt.plot(regret)
def showcase_code(pyfile,class_name = False, method_name = False, end_string = False):
    """shows content of py file

    Renders a slice of `pyfile` as syntax-highlighted HTML for display in a
    notebook. If `class_name` (or `method_name`) is given, the slice starts at
    that definition and ends just before the next `class` (resp. `def`);
    `end_string` instead ends the slice at the literal marker '# helpers'.

    NOTE(review): indentation of this cell was lost in the notebook export;
    the nesting below is a best-effort reconstruction — verify against the
    original .ipynb. In particular, calling with no keyword arguments leaves
    `end_index` undefined (NameError), so at least one selector is required.
    """
    with open(pyfile) as f:
        code = f.read()
    if class_name:
        #1. find beginning (class + <name>)
        index = code.find(f'class {class_name}')
        code = code[index:]
        #2. find end (class (new class!) or end of script)
        # the [7:] offset skips past the matched 'class ' itself
        end_index = code[7:].find('class')
    if method_name:
        #1. find beginning (def + <name>)
        index = code.find(f'def {method_name}')
        code = code[index:]
        #2. find end (next def or end of script)
        end_index = code[7:].find('def')
    if end_string:
        end_index = code[7:].find('# helpers')
    code = code[:end_index]
    formatter = HtmlFormatter()
    return IPython.display.HTML('<style type="text/css">{}</style>{}'.format(
        formatter.get_style_defs('.highlight'),
        highlight(code, PythonLexer(), formatter)))
showcase_code('thompson_sampling/helpers.py',method_name='showcase_code')
showcase_code('thompson_sampling/solvers.py',class_name='BetaBandit', end_string = True)
#export
import scipy.stats as stats
def plot_online_logreg(online_lr, wee_x, wee_y):
    """Visualize an online logistic regression fit.

    Left panel: the Gaussian posterior over the weight beta. Right panel: the
    implied sigmoid theta(x) with an uncertainty band from m +/- 2 * q**-1,
    plus the observed data points.

    Arguments:
    online_lr -- fitted model exposing scalar attributes `m` (posterior mean)
                 and `q` (posterior precision term; q**-1 is used as a
                 std-dev-like scale — NOTE(review): confirm against the
                 OnlineLogisticRegression definition)
    wee_x -- 1-D array of inputs to scatter
    wee_y -- 1-D array of binary targets to scatter
    """
    # closing other figures
    plt.close('all')
    # BUG FIX: the original called plt.figure(...) twice back to back,
    # leaving a stray empty figure; create it once.
    plt.figure(figsize=[9,3.5], dpi=150)

    # pdf of the weight posterior
    X_pdf = np.linspace(-4, 4, 1000)
    pdf = stats.norm(loc=online_lr.m, scale=online_lr.q**(-1.0)).pdf(X_pdf)

    # range and resolution of the probability plot
    X_prob = np.linspace(-6, 6, 1000)
    p_dist = 1/(1 + np.exp(-X_prob * online_lr.m))
    p_dist_plus = 1/(1 + np.exp(-X_prob * (online_lr.m + 2*online_lr.q**(-1.0))))
    p_dist_minus = 1/(1 + np.exp(-X_prob * (online_lr.m - 2*online_lr.q**(-1.0))))

    # opening subplots
    ax1 = plt.subplot2grid((1, 5), (0, 0), colspan=2, rowspan=1)
    ax2 = plt.subplot2grid((1, 5), (0, 2), colspan=3, rowspan=1)

    # plotting distribution of weights
    ax1.plot(X_pdf, pdf, color='b', linewidth=2, alpha=0.5)
    ax1.fill_between(X_pdf, pdf, 0, color='b', alpha=0.2)

    # plotting probabilities with the +/- 2 sigma band and the data
    ax2.plot(X_prob, p_dist, color='b', linewidth=2, alpha=0.5)
    ax2.fill_between(X_prob, p_dist_plus, p_dist_minus, color='b', alpha=0.2)
    ax2.scatter(wee_x, wee_y, c='k')

    # titles and labels
    ax1.set_title('OLR estimate for $\\beta$', fontsize=10)
    ax1.set_xlabel('$\\beta$', fontsize=10); ax1.set_ylabel('$density$', fontsize=10)
    ax2.set_title('OLR estimate for $\\theta(x)$', fontsize=10)
    ax2.set_xlabel('$x$', fontsize=10); ax2.set_ylabel('$\\theta(x)$', fontsize=10)
    # NOTE(review): no artist above carries a label, so this legend call only
    # emits a "no handles" warning; kept for parity with the original.
    ax1.legend(fontsize=10)
    plt.tight_layout()
    plt.show()
import numpy as np
from thompson_sampling.models import OnlineLogisticRegression, BatchBayesLinReg
from thompson_sampling.multi_armed_bandits import contextual_categorical_bandit
# Demo: draw noisy samples from one arm of a contextual bandit, fit an online
# (Bayesian) logistic regression point by point, then plot the learned weight
# posterior and the implied response curve.
theta = [0.6,1.0]
noise = 0.1
wee_x = np.random.uniform(-6,6,10)
# contextual_categorical_bandit(...) returns a tuple; element [0] is the
# reward/label used as the regression target here.
wee_y = np.array([contextual_categorical_bandit(x,0, theta, noise)[0] for x in wee_x])
# OLR object (lambda_, alpha, n_dim) — see OnlineLogisticRegression for the
# exact parameter meanings.
online_lr = OnlineLogisticRegression(1, .5, 1)
for i in range(len(wee_y)):
    online_lr.fit(wee_x[i].reshape(-1,1), wee_y[i].reshape(-1,1))
plot_online_logreg(online_lr, wee_x, wee_y)
#export
from mpl_toolkits.axes_grid1 import ImageGrid
def prettify_ax(ax):
    "Apply the shared weight-space cosmetics (unit-square limits, $w_1$/$w_2$ labels) to *ax* and return it."
    for set_limits in (ax.set_xlim, ax.set_ylim):
        set_limits(-1, 1)
    ax.set_xlabel('$w_1$')
    ax.set_ylabel('$w_2$')
    return ax
def sample(n, weights):
    "Lazily yield *n* observations (x, y): x = (1, u) with u ~ U(-1, 1), y = w.x + N(0, 0.2)."
    produced = 0
    while produced < n:
        features = np.array([1, np.random.uniform(-1, 1)])
        target = np.dot(weights, features) + np.random.normal(0, .2)
        yield features, target
        produced += 1
def sample(n, weights):
    "Return (X, y): X has rows (1, u_i) with u_i ~ U(-1, 1); y are noisy linear targets."
    rows = []
    for _ in range(n):
        rows.append([1, np.random.uniform(-1, 1)])
    X = np.array(rows)
    y = []
    for row in X:
        y.append(np.dot(weights, row) + np.random.normal(0, .2))
    return X, y
def plot_param_through_time(model, N, n_samples, X, y):
    """Plot, for each observation, the prior/posterior weight density and the
    posterior predictive as the Bayesian model is updated online.

    Parameters
    ----------
    model : object exposing `predict`, `learn` and a `weights_dist` attribute
        (e.g. BatchBayesLinReg)
    N : int, number of contour levels for the weight-density panels
    n_samples : int, number of observations shown (one 3-panel grid row each)
    X, y : observations, consumed in order

    Fixes: the original re-assigned `n_samples = 5` inside the body, silently
    ignoring the parameter; an unused `pred_dist = model.predict(xi)` is removed.
    NOTE(review): relies on the module-level `weights` (the true parameters) for
    the red reference markers/line -- confirm it is defined before calling.
    """
    w = np.linspace(-1, 1, 100)
    W = np.dstack(np.meshgrid(w, w))
    fig = plt.figure(figsize=(7 * n_samples, 21))
    grid = ImageGrid(
        fig, 111,                    # similar to subplot(111)
        nrows_ncols=(n_samples, 3),  # one row per observation, 3 panels each
        axes_pad=.5                  # pad between axes in inches
    )
    # Keep the already-seen observations so each row can re-plot them
    xs = []
    ys = []
    for i, (xi, yi) in enumerate(zip(X, y)):
        # Prior weight distribution (before seeing this observation)
        ax = prettify_ax(grid[3 * i])
        ax.set_title(f'Prior weight distribution #{i + 1}')
        ax.contourf(w, w, model.weights_dist.pdf(W), N, cmap='viridis')
        ax.scatter(*weights, color='red')  # true weights the model has to find
        # Update model with the new observation
        model.learn(xi, yi)
        # Posterior weight distribution
        ax = prettify_ax(grid[3 * i + 1])
        ax.set_title(f'Posterior weight distribution #{i + 1}')
        ax.contourf(w, w, model.weights_dist.pdf(W), N, cmap='viridis')
        ax.scatter(*weights, color='red')  # true weights the model has to find
        # Posterior target distribution
        xs.append(xi)
        ys.append(yi)
        posteriors = [model.predict(np.array([1, wi])) for wi in w]
        ax = prettify_ax(grid[3 * i + 2])
        ax.set_title(f'Posterior target distribution #{i + 1}')
        # Old points as dots, the newest point as a star
        # (renamed xj: the original comprehension shadowed the loop variable xi)
        ax.scatter([xj[1] for xj in xs[:-1]], ys[:-1])
        ax.scatter(xs[-1][1], ys[-1], marker='*')
        # Predictive mean along with the 95% predictive interval
        ax.plot(w, [p.mean() for p in posteriors], linestyle='--')
        cis = [p.interval(.95) for p in posteriors]
        ax.fill_between(
            x=w,
            y1=[ci[0] for ci in cis],
            y2=[ci[1] for ci in cis],
            alpha=.1
        )
        # True target line
        ax.plot(w, [np.dot(weights, [1, xj]) for xj in w], color='red')
def sample(n, weights):
    "Generator of *n* noisy linear observations: x = (1, u), u ~ U(-1, 1); y = w.x + N(0, 0.2)."
    i = 0
    while i < n:
        i += 1
        point = np.array([1, np.random.uniform(-1, 1)])
        yield point, np.dot(weights, point) + np.random.normal(0, .2)
def sample(n, weights):
    "Return (X, y) where X stacks rows (1, u_i), u_i ~ U(-1, 1), and y adds N(0, 0.2) noise to w.x."
    X = np.array([[1, np.random.uniform(-1, 1)] for _ in range(n)])
    y = list(map(lambda row: np.dot(weights, row) + np.random.normal(0, .2), X))
    return X, y
# Demo: watch the Bayesian linear-regression posterior sharpen one point at a time
model = BatchBayesLinReg(n_features=2, alpha=2, beta=25)
np.random.seed(42)
# Pick some true parameters that the model has to find
weights = np.array([-.3, .5])
n_samples = 5
N = 100  # contour levels for the weight-density panels
X,y = sample(n_samples, weights)
plot_param_through_time(model,N,n_samples,X,y)
```
| github_jupyter |
```
#export
from local.torch_basics import *
from local.test import *
from local.core import *
from local.data.all import *
from local.tabular.core import *
# Soft dependency: cudf/nvcategory ship with NVIDIA RAPIDS only.
# Fix: catch ImportError specifically instead of a bare `except:`, which also
# swallowed KeyboardInterrupt/SystemExit and any unrelated error.
try: import cudf,nvcategory
except ImportError: print("This requires rapids, see https://rapids.ai/ for installation details")
from local.notebook.showdoc import *
#default_exp tabular.rapids
```
# Tabular with rapids
> Basic functions to preprocess tabular data before assembling it in a `DataBunch` on the GPU.
```
#export
# Let NumPy coerce a cudf DataFrame (np.array(df)) by round-tripping through pandas.
# NOTE(review): recent cudf exposes `to_pandas()` rather than `.pandas()` --
# confirm against the pinned RAPIDS version.
@patch
def __array__(self:cudf.DataFrame): return self.pandas().__array__()
#export
class TabularGPU(Tabular):
    "`Tabular` whose `items` live in a cudf GPU DataFrame instead of pandas."
    def transform(self, cols, f):
        # Apply `f` to each named column, replacing it in place.
        for c in cols: self[c] = f(self[c])
    def __getattr__(self,k):
        # Fall back to column access: `to.colname` returns the cudf column if it exists,
        # otherwise defer to Tabular's own attribute lookup.
        if isinstance(self.items, cudf.DataFrame) and k in self.items.columns: return self.items[k]
        return super().__getattr__(k)
```
## TabularProcessors
```
#export
def _to_str(c): return c if c.dtype == "object" else c.astype("str")
def _remove_none(c):
if None in c: c.remove(None)
return c
#export
@Categorify
def setups(self, to: TabularGPU):
    "Learn, on the GPU, the category keys (`lbls`) and CPU-side `CategoryMap`s (`classes`) for every categorical column."
    # NOTE(review): `to.iloc[:,n]` is indexed with the column *name*; this relies on
    # Tabular's iloc accepting names (unlike pandas) -- confirm.
    self.lbls = {n: nvcategory.from_strings(_to_str(to.iloc[:,n]).data).keys() for n in to.all_cat_names}
    # add_na only for feature columns; target columns keep their raw categories
    self.classes = {n: CategoryMap(_remove_none(c.to_host()), add_na=(n in to.cat_names)) for n,c in self.lbls.items()}
@patch
def _apply_cats_gpu(self: Categorify, c, add=1):
    """Encode the cudf string column *c* with the categories learned in `setups`,
    offset by *add* (1 for feature columns, where 0 is reserved for #na#/unknown;
    0 for targets).

    Fix: the original body referenced an undefined name `add` (NameError at call
    time); it is now a keyword parameter, default matching the feature-column use.
    """
    return cudf.Series(nvcategory.from_strings(_to_str(c).data).set_keys(self.lbls[c.name]).values()).add(add)
@Categorify
def encodes(self, to: TabularGPU):
    "Encode categorical columns to ints on the GPU: offset 1 for features (0 = #na#/unknown), offset 0 for targets."
    def _apply_cats_gpu(add, c):
        # Map strings -> learned key ids, then shift by `add`
        return cudf.Series(nvcategory.from_strings(_to_str(c).data).set_keys(self.lbls[c.name]).values()).add(add)
    to.transform(to.cat_names, partial(_apply_cats_gpu, 1))
    to.transform(L(to.cat_y), partial(_apply_cats_gpu, 0))
# -- Categorify on GPU: basic encode, unseen values, decode, and splits --
df = cudf.from_pandas(pd.DataFrame({'a':[0,1,2,0,2]}))
to = TabularGPU(df, Categorify, 'a')
cat = to.procs.categorify
test_eq(list(cat['a']), ['#na#','0','1','2'])
test_eq(to.a.to_array(), np.array([1,2,3,1,3]))
df1 = cudf.from_pandas(pd.DataFrame({'a':[1,0,3,-1,2]}))
to1 = to.new(df1)
cat(to1)
#Values that weren't in the training df are sent to 0 (na)
test_eq(to1.a.to_array(), np.array([2,1,0,0,3]))
#Test decode (round-trip back to the original string categories via pandas)
to2 = TabularPandas(to1.items.to_pandas(), None, 'a')
to2 = cat.decode(to2)
test_eq(to2.a, np.array(['1','0','#na#','#na#','2']))
# With splits, categories are learned from the training split only
df = cudf.from_pandas(pd.DataFrame({'a':[0,1,2,3,2]}))
to = TabularGPU(df, Categorify, 'a', splits=[[0,1,2], [3,4]])
cat = to.procs.categorify
test_eq(list(cat['a']), ['#na#','0','1','2'])
test_eq(to.a.to_array(), np.array([1,2,3,0,3]))
#TODO Categorical (fails for now)
#df = cudf.from_pandas(pd.DataFrame({'a':pd.Categorical(['M','H','L','M'], categories=['H','M','L'], ordered=True)}))
#to = TabularGPU(df, Categorify, 'a')
#cat = to.procs.categorify
#test_eq(cat['a'].to_host(), ['H','M','L'])
#test_eq(df["a"].to_array(), [2,1,3,2])
#export
@Normalize
def setups(self, to: TabularGPU):
    "Record per-column mean and (population) std of the continuous columns."
    self.means = {n: to.iloc[:,n].mean() for n in to.cont_names}
    # ddof=0 => population std; +1e-7 guards against division by zero on constant columns
    self.stds = {n: to.iloc[:,n].std(ddof=0)+1e-7 for n in to.cont_names}
@Normalize
def encodes(self, to: TabularGPU):
    # Standardize each continuous column with the stats captured in `setups`
    to.transform(to.cont_names, lambda c: (c-self.means[c.name])/self.stds[c.name])
# -- Normalize on GPU: full frame, new data, decode round-trip, and splits --
df = cudf.from_pandas(pd.DataFrame({'a':[0,1,2,3,4]}))
to = TabularGPU(df, Normalize, cont_names='a')
norm = to.procs.normalize
x = np.array([0,1,2,3,4])
m,s = x.mean(),x.std()
test_eq(norm.means['a'], m)
test_close(norm.stds['a'], s)
test_close(to.a.to_array(), (x-m)/s)
# New (inference-time) data is normalized with the stats learned at setup
df1 = cudf.from_pandas(pd.DataFrame({'a':[5,6,7]}))
to1 = to.new(df1)
norm(to1)
test_close(to1.a.to_array(), (np.array([5,6,7])-m)/s)
to2 = TabularPandas(to1.items.to_pandas(), None, cont_names='a')
to2 = norm.decode(to2)
test_close(to2.a, [5,6,7])
# With splits, stats come from the training split only
df = cudf.from_pandas(pd.DataFrame({'a':[0,1,2,3,4]}))
to = TabularGPU(df, Normalize, cont_names='a', splits=[[0,1,2], [3,4]])
norm = to.procs.normalize
x = np.array([0,1,2])
m,s = x.mean(),x.std()
test_eq(norm.means, {'a': m})
test_close(norm.stds['a'], s)
test_close(to.a.to_array(), (np.array([0,1,2,3,4])-m)/s)
#export
@patch
def median(self:cudf.Series):
    "Get the median of `self` (missing values are dropped first)."
    # Fix: sort FIRST, then reset the index. The original reset before sorting,
    # so the integer lookups below were label-based on the pre-sort order and
    # returned the wrong elements whenever the input was not already sorted.
    col = self.dropna().sort_values().reset_index(drop=True)
    return col[len(col)//2] if len(col)%2 != 0 else (col[len(col)//2]+col[len(col)//2-1])/2
# Median checks: even count after NA-drop -> mean of the two middle values; odd -> middle value
col = cudf.Series([0,1,np.nan,1,2,3,4])
test_eq(col.median(), 1.5)
col = cudf.Series([np.nan,1,np.nan,1,2,3,4])
test_eq(col.median(), 2)
#export
@patch
def idxmax(self:cudf.Series):
    "Return the index of the first occurrence of the max in `self`"
    # NOTE(review): assumes `argsort(ascending=False)` yields a Series whose first
    # index label is the position of the maximum -- verify against the cudf version
    # in use (pandas' Series.argsort has no `ascending` argument).
    return self.argsort(ascending=False).index[0]
#export
@FillMissing
def setups(self, to: TabularGPU):
    "For every continuous column that has missing values, compute and store its fill value."
    self.na_dict = {}
    for n in to.cont_names:
        # NOTE(review): iloc indexed with the column name, as elsewhere in this module
        col = to.iloc[:, n]
        if col.isnull().any(): self.na_dict[n] = self.fill_strategy(col, self.fill_vals[n])
@FillMissing
def encodes(self, to: TabularGPU):
    "Replace NAs in continuous columns with the value chosen at setup, optionally flagging them in `<col>_na`."
    for n in to.cont_names:
        if n in self.na_dict:
            if self.add_col:
                to.items[n+'_na'] = to[n].isnull()
                if n+'_na' not in to.cat_names: to.cat_names.append(n+'_na')
            to[n] = to[n].fillna(self.na_dict[n])
        elif to[n].isnull().any():
            # Fix: the original tested the *global* `df` here instead of `to`, so the
            # guard inspected whatever dataframe happened to be in scope.
            raise Exception(f"nan values in `{n}` but not in setup training set")
# -- FillMissing on GPU: one processor per strategy, applied to copies of the same frame --
fill1,fill2,fill3 = (FillMissing(fill_strategy=s)
                     for s in [FillStrategy.median, FillStrategy.constant, FillStrategy.mode])
df = cudf.from_pandas(pd.DataFrame({'a':[0,1,np.nan,1,2,3,4]}))
df1 = df.copy(); df2 = df.copy()
tos = TabularGPU(df, fill1, cont_names='a'),TabularGPU(df1, fill2, cont_names='a'),TabularGPU(df2, fill3, cont_names='a')
test_eq(fill1.na_dict, {'a': 1.5})
test_eq(fill2.na_dict, {'a': 0})
test_eq(fill3.na_dict, {'a': 1.0})
for t in tos: test_eq(t.cat_names, ['a_na'])
for to_,v in zip(tos, [1.5, 0., 1.]):
    test_eq(to_.a.to_array(), np.array([0, 1, v, 1, 2, 3, 4]))
    test_eq(to_.a_na.to_array(), np.array([0, 0, 1, 0, 0, 0, 0]))
# New (inference-time) data reuses the fill values learned at setup
dfa = cudf.from_pandas(pd.DataFrame({'a':[np.nan,0,np.nan]}))
tos = [t.new(o) for t,o in zip(tos,(dfa,dfa.copy(),dfa.copy()))]
for t in tos: t.process()
for to_,v in zip(tos, [1.5, 0., 1.]):
    test_eq(to_.a.to_array(), np.array([v, 0, v]))
    test_eq(to_.a_na.to_array(), np.array([1, 0, 1]))
```
## Tabular Pipelines -
```
# -- Full pipeline: Normalize + Categorify + FillMissing chained together --
procs = [Normalize, Categorify, FillMissing, noop]
df = cudf.from_pandas(pd.DataFrame({'a':[0,1,2,1,1,2,0], 'b':[0,1,np.nan,1,2,3,4]}))
to = TabularGPU(df, procs, cat_names='a', cont_names='b')
#Test setup and apply on df_trn
test_eq(to.a.to_array(), [1,2,3,2,2,3,1])
test_eq(to.b_na.to_array(), [1,1,2,1,1,1,1])
x = np.array([0,1,1.5,1,2,3,4])
m,s = x.mean(),x.std()
test_close(to.b.to_array(), (x-m)/s)
test_eq(to.procs.classes, {'a': ['#na#','0','1','2'], 'b_na': ['#na#','False','True']})
#Test apply on y_names (the target column 'c' is encoded without the #na# offset)
procs = [Normalize, Categorify, FillMissing, noop]
df = cudf.from_pandas(pd.DataFrame({'a':[0,1,2,1,1,2,0], 'b':[0,1,np.nan,1,2,3,4], 'c': ['b','a','b','a','a','b','a']}))
to = TabularGPU(df, procs, cat_names='a', cont_names='b', y_names='c')
test_eq(to.cat_names, ['a', 'b_na'])
test_eq(to.a.to_array(), [1,2,3,2,2,3,1])
test_eq(to.b_na.to_array(), [1,1,2,1,1,1,1])
test_eq(to.c.to_array(), [1,0,1,0,0,1,0])
x = np.array([0,1,1.5,1,2,3,4])
m,s = x.mean(),x.std()
test_close(to.b.to_array(), (x-m)/s)
test_eq(to.procs.classes, {'a': ['#na#','0','1','2'], 'b_na': ['#na#','False','True'], 'c': ['a','b']})
# Same pipeline again, additionally checking the encoded dtype
procs = [Normalize, Categorify, FillMissing, noop]
df = cudf.from_pandas(pd.DataFrame({'a':[0,1,2,1,1,2,0], 'b':[0,1,np.nan,1,2,3,4], 'c': ['b','a','b','a','a','b','a']}))
to = TabularGPU(df, procs, cat_names='a', cont_names='b', y_names='c')
test_eq(to.cat_names, ['a', 'b_na'])
test_eq(to.a.to_array(), [1,2,3,2,2,3,1])
test_eq(to.a.dtype,int)
test_eq(to.b_na.to_array(), [1,1,2,1,1,1,1])
test_eq(to.c.to_array(), [1,0,1,0,0,1,0])
# With splits: stats and categories come from the training split only
procs = [Normalize, Categorify, FillMissing, noop]
df = cudf.from_pandas(pd.DataFrame({'a':[0,1,2,1,1,2,0], 'b':[0,np.nan,1,1,2,3,4], 'c': ['b','a','b','a','a','b','a']}))
to = TabularGPU(df, procs, cat_names='a', cont_names='b', y_names='c', splits=[[0,1,4,6], [2,3,5]])
test_eq(to.cat_names, ['a', 'b_na'])
test_eq(to.a.to_array(), [1,2,2,1,0,2,0])
test_eq(to.a.dtype,int)
test_eq(to.b_na.to_array(), [1,2,1,1,1,1,1])
test_eq(to.c.to_array(), [1,0,0,0,1,0,1])
#export
from torch.utils.dlpack import from_dlpack
@ReadTabBatch
def encodes(self, to: TabularGPU):
    # GPU-to-GPU handoff: cudf columns -> DLPack capsules -> torch tensors
    # (categoricals as int64, continuous as float32, targets as int64).
    return from_dlpack(to.cats.to_dlpack()).long(),from_dlpack(to.conts.to_dlpack()).float(), from_dlpack(to.targ.to_dlpack()).long()
```
## Integration example
```
# End-to-end example: preprocess ADULT on the GPU, then batch it into torch tensors
path = untar_data(URLs.ADULT_SAMPLE)
df = cudf.from_pandas(pd.read_csv(path/'adult.csv'))
df_trn,df_tst = df.iloc[:10000].copy(),df.iloc[10000:].copy()
df_trn.head()
cat_names = ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race']
cont_names = ['age', 'fnlwgt', 'education-num']
procs = [Categorify, FillMissing, Normalize]
splits = RandomSplitter()(range_of(df_trn))
%time to = TabularGPU(df_trn, procs, splits=splits, cat_names=cat_names, cont_names=cont_names, y_names="salary")
# Re-express the splits as contiguous position ranges for DataSource
splits = [list(range(len(splits[0]))), list(range(len(splits[0]), 10000))]
dsrc = DataSource(to, splits=splits, tfms=[None])
dl = TabDataLoader(to.valid, bs=64, num_workers=0)
dl.show_batch()
```
## Export -
```
#hide
from local.notebook.export import notebook2script
notebook2script(all_fs=True)
```
| github_jupyter |
# Inspirational Notebooks
### Generating new features according to these notebooks
* https://www.kaggle.com/nuhsikander/lgbm-new-features-corrected
* https://www.kaggle.com/rteja1113/lightgbm-with-count-features
* https://www.kaggle.com/aharless/swetha-s-xgboost-revised
* https://www.kaggle.com/bk0000/non-blending-lightgbm-model-lb-0-977
# Load Data
```
%matplotlib inline
import matplotlib.pyplot as plt
import math
import numpy as np
import pandas as pd
import seaborn as sns
from pandas import read_csv
import sklearn
from sklearn import preprocessing
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import LogisticRegression
from sklearn.cross_validation import train_test_split
import warnings
warnings.filterwarnings('ignore')
plt.rc("font", size=14)
sns.set(style="white")
sns.set(style="whitegrid", color_codes=True)
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD
from keras.optimizers import RMSprop
from keras.optimizers import Adagrad
from keras.optimizers import Adadelta
# df_train= pd.read_csv('data/train.csv',nrows=200000, parse_dates=['click_time'])
df_train = pd.read_csv('data/train_new_cols.csv',nrows=200000) #train data subset, original too large
# NOTE(review): dropna() returns a NEW frame and the result is discarded here --
# df_train still contains NaNs (they are later papered over with np.nan_to_num).
df_train.dropna()
df_train.info()
df_train.columns.values
cols = ['ip', 'app', 'device', 'os', 'channel', 'click_time',
'attributed_time', 'is_attributed', 'day', 'hour', 'minute',
'second', 'ip_confRate', 'app_confRate', 'device_confRate',
'os_confRate', 'channel_confRate', 'app_channel_confRate',
'app_os_confRate', 'app_device_confRate', 'channel_os_confRate',
'channel_device_confRate', 'os_device_confRate',
'ip_app_channel_var_day', 'ip_app_os_var_hour',
'ip_day_channel_var_hour_x', 'ip_day_hour_count_channel',
'ip_app_count_channel', 'ip_app_os_count_channel',
'ip_app_day_hour_count_channel', 'ip_app_channel_mean_hour',
'app_AvgViewPerDistinct_ip', 'app_count_channel',
'channel_count_app', 'ip_nunique_channel', 'ip_nunique_app',
'ip_day_nunique_hour', 'ip_app_nunique_os', 'ip_nunique_device',
'app_nunique_channel', 'ip_device_os_nunique_app',
'ip_device_os_cumcount_app', 'ip_cumcount_app', 'ip_cumcount_os',
'ip_day_channel_var_hour_y', 'ip_nextClick', 'ip_app_nextClick',
'ip_channel_nextClick', 'ip_os_nextClick',
'ip_app_device_os_channel_nextClick', 'ip_os_device_nextClick',
'ip_os_device_app_nextClick', 'prev_identical_clicks',
'future_identical_clicks', 'prev_app_clicks', 'future_app_clicks']
df_train = df_train.loc[:,cols]
df_train.head(10)
df_train['is_attributed'].value_counts()
sns.countplot(x='is_attributed', data=df_train, palette='hls')
plt.show()
# The ratio of df_train to df_test is 0.8 to 0.2 or 0.75 to 0.25
df_test = pd.read_csv('data/train_new_cols.csv', nrows=50000,skiprows=range(1, 400000))
# NOTE(review): as with df_train, the dropna() result is discarded -- NaNs remain.
df_test.dropna()
df_test.info()
df_test = df_test.loc[:,cols]
df_test.head(10)
df_test['is_attributed'].value_counts()
sns.countplot(x='is_attributed', data=df_test, palette='hls')
plt.show()
# Get columes names except the click_time (object), attributed_time (object) and is_attributed
train_cols = []
for each_value in df_train.columns.values:
if each_value == 'click_time' or each_value == 'attributed_time' or each_value == 'is_attributed':
continue
train_cols.append(each_value)
train_cols
# Data_X
X_train = df_train.loc[:,train_cols]
X_test = df_test.loc[:,train_cols]
X_train.shape
X_test.shape
# Data_Y
y_train = df_train[['is_attributed']]
y_test = df_test[['is_attributed']]
y_train.shape
y_test.shape
y_test_val = y_test.values
```
# Algorithms
### 1. Random Forest
```
from sklearn.ensemble import RandomForestClassifier
def randomForest_pre(X_train, y_train, X_test, y_test):
    """Fit a RandomForestClassifier, plot its test predictions against the targets,
    and return (fit_result, predictions, accuracy, model)."""
    # Replace NaN/inf so sklearn does not reject the matrices
    X_train = np.nan_to_num(X_train)
    y_train = np.nan_to_num(y_train)
    X_test = np.nan_to_num(X_test)
    y_test = np.nan_to_num(y_test)
    RF_model = RandomForestClassifier()
    # Train the model
    RF_fit = RF_model.fit(X_train, y_train)
    # Get the prediction of the test data
    RF_predict = RF_model.predict(X_test)
    # Compare the prediction with the known values.
    # Fix: score against the `y_test` parameter, not the module-level `y_test_val`,
    # so the function evaluates the data it was actually given.
    RF_acc = sklearn.metrics.accuracy_score(np.array(y_test)[:],
                                            np.array(RF_predict)[:])
    # Plot the data
    plt.figure(figsize=(10,5))
    plt.plot(RF_predict, color='red', label='Prediction')
    plt.plot(y_test, label='Y_test')
    plt.legend(['Prediction', 'Y_test'])
    _ = plt.ylim()
    return RF_fit, RF_predict, RF_acc, RF_model
# RF_fit, RF_pre, RF_acc, rf_model = randomForest_pre(X_train, y_train, X_test, y_test)
# RF_acc
```
### 2. Gradient Boosting & AdaBoost
```
from sklearn.ensemble import GradientBoostingClassifier
def gradientBoosting_pre(X_train, y_train, X_test, y_test):
    """Fit a GradientBoostingClassifier (100 depth-1 trees, lr=1.0), plot its test
    predictions, and return (fit_result, predictions, accuracy, model)."""
    # Replace NaN/inf so sklearn does not reject the matrices
    X_train = np.nan_to_num(X_train)
    y_train = np.nan_to_num(y_train)
    X_test = np.nan_to_num(X_test)
    y_test = np.nan_to_num(y_test)
    GB_model = GradientBoostingClassifier(n_estimators=100, learning_rate=1.0, max_depth=1, random_state=0)
    # Train the model
    GB_fit = GB_model.fit(X_train, y_train)
    # Get the prediction of the test data
    GB_predict = GB_model.predict(X_test)
    # Compare the prediction with the known values.
    # Fix: score against the `y_test` parameter, not the module-level `y_test_val`.
    GB_acc = sklearn.metrics.accuracy_score(np.array(y_test)[:],
                                            np.array(GB_predict)[:])
    # Plot the data
    plt.figure(figsize=(10,5))
    plt.plot(GB_predict, color='red', label='Prediction')
    plt.plot(y_test, label='Y_test')
    plt.legend(['Prediction', 'Y_test'])
    _ = plt.ylim()
    return GB_fit, GB_predict, GB_acc, GB_model
# GB_fit, GB_predict, GB_acc, gb_model = gradientBoosting_pre(X_train, y_train, X_test, y_test)
# GB_acc
```
### 3. Logistic Regression
```
from sklearn.linear_model import LogisticRegression
def logisticRegression_pre(X_train, y_train, X_test, y_test):
    """Fit a LogisticRegression classifier and return
    (fit_result, predictions, accuracy, model)."""
    # Replace NaN/inf so sklearn does not reject the matrices
    X_train = np.nan_to_num(X_train)
    y_train = np.nan_to_num(y_train)
    X_test = np.nan_to_num(X_test)
    y_test = np.nan_to_num(y_test)
    LG_model = LogisticRegression()
    # Train the model
    LG_fit = LG_model.fit(X_train, y_train)
    # Get the prediction of the test data
    LG_predict = LG_model.predict(X_test)
    # Compare the prediction with the known values.
    # Fix: score against the `y_test` parameter, not the module-level `y_test_val`.
    LG_acc = sklearn.metrics.accuracy_score(np.array(y_test)[:],
                                            np.array(LG_predict)[:])
    return LG_fit, LG_predict, LG_acc, LG_model
# LG_fit, LG_predict, LG_acc, lg_model = logisticRegression_pre(X_train, y_train, X_test, y_test)
# LG_acc
```
### 4. SVM - Support Vector Machine
```
from sklearn import svm
def svm_pre(X_train, y_train, X_test, y_test):
    """Fit an SVC on a 1000-row slice of the training data (full-set SVC training
    is too slow here) and return (fit_result, predictions, accuracy, model)."""
    # Replace NaN/inf so sklearn does not reject the matrices
    X_train = np.nan_to_num(X_train)
    y_train = np.nan_to_num(y_train)
    X_test = np.nan_to_num(X_test)
    y_test = np.nan_to_num(y_test)
    svm_model = svm.SVC()
    # Train the model on a manageable subset
    SVM_fit = svm_model.fit(X_train[9000:10000], y_train[9000:10000])
    # Get the prediction of the test data
    SVM_predict = svm_model.predict(X_test)
    # Compare the prediction with the known values.
    # Fix: score the SVM's OWN predictions against `y_test`; the original
    # accidentally scored the logistic-regression globals (LG_predict/y_test_val),
    # so the reported "SVM accuracy" was not the SVM's at all. Unused `uniq` removed.
    SVM_acc = sklearn.metrics.accuracy_score(np.array(y_test)[:],
                                             np.array(SVM_predict)[:])
    return SVM_fit, SVM_predict, SVM_acc, svm_model
# svm_fit, svm_predict, svm_acc, svm_model = svm_pre(X_train, y_train, X_test, y_test)
# svm_acc
```
### 5. ANN - Artificial Neural Network
```
def shallow_net_A(n=55,i=len(train_cols),o=2):
    """One-hidden-layer net: `n` sigmoid units over `i` inputs, `o` softmax outputs.

    NOTE: `i` defaults to len(train_cols), evaluated once at definition time.
    """
    net = Sequential()
    net.add(Dense(n, activation='sigmoid', input_shape=(i,)))
    # Fix: honor the `o` parameter instead of hard-coding a 2-unit output layer
    net.add(Dense(o, activation='softmax'))
    # Compile net
    net.compile(loss='mean_squared_error', optimizer=SGD(lr=0.01), metrics=['accuracy'])
    return net
def ann_pre(X_train, y_train, X_test, y_test):
    """Train the one-hidden-layer net from `shallow_net_A` and return
    (summary, fit history, [loss, acc] from evaluate, confusion-based accuracy, model)."""
    ann_model = shallow_net_A()
    ann_summary = ann_model.summary()
    # Replace NaN/inf values
    X_train_ann = np.nan_to_num(X_train)
    y_train_ann = np.nan_to_num(y_train)
    X_test_ann = np.nan_to_num(X_test)
    y_test_ann = np.nan_to_num(y_test)
    # Convert the binary targets to one-hot vectors over the two classes
    n_classes = 2
    y_train_ann = keras.utils.to_categorical(y_train_ann, n_classes)
    y_test_ann = keras.utils.to_categorical(y_test_ann, n_classes)
    # Training the model
    ann_fit = ann_model.fit(X_train_ann, y_train_ann, batch_size=128, epochs=99, verbose=1, validation_data=(X_test_ann, y_test_ann))
    # Evaluate: loss & accuracy -> Using Evaluation to get the accuracy
    ann_evaluate = ann_model.evaluate(X_test_ann, y_test_ann)
    # Fix: predict on the NaN-cleaned matrix (X_test_ann), consistent with training
    # and evaluation; the original predicted on the raw X_test.
    ann_pre = ann_model.predict(X_test_ann)
    # Threshold the softmax outputs to booleans
    y_pre = (ann_pre > 0.5)
    # Derive an accuracy figure from the confusion matrix.
    # NOTE(review): C[0][0]/(C[0][0]+C[1][0]) is the precision of class 0
    # (predicted-0 column), not overall accuracy -- kept because downstream
    # cells report this number.
    from sklearn.metrics import confusion_matrix
    ann_output = confusion_matrix(y_test_ann.argmax(axis=1), y_pre.argmax(axis=1))
    ann_prediction_acc = ann_output[0][0]/(ann_output[0][0]+ann_output[1][0])
    return ann_summary, ann_fit, ann_evaluate, ann_prediction_acc, ann_model
```
### 6. MLP - Multi-layered Neural Network
```
def shallow_net_C(n=55,i=len(train_cols),o=2):
    """Four-hidden-layer net (sigmoid/relu/tanh/elu, `n` units each) over `i`
    inputs with `o` softmax outputs.

    NOTE: `i` defaults to len(train_cols), evaluated once at definition time.
    """
    net = Sequential()
    net.add(Dense(n, activation='sigmoid', input_shape=(i,)))
    # input_shape on the layers below is ignored by Keras after the first layer
    net.add(Dense(n, activation='relu', input_shape=(i,)))
    net.add(Dense(n, activation='tanh', input_shape=(i,)))
    net.add(Dense(n, activation='elu', input_shape=(i,)))
    # Fix: honor the `o` parameter instead of hard-coding a 2-unit output layer
    net.add(Dense(o, activation='softmax'))
    # Compile net
    net.compile(loss='mean_squared_error', optimizer=SGD(lr=0.01), metrics=['accuracy'])
    return net
def mlp_pre(X_train, y_train, X_test, y_test):
    """Train the multi-layer net from `shallow_net_C` and return
    (summary, fit history, [loss, acc] from evaluate, confusion-based accuracy, model)."""
    mlp_model = shallow_net_C()
    mlp_summary = mlp_model.summary()
    # Replace NaN/inf values
    X_train_mlp = np.nan_to_num(X_train)
    y_train_mlp = np.nan_to_num(y_train)
    X_test_mlp = np.nan_to_num(X_test)
    y_test_mlp = np.nan_to_num(y_test)
    # Convert the binary targets to one-hot vectors over the two classes
    n_classes = 2
    y_train_mlp = keras.utils.to_categorical(y_train_mlp, n_classes)
    y_test_mlp = keras.utils.to_categorical(y_test_mlp, n_classes)
    # Training the model
    mlp_fit = mlp_model.fit(X_train_mlp, y_train_mlp, batch_size=128, epochs=99, verbose=1, validation_data=(X_test_mlp, y_test_mlp))
    # Evaluate: loss & accuracy -> Using Evaluation to get the accuracy
    mlp_evaluate = mlp_model.evaluate(X_test_mlp, y_test_mlp)
    # Fix: predict on the NaN-cleaned matrix (X_test_mlp), consistent with training
    # and evaluation; the original predicted on the raw X_test.
    mlp_pre = mlp_model.predict(X_test_mlp)
    # Threshold the softmax outputs to booleans
    y_pre = (mlp_pre > 0.5)
    # Derive an accuracy figure from the confusion matrix.
    # NOTE(review): as in ann_pre, this is the precision of class 0, not overall accuracy.
    from sklearn.metrics import confusion_matrix
    mlp_output = confusion_matrix(y_test_mlp.argmax(axis=1), y_pre.argmax(axis=1))
    mlp_prediction_acc = mlp_output[0][0]/(mlp_output[0][0]+mlp_output[1][0])
    return mlp_summary, mlp_fit, mlp_evaluate, mlp_prediction_acc, mlp_model
```
### 7. RNN - Recurrent Neural Network (LSTM)
```
from keras.layers import LSTM
def rnn_pre(df_train, train_rate=0.75):
    """Train a small LSTM on the (MinMax-scaled) click data and return the TRAIN accuracy.

    NOTE(review): despite taking `df_train` as a parameter, this also reads the
    module-level `df_test` and `train_cols`; `rnn_acc_test` is computed below but
    never returned -- confirm whether test accuracy was the intended result.
    """
    # Set the dataset for train and test; 'is_attributed' ends up as the LAST column
    df_rnn_train = df_train.loc[:,train_cols + ['is_attributed']]
    df_rnn_test = df_test.loc[:,train_cols + ['is_attributed']]
    df_rnn = df_rnn_train.append(df_rnn_test)
    pre_col_index = list(df_rnn_train).index('is_attributed')
    dataset = df_rnn.values.astype('float32')
    dataset = np.nan_to_num(dataset)
    # Normalize the dataset, set all the data of the dataset to be in the range between 0 and 1
    scaler = MinMaxScaler(feature_range=(0, 1))
    dataset = scaler.fit_transform(dataset)
    # Split into train and test sets
    train_size = int(len(dataset) * train_rate)
    test_size = len(dataset) - train_size
    train, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:]
    # Prepare the train and test datasets for modeling (target column split off)
    look_back = 1
    trainY = train[:, pre_col_index]
    trainX = np.delete(train, pre_col_index, axis = 1)
    testY = test[:, pre_col_index]
    testX = np.delete(test, pre_col_index, axis = 1)
    # Reshape input to be [samples, time steps, features] (2D -> 3D, one time step)
    trainX = np.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
    testX = np.reshape(testX, (testX.shape[0], 1, testX.shape[1]))
    # Create and fit the LSTM network
    RNN_model = Sequential()
    RNN_model.add(LSTM(5, input_shape=(1, len(trainX[0][0]))))
    RNN_model.add(Dense(1))
    RNN_model.compile(loss='mean_squared_error', optimizer='adam')
    RNN_model.fit(trainX, trainY, epochs=10, batch_size=128, verbose=2)
    # Make predictions, trainPredict should be 1D array
    trainPredict = RNN_model.predict(trainX)
    testPredict = RNN_model.predict(testX)
    # Change the dimension from 3D to 2D
    trainX_2D = trainX.transpose([1,0,2]).reshape(len(trainX),len(trainX[0][0]))
    testX_2D = testX.transpose([1,0,2]).reshape(len(testX),len(testX[0][0]))
    # Append the prediction back as the last column (matching 'is_attributed''s slot)
    trainPredict_6cols = np.append(trainX_2D, trainPredict, 1)
    testPredict_6cols = np.append(testX_2D, testPredict, 1)
    # Invert predictions back to the original value range
    trainPredict_6cols = scaler.inverse_transform(trainPredict_6cols)
    testPredict_6cols = scaler.inverse_transform(testPredict_6cols)
    # Calculating the RMSE
    # NOTE(review): trainY/testY are still on the scaled [0,1] range while the
    # predictions were inverse-transformed -- the RMSE mixes scales; confirm.
    trainScore = math.sqrt(mean_squared_error(trainY, trainPredict_6cols[:, pre_col_index]))
    print('Train Score: %.2f RMSE' % (trainScore))
    testScore = math.sqrt(mean_squared_error(testY, testPredict_6cols[:, pre_col_index]))
    print('Test Score: %.2f RMSE' % (testScore))
    # NOTE(review): thresholding at 0 marks almost every positive prediction as 1;
    # a 0.5 threshold was probably intended -- confirm.
    final_prediction_train = np.where(trainPredict_6cols[:, pre_col_index] > 0, 1, 0)
    final_prediction_test = np.where(testPredict_6cols[:, pre_col_index] > 0, 1, 0)
    # Reshape to (n, 1) column vectors
    final_prediction_train = np.reshape(final_prediction_train, (-1, 1))
    final_prediction_test = np.reshape(final_prediction_test, (-1, 1))
    # Counting the accuracy by using basic calculation
    rnn_acc_train = sklearn.metrics.accuracy_score(np.array(final_prediction_train)[:],
                                                   np.array(trainY)[:])
    rnn_acc_test = sklearn.metrics.accuracy_score(np.array(final_prediction_test)[:],
                                                  np.array(testY)[:])
    return rnn_acc_train
# rnn_acc = rnn_pre(df_train)
# rnn_acc
```
# Train & Test Data (Call the function)
#### 1. Random Forest
```
RF_fit, RF_pre, RF_acc, rf_model = randomForest_pre(X_train, y_train, X_test, y_test)
print('Random Forest accuracy: {}%'.format(RF_acc * 100))
```
#### 2. Gradient Boosting
```
GB_fit, GB_predict, GB_acc, gb_model = gradientBoosting_pre(X_train, y_train, X_test, y_test)
print('Gradient Boosting accuracy: {}%'.format(GB_acc * 100))
```
#### 3. Logistic Regression
```
LG_fit, LG_predict, LG_acc, lg_model = logisticRegression_pre(X_train, y_train, X_test, y_test)
print('Logistic Regression accuracy: {}%'.format(LG_acc * 100))
```
#### 4. SVM - Support Vector Machine
```
svm_fit, svm_predict, svm_acc, svm_model = svm_pre(X_train, y_train, X_test, y_test)
print('SVM accuracy: {}%'.format(svm_acc * 100))
```
#### 5. ANN - Artificial Neural Network
```
ann_summary, ann_fit, ann_evaluate, ann_prediction_acc, ann_model = ann_pre(X_train, y_train, X_test, y_test)
print('ANN accuracy: {}%'.format(ann_prediction_acc * 100))
```
#### 6. MLP - Multi-layered Neural Network
```
mlp_summary, mlp_fit, mlp_evaluate, mlp_prediction_acc, mlp_model = mlp_pre(X_train, y_train, X_test, y_test)
print('MLP accuracy: {}%'.format(mlp_prediction_acc * 100))
```
#### 7. RNN - Recurrent Neural Network
```
rnn_acc = rnn_pre(df_train)
print('RNN accuracy: {}%'.format(rnn_acc * 100))
```
### Conclusion
```
print('Random Forest accuracy: {}%'.format(RF_acc * 100))
print('Gradient Boosting accuracy: {}%'.format(GB_acc * 100))
print('Logistic Regression accuracy: {}%'.format(LG_acc * 100))
print('SVM accuracy: {}%'.format(svm_acc * 100))
print('ANN accuracy: {}%'.format(ann_prediction_acc * 100))
print('MLP accuracy: {}%'.format(mlp_prediction_acc * 100))
print('RNN accuracy: {}%'.format(rnn_acc * 100))
```
Random Forest achieved the highest accuracy of the models above, so we use it for the final prediction.
# Prediction for test.csv
```
# Columns used for the final prediction (same features the models were trained on)
train_cols
# Read the test data
df = pd.read_csv('data/test_small_all_features.csv')[train_cols].astype('float64')
df = np.nan_to_num(df)
# Read the output of the test data.
# NOTE(review): sample_submission.csv is a constant placeholder, so the "accuracy"
# computed against it below is not a meaningful evaluation -- confirm intent.
sample_out = pd.read_csv('data/sample_submission.csv', nrows=1000000)[['is_attributed']].astype('float64')
df_predict = rf_model.predict(df)
# Compare the prediction with the known values
df_acc = sklearn.metrics.accuracy_score(np.array(df_predict)[:],
                                        np.array(sample_out)[:])
print('By using the best algorittm, the accuracy of the prediction: {}%'.format(df_acc * 100))
```
The code in the document is licensed under the MIT License: https://opensource.org/licenses/MIT
All writing in the document is licensed by the Creative Commons Attribution 3.0 license: https://creativecommons.org/licenses/by/3.0/us/.
| github_jupyter |
<a href="https://colab.research.google.com/github/findingfoot/ML_practice-codes/blob/master/principal_component_analysis_.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
from sklearn import datasets
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
cancer = datasets.load_breast_cancer()
print(cancer.DESCR)
# Check how target 1 is encoded: the dataset description says there are 357 benign
# samples, so if the count below is 357, label 1 means benign.
len(cancer.data[cancer.target == 1])
# How features affect the target
# How features affect the target: overlaid per-feature histograms,
# malignant (red) vs benign (yellow), sharing the same bin edges
fig, axes = plt.subplots(10,3, figsize = (12,9))
malignant = cancer.data[cancer.target == 0]
benign = cancer.data[cancer.target ==1]
ax = axes.ravel()
for i in range(30):
    _, bins = np.histogram(cancer.data[:,i], bins = 40)  # shared bin edges for both classes
    ax[i].hist(malignant[:,i], bins = bins, color = 'r', alpha = 0.5)
    ax[i].hist(benign[:,i], bins = bins, color = 'y', alpha = 0.8)
    ax[i].set_title(cancer.feature_names[i], fontsize = 8 )
    ax[i].axes.get_xaxis().set_visible(False)
    ax[i].set_yticks(())
ax[0].legend(['Malignant', 'Benign'], loc = "best")
plt.tight_layout()
plt.show()
cancer_df = pd.DataFrame(cancer.data, columns = cancer.feature_names)
cancer_df.head()
# Side-by-side scatter plots; marker size is proportional to the corresponding area feature
plt.subplot(1,2,1)
plt.scatter(cancer_df['worst symmetry'], cancer_df['worst texture'], s = cancer_df['worst area']*0.05,color = 'teal', label = 'check', alpha = 0.3)
plt.xlabel('Worst Symmetry', fontsize = 12)
plt.ylabel('Worst Texture', fontsize = 12)
plt.subplot(1,2,2)
plt.scatter(cancer_df['mean radius'], cancer_df['mean concave points'], s = cancer_df['mean area']*0.05,color = 'teal', label = 'check', alpha = 0.3)
plt.xlabel('Mean Radius', fontsize = 12)
plt.ylabel('Mean Concave', fontsize = 12)
# NOTE(review): stray duplicate call -- re-selects the second subplot with nothing
# plotted afterwards; probably left over from editing.
plt.subplot(1,2,2)
# Standardize the features first: PCA is scale-sensitive, so each feature is
# brought to zero mean and unit variance before fitting.
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(cancer.data)
scaled_x = scaler.transform(cancer.data)
scaled_x.max(axis=0)
from sklearn.decomposition import PCA
pca = PCA(n_components = 3)
pca.fit(scaled_x)
x_pca = pca.transform(scaled_x)
x_pca.shape
# Fraction of the retained variance carried by each of the 3 components
variance_test = np.var(x_pca, axis =0)
variance_ratio = variance_test/np.sum(variance_test)
print(variance_ratio)
# Scatter of the first two principal components, colored/marked by diagnosis
Xax=x_pca[:,0]
Yax=x_pca[:,1]
labels=cancer.target
cdict={0:'red',1:'green'}
labl={0:'Malignant',1:'Benign'}
marker={0:'*',1:'o'}
alpha={0:.3, 1:.5}
fig,ax=plt.subplots(figsize=(7,5))
fig.patch.set_facecolor('white')
for l in np.unique(labels):
    ix=np.where(labels==l)
    ax.scatter(Xax[ix],Yax[ix],c=cdict[l],s=40,
               label=labl[l],marker=marker[l],alpha=alpha[l])
# for loop ends
plt.xlabel("First Principal Component",fontsize=14)
plt.ylabel("Second Principal Component",fontsize=14)
plt.legend()
plt.show()
# Heatmap of the component loadings: which original features drive each component
plt.matshow(pca.components_,cmap='viridis')
plt.yticks([0,1,2],['1st Comp','2nd Comp','3rd Comp'],fontsize=10)
plt.colorbar()
plt.xticks(range(len(cancer.feature_names)),cancer.feature_names,rotation=65,ha='left')
plt.tight_layout()
plt.show()#
# Correlation heatmap restricted to the 'worst' features
feature_worst=list(cancer_df.columns[20:31]) # select the 'worst' features
import seaborn as sns
s=sns.heatmap(cancer_df[feature_worst].corr(),cmap='coolwarm')
s.set_yticklabels(s.get_yticklabels(),rotation=30,fontsize=7)
s.set_xticklabels(s.get_xticklabels(),rotation=30,fontsize=7)
plt.show()
```
| github_jupyter |
What you should know about C
----
- Write, compile and run a simple program in C
- Static types
- Control flow especially `for` loop
- Using functions
- Using structs
- Pointers and arrays
- Function pointers
- Dynamic memory allocation
- Separate compilation and `make`
### Structs
**Exercise 1**
Write and use a `struct` to represent dates.
```
```
**Solution**
```
%%file ex1.c
#include <stdio.h>

/* A simple calendar date. */
typedef struct {
int day;
int month;
int year;
} date;

/* Demonstrates three ways to initialize a struct: member-by-member
 * assignment, a positional initializer, and C99 designated initializers. */
int main(int argc, char* argv[])
{
/* member-by-member assignment */
date d1;
d1.day = 29;
d1.month = 3;
d1.year = 2016;
/* positional: values fill the fields in declaration order */
date d2 = {30, 3, 2016};
/* designated (C99): order-independent, names each field */
date d3 = {.year = 2016, .month = 3, .day = 31};
printf("%d-%d-%d\n", d1.month, d1.day, d1.year);
printf("%d-%d-%d\n", d2.month, d2.day, d2.year);
printf("%d-%d-%d\n", d3.month, d3.day, d3.year);
}
%%bash
gcc -std=c99 -o ex1 ex1.c
%%bash
./ex1
```
### Pointers
**Exercise 2**
Write and use pointers for working with
- (a) doubles
- (b) the date struct
- (c) vector of doubles
- (d) 2D array of doubles
```
```
**Solution**
```
%%file ex2a.c
#include <stdio.h>
#include <stdlib.h>

/* Demonstrates pointers to doubles: heap allocation with malloc/calloc,
 * re-pointing a pointer at a stack variable, and writing through a pointer.
 * FIX: the original leaked the malloc'd block when p1 was reassigned, and
 * never freed p2's block. */
int main(int argc, char* argv[])
{
    double x1 = 2.78;
    double x2 = 3.14;

    double *p1 = malloc(sizeof(double));     /* contents indeterminate */
    if (p1 == NULL) return -1;
    double *p2 = calloc(sizeof(double), 1);  /* zero-initialized */
    if (p2 == NULL) return -1;

    printf("%p: %.2f\n", p1, *p1);           /* *p1 is whatever was in memory */
    printf("%p: %.2f\n\n", p2, *p2);         /* *p2 is 0.00 */

    free(p1);   /* release the heap block BEFORE losing the pointer to it */
    p1 = &x1;   /* p1 now refers to the stack variable x1 -- must not be freed */
    *p2 = x2;   /* copy x2's value into the heap storage p2 owns */

    printf("%p: %.2f\n", p1, *p1);
    printf("%p: %.2f\n", p2, *p2);

    free(p2);   /* p2 still owns its calloc'd block */
}
%%bash
gcc -std=c99 -o ex2a ex2a.c
%%bash
./ex2a
```
**Solution**
```
%%file ex2b.c
#include <stdio.h>
#include <stdlib.h>

/* A calendar date. */
typedef struct {
int day;
int month;
int year;
} date;

/* Heap-allocate a date, fill it through the pointer, and print it using
 * both member-access spellings: p->field and (*p).field are equivalent. */
int main(int argc, char* argv[])
{
    date *when = malloc(sizeof(date));
    if (when == NULL) return -1;
    when->year = 2016;
    when->month = 3;
    when->day = 29;
    printf("%d-%d-%d\n", when->month, when->day, when->year);
    printf("%d-%d-%d\n", (*when).month, (*when).day, (*when).year);
    free(when);
}
%%bash
gcc -std=c99 -o ex2b ex2b.c
%%bash
./ex2b
```
**Solution**
```
%%file ex2c.c
#include <stdio.h>
#include <stdlib.h>

/* Allocate a vector of n doubles (n from the command line), fill it with
 * squares, and show that *(xs + i) and xs[i] name the same element.
 * FIX: the original read argv[1] without checking argc, which is undefined
 * behaviour when the program is run with no argument. */
int main(int argc, char* argv[])
{
    if (argc < 2) {
        fprintf(stderr, "Usage: %s n\n", argv[0]);
        return 1;
    }
    int n = atoi(argv[1]);
    double *xs = calloc(sizeof(double), n);   /* zero-initialized vector */
    if (xs == NULL) return -1;
    for (int i = 0; i < n; i++) {
        xs[i] = i * i;
    }
    /* the demo prints assume n >= 3 (as in the tutorial's ./ex2c 10) */
    printf("%.2f\n", *(xs));        /* pointer notation, element 0 */
    printf("%.2f\n", *(xs + 2));    /* pointer notation, element 2 */
    printf("%.2f\n", xs[0]);        /* index notation, element 0 */
    printf("%.2f\n", xs[2]);        /* index notation, element 2 */
    free(xs);
}
%%bash
gcc -std=c99 -o ex2c ex2c.c
%%bash
./ex2c 10
```
**Solution**
```
%%file ex2d.c
#include <stdio.h>
#include <stdlib.h>

/* Build a rows x cols matrix of doubles as an array of row pointers,
 * fill it with i+j, print two entries, and release all the memory.
 * FIX: the row-pointer array was sized with sizeof(double) instead of
 * sizeof(double *) -- the two happen to agree on 64-bit platforms, which
 * hid the bug. Also removed a stray ';;' and added allocation checks. */
int main(int argc, char* argv[])
{
    int rows = 2;
    int cols = 3;
    double **xs = malloc(sizeof(double *) * rows);  /* array of row pointers */
    if (xs == NULL) return -1;
    for (int i = 0; i < rows; i++) {
        xs[i] = calloc(sizeof(double), cols);       /* zero-initialized row */
        if (xs[i] == NULL) return -1;
    }
    for (int i = 0; i < rows; i++) {
        for (int j = 0; j < cols; j++) {
            xs[i][j] = i + j;
        }
    }
    printf("%.2f\n", xs[0][0]);
    printf("%.2f\n", xs[1][2]);
    /* free each row before the row-pointer array itself */
    for (int i = 0; i < rows; i++) {
        free(xs[i]);
    }
    free(xs);
}
%%bash
gcc -std=c99 -o ex2d ex2d.c
%%bash
./ex2d
```
### Function pointers
**Exercise 3**
Write and use a function pointer.
**Solution**
```
%%file ex3.c
#include <stdio.h>
#include <stdlib.h>

/* Two binary operations with identical signatures, so either can be
 * assigned to the same function-pointer type. */
double add(double x, double y) {
return x + y;
}
double mult(double x, double y) {
return x * y;
}

/* Demonstrates function pointers: a raw pointer declaration and a
 * typedef alias for the same pointer type. */
int main(int argc, char* argv[])
{
double a = 3.0;
double b = 4.0;
/* f holds the address of add; the signature must match exactly */
double (*f)(double, double) = add;
/* fp names the pointer type; g is declared via the alias */
typedef double (*fp)(double, double);
fp g = mult;
printf("%.2f\n", add(a, b));  /* direct call */
printf("%.2f\n", f(a, b));    /* call through raw pointer */
printf("%.2f\n", g(a, b));    /* call through typedef'd pointer */
}
%%bash
gcc -std=c99 -o ex3 ex3.c
%%bash
./ex3
```
### Separate compilation
**Exercise 4**
Write header and implementation files for the add function, and use the function in a separate driver file. Use a makefile to compile the executable.
```
```
**Solution**
```
%%file ex4.h
#pragma once
/* Return the sum of x and y. */
double add(double x, double y);
%%file ex4.c
#include "ex4.h"
/* Definition of add, declared in ex4.h. */
double add(double x, double y) {
return x + y;
}
%%file ex4_main.c
#include <stdio.h>
#include "ex4.h"
/* Driver: exercises add() from the separately compiled ex4.c. */
int main() {
double a = 3.0;
double b = 4.0;
printf("%.2f\n", add(a, b));
}
%%file makefile
ex4_main: ex4_main.c ex4.o
gcc -std=c99 -o ex4_main ex4_main.c ex4.o
ex4.o: ex4.c
gcc -std=c99 -c ex4.c
%%bash
make
%%bash
./ex4_main
%%file makefile
TARGET = ex4_main
OBJECTS = ex4.o
CFLAGS = -O3 -std=c99
LDLIBS = -lm
CC = gcc
all: $(TARGET)
clean:
rm $(TARGET) $(OBJECTS)
$(TARGET): $(OBJECTS)
%%bash
make clean
make
%%bash
./ex4_main
```
What you should know about C++
----
- Anonymous functions
- Generalized function pointers
- Ranged for
- Using the standard template library
- Iterators
- Containers
- Algorithms
- The `random` library
- Using `armadillo`
**Exercise 5**
Implement Newton's method in 1D for root finding. Pass in the function and gradient as generalized function pointers. Use the method to find all roots of the polynomial equation $f(x) = x^3 - 7x - 6$
```
```
**Solution**
```
%%file ex5.cpp
#include <iostream>
#include <vector>
#include <iomanip>
#include <cmath>
#include <functional>
using std::vector;
using std::cout;
using std::function;
// Generalized function pointer: any callable mapping double -> double.
using func = function<double(double)>;
// Newton-Raphson root finding: iterate x <- x - f(x)/f'(x).
// Runs a fixed number of iterations instead of testing convergence,
// so the result depends on the starting point x.
double newton(double x, func f, func fprime, int max_iter=10) {
for (int i=0; i<max_iter; i++) {
x -= f(x)/fprime(x);
}
return x;
};
int main()
{
// f(x) = x^3 - 7x - 6; its gradient is passed separately as a lambda.
auto f = [](double x) { return pow(x, 3) - 7*x - 6; };
auto fprime = [](double x) { return 3.0*pow(x, 2) - 7; };
// Three starting points, chosen so each converges to a different root.
vector<double> x = {-5, 0, 5};
for (auto x_: x) {
cout << std::setw(2) << x_ << ": "
<< std::setw(3) << newton(x_, f, fprime) << "\n";
}
}
%%bash
g++ -std=c++11 ex5.cpp -o ex5
%%bash
./ex5
```
**Exercise 6**
Use the armadillo library to
- Generate 10 x-coordinates linearly spaced between 10 and 15
- Generate 10 random y-values as $y = 3x^2 - 7x + 2 + \epsilon$ where $\epsilon \sim 10 N(0,1)$
- Find the length of $x$ and $y$ and the Euclidean distance between $x$ and $y$
- Find the correlation between $x$ and $y$
- Solve the linear system to find a quadratic fit for this data
```
```
**Solution**
```
%%file ex6.cpp
#include <iostream>
#include <fstream>
#include <armadillo>

using std::cout;
using std::ofstream;
using namespace arma;

// Fit a quadratic y = b0 + b1*x + b2*x^2 to noisy synthetic data with
// armadillo, reporting norms, distance and correlation along the way.
// FIX: corrected the "Lenght" typo in the two printed labels.
int main()
{
    // 10 x-values evenly spaced on [10, 15].
    vec x = linspace<vec>(10.0, 15.0, 10);
    // y = 3x^2 - 7x + 2 + eps, with eps ~ 10 * N(0, 1).
    vec eps = 10*randn<vec>(10);
    vec y = 3*x%x - 7*x + 2 + eps;   // % is element-wise multiplication
    cout << "x:\n" << x << "\n";
    cout << "y:\n" << y << "\n";
    cout << "Length of x is: " << norm(x) << "\n";
    cout << "Length of y is: " << norm(y) << "\n";
    cout << "Distance(x, y) is: " << norm(x-y) << "\n";
    cout << "Correlation(x, y) is: " << cor(x, y) << "\n";
    // Design matrix with columns [1, x, x^2].
    mat A = join_rows(ones<vec>(10), x);
    A = join_rows(A, x%x);
    cout << "A:\n" << A << "\n";
    // Least-squares solve for the quadratic coefficients.
    vec b = solve(A, y);
    cout << "b:\n" << b << "\n";
    // Dump the arrays to text files so Python can plot the fit.
    ofstream fout1("x.txt");
    x.print(fout1);
    ofstream fout2("y.txt");
    y.print(fout2);
    ofstream fout3("b.txt");
    b.print(fout3);
}
%%bash
g++ -std=c++11 ex6.cpp -o ex6 -larmadillo
%%bash
./ex6
# Read back the arrays written by the C++ program and overlay the
# fitted quadratic (coefficients in b) on the scattered data.
x = np.loadtxt('x.txt')
y = np.loadtxt('y.txt')
b = np.loadtxt('b.txt')
plt.scatter(x, y, s=40)
plt.plot(x, b[0] + b[1]*x + b[2]*x**2, c='red')
# suppress the repr of the last expression in the notebook cell
pass
```
| github_jupyter |
## Training Network
In supervised training, the network processes inputs and compares its resulting outputs against the desired outputs.
Errors are propagated back through the system, causing the system to adjust the weights which control the network. This is done using the Backpropagation algorithm, also called backprop. This process occurs over and over as the weights are continually tweaked.
The set of data which enables the training is called the "training set."
During the training of a network the same set of data is processed many times as the connection weights are ever refined. Iteratively passing batches of data through the network and updating the weights, so that the error is decreased, is known as Stochastic Gradient Descent (SGD).
Training refers to determining the best set of weights for maximizing a neural network’s accuracy.
The amount by which the weights are changed is determined by a parameter called Learning rate.
Neural networks can be used without knowing precisely how training works. Most modern machine learning libraries have greatly automated the training process.
### NOTE:
Basically, this notebook is prepared to be used within **Google Colab**: https://colab.research.google.com/.
Google Colaboratory provides a **free Tesla K80 GPU** and comes already prepared for developing deep learning applications.
First time opens this notebook, do not forget to enable **Python 3** runtime and **GPU** accelerator in Google Colab **Notebook Settings**.
### Setup Project
Create workspace and change directory.
```
PROJECT_HOME = '/content/keras-movie-reviews-classification'
import os.path
if not os.path.exists(PROJECT_HOME):
os.makedirs(PROJECT_HOME)
os.chdir(PROJECT_HOME)
!pwd
```
### Import Project
Import GitHub project to workspace.
```
# Import project and override existing data.
!git init .
# Track only the remote's default branch and fetch it immediately.
!git remote add -t \* -f origin https://github.com/alex-agency/keras-movie-reviews-classification.git
# Discard any local state so the tree exactly matches origin/master.
!git reset --hard origin/master
!git checkout
!ls -la input
```
### Keras
Keras is a high-level API, written in Python and capable of running on top of TensorFlow, Theano, or CNTK deep learning frameworks.
Keras provides a simple and modular API to create and train Neural Networks, hiding most of the complicated details under the hood.
By default, Keras is configured to use Tensorflow as the backend since it is the most popular choice.
Keras is becoming super popular recently because of its simplicity.
### Keras workflow
<img src="https://www.learnopencv.com/wp-content/uploads/2017/09/keras-workflow.jpg" width="700px">
```
# Load Keras libraries
from keras.models import load_model
from keras import callbacks
```
### Load model and dataset
Loading model definition from HDF5 file.
```
import numpy as np
# Load data from the .npz archive; the 'dataset' entry holds both the
# train and test splits as one nested object array.
loaded = np.load('input/dataset.npz')
(X_train, Y_train), (X_test, Y_test) = loaded['dataset']
# Load the (untrained) model architecture from an HDF5 file.
model = load_model('input/mlps-model-definition.h5') # model with MLP network
print("Model Summary")
print(model.summary())
```
### Configuring the training process
Once the model is ready, we need to configure the learning process.
Compile the model means that Keras will generate a computation graph in TensorFlow.
### Loss functions
In a supervised learning problem, we have to find the error between the actual values and the predicted value. There can be different metrics which can be used to evaluate this error. This metric is often called loss function or cost function or objective function. There can be more than one loss function depending on what you are doing with the error. In general, we use:
* binary-cross-entropy for a binary classification problem
* categorical-cross-entropy for a multi-class classification problem
* mean-squared-error for a regression problem and so on
### Optimizers
An Optimizer determines how the network weights are updated.
Keras provides a lot of optimizers to choose from.
RMSprop and Adam is a good choice of optimizer for most problems.
### Overfitting
Overfitting describes the situation in which your model is over-optimized to accurately predict the training set, at the expense of generalizing to unknown data (which is the objective of learning in the first place). This can happen because the model greatly twists itself to perfectly conform to the training set, even capturing its underlying noise.
How can we avoid overfitting? The simplest solution is to split our dataset into a training set and a test set. The training set is used for the optimization procedure, but we evaluate the accuracy of our model by forwarding the test set to the trained model and measuring its accuracy.
During training, we can monitor the accuracy of the model on the training set and test set. The longer we train, the more likely our training accuracy is to go higher and higher, but at some point, it is likely the test set will stop improving. This is a cue to stop training at that point. We should generally expect that training accuracy is higher than test accuracy, but if it is much higher, that is a clue that we have overfit.
```
# Compile model: this configures the learning process before training.
model.compile(loss='binary_crossentropy', # cross-entropy loss for binary classification
optimizer='adam', # Adam optimizer, one of the most popular optimization methods
metrics=['accuracy']) # report accuracy during training
# Early stopping callback.
# Stop training when a monitored quantity has stopped improving.
# Uses the held-out validation set to decide when to terminate training and avoid overfitting.
early_stopping = callbacks.EarlyStopping(monitor='val_loss', # quantity to be monitored
min_delta=0, # minimum change in the monitored quantity to qualify as an improvement
patience=2, # number of epochs with no improvement after which training will be stopped
verbose=1, mode='auto')
# Train model with SGD-style mini-batch updates.
history = model.fit(X_train, Y_train, # train the model using the training set
batch_size=8, # number of training examples per gradient update
epochs=20, # maximum number of passes over the entire training set
callbacks=[early_stopping], # checked after each epoch
validation_split=0.2, # hold out 20% of the training data for validation
verbose=2) # 2 = print a summary line after each epoch
# Evaluate generalization on the untouched test set.
score = model.evaluate(X_test, Y_test, verbose=0) # evaluate the trained model on the test set
print('Test loss:', score[0])
print('Test accuracy:', score[1])
import matplotlib.pyplot as plt

# Plot the loss over the epochs.
plt.plot(history.history['loss'], label='training')
plt.plot(history.history['val_loss'], label='validation')
plt.legend()
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.show()

# Plot the accuracy evaluated on the training and validation sets.
# FIX: Keras 2.3 renamed the history keys 'acc'/'val_acc' to
# 'accuracy'/'val_accuracy'; pick whichever this Keras version produced
# so the plot works on either release instead of raising KeyError.
acc_key = 'acc' if 'acc' in history.history else 'accuracy'
plt.plot(history.history[acc_key], label='training')
plt.plot(history.history['val_' + acc_key], label='validation')
plt.legend()
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.show()
```
### Export trained model to file
Saving whole Keras model into a single HDF5 file which will contain:
* the architecture of the model, allowing to re-create the model
* the weights of the model
* the training configuration (loss, optimizer)
* the state of the optimizer, allowing to resume training exactly where you left off.
```
# Model filename
model_filename = 'mlps-model.h5'
# Create output directory
output_dir = 'output'
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_file = os.path.join(output_dir, model_filename)
# Export model into HDF5 file.
model.save(model_file)
!ls -la output
```
### Downloading file to your local file system
It will invoke a browser download of the file to your local computer.
```
from google.colab import files
# Download file
files.download(model_file)
```
| github_jupyter |
# Chapter 3 - a binary classification example
```
from keras.datasets import imdb
from keras import models, layers
from keras import optimizers
from keras import losses
from keras import metrics
import numpy as np
import matplotlib.pyplot as plt
```
## Loading dataset
```
# Suggested code - doesn't work
# (train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)
```
### Workaround to load dataset:
https://stackoverflow.com/a/56243777
```
# Save the original np.load so it can be restored afterwards.
np_load_old = np.load
# Temporarily force allow_pickle=True: newer numpy releases default it to
# False, which breaks keras' imdb.load_data (it unpickles object arrays).
np.load = lambda *a,**k: np_load_old(*a, allow_pickle=True, **k)
# Call load_data with allow_pickle implicitly set to true.
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)
# Restore the original np.load for normal use from here on.
np.load = np_load_old
```
### Example of decoding a review back to English
```
# word_index maps word -> integer index; invert it to decode indices.
word_index = imdb.get_word_index()
reverse_word_index = dict(
[(value, key) for (key, value) in word_index.items()]
)
# Indices 0-2 are reserved by the dataset, hence the offset of 3;
# unknown indices decode to '?'.
decoded_review = ' '.join([reverse_word_index.get(i - 3, '?') for i in train_data[0]])
decoded_review
```
## Preparing data
```
def vectorize_sequences(sequences, dimension=10000):
    """Multi-hot encode lists of integer word indices into a 2-D float array.

    Row i of the result has 1.0 at every column listed in ``sequences[i]``
    and 0.0 elsewhere. (FIX: the function body had lost its indentation.)

    Args:
        sequences: iterable of lists of int indices, each < dimension.
        dimension: width of the encoding (vocabulary size).

    Returns:
        numpy array of shape (len(sequences), dimension).
    """
    results = np.zeros((len(sequences), dimension))
    for i, sequence in enumerate(sequences):
        # fancy indexing sets every listed column of row i at once
        results[i, sequence] = 1
    return results
# Multi-hot encode the reviews and cast the labels to float32 for Keras.
x_train = vectorize_sequences(train_data)
x_test = vectorize_sequences(test_data)
y_train = np.asarray(train_labels).astype('float32')
y_test = np.asarray(test_labels).astype('float32')
```
## Model definition
```
# Two hidden ReLU layers of 16 units and a single sigmoid output unit
# for binary classification of the 10,000-dimensional review vectors.
model = models.Sequential()
model.add(layers.Dense(16, activation='relu', input_shape=(10000,)))
model.add(layers.Dense(16, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(
optimizer=optimizers.RMSprop(lr=0.001),
loss=losses.binary_crossentropy,
metrics=[metrics.binary_accuracy]
)
```
## Creating a validation set
```
# Hold out the first 10,000 reviews for validation; train on the rest.
x_val = x_train[:10000]
partial_x_train = x_train[10000:]
y_val = y_train[:10000]
partial_y_train = y_train[10000:]
```
## Training model
```
# Train for 20 epochs, evaluating the held-out set after each epoch;
# fit() returns a History object whose .history dict holds per-epoch metrics.
history = model.fit(
partial_x_train,
partial_y_train,
epochs=20,
batch_size=512,
validation_data=(x_val, y_val)
)
```
## Plotting the training and validation loss
```
history_dict = history.history
loss_values = history_dict['loss']
val_loss_values = history_dict['val_loss']
# x-axis: 1-based epoch numbers
epochs = range(1, len(loss_values) + 1)
# 'bo' = blue dots (training); 'b' = solid blue line (validation)
plt.plot(epochs, loss_values, 'bo', label='Training loss')
plt.plot(epochs, val_loss_values, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
```
## Plotting the training and validation accuracy
```
# Clear the previous figure, then plot accuracy; the keys match the
# binary_accuracy metric configured in model.compile above.
plt.clf()
acc = history_dict['binary_accuracy']
val_acc = history_dict['val_binary_accuracy']
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Acc')
plt.legend()
plt.show()
```
## Retraining model from scratch
```
# Rebuild the same architecture from scratch so training restarts from
# freshly initialized weights instead of continuing the earlier run.
# FIX: the first line assigned to `odel` (typo), which raised a NameError
# on the following `model.add(...)` calls.
model = models.Sequential()
model.add(layers.Dense(16, activation='relu', input_shape=(10000,)))
model.add(layers.Dense(16, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(
    optimizer='rmsprop',
    loss=losses.binary_crossentropy,
    metrics=[metrics.binary_accuracy]
)
# Train for only 4 epochs: the validation curves above show overfitting
# setting in around epoch 4.
model.fit(x_train, y_train, epochs=4, batch_size=512)
results = model.evaluate(x_test, y_test)
results
model.predict(x_test)
```
| github_jupyter |
# Natural language inference: task and datasets
```
__author__ = "Christopher Potts"
__version__ = "CS224u, Stanford, Fall 2020"
```
## Contents
1. [Overview](#Overview)
1. [Our version of the task](#Our-version-of-the-task)
1. [Primary resources](#Primary-resources)
1. [Set-up](#Set-up)
1. [SNLI](#SNLI)
1. [SNLI properties](#SNLI-properties)
1. [Working with SNLI](#Working-with-SNLI)
1. [MultiNLI](#MultiNLI)
1. [MultiNLI properties](#MultiNLI-properties)
1. [Working with MultiNLI](#Working-with-MultiNLI)
1. [Annotated MultiNLI subsets](#Annotated-MultiNLI-subsets)
1. [Adversarial NLI](#Adversarial-NLI)
1. [Adversarial NLI properties](#Adversarial-NLI-properties)
1. [Working with Adversarial NLI](#Working-with-Adversarial-NLI)
1. [Other NLI datasets](#Other-NLI-datasets)
## Overview
Natural Language Inference (NLI) is the task of predicting the logical relationships between words, phrases, sentences, (paragraphs, documents, ...). Such relationships are crucial for all kinds of reasoning in natural language: arguing, debating, problem solving, summarization, and so forth.
[Dagan et al. (2006)](https://u.cs.biu.ac.il/~nlp/RTE1/Proceedings/dagan_et_al.pdf), one of the foundational papers on NLI (also called Recognizing Textual Entailment; RTE), make a case for the generality of this task in NLU:
> It seems that major inferences, as needed by multiple applications, can indeed be cast in terms of textual entailment. For example, __a QA system__ has to identify texts that entail a hypothesized answer. [...] Similarly, for certain __Information Retrieval__ queries the combination of semantic concepts and relations denoted by the query should be entailed from relevant retrieved documents. [...] In __multi-document summarization__ a redundant sentence, to be omitted from the summary, should be entailed from other sentences in the summary. And in __MT evaluation__ a correct translation should be semantically equivalent to the gold standard translation, and thus both translations should entail each other. Consequently, we hypothesize that textual entailment recognition is a suitable generic task for evaluating and comparing applied semantic inference models. Eventually, such efforts can promote the development of entailment recognition "engines" which may provide useful generic modules across applications.
## Our version of the task
Our NLI data will look like this:
| Premise | Relation | Hypothesis |
|:--------|:---------------:|:------------|
| turtle | contradiction | linguist |
| A turtle danced | entails | A turtle moved |
| Every reptile danced | entails | Every turtle moved |
| Some turtles walk | contradicts | No turtles move |
| James Byron Dean refused to move without blue jeans | entails | James Dean didn't dance without pants |
In the [word-entailment bakeoff](hw_wordentail.ipynb), we study a special case of this where the premise and hypothesis are single words. This notebook begins to introduce the problem of NLI more fully.
## Primary resources
We're going to focus on three NLI corpora:
* [The Stanford Natural Language Inference corpus (SNLI)](https://nlp.stanford.edu/projects/snli/)
* [The Multi-Genre NLI Corpus (MultiNLI)](https://www.nyu.edu/projects/bowman/multinli/)
* [The Adversarial NLI Corpus (ANLI)](https://github.com/facebookresearch/anli)
The first was collected by a group at Stanford, led by [Sam Bowman](https://www.nyu.edu/projects/bowman/), and the second was collected by a group at NYU, also led by [Sam Bowman](https://www.nyu.edu/projects/bowman/). Both have the same format and were crowdsourced using the same basic methods. However, SNLI is entirely focused on image captions, whereas MultiNLI includes a greater range of contexts.
The third corpus was collected by a group at Facebook AI and UNC Chapel Hill. The team's goal was to address the fact that datasets like SNLI and MultiNLI seem to be artificially easy – models trained on them can often surpass stated human performance levels but still fail on examples that are simple and intuitive for people. The dataset is "Adversarial" because the annotators were asked to try to construct examples that fooled strong models but still passed muster with other human readers.
This notebook presents tools for working with these corpora. The [second notebook in the unit](nli_02_models.ipynb) concerns models of NLI.
## Set-up
* As usual, you need to be fully set up to work with [the CS224u repository](https://github.com/cgpotts/cs224u/).
* If you haven't already, download [the course data](http://web.stanford.edu/class/cs224u/data/data.tgz), unpack it, and place it in the directory containing the course repository – the same directory as this notebook. (If you want to put it somewhere else, change `DATA_HOME` below.)
```
import nli
import os
import pandas as pd
import random
# Locations of the NLI corpora beneath the course data directory.
DATA_HOME = os.path.join("data", "nlidata")
SNLI_HOME = os.path.join(DATA_HOME, "snli_1.0")
MULTINLI_HOME = os.path.join(DATA_HOME, "multinli_1.0")
ANNOTATIONS_HOME = os.path.join(DATA_HOME, "multinli_1.0_annotations")
ANLI_HOME = os.path.join(DATA_HOME, "anli_v1.0")
```
## SNLI
### SNLI properties
For SNLI (and MultiNLI), MTurk annotators were presented with premise sentences and asked to produce new sentences that entailed, contradicted, or were neutral with respect to the premise. A subset of the examples were then validated by an additional four MTurk annotators.
* All the premises are captions from the [Flickr30K corpus](http://shannon.cs.illinois.edu/DenotationGraph/).
* Some of the sentences rather depressingly reflect stereotypes ([Rudinger et al. 2017](https://aclanthology.coli.uni-saarland.de/papers/W17-1609/w17-1609)).
* 550,152 train examples; 10K dev; 10K test
* Mean length in tokens:
* Premise: 14.1
* Hypothesis: 8.3
* Clause-types
* Premise S-rooted: 74%
* Hypothesis S-rooted: 88.9%
* Vocab size: 37,026
* 56,951 examples validated by four additional annotators
* 58.3% examples with unanimous gold label
* 91.2% of gold labels match the author's label
* 0.70 overall Fleiss kappa
* Top scores currently around 90%.
### Working with SNLI
The following readers should make it easy to work with SNLI:
* `nli.SNLITrainReader`
* `nli.SNLIDevReader`
Writing a `Test` reader is easy and so left to the user who decides that a test-set evaluation is appropriate. We omit that code as a subtle way of discouraging use of the test set during project development.
The base class, `nli.NLIReader`, is used by all the readers discussed here.
Because the datasets are so large, it is often useful to be able to randomly sample from them. All of the reader classes discussed here support this with their keyword argument `samp_percentage`. For example, the following samples approximately 10% of the examples from the SNLI training set:
```
nli.SNLITrainReader(SNLI_HOME, samp_percentage=0.10, random_state=42)
```
The precise number of examples will vary somewhat because of the way the sampling is done. (Here, we choose efficiency over precision in the number of cases we return; see the implementation for details.)
All of the readers have a `read` method that yields `NLIExample` example instances. For SNLI, these have the following attributes:
* __annotator_labels__: `list of str`
* __captionID__: `str`
* __gold_label__: `str`
* __pairID__: `str`
* __sentence1__: `str`
* __sentence1_binary_parse__: `nltk.tree.Tree`
* __sentence1_parse__: `nltk.tree.Tree`
* __sentence2__: `str`
* __sentence2_binary_parse__: `nltk.tree.Tree`
* __sentence2_parse__: `nltk.tree.Tree`
The following creates the label distribution for the training data:
```
# Label distribution over the full SNLI train set; filter_unlabeled=False
# keeps the '-' examples (no annotator consensus) in the counts.
snli_labels = pd.Series(
[ex.gold_label for ex in nli.SNLITrainReader(
SNLI_HOME, filter_unlabeled=False).read()])
snli_labels.value_counts()
```
Use `filter_unlabeled=True` (the default) to silently drop the examples for which `gold_label` is `-`.
Let's look at a specific example in some detail:
```
snli_iterator = iter(nli.SNLITrainReader(SNLI_HOME).read())
snli_ex = next(snli_iterator)
print(snli_ex)
```
As you can see from the above attribute list, there are __three versions__ of the premise and hypothesis sentences:
1. Regular string representations of the data
1. Unlabeled binary parses
1. Labeled parses
```
snli_ex.sentence1
```
The binary parses lack node labels; so that we can use `nltk.tree.Tree` with them, the label `X` is added to all of them:
```
snli_ex.sentence1_binary_parse
```
Here's the full parse tree with syntactic categories:
```
snli_ex.sentence1_parse
```
The leaves of either tree are tokenized versions of them:
```
snli_ex.sentence1_parse.leaves()
```
## MultiNLI
### MultiNLI properties
* Train premises drawn from five genres:
1. Fiction: works from 1912–2010 spanning many genres
1. Government: reports, letters, speeches, etc., from government websites
1. The _Slate_ website
1. Telephone: the Switchboard corpus
1. Travel: Berlitz travel guides
* Additional genres just for dev and test (the __mismatched__ condition):
1. The 9/11 report
1. Face-to-face: The Charlotte Narrative and Conversation Collection
1. Fundraising letters
1. Non-fiction from Oxford University Press
1. _Verbatim_ articles about linguistics
* 392,702 train examples; 20K dev; 20K test
* 19,647 examples validated by four additional annotators
* 58.2% examples with unanimous gold label
* 92.6% of gold labels match the author's label
* Test-set labels available as a Kaggle competition.
* Top matched scores currently around 0.81.
* Top mismatched scores currently around 0.83.
### Working with MultiNLI
For MultiNLI, we have the following readers:
* `nli.MultiNLITrainReader`
* `nli.MultiNLIMatchedDevReader`
* `nli.MultiNLIMismatchedDevReader`
The MultiNLI test sets are available on Kaggle ([matched version](https://www.kaggle.com/c/multinli-matched-open-evaluation) and [mismatched version](https://www.kaggle.com/c/multinli-mismatched-open-evaluation)).
The interface to these is the same as for the SNLI readers:
```
nli.MultiNLITrainReader(MULTINLI_HOME, samp_percentage=0.10, random_state=42)
```
The `NLIExample` instances for MultiNLI have the same attributes as those for SNLI. Here is the list repeated from above for convenience:
* __annotator_labels__: `list of str`
* __captionID__: `str`
* __gold_label__: `str`
* __pairID__: `str`
* __sentence1__: `str`
* __sentence1_binary_parse__: `nltk.tree.Tree`
* __sentence1_parse__: `nltk.tree.Tree`
* __sentence2__: `str`
* __sentence2_binary_parse__: `nltk.tree.Tree`
* __sentence2_parse__: `nltk.tree.Tree`
The full label distribution:
```
# Label distribution over the full MultiNLI train set (no '-' labels occur
# in train, so filter_unlabeled has no effect here).
multinli_labels = pd.Series(
[ex.gold_label for ex in nli.MultiNLITrainReader(
MULTINLI_HOME, filter_unlabeled=False).read()])
multinli_labels.value_counts()
```
No examples in the MultiNLI train set lack a gold label, so the value of the `filter_unlabeled` parameter has no effect here, but it does have an effect in the `Dev` versions.
### Annotated MultiNLI subsets
MultiNLI includes additional annotations for a subset of the dev examples. The goal is to help people understand how well their models are doing on crucial NLI-related linguistic phenomena.
```
# Paths to the annotated subsets of the MultiNLI dev sets.
matched_ann_filename = os.path.join(
ANNOTATIONS_HOME,
"multinli_1.0_matched_annotations.txt")
mismatched_ann_filename = os.path.join(
ANNOTATIONS_HOME,
"multinli_1.0_mismatched_annotations.txt")
def view_random_example(annotations, random_state=42):
    """Print one randomly chosen annotated example.

    `annotations` maps pairIDs to dicts holding an 'example' (an
    NLIExample) and its 'annotations' list. Seeding the RNG makes the
    choice reproducible across calls.
    """
    random.seed(random_state)
    pairid, entry = random.choice(list(annotations.items()))
    example = entry['example']
    print("pairID: {}".format(pairid))
    print(entry['annotations'])
    print(example.sentence1)
    print(example.gold_label)
    print(example.sentence2)
matched_ann = nli.read_annotated_subset(matched_ann_filename, MULTINLI_HOME)
view_random_example(matched_ann)
```
## Adversarial NLI
### Adversarial NLI properties
The ANLI dataset was created in response to evidence that datasets like SNLI and MultiNLI are artificially easy for modern machine learning models to solve. The team sought to tackle this weakness head-on, by designing a crowdsourcing task in which annotators were explicitly trying to confuse state-of-the-art models. In broad outline, the task worked like this:
1. The crowdworker is presented with a premise (context) text and asked to construct a hypothesis sentence that entails, contradicts, or is neutral with respect to that premise. (The actual wording is more informal, along the lines of the SNLI/MultiNLI task).
1. The crowdworker submits a hypothesis text.
1. The premise/hypothesis pair is fed to a trained model that makes a prediction about the correct NLI label.
1. If the model's prediction is correct, then the crowdworker loops back to step 2 to try again. If the model's prediction is incorrect, then the example is validated by different crowdworkers.
The dataset consists of three rounds, each involving a different model and a different set of sources for the premise texts:
| Round | Model | Training data | Context sources |
|:------:|:------------|:---------------------------|:-----------------|
| 1 | [BERT-large](https://www.aclweb.org/anthology/N19-1423/) | SNLI + MultiNLI | Wikipedia |
| 2 | [ROBERTa](https://arxiv.org/abs/1907.11692) | SNLI + MultiNLI + [NLI-FEVER](https://github.com/easonnie/combine-FEVER-NSMN/blob/master/other_resources/nli_fever.md) + Round 1 | Wikipedia |
| 3 | [ROBERTa](https://arxiv.org/abs/1907.11692) | SNLI + MultiNLI + [NLI-FEVER](https://github.com/easonnie/combine-FEVER-NSMN/blob/master/other_resources/nli_fever.md) + Round 2 | Various |
Each round has train/dev/test splits. The sizes of these splits and their label distributions are calculated just below.
The [project README](https://github.com/facebookresearch/anli/blob/master/README.md) seeks to establish some rules for how the rounds can be used for training and evaluation.
### Working with Adversarial NLI
For ANLI, we have the following readers:
* `nli.ANLITrainReader`
* `nli.ANLIDevReader`
As with SNLI, we leave the writing of a `Test` version to the user, as a way of discouraging inadvertent use of the test set during project development.
Because ANLI is distributed in three rounds, and the rounds can be used independently or pooled, the interface has a `rounds` argument. The default is `rounds=(1,2,3)`, but any subset of them can be specified. Here are some illustrations using the `Train` reader; the `Dev` interface is the same:
```
for rounds in ((1,), (2,), (3,), (1,2,3)):
count = len(list(nli.ANLITrainReader(ANLI_HOME, rounds=rounds).read()))
print("R{0:}: {1:,}".format(rounds, count))
```
The above figures correspond to those in Table 2 of the paper. I am not sure what accounts for the differences of 100 examples in round 2 (and, in turn, in the grand total).
ANLI uses a different set of attributes from SNLI/MultiNLI. Here is a summary of what `NLIExample` instances offer for this corpus:
* __uid__: a unique identifier; akin to `pairID` in SNLI/MultiNLI
* __context__: the premise; corresponds to `sentence1` in SNLI/MultiNLI
* __hypothesis__: the hypothesis; corresponds to `sentence2` in SNLI/MultiNLI
* __label__: the gold label; corresponds to `gold_label` in SNLI/MultiNLI
* __model_label__: the label predicted by the model used in the current round
* __reason__: a crowdworker's free-text hypothesis about why the model made an incorrect prediction for the current __context__/__hypothesis__ pair
* __emturk__: for dev (and test), this is `True` if the annotator contributed only dev (test) examples, else `False`; in turn, it is `False` for all train examples.
* __genre__: the source for the __context__ text
* __tag__: information about the round and train/dev/test classification
All these attributes are `str`-valued except for `emturk`, which is `bool`-valued.
The labels in this dataset are conceptually the same as for `SNLI/MultiNLI`, but they are encoded differently:
```
anli_labels = pd.Series(
[ex.label for ex in nli.ANLITrainReader(ANLI_HOME).read()])
anli_labels.value_counts()
```
For the dev set, the `label` and `model_label` values are always different, suggesting that these evaluations will be very challenging for present-day models:
```
pd.Series(
[ex.label == ex.model_label for ex in nli.ANLIDevReader(ANLI_HOME).read()]
).value_counts()
```
In the train set, they do sometimes correspond, and you can track the changes in the rate of correct model predictions across the rounds:
```
for r in (1,2,3):
dist = pd.Series(
[ex.label == ex.model_label
for ex in nli.ANLITrainReader(ANLI_HOME, rounds=(r,)).read()]
).value_counts()
dist = dist / dist.sum()
dist.name = "Round {}".format(r)
print(dist, end="\n\n")
```
This corresponds to Table 2, "Model error rate (Verified)", in the paper. (I am not sure what accounts for the slight differences in the percentages.)
## Other NLI datasets
* [The FraCaS textual inference test suite](http://www-nlp.stanford.edu/~wcmac/downloads/) is a smaller, hand-built dataset that is great for evaluating a model's ability to handle complex logical patterns.
* [SemEval 2013](https://www.cs.york.ac.uk/semeval-2013/) had a wide range of interesting data sets for NLI and related tasks.
* [The SemEval 2014 semantic relatedness shared task](http://alt.qcri.org/semeval2014/task1/) used an NLI dataset called [Sentences Involving Compositional Knowledge (SICK)](http://alt.qcri.org/semeval2014/task1/index.php?id=data-and-tools).
* [MedNLI](https://physionet.org/physiotools/mimic-code/mednli/) is specialized to the medical domain, using data derived from [MIMIC III](https://mimic.physionet.org).
* [XNLI](https://github.com/facebookresearch/XNLI) is a multilingual collection of test sets derived from MultiNLI.
* [Diverse Natural Language Inference Collection (DNC)](http://decomp.io/projects/diverse-natural-language-inference/) transforms existing annotations from other tasks into NLI problems for a diverse range of reasoning challenges.
* [SciTail](http://data.allenai.org/scitail/) is an NLI dataset derived from multiple-choice science exam questions and Web text.
* [NLI Style FEVER](https://github.com/easonnie/combine-FEVER-NSMN/blob/master/other_resources/nli_fever.md) is a version of [the FEVER dataset](http://fever.ai) put into a standard NLI format. It was used by the Adversarial NLI team to train models for their annotation round 2.
* Models for NLI might be adapted for use with [the 30M Factoid Question-Answer Corpus](http://agarciaduran.org/).
* Models for NLI might be adapted for use with [the Penn Paraphrase Database](http://paraphrase.org/).
| github_jupyter |
```
import glob
import os
import pandas as pd
import numpy as np
##################### Traces description
# 1. CLT_PUSH_START - SENDING Time between the scheduling of the request and its actual processing
# 2. CLT_PUSH_END - CLT_PUSH_START Time to prepare the packet, send it to the NIC driver through rte_eth_tx_burst(), and free the dpdk mbuf
# 3. SRV_POP_START - CLT_PUSH_END Time on the wire: item detected in the io queue's receive queue - client packet sent /!\ I think this can be negative if the server schedule's pop way before the client sends requests
# 4. SRV_POP_END - SRV_POP_START Time to parse incoming packet + "waiting time" at the server's queue
# 5. NET_RECEIVE - SRV_POP_END Time between message delivered to the application by dmtr_wait_any() and packet processed by the I/O queue
# 6. HTTP_DISPATCH - NET_RECEIVE Time taken to select the HTTP recipient (either RR, or apply the filter, etc)
# 7. START_HTTP - HTTP_DISPATCH Time spent in memory queue between network component and HTTP
# 8. END_HTTP - START_HTTP Time spent performing HTTP processing
# 9. HTTP_DONE - END_HTTP Time spent in memory queue between HTTP component and HTPP /!\ This include the "wait time" of dmtr_wait_any, as the same poll operates on both network sockets, and this memory queue
# 10. SRV_PUSH_START - HTTP_DONE Time between the scheduling of the response and its actual processing
# 11. SRV_PUSH_END - SRV_PUSH_START Time spent preparing the packet and sending it to the wire (identical to #2)
# 12. CLT_POP_START - SRV_PUSH_END Time spent on the wire /!\ I think this can be negative as the client schedules the read as soon as it as sent the request
# 13. CLT_POP_END - CLT_POP_START Time spent processing an incoming network packet (includes wait time) (identical to #4)
# 14. COMPLETED - CLT_POP_END Time elapsed between the response being delivered to the client by dmtr_wait_any(), and the response being fully processed by the I/O queue
# Chronological ordering of the per-request timestamp columns, used by
# order_cols() to arrange and root the timeline. Commented-out entries are
# events deliberately excluded from the plots; per the trace description
# above, the wire-time deltas around them can be negative.
TRACE_ORDER = [
    'SENDING',
    'CLT_PUSH_START',
    # 'CLT_PUSH_END',
    # 'SRV_POP_START',
    'SRV_POP_END',
    'NET_RECEIVE',
    'HTTP_DISPATCH',
    'START_HTTP',
    'END_HTTP',
    'HTTP_DONE',
    'SRV_PUSH_START',
    # 'SRV_PUSH_END',
    # 'CLT_POP_START',
    'CLT_POP_END',
    'COMPLETED'
]
def read_tokens(trace_dir, exclude_first = 5):
    """Load the request/token trace file from `trace_dir`.

    Selects the single '*traces*' file that is neither a POP nor a PUSH
    trace, then drops the warm-up rows: any row whose timestamp (second
    column, nanoseconds) lies within the first `exclude_first` seconds
    after the earliest timestamp.
    """
    # Expected columns: REQ_ID SENDING READING COMPLETED PUSH_TOKEN POP_TOKEN
    candidates = [
        f for f in glob.glob(os.path.join(trace_dir, '*traces*'))
        if 'POP' not in f and 'PUSH' not in f
    ]
    if len(candidates) > 1:
        raise Exception("Too many files")
    df = pd.read_csv(candidates[0], sep='\t')
    time_col = df.columns[1]
    cutoff = df[time_col].min() + exclude_first * 1e9
    return df[df[time_col] > cutoff]
def read_traces(trace_dir, label):
    """Load the single '<label>-traces' file (e.g. PUSH/POP) from `trace_dir`."""
    pattern = os.path.join(trace_dir, '*%s-traces' % label)
    matches = glob.glob(pattern)
    if len(matches) > 1:
        raise Exception("Too many files")
    return pd.read_csv(matches[0], sep='\t')
def merge_trace(token_df, trace_df, token_label, col_label):
    """Join one trace's TIME column onto `token_df` via its token column.

    The TIME column of the joined result is renamed to `col_label`.
    """
    token_col = '%s_TOKEN' % token_label
    merged = pd.merge(token_df, trace_df[[token_col, 'TIME']], on=token_col)
    return merged.rename(columns={'TIME': col_label})
def merge_traces(token_df, trace_df, token_label, col_label):
    """Attach both START and END timestamps for one trace kind.

    `trace_df` has a boolean START column separating start events from
    end events; each half is merged in turn, producing columns named
    '<col_label>_<token_label>_START' and '..._END'.
    """
    starts = trace_df[trace_df.START]
    ends = trace_df[~trace_df.START]
    merged = merge_trace(
        token_df, starts, token_label, '%s_%s_START' % (col_label, token_label))
    return merge_trace(
        merged, ends, token_label, '%s_%s_END' % (col_label, token_label))
# Per-node column prefixes (client -> CLT, server -> SRV) and trace-file
# name hints used when locating each node's files.
col_labels = dict(client='CLT', server='SRV')
token_labels = dict(client='rate_client', server='')
def order_cols(df, subtract_root = True):
    """Index by REQ_ID and order timestamp columns per TRACE_ORDER.

    When `subtract_root` is set, every kept column is re-expressed as an
    offset from the first (earliest) kept column, so each request's
    timeline starts near zero.
    """
    kept = [c for c in TRACE_ORDER if c in df.columns]
    out = df[['REQ_ID'] + kept].set_index('REQ_ID')
    if subtract_root:
        out[kept] = out[kept].apply(lambda col: col - out[kept[0]])
    return out
def read_profiling_node(base_dir, experiment, node_label):
    """Assemble one node's (client or server) per-request timeline.

    Reads the token file plus the PUSH and POP trace files under
    base_dir/experiment/node_label, merges both trace kinds' start/end
    timestamps onto the tokens, and returns the columns ordered per
    TRACE_ORDER (as offsets from each request's first event).
    """
    client_dir = os.path.join(base_dir, experiment, node_label)
    token_df = read_tokens(client_dir)
    push_df = read_traces(client_dir, 'PUSH')
    pop_df = read_traces(client_dir, 'POP')
    # col_labels maps node_label -> 'CLT'/'SRV' column prefix.
    df = merge_traces(token_df, push_df, 'PUSH', col_labels[node_label])
    df = merge_traces(df, pop_df, 'POP', col_labels[node_label])
    return order_cols(df)
# Client-side receive/send timestamp columns used for clock alignment in
# read_merged_profiling().
CLIENT_RCV = 'CLT_POP_END'
CLIENT_SND = 'CLT_PUSH_START'
def read_merged_profiling(base_dir, experiment):
    """Join client and server timelines onto a single per-request clock.

    The server timestamps (relative to their own first event after
    order_cols) are first re-based onto the client's send time, then
    shifted by half the residual gap between the client receive time and
    the last server event — a simple symmetric-delay clock alignment.
    """
    client_df = read_profiling_node(base_dir, experiment, 'client')
    server_df = read_profiling_node(base_dir, experiment, 'server')
    server_cols = server_df.columns
    client_cols = client_df.columns
    df = client_df.join(server_df)
    # Anchor the server-relative timeline at the client's send timestamp.
    offset = df[CLIENT_SND]
    df[server_cols] = df[server_cols].apply(lambda x: x + offset)
    # Split the remaining round-trip slack evenly between the two wire
    # directions (assumes symmetric network delay).
    offset =( df[CLIENT_RCV] - df[server_cols[-1]]) / 2
    df[server_cols] = df[server_cols].apply(lambda x: x + offset)
    return order_cols(df.reset_index())
# Fixed palette so each pipeline stage keeps a stable color across all of
# the stacked-bar and correlation plots below.
COLORS = ["#700f00",
          "#013fb0",
          "#cbcd11",
          "#6b3a7d",
          "#ff392e",
          "#008eb2",
          "#ff8da5",
          "#000000",
          "#458f00",
          "#AAAAAA",
          "#123456",
          "#7192F1",
          "#013fb0",
          '#777777',
          '#BBBBBB'
         ]
def stacked_plot(df, full_sep=False):
    """Draw per-request stacked bars of successive stage durations.

    Each bar segment spans df[next_col] - df[prev_col] for every pair of
    consecutive timestamp columns. With full_sep=False, segments sit at
    their absolute start times; with full_sep=True each stage is drawn
    above the maximum extent of the previous stage, visually separating
    the stages at the cost of absolute positioning.
    """
    columns = df.columns
    print(columns)
    bottom = 0
    cols_so_far = []
    for prev_col, next_col, color in zip(columns, columns[1:], COLORS):
        if not full_sep:
            # Stack each stage at its own start timestamp.
            bottom = df[prev_col]
        plt.bar(df.index, df[next_col] - df[prev_col], 1, bottom=bottom, color=color, label=prev_col)
        if full_sep:
            # Next stage starts above the tallest bar drawn so far.
            bottom = (bottom + df[next_col]- df[prev_col]).max()
def plot_stacked_sample(df, sample_size=100, full_sep=False):
    """Show stacked-stage bars for the fastest, median, and slowest requests.

    Sorts by the last timestamp column (total completion time), slices
    `sample_size` requests from the bottom, middle, and top of the
    distribution, and renders the three groups side by side with a
    shared y-axis so their stage budgets are directly comparable.
    """
    df = df.sort_values(df.columns[-1])
    lowest = df.iloc[:sample_size]
    highest = df.iloc[-sample_size:]
    middlest = df.iloc[int(len(df) / 2 - sample_size / 2): int(len(df) / 2 + sample_size / 2)]
    plt.figure(figsize=(9.5, 4))
    ax1 = plt.subplot(131)
    stacked_plot(lowest.reset_index(drop=True), full_sep)
    ax2 = plt.subplot(132, sharey=ax1)
    stacked_plot(middlest.reset_index(drop=True), full_sep)
    plt.subplot(133, sharey=ax2)
    stacked_plot(highest.reset_index(drop=True), full_sep)
    plt.tight_layout()
    plt.subplots_adjust(top=.8)
    # Put the legend on the (middle) axis so it spans all three panels.
    plt.sca(ax2)
    plt.legend(loc='lower center', bbox_to_anchor=(.5, 1), ncol=5)
# Render the stacked-stage views for three pinned-core experiments.
df = read_merged_profiling('profiling', 'all_pinned_8')
plot_stacked_sample(df, 200)
df = read_merged_profiling('profiling', 'all_pinned_14')
plot_stacked_sample(df, 200)
df = read_merged_profiling('profiling', 'all_pinned_file_only')
plot_stacked_sample(df, 200)
import numpy as np
def plot_correlations(df):
    """Scatter each stage's normalized duration against total latency.

    For every pair of consecutive timestamp columns, plots the stage
    duration (scaled to [0, 1] by its maximum) versus the request's
    total completion time, together with a degree-1 least-squares trend
    line fitted over the non-NaN points.
    """
    columns = df.columns
    for prev_col, next_col, color in zip(columns, columns[1:], COLORS):
        diffs = df[next_col] - df[prev_col]
        x = diffs / diffs.max()
        y = df.COMPLETED - df[columns[0]]
        plt.plot(x, y, '.', markersize=.1, color=color, label=prev_col)
        mask = ~x.isna() & ~y.isna()
        p = np.polyfit(x[mask], y[mask], 1)
        # np.polyfit returns coefficients highest-degree first, so p[0]
        # is the slope and p[1] the intercept. The original plotted
        # p[0] + p[1]*x, swapping the two.
        plt.plot(x, p[1] + p[0] * x, '-', color=color)
# Correlation heatmap: which stage durations co-vary with total latency?
df = read_merged_profiling('profiling', 'all_pinned_file_only')
df = df.sort_values('COMPLETED')
# df = df.iloc[-5000:]
# diff(-1, axis=1) takes each column minus the following column, turning
# timestamps into (negated) per-stage durations — TODO confirm sign is
# irrelevant for the correlation matrix below. All-NaN columns dropped.
diffs = df.astype(float).diff(-1, axis=1)
diffs = diffs[diffs.columns[~diffs.isna().all().values]]
diffs['TOTAL'] = df['COMPLETED']
plt.figure()
corr = diffs.corr()
# Zero the diagonal so the colorbar is not dominated by self-correlation.
corr[corr == 1] = 0
plt.imshow(corr)
plt.xticks(range(len(corr.columns)))
plt.yticks(range(len(corr.columns)))
plt.xlim([-.5, 10.5])
plt.ylim([-.5, 10.5])
plt.gca().set_xticklabels(corr.columns, rotation=-45, ha='left')
plt.gca().set_yticklabels(corr.columns)
plt.colorbar()
plt.tight_layout()
# plt.subplots_adjust(left=.3, top=.8)
# Same view as above but with the covariance matrix instead of the
# correlation matrix (unnormalized, so large stages dominate the scale).
df = read_merged_profiling('profiling', 'all_pinned_file_only')
df = df.sort_values('COMPLETED')
# df = df.iloc[-5000:]
diffs = df.astype(float).diff(-1, axis=1)
diffs = diffs[diffs.columns[~diffs.isna().all().values]]
diffs['TOTAL'] = df['COMPLETED']
plt.figure()
corr = diffs.cov()
# corr[corr > .98] = 0
plt.imshow(corr)
plt.xticks(range(len(corr.columns)))
plt.yticks(range(len(corr.columns)))
plt.xlim([-.5, 10.5])
plt.ylim([-.5, 10.5])
plt.gca().set_xticklabels(corr.columns, rotation=-45, ha='left')
plt.gca().set_yticklabels(corr.columns)
plt.colorbar()
plt.tight_layout()
# plt.subplots_adjust(left=.3, top=.8)
# Stacked-stage view for the regex-only experiment.
df = read_merged_profiling('profiling', 'all_pinned_regex_only')
plot_stacked_sample(df, 200)
```
| github_jupyter |
# Self-Driving Car Engineer Nanodegree
## Project: **Finding Lane Lines on the Road**
***
In this project, you will use the tools you learned about in the lesson to identify lane lines on the road. You can develop your pipeline on a series of individual images, and later apply the result to a video stream (really just a series of images). Check out the video clip "raw-lines-example.mp4" (also contained in this repository) to see what the output should look like after using the helper functions below.
Once you have a result that looks roughly like "raw-lines-example.mp4", you'll need to get creative and try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video "P1_example.mp4". Ultimately, you would like to draw just one line for the left side of the lane, and one for the right.
In addition to implementing code, there is a brief writeup to complete. The writeup should be completed in a separate file, which can be either a markdown file or a pdf document. There is a [write up template](https://github.com/udacity/CarND-LaneLines-P1/blob/master/writeup_template.md) that can be used to guide the writing process. Completing both the code in the Ipython notebook and the writeup template will cover all of the [rubric points](https://review.udacity.com/#!/rubrics/322/view) for this project.
---
Let's have a look at our first image called 'test_images/solidWhiteRight.jpg'. Run the 2 cells below (hit Shift-Enter or the "play" button above) to display the image.
**Note: If, at any point, you encounter frozen display windows or other confounding issues, you can always start again with a clean slate by going to the "Kernel" menu above and selecting "Restart & Clear Output".**
---
**The tools you have are color selection, region of interest selection, grayscaling, Gaussian smoothing, Canny Edge Detection and Hough Transform line detection. You are also free to explore and try other techniques that were not presented in the lesson. Your goal is to piece together a pipeline to detect the line segments in the image, then average/extrapolate them and draw them onto the image for display (as below). Once you have a working pipeline, try it out on the video stream below.**
---
<figure>
<img src="examples/line-segments-example.jpg" width="380" alt="Combined Image" />
<figcaption>
<p></p>
<p style="text-align: center;"> Your output should look something like this (above) after detecting line segments using the helper functions below </p>
</figcaption>
</figure>
<p></p>
<figure>
<img src="examples/laneLines_thirdPass.jpg" width="380" alt="Combined Image" />
<figcaption>
<p></p>
<p style="text-align: center;"> Your goal is to connect/average/extrapolate line segments to get output like this</p>
</figcaption>
</figure>
**Run the cell below to import some packages. If you get an `import error` for a package you've already installed, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, consult the forums for more troubleshooting tips.**
## Import Packages
```
#importing some useful packages
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
%matplotlib inline
```
## Read in an Image
```
#reading in an image
image = mpimg.imread('test_images/solidWhiteRight.jpg')
#printing out some stats and plotting
print('This image is:', type(image), 'with dimensions:', image.shape)
plt.imshow(image) # if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray')
```
## Ideas for Lane Detection Pipeline
**Some OpenCV functions (beyond those introduced in the lesson) that might be useful for this project are:**
`cv2.inRange()` for color selection
`cv2.fillPoly()` for regions selection
`cv2.line()` to draw lines on an image given endpoints
`cv2.addWeighted()` to coadd / overlay two images
`cv2.cvtColor()` to grayscale or change color
`cv2.imwrite()` to output images to file
`cv2.bitwise_and()` to apply a mask to an image
**Check out the OpenCV documentation to learn about these and discover even more awesome functionality!**
## Helper Functions
Below are some helper functions to help get you started. They should look familiar from the lesson!
```
import math
def grayscale(img):
    """Convert an RGB image to a single-channel grayscale image.

    The result has one color channel, so display it with
    plt.imshow(gray, cmap='gray'). If the image was loaded with
    cv2.imread() (BGR order), use cv2.COLOR_BGR2GRAY instead.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    return gray
def canny(img, low_threshold, high_threshold):
    """Run Canny edge detection with the given hysteresis thresholds."""
    edges = cv2.Canny(img, low_threshold, high_threshold)
    return edges
def gaussian_blur(img, kernel_size):
    """Smooth the image with a kernel_size x kernel_size Gaussian kernel."""
    ksize = (kernel_size, kernel_size)
    return cv2.GaussianBlur(img, ksize, 0)
def region_of_interest(img, vertices):
    """Black out everything outside the polygon defined by `vertices`.

    Builds a fill mask matching the image's channel count, fills the
    polygon with the maximum pixel value, and ANDs it with the image so
    only the region inside the polygon survives.
    """
    mask = np.zeros_like(img)
    # White fill: one 255 per channel for color input, a scalar for gray.
    if img.ndim > 2:
        fill_color = (255,) * img.shape[2]
    else:
        fill_color = 255
    # Paint the polygon onto the mask, then keep only masked pixels.
    cv2.fillPoly(mask, vertices, fill_color)
    return cv2.bitwise_and(img, mask)
def draw_lines(img, lines, color=[255, 0, 0], thickness=2):
    """Draw each Hough segment onto `img` in place.

    `lines` is cv2.HoughLinesP output: each element wraps one
    (x1, y1, x2, y2) segment. To produce a single extrapolated lane line
    per side (as in P1_example.mp4), extend this function: separate
    segments by slope sign ((y2-y1)/(x2-x1)), average each group, and
    extrapolate to the top and bottom of the lane region. For
    semi-transparent lines, combine the result with weighted_img().
    """
    for segment in lines:
        for x1, y1, x2, y2 in segment:
            cv2.line(img, (x1, y1), (x2, y2), color, thickness)
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
    """Run the probabilistic Hough transform on a Canny edge image.

    `img` should be the output of a Canny transform. Returns a new black
    3-channel image of the same height/width with the detected segments
    drawn on it.
    """
    segments = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]),
                               minLineLength=min_line_len,
                               maxLineGap=max_line_gap)
    canvas = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
    draw_lines(canvas, segments)
    return canvas
# Python 3 has support for cool math symbols.
def weighted_img(img, initial_img, α=0.8, β=1., γ=0.):
    """Blend the line image onto the original frame.

    `img` is the hough_lines() output (a black image with lines drawn);
    `initial_img` is the unprocessed frame. Both must share a shape.
    The result is computed as initial_img * α + img * β + γ.
    """
    blended = cv2.addWeighted(initial_img, α, img, β, γ)
    return blended
```
## Test Images
Build your pipeline to work on the images in the directory "test_images"
**You should make sure your pipeline works well on these images before you try the videos.**
```
import os
os.listdir("test_images/")
```
## Build a Lane Finding Pipeline
Build the pipeline and run your solution on all test_images. Make copies into the `test_images_output` directory, and you can use the images in your writeup report.
Try tuning the various parameters, especially the low and high Canny thresholds as well as the Hough lines parameters.
```
# Pipeline that draws lane lines on every image in test_images/ and
# saves the results to the test_images_result directory.
kernel_size = 3          # Gaussian blur kernel size (odd)
low_threshold = 150      # Canny hysteresis thresholds
high_threshold = 250
rho = 1                  # distance resolution in pixels of the Hough grid
theta = np.pi / 180      # angular resolution in radians (1 degree per bin;
                         # the previous np.pi * 90 / 180 = 90 degrees left
                         # only ~2 angle bins and cripples line detection)
threshold = 15           # minimum number of votes (intersections in Hough grid cell)
min_line_len = 10        # minimum number of pixels making up a line
max_line_gap = 100       # maximum gap in pixels between connectable line segments

for file in os.listdir("test_images/"):
    imgfile = "test_images/" + file
    init_image = mpimg.imread(imgfile)
    # Use the image currently being processed for the mask geometry.
    # (The original read the stale global `image` left over from an
    # earlier cell, so the mask could mismatch the actual image size.)
    ysize = init_image.shape[0]
    xsize = init_image.shape[1]
    # Triangular region of interest: bottom corners up to the center.
    triangle = np.array([[0, ysize - 1], [xsize - 1, ysize - 1], [xsize / 2, ysize / 2]], np.int32)
    image = grayscale(init_image)
    image = gaussian_blur(image, kernel_size)
    image = canny(image, low_threshold, high_threshold)
    image = region_of_interest(image, [triangle])
    image = hough_lines(image, rho, theta, threshold, min_line_len, max_line_gap)
    image = weighted_img(image, init_image)
    mpimg.imsave("test_images_result/" + file, image)
```
## Test on Videos
You know what's cooler than drawing lanes over images? Drawing lanes over video!
We can test our solution on two provided videos:
`solidWhiteRight.mp4`
`solidYellowLeft.mp4`
**Note: if you get an import error when you run the next cell, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, consult the forums for more troubleshooting tips.**
**If you get an error that looks like this:**
```
NeedDownloadError: Need ffmpeg exe.
You can download it by calling:
imageio.plugins.ffmpeg.download()
```
**Follow the instructions in the error message and check out [this forum post](https://discussions.udacity.com/t/project-error-of-test-on-videos/274082) for more troubleshooting tips across operating systems.**
```
# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
from IPython.display import HTML
def process_image(image):
    """Annotate one video frame with the detected lane lines.

    NOTE: the returned image must be a 3-channel color image for the
    video-writing code below. Applies the same pipeline as the still
    images (gray -> blur -> Canny -> region mask -> Hough -> overlay),
    reusing the tuning parameters defined in the pipeline cell above.
    The original body returned an undefined `result` (NameError).
    """
    ysize = image.shape[0]
    xsize = image.shape[1]
    triangle = np.array([[0, ysize - 1], [xsize - 1, ysize - 1], [xsize / 2, ysize / 2]], np.int32)
    edges = grayscale(image)
    edges = gaussian_blur(edges, kernel_size)
    edges = canny(edges, low_threshold, high_threshold)
    edges = region_of_interest(edges, [triangle])
    lines = hough_lines(edges, rho, theta, threshold, min_line_len, max_line_gap)
    result = weighted_img(lines, image)
    return result
```
Let's try the one with the solid white lane on the right first ...
```
white_output = 'test_videos_output/solidWhiteRight.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(0,5)
clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4")
white_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!
%time white_clip.write_videofile(white_output, audio=False)
```
Play the video inline, or if you prefer find the video in your filesystem (should be in the same directory) and play it in your video player of choice.
```
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(white_output))
```
## Improve the draw_lines() function
**At this point, if you were successful with making the pipeline and tuning parameters, you probably have the Hough line segments drawn onto the road, but what about identifying the full extent of the lane and marking it clearly as in the example video (P1_example.mp4)? Think about defining a line to run the full length of the visible lane based on the line segments you identified with the Hough Transform. As mentioned previously, try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video "P1_example.mp4".**
**Go back and modify your draw_lines function accordingly and try re-running your pipeline. The new output should draw a single, solid line over the left lane line and a single, solid line over the right lane line. The lines should start from the bottom of the image and extend out to the top of the region of interest.**
Now for the one with the solid yellow lane on the left. This one's more tricky!
```
yellow_output = 'test_videos_output/solidYellowLeft.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4').subclip(0,5)
clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4')
yellow_clip = clip2.fl_image(process_image)
%time yellow_clip.write_videofile(yellow_output, audio=False)
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(yellow_output))
```
## Writeup and Submission
If you're satisfied with your video outputs, it's time to make the report writeup in a pdf or markdown file. Once you have this Ipython notebook ready along with the writeup, it's time to submit for review! Here is a [link](https://github.com/udacity/CarND-LaneLines-P1/blob/master/writeup_template.md) to the writeup template file.
## Optional Challenge
Try your lane finding pipeline on the video below. Does it still work? Can you figure out a way to make it more robust? If you're up for the challenge, modify your pipeline so it works with this video and submit it along with the rest of your project!
```
challenge_output = 'test_videos_output/challenge.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip3 = VideoFileClip('test_videos/challenge.mp4').subclip(0,5)
clip3 = VideoFileClip('test_videos/challenge.mp4')
challenge_clip = clip3.fl_image(process_image)
%time challenge_clip.write_videofile(challenge_output, audio=False)
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(challenge_output))
```
| github_jupyter |
```
from os.path import exists, join, isfile
from os import listdir, makedirs
from obspy.geodetics import kilometer2degrees
import numpy as np
from obspy.taup import TauPyModel
import matplotlib.pyplot as plt
from SS_MTI import Inversion
import threading
import subprocess
import Create_Vmod
from SS_MTI import Gradient, PhaseTracer, Misfit
```
# Fixed parameters (initial model)
```
# Location of the reflectivity forward-model binary and the output folder
# for this gradient-descent run; start_v holds the starting model files.
bin_filepath = "/home/nienke/Documents/Research/SS_MTI/External_packages/reflectivity_Mars/SRC/test/crfl_sac"
save_path_OG = "/home/nienke/Documents/Research/SS_MTI/External_packages/Test_reflectivity/Gradient_descent_classic"
if not exists(join(save_path_OG, "start_v")):
    makedirs(join(save_path_OG, "start_v"))
f_start = join(save_path_OG, "start_v")
## Fixed parameters:
src_depth = 20.0  # source depth (km)
epi_in_km = 1774.7380
epi = kilometer2degrees(epi_in_km, radius=3389.5)  # 3389.5 km: Mars radius
baz = 0.0  # back-azimuth (degrees)
dt = 0.025  # sampling interval (s)
# Phase/component pairs to window, with pre/post-arrival window lengths (s)
# and per-window plot amplitude limits.
phases = ["P", "S", "P", "S", "S"]
comps = ["Z", "T", "R", "Z", "R"]
t_pres = [1, 1, 1, 1, 1]
t_posts = [30, 30, 30, 30, 30]
ylims = [2e-10, 2e-10, 1e-10, 3e-10, 2e-10]
# Bandpass corner frequencies (Hz) applied to the windows.
fmin = 0.2
fmax = 0.6
zerophase = False
## Start parameters:
bm_start_model = "/home/nienke/Documents/Research/Data/MTI/MT_vs_STR/bm_models/TAYAK.bm"
# Starting moment-tensor components; the true model used to generate the
# observed data is noted in the comment below for comparison.
m_rr = 0.3000
m_tt = 0.1000
m_pp = 0.2000
m_rt = 0.2000
m_rp = 0.5000
m_tp = 0.2000
# mtt,mtp,mrt,mpp,mrp,mrr = 0.1000 0.2000 0.2000 0.1000 0.1000 0.2000 # TRUE MODEL
focal_mech = [m_rr, m_tt, m_pp, m_rt, m_rp, m_tp]
Moho_d = 75.0  # starting Moho depth (km)
m0 = np.hstack((focal_mech, Moho_d))  # full starting parameter vector
sigmas = np.ones(len(phases)) * 1e-10  # per-phase data uncertainties
# Write the starting .dat file, then update it with m0 (depth varies,
# vp/vs fixed) and emit a travel-time model file named "Init".
Create_Vmod.create_dat_file(
    src_depth, epi_in_km, baz, focal_mech, dt, f_start, bm_start_model,
)
Create_Vmod.update_dat_file(
    dat_folder=f_start,
    m=m0,
    vpvs=False,
    depth=True,
    produce_tvel= True,
    tvel_name = "Init",
)
m0
```
# Observed data
```
# Synthetic "observed" waveforms previously generated with the TAYAK model.
path_observed = (
    "/home/nienke/Documents/Research/SS_MTI/External_packages/Test_reflectivity/obs_2/"
)
npz_file = "/home/nienke/Documents/Research/Data/npz_files/TAYAK.npz"
st_obs = Gradient.read_refl_mseeds(path=path_observed)
# Predict phase arrival times with TauP in the TAYAK velocity model.
Taup = TauPyModel(npz_file)
obs_tts = [PhaseTracer.get_traveltime(Taup, phase, src_depth, epi) for phase in phases]
# Window and bandpass the observed traces around each predicted arrival.
st_obs_w, st_obs_full, s_obs = Gradient.window(
    st_obs, phases, comps, obs_tts, t_pres, t_posts, fmin, fmax, zerophase,
)
```
# Classic gradient descent
#
```
update_nr = 25       # total number of gradient-descent updates to run
current_update = 15  # resume the inversion from this update number
epsilon = 0.001      # finite-difference step used for the gradient
prior_crfl_filepath = join(f_start, "crfl.dat")
# NOTE(review): `prior_crfl_filepath` is computed above but the call below
# passes prior_crfl_filepath=None — confirm whether the starting .dat file
# should be supplied here (e.g. only when current_update == 0).
Inversion.gradient_descent(
    bin_path=bin_filepath,
    save_path=save_path_OG,
    epsilon=epsilon,
    update_nr=update_nr,
    dt=dt,
    sigmas=sigmas,
    st_obs_w=st_obs_w,
    current_update=current_update,
    prior_crfl_filepath=None,
    alphas=[1e-6, 1e-5, 5e-5, 1e-4, 5e-4, 1e-3, 1e-2, 1e-1],  # line-search step sizes
    fmin=fmin,
    fmax=fmax,
    phases=phases,
    comps=comps,
    t_pres=t_pres,
    t_posts=t_posts,
)
# Plot waveform fits for every saved update, then the misfit evolution.
Gradient.plot_updates(
    save_path=save_path_OG,
    st_obs_full=st_obs_full,
    st_obs_w=st_obs_w,
    obs_tts=obs_tts,
    phases=phases,
    comps=comps,
    t_pres=t_pres,
    t_posts=t_posts,
    fmin=fmin,
    fmax=fmax,
    zerophase=zerophase,
    ylims=ylims,
)
Gradient.plot_misfits(save_path_OG,epsilon=0.001)
```
# Gaus-Newton method
## Jacobian calculation
```
def Exact_gradient(
    thread_folder: str,
    thread_nr: int,
    dat_folder: str,
    unit_v: np.ndarray,
    bin_filepath: str,
):
    """Thread worker: run the reflectivity code for one unit moment tensor.

    Copies the prior ``crfl.dat`` and the reflectivity binary into
    ``thread_folder``, plugs ``unit_v`` into the .dat file (moment-tensor
    part only, depth unchanged), runs the forward model there and saves
    the unit vector as ``m.npy`` next to the output seismograms.

    Args:
        thread_folder: working directory for this worker.
        thread_nr: worker index (used only for log messages).
        dat_folder: folder containing the prior ``crfl.dat``.
        unit_v: length-6 unit vector selecting one moment-tensor component.
        bin_filepath: path to the ``crfl_sac`` reflectivity binary.
    """
    print(thread_nr)
    print(unit_v)
    # Copy the prior .dat file into the thread folder
    dat_filepath = join(dat_folder, "crfl.dat")
    subprocess.call(f"scp {dat_filepath} .", shell=True, cwd=thread_folder)
    # Copy the reflectivity binary into the thread folder
    subprocess.call(f"scp {bin_filepath} .", shell=True, cwd=thread_folder)
    # Plug the unit vector into the .dat file
    Create_Vmod.update_dat_file(
        dat_folder=thread_folder,
        m=unit_v,
        vpvs=False,
        depth=False,
        produce_tvel=True,
        tvel_name="exact",
    )
    # Run the reflectivity code
    print(f"Running exact gradient on thread: {thread_nr}\n")
    subprocess.call("./crfl_sac", shell=True, cwd=thread_folder)
    print(f"thread: {thread_nr} is done\n")
    np.save(join(thread_folder, "m.npy"), unit_v)
def Get_J_moment(
    save_path: str,
    prior_dat_folder: str,
    bin_file_path: str,
    phases: [str],
    comps: [str],
    t_pres: [float],
    t_posts: [float],
    fmin: float,
    fmax: float,
    zerophase: bool,
):
    """Jacobian of the windowed seismogram w.r.t. the six moment-tensor components.

    Runs the reflectivity forward model once per component (unit moment
    tensors, one worker thread each, results cached in ``Exact_<i>``
    folders), then windows the resulting seismograms. Column ``i`` of the
    returned matrix is the windowed seismogram for unit vector ``i``.

    Returns:
        J_approx: ``(n_samples, 6)`` Jacobian matrix.
    """
    # One unit vector per moment-tensor component (identity columns).
    unit_vs = np.eye(6)
    J_approx = []
    threads = []
    dat_folder = prior_dat_folder  # f_start
    """ Creating the exact seismograms: """
    for i in range(6):
        # One working folder per component.
        thread_folder = join(save_path, f"Exact_{i}")
        if not exists(thread_folder):
            makedirs(thread_folder)
        # Skip the forward run if seismograms ("st00*") already exist.
        exact_st = [
            f
            for f in listdir(thread_folder)
            if f.startswith("st00")
            if isfile(join(thread_folder, f))
        ]
        if not exact_st:
            t = threading.Thread(
                target=Exact_gradient,
                # Fixed: pass the bin_file_path argument; the original used
                # the global bin_filepath and ignored this parameter.
                args=[thread_folder, i, dat_folder, unit_vs[:, i], bin_file_path],
            )
            threads.append(t)
            t.start()
    # Wait for every worker that was actually started. (The original only
    # joined when the flag of the LAST folder happened to require a run.)
    for thread in threads:
        thread.join()
    """ Reading in seismograms and substituting into approximate Green's functions: """
    for i in range(6):
        thread_folder = join(save_path, f"Exact_{i}")
        st = Gradient.read_refl_mseeds(path=thread_folder, stack=False)
        """ Window the data """
        # Prefer a TauP .npz model for travel times; otherwise read the
        # travel times stored next to the .dat file.
        npz_name = [
            f
            for f in listdir(thread_folder)
            if f.endswith(".npz")
            if isfile(join(thread_folder, f))
        ]
        if npz_name:
            npz_file = join(thread_folder, npz_name[0])
            dat_file = join(thread_folder)
            Taup = TauPyModel(npz_file)
            depth = Create_Vmod.read_depth_from_dat(dat_file)
            epi = Create_Vmod.read_epi_from_dat(dat_file)
            syn_tts = [
                PhaseTracer.get_traveltime(Taup, phase, depth, epi)
                for phase in phases
            ]
        else:
            syn_tts = Gradient.get_tt_from_dat_file(phases, thread_folder, "exact")
        st_syn_w, st_syn_full, s_syn = Gradient.window(
            st, phases, comps, syn_tts, t_pres, t_posts, fmin, fmax, zerophase,
        )
        # Allocate the Jacobian once the window length is known.
        if i == 0:
            J_approx = np.zeros((len(s_syn), 6))
        J_approx[:, i] = s_syn
    return J_approx
def proposal(
    m:[float],
    save_path_OG: str,
    new_folder_name: str,
    dat_folder: str,
    phases: [str],
    comps: [str],
    t_pres: [float],
    t_posts: [float],
    dt: float,
    sigmas: [float],
    fmin: float,
    fmax:float,
    zerophase:bool,
):
    """Forward-model the proposal model ``m`` and window the result.

    Re-uses seismograms cached in ``<save_path_OG>/<new_folder_name>/It_0``
    when present; otherwise runs the reflectivity forward model from the
    prior ``crfl.dat`` in ``dat_folder``.

    Args:
        m: model vector (six moment-tensor components + depth as last entry).
        save_path_OG: base output folder.
        new_folder_name: sub-folder name for this proposal.
        dat_folder: folder holding the prior ``crfl.dat``.

    Returns:
        ``(st_syn_w, st_syn_full, s_syn)``: windowed stream, full stream,
        and the flattened windowed data vector.
    """
    save_folder = join(save_path_OG, new_folder_name)
    if not exists(save_folder):
        makedirs(save_folder)
    # Look for cached seismograms ("st00*") to avoid re-running the forward model
    if exists(join(save_folder, "It_0")):
        st_file = [
            f
            for f in listdir(join(save_folder, "It_0"))
            if f.startswith("st00")
            if isfile(join(save_folder, "It_0", f))
        ]
    else:
        st_file = []
    if st_file:
        st_syn = Gradient.read_refl_mseeds(path=join(save_folder, "It_0"), stack=False)
    else:
        # NOTE(review): bin_filepath is read from the enclosing (global)
        # scope, not passed as a parameter -- confirm this is intentional.
        src_str = Gradient.SRC_STR(
            binary_file_path=bin_filepath,
            prior_dat_filepath=join(dat_folder, "crfl.dat"),
            save_folder=save_folder,
            phases=phases,
            components=comps,
            t_pres=t_pres,
            t_posts=t_posts,
            vpvs=False,
            depth=True,
            dt=dt,
            sigmas=sigmas,
            tstars=None,
            fmin=fmin,
            fmax=fmax,
            zerophase=zerophase,
            start_it=0,
        )
        st_syn = src_str.forward(m)
    """ Window the data """
    # Prefer a TauP .npz model for travel times; otherwise fall back to the
    # travel times stored next to the .dat file.
    if exists(join(save_folder, "It_0")):
        npz_name = [
            f
            for f in listdir(join(save_folder, "It_0"))
            if f.endswith(".npz")
            if isfile(join(save_folder, "It_0", f))
        ]
    else:
        npz_name = []
    if npz_name:
        npz_file = join(save_folder, "It_0", npz_name[0],)
        dat_file = join(save_folder, "It_0")
        Taup = TauPyModel(npz_file)
        depth = Create_Vmod.read_depth_from_dat(dat_file)
        epi = Create_Vmod.read_epi_from_dat(dat_file)
        syn_tts = []
        for i, phase in enumerate(phases):
            syn_tts.append(PhaseTracer.get_traveltime(Taup, phase, depth, epi))
    else:
        # m[-1] (the depth) is used as the .tvel name suffix here
        syn_tts = Gradient.get_tt_from_dat_file(phases, join(save_folder, "It_0"), m[-1])
    st_syn_w, st_syn_full, s_syn = Gradient.window(
        st_syn, phases, comps, syn_tts, t_pres, t_posts, fmin, fmax, zerophase,
    )
    return st_syn_w, st_syn_full, s_syn
```
## Start the inversion:
```
save_path_OG = "/home/nienke/Documents/Research/SS_MTI/External_packages/Test_reflectivity/Gauss_newton/"
update_nr = 4  # number of Gauss-Newton updates to perform
current_update = 2
update = 0
lambd_0 = 100. # Initial lambda value
nu = 1.5  # factor by which the Levenberg-Marquardt damping is increased/decreased
misfits = np.zeros(update_nr + 1)
while update < update_nr:
if update == 0:
accepted_folder = f_start
# if current_update != 0:
# """ Check where the previous update ended and take this crfl.dat file as prior file"""
# prev_update = current_update - 1
"""
Step 1: Get the Gradient of seismogram w.r.t moment tensor paramters based on unit vectors:
"""
J_m = Get_J_moment(
save_path=join(save_path_OG,f"Update_{update}"),
prior_dat_folder=accepted_folder,
bin_file_path=bin_filepath,
phases=phases,
comps=comps,
t_pres=t_pres,
t_posts=t_posts,
fmin=fmin,
fmax=fmax,
zerophase=zerophase,
)
"""
Step 2: Forward run with the actual model parameters
- This is necessary for the Jacobian of the structural parameters
- This is necessary for the gradient of the misfit w.r.t. the seismometer
- This is necessary for the misfit calculation
"""
st_syn_w0, st_syn_full0, s_syn0 = proposal(
m=m0,
save_path_OG=save_path_OG,
new_folder_name=f"Update_{update}",
dat_folder=accepted_folder,
phases=phases,
comps=comps,
t_pres=t_pres,
t_posts=t_posts,
dt=dt,
sigmas=sigmas,
fmin=fmin,
fmax=fmax,
zerophase = False,
)
""" Calculate misfit """
xi0 = np.sum(Misfit.L2().run_misfit(phases, st_obs_w, st_syn_w0, sigmas ** 2))
""" Save initial guess parameters: """
if update == 0:
if not exists(join(save_path_OG, "start_v")):
makedirs(join(save_path_OG, "start_v"))
np.save(join(save_path_OG, "start_v","m1_initial.npy"),m0)
st = Gradient.read_refl_mseeds(path = join(save_path_OG,f"Update_{update}","It_0"),stack = False)
st.write(join(save_path_OG, "start_v", "st_m1.mseed"), format="MSEED")
np.save(join(save_path_OG, "start_v", "misfit.npy"),xi0)
"""
Step 3: Get the Jacobian of the structural parameters
"""
""" Then we need S1 and epsilon """
epsilon = 0.001
m1 = m0
m1[-1] += epsilon * m1[-1]
st_syn_w1, st_syn_full1, s_syn1 = proposal(
m=m1,
save_path_OG=join(save_path_OG,f"Update_{update}"),
new_folder_name="Depth_update",
dat_folder=accepted_folder,
phases=phases,
comps=comps,
t_pres=t_pres,
t_posts=t_posts,
dt=dt,
sigmas=sigmas,
fmin=fmin,
fmax=fmax,
zerophase = False,
)
J_str = np.expand_dims((s_syn1 - s_syn0) / epsilon, axis=1)
"""
Step 4: Combine the two Jacobians (moment tensor + structure)
"""
J_total = np.hstack((J_m, J_str)) / np.mean(sigmas)**2
"""
Step 5: Get the gradient of the misfit w.r.t. the seismogram
(i.e., derivative of the L2-norm)
"""
dxi_ds = np.expand_dims(-(s_obs - s_syn0) / (np.mean(sigmas) ** 2), axis=1)
"""
Step 6: Do update
NOTE: we have a singular matrix unfortunately, so therefore we will make use of the Levenberg-Marquardt algorithm,
which is basically adding damping.
Instead of the identity matrix we use diag(J.T@J):
Fletcher(1971)-A modified Marquardt subroutine for non-linear least squares,
zero values will be filled with average logaritmic values.
"""
# I = np.eye(J_total.shape[1]) # Levenberg-Marquardt algorithm
diag = np.diag(J_total.T @ J_total)
diag.setflags(write=1)
# Fill zero-values with logaritmic average values:
zero_diag = np.where(diag == 0)
non_zero_inds = np.where(diag != 0)
if zero_diag:
for z in zero_diag[0]:
print(np.mean(diag[diag != 0]))
diag[z] =np.exp(np.mean(np.log(diag[diag != 0])))
diag[non_zero_inds]=0.
I = np.diag(diag)
# diag = np.diag(J_total.T @ J_total)
# diag.setflags(write=1)
# # Fill zero-values with logaritmic average values:
# zero_diag = np.where(diag == 0)
# if zero_diag:
# for z in zero_diag[0]:
# print(z)
# diag[z] = np.exp(np.mean(np.log(diag[diag != 0])))
# I = np.diag(diag) # Levenberg-Marquardt Fletcher
"""
Step 7: Test two scenarios:
1. Lambda
2. Lambda/nu
"""
update_current = update
while update_current == update:
print(f"lamda 0 = {lambd_0}")
xi1s=np.ones(2)*9e9
m1_props = np.zeros((len(m0),2))
for i in range(2):
""" 1. lambda_0"""
if i == 0:
lambd = lambd_0
prop_folder = join(save_path_OG,f"Update_{update}","lambd_0")
elif i == 1:
lambd = lambd_0/nu
prop_folder = join(save_path_OG,f"Update_{update}","lambd_0_v")
J_inv = np.linalg.inv(J_total.T @ J_total + lambd * I)
J_d = J_total.T @ dxi_ds
m1_props[:,i] = m0 + (J_inv @ J_d)[:,0]
xi1s[i] = Gradient.SRC_STR(
binary_file_path=bin_filepath,
prior_dat_filepath=join(accepted_folder,"crfl.dat"),
save_folder=prop_folder,
phases=phases,
components=comps,
t_pres=t_pres,
t_posts=t_posts,
vpvs=False,
depth=True,
dt=dt,
sigmas=sigmas,
tstars=None,
fmin=fmin,
fmax=fmax,
zerophase=zerophase,
start_it=0,
).misfit(m1_props[:,i],st_obs_w)
## Make a choice for updating
if xi1s[0] < xi0 and xi1s[1] < xi0:
    """
    Both proposals are smaller in misfit,
    we then choose lambda/nu because we aim for least damping
    """
    print("update 1")
    lambd_0 /= nu
    # Save everything (model, stream, misfit, damping) in the current update:
    np.save(join(save_path_OG,f"Update_{update}",f"m1_.npy"),m1_props[:,1])
    st = Gradient.read_refl_mseeds(path = join(save_path_OG,f"Update_{update}","lambd_0_v","It_0"),stack = False)
    st.write(join(save_path_OG,f"Update_{update}", "st_m1.mseed"), format="MSEED")
    np.save(join(save_path_OG,f"Update_{update}", "misfit.npy"),xi1s[1])
    # NOTE(review): lambd_0 was already divided by nu above, so this saves
    # lambda_0/nu^2 -- confirm which value should be stored.
    np.save(join(save_path_OG,f"Update_{update}", "lambda_0_v.npy"),lambd_0 / nu)
    # DO UPDATE: accept the lambda/nu proposal
    accepted_folder = join(save_path_OG,f"Update_{update}","lambd_0_v","It_0")
    update +=1
elif xi1s[0] < xi0 and xi1s[1] > xi0:
    print("update 2")
    lambd_0 = lambd_0  # keep the current damping value unchanged
    # DO UPDATE: accept the lambda proposal
    np.save(join(save_path_OG,f"Update_{update}",f"m1_.npy"),m1_props[:,0])
    st = Gradient.read_refl_mseeds(path = join(save_path_OG,f"Update_{update}","lambd_0","It_0"),stack = False)
    st.write(join(save_path_OG,f"Update_{update}", "st_m1.mseed"), format="MSEED")
    np.save(join(save_path_OG,f"Update_{update}", "misfit.npy"),xi1s[0])
    np.save(join(save_path_OG,f"Update_{update}", "lambda_0.npy"),lambd_0)
    accepted_folder = join(save_path_OG,f"Update_{update}","lambd_0","It_0")
    update+= 1
else:  # No update: neither proposal lowered the misfit, so increase damping
    print("update 3 (no update)")
    lambd_0 *= nu
    # Rebuild the damping matrix for the next pass of the inner while loop
    diag = np.diag(J_total.T @ J_total)
    diag.setflags(write=1)
    # Fill zero-valued diagonal entries with a placeholder damping value:
    zero_diag = np.where(diag == 0)
    non_zero_inds = np.where(diag != 0)
    # NOTE(review): np.where returns a (non-empty) tuple, so this condition
    # is always True -- presumably "if zero_diag[0].size" was intended.
    if zero_diag:
        for z in zero_diag[0]:
            print(np.mean(diag[diag != 0]))
            diag[z] =100#np.exp(np.mean(np.log(diag[diag != 0])))
    # NOTE(review): zeroing the non-zero entries means only the previously
    # zero diagonal entries carry damping -- confirm this is intentional
    # (Fletcher's variant would keep diag(J.T @ J) here).
    diag[non_zero_inds]=0.
    I = np.diag(diag)
print(I)
J_inv = np.linalg.inv(J_total.T @ J_total + 1e2 * I)
J_d = J_total.T @ dxi_ds
prop = m0 + (J_inv @ J_d)[:,0]
J_inv @ J_total.T
J_total[:,2]
Gradient.plot_updates(
save_path=save_path_OG,
st_obs_full=st_obs_full,
st_obs_w=st_obs_w,
obs_tts=obs_tts,
phases=phases,
comps=comps,
t_pres=t_pres,
t_posts=t_posts,
fmin=fmin,
fmax=fmax,
zerophase=zerophase,
ylims=ylims,
)
fig, ax = plt.subplots(nrows=1, ncols=1, sharex="all", figsize=(8, 8))
# Collect the numbers of all finished update folders ("Update_<n>").
# BUG FIX: the original filtered on startswith("misfit") (which never
# matches a folder name), used str.strip("Update_") (strips a character
# SET, not a prefix), loaded from the undefined name "save_path", and
# indexed update_folders with its own values instead of iterating them.
update_folders = np.sort(
    np.asarray(
        [
            int(f.replace("Update_", ""))
            for f in listdir(save_path_OG)
            if f.startswith("Update_")
        ]
    )
)
print(update_folders)
# Accepted misfit of every update, in ascending update order
Xs = np.array(
    [
        np.load(join(save_path_OG, f"Update_{up_nr}", "misfit.npy"))
        for up_nr in update_folders
    ]
)
ms = np.arange(0, len(Xs))
ax.semilogy(ms, Xs)
ax.tick_params(axis="both", which="major", labelsize=15)
ax.tick_params(axis="both", which="minor", labelsize=15)
ax.set_xlabel("Update nr", fontsize=20)
ax.set_ylabel("Misfit", fontsize=20)
ax.set_xticks(ms)
ax.set_xticklabels(ms)
(xi1s < xi0).any()
dxi_ds
J_d
diag
dxi_ds.T.shape
J_total.T @ dxi_ds.T
```
# Two-step inversion:
### Get misfit w.r.t. seismogram
Seismogram is computed using m0 (so initial guess)
```
""" Forward run with the actual model parameters """
save_folder = join(save_path_OG, "Update_1")
if not exists(save_folder):
makedirs(save_folder)
st_file = [
f
for f in listdir(join(save_folder, "It_0"))
if f.startswith("st00")
if isfile(join(save_folder, "It_0", f))
]
if st_file:
st_syn = Gradient.read_refl_mseeds(path=st_file, stack=False)
else:
src_str = Gradient.SRC_STR(
binary_file_path=bin_filepath,
prior_dat_filepath=join(f_start, "crfl.dat"),
save_folder=save_folder,
phases=phases,
components=comps,
t_pres=t_pres,
t_posts=t_posts,
vpvs=False,
depth=True,
dt=dt,
sigmas=sigmas,
tstars=None,
fmin=fmin,
fmax=fmax,
zerophase=False,
start_it=0,
)
st_syn = src_str.forward(m0)
""" Window the data """
npz_name = [
f
for f in listdir(join(save_folder, "It_0"))
if f.endswith(".npz")
if isfile(join(save_folder, "It_0", f))
]
if npz_name:
npz_file = join(save_folder, "It_0", npz_name[0],)
dat_file = join(save_folder, "It_0")
Taup = TauPyModel(npz_file)
depth = Create_Vmod.read_depth_from_dat(dat_file)
epi = Create_Vmod.read_epi_from_dat(dat_file)
syn_tts = []
for i, phase in enumerate(phases):
syn_tts.append(PhaseTracer.get_traveltime(Taup, phase, depth, epi))
else:
syn_tts = Gradient.get_tt_from_dat_file(phases, join(save_folder, "It_0"), m0[-1])
st_syn_w, st_syn_full, s_syn = Gradient.window(
st_syn, phases, comps, syn_tts, t_pres, t_posts, fmin, fmax, zerophase,
)
```
### Gradient of seismogram w.r.t. structural update
```
""" We have the seismogram of m0 (from previous cell)"""
s0 = s_syn
""" Then we need S1 and epsilon """
epsilon = 0.001
m1 = m0
m1[-1] += epsilon * m1[-1]
save_folder = join(save_path_OG, "Depth_update")
if not exists(save_folder):
makedirs(save_folder)
st_file = [
f
for f in listdir(join(save_folder, "It_0"))
if f.startswith("st00")
if isfile(join(save_folder, "It_0", f))
]
if st_file:
st_syn = Gradient.read_refl_mseeds(path=st_file, stack=False)
else:
src_str = Gradient.SRC_STR(
binary_file_path=bin_filepath,
prior_dat_filepath=join(f_start, "crfl.dat"),
save_folder=save_folder,
phases=phases,
components=comps,
t_pres=t_pres,
t_posts=t_posts,
vpvs=False,
depth=True,
dt=dt,
sigmas=sigmas,
tstars=None,
fmin=fmin,
fmax=fmax,
zerophase=False,
start_it=0,
)
st_syn = src_str.forward(m1)
""" Window the data """
npz_name = [
f
for f in listdir(join(save_folder, "It_0"))
if f.endswith(".npz")
if isfile(join(save_folder, "It_0", f))
]
if npz_name:
npz_file = join(save_folder, "It_0", npz_name[0],)
dat_file = join(save_folder, "It_0")
Taup = TauPyModel(npz_file)
depth = Create_Vmod.read_depth_from_dat(dat_file)
epi = Create_Vmod.read_epi_from_dat(dat_file)
syn_tts = []
for i, phase in enumerate(phases):
syn_tts.append(PhaseTracer.get_traveltime(Taup, phase, depth, epi))
else:
syn_tts = Gradient.get_tt_from_dat_file(phases, join(save_folder, "It_0"), m0[-1])
st_syn_w1, st_syn_full1, s1 = Gradient.window(
st_syn, phases, comps, syn_tts, t_pres, t_posts, fmin, fmax, zerophase,
)
""" Get the approximate gradient of the depth """
J_depth = np.expand_dims((s1 - s0) / epsilon, axis=1)
J_depth.shape
```
### Add depth gradient to exact gradient
```
J_total = np.hstack((J_approx, J_depth))
```
### Calculate derivative of misfit w.r.t. seismogram (using L2)
```
# Considering L2:
dxi_ds = np.expand_dims(-(s_obs - s_syn) / (np.mean(sigmas) ** 2), axis=0)
dxi_ds.shape
G_total.shape
```
### Determine the gradient of the misfit w.r.t. model params
```
dxi_dm = dxi_ds @ G_total
dxi_dm
plt.semilogy(dxi_dm[0, :])
```
### Two step inversion:
#### 1. Invert the moment tensor using the exact gradients
```
""" 1.1 Get the Green's function for the moment tensor:"""
save_path_OG = "/home/nienke/Documents/Research/SS_MTI/External_packages/Test_reflectivity/Exact_Gradient"
G_approx = Get_G(
save_path=save_path_OG,
prior_dat_folder=f_start,
bin_file_path=bin_filepath,
phases=phases,
comps=comps,
t_pres=t_pres,
t_posts=t_posts,
fmin=fmin,
fmax=fmax,
zerophase=False
)
np.diag(1/(sigmas**2)).shape
Wd = np.diag(np.ones(G_approx.shape[0])) * (1/sigmas[0]**2)
s_obs_dims = np.expand_dims(s_obs,axis = 1)
A = G_approx.T @ G_approx
B = G_approx.T @ s_obs_dims
M = np.linalg.solve(A, B)
# M = np.linalg.lstsq(A, B)[0]
M
plt.figure(figsize=(16,8))
# for i in range(6):
plt.plot(G_approx[:,2])
st_file = "/home/nienke/Documents/Research/SS_MTI/External_packages/Test_reflectivity/Exact_Gradient/Exact_5/"
st_syn = Gradient.read_refl_mseeds(path=st_file, stack=False)
plt.figure(figsize=(16,8))
for tr in st_syn:
plt.plot(tr.times(),tr.data)
```
### 2. Invert the structure using approximate gradient
```
np.load("/home/nienke/Documents/Research/SS_MTI/External_packages/Test_reflectivity/Gradient_descent_classic/Update_6/X1s_0.001.npy")
```
| github_jupyter |
```
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import cross_validate
import numpy as np
import xgboost as xgb
import pandas as pd
train_datasetL = pd.read_csv("../data/ori_data/train_process.csv", header=None, sep="\t").iloc[:, 0].values
dev_datasetL = pd.read_csv("../data/ori_data/dev_process.csv", header=None, sep="\t").iloc[:, 0].values
train_datamatrix = np.load("../data/ori_data/train.featurematrix.data")
dev_datamatrix = np.load("../data/ori_data/dev.featurematrix.data")
from sklearn.ensemble import RandomForestClassifier
# Random-forest baseline; class_weight="balanced" compensates for the
# label imbalance in the training set.
randomforest_classifier = RandomForestClassifier(n_estimators=10, criterion='gini', max_depth=None,
                                                 min_samples_split=2, min_samples_leaf=1,
                                                 min_weight_fraction_leaf=0.0, max_features='auto',
                                                 max_leaf_nodes=None, min_impurity_decrease=0.0,
                                                 min_impurity_split=None, bootstrap=True, oob_score=False, n_jobs=1,
                                                 random_state=None, verbose=0, warm_start=False, class_weight="balanced")
randomforest_classifier.fit(train_datamatrix, train_datasetL)
preds = randomforest_classifier.predict(dev_datamatrix)
# predict() returns class labels; threshold+cast normalizes them to int 0/1
# (assumes binary 0/1 labels -- consistent with binary:logistic used below)
pred_label = preds >= 0.5
pred_label = pred_label.astype(int)
from sklearn.metrics import classification_report
print(classification_report(dev_datasetL, pred_label))
from sklearn.linear_model import LogisticRegression
# Logistic-regression baseline (balanced class weights, extra iterations to converge)
linear_classifier = LogisticRegression(class_weight="balanced", max_iter=1000)
linear_classifier.fit(train_datamatrix, train_datasetL)
preds = linear_classifier.predict(dev_datamatrix)
# predict() returns class labels; threshold+cast normalizes them to int 0/1
pred_label = preds >= 0.5
pred_label = pred_label.astype(int)
from sklearn.metrics import classification_report
print(classification_report(dev_datasetL, pred_label))
import pickle
# Protocol 2 keeps the pickle loadable from Python 2
pickle.dump(linear_classifier, open("../data/m_result/linear_classifier.model", "wb"), 2)
# specify parameters via map
# XGBoost on the full (imbalanced) training set; scale_pos_weight=4
# up-weights the positive class.
xgb_classifier = xgb.XGBClassifier(max_depth=11, learning_rate=0.01, n_estimators=1000,
                                   silent=0, objective='binary:logistic', booster='gbtree',
                                   n_jobs=1, nthread=None, gamma=0, min_child_weight=2, max_delta_step=0, subsample=1,
                                   colsample_bytree=1, colsample_bylevel=1, reg_alpha=0,
                                   reg_lambda=1, scale_pos_weight=4,
                                   base_score=0.5, random_state=0, seed=None, missing=None)
xgb_classifier.fit(train_datamatrix, train_datasetL)
# make prediction
preds = xgb_classifier.predict(dev_datamatrix)
pred_label = preds >= 0.5
pred_label = pred_label.astype(int)
from sklearn.metrics import classification_report
print(classification_report(dev_datasetL, pred_label))
# BUG FIX: persist the fitted classifier -- "bst" was never defined.
pickle.dump(xgb_classifier, open("../data/m_result/xgboost_3.model", "wb"), 2)
from sklearn.neighbors import KNeighborsClassifier
# k-nearest-neighbours baseline (k=5; minkowski with p=2 == Euclidean distance)
knn_classifier = KNeighborsClassifier(n_neighbors=5, weights='uniform',
                                      algorithm='auto', leaf_size=30,
                                      p=2, metric='minkowski',
                                      metric_params=None, n_jobs=1)
knn_classifier.fit(train_datamatrix, train_datasetL)
# make prediction
preds = knn_classifier.predict(dev_datamatrix)
# predict() returns class labels; threshold+cast normalizes them to int 0/1
pred_label = preds >= 0.5
pred_label = pred_label.astype(int)
from sklearn.metrics import classification_report
print(classification_report(dev_datasetL, pred_label))
# select same number of pos and neg example
def select_trainset(datasetM, datasetL, neg_ratio=1):
    """Build a class-balanced training subset.

    Keeps every positive example (label == 1) and randomly samples
    ``neg_ratio`` times as many negative examples without replacement
    (capped at the number of available negatives).

    Args:
        datasetM: 2-D feature matrix, one row per example.
        datasetL: 1-D 0/1 label array aligned with ``datasetM`` rows.
        neg_ratio: negatives to keep per positive example.

    Returns:
        Tuple ``(features, labels)`` with all positives first, followed by
        the sampled negatives.
    """
    pos_index_list = [idx for idx, val in enumerate(datasetL) if val == 1]
    pos_trainM = datasetM[pos_index_list, :]
    pos_trainL = datasetL[pos_index_list]
    # Set membership: O(n) instead of the original O(n^2) list scan.
    pos_index_set = set(pos_index_list)
    neg_index_list = [idx for idx in range(len(datasetL)) if idx not in pos_index_set]
    neg_len = len(pos_index_list) * neg_ratio
    permuation_index = np.random.permutation(len(neg_index_list))
    neg_index_selected = permuation_index[:neg_len]
    # BUG FIX: the permutation indexes positions WITHIN neg_index_list, so it
    # must be mapped through neg_index_list before indexing the dataset.
    # The original indexed datasetM/datasetL directly with the permuted
    # positions, which could select positive rows as "negatives".
    neg_indices = np.asarray(neg_index_list)[neg_index_selected]
    neg_trainM = datasetM[neg_indices, :]
    neg_trainL = datasetL[neg_indices]
    return np.concatenate((pos_trainM, neg_trainM)), np.concatenate((pos_trainL, neg_trainL))
selected_trainM, selected_trainL = select_trainset(train_datamatrix, train_datasetL, neg_ratio=1)
print(selected_trainM.shape)
print(selected_trainL.shape)
# specify parameters via map
# XGBoost on the 1:1 balanced subset; scale_pos_weight=1 since the classes
# are already balanced by select_trainset.
xgb_classifier = xgb.XGBClassifier(max_depth=7, learning_rate=0.1, n_estimators=1000,
                                   silent=0, objective='binary:logistic', booster='gbtree',
                                   n_jobs=1, nthread=None, gamma=0, min_child_weight=2, max_delta_step=0, subsample=1,
                                   colsample_bytree=1, colsample_bylevel=1, reg_alpha=0,
                                   reg_lambda=1, scale_pos_weight=1,
                                   base_score=0.5, random_state=0, seed=None, missing=None)
# Early stopping on dev-set AUC to pick the best iteration
xgb_classifier.fit(selected_trainM, selected_trainL,
                   eval_set=[(selected_trainM, selected_trainL), (dev_datamatrix, dev_datasetL)],
                   early_stopping_rounds=100, eval_metric="auc")
# make prediction
preds = xgb_classifier.predict(dev_datamatrix)
pred_label = preds >= 0.5
pred_label = pred_label.astype(int)
from sklearn.metrics import classification_report
print(classification_report(dev_datasetL, pred_label))
# BUG FIX: persist the fitted classifier -- "bst" was never defined.
pickle.dump(xgb_classifier, open("../data/m_result/1_1_ratio_xgboost.model", "wb"), 2)
```
| github_jupyter |
# Basic Motion
Welcome to JetBot's browser based programming interface! This document is
called a *Jupyter Notebook*, which combines text, code, and graphic
display all in one! Pretty neat, huh? If you're unfamiliar with *Jupyter* we suggest clicking the
``Help`` drop down menu in the top toolbar. This has useful references for
programming with *Jupyter*.
In this notebook, we'll cover the basics of controlling JetBot.
### Importing the Robot class
To get started programming JetBot, we'll need to import the ``Robot`` class. This class
allows us to easily control the robot's motors! This is contained in the ``jetbot`` package.
> If you're new to Python, a *package* is essentially a folder containing
> code files. These code files are called *modules*.
To import the ``Robot`` class, highlight the cell below and press ``ctrl + enter`` or the ``play`` icon above.
This will execute the code contained in the cell
```
from jetbot import Robot
```
Now that we've imported the ``Robot`` class we can initialize the class *instance* as follows.
```
robot = Robot()
```
### Commanding the robot
Now that we've created our ``Robot`` instance we named "robot", we can use this instance
to control the robot. To make the robot spin counterclockwise at 30% of it's max speed
we can call the following
> WARNING: This next command will make the robot move! Please make sure the robot has clearance.
```
robot.left(speed=0.3)
```
Cool, you should see the robot spin counterclockwise!
> If your robot didn't turn left, that means one of the motors is wired backwards! Try powering down your
> robot and swapping the terminals of the ``red`` and ``black`` cables of the incorrect motor.
>
> REMINDER: Always be careful to check your wiring, and don't change the wiring on a running system!
Now, to stop the robot you can call the ``stop`` method.
```
robot.stop()
```
Maybe we only want to run the robot for a set period of time. For that, we can use the Python ``time`` package.
```
import time
```
This package defines the ``sleep`` function, which causes the code execution to block for the specified number of seconds
before running the next command. Try the following to make the robot turn left only for half a second.
```
robot.left(0.3)
time.sleep(0.5)
robot.stop()
```
Great. You should see the robot turn left for a bit and then stop.
> Wondering what happened to the ``speed=`` inside the ``left`` method? Python allows
> us to set function parameters by either their name, or the order that they are defined
> (without specifying the name).
The ``Robot`` class also has the methods ``right``, ``forward``, and ``backward``. Try creating your own cell to make
the robot move forward at 50% speed for one second.
Create a new cell by highlighting an existing cell and pressing ``b`` or the ``+`` icon above. Once you've done that, type in the code that you think will make the robot move forward at 50% speed for one second.
### Controlling motors individually
Above we saw how we can control the robot using commands like ``left``, ``right``, etc. But what if we want to set each motor speed
individually? Well, there are two ways you can do this
The first way is to call the ``set_motors`` method. For example, to turn along a left arch for a second we could set the left motor to 30% and the right motor to 60% like follows.
```
robot.set_motors(0.3, 0.6)
time.sleep(1.0)
robot.stop()
```
Great! You should see the robot move along a left arch. But actually, there's another way that we could accomplish the same thing.
The ``Robot`` class has two attributes named ``left_motor`` and ``right_motor`` that represent each motor individually.
These attributes are ``Motor`` class instances, each which contains a ``value`` attribute. This ``value`` attribute
is a [traitlet](https://github.com/ipython/traitlets) which generates ``events`` when assigned a new value. In the motor
class, we attach a function that updates the motor commands whenever the value changes.
So, to accomplish the exact same thing we did above, we could execute the following.
```
robot.left_motor.value = 0.34
robot.left_motor.alpha = 0.9
robot.right_motor.value = 0.34
robot.right_motor.alpha = 0.81
time.sleep(3)
robot.left_motor.value = 0.0
robot.right_motor.value = 0.0
```
You should see the robot move in the same exact way!
### Link motors to traitlets
A really cool feature about these [traitlets](https://github.com/ipython/traitlets) is that we can
also link them to other traitlets! This is super handy because Jupyter Notebooks allow us
to make graphical ``widgets`` that use traitlets under the hood. This means we can attach
our motors to ``widgets`` to control them from the browser, or just visualize the value.
To show how to do this, let's create and display two sliders that we'll use to control our motors.
```
import ipywidgets.widgets as widgets
from IPython.display import display
# create two sliders with range [-1.0, 1.0]
left_slider = widgets.FloatSlider(description='left', min=-1.0, max=1.0, step=0.01, orientation='vertical')
right_slider = widgets.FloatSlider(description='right', min=-1.0, max=1.0, step=0.01, orientation='vertical')
# create a horizontal box container to place the sliders next to each other
slider_container = widgets.HBox([left_slider, right_slider])
# display the container in this cell's output
display(slider_container)
```
You should see two ``vertical`` sliders displayed above.
> HELPFUL TIP: In Jupyter Lab, you can actually "pop" the output of cells into an entirely separate window! It will still be
> connected to the notebook, but displayed separately. This is helpful if we want to pin the output of code we executed elsewhere.
> To do this, right click the output of the cell and select ``Create New View for Output``. You can then drag the new window
> to a location you find pleasing.
Try clicking and dragging the sliders up and down. Notice nothing happens when we move the sliders currently. That's because we haven't connected them to motors yet! We'll do that by using the ``link`` function from the traitlets package.
```
import traitlets
left_link = traitlets.link((left_slider, 'value'), (robot.left_motor, 'value'))
right_link = traitlets.link((right_slider, 'value'), (robot.right_motor, 'value'))
```
Now try dragging the sliders (slowly at first). You should see the respective motor turn!
The ``link`` function that we created above actually creates a bi-directional link! That means,
if we set the motor values elsewhere, the sliders will update! Try executing the code block below
```
robot.forward(0.3)
time.sleep(0.5)
robot.stop()
```
You should see the sliders respond to the motor commands! If we want to remove this connection we can call the
``unlink`` method of each link.
```
left_link.unlink()
right_link.unlink()
```
But what if we don't want a *bi-directional* link, let's say we only want to use the sliders to display the motor values,
but not control them. For that we can use the ``dlink`` function. The left input is the ``source`` and the right input is the ``target``
```
left_link = traitlets.dlink((robot.left_motor, 'value'), (left_slider, 'value'))
right_link = traitlets.dlink((robot.right_motor, 'value'), (right_slider, 'value'))
```
Now try moving the sliders. You should see that the robot doesn't respond. But when we set the motors using a different method,
the sliders will update and display the value!
### Attach functions to events
Another way to use traitlets, is by attaching functions (like ``forward``) to events. These
functions will get called whenever a change to the object occurs, and will be passed some information about that change
like the ``old`` value and the ``new`` value.
Let's create and display some buttons that we'll use to control the robot.
```
# create buttons
button_layout = widgets.Layout(width='100px', height='80px', align_self='center')
stop_button = widgets.Button(description='stop', button_style='danger', layout=button_layout)
forward_button = widgets.Button(description='forward', layout=button_layout)
backward_button = widgets.Button(description='backward', layout=button_layout)
left_button = widgets.Button(description='left', layout=button_layout)
right_button = widgets.Button(description='right', layout=button_layout)
# display buttons
middle_box = widgets.HBox([left_button, stop_button, right_button], layout=widgets.Layout(align_self='center'))
controls_box = widgets.VBox([forward_button, middle_box, backward_button])
display(controls_box)
```
You should see a set of robot controls displayed above! But right now they won't do anything. To do that
we'll need to create some functions that we'll attach to the button's ``on_click`` event.
```
def stop(change):
    """Halt both motors immediately."""
    robot.stop()
def step_forward(change):
    """Drive forward at 30% speed for half a second, then stop."""
    robot.forward(0.3)
    time.sleep(0.5)
    robot.stop()
def step_backward(change):
    """Drive backward at 30% speed for half a second, then stop."""
    robot.backward(0.3)
    time.sleep(0.5)
    robot.stop()
def step_left(change):
    """Spin left at 30% speed for half a second, then stop."""
    robot.left(0.3)
    time.sleep(0.5)
    robot.stop()
def step_right(change):
    """Spin right at 30% speed for half a second, then stop."""
    robot.right(0.3)
    time.sleep(0.5)
    robot.stop()
```
Now that we've defined the functions, let's attach them to the on-click events of each button
```
# link buttons to actions
stop_button.on_click(stop)
forward_button.on_click(step_forward)
backward_button.on_click(step_backward)
left_button.on_click(step_left)
right_button.on_click(step_right)
```
Now when you click each button, you should see the robot move!
### Heartbeat Killswitch
Here we show how to connect a 'heartbeat' to stop the robot from moving. This is a simple way to detect if the robot connection is alive. You can lower the slider below to reduce the period (in seconds) of the heartbeat. If a round-trip communication between browser and robot cannot be made within two heartbeats, the '`status`' attribute of the heartbeat will be set to ``dead``. As soon as the connection is restored, the ``status`` attribute will return to ``alive``.
```
from jetbot import Heartbeat
heartbeat = Heartbeat()
# this function will be called when heartbeat 'alive' status changes
def handle_heartbeat_status(change):
    # Traitlets observer for the heartbeat's `status` trait: `change['new']`
    # holds the new value; stop the robot as soon as the link is declared dead.
    if change['new'] == Heartbeat.Status.dead:
        robot.stop()
heartbeat.observe(handle_heartbeat_status, names='status')
period_slider = widgets.FloatSlider(description='period', min=0.001, max=0.5, step=0.01, value=0.5)
traitlets.dlink((period_slider, 'value'), (heartbeat, 'period'))
display(period_slider, heartbeat.pulseout)
```
Try executing the code below to start the motors, and then lower the slider to see what happens. You can also try disconnecting your robot or PC.
```
robot.left(0.2)
# now lower the `period` slider above until the network heartbeat can't be satisfied
```
### Conclusion
That's it for this example notebook! Hopefully you feel confident that you can program your robot to move around now :)
| github_jupyter |
# 머신 러닝 교과서 3판
# 14장 - 텐서플로의 구조 자세히 알아보기 (2/3)
**아래 링크를 통해 이 노트북을 주피터 노트북 뷰어(nbviewer.jupyter.org)로 보거나 구글 코랩(colab.research.google.com)에서 실행할 수 있습니다.**
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://nbviewer.jupyter.org/github/rickiepark/python-machine-learning-book-3rd-edition/blob/master/ch14/ch14_part2.ipynb"><img src="https://jupyter.org/assets/main-logo.svg" width="28" />주피터 노트북 뷰어로 보기</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/rickiepark/python-machine-learning-book-3rd-edition/blob/master/ch14/ch14_part2.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />구글 코랩(Colab)에서 실행하기</a>
</td>
</table>
### 목차
- 텐서플로 추정기
- 특성 열 사용하기
- 사전에 준비된 추정기로 머신 러닝 수행하기
```
import numpy as np
import tensorflow as tf
import pandas as pd
from IPython.display import Image
tf.__version__
```
## 텐서플로 추정기
##### 사전에 준비된 추정기 사용하는 단계
* **단계 1:** 데이터 로딩을 위해 입력 함수 정의하기
* **단계 2:** 추정기와 데이터 사이를 연결하기 위해 특성 열 정의하기
* **단계 3:** 추정기 객체를 만들거나 케라스 모델을 추정기로 바꾸기
* **단계 4:** 추정기 사용하기: train() evaluate() predict()
```
tf.random.set_seed(1)
np.random.seed(1)
```
### 특성 열 사용하기
* 정의: https://developers.google.com/machine-learning/glossary/#feature_columns
* 문서: https://www.tensorflow.org/api_docs/python/tf/feature_column
```
Image(url='https://git.io/JL56E', width=700)
dataset_path = tf.keras.utils.get_file("auto-mpg.data",
("http://archive.ics.uci.edu/ml/machine-learning-databases"
"/auto-mpg/auto-mpg.data"))
column_names = ['MPG', 'Cylinders', 'Displacement', 'Horsepower',
'Weight', 'Acceleration', 'ModelYear', 'Origin']
df = pd.read_csv(dataset_path, names=column_names,
na_values = "?", comment='\t',
sep=" ", skipinitialspace=True)
df.tail()
print(df.isna().sum())
df = df.dropna()
df = df.reset_index(drop=True)
df.tail()
import sklearn
import sklearn.model_selection
df_train, df_test = sklearn.model_selection.train_test_split(df, train_size=0.8)
train_stats = df_train.describe().transpose()
train_stats
numeric_column_names = ['Cylinders', 'Displacement', 'Horsepower', 'Weight', 'Acceleration']
df_train_norm, df_test_norm = df_train.copy(), df_test.copy()
for col_name in numeric_column_names:
mean = train_stats.loc[col_name, 'mean']
std = train_stats.loc[col_name, 'std']
df_train_norm.loc[:, col_name] = (df_train_norm.loc[:, col_name] - mean)/std
df_test_norm.loc[:, col_name] = (df_test_norm.loc[:, col_name] - mean)/std
df_train_norm.tail()
```
#### 수치형 열
```
numeric_features = []
for col_name in numeric_column_names:
numeric_features.append(tf.feature_column.numeric_column(key=col_name))
numeric_features
feature_year = tf.feature_column.numeric_column(key="ModelYear")
bucketized_features = []
bucketized_features.append(tf.feature_column.bucketized_column(
source_column=feature_year,
boundaries=[73, 76, 79]))
print(bucketized_features)
feature_origin = tf.feature_column.categorical_column_with_vocabulary_list(
key='Origin',
vocabulary_list=[1, 2, 3])
categorical_indicator_features = []
categorical_indicator_features.append(tf.feature_column.indicator_column(feature_origin))
print(categorical_indicator_features)
```
### 사전에 준비된 추정기로 머신러닝 수행하기
```
def train_input_fn(df_train, batch_size=8):
    "Input function for Estimator.train: shuffled, endlessly repeated batches."
    features = df_train.copy()
    # Pop the target column out; `features` keeps everything else.
    labels = features.pop('MPG')
    dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))
    # Shuffle, repeat indefinitely, then batch.
    return dataset.shuffle(1000).repeat().batch(batch_size)
## 조사
ds = train_input_fn(df_train_norm)
batch = next(iter(ds))
print('키:', batch[0].keys())
print('ModelYear:', batch[0]['ModelYear'])
all_feature_columns = (numeric_features +
bucketized_features +
categorical_indicator_features)
print(all_feature_columns)
regressor = tf.estimator.DNNRegressor(
feature_columns=all_feature_columns,
hidden_units=[32, 10],
model_dir='models/autompg-dnnregressor/')
EPOCHS = 1000
BATCH_SIZE = 8
total_steps = EPOCHS * int(np.ceil(len(df_train) / BATCH_SIZE))
print('훈련 스텝:', total_steps)
regressor.train(
input_fn=lambda:train_input_fn(df_train_norm, batch_size=BATCH_SIZE),
steps=total_steps)
reloaded_regressor = tf.estimator.DNNRegressor(
feature_columns=all_feature_columns,
hidden_units=[32, 10],
warm_start_from='models/autompg-dnnregressor/',
model_dir='models/autompg-dnnregressor/')
def eval_input_fn(df_test, batch_size=8):
    "Input function for Estimator.evaluate/predict: one pass, no shuffling."
    features = df_test.copy()
    # Separate the target column from the features.
    labels = features.pop('MPG')
    dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))
    return dataset.batch(batch_size)
eval_results = reloaded_regressor.evaluate(
input_fn=lambda:eval_input_fn(df_test_norm, batch_size=8))
for key in eval_results:
print('{:15s} {}'.format(key, eval_results[key]))
print('평균 손실 {:.4f}'.format(eval_results['average_loss']))
pred_res = regressor.predict(input_fn=lambda: eval_input_fn(df_test_norm, batch_size=8))
print(next(iter(pred_res)))
```
#### Boosted Tree Regressor
```
boosted_tree = tf.estimator.BoostedTreesRegressor(
feature_columns=all_feature_columns,
n_batches_per_layer=20,
n_trees=200)
boosted_tree.train(
input_fn=lambda:train_input_fn(df_train_norm, batch_size=BATCH_SIZE))
eval_results = boosted_tree.evaluate(
input_fn=lambda:eval_input_fn(df_test_norm, batch_size=8))
print(eval_results)
print('평균 손실 {:.4f}'.format(eval_results['average_loss']))
```
| github_jupyter |
# Posthoc Inference on Contrasts
In this notebook, we provide examples of how to run posthoc inference to infer on contrasts in the linear model.
## Set Up
#### Import the required python packages.
```
import numpy as np
import numpy.matlib as npm
import matplotlib.pyplot as plt
import sanssouci as ss
import pyrft as pr
```
#### Initialize the example
```
# Set the dimension of the example and the number of subjects
Dim = (50,50)
N = 100
m = np.prod(Dim)
# Generate the category vector and obtain the corresponding design matrix
from sklearn.utils import check_random_state
rng = check_random_state(101)
categ = rng.choice(3, N, replace = True)
X = pr.group_design(categ)
# Specify the contrast matrix (here 2 contrasts are chosen)
C = np.array([[1, -1, 0], [0, 1, -1]])
# Calulate the number contrasts
L = C.shape[0]
# Calculate the number of p-values generated (L for each voxels)
npvals = m * L
# Generate a white noise field
lat_data = pr.wfield(Dim, N)
# Generate a stationary random field with given FWHM
# FWHM = 4; lat_data = pr.statnoise(Dim, N, FWHM)
# Plot a sample realization of the noise
plt.imshow(lat_data.field[:, :, 1])
```
### Add signal to the field
```
# Obtain the locations where the category is 2
w2 = np.where(categ==2)[0]
# Initialize the spatial signal
pi0 = 0.9 # proportion of noise (true null hypotheses)
p0 = int(np.round(pi0 * m))
signal = np.zeros(m)
signal[(p0 + 1): m] = 1
signal = signal.reshape(Dim)
# Add the signal to the field
for I in np.arange(len(w2)):
lat_data.field[:, :, w2[I]] += signal
# Convert the signal to boolean to determine whether the true signal is
bool_signal = np.zeros(Dim + (L,)) == 0
bool_signal[:, :, 1] = signal > 0
# Plot the locaion locations for illustration
plt.imshow(signal)
```
## Posthoc Inference
### Bootstrapping the Data
Bootstrapping is performed using the residuals of the linear model. This gives test-statistics that have the same asymptotic distribution as the limiting test-statistic (under the null). See Eck 2017 and Freedman 1981 for further details. In our context we use these to obtain bootstrapped pivotal statistics which allow us to obtain asymptotic JER control.
```
# Specify the number of bootstraps to use
B = 100
# Choose the template to use (by default the linear template is chosen)
template = 'linear'
# Run the bootstrapped algorithm
minPperm, orig_pvalues, pivotal_stats, bs = pr.boot_contrasts(lat_data, X, C, B, template, True, 1)
import matplotlib.pyplot as plt
plt.hist(minPperm)
```
### Plotting the p-values
```
pval_sort_idx = np.argsort(np.ravel(orig_pvalues.field))
pvals = np.ravel(orig_pvalues.field)[pval_sort_idx]
figure, axes = plt.subplots(nrows=1, ncols=2)
plt.subplot(121)
plt.hist(np.ravel(orig_pvalues.field), 100)
plt.title('Histogram of the p-values')
plt.ylabel('Counts')
plt.subplot(122)
plt.plot(pvals[:np.min([1000, npvals])])
plt.title('Smallest 1000 p-values')
plt.xlabel('k')
plt.ylabel('p_{(k)}')
figure.tight_layout(pad=3.0)
```
### Lambda Calibration
Using the bootstrapped pivotal_stats that we have calculated we can choose a value lambda that is the (alpha)% quantile (for some 0 < alpha < 1) of the distribution in order to provide asymptotic JER control at a level alpha.
```
# Choose the confidence level
alpha = 0.1
# Obtain the lambda calibration
lambda_quant = np.quantile(pivotal_stats, alpha)
print('Lambda Quantile:', lambda_quant)
# Calculate the number of voxels in the mask
m = np.sum(lat_data.mask)
# Gives t_k^L(lambda) = lambda*k/m for k = 1, ..., m
thr = ss.t_linear(lambda_quant, np.arange(1, m + 1), m)
```
### PostHoc Bound
For a chosen subset of voxels, provide a bound on the number of true null hypotheses within that subset.
```
# Get the first 10 pvalues (or any subset of the p-values)
subset_pvals = np.sort(np.ravel(orig_pvalues.field))[:10]
# Compute an upper bound on the number of null hypotheses
bound = ss.max_fp(subset_pvals, thr)
print('FP Upper Bound on subset:', bound)
```
### Confidence Envelopes
```
# These are the confidence envelopes. I.e. for i = 1:npvals, max_FP[i-1] is the upper bound on the number of
# false positives that occur within the set [p[0], \dots, p[i-1]] if you were to reject all elements of that set.
max_FP = ss.curve_max_fp(subset_pvals, thr) # Confidence envelope on the chosen subset
print(max_FP)
max_FP = ss.curve_max_fp(pvals, thr) # Confidence envelope on all of them
print(max_FP[0: 200])
```
#### Plot the FPR and TP curve bounds
```
# Generate the vector [0,...,npvals]
one2npvals = np.arange(1, npvals + 1)
# Choose the number of p-values (always the smallest ones first) to plot
lowestnumber = 1000
# Ensure that selected number is not greater than the total number of p-values
lowestnumber = np.min([lowestnumber, npvals])
# Dividing the envelope by the number of elements in the set gives a bound on the false discovery proportion
max_FDP = max_FP[0: lowestnumber] / one2npvals[0: lowestnumber]
min_TP = one2npvals[0: lowestnumber] - max_FP[0: lowestnumber]
# Calculate the truth (to determine if it is correctly bounded!)
sorted_signal = np.ravel(bool_signal)[pval_sort_idx]
TP = np.zeros(lowestnumber)
for I in np.arange(lowestnumber):
TP[I] = np.sum(sorted_signal[0: I + 1])
# Calculate the true FDP for each subset
FP = np.zeros(lowestnumber)
for I in np.arange(lowestnumber):
FP[I] = np.sum(abs(sorted_signal[0: I + 1] - 1))
true_FDP = FP / one2npvals[0: lowestnumber]
# Initialize the figure
figure = plt.figure(figsize=(10, 4))
# Plot the false discovery proportion and its bound
plt.subplot(121)
plt.plot(max_FDP, label='FDP bound')
plt.plot(true_FDP, label='True FDP')
plt.title('Upper bound on FDP amongst smallest p-values')
plt.xlim(1, lowestnumber)
plt.xlabel('k')
plt.ylabel('FDP(p_{(1)}, \dots, p_{(k)}')
plt.legend(loc="upper right")
# Plot the true postives and their bound
plt.subplot(122)
plt.plot(min_TP, label='TP bound')
plt.plot(TP, label='True Positives')
plt.title('Lower bound on TP')
plt.legend(loc="upper right")
plt.xlim(1, lowestnumber)
plt.xlabel('k')
plt.ylabel('TP(p_{(1)}, \dots, p_{(k)}')
#figure, axes = plt.subplots(nrows=1, ncols=2)
figure.tight_layout(pad=1.0)
```
As can be seen we obtain an upper bound on the false discovery proportion and a lower bound on the number of true positives within each set. Note that these bounds are valid 90% of the time, since we calibrated with alpha = 0.1 above.
### Bootstrap paths
```
for b in np.arange(B):
plt.plot(bs[:, b], color="blue")
# Calculate reference families
t_k, _ = pr.t_ref(template)
m = bs.shape[0]
lamb = np.arange(11) / 10
print(lamb)
print(m)
k = np.arange(m + 1)
for l in np.arange(len(lamb)):
plt.plot(t_k(lamb[l], k, m),color="black")
plt.xlim(1, m)
plt.xlabel('k')
plt.ylabel('p_{b,(k)}')
plt.title('Plotting the ordered p-values for each bootstrap')
```
| github_jupyter |
<h1 align="center">Welcome to SimpleITK Jupyter Notebooks</h1>
## Newcomers to Jupyter Notebooks:
1. We use two types of cells, code and markdown.
2. To run a code cell, select it (mouse or arrow key so that it is highlighted) and then press shift+enter which also moves focus to the next cell or ctrl+enter which doesn't.
3. Closing the browser window does not close the Jupyter server. To close the server, go to the terminal where you ran it and press ctrl+c twice.
For additional details see the [Jupyter Notebook Quick Start Guide](https://jupyter-notebook-beginner-guide.readthedocs.io/en/latest/index.html).
## SimpleITK Environment Setup
Check that SimpleITK and auxiliary program(s) are correctly installed in your environment, and that you have the SimpleITK version which you expect (<b>requires network connectivity</b>).
You can optionally download all of the data used in the notebooks in advance. This step is only necessary if you expect to run the notebooks without network connectivity.
The following cell checks that all expected packages are installed.
```
from __future__ import print_function

import importlib

# Check that all packages required by the notebooks are importable
# (see requirements.txt). Failures are collected and reported together
# so the user sees the full list of missing packages at once.
# (The unused `distutils.version.LooseVersion` import was dropped;
# distutils is removed in Python 3.12.)
required_packages = {'jupyter',
                     'numpy',
                     'matplotlib',
                     'ipywidgets',
                     'scipy',
                     'pandas',
                     'SimpleITK'
                     }

problem_packages = list()
# Try to import each required package; record the ones that fail.
for package in required_packages:
    try:
        importlib.import_module(package)
    except ImportError:
        problem_packages.append(package)

# Fix: the original used `len(problem_packages) is 0` — identity comparison
# against an int literal is implementation-dependent; use a truthiness test.
if not problem_packages:
    print('All is well.')
else:
    print('The following packages are required but not installed: '
          + ', '.join(problem_packages))
import SimpleITK as sitk
%run update_path_to_download_script
from downloaddata import fetch_data, fetch_data_all
from ipywidgets import interact
print(sitk.Version())
```
We expect that you have an external image viewer installed. The default viewer is <a href="https://fiji.sc/#download">Fiji</a>. If you have another viewer (e.g. ITK-SNAP or 3D Slicer) you will need to set an environment variable to point to it. This can be done from within a notebook as shown below.
```
# Uncomment the line below to change the default external viewer to your viewer of choice and test that it works.
#%env SITK_SHOW_COMMAND /Applications/ITK-SNAP.app/Contents/MacOS/ITK-SNAP
# Retrieve an image from the network, read it and display using the external viewer.
# The show method will also set the display window's title and by setting debugOn to True,
# will also print information with respect to the command it is attempting to invoke.
# NOTE: The debug information is printed to the terminal from which you launched the notebook
# server.
sitk.Show(sitk.ReadImage(fetch_data("SimpleITK.jpg")), "SimpleITK Logo", debugOn=True)
```
Now we check that the ipywidgets will display correctly. When you run the following cell you should see a slider.
If you don't see a slider please shutdown the Jupyter server, at the command line prompt press Control-c twice, and then run the following command:
```jupyter nbextension enable --py --sys-prefix widgetsnbextension```
```
interact(lambda x: x, x=(0,10));
```
Download all of the data in advance if you expect to be working offline (may take a couple of minutes).
```
# `os` is used for path joining but is not imported by any visible cell in
# this notebook (it may be injected by the %run helper script — confirm);
# import it here so this cell also runs standalone.
import os

fetch_data_all(os.path.join('..','Data'), os.path.join('..','Data','manifest.json'))
```
| github_jupyter |
[View in Colaboratory](https://colab.research.google.com/github/PranY/FastAI_projects/blob/master/TSG.ipynb)
```
!pip install fastai
!pip install torch_nightly -f https://download.pytorch.org/whl/nightly/cu92/torch_nightly.html
! pip install kaggle
! pip install tqdm
from google.colab import drive
drive.mount('/content/drive')
! ls "drive/My Drive"
! cp drive/My\ Drive/kaggle.json ~/.kaggle/
! kaggle competitions download -c tgs-salt-identification-challenge
! python -c 'import fastai; print(fastai.__version__)'
! python -c 'import fastai; fastai.show_install(0)'
!ls
# ! rm -r train/
# !rm -r test/
! mkdir train
! mkdir test
! unzip train.zip -d train
! unzip test.zip -d test
! ls train/images | wc -l
! ls train/masks | wc -l
! ls test/images | wc -l
%matplotlib inline
%reload_ext autoreload
%autoreload 2
from tqdm import tqdm_notebook
from fastai import *
from fastai.vision import *
#from fastai.docs import *
import PIL
# Loading of training/testing ids and depths
train_df = pd.read_csv("train.csv", index_col="id", usecols=[0])
depths_df = pd.read_csv("depths.csv", index_col="id")
train_df = train_df.join(depths_df)
test_df = depths_df[~depths_df.index.isin(train_df.index)]
num_workers=0
len(train_df)
PATH_X = Path('train/images')
PATH_Y = Path('train/masks')
# def resize2d(fn:PathOrStr, sz) -> Image:
# img = PIL.Image.open(fn)
# img = img.resize((sz,sz), PIL.Image.BILINEAR)
# img.save(fn)
# for l in list(PATH_X.iterdir()):
# resize2d(l,128)
# for l in list(PATH_Y.iterdir()):
# resize2d(l,128)
# Reducing mask images to {0,1}
def FormatMask(fn:PathOrStr) -> Image:
    "Binarize the mask image at `fn` in place: pixels < 128 -> 0, >= 128 -> 1."
    grey = PIL.Image.open(fn).convert('L')
    # Threshold at the middle of the 0..255 greyscale range; keep uint8 so
    # Pillow can round-trip the array back to an image.
    binary = (np.asarray(grey) >= 128).astype(np.uint8)
    PIL.Image.fromarray(binary).save(fn)
for l in list(PATH_Y.iterdir()):
FormatMask(l)
class ImageMask(Image):
    "Class for image segmentation target."
    # Lighting transforms would corrupt the label values, so masks ignore them.
    def lighting(self, func:LightingFunc, *args:Any, **kwargs:Any)->'Image': return self
    def refresh(self):
        # Force bilinear resampling when the pending transforms are applied.
        # NOTE(review): bilinear can blend label values at mask edges —
        # 'nearest' is the usual choice for segmentation masks; confirm intended.
        self.sample_kwargs['mode'] = 'bilinear'
        return super().refresh()
    @property
    def data(self)->TensorImage:
        "Return this image pixels as a `LongTensor`."
        return self.px.long()
    def show(self, ax:plt.Axes=None, figsize:tuple=(3,3), title:Optional[str]=None, hide_axis:bool=True,
             cmap:str='viridis', alpha:float=0.5):
        # Render via the module-level `_show_image` helper; `alpha` lets the
        # mask be overlaid on an image drawn on the same axes.
        ax = _show_image(self, ax=ax, hide_axis=hide_axis, cmap=cmap, figsize=figsize, alpha=alpha)
        if title: ax.set_title(title)
def open_mask(fn:PathOrStr)->ImageMask:
    "Load the mask image at `fn` as a greyscale `ImageMask` scaled into [0, 1]."
    pil_mask = PIL.Image.open(fn).convert('L')
    return ImageMask(pil2tensor(pil_mask).float().div_(255))
def _show_image(img:Image, ax:plt.Axes=None, figsize:tuple=(3,3), hide_axis:bool=True, cmap:str='binary',
                alpha:float=None)->plt.Axes:
    "Plot `img` on `ax` (creating new axes when none are given) and return the axes."
    if ax is None:
        _, ax = plt.subplots(figsize=figsize)
    ax.imshow(image2np(img.data), cmap=cmap, alpha=alpha)
    if hide_axis:
        ax.axis('off')
    return ax
img = next(PATH_X.iterdir())
open_image(img).show()
open_image(img).size
def get_y_fn(x_fn): return PATH_Y/f'{x_fn.name[:-4]}.png'
img_y_f = get_y_fn(img)
open_mask(img_y_f).show()
open_mask(img_y_f).size
x = open_image(img)
x.show(y=open_mask(img_y_f))
x.shape
open_image(img).shape, open_mask(img_y_f).shape
def get_datasets(path):
    "Pair each image under `path` with its mask and split into train/valid `SegmentationDataset`s."
    image_files = [p for p in path.iterdir() if p.is_file()]
    mask_files = [get_y_fn(p) for p in image_files]
    # First 1000 files go to training, the remainder to validation.
    split_mask = [idx >= 1000 for idx in range(len(image_files))]
    splits = arrays_split(split_mask, image_files, mask_files)
    return [SegmentationDataset(*split) for split in splits]
train_ds,valid_ds = get_datasets(PATH_X)
train_ds,valid_ds
x,y = next(iter(train_ds))
x.shape, y.shape, type(x), type(y)
size = 128
def get_tfm_datasets(size):
    """Build transformed train/valid datasets at image size `size`.

    Fix: the original called `get_datasets(PATH_X)` but discarded the result
    and silently used the module-level `train_ds`/`valid_ds` globals; the
    freshly created datasets are now actually used.
    """
    train_ds, valid_ds = get_datasets(PATH_X)
    # Standard augmentations: horizontal flips, small rotations, lighting jitter.
    tfms = get_transforms(do_flip=True, max_rotate=4, max_lighting=0.2)
    return transform_datasets(train_ds, valid_ds, tfms=tfms, tfm_y=True, size=size, padding_mode='border')
train_tds, *_ = get_tfm_datasets(size)
for i in range(0,3):
train_tds[i][0].show()
for i in range(0,3):
train_tds[i][1].show()
_,axes = plt.subplots(1,4, figsize=(12,6))
for i, ax in enumerate(axes.flat):
imgx,imgy = train_tds[i]
imgx.show(ax, y=imgy)
default_norm,default_denorm = normalize_funcs( mean=tensor([0.4850, 0.4560, 0.4060]), std=tensor([0.2290, 0.2240, 0.2250]))
bs = 32
def get_data(size, bs):
return DataBunch.create(*get_tfm_datasets(size), bs=bs, tfms=default_norm)
data = get_data(size, bs)
#export
def show_xy_images(x:Tensor,y:Tensor,rows:int,figsize:tuple=(9,9)):
    "Display a `rows` x `rows` grid of images with their targets overlaid."
    _, axes = plt.subplots(rows, rows, figsize=figsize)
    for idx, axis in enumerate(axes.flatten()):
        show_image(x[idx], y=y[idx], ax=axis)
    plt.tight_layout()
x,y = next(iter(data.train_dl))
x,y = x.cpu(),y.cpu()
x = default_denorm(x)
show_xy_images(x,y,4, figsize=(9,9))
x.shape, y.shape
head = std_upsample_head(2, 512,256,256,256,256)
head
def dice(input:Tensor, targs:Tensor) -> Rank0Tensor:
    "Dice coefficient metric for binary target"
    batch = targs.shape[0]
    # Reduce per-class scores to hard predictions, flattened per sample.
    preds = input.argmax(dim=1).view(batch, -1)
    flat_targs = targs.view(batch, -1)
    # Dice = 2 * |A ∩ B| / (|A| + |B|) over the whole batch.
    overlap = (preds * flat_targs).sum().float()
    total = (preds + flat_targs).sum().float()
    return 2. * overlap / total
def accuracy(input:Tensor, targs:Tensor) -> Rank0Tensor:
    "Accuracy"
    batch = targs.shape[0]
    # Hard predictions from the class axis, flattened to match the targets.
    preds = input.argmax(dim=1).view(batch, -1)
    return (preds == targs.view(batch, -1)).float().mean()
metrics=[accuracy, dice]
learn = ConvLearner(data, models.resnet34, custom_head=head,
metrics=metrics)
lr_find(learn)
learn.recorder.plot()
learn.loss_func
lr = 1e-1
learn.fit_one_cycle(10, slice(lr))
# memory footprint support libraries/code
!ln -sf /opt/bin/nvidia-smi /usr/bin/nvidia-smi
!pip install gputil
!pip install psutil
!pip install humanize
import psutil
import humanize
import os
import GPUtil as GPU
GPUs = GPU.getGPUs()
# XXX: only one GPU on Colab and isn’t guaranteed
gpu = GPUs[0]
def printm():
    # Print a one-shot snapshot of host memory (free RAM plus this process's
    # resident set size) and GPU memory stats from the module-level `gpu` handle.
    process = psutil.Process(os.getpid())
    print("Gen RAM Free: " + humanize.naturalsize( psutil.virtual_memory().available ), " | Proc size: " + humanize.naturalsize( process.memory_info().rss))
    print("GPU RAM Free: {0:.0f}MB | Used: {1:.0f}MB | Util {2:3.0f}% | Total {3:.0f}MB".format(gpu.memoryFree, gpu.memoryUsed, gpu.memoryUtil*100, gpu.memoryTotal))
printm()
```
| github_jupyter |
```
import pandas as pd #数据分析
import numpy as np #科学计算
from pandas import Series,DataFrame
data_train = pd.read_csv("/Users/zhijun/Desktop/Titanic/all/train.csv")
data_train.columns
data_train.info()
data_train.describe()
import matplotlib.pyplot as plt
fig = plt.figure()
fig.set(alpha=0.2) # 设定图表颜色alpha参数
plt.subplot2grid((4,6),(0,0))
data_train.Survived.value_counts().plot(kind='pie')
plt.title('Number of Survival')
plt.ylabel('Number of people')
plt.subplot2grid((4,6),(0,2))
data_train.Pclass.value_counts().plot(kind='bar')
plt.title('Class Distribution')
plt.ylabel('Number of people')
plt.subplot2grid((4,6),(0,4))
plt.scatter(data_train.Survived,data_train.Age)
plt.ylabel('Age')
plt.grid(b=True, which='major', axis='y')# 显示网格线
plt.title('Survival in Age')
plt.subplot2grid((4,6),(2,0),colspan=2)
data_train.Age[data_train.Pclass==1].plot(kind='kde')
data_train.Age[data_train.Pclass==2].plot(kind='kde')
data_train.Age[data_train.Pclass==3].plot(kind='kde')
plt.xlabel('Age')
plt.ylabel('Density')
plt.title('Age Density in Classes')
plt.legend(('First', 'Second','Third'),loc='best')
plt.subplot2grid((4,6),(2,4))
data_train.Embarked.value_counts().plot(kind='bar')
plt.title('Number of People from Docks')
plt.ylabel('Nubmber of People')
plt.show()
fig = plt.figure()
fig1=fig.add_subplot(141)
data_train.Survived[data_train.Pclass !=3][data_train.Sex=='female'].value_counts().plot(kind='bar',label='Female in High Cabin',color='green')
plt.ylabel('Number of People')
plt.title('Female in High Cabin')
fig2=fig.add_subplot(142,sharey=fig1)
data_train.Survived[data_train.Pclass==3][data_train.Sex=='female'].value_counts().plot(kind='bar',label='Female in Low Cabin',color='red')
plt.ylabel('Number of People')
plt.title('Female in Low Cabin')
fig3=fig.add_subplot(143,sharey=fig1)
data_train.Survived[data_train.Pclass != 3][data_train.Sex=='male'].value_counts().plot(kind='bar',color='blue')
plt.ylabel('Number of People')
plt.title('Male in High Cabin')
fig4=fig.add_subplot(144,sharey=fig1)
data_train.Survived[data_train.Pclass==3][data_train.Sex=='male'].value_counts().plot(kind='bar',color='orange')
plt.ylabel('Number of People')
plt.title('Male in Low Cabin')
new=data_train.groupby(['SibSp','Survived'])
f=pd.DataFrame(new.count()['PassengerId'])
f
p=data_train.groupby(['Parch','Survived'])
f=pd.DataFrame(p.count()['PassengerId'])
f
from sklearn.ensemble import RandomForestRegressor# 使用 RandomForestClassifier 填补缺失的年龄属性
def set_missing_ages(df):
    """Fill missing `Age` values with a RandomForestRegressor fitted on known ages.

    Parameters
    ----------
    df : DataFrame with 'Age', 'Fare', 'Parch', 'SibSp', 'Pclass' columns.

    Returns
    -------
    (df, rfr) : the DataFrame with `Age` filled in place, and the fitted
        regressor so the same model can be reused on the test set.
    """
    # Use only the numeric features for the regression.
    age_df = df[['Age', 'Fare', 'Parch', 'SibSp', 'Pclass']]
    # Split rows into known and unknown age. `.values` replaces the
    # deprecated `.as_matrix()`, which was removed in pandas >= 1.0.
    known_age = age_df[age_df.Age.notnull()].values
    unknown_age = age_df[age_df.Age.isnull()].values
    # Target is the age column; features are everything else.
    y = known_age[:, 0]
    X = known_age[:, 1:]
    # Fit the random forest on the rows where age is known.
    rfr = RandomForestRegressor(random_state=0, n_estimators=2000, n_jobs=-1)
    rfr.fit(X, y)
    # Predict ages for the rows where it is missing and fill them in.
    predictedAges = rfr.predict(unknown_age[:, 1:])
    df.loc[ (df.Age.isnull()), 'Age' ] = predictedAges
    return df, rfr
def set_Cabin_type(df):
    # Collapse the sparse Cabin column to a Yes/No indicator, in place.
    has_cabin = df.Cabin.notnull()
    df.loc[has_cabin, 'Cabin'] = "Yes"
    df.loc[~has_cabin, 'Cabin'] = "No"
    return df
data_train, rfr = set_missing_ages(data_train)
data_train = set_Cabin_type(data_train)
data_train
def set_Cabin_type(df):
    # Mark passengers with a recorded cabin as "Yes", the rest as "No".
    missing = df.Cabin.isnull()
    df.loc[~missing, 'Cabin'] = "Yes"
    df.loc[missing, 'Cabin'] = "No"
    return df
dummy_cabin=pd.get_dummies(data_train['Cabin'],prefix='Cabin')
dummy_Embarked=pd.get_dummies(data_train['Embarked'],prefix='Embarked')
dummy_Sex = pd.get_dummies(data_train['Sex'], prefix= 'Sex')
dummy_Pclass = pd.get_dummies(data_train['Pclass'], prefix= 'Pclass')
df=pd.concat([data_train,dummy_cabin,dummy_Embarked,dummy_Sex,dummy_Pclass],axis=1)
df.drop(['Pclass', 'Name', 'Sex', 'Ticket', 'Cabin', 'Embarked'],axis=1,inplace=True)
df
import sklearn.preprocessing as preprocessing
scaler = preprocessing.StandardScaler()
age_scale_param = scaler.fit(df['Age']).reshape(-1, 1)
df['Age_scaled'] = scaler.fit_transform(df['Age'], age_scale_param)
fare_scale_param = scaler.fit(df['Fare']).reshape(-1, 1)
df['Fare_scaled'] = scaler.fit_transform(df['Fare'], fare_scale_param)
df
from sklearn import linear_model
train_df=df.filter(regex='Survived|Age_.*|SibSp|Parch|Fare_.*|Cabin_.*|Embarked_.*|Sex_.*|Pclass_.*')
train_np=train_df.as_matrix()
y = train_np[:, 0]# y即Survival结果
X = train_np[:, 1:]# X即特征属性值
clf = linear_model.LogisticRegression(C=1.0, penalty='l1', tol=1e-6)
clf.fit(X, y)
clf
X.shape
data_test = pd.read_csv("/Users/zhijun/Desktop/Titanic/all/test.csv")
data_test.loc[ (data_test.Fare.isnull()), 'Fare' ] = 0
# 接着我们对test_data做和train_data中一致的特征变换
# 首先用同样的RandomForestRegressor模型填上丢失的年龄
tmp_df = data_test[['Age','Fare', 'Parch', 'SibSp', 'Pclass']]
null_age = tmp_df[data_test.Age.isnull()].as_matrix()
# 根据特征属性X预测年龄并补上
X = null_age[:, 1:]
predictedAges = rfr.predict(X)
data_test.loc[ (data_test.Age.isnull()), 'Age' ] = predictedAges
data_test = set_Cabin_type(data_test)
dummies_Cabin = pd.get_dummies(data_test['Cabin'], prefix= 'Cabin')
dummies_Embarked = pd.get_dummies(data_test['Embarked'], prefix= 'Embarked')
dummies_Sex = pd.get_dummies(data_test['Sex'], prefix= 'Sex')
dummies_Pclass = pd.get_dummies(data_test['Pclass'], prefix= 'Pclass')
df_test = pd.concat([data_test, dummies_Cabin, dummies_Embarked, dummies_Sex, dummies_Pclass], axis=1)
df_test.drop(['Pclass', 'Name', 'Sex', 'Ticket', 'Cabin', 'Embarked'], axis=1, inplace=True)
df_test
X.shape
test = df_test.filter(regex='Age_.*|SibSp|Parch|Fare_.*|Cabin_.*|Embarked_.*|Sex_.*|Pclass_.*')
predictions = clf.predict(test)
result = pd.DataFrame({'PassengerId':data_test['PassengerId'].as_matrix(), 'Survived':predictions.astype(np.int32)})
result.to_csv("logistic_regression_predictions.csv", index=False)
pd.read_csv("logistic_regression_predictions.csv")
import numpy as np
import matplotlib.pyplot as plt
from sklearn.learning_curve import learning_curve
# Use sklearn's learning_curve to get training_score and cv_score, then draw the learning curve with matplotlib
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None, n_jobs=1, 
                        train_sizes=np.linspace(.05, 1., 20), verbose=0, plot=True):
    """
    Plot the learning curve of a model on the given data.

    Parameters
    ----------
    estimator : the classifier/regressor to evaluate.
    title : title for the figure.
    X : input feature matrix (numpy array).
    y : input target vector.
    ylim : tuple (ymin, ymax) fixing the lowest and highest point of the y axis.
    cv : number of folds for cross-validation; one fold is the CV set and the
        remaining n-1 folds are used for training (default 3).
    n_jobs : number of parallel jobs (default 1).

    Returns
    -------
    (midpoint, diff) : midpoint between the final training and CV scores, and
        the gap between them (a rough bias/variance diagnostic).
    """
    # NOTE(review): `sklearn.learning_curve` is the pre-0.18 module path; in
    # modern scikit-learn this lives in `sklearn.model_selection`.
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes, verbose=verbose)
    # Mean and std of the scores across CV folds, per training-set size.
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    if plot:
        plt.figure()
        plt.title(title)
        if ylim is not None:
            plt.ylim(*ylim)
        # Axis labels (Chinese): "number of training samples" / "score".
        plt.xlabel(u"训练样本数")
        plt.ylabel(u"得分")
        plt.gca().invert_yaxis()
        plt.grid()
        # Shade +/- one standard deviation around each mean curve.
        plt.fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std, 
                         alpha=0.1, color="b")
        plt.fill_between(train_sizes, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std, 
                         alpha=0.1, color="r")
        # Legend labels (Chinese): "score on training set" / "score on CV set".
        plt.plot(train_sizes, train_scores_mean, 'o-', color="b", label=u"训练集上得分")
        plt.plot(train_sizes, test_scores_mean, 'o-', color="r", label=u"交叉验证集上得分")
        plt.legend(loc="best")
        plt.draw()
        plt.gca().invert_yaxis()
        plt.show()
    # Midpoint between the two final scores, and the size of the gap.
    midpoint = ((train_scores_mean[-1] + train_scores_std[-1]) + (test_scores_mean[-1] - test_scores_std[-1])) / 2
    diff = (train_scores_mean[-1] + train_scores_std[-1]) - (test_scores_mean[-1] - test_scores_std[-1])
    return midpoint, diff
X.shape
#plot_learning_curve(clf, u"学习曲线", X, y)
```
| github_jupyter |
# Measuring Monotonic Relationships
By Evgenia "Jenny" Nitishinskaya and Delaney Granizo-Mackenzie with example algorithms by David Edwards
Reference: DeFusco, Richard A. "Tests Concerning Correlation: The Spearman Rank Correlation Coefficient." Quantitative Investment Analysis. Hoboken, NJ: Wiley, 2007
Part of the Quantopian Lecture Series:
* [www.quantopian.com/lectures](https://www.quantopian.com/lectures)
* [github.com/quantopian/research_public](https://github.com/quantopian/research_public)
---
The Spearman Rank Correlation Coefficient allows us to determine whether or not two data series move together; that is, when one increases (decreases) the other also increases (decreases). This is more general than a linear relationship; for instance, $y = e^x$ is a monotonic function, but not a linear one. Therefore, in computing it we compare not the raw data but the ranks of the data.
This is useful when your data sets may be in different units, and therefore not linearly related (for example, the price of a square plot of land and its side length, since the price is more likely to be linear in the area). It's also suitable for data sets which do not satisfy the assumptions that other tests require, such as the observations being normally distributed as would be necessary for a t-test.
```
import numpy as np
import scipy.stats as stats
import matplotlib.pyplot as plt
import math
# Example of ranking data
l = [10, 9, 5, 7, 5]
print 'Raw data: ', l
print 'Ranking: ', list(stats.rankdata(l, method='average'))
```
## Spearman Rank Correlation
### Intuition
The intuition is that instead of looking at the relationship between the two variables, we look at the relationship between the ranks. This is robust to outliers and the scale of the data.
### Definition
The argument `method='average'` indicates that when we have a tie, we average the ranks that the numbers would occupy. For example, the two 5's above, which would take up ranks 1 and 2, each get assigned a rank of $1.5$.
To compute the Spearman rank correlation for two data sets $X$ and $Y$, each of size $n$, we use the formula
$$r_S = 1 - \frac{6 \sum_{i=1}^n d_i^2}{n(n^2 - 1)}$$
where $d_i$ is the difference between the ranks of the $i$th pair of observations, $X_i - Y_i$.
The result will always be between $-1$ and $1$. A positive value indicates a positive relationship between the variables, while a negative value indicates an inverse relationship. A value of 0 implies the absence of any monotonic relationship. This does not mean that there is no relationship; for instance, if $Y$ is equal to $X$ with a delay of 2, they are related simply and precisely, but their $r_S$ can be close to zero:
## Experiment
Let's see what happens if we draw $X$ from a poisson distribution (non-normal), and then set $Y = e^X + \epsilon$ where $\epsilon$ is drawn from a normal distribution. We'll take the spearman rank and the correlation coefficient on this data and then run the entire experiment many times. Because $e^X$ produces many values that are far away from the rest, we can think of this as modeling 'outliers' in our data. Spearman rank compresses the outliers and does better at measuring correlation. Normal correlation is confused by the outliers and on average will measure less of a relationship than is actually there.
```
## Let's see an example of this
n = 100
def compare_correlation_and_spearman_rank(n, noise):
    """Run one simulation comparing Spearman rank correlation with Pearson correlation.

    Draws X from a Poisson distribution (non-normal) and builds
    Y = exp(X) + noise * N(0, 1), so the X/Y relationship is monotonic
    but highly non-linear, with exp(X) producing outlier-like values.

    Parameters
    ----------
    n : int
        Number of (X, Y) sample pairs to draw.
    noise : float
        Scale of the Gaussian noise added to Y.

    Returns
    -------
    tuple of float
        (r_s, c_c): r_s is the Spearman rank coefficient from the textbook
        formula r_s = 1 - 6*sum(d_i^2) / (n*(n^2 - 1)); c_c is the ordinary
        Pearson correlation coefficient of X and Y.
    """
    X = np.random.poisson(size=n)
    Y = np.exp(X) + noise * np.random.normal(size=n)
    # Rank both samples; ties receive the average of the ranks they span.
    Xrank = stats.rankdata(X, method='average')
    Yrank = stats.rankdata(Y, method='average')
    diffs = Xrank - Yrank  # sign is irrelevant since the differences are squared
    r_s = 1 - 6*sum(diffs*diffs)/(n*(n**2 - 1))
    c_c = np.corrcoef(X, Y)[0,1]
    return r_s, c_c
# Repeat the simulation many times and average both coefficient estimates.
experiments = 1000
spearman_dist = np.ndarray(experiments)     # one Spearman estimate per run (entries overwritten below)
correlation_dist = np.ndarray(experiments)  # one Pearson estimate per run
for i in range(experiments):
    r_s, c_c = compare_correlation_and_spearman_rank(n, 1.0)
    spearman_dist[i] = r_s
    correlation_dist[i] = c_c
print 'Spearman Rank Coefficient: ' + str(np.mean(spearman_dist))
# Compare to the regular correlation coefficient
print 'Correlation coefficient: ' + str(np.mean(correlation_dist))
```
Let's take a look at the distribution of measured correlation coefficients and compare the spearman with the regular metric.
```
# Overlay the sampling distributions of the two estimators (alpha gives transparency).
plt.hist(spearman_dist, bins=50, alpha=0.5)
plt.hist(correlation_dist, bins=50, alpha=0.5)
plt.legend(['Spearman Rank', 'Regular Correlation'])
plt.xlabel('Correlation Coefficient')
plt.ylabel('Frequency');
```
Now let's see how the Spearman rank and Regular coefficients cope when we add more noise to the situation.
```
# Sweep the noise scale and record, for each setting, the average of both
# coefficients over many repeated experiments.
n = 100
noises = np.linspace(0, 3, 30)
experiments = 100
spearman = np.ndarray(len(noises))      # mean Spearman estimate per noise level
correlation = np.ndarray(len(noises))   # mean Pearson estimate per noise level
for i in range(len(noises)):
    # Run many experiments for each noise setting
    rank_coef = 0.0
    corr_coef = 0.0
    noise = noises[i]
    for j in range(experiments):
        r_s, c_c = compare_correlation_and_spearman_rank(n, noise)
        rank_coef += r_s
        corr_coef += c_c
    spearman[i] = rank_coef/experiments
    correlation[i] = corr_coef/experiments
plt.scatter(noises, spearman, color='r')
plt.scatter(noises, correlation)
plt.legend(['Spearman Rank', 'Regular Correlation'])
plt.xlabel('Amount of Noise')
plt.ylabel('Average Correlation Coefficient')
```
We can see that the Spearman rank correlation copes with the non-linear relationship much better at most levels of noise. Interestingly, at very high levels, it seems to do worse than regular correlation.
##Delay in correlation
Often you might have the case that one process affects another, but only after a time lag. Now let's see what happens if we add the delay.
```
n = 100
X = np.random.rand(n)
Xrank = stats.rankdata(X, method='average')
# n-2 is the second to last element
Yrank = stats.rankdata([1,1] + list(X[:(n-2)]), method='average')
diffs = Xrank - Yrank # order doesn't matter since we'll be squaring these values
r_s = 1 - 6*sum(diffs*diffs)/(n*(n**2 - 1))
print r_s
```
Sure enough, the relationship is not detected. It is important when using both regular and spearman correlation to check for lagged relationships by offsetting your data and testing for different offset values.
##Built-In Function
We can also use the `spearmanr` function in the `scipy.stats` library:
```
# Generate two random data sets
np.random.seed(161)
X = np.random.rand(10)
Y = np.random.rand(10)
# stats.spearmanr returns (coefficient, p-value)
r_s = stats.spearmanr(X, Y)
print 'Spearman Rank Coefficient: ', r_s[0]
print 'p-value: ', r_s[1]
```
We now have ourselves an $r_S$, but how do we interpret it? It's positive, so we know that the variables are not anticorrelated. It's not very large, so we know they aren't perfectly positively correlated, but it's hard to say from a glance just how significant the correlation is. Luckily, `spearmanr` also computes the p-value for this coefficient and sample size for us. We can see that the p-value here is above 0.05; therefore, we cannot claim that $X$ and $Y$ are correlated.
##Real World Example: Mutual Fund Expense Ratio
Now that we've seen how Spearman rank correlation works, we'll quickly go through the process again with some real data. For instance, we may wonder whether the expense ratio of a mutual fund is indicative of its three-year Sharpe ratio. That is, does spending more money on administration, management, etc. lower the risk or increase the returns? Quantopian does not currently support mutual funds, so we will pull the data from Yahoo Finance. Our p-value cutoff will be the usual default of 0.05.
### Data Source
Thanks to [Matthew Madurski](https://github.com/dursk) for the data. To obtain the same data:
1. Download the csv from this link. https://gist.github.com/dursk/82eee65b7d1056b469ab
2. Upload it to the 'data' folder in your research account.
```
# NOTE(review): `local_csv` appears to be a Quantopian research-environment helper
# that reads a CSV uploaded to the account's data folder — confirm before running elsewhere.
mutual_fund_data = local_csv('mutual_fund_data.csv')
expense = mutual_fund_data['Annual Expense Ratio'].values
sharpe = mutual_fund_data['Three Year Sharpe Ratio'].values
plt.scatter(expense, sharpe)
plt.xlabel('Expense Ratio')
plt.ylabel('Sharpe Ratio')
# Spearman rank correlation between expense ratio and Sharpe ratio: (coefficient, p-value)
r_S = stats.spearmanr(expense, sharpe)
print 'Spearman Rank Coefficient: ', r_S[0]
print 'p-value: ', r_S[1]
```
Our p-value is below the cutoff, which means we reject the null hypothesis and conclude that the two are correlated. The negative coefficient indicates that there is a negative correlation, and that more expensive mutual funds have worse sharpe ratios. However, there is some weird clustering in the data, it seems there are expensive groups with low sharpe ratios, and a main group whose sharpe ratio is unrelated to the expense. Further analysis would be required to understand what's going on here.
## Real World Use Case: Evaluating a Ranking Model
### NOTE: [Factor Analysis](https://www.quantopian.com/lectures/factor-analysis) now covers this topic in much greater detail
Let's say that we have some way of ranking securities and that we'd like to test how well our ranking performs in practice. In this case our model just takes the mean daily return for the last month and ranks the stocks by that metric.
We hypothesize that this will be predictive of the mean returns over the next month. To test this we score the stocks based on a lookback window, then take the spearman rank correlation of the score and the mean returns over the walk forward month.
```
symbol_list = ['A', 'AA', 'AAC', 'AAL', 'AAMC', 'AAME', 'AAN', 'AAOI', 'AAON', 'AAP', 'AAPL', 'AAT', 'AAU', 'AAV', 'AAVL', 'AAWW', 'AB', 'ABAC', 'ABAX', 'ABB', 'ABBV', 'ABC', 'ABCB', 'ABCD', 'ABCO', 'ABCW', 'ABDC', 'ABEV', 'ABG', 'ABGB']
# Get the returns over the lookback window.
# NOTE(review): `get_pricing` is the Quantopian research data API — confirm availability.
start = '2014-12-01'
end = '2015-01-01'
historical_returns = get_pricing(symbol_list, fields='price', start_date=start, end_date=end).pct_change()[1:]
# Compute our stock score: mean daily return per symbol over the lookback month
scores = np.mean(historical_returns)
print 'Our Scores\n'
print scores
print '\n'
# Walk-forward month: the period immediately after the lookback window
start = '2015-01-01'
end = '2015-02-01'
walk_forward_returns = get_pricing(symbol_list, fields='price', start_date=start, end_date=end).pct_change()[1:]
walk_forward_returns = np.mean(walk_forward_returns)
print 'The Walk Forward Returns\n'
print walk_forward_returns
print '\n'
plt.scatter(scores, walk_forward_returns)
plt.xlabel('Scores')
plt.ylabel('Walk Forward Returns')
# Test whether the lookback score ranks predict the walk-forward return ranks
r_s = stats.spearmanr(scores, walk_forward_returns)
print 'Correlation Coefficient: ' + str(r_s[0])
print 'p-value: ' + str(r_s[1])
```
The p-value indicates that we cannot reject the null hypothesis — our ranking was no better than random. This is a really good check of any ranking system one devises for constructing a long-short equity portfolio.
*This presentation is for informational purposes only and does not constitute an offer to sell, a solicitation to buy, or a recommendation for any security; nor does it constitute an offer to provide investment advisory or other services by Quantopian, Inc. ("Quantopian"). Nothing contained herein constitutes investment advice or offers any opinion with respect to the suitability of any security, and any views expressed herein should not be taken as advice to buy, sell, or hold any security or as an endorsement of any security or company. In preparing the information contained herein, Quantopian, Inc. has not taken into account the investment needs, objectives, and financial circumstances of any particular investor. Any views expressed and data illustrated herein were prepared based upon information, believed to be reliable, available to Quantopian, Inc. at the time of publication. Quantopian makes no guarantees as to their accuracy or completeness. All information is subject to change and may quickly become unreliable for various reasons, including changes in market conditions or economic circumstances.*
| github_jupyter |
# Spacy
### Models
Spacy comes with a variety of different models that can used per language. For instance, the models for English are available [here](https://spacy.io/models/en). You'll need to download each model separately:
```python
python3 -m spacy download en_core_web_sm
python3 -m spacy download en_core_web_md
```
## Pattern Matching Using Spacy
The below code and example is from Ashiq KS's article [Rule-Based Matching with spacy](https://medium.com/@ashiqgiga07/rule-based-matching-with-spacy-295b76ca2b68):
```
# The input text string is converted to a Document object below.
text = '''
Computer programming is the process of writing instructions that get executed by computers.
The instructions, also known as code, are written in a programming language which the computer
can understand and use to perform a task or solve a problem. Basic computer programming involves
the analysis of a problem and development of a logical sequence of instructions to solve it.
There can be numerous paths to a solution and the computer programmer seeks to design and
code that which is most efficient. Among the programmer’s tasks are understanding requirements,
determining the right programming language to use, designing or architecting the solution, coding,
testing, debugging and writing documentation so that the solution can be easily
understood by other programmers.Computer programming is at the heart of computer science. It is the
implementation portion of software development, application development
and software engineering efforts, transforming ideas and theories into actual, working solutions.
'''
from spacy.matcher import Matcher  # import Matcher class from spacy
# import the Span class to extract the words from the document object
from spacy.tokens import Span
# Load the Language class with the English model 'en_core_web_sm'.
# NOTE(review): `spacy` itself is not imported in this cell — presumably imported elsewhere; confirm.
nlp = spacy.load("en_core_web_sm")
doc = nlp(text) # convert the string above to a document
# instantiate a new Matcher class object bound to this pipeline's vocabulary
matcher = Matcher(nlp.vocab)
```
### Define the Target Pattern
The `pattern` object that you define should be a list of dictionary elements, each dictionary describing the token to match for.
Here, we match a token whose lowercase form is "computer" and whose part of speech is a noun, followed by a token whose part of speech is anything except a verb.
```
# Define the pattern: a list of per-token match dictionaries.
# Token 1: lowercase form is "computer" AND part of speech is NOUN.
# Token 2: any token whose part of speech is NOT a verb.
pattern = [{'LOWER': 'computer', 'POS': 'NOUN'},
           {'POS':{'NOT_IN': ['VERB']}}]
```
### Load the Pattern into the Matcher
```
# Add the pattern to the previously created matcher object under the key "Matching".
# NOTE(review): this is the spaCy v2 signature (key, on_match, *patterns); in spaCy v3
# it changed to matcher.add("Matching", [pattern]) — confirm which spaCy version is installed.
matcher.add("Matching", None, pattern)
```
## Using Regular Expressions in Spacy
The below example can be found at https://spacy.io/usage/rule-based-matching. It uses the `re.finditer()` function to
quickly iterate through all the matches found.
```
import spacy
import re
nlp = spacy.load("en_core_web_sm")
doc = nlp("The United States of America (USA) are commonly known as the United States (U.S. or US) or America.")
# Matches variants such as "United States", "U.S.", "US", "u.s."
expression = r"[Uu](nited|\.?) ?[Ss](tates|\.?)"
for match in re.finditer(expression, doc.text):
    start, end = match.span()  # character offsets of the regex match
    span = doc.char_span(start, end)
    # This is a Span object or None if match doesn't map to valid token sequence
    if span is not None:
        print("Found match:", span.text)
```
## Part of Speech Tagging
```
!python3 -m spacy download en_core_web_sm
!python3 -m spacy download en_core_web_md
import en_core_web_sm
import spacy
from scipy.spatial.distance import cosine
import spacy  # duplicate import; harmless
nlp = spacy.load('en_core_web_md')
import pandas as pd
# Collect one row of linguistic attributes per token, then tabulate.
rows = []
doc = nlp(u"Steve Jobs and Apple is looking at buying U.K. startup for $1 billion")
for token in doc:
    rows.append((token.text, token.lemma_, token.pos_, token.tag_, token.dep_,
                 token.shape_, token.is_alpha, token.is_stop))
data = pd.DataFrame(rows, columns=["text", "lemma", "part_of_speech", "tag", "dependency", "shape", "is_alphanumeric", "is_stopword"])
data.head()
```
### Named Entity Recognition
```
# NOTE(review): `doc` is built with whichever `nlp` pipeline was loaded in an earlier cell;
# the reassignment of `nlp` three lines below only affects later calls — confirm intended order.
doc = nlp(u"Steve Jobs and Apple is looking at buying U.K. startup for $1 billion")
import en_core_web_sm
import spacy
from scipy.spatial.distance import cosine
nlp = en_core_web_sm.load()
# Print each named entity with its character offsets and entity label
for ent in doc.ents:
    print(ent.text, ent.start_char, ent.end_char, ent.label_)
# visualize this using displacy:
from spacy import displacy
displacy.render(doc, style="ent", jupyter=True)
```
# Word Embeddings (word2vec Introduction)
## Continuous Bag of Words (Use Context to Predict Target Word)

## Softmax

## Skipgram

## Softmax

```
import en_core_web_sm
import spacy
from scipy.spatial.distance import cosine
nlp = en_core_web_sm.load()
tokens = nlp(u'dog cat Beijing sad depressed couch sofa canine China Chinese France Paris banana')
# Pairwise cosine similarity between every distinct pair of token vectors
# (1 - cosine distance == cosine similarity).
for token1 in tokens:
    for token2 in tokens:
        if token1 != token2:
            print(f" {token1} - {token2}: {1 - cosine(token1.vector, token2.vector)}")
```
# Finding Most Similar Words (Using Our Old Methods)
```
from sklearn.feature_extraction.text import CountVectorizer
# inspect the default settings for CountVectorizer
CountVectorizer()
reviews = open("poor_amazon_toy_reviews.txt").readlines()
# Unigram counts; keep only the 500 most frequent alphabetic words of length >= 2.
vectorizer = CountVectorizer(ngram_range=(1, 1),
                             stop_words="english",
                             max_features=500,token_pattern='(?u)\\b[a-zA-Z][a-zA-Z]+\\b')
X = vectorizer.fit_transform(reviews)
# NOTE(review): `pd` is assumed to be imported in an earlier cell — confirm.
data = pd.DataFrame(X.toarray(), columns=vectorizer.get_feature_names())
data.head()
from sklearn.metrics.pairwise import cosine_similarity
# create similarity matrix (word x word: cosine similarity of count columns)
similarity_matrix = pd.DataFrame(cosine_similarity(data.T.values),
                                 columns=vectorizer.get_feature_names(),
                                 index=vectorizer.get_feature_names())
# unstack matrix into table
similarity_table = similarity_matrix.rename_axis(None).rename_axis(None, axis=1).stack().reset_index()
# rename columns
similarity_table.columns = ["word1", "word2", "similarity"]
similarity_table.shape
# drop the (near-)diagonal self-similarity entries
similarity_table = similarity_table[similarity_table["similarity"] < 0.99]
similarity_table.shape
similarity_table.sort_values(by="similarity", ascending=False).drop_duplicates(
    subset="similarity", keep="first").head(10)
top_500_words = vectorizer.get_feature_names()
```
# Exercise: Similar Words Using Word Embeddings
```
# load into spacy your top 500 words
tokens = nlp(f'{" ".join(top_500_words)}')
from itertools import product
# create a list of similarity tuples
similarity_tuples = []
for token1, token2 in product(tokens, repeat=2):
similarity_tuples.append((token1, token2, token1.similarity(token2)))
similarities = pd.DataFrame(similarity_tuples, columns=["word1","word2", "score"])
# find similar words
similarities[similarities["score"] < 1].sort_values(
by="score", ascending=False).drop_duplicates(
subset="score", keep="first").head(5)
```
| github_jupyter |
# In this notebook an estimator for the Volume will be trained. No hyperparameters will be searched for, and the ones from the 'Close' values estimator will be used instead.
```
# Basic imports
import os
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import datetime as dt
import scipy.optimize as spo
import sys
from time import time
from sklearn.metrics import r2_score, median_absolute_error
%matplotlib inline
%pylab inline
pylab.rcParams['figure.figsize'] = (20.0, 10.0)
%load_ext autoreload
%autoreload 2
sys.path.append('../../')
from sklearn.externals import joblib
import utils.preprocessing as pp
import predictor.feature_extraction as fe
```
## Let's generate the datasets
```
def generate_one_set(params):
    """Build and pickle one (x, y) training/validation dataset for the Volume predictor.

    params is a Series-like mapping with keys: 'train_val_time', 'base_days',
    'step_days', 'ahead_days', 'GOOD_DATA_RATIO', 'SAMPLES_GOOD_DATA_RATIO'.
    Writes x/y pickles named after the (base_days, ahead_days) pair and
    returns (pid, x, y) where pid is that identifier string.

    NOTE(review): `pp` (utils.preprocessing) and `fe` (predictor.feature_extraction)
    are project modules imported in an earlier cell.
    """
    # print(('-'*70 + '\n {}, {} \n' + '-'*70).format(params['base_days'].values, params['ahead_days'].values))
    tic = time()
    train_val_time = int(params['train_val_time'])
    base_days = int(params['base_days'])
    step_days = int(params['step_days'])
    ahead_days = int(params['ahead_days'])
    print('Generating: base{}_ahead{}'.format(base_days, ahead_days))
    pid = 'base{}_ahead{}'.format(base_days, ahead_days)
    # Getting the data
    data_df = pd.read_pickle('../../data/data_train_val_df.pkl')
    today = data_df.index[-1]  # Real date
    print(pid + ') data_df loaded')
    # Drop symbols with many missing points
    data_df = pp.drop_irrelevant_symbols(data_df, params['GOOD_DATA_RATIO'])
    print(pid + ') Irrelevant symbols dropped.')
    # Generate the intervals for the predictor
    x, y = fe.generate_train_intervals(data_df,
                                       train_val_time,
                                       base_days,
                                       step_days,
                                       ahead_days,
                                       today,
                                       fe.feature_volume_one_to_one,
                                       target_feature=fe.VOLUME_FEATURE)
    print(pid + ') Intervals generated')
    # Drop "bad" samples and fill missing data; concat keeps x/y rows aligned
    # so that dropping a sample removes both its features and its target.
    x_y_df = pd.concat([x, y], axis=1)
    x_y_df = pp.drop_irrelevant_samples(x_y_df, params['SAMPLES_GOOD_DATA_RATIO'])
    x = x_y_df.iloc[:, :-1]
    y = x_y_df.iloc[:, -1]
    x = pp.fill_missing(x)
    print(pid + ') Irrelevant samples dropped and missing data filled.')
    # Pickle that
    x.to_pickle('../../data/x_volume_{}.pkl'.format(pid))
    y.to_pickle('../../data/y_volume_{}.pkl'.format(pid))
    toc = time()
    print('%s) %i intervals generated in: %i seconds.' % (pid, x.shape[0], (toc-tic)))
    return pid, x, y
# Load the best hyperparameters found for ahead_days == 1 (row label 1) and keep only
# the keys generate_one_set needs.
best_params_df = pd.read_pickle('../../data/best_params_final_df.pkl').loc[1,:]
to_drop = [
    'model',
    'mre',
    'r2',
    'x_filename',
    'y_filename',
    'train_days'
]
best_params_df.drop(to_drop, inplace=True)
best_params_df
generate_one_set(best_params_df)
# Sanity-check the pickled outputs
x_volume = pd.read_pickle('../../data/x_volume_base112_ahead1.pkl')
print(x_volume.shape)
x_volume.head()
y_volume = pd.read_pickle('../../data/y_volume_base112_ahead1.pkl')
print(y_volume.shape)
y_volume.head()
```
## Let's generate the test dataset, also
```
def generate_one_test_set(params, data_df):
    """Build and pickle one (x, y) test dataset for the Volume predictor.

    Mirrors generate_one_set, but takes the test data frame as an argument and,
    instead of re-running the symbol filter, keeps exactly the symbols that
    survived filtering in the training set (read back from the training y pickle)
    so train and test cover the same universe.

    Parameters
    ----------
    params : Series-like
        Mapping with keys 'train_val_time', 'base_days', 'step_days',
        'ahead_days', 'SAMPLES_GOOD_DATA_RATIO'.
    data_df : pd.DataFrame
        Test-period market data, indexed by date with (feature, symbol) columns.

    Returns
    -------
    tuple
        (pid, x, y) — the "base{b}_ahead{a}" identifier and the feature/target frames.

    NOTE(review): `pp` and `fe` are project modules imported in an earlier cell.
    """
    # print(('-'*70 + '\n {}, {} \n' + '-'*70).format(params['base_days'].values, params['ahead_days'].values))
    tic = time()
    train_val_time = int(params['train_val_time'])
    base_days = int(params['base_days'])
    step_days = int(params['step_days'])
    ahead_days = int(params['ahead_days'])
    print('Generating: base{}_ahead{}'.format(base_days, ahead_days))
    pid = 'base{}_ahead{}'.format(base_days, ahead_days)
    # Getting the data
    today = data_df.index[-1]  # Real date
    print(pid + ') data_df loaded')
    # Keep only the symbols that survived the training-set filter
    y_train_df = pd.read_pickle('../../data/y_volume_{}.pkl'.format(pid))
    kept_symbols = y_train_df.index.get_level_values(1).unique().tolist()
    data_df = data_df.loc[:, (slice(None), kept_symbols)]
    print(pid + ') Irrelevant symbols dropped.')
    # Generate the intervals for the predictor
    x, y = fe.generate_train_intervals(data_df,
                                       train_val_time,
                                       base_days,
                                       step_days,
                                       ahead_days,
                                       today,
                                       fe.feature_volume_one_to_one,
                                       target_feature=fe.VOLUME_FEATURE)
    print(pid + ') Intervals generated')
    # Drop "bad" samples and fill missing data (x/y concatenated to stay row-aligned)
    x_y_df = pd.concat([x, y], axis=1)
    x_y_df = pp.drop_irrelevant_samples(x_y_df, params['SAMPLES_GOOD_DATA_RATIO'])
    x = x_y_df.iloc[:, :-1]
    y = x_y_df.iloc[:, -1]
    x = pp.fill_missing(x)
    print(pid + ') Irrelevant samples dropped and missing data filled.')
    # Pickle that
    x.to_pickle('../../data/x_volume_{}_test.pkl'.format(pid))
    y.to_pickle('../../data/y_volume_{}_test.pkl'.format(pid))
    toc = time()
    print('%s) %i intervals generated in: %i seconds.' % (pid, x.shape[0], (toc-tic)))
    # BUGFIX: the original `return pid, x,` dropped y via a trailing-comma typo,
    # inconsistent with generate_one_set which returns (pid, x, y).
    return pid, x, y
# Generate the held-out test dataset with the same parameters, then sanity-check it.
data_test_df = pd.read_pickle('../../data/data_test_df.pkl')
generate_one_test_set(best_params_df, data_test_df)
x_volume_test = pd.read_pickle('../../data/x_volume_base112_ahead1_test.pkl')
print(x_volume_test.shape)
x_volume_test.head()
y_volume_test = pd.read_pickle('../../data/y_volume_base112_ahead1_test.pkl')
print(y_volume_test.shape)
y_volume_test.head()
```
## Let's train a predictor for the 'Volume' with the same hyperparameters as for the 'Close' one.
```
best_params_df = pd.read_pickle('../../data/best_params_final_df.pkl')
import predictor.feature_extraction as fe
from predictor.linear_predictor import LinearPredictor
import utils.misc as misc
import predictor.evaluation as ev
ahead_days = 1
# Get some parameters
train_days = int(best_params_df.loc[ahead_days, 'train_days'])
GOOD_DATA_RATIO, \
train_val_time, \
base_days, \
step_days, \
ahead_days, \
SAMPLES_GOOD_DATA_RATIO, \
x_filename, \
y_filename = misc.unpack_params(best_params_df.loc[ahead_days,:])
pid = 'base{}_ahead{}'.format(base_days, ahead_days)
# Get the datasets
x_train = pd.read_pickle('../../data/x_volume_{}.pkl'.format(pid))
y_train = pd.read_pickle('../../data/y_volume_{}.pkl'.format(pid))
x_test = pd.read_pickle('../../data/x_volume_{}_test.pkl'.format(pid)).sort_index()
y_test = pd.DataFrame(pd.read_pickle('../../data/y_volume_{}_test.pkl'.format(pid))).sort_index()
# Let's cut the training set to use only the required number of samples:
# keep rows from `train_days` market days before the last training date onward.
end_date = x_train.index.levels[0][-1]
start_date = fe.add_market_days(end_date, -train_days)
x_sub_df = x_train.loc[(slice(start_date,None),slice(None)),:]
y_sub_df = pd.DataFrame(y_train.loc[(slice(start_date,None),slice(None))])
# Create the estimator and train
estimator = LinearPredictor()
estimator.fit(x_sub_df, y_sub_df)
# Get the training and test predictions
y_train_pred = estimator.predict(x_sub_df)
y_test_pred = estimator.predict(x_test)
# Get the training and test metrics for each symbol
metrics_train = ev.get_metrics_df(y_sub_df, y_train_pred)
metrics_test = ev.get_metrics_df(y_test, y_test_pred)
# Show the mean metrics
metrics_df = pd.DataFrame(columns=['train', 'test'])
metrics_df['train'] = metrics_train.mean()
metrics_df['test'] = metrics_test.mean()
print('Mean metrics: \n{}\n{}'.format(metrics_df,'-'*70))
# Plot the metrics in time. get_metrics_in_time returns (r2_series, mre_series, dates)
# — presumably; confirm against predictor.evaluation.
metrics_train_time = ev.get_metrics_in_time(y_sub_df, y_train_pred, base_days + ahead_days)
metrics_test_time = ev.get_metrics_in_time(y_test, y_test_pred, base_days + ahead_days)
plt.plot(metrics_train_time[2], metrics_train_time[0], label='train', marker='.')
plt.plot(metrics_test_time[2], metrics_test_time[0], label='test', marker='.')
plt.title('$r^2$ metrics')
plt.legend()
plt.figure()
plt.plot(metrics_train_time[2], metrics_train_time[1], label='train', marker='.')
plt.plot(metrics_test_time[2], metrics_test_time[1], label='test', marker='.')
plt.title('MRE metrics')
plt.legend()
# Persist the trained estimator (joblib imported earlier from sklearn.externals — deprecated API)
joblib.dump(estimator, '../../data/best_volume_predictor.pkl')
```
| github_jupyter |
## Dependencies
```
import json, glob
from tweet_utility_scripts import *
from tweet_utility_preprocess_roberta_scripts_aux import *
from transformers import TFRobertaModel, RobertaConfig
from tokenizers import ByteLevelBPETokenizer
from tensorflow.keras import layers
from tensorflow.keras.models import Model
```
# Load data
```
# NOTE(review): `pd` and `display` are not imported in this notebook's visible import cell
# — presumably provided via the wildcard helper imports; confirm.
test = pd.read_csv('/kaggle/input/tweet-sentiment-extraction/test.csv')
print('Test samples: %s' % len(test))
display(test.head())
```
# Model parameters
```
# Paths to the trained fold weights and their training-time configuration
input_base_path = '/kaggle/input/166-robertabase-last/'
with open(input_base_path + 'config.json') as json_file:
    config = json.load(json_file)
config
# Pretrained RoBERTa assets (vocab/merges/backbone weights/model config)
base_path = '/kaggle/input/qa-transformers/roberta/'
vocab_path = base_path + 'roberta-base-vocab.json'
merges_path = base_path + 'roberta-base-merges.txt'
config['base_model_path'] = base_path + 'roberta-base-tf_model.h5'
config['config_path'] = base_path + 'roberta-base-config.json'
# One .h5 checkpoint per trained fold; sorted for deterministic ensembling order
model_path_list = glob.glob(input_base_path + '*.h5')
model_path_list.sort()
print('Models to predict:')
print(*model_path_list, sep = "\n")
```
# Tokenizer
```
# Byte-level BPE tokenizer matching RoBERTa's vocabulary; lowercasing mirrors the
# preprocessing applied to the text below.
tokenizer = ByteLevelBPETokenizer(vocab_file=vocab_path, merges_file=merges_path,
                                  lowercase=True, add_prefix_space=True)
```
# Pre process
```
# Normalize the raw tweets (fill NaN, lowercase, strip) before tokenization
test['text'].fillna('', inplace=True)
test["text"] = test["text"].apply(lambda x: x.lower())
test["text"] = test["text"].apply(lambda x: x.strip())
# get_data_test / preprocess_roberta_test come from the project helper scripts imported above
x_test, x_test_aux, x_test_aux_2 = get_data_test(test, tokenizer, config['MAX_LEN'], preprocess_fn=preprocess_roberta_test)
```
# Model
```
module_config = RobertaConfig.from_pretrained(config['config_path'], output_hidden_states=False)
def model_fn(MAX_LEN):
    """Build the span-extraction model: RoBERTa backbone with two per-token
    softmax heads predicting the start and end positions of the selected text.

    Reads `config` and `module_config` from the enclosing notebook scope.
    NOTE(review): `tf` is not imported in the visible import cell — presumably
    brought in by the wildcard helper imports; confirm.
    """
    input_ids = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name='input_ids')
    attention_mask = layers.Input(shape=(MAX_LEN,), dtype=tf.int32, name='attention_mask')
    base_model = TFRobertaModel.from_pretrained(config['base_model_path'], config=module_config, name="base_model")
    last_hidden_state, _ = base_model({'input_ids': input_ids, 'attention_mask': attention_mask})
    # Start head: per-token logit -> softmax over sequence positions
    x_start = layers.Dropout(.1)(last_hidden_state)
    x_start = layers.Dense(1)(x_start)
    x_start = layers.Flatten()(x_start)
    y_start = layers.Activation('softmax', name='y_start')(x_start)
    # End head: same structure, independent weights
    x_end = layers.Dropout(.1)(last_hidden_state)
    x_end = layers.Dense(1)(x_end)
    x_end = layers.Flatten()(x_end)
    y_end = layers.Activation('softmax', name='y_end')(x_end)
    model = Model(inputs=[input_ids, attention_mask], outputs=[y_start, y_end])
    return model
```
# Make predictions
```
# Sum the start/end probability distributions across all fold checkpoints
# (an unnormalized ensemble average — only the argmax is used downstream).
NUM_TEST_IMAGES = len(test)
test_start_preds = np.zeros((NUM_TEST_IMAGES, config['MAX_LEN']))
test_end_preds = np.zeros((NUM_TEST_IMAGES, config['MAX_LEN']))
for model_path in model_path_list:
    print(model_path)
    model = model_fn(config['MAX_LEN'])
    model.load_weights(model_path)
    test_preds = model.predict(get_test_dataset(x_test, config['BATCH_SIZE']))
    test_start_preds += test_preds[0]
    test_end_preds += test_preds[1]
```
# Post process
```
# Take the most likely start/end positions from the summed ensemble distributions
test['start'] = test_start_preds.argmax(axis=-1)
test['end'] = test_end_preds.argmax(axis=-1)
test['text_len'] = test['text'].apply(lambda x : len(x))
test['text_wordCnt'] = test['text'].apply(lambda x : len(x.split(' ')))
# NOTE(review): start/end are token indices but text_len is a character count —
# presumably a loose upper bound used only to keep indices in range; confirm against `decode`.
test["end"].clip(0, test["text_len"], inplace=True)
test["start"].clip(0, test["end"], inplace=True)  # ensure start <= end
test['selected_text'] = test.apply(lambda x: decode(x['start'], x['end'], x['text'], config['question_size'], tokenizer), axis=1)
test["selected_text"].fillna(test["text"], inplace=True)  # fall back to the full tweet
```
# Visualize predictions
```
display(test.head(10))
```
# Test set predictions
```
# Fill the sample submission with our predicted spans and write it out
submission = pd.read_csv('/kaggle/input/tweet-sentiment-extraction/sample_submission.csv')
submission['selected_text'] = test["selected_text"]
submission.to_csv('submission.csv', index=False)
submission.head(10)
```
| github_jupyter |
# Monitoring Data Drift
Over time, models can become less effective at predicting accurately due to changing trends in feature data. This phenomenon is known as *data drift*, and it's important to monitor your machine learning solution to detect it so you can retrain your models if necessary.
In this lab, you'll configure data drift monitoring for datasets.
## Before you start
In addition to the latest version of the **azureml-sdk** and **azureml-widgets** packages, you'll need the **azureml-datadrift** package to run the code in this notebook. Run the cell below to verify that it is installed.
```
!pip show azureml-datadrift
```
## Connect to your workspace
With the required SDK packages installed, now you're ready to connect to your workspace.
> **Note**: If you haven't already established an authenticated session with your Azure subscription, you'll be prompted to authenticate by clicking a link, entering an authentication code, and signing into Azure.
```
from azureml.core import Workspace
# Load the workspace from the saved config file (config.json created by the Azure ML setup)
ws = Workspace.from_config()
print('Ready to work with', ws.name)
```
## Create a *baseline* dataset
To monitor a dataset for data drift, you must register a *baseline* dataset (usually the dataset used to train your model) to use as a point of comparison with data collected in the future.
```
from azureml.core import Datastore, Dataset
# Upload the baseline data (the training-time diabetes CSVs) to the default datastore
default_ds = ws.get_default_datastore()
default_ds.upload_files(files=['./data/diabetes.csv', './data/diabetes2.csv'],
                        target_path='diabetes-baseline',
                        overwrite=True,
                        show_progress=True)
# Create and register the baseline dataset used as the drift comparison point
print('Registering baseline dataset...')
baseline_data_set = Dataset.Tabular.from_delimited_files(path=(default_ds, 'diabetes-baseline/*.csv'))
baseline_data_set = baseline_data_set.register(workspace=ws,
                                               name='diabetes baseline',
                                               description='diabetes baseline data',
                                               tags = {'format':'CSV'},
                                               create_new_version=True)
print('Baseline dataset registered!')
```
## Create a *target* dataset
Over time, you can collect new data with the same features as your baseline training data. To compare this new data to the baseline data, you must define a target dataset that includes the features you want to analyze for data drift as well as a timestamp field that indicates the point in time when the new data was current -this enables you to measure data drift over temporal intervals. The timestamp can either be a field in the dataset itself, or derived from the folder and filename pattern used to store the data. For example, you might store new data in a folder hierarchy that consists of a folder for the year, containing a folder for the month, which in turn contains a folder for the day; or you might just encode the year, month, and day in the file name like this: *data_2020-01-29.csv*; which is the approach taken in the following code:
```
import datetime as dt
import pandas as pd
print('Generating simulated data...')
# Load the smaller of the two data files
data = pd.read_csv('data/diabetes2.csv')
# We'll generate data for the past 6 weeks
weeknos = reversed(range(6))
file_paths = []
for weekno in weeknos:
    # Get the date X weeks ago
    data_date = dt.date.today() - dt.timedelta(weeks=weekno)
    # Modify data to create some drift; the shifts compound week over week
    # because `data` is mutated in place on every iteration.
    data['Pregnancies'] = data['Pregnancies'] + 1
    data['Age'] = round(data['Age'] * 1.2).astype(int)
    data['BMI'] = data['BMI'] * 1.1
    # Save the file with the date encoded in the filename
    # NOTE(review): to_csv writes the DataFrame index as an extra unnamed column — confirm intended.
    file_path = 'data/diabetes_{}.csv'.format(data_date.strftime("%Y-%m-%d"))
    data.to_csv(file_path)
    file_paths.append(file_path)
# Upload the files
path_on_datastore = 'diabetes-target'
default_ds.upload_files(files=file_paths,
                        target_path=path_on_datastore,
                        overwrite=True,
                        show_progress=True)
# Use the folder partition format to define a dataset with a 'date' timestamp column
partition_format = path_on_datastore + '/diabetes_{date:yyyy-MM-dd}.csv'
target_data_set = Dataset.Tabular.from_delimited_files(path=(default_ds, path_on_datastore + '/*.csv'),
                                                       partition_format=partition_format)
# Register the target dataset, with 'date' marked as its timestamp column
print('Registering target dataset...')
target_data_set = target_data_set.with_timestamp_columns('date').register(workspace=ws,
                                                                          name='diabetes target',
                                                                          description='diabetes target data',
                                                                          tags = {'format':'CSV'},
                                                                          create_new_version=True)
print('Target dataset registered!')
```
## Create a data drift monitor
Now you're ready to create a data drift monitor for the diabetes data. The data drift monitor will run periodically or on-demand to compare the baseline dataset with the target dataset, to which new data will be added over time.
### Create a compute target
To run the data drift monitor, you'll need a compute target. Run the following cell to specify a compute cluster (if it doesn't exist, it will be created).
> **Important**: Change *your-compute-cluster* to the name of your compute cluster in the code below before running it! Cluster names must be globally unique names between 2 to 16 characters in length. Valid characters are letters, digits, and the - character.
```
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException
cluster_name = "your-compute-cluster"
try:
    # Check for existing compute target
    training_cluster = ComputeTarget(workspace=ws, name=cluster_name)
    print('Found existing cluster, use it.')
except ComputeTargetException:
    # If it doesn't already exist, create it
    try:
        compute_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_DS11_V2', max_nodes=2)
        training_cluster = ComputeTarget.create(ws, cluster_name, compute_config)
        training_cluster.wait_for_completion(show_output=True)
    except Exception as ex:
        # Creation can fail on quota/naming issues; surface the error rather than stopping the notebook
        print(ex)
```
> **Note**: Compute instances and clusters are based on standard Azure virtual machine images. For this exercise, the *Standard_DS11_v2* image is recommended to achieve the optimal balance of cost and performance. If your subscription has a quota that does not include this image, choose an alternative image; but bear in mind that a larger image may incur higher cost and a smaller image may not be sufficient to complete the tasks. Alternatively, ask your Azure administrator to extend your quota.
### Define the data drift monitor
Now you're ready to use a **DataDriftDetector** class to define the data drift monitor for your data. You can specify the features you want to monitor for data drift, the name of the compute target to be used to run the monitoring process, the frequency at which the data should be compared, the data drift threshold above which an alert should be triggered, and the latency (in hours) to allow for data collection.
```
from azureml.datadrift import DataDriftDetector
# set up feature list: only these columns are monitored for drift
features = ['Pregnancies', 'Age', 'BMI']
# set up data drift detector: compare baseline vs. target weekly, alert when
# drift magnitude exceeds 0.3, and allow 24 hours of data-collection latency
monitor = DataDriftDetector.create_from_datasets(ws, 'mslearn-diabates-drift', baseline_data_set, target_data_set,
                                                 compute_target=cluster_name,
                                                 frequency='Week',
                                                 feature_list=features,
                                                 drift_threshold=.3,
                                                 latency=24)
monitor
```
## Backfill the data drift monitor
You have a baseline dataset and a target dataset that includes simulated weekly data collection for six weeks. You can use this to backfill the monitor so that it can analyze data drift between the original baseline and the target data.
> **Note** This may take some time to run, as the compute target must be started to run the backfill analysis. The widget may not always update to show the status, so click the link to observe the experiment status in Azure Machine Learning studio!
```
import datetime as dt  # `dt` is used below but never imported in this file
from azureml.widgets import RunDetails
# Backfill the monitor over the past six weeks so it analyses drift between
# the baseline and each simulated week of target data.
backfill = monitor.backfill(dt.datetime.now() - dt.timedelta(weeks=6), dt.datetime.now())
RunDetails(backfill).show()
backfill.wait_for_completion()
```
## Analyze data drift
You can use the following code to examine data drift for the points in time collected in the backfill run.
```
# Fetch and print every data-drift metric computed by the backfill run.
drift_metrics = backfill.get_metrics()
for name, value in drift_metrics.items():
    print(name, value)
```
You can also visualize the data drift metrics in [Azure Machine Learning studio](https://ml.azure.com) by following these steps:
1. On the **Datasets** page, view the **Dataset monitors** tab.
2. Click the data drift monitor you want to view.
3. Select the date range over which you want to view data drift metrics (if the column chart does not show multiple weeks of data, wait a minute or so and click **Refresh**).
4. Examine the charts in the **Drift overview** section at the top, which show overall drift magnitude and the drift contribution per feature.
5. Explore the charts in the **Feature detail** section at the bottom, which enable you to see various measures of drift for individual features.
> **Note**: For help understanding the data drift metrics, see the [How to monitor datasets](https://docs.microsoft.com/azure/machine-learning/how-to-monitor-datasets#understanding-data-drift-results) in the Azure Machine Learning documentation.
## Explore further
This lab is designed to introduce you to the concepts and principles of data drift monitoring. To learn more about monitoring data drift using datasets, see the [Detect data drift on datasets](https://docs.microsoft.com/azure/machine-learning/how-to-monitor-datasets) in the Azure Machine Learning documentation.
You can also collect data from published services and use it as a target dataset for data drift monitoring. See [Collect data from models in production](https://docs.microsoft.com/azure/machine-learning/how-to-enable-data-collection) for details.
| github_jupyter |
# Self-Driving Car Engineer Nanodegree
## Project: **Finding Lane Lines on the Road**
***
In this project, you will use the tools you learned about in the lesson to identify lane lines on the road. You can develop your pipeline on a series of individual images, and later apply the result to a video stream (really just a series of images). Check out the video clip "raw-lines-example.mp4" (also contained in this repository) to see what the output should look like after using the helper functions below.
Once you have a result that looks roughly like "raw-lines-example.mp4", you'll need to get creative and try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video "P1_example.mp4". Ultimately, you would like to draw just one line for the left side of the lane, and one for the right.
In addition to implementing code, there is a brief writeup to complete. The writeup should be completed in a separate file, which can be either a markdown file or a pdf document. There is a [write up template](https://github.com/udacity/CarND-LaneLines-P1/blob/master/writeup_template.md) that can be used to guide the writing process. Completing both the code in the Ipython notebook and the writeup template will cover all of the [rubric points](https://review.udacity.com/#!/rubrics/322/view) for this project.
---
Let's have a look at our first image called 'test_images/solidWhiteRight.jpg'. Run the 2 cells below (hit Shift-Enter or the "play" button above) to display the image.
**Note: If, at any point, you encounter frozen display windows or other confounding issues, you can always start again with a clean slate by going to the "Kernel" menu above and selecting "Restart & Clear Output".**
---
**The tools you have are color selection, region of interest selection, grayscaling, Gaussian smoothing, Canny Edge Detection and Hough Transform line detection. You are also free to explore and try other techniques that were not presented in the lesson. Your goal is to piece together a pipeline to detect the line segments in the image, then average/extrapolate them and draw them onto the image for display (as below). Once you have a working pipeline, try it out on the video stream below.**
---
<figure>
<img src="examples/line-segments-example.jpg" width="380" alt="Combined Image" />
<figcaption>
<p></p>
<p style="text-align: center;"> Your output should look something like this (above) after detecting line segments using the helper functions below </p>
</figcaption>
</figure>
<p></p>
<figure>
<img src="examples/laneLines_thirdPass.jpg" width="380" alt="Combined Image" />
<figcaption>
<p></p>
<p style="text-align: center;"> Your goal is to connect/average/extrapolate line segments to get output like this</p>
</figcaption>
</figure>
**Run the cell below to import some packages. If you get an `import error` for a package you've already installed, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, consult the forums for more troubleshooting tips.**
## Import Packages
```
#importing some useful packages
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
%matplotlib inline
```
## Read in an Image
```
#reading in an image
image = mpimg.imread('test_images/solidWhiteRight.jpg')
#printing out some stats and plotting
print('This image is:', type(image), 'with dimensions:', image.shape)
# Image width in pixels: shape is (rows, cols, channels), so index 1 is x.
xSize = image.shape[1]
print(xSize)
plt.imshow(image) # if you wanted to show a single color channel image called 'gray', for example, call as plt.imshow(gray, cmap='gray')
```
## Ideas for Lane Detection Pipeline
**Some OpenCV functions (beyond those introduced in the lesson) that might be useful for this project are:**
`cv2.inRange()` for color selection
`cv2.fillPoly()` for regions selection
`cv2.line()` to draw lines on an image given endpoints
`cv2.addWeighted()` to coadd / overlay two images
`cv2.cvtColor()` to grayscale or change color
`cv2.imwrite()` to output images to file
`cv2.bitwise_and()` to apply a mask to an image
**Check out the OpenCV documentation to learn about these and discover even more awesome functionality!**
## Helper Functions
Below are some helper functions to help get you started. They should look familiar from the lesson!
```
import math
def grayscale(img):
    """Convert an RGB image to a single-channel grayscale image.

    To display the result with matplotlib, remember to pass the gray
    colormap: plt.imshow(gray, cmap='gray').
    """
    # Images read with matplotlib.image are RGB. If the image came from
    # cv2.imread() (BGR order), use cv2.COLOR_BGR2GRAY instead.
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    return gray
def canny(img, low_threshold, high_threshold):
    """Run Canny edge detection with the given hysteresis thresholds."""
    edges = cv2.Canny(img, low_threshold, high_threshold)
    return edges
def gaussian_blur(img, kernel_size):
    """Smooth the image with a kernel_size x kernel_size Gaussian kernel."""
    ksize = (kernel_size, kernel_size)
    # sigma=0 lets OpenCV derive the standard deviation from the kernel size.
    return cv2.GaussianBlur(img, ksize, 0)
def region_of_interest(img, vertices):
    """Mask the image, keeping only the polygon defined by `vertices`.

    Pixels outside the polygon are set to black. `vertices` should be a
    numpy array of integer points.
    """
    # The fill value must match the channel count of the input image:
    # a tuple of 255s for multi-channel images, a plain 255 for grayscale.
    if len(img.shape) > 2:
        fill_color = (255,) * img.shape[2]
    else:
        fill_color = 255
    # Paint the polygon white on a black mask, then AND it with the image so
    # only pixels inside the polygon survive.
    mask = np.zeros_like(img)
    cv2.fillPoly(mask, vertices, fill_color)
    return cv2.bitwise_and(img, mask)
def draw_lines(img, lines, color=[255, 0, 0], thickness=8):
    """
    Draw the Hough `lines` on `img` (in place) and, for segments that look
    like lane lines, also draw an extension down to the bottom of the image.

    Segments are classified by slope ((y2-y1)/(x2-x1)) and by which half of
    the image they lie in: left-half segments with slope < -0.6 belong to
    the left lane line, right-half segments with slope > 0.6 to the right.
    Each qualifying segment is extended from its lowest endpoint to the
    bottom edge.

    The mid-line and bottom edge are derived from img.shape, so this works
    for any frame size (the original hard-coded 480/540, which assumed a
    960x540 frame). `color` is never mutated, so the mutable default is safe.
    """
    y_bottom = img.shape[0]      # y coordinate of the bottom edge
    x_mid = img.shape[1] // 2    # vertical mid-line separating left/right

    # First pass: build the extrapolated lane segments.
    new_lines = []
    for line in lines:
        for x1, y1, x2, y2 in line:
            if x2 == x1:
                # Vertical segment: slope undefined; skip extrapolation
                # (the raw segment is still drawn below).
                continue
            slope = (y2 - y1) / (x2 - x1)
            if x1 < x_mid and x2 < x_mid and slope < -0.6:
                # Left-lane candidate: extend from its lower endpoint down.
                xb = min(x1, x2)
                yb = max(y1, y2)
                xn = math.floor(xb + (y_bottom - yb) / slope)
                new_lines.append([[xb, yb, int(xn), y_bottom]])
            if x1 > x_mid and x2 > x_mid and slope > 0.6:
                # Right-lane candidate.
                xb = max(x1, x2)
                yb = max(y1, y2)
                xn = math.floor(xb + (y_bottom - yb) / slope)
                new_lines.append([[int(xb), int(yb), int(xn), y_bottom]])
    # Second pass: draw the raw segments, then the extrapolated ones.
    for line in lines:
        for x1, y1, x2, y2 in line:
            cv2.line(img, (x1, y1), (x2, y2), color, thickness)
    for line in new_lines:
        for x1, y1, x2, y2 in line:
            cv2.line(img, (x1, y1), (x2, y2), color, thickness)
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
    """
    Run the probabilistic Hough transform on a Canny edge image.

    Returns a new black RGB image of the same height/width with the
    detected line segments drawn on it.
    """
    segments = cv2.HoughLinesP(
        img, rho, theta, threshold, np.array([]),
        minLineLength=min_line_len, maxLineGap=max_line_gap)
    canvas = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
    draw_lines(canvas, segments)
    return canvas
# Python 3 has support for cool math symbols.
def weighted_img(img, initial_img, a=0.8, b=1., c=0.):
    """
    Blend the annotated line image onto the original frame.

    `img` is the output of hough_lines(): a black image with lines drawn.
    `initial_img` is the frame before any processing; both must have the
    same shape. Returns initial_img * a + img * b + c.
    """
    blended = cv2.addWeighted(initial_img, a, img, b, c)
    return blended
```
## Test Images
Build your pipeline to work on the images in the directory "test_images"
**You should make sure your pipeline works well on these images before you try the videos.**
```
import os
# List the sample images available for testing the pipeline.
os.listdir("test_images/")
```
## Build a Lane Finding Pipeline
Build the pipeline and run your solution on all test_images. Make copies into the `test_images_output` directory, and you can use the images in your writeup report.
Try tuning the various parameters, especially the low and high Canny thresholds as well as the Hough lines parameters.
```
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
# TODO: Build your pipeline that will draw lane lines on the test_images
# then save them to the test_images_output directory.
def run_pipeline(image):
    """Detect lane lines on one RGB frame and return the annotated frame.

    Pipeline: grayscale -> Gaussian blur -> Canny edges -> region-of-interest
    mask -> Hough line detection -> blend lines onto the original image.
    Each intermediate result is also pushed to matplotlib for inspection.

    NOTE(review): the region-of-interest vertices below are hard-coded for a
    960x540 frame — confirm before running on other resolutions.
    """
    plt.imshow(image)
    gray_img = grayscale(image)
    plt.imshow(gray_img)
    # 5x5 Gaussian kernel suppresses sensor noise before edge detection.
    blur_gray_img = gaussian_blur(gray_img, 5)
    plt.imshow(blur_gray_img)
    # Canny hysteresis thresholds: 50 (weak) / 150 (strong).
    canny_img = canny(blur_gray_img, 50, 150)
    plt.imshow(canny_img)
    # Trapezoid covering the road area ahead of the camera.
    vertices = np.array([[(0,540),(450, 320), (490, 320), (960,540)]], dtype=np.int32)
    mask_canny_img = region_of_interest(canny_img, vertices)
    plt.imshow(mask_canny_img)
    # rho=2 px, theta=1 deg, 15 votes, min length 40 px, max gap 15 px.
    line_img = hough_lines(mask_canny_img, 2, np.pi/180, 15, 40, 15)
    plt.imshow(line_img)
    result = weighted_img(image, line_img)
    plt.imshow(result)
    return result
#image = mpimg.imread('test_images/solidWhiteCurve.jpg')
#image = mpimg.imread('test_images/solidYellowCurve.jpg')
#image = mpimg.imread('test_images/solidYellowLeft.jpg')
#image = mpimg.imread('test_images/solidYellowCurve2.jpg')
#image = mpimg.imread('test_images/solidWhiteRight.jpg')
image = mpimg.imread('test_images/whiteCarLaneSwitch.jpg')
result = run_pipeline(image)
```
## Test on Videos
You know what's cooler than drawing lanes over images? Drawing lanes over video!
We can test our solution on two provided videos:
`solidWhiteRight.mp4`
`solidYellowLeft.mp4`
**Note: if you get an import error when you run the next cell, try changing your kernel (select the Kernel menu above --> Change Kernel). Still have problems? Try relaunching Jupyter Notebook from the terminal prompt. Also, consult the forums for more troubleshooting tips.**
**If you get an error that looks like this:**
```
NeedDownloadError: Need ffmpeg exe.
You can download it by calling:
imageio.plugins.ffmpeg.download()
```
**Follow the instructions in the error message and check out [this forum post](https://discussions.udacity.com/t/project-error-of-test-on-videos/274082) for more troubleshooting tips across operating systems.**
```
# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
from IPython.display import HTML
def process_image(image):
    """Video-frame callback: annotate a single frame with lane lines.

    Must return a 3-channel color image so moviepy can assemble the clip.
    """
    return run_pipeline(image)
```
Let's try the one with the solid white lane on the right first ...
```
white_output = 'test_videos_output/solidWhiteRight.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(0,5)
clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4")
# fl_image applies process_image to every frame of the clip.
white_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!
%time white_clip.write_videofile(white_output, audio=False)
```
Play the video inline, or if you prefer find the video in your filesystem (should be in the same directory) and play it in your video player of choice.
```
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(white_output))
```
## Improve the draw_lines() function
**At this point, if you were successful with making the pipeline and tuning parameters, you probably have the Hough line segments drawn onto the road, but what about identifying the full extent of the lane and marking it clearly as in the example video (P1_example.mp4)? Think about defining a line to run the full length of the visible lane based on the line segments you identified with the Hough Transform. As mentioned previously, try to average and/or extrapolate the line segments you've detected to map out the full extent of the lane lines. You can see an example of the result you're going for in the video "P1_example.mp4".**
**Go back and modify your draw_lines function accordingly and try re-running your pipeline. The new output should draw a single, solid line over the left lane line and a single, solid line over the right lane line. The lines should start from the bottom of the image and extend out to the top of the region of interest.**
Now for the one with the solid yellow lane on the left. This one's more tricky!
```
yellow_output = 'test_videos_output/solidYellowLeft.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4').subclip(0,5)
clip2 = VideoFileClip('test_videos/solidYellowLeft.mp4')
yellow_clip = clip2.fl_image(process_image)
%time yellow_clip.write_videofile(yellow_output, audio=False)
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(yellow_output))
```
## Writeup and Submission
If you're satisfied with your video outputs, it's time to make the report writeup in a pdf or markdown file. Once you have this Ipython notebook ready along with the writeup, it's time to submit for review! Here is a [link](https://github.com/udacity/CarND-LaneLines-P1/blob/master/writeup_template.md) to the writeup template file.
## Optional Challenge
Try your lane finding pipeline on the video below. Does it still work? Can you figure out a way to make it more robust? If you're up for the challenge, modify your pipeline so it works with this video and submit it along with the rest of your project!
```
challenge_output = 'test_videos_output/challenge.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip3 = VideoFileClip('test_videos/challenge.mp4').subclip(0,5)
clip3 = VideoFileClip('test_videos/challenge.mp4')
challenge_clip = clip3.fl_image(process_image)
%time challenge_clip.write_videofile(challenge_output, audio=False)
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format(challenge_output))
```
| github_jupyter |
[View in Colaboratory](https://colab.research.google.com/github/thonic92/chal_TM/blob/master/model_tweets.ipynb)
```
import json
import numpy as np
import pandas as pd
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import LSTM
from keras.callbacks import ModelCheckpoint
from keras.utils import np_utils
import sys
import re
import unicodedata
from collections import Counter
import nltk
# Load the scraped tweets and build a single lower-cased text corpus.
with open("/content/gdrive/My Drive/json_datas_full.json", "r", encoding="latin1",errors='ignore' ) as read_file:
    data = json.load(read_file)
# Collect the lower-cased text of every tweet.
tweets = []
for i in range(len(data)):
    tweets.append(data[i]['text'].lower())
print(tweets[0:2])
# Join all tweets into one string and strip accents down to plain ASCII.
tweets_str = ' '.join(tweets)
tweets_str=unicodedata.normalize('NFD',tweets_str).encode('ascii', 'ignore').decode("utf-8")
print(tweets_str[0:1000])
tweets_words = tweets_str.split(' ')
#print(tweets_words[1:100])
type(tweets_words)
#print(sorted(set(tweets_words)))
# Quick vocabulary inspection: 100 most common words and bigrams.
print(Counter(tweets_words).most_common()[0:100])
print(Counter(list(nltk.bigrams(tweets_words))).most_common()[0:100])
# Strip mentions, URLs and punctuation, then remove the campaign hashtags
# and keywords that dominate the corpus; finally collapse whitespace and
# cap the corpus size. Patterns are applied in the original order.
_noise_patterns = [
    "(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)",  # mentions, symbols, urls
    "paris2024",
    "jo2024",
    "jo 2024",
    "jo de 2024",
    "paris",
    "sport",
    "olympicday",
    "enmodejo",
    "grandparis",
    "cio",
    "jouerlejeu",
    "jeuxolympiques",
    "venezpartager",
    "jo2024paris",
    "jerevedesjeux",
    "france",
    "madeforsharing",
    "rio2016",
    "generation2024",
    "gagnonsensemble",
    "engager",
    "pleinement",
    "candidature",
    "nouvelle etape",
    "hidalgo veut",
    "favorable",
    "s engage",
]
for _pattern in _noise_patterns:
    tweets_str = re.sub(_pattern, " ", tweets_str)
tweets_str = ' '.join(tweets_str.split())
tweets_str = tweets_str[0:300000]
print(tweets_str[0:1000])
len(tweets_str)
# Character-level vocabulary and integer encoding of the corpus.
chars = sorted(list(set(tweets_str)))
char_to_int = dict((c, i) for i, c in enumerate(chars))
print(chars)
len(char_to_int)
# NOTE(review): n_chars is the number of space-separated WORDS, yet below it
# is used as a CHARACTER index bound into tweets_str — so only the first
# n_chars characters of the corpus generate training patterns. Confirm
# whether len(tweets_str) was intended.
n_chars = len(tweets_str.split(sep=' '))
n_vocab = len(chars)
print(n_chars)
print(n_vocab)
# Sliding windows of seq_length characters -> next-character targets.
seq_length = 100
dataX = []
dataY = []
for i in range(0, n_chars - seq_length, 1):
    seq_in = tweets_str[i:i + seq_length]
    seq_out = tweets_str[i + seq_length]
    dataX.append([char_to_int[char] for char in seq_in])
    dataY.append(char_to_int[seq_out])
n_patterns = len(dataX)
print(n_patterns)
# reshape X to be [samples, time steps, features]
X = np.reshape(dataX, (n_patterns, seq_length, 1))
# normalize
X = X / float(n_vocab)
# one hot encode the output variable
y = np_utils.to_categorical(dataY)
# Two stacked 256-unit LSTM layers with dropout, then a softmax over the
# character vocabulary (next-character prediction).
model = Sequential()
model.add(LSTM(256, input_shape=(X.shape[1], X.shape[2]), return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(256))
model.add(Dropout(0.2))
model.add(Dense(y.shape[1], activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')
# Checkpoint the weights to Drive whenever the training loss improves.
filepath="/content/gdrive/My Drive/weights-improvement-{epoch:02d}-{loss:.4f}-bigger3.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=1, save_best_only=True, mode='min')
callbacks_list = [checkpoint]
model.fit(X, y, epochs=30, batch_size=128, callbacks=callbacks_list)
```
| github_jupyter |
<div align="center">
<h1>Homework 7</h1>
<p>
<div align="center">
<h2>Yutong Dai yutongd3@illinois.edu</h2>
</div>
</p>
</div>
## 6.33
The dual problem is
$$
\begin{align}
& \min \quad 3 w_1 + 6 w_2\\
& s.t \quad w_1 + 2w_2 \geq 2\\
& \qquad w_1 + 3w_2 \geq -3\\
& \qquad w_1\leq 0,w_2\geq 0
\end{align}
$$
It's easy to verify $(w_1^*,w_2^*)=(-\frac{11}{2}, \frac{5}{2})$ is a feasible solution to the dual and satisfies the KKT conditions. Therefore, $(x_1^*,x_2^*)=(3/2, 3/2)$ is the optimal solution to the primal.
---
* The first method is "Big-M" method. First convert the problem to the standard form and adding the artificial variables, where they serve as the initial basis.
* The second method is the artificial constraints technique, where we adding a upper bound on the summation of all non-basic variables.
I will use the second method.
The tableau for the primal is as follow, where the dual is not feasible.
| | $z$ | $x_1$ | $x_2$ | $x_3$ | $x_4$ | RHS |
| --- | --- | --- | --- | --- | --- | --- |
| $z$ | -1 | 2 | -3 | 0 | 0 | 0 |
| $x_3$ | 0 | -1 | -1 | 1 | 0 | -3 |
| $x_4$ | 0 | 3 | 1 | 0 | 1 | 6 |
Adding constrain $x_1 + x_2 \leq M$, we have the following tableau.
| | $z$ | $x_1$ | $x_2$ | $x_3$ | $x_4$ | $x_5$ | RHS |
| --- | --- | --- | --- | --- | --- | --- | --- |
| $z$ | -1 | 2 | -3 | 0 | 0 | 0 | 0 |
| $x_5$ | 0 | 1 | 1 | 0 | 0 | 1 | M |
| $x_3$ | 0 | -1 | -1 | 1 | 0 | 0 | -3 |
| $x_4$ | 0 | 3 | 1 | 0 | 1 | 0 | 6 |
* The first iteration:
| | $z$ | $x_1$ | $x_2$ | $x_3$ | $x_4$ | $x_5$ | RHS |
| --- | --- | --- | --- | --- | --- | --- | --- |
| $z$ | -1 | 0 | -5 | 0 | 0 | -2 | -2M |
| $x_1$ | 0 | 1 | 1 | 0 | 0 | 1 | M |
| $x_3$ | 0 | 0 | 0 | 1 | 0 | 1 | -3 + M |
| $x_4$ | 0 | 0 | -2 | 0 | 1 | -3 | 6 -3M |
* The second iteration:
| | $z$ | $x_1$ | $x_2$ | $x_3$ | $x_4$ | $x_5$ | RHS |
| --- | --- | --- | --- | --- | --- | --- | --- |
| $z$ | -1 | 0 | -11/3 | 0 | -2/3 | 0 | -4 |
| $x_1$ | 0 | 1 | 1/3 | 0 | 1/3 | 0 | 2 |
| $x_3$ | 0 | 0 | -2/3 | 1 | 1/3 | 0 | -1 |
| $x_5$ | 0 | 0 | 2/3 | 0 | -1/3 | 1 | M-2 |
* The third iteration:
| | $z$ | $x_1$ | $x_2$ | $x_3$ | $x_4$ | $x_5$ | RHS |
| --- | --- | --- | --- | --- | --- | --- | --- |
| $z$ | -1 | 0 | 0 | -11/2 | -5/2 | 0 | 3/2 |
| $x_1$ | 0 | 1 | 0 | 1/2 | 1/2 | 0 | 3/2 |
| $x_2$ | 0 | 0 | 1 | -3/2 | -1/2 | 0 | 3/2 |
| $x_4$ | 0 | 0 | 0 | 1 | 0 | 1 | M-2 |
So the optimal solution for the primal is $(3/2, 3/2)$.
## 6.54
**a)**
The dual problem is
$$
\begin{align}
& \min \quad 8w_1 + 4w_2\\
& s.t \quad w_1 - w_2 \geq 2\\
& \qquad 2w_1 - w_2 \geq 1\\
& \qquad 3w_1 - 2w_2 \geq -1\\
& \qquad w_1\leq 0,w_2\geq 0
\end{align}
$$
Since the constraints in the primal are of $\leq$ type, we know that the optimal solution for the dual is $(2,0)$ .
**b)**
Note $x_2$ is a non-basic feasible solution and $c_2' - z_2=1>0$, therefore $x_2$ will enter the basis and change the optimal solution.
The tableau becomes
| | $z$ | $x_1$ | $x_2$ | $x_3$ | $x_4$ | $x_5$ | RHS |
| --- | --- | --- | --- | --- | --- | --- | --- |
| $z$ | -1 | 0 | 1 | -3 | -2 | 0 | -16 |
| $x_1$ | 0 | 1 | 2 | 1 | 1 | 0 | 8 |
| $x_5$ | 0 | 0 | 3 | -1 | 1 | 1 | 12 |
After one iteration, we reach the optimal tableau.
| | $z$ | $x_1$ | $x_2$ | $x_3$ | $x_4$ | $x_5$ | RHS |
| --- | --- | --- | --- | --- | --- | --- | --- |
| $z$ | -1 | 0 | 0 | -3/2 | -3/2 | 0 | -20 |
| $x_2$ | 0 | 1/2 | 0 | 1/2 | 1/2 | 0 | 4 |
| $x_5$ | 0 | -1/2 | 0 | -3/2 | 1/2 | 1 | 0 |
The new optimal solution becomes $(x_1, x_2, x_3)=(0,4,0)$
**c)**
Note $x_2$ is a non-basic feasible solution and $c_2 - c_B^TB^{-1}A_j'=1-1/3=2/3>0$, therefore $x_2$ will enter the basis and change the optimal solution.
| | $z$ | $x_1$ | $x_2$ | $x_3$ | $x_4$ | $x_5$ | RHS |
| --- | --- | --- | --- | --- | --- | --- | --- |
| $z$ | -1 | 0 | 2/3 | -3 | -2 | 0 | -16 |
| $x_1$ | 0 | 1 | 1/6 | 1 | 1 | 0 | 8 |
| $x_5$ | 0 | 0 | 7/6 | -1 | 1 | 1 | 12 |
After one iteration, we reach the optimal tableau.
| | $z$ | $x_1$ | $x_2$ | $x_3$ | $x_4$ | $x_5$ | RHS |
| --- | --- | --- | --- | --- | --- | --- | --- |
| $z$ | -1 | 0 | 0 | -17/7 | -18/7 | -4/7 | -28 |
| $x_1$ | 0 | 1 | 0 | 8/7 | 6/7 | -1/7 | 44/7 |
| $x_2$ | 0 | 0 | 1 | -6/7 | 1/7 | 6/7 | 72/7 |
The new optimal solution becomes $(x_1, x_2, x_3)=(44/7,72/7,0)$
**d)**
Set up the tableau as
| | $z$ | $x_1$ | $x_2$ | $x_3$ | $x_4$ | $x_5$ | $x_6$ | RHS |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| $z$ | -1 | 0 | -3 | -3 | -2 | 0 | 0 | -16 |
| $M$ | -1 | 0 | 0 | 0 | 0 | 0 | -1 | 0 |
| $x_1$ | 0 | 1 | 2 | 1 | 1 | 0 | 0 | 8 |
| $x_5$ | 0 | 0 | 3 | -1 | 1 | 1 | 0 | 12 |
| $x_6$ | 0 | 0 | 1 | 2 | 0 | 0 | 1 | 3 |
and make $x_6$ as true basic variable by adding the last row to the zero row. We obtain
| | $z$ | $x_1$ | $x_2$ | $x_3$ | $x_4$ | $x_5$ | $x_6$ | RHS |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| $z$ | -1 | 0 | -3 | -3 | -2 | 0 | 0 | -16 |
| $M$ | -1 | 1 | 2 | 0 | 0 | 0 | 0 | 3 |
| $x_1$ | 0 | 1 | 2 | 1 | 1 | 0 | 0 | 8 |
| $x_5$ | 0 | 0 | 3 | -1 | 1 | 1 | 0 | 12 |
| $x_6$ | 0 | 0 | 1 | 2 | 0 | 0 | 1 | 3 |
After one iteration,
| | $z$ | $x_1$ | $x_2$ | $x_3$ | $x_4$ | $x_5$ | $x_6$ | RHS |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| $z$ | -1 | 0 | -3/2 | 0 | -2 | 0 | 3/2 | -23/2 |
| $M$ | -1 | 0 | 0 | 0 | 0 | 0 | -1 | 0 |
| $x_1$ | 0 | 1 | 3/2 | 0 | 1 | 0 | -1/2 | 13/2 |
| $x_5$ | 0 | 0 | 7/2 | 0 | 1 | 1 | 1/2 | 27/2 |
| $x_6$ | 0 | 0 | 1/2 | 1 | 0 | 0 | 1/2 | 3/2 |
we reach the optimal. The new optimal solution becomes $(x_1, x_2, x_3)=(13/2, 0, 3/2)$
**e)**
Suppose the new right-hand-side is $b'$. Then $B^{-1}b'=(b_1' , b_1'+ b_2')^T$. As we will increase 8 or 4 to $b_1'$ or $b_2'$. Either way will ensure $B^{-1}b'\geq 0$, therefore, the same basis is still optimal.
- If we change $b_1$ then, we will change the optimal solution from $(b_1,0,0)$ to $(b_1',0,0)$. It will increase the objective value by $2(b_1' -b_1)$
- If we change $b_2$ then, we won't change the optimal solution $(b_1,0,0)$, hence the objective value.
**f)**
As $c_6 - z_6 = 6 - wA_6 = 2 > 0$, $x_6$ will enter the basis.
The initial tableau is
| | $z$ | $x_1$ | $x_2$ | $x_3$ | $x_4$ | $x_5$ | $x_6$ | RHS |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| $z$ | -1 | 0 | -3 | -3 | -2 | 0 | 2 | -16 |
| $x_1$ | 0 | 1 | 2 | 1 | 1 | 0 | 2 | 8 |
| $x_5$ | 0 | 0 | 3 | -1 | 1 | 1 | 3 | 12 |
After one iteration, the tableau becomes
| | $z$ | $x_1$ | $x_2$ | $x_3$ | $x_4$ | $x_5$ | $x_6$ | RHS |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| $z$ | -1 | -1 | -5 | -4 | -3 | 0 | 0 | -24 |
| $x_6$ | 0 | 1/2 | 1 | 1/2 | 1/2 | 0 | 1 | 4 |
| $x_5$ | 0 | -3/2 | 0 | -5/2 | -1/2 | 1 | 0 | 0 |
The optimal solution is $(x_1, x_2, x_3,x_6)=(0,0,0,4)$
## 6.68
Before we proceed, we need to calculate a few quantity:
* $(c_6,c_7,c_8)-(c_1,c_2,c_3)B^{-1}A_{[:,(6,7,8)]} = (\bar c_6,\bar c_7, \bar c_8)=(-2,-1/10,-2) \Rightarrow (c_1,c_2,c_3)=(2,4,1)$, where $A_{[:,(6,7,8)]}$ is $I_3$.
* $(c_4,c_5) - (c_1,c_2,c_3)B^{-1}A_{[:,(3,4)]}=(\bar c_4,\bar c_5)=(-2,0)\Rightarrow (c_4,c_5)=(3,2)$
* $b=B\bar b=(14/9, 110/3, 46/9)^T$
We perturbe the $b$ along the direction $d=(-1,0,0)^T$.
**Iteration 1:**
* Calculate $B^{-1}d = (-0.5, 1 , -5)^T$, So $S=\{1,3\}$.
* Calculate the minimal ratio $\theta=7/5$.
* If $\theta\in [0,7/5]$, the current basis $(A_1,A_2,A_3)$ is always optimal. Further, the objective value and right hand side will be
$$
z(\theta) = 17 - 2\theta \qquad \bar b = (3-\frac{1}{2}\theta, 1 + \theta, 7-5\theta)^T.
$$
* When $\theta =7/5$, then $x_3=0$, therefore we perform dual simplex method on the tableau below.
| | $z$ | $x_1$ | $x_2$ | $x_3$ | $x_4$ | $x_5$ | $x_6$ | $x_7$ | $x_8$ | RHS |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| $z$ | -1 | 0 | 0 | 0 | -2 | 0 | -2 | -1/10 | -2 | -71/5 |
| $x_1$ | 0 | 1 | 0 | 0 | -1 | 0 | 1/2 | 1/5 | -1 | 23/10 |
| $x_2$ | 0 | 0 | 1 | 0 | 2 | 1 | -1 | 0 | 1/2 | 12/5 |
| $x_3$ | 0 | 0 | 0 | 1 | -1 | -2 | 5 | -3/10 | 2 | 0 |
So $x_3$ will leave and $x_5$ will enter.
The tableau becomes
| | $z$ | $x_1$ | $x_2$ | $x_3$ | $x_4$ | $x_5$ | $x_6$ | $x_7$ | $x_8$ | RHS |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| $z$ | -1 | 0 | 0 | 0 | -2 | 0 | -2 | -1/10 | -2 | -71/5 |
| $x_1$ | 0 | 1 | 0 | 0 | -1 | 0 | 1/2 | 1/5 | -1 | 23/10 |
| $x_2$ | 0 | 0 | 1 | 1/2 | 3/2 | 0 | 3/2 | -3/20 | 3/2 | 12/5 |
| $x_5$ | 0 | 0 | 0 | -1/2 | 1/2 | 1 | -5/2 | 3/20 | -1 | 0 |
**Iteration 2:**
* Calculate $B^{-1}d = (-0.5, -1.5 , 2.5)^T$, $B^{-1}b=(3,4.5, -3.5)$So $S=\{1,2\}$.
* Calculate the minimal ratio $\theta=3$.
* If $\theta\in [7/5, 3]$, the current basis $(A_1,A_2,A_5)$ is always optimal. Further, the objective value and right hand side will be
$$
z(\theta) = 17 - 2\theta \qquad \bar b = (3-\frac{1}{2}\theta, \frac{9}{2} - \frac{3}{2} \theta, \frac{-7}{2}+\frac{5}{2}\theta)^T.
$$
* When $\theta =3$, then $x_2=0$, therefore we perform dual simplex method on the tableau below.
The tableau becomes
| | $z$ | $x_1$ | $x_2$ | $x_3$ | $x_4$ | $x_5$ | $x_6$ | $x_7$ | $x_8$ | RHS |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| $z$ | -1 | 0 | 0 | 0 | -2 | 0 | -2 | -1/10 | -2 | -11 |
| $x_1$ | 0 | 1 | 0 | 0 | -1 | 0 | 1/2 | 1/5 | -1 | 3/2 |
| $x_2$ | 0 | 0 | 1 | 1/2 | 3/2 | 0 | 3/2 | -3/20 | 3/2 | 0 |
| $x_5$ | 0 | 0 | 0 | -1/2 | 1/2 | 1 | -5/2 | 3/20 | -1 | 4 |
So $x_2$ will leave and $x_7$ will enter.
The tableau becomes
| | $z$ | $x_1$ | $x_2$ | $x_3$ | $x_4$ | $x_5$ | $x_6$ | $x_7$ | $x_8$ | RHS |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| $z$ | -1 | 0 | -2/3 | -1/3 | -3 | 0 | -3 | 0 | -3 | -11 |
| $x_1$ | 0 | 0 | 1 | 4/3 | 2/3 | 0 | 5/2 | 0 | 1 | 3/2 |
| $x_7$ | 0 | 0 | -20/3 | -10/3 | -10 | 0 | -10 | 1 | -10 | 0 |
| $x_5$ | 0 | 0 | 1 | 0 | 2 | 1 | -1 | 0 | 1/2 | 4 |
**Iteration 3:**
* Calculate $B^{-1}d = (-2.5, 10 , 1)^T$, $B^{-1}b=(9,-30,1)$So $S=\{1\}$.
* Calculate the minimal ratio $\theta=18/5$.
* If $\theta\in [3,18/5]$, the current basis $(A_1,A_7,A_5)$ is always optimal. Further, the objective value and right hand side will be
$$
z(\theta) = 20 - 3\theta \qquad \bar b = (9-\frac{5}{2}\theta, -30 + 10 \theta, 1+\theta)^T.
$$
* When $\theta =18/5$, then $x_1=0$, therefore we perform dual simplex method on the tableau below.
| | $z$ | $x_1$ | $x_2$ | $x_3$ | $x_4$ | $x_5$ | $x_6$ | $x_7$ | $x_8$ | RHS |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| $z$ | -1 | 0 | -2/3 | -1/3 | -3 | 0 | -3 | 0 | -3 | -46/5 |
| $x_1$ | 0 | 0 | 1 | 4/3 | 2/3 | 0 | 5/2 | 0 | 1 | 0 |
| $x_7$ | 0 | 0 | -20/3 | -10/3 | -10 | 0 | -10 | 1 | -10 | 6 |
| $x_5$ | 0 | 0 | 1 | 0 | 2 | 1 | -1 | 0 | 1/2 | 23/5 |
We cannot pivot anymore. Hence the algorithm terminates, which means the problem is infeasible for $\theta > 18/5$.
## 6.72
**a)**
$$
\begin{align}
& \max \quad 6w + \min_{(x_1,x_2) \in X} \{(1-3w)x_1 + (2-w)x_2\}\\
& s.t \quad w\geq 0
\end{align}
$$
**b)**
The minimal of $\min_{(x_1,x_2) \in X} \{(1-3w)x_1 + (2-w)x_2\}$ is obtained on one of the following extreme points
$$(0,0), (8,0), (3,5), (0,2).$$
Plug these four points into $f(w)$, we end up with
$$f(w)=6w + \min\{0, 4-2w, 13-14w, 8-24w\}.$$
**c)**
$$
f(w)=
\begin{cases}
6w, & 0 \leq w \leq 1/3 \\
8-18w, & w \geq 1/3
\end{cases}
$$
```
import numpy as np
import matplotlib.pyplot as plt
plt.plot(1/3,2,'ro',markersize=10)
x1 = np.linspace(0,1/3,10)
x2 = np.linspace(1/3,1,10)
plt.plot(x1,6*x1,'k-',label=r"$z=6w$")
plt.plot(x2,8 - 18*x2,'k-', label=r"$z=8-18w$")
plt.legend()
plt.show()
```
**d)**
The optimal solution for the Lagrangian dual problem is $w=1/3$.
**e)**
Since $w=1/3$, $f(w)=2 + \min_{(x_1,x_2) \in X}5/3 x_2=2$, we know $x_2=0$ and therefore $x_1=2$.
So the optimal solution for the primal $(x_1, x_2)=(2,0)$.
## Exercise 5.14
**a)**
$$(c - 10d)' x = (c + 10d)' x \Rightarrow d'x = 0.$$
The same holds for $Ax = b + \theta f$. Therefore, $5 d'x = 0$. As the optimality and feasibility conditions hold, the same basis remains optimal.
**b)**
For fixed $\theta$, let $B$ be an arbitrary basis. Then we have $x=(X_B,X_N)=(B^{-1}(b+\theta f),0)$. Suppose $\{B^j\}$ are all possible basis derived from A. Then our problem becomes
$$f(\theta) = \underset{j}{\text{min}} \{(c+ \theta d)' {B^j}^{-1}(b + \theta f)\}, $$
where ${B^j}^{-1}(b + \theta f) \geq 0$.
Clearly, $f(\theta)$ is a piecewise quadratic function of $\theta$ if $f\neq 0$
Let $K$ be the number of possible bases, then the upper bound on the number of pieces is $2K$.
**c)**
\begin{aligned}
& \text{minimize} && \theta d'x \\
& \text{subject to} && Ax = \theta f \\
& && x \geq 0
\end{aligned}
Let $B$ be an optimal basis for $\theta = 1$ and assume that $\theta > 0$. $d' - d'_B B^{-1} A \geq 0 \text{ and } B^{-1}f \geq 0$. Hence for nonnegative $\theta$ satisfying $\theta d' - d'_B B^{-1} A \geq 0 \text{ and } \theta B^{-1}f \geq 0$ keeps this same basis optimal.
**d)**
Consider $b, f = 0$, $f(\theta)$ is constant in $\theta$, hence both convex and concave.
| github_jupyter |
# Assignment: Global average budgets in the CESM pre-industrial control simulation
## Learning goals
Students completing this assignment will gain the following skills and concepts:
- Continued practice working with the Jupyter notebook
- Familiarity with atmospheric output from the CESM simulation
- More complete comparison of the global energy budget in the CESM control simulation to the observations
- Validation of the annual cycle of surface temperature against observations
- Opportunity to formulate a hypothesis about these global temperature variations
- Python programming skills: basic xarray usage: opening gridded dataset and taking averages
## Instructions
- In a local copy of this notebook (on the JupyterHub or your own device) **add your answers in additional cells**.
- **Complete the required problems** below.
- Remember to set your cell types to `Markdown` for text, and `Code` for Python code!
- **Include comments** in your code to explain your method as necessary.
- Remember to actually answer the questions. **Written answers are required** (not just code and figures!)
- Submit your solutions in **a single Jupyter notebook** that contains your text, your code, and your figures.
- *Make sure that your notebook* ***runs cleanly without errors:***
- Save your notebook
- From the `Kernel` menu, select `Restart & Run All`
- Did the notebook run from start to finish without error and produce the expected output?
- If yes, save again and submit your notebook file
- If no, fix the errors and try again.
## Problem 1: The global energy budget in the CESM control simulation
Compute the **global, time average** of each of the following quantities, and **compare them to the observed values** from the Trenberth and Fasullo (2012) figure in the course notes. Recall that when you want to repeat an operation, you should write a function for it!
- Solar Radiation budget:
- Incoming Solar Radiation, or Insolation
- Reflected Solar Radiation at the top of atmosphere
- Solar Radiation Reflected by Surface
- Solar Radiation Absorbed by Surface
  - Solar Radiation Reflected by Clouds and Atmosphere *(you can calculate this as the difference between the reflected radiation at the top of atmosphere and reflected radiation at the surface)*
- Total Absorbed Solar Radiation (ASR) at the top of atmosphere
- Solar Radiation Absorbed by Atmosphere *(you can calculate this as the residual of your budget, i.e. what's left over after accounting for all other absorption and reflection)*
- Longwave Radiation budget:
- Outgoing Longwave Radiation
- Upward emission from the surface
- Downwelling radiation at the surface
- Other surface fluxes:
- "Thermals", or *sensible heat flux*. *You will find this in the field called `SHFLX` in your dataset.*
- "Evapotranspiration", or *latent heat flux*. *You will find this in the field called `LHFLX` in your dataset.*
*Note we will look more carefully at atmospheric absorption and emission processes later. You do not need to try to calculate terms such as "Emitted by Atmosphere" or "Atmospheric Window"*
**Based on your results above, answer the following questions:**
- Is the CESM control simulation at (or near) **energy balance**?
- Do you think this simulation is near equilibrium?
- Summarize in your own words what you think are the most important similarities and differences of the global energy budgets in the CESM simulation and the observations.
## Problem 2: Verifying the annual cycle in global mean surface temperature against observations
In the class notes we plotted the **timeseries of global mean surface temperature** in the CESM control simulation, and found an **annual cycle**. The purpose of this exercise is to verify that this phenomenon is also found in the observed temperature record. If so, then we can conclude that it is a real feature of Earth's climate and not an artifact of the numerical model.
For observations, we will use the **NCEP Reanalysis data**.
*Reanalysis data is really a blend of observations and output from numerical weather prediction models. It represents our “best guess” at conditions over the whole globe, including regions where observations are very sparse.*
The necessary data are all served up over the internet. We will look at monthly climatologies averaged over the 30 year period 1981 - 2010.
You can browse the available data here:
https://psl.noaa.gov/thredds/catalog/Datasets/ncep.reanalysis.derived/catalog.html
**Surface air temperature** is contained in a file called `air.2m.mon.ltm.nc`, which is found in the collection called `surface_gauss`.
Here's a link directly to the catalog page for this data file:
https://psl.noaa.gov/thredds/catalog/Datasets/ncep.reanalysis.derived/surface_gauss/catalog.html?dataset=Datasets/ncep.reanalysis.derived/surface_gauss/air.2m.mon.ltm.nc
Now click on the `OPeNDAP` link. A page opens up with lots of information about the contents of the file. The `Data URL` is what we need to read the data into our Python session. For example, this code opens the file and displays a list of the variables it contains:
```
import xarray as xr
url = 'https://psl.noaa.gov/thredds/dodsC/Datasets/ncep.reanalysis.derived/surface_gauss/air.2m.mon.ltm.nc'
ncep_air2m = xr.open_dataset(url, decode_times=False)
print(ncep_air2m)
```
The temperature data is called `air`. Take a look at the details:
```
print(ncep_air2m.air)
```
Notice that the dimensions are `(time: 12, lat: 94, lon: 192)`. The time dimension is calendar months. But note that the lat/lon grid is not the same as our model output!
*Think about how you will handle calculating the global average of these data.*
### Your task:
- Make a well-labeled timeseries graph of the global-averaged observed average surface air temperature climatology.
- Verify that the annual cycle we found in the CESM simulation also exists in the observations.
- In your own words, suggest a plausible physical explanation for why this annual cycle exists.
| github_jupyter |
```
import numpy as np
import matplotlib.pyplot as plt
import butcher
import bro
import os
from astropy.io import ascii
from astropy.timeseries import LombScargle

# --- Read in the photometry -------------------------------------------------
# Locate the data directory relative to the current working directory.
# NOTE(review): the 'Data\\' replacement assumes a Windows-style layout with
# this script living in a 'Figure_4' folder -- confirm on other platforms.
cwd = os.getcwd()
data_dir = cwd.replace('Figure_4', 'Data\\')

# One entry per survey: (csv file, name of its time column, plot label).
# ASAS/ASAS-SN report MJD; KELT/PROMPT/ROAD report HJD (the original files
# note PROMPT and ROAD times are JD-2450000).
surveys = [
    ('asas.csv', 'MJD', 'ASAS'),
    ('asassn.csv', 'MJD', 'ASASSN'),
    ('kelt.csv', 'HJD', 'KELT'),
    ('prompt.csv', 'HJD', 'PROMPT'),
    ('road.csv', 'HJD', 'ROAD'),
]

# Load every survey with the same pipeline instead of five copy-pasted
# stanzas: quality cut, magnitude->flux conversion, then the long-term
# flux-decrease correction mentioned in section 3.1.
times, fluxes, uncertainties, names = [], [], [], []
for csv_name, time_col, label in surveys:
    data = ascii.read(data_dir + csv_name)
    # Keep only well-measured points (photometric error < 0.05 mag).
    data = data[data['emag'] < 0.05]
    flux = butcher.mag_to_flux(data['mag'])
    eflux = butcher.emag_to_eflux(data['mag'], data['emag'])
    flux = butcher.long_correct(data[time_col], flux, eflux)
    times.append(data[time_col])
    fluxes.append(flux)
    uncertainties.append(eflux)
    names.append(label)
# NOTE: the original cell defined `names` twice ('ASAS-SN' then 'ASASSN');
# only the second definition was ever used, so that spelling is kept.

# --- Remove the dominant periodicities --------------------------------------
# Periodogram frequency grid: periods from 2 to 10 days.
frequencies = 1/np.linspace(2, 10, 3000)
org_powers, powers2 = [], []
for time, flux, eflux in zip(times, fluxes, uncertainties):
    # First pass of the short-period correction.
    corrflux1, periods1, freq1, power1 = bro.short_correct(time, flux, eflux, min_chunk_size = 10)
    # Lomb-Scargle periodogram of the uncorrected (mean-subtracted) flux.
    org_power = LombScargle(time, flux-np.mean(flux), dy = eflux).power(frequencies)
    # Second correction pass, then the periodogram of the doubly corrected flux.
    corrflux2, periods2, freq2, power2 = bro.short_correct(time, corrflux1, eflux, min_chunk_size = 10)
    power2 = LombScargle(time, corrflux2-np.mean(corrflux2), dy = eflux).power(frequencies)
    org_powers.append(org_power)
    powers2.append(power2)

# --- Create the figure -------------------------------------------------------
import matplotlib
plt.style.use('seaborn-dark-palette')
font = {'family' : 'normal',
        'weight' : 'normal',
        'size'   : 20}
matplotlib.rc('font', **font)
fig, ax = plt.subplots(5)
for i in range(5):
    # Pre- vs post-correction periodograms, one panel per survey.
    ax[i].plot(1/frequencies, org_powers[i], alpha = 0.7, c='grey', label = 'Pre-Correction ' + names[i])
    ax[i].plot(1/frequencies, powers2[i], alpha = 0.7,label = 'Post-Correction ' + names[i])
    ax[i].legend(fontsize = 18)
fig.text(0.35, 0.09, 'Signal Period (Days)', fontsize = 24)
fig.text(0.02, 0.35, 'Signal Power (Arbitrary Units)', rotation = 90, fontsize = 24)
fig = plt.gcf()
fig.set_size_inches(12,20)
#plt.savefig('Removing_Dominant_Cycle.pdf')
plt.show()
```
| github_jupyter |
# IMDb Movie Reviews Classifier
```
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from nltk.stem import WordNetLemmatizer
from sklearn.svm import LinearSVC
from sklearn.feature_extraction.text import TfidfVectorizer
import re
reviews_train = []
for line in open(r'.\movie_data\full_train.txt', 'r', encoding="utf8"):
reviews_train.append(line.strip())
reviews_test = []
for line in open(r'.\movie_data\full_test.txt', 'r', encoding="utf8"):
reviews_test.append(line.strip())
```
## Using 50% for training and remaining 50% for testing
```
len(reviews_train), len(reviews_test)
reviews_train[5]
```
### We can see that the data is very messy, So let's do some cleaning and pre-processing
## Removing punctuation and HTML tags and making everything lower-case
```
# Punctuation characters to delete outright, and markup/dashes/slashes
# that should become single spaces instead.
REPLACE_NO_SPACE = re.compile("[.;:!\'?,\"()\[\]]")
REPLACE_WITH_SPACE = re.compile("(<br\s*/><br\s*/>)|(\-)|(\/)")

def preprocess_reviews(reviews):
    """Lower-case each review, strip punctuation, and replace HTML line
    breaks, dashes, and slashes with single spaces."""
    cleaned = []
    for line in reviews:
        stripped = REPLACE_NO_SPACE.sub("", line.lower())
        cleaned.append(REPLACE_WITH_SPACE.sub(" ", stripped))
    return cleaned
reviews_train_clean = preprocess_reviews(reviews_train)
reviews_test_clean = preprocess_reviews(reviews_test)
reviews_train_clean[5]
```
## Removing stop words
```
english_stop_words = stopwords.words('english')
def remove_stop_words(corpus):
    """Return a copy of *corpus* with English stop words dropped from each review.

    Relies on the module-level ``english_stop_words`` list built from NLTK.
    """
    return [
        ' '.join(word for word in review.split()
                 if word not in english_stop_words)
        for review in corpus
    ]
no_stop_words = remove_stop_words(reviews_train_clean)
no_stop_words[5]
```
# Normalizing Text
## 1. Stemming
```
def get_stemmed_text(corpus):
    """Reduce every word of every review in *corpus* to its Porter stem."""
    stemmer = PorterStemmer()
    stemmed = []
    for review in corpus:
        stemmed.append(' '.join(stemmer.stem(word) for word in review.split()))
    return stemmed
stemmed_reviews = get_stemmed_text(reviews_train_clean)
stemmed_reviews[5]
```
## 2. Lemmatization
```
def get_lemmatized_text(corpus):
    """Replace every word of every review in *corpus* with its WordNet lemma."""
    lemmatizer = WordNetLemmatizer()
    lemmatized = []
    for review in corpus:
        lemmatized.append(' '.join(lemmatizer.lemmatize(word) for word in review.split()))
    return lemmatized
lemmatized_reviews = get_lemmatized_text(reviews_train_clean)
lemmatized_reviews[5]
```
## Vectorization using binary represenation
```
ngram_vectorizer = CountVectorizer(binary=True, ngram_range=(1,1))
ngram_vectorizer.fit(reviews_train_clean)
X = ngram_vectorizer.transform(reviews_train_clean)
X_test = ngram_vectorizer.transform(reviews_test_clean)
X_train, X_val, y_train, y_val = train_test_split(
X, target, train_size = 0.75
)
```
## Classification using Logistic Regression
```
final_ngram = LogisticRegression(C=0.5)
final_ngram.fit(X, target)
print ("Final Accuracy: %s"
% accuracy_score(target, final_ngram.predict(X_test)))
```
## Classification using SVM Classifier
```
final_svm_ngram = LinearSVC(C=0.01)
final_svm_ngram.fit(X, target)
print ("Final Accuracy: %s"
% accuracy_score(target, final_svm_ngram.predict(X_test)))
```
## Vectorization using Word Counts
```
ngram_vectorizer = CountVectorizer(binary=False, ngram_range=(1,1))
ngram_vectorizer.fit(reviews_train_clean)
X = ngram_vectorizer.transform(reviews_train_clean)
X_test = ngram_vectorizer.transform(reviews_test_clean)
X_train, X_val, y_train, y_val = train_test_split(
X, target, train_size = 0.75
)
```
## Classification using Logistic Regression
```
final_wc = LogisticRegression(C=0.01)
final_wc.fit(X, target)
print ("Final Accuracy: %s"
% accuracy_score(target, final_wc.predict(X_test)))
```
## Classification using SVM Classifier
```
final_svm_ngram = LinearSVC(C=0.01)
final_svm_ngram.fit(X, target)
print ("Final Accuracy: %s"
% accuracy_score(target, final_svm_ngram.predict(X_test)))
```
## Vectorization using TF-IDF
```
tfidf_vectorizer = TfidfVectorizer(ngram_range=(1,1))
tfidf_vectorizer.fit(reviews_train_clean)
X = tfidf_vectorizer.transform(reviews_train_clean)
X_test = tfidf_vectorizer.transform(reviews_test_clean)
X_train, X_val, y_train, y_val = train_test_split(
X, target, train_size = 0.75
)
```
## Classification using Logistic Regression
```
final_wc = LogisticRegression(C=1)
final_wc.fit(X, target)
print ("Final Accuracy: %s"
% accuracy_score(target, final_wc.predict(X_test)))
```
## Classification using SVM
```
final_svm_ngram = LinearSVC(C=0.05)
final_svm_ngram.fit(X, target)
print ("Final Accuracy: %s"
% accuracy_score(target, final_svm_ngram.predict(X_test)))
```
# Final Model
```
stop_words = ['in', 'of', 'at', 'a', 'the']
ngram_vectorizer = CountVectorizer(binary=True, ngram_range=(1, 3), stop_words=stop_words)
ngram_vectorizer.fit(reviews_train_clean)
X = ngram_vectorizer.transform(reviews_train_clean)
X_test = ngram_vectorizer.transform(reviews_test_clean)
X_train, X_val, y_train, y_val = train_test_split(
X, target, train_size = 0.75
)
final = LinearSVC(C=0.01)
final.fit(X, target)
print ("Final Accuracy: %s"
% accuracy_score(target, final.predict(X_test)))
```
# Finally broke the 90% mark!!
```
# Inspect the most influential n-gram features of the final model.
# BUG FIX: the original cell referenced undefined names `cv` and
# `final_model`; the objects fitted above are `ngram_vectorizer` and `final`.
feature_to_coef = {
    word: coef for word, coef in zip(
        ngram_vectorizer.get_feature_names(), final.coef_[0]
    )
}
# Five features pushing predictions most strongly towards the positive class.
for best_positive in sorted(
    feature_to_coef.items(),
    key=lambda x: x[1],
    reverse=True)[:5]:
    print (best_positive)
print('-------------------------------------')
# Five features pushing predictions most strongly towards the negative class.
for best_negative in sorted(
    feature_to_coef.items(),
    key=lambda x: x[1])[:5]:
    print (best_negative)
```
| github_jupyter |
# Time Series Modeling
In this lecture, we'll do some **basic** work with time series modeling. Time series are surprisingly complicated objects to work with and model, and many people spend their careers considering statistical questions related to effective modeling of timeseries. In this set of lecture notes, we won't be able to go into too much detail, but we will highlight some of the key questions and approaches to addressing them.
## Note
*I had originally intended to approach time series modeling from a deep learning perspective, using TensorFlow. This is possible; see [here](https://www.tensorflow.org/tutorials/structured_data/time_series) for an example. The general idea is actually pretty similar to what we used for text generation. However, a quick check indicated that contemporary best practice is still to use models developed in econometrics and statistics, as these tend to be more accurate and more interpretable.*
*Parts of these lecture notes are based on [this tutorial](https://towardsdatascience.com/an-end-to-end-project-on-time-series-analysis-and-forecasting-with-python-4835e6bf050b). For an overview of the functionality available in the statsmodels package for timeseries, take a look [here](https://www.statsmodels.org/stable/tsa.html). Here is a [nice overview](https://people.duke.edu/~rnau/411arim.htm) of basic ARIMA models, which can help give some interpretation for the meaning of the `order` parameter that we use below.*
```
import sqlite3
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
plt.style.use('seaborn-whitegrid')
import statsmodels.api as sm
```
## Data: NOAA Climate
For this lecture, we're actually going to go back to the NOAA climate data that we used early in the quarter. Using the database that we constructed in Week 2, I'm going to grab data for Amundsen-Scott weather station, which is in the deep Antarctic.
```
with sqlite3.connect("../sql/temps.db") as conn:
cmd = \
"""
SELECT S.name, T.year, T.month, T.temp
FROM temperatures T
LEFT JOIN stations S ON T.id = S.id
WHERE S.NAME == "AMUNDSEN_SCOTT" AND T.year > 2000
"""
df = pd.read_sql_query(cmd, conn)
```
## Quick Data Prep
There's a bit of data preparation needed before we can do formal time series modeling. In particular, we need to make a **Date** column, and set it as the index for the timeseries that we care about.
```
df["Date"] = df["Year"].astype(str) + "-" + df["Month"].astype(str)
df["Date"] = pd.to_datetime(df["Date"])
df.head()
```
The next thing we need to do is set the Date as the index for our dataframe.
```
df.index = pd.DatetimeIndex(df["Date"], freq = "MS")
```
Finally, we are going to want to make predictions and test them, which means that we still perform a train/test split. I'm going to take the most recent 4 years as test data.
```
recency = -48
y_train = df.iloc[:recency]["Temp"]
y_test = df.iloc[recency:]["Temp"]
y_train.shape, y_test.shape
```
Finally, let's take a look at our training data.
```
m = y_train.mean()
time = y_train.index
fig, ax = plt.subplots(1, figsize = (6, 3))
ax.plot(time, np.ones(len(time))*m, label = "reference", color = "black")
ax.plot(y_train, label = "temp")
```
Notice that there is considerable seasonal variation, on the order of 30 degrees Celsius, within each year. This can make it difficult to see trends. For example, would you say that the overall trend in this image is upward, downward, or neutral? It's very difficult to say! Let's now introduce an exploratory tool that can help us think about this kind of question.
## Time series Decomposition
Time series decomposition is technique for exploratory data analysis that allows you to separate a time series into separate components, like this:
$$\text{data} = \text{trend} + \text{seasonal} + \text{noise}$$
Technically speaking, the above corresponds to an *additive* model. We can also use a multiplicative model:
$$\text{data} = \text{trend} \times \text{seasonal} \times \text{noise}$$
The choice of which model to use for decomposition can be a tricky one, but additive models are usually a sound place to start.
```
# specifying period not necessary because we have the frequency defined
# so this would also work:
# decomposition = sm.tsa.seasonal_decompose(y, model='additive')
decomposition = sm.tsa.seasonal_decompose(y_train, model='additive', period = 12, )
```
The object returned by the decomposition has a convenient `plot()` method.
```
fig = decomposition.plot()
```
Visually, you can "add up" the bottom three rows to obtain the top row. In a bit more detail:
1. The `trend` component is the model's best estimate of the overall direction of the data up or down.
2. The `seasonal` component is the model's best estimate of seasonal variation. It's constrained to be periodic (in this case, with period 12).
3. The `resid`ual is whatever part of the data is left over. Large residuals, or residuals with nonstationary distributions (distributions that change over time) suggest that the model we used for the decomposition was not very good.
There are many choices of model that can be used for timeseries decomposition. The default in the `statsmodels` package is based on moving averages and is relatively naive. Much more sophisticated models are typically used in practice.
Now that we've performed our decomposition, we're equipped to re-examine our question from earlier about the trend in temperatures at Amundsen-Scott station.
```
trend = decomposition.trend
time = trend.index
m = decomposition.trend.mean()
fig, ax = plt.subplots(1, figsize = (6, 3))
# ax.plot(time, np.ones(len(time))*m, label = "reference", color = "grey")
ax.plot(trend, label = "trend")
```
The trendline still displays considerable fluctuation. It looks like there may be some upward trend, but more data or a more sophisticated decomposition method would be required in order to say anything conclusive here.
## Time Series Forecasting
Decomposition is a useful tool that can help us spot trends in data. However, we often want to do better than trendspotting. For this, we should select and fit statistical models. The problem of choosing exactly which model to use is quite subtle, and the statistical theory of this problem can occupy entire courses and even research careers. If you're interested in learning the theory of time series analysis, Statistics 170 at UCLA appears to be the way to go.
When we take a machine-learning approach to this problem, we can, to an extent, circumvent the theoretical questions by evaluating models on validation data. That's what we'll do today. However, this approach can really only go so far -- sound footing in both the theory of time series and the domain you're studying are necessary for best results here.
Let's do an example using a SARIMAX model, which stands for "Seasonal AutoRegressive Integrated Moving Average with eXogenous regressors." These are fairly general and flexible models for seasonal data. When fitting models such as these, it's necessary to specify one or more `order` parameters used to determine the structure of the model.
```
order = (0, 1, 0)
```
This specification means that we are going to use a model with no autoregressive terms, first-order differencing, and no moving-average terms — that is, $(p, d, q) = (0, 1, 0)$.
The SARIMAX model also uses a separate *seasonal* mini-model, which requires its own parameters. The `12` here refers to the 12 months of the year.
```
seasonal_order = (0, 1, 1, 12)
model = sm.tsa.SARIMAX(y_train, order = order, seasonal_order = seasonal_order)
fit = model.fit()
print(fit.summary())
```
## Forecasting
By default, the `get_prediction()` method of the `fit` object will produce the model's "prediction" on training data.
```
train_preds = fit.get_prediction().predicted_mean
recency = -48
plt.plot(y_train[recency:], color = "grey", label = "data")
plt.plot(train_preds[recency:], zorder = 10, label = "modeled")
plt.legend()
```
To get the predictions on test data, we can pass explicit `start` and `end` parameters.
```
test_preds = fit.get_prediction(start = y_test.index.values[0],
end = y_test.index.values[-1])
test_preds = test_preds.predicted_mean
recency = -48
plt.plot(y_train[recency:], color = "grey", label = "data")
plt.scatter(y_test.index.values, y_test, color = "red", label = "test data", s = 5)
plt.plot(test_preds, zorder = 10, label = "modeled")
# plt.legend()
```
Looks pretty reasonable overall!
One important item here is missing: an expression of our uncertainty. Because these are statistical models, they have error bars. Communicating the error bars appropriately is a fundamental part of responsible forecasting. Here's a way to plot them.
```
test_preds = fit.get_prediction(start = y_test.index.values[0],
end = y_test.index.values[-1])
test_ci = test_preds.conf_int()
test_preds = test_preds.predicted_mean
recency = -48
plt.plot(y_train[recency:], color = "grey", label = "data")
plt.fill_between(test_ci.index, test_ci.iloc[:,0], test_ci.iloc[:,1], color = "gray", alpha = 0.3)
plt.scatter(y_test.index.values, y_test, color = "red", label = "test data", s = 5)
plt.plot(test_preds, zorder = 10, label = "modeled")
# plt.legend()
```
As we'd expect, although the model doesn't perfectly fit every individual piece of data, the data falls within the error bars the vast majority of the time.
Finally, we can also use our model for long-term predictions:
```
test_preds = fit.get_prediction(start = y_test.index.values[0],
end = pd.to_datetime('2030-01-01'))
test_ci = test_preds.conf_int()
test_preds = test_preds.predicted_mean
recency = -48
plt.plot(y_train[recency:], color = "grey", label = "data")
plt.scatter(y_test.index.values, y_test, color = "red", label = "test data", s = 5)
plt.plot(test_preds, zorder = 10, label = "modeled")
```
This model appears to predict a considerable increase in temperature, on the order of 1-2 ℃ over the next 10 years.
## Model Selection
In the last example, we used the `order` and `seasonal_order` parameters above to specify the model structure. We didn't really talk about what these parameters mean, and going into detail would take us well beyond the scope of this course. However, we do have to face the problem of how to *choose* these parameters. For this, a convenient approach is to define a predictive loss function and choose the combination of parameters that minimizes it. This is not the statistically principled way to do things -- it's very much in the machine learning spirit.
Here's an example in which we'll compare multiple possibilities for the `order` parameter by searching across all combinations within a specified range. In machine learning, this strategy is called "*grid search*." It's really only practical when we have a small number of combinations and when our model is fairly quick to train.
There are multiple choices for the loss function. A common one to use is the AIC, which is a measure that trades off model complexity and accuracy.
```
fit.aic
```
The AIC is an ok thing to compute when we don't have access to test data, but...we do! So, instead of computing the AIC, we'll instead compute the mean-square prediction error on the test set.
```
def test_MSE(fit, y_test):
test_preds = fit.get_prediction(start = y_test.index.values[0],
end = y_test.index.values[-1])
test_preds = test_preds.predicted_mean
return ((y_test - test_preds)**2).mean()
from itertools import product
# Candidate values (0 or 1) for each component of the (p, d, q) order.
a = range(0, 2)
# Track the best (lowest test-MSE) configuration found so far.
best_order = (0, 0, 0)
best_MSE = np.inf
# Grid search: fit a SARIMAX for every (p, d, q) combination and keep the
# one with the smallest mean-squared error on the held-out test set.
for order in product(a, a, a):
    model = sm.tsa.SARIMAX(y_train, order = order, seasonal_order = seasonal_order)
    fit = model.fit()
    MSE = test_MSE(fit, y_test)
    if MSE < best_MSE:
        # Report each improvement as the search progresses.
        print("Found MSE " + str(round(MSE, 2)) + " with order " + str(order))
        best_MSE = MSE
        best_order = order
        best_model = model
```
By sheer coincidence, the best order is the same one that I used to construct the original model above. One could also use this strategy to choose the `seasonal_order` parameters.
## Reminder
Time series forecasting is a challenging art that requires both statistical know-how and knowledge of the data source in order to do responsibly. The approach here, using validation on unseen test data to perform model selection, is a reasonable way to get started. If you are seriously interested in time series forecasting, however, there's no substitute for a course (like Stat 170) and lots of practice.
| github_jupyter |
```
import numpy as np
import pandas as pd
import math
from math import sin, cos, radians
import os
import matplotlib.pyplot as plt
import datetime
import scipy.stats as st
import scipy.signal as sgl
pd.set_option('display.max_columns', 500)
#import fastdtw
from scipy.spatial.distance import euclidean
from fastdtw import fastdtw
import scipy.interpolate as spi
#hide warning
import warnings
warnings.filterwarnings('ignore')
# file_name='./signature_data_preprocessed/U'+str(i)+'S'+str(j)+'.txt'
prefix = "./dataset/test/"
file_name = prefix + '202084182540.sig'
# file_name = prefix + '002_1_1.sig'
file=pd.read_csv(file_name,delimiter=' ', names=['X','Y','TStamp','Pres','EndPts'], header=None, skiprows=2)
file_size=len(file)
# file2_name = 'signaturecontent_08191708.sig'
file2_name = prefix + 'verify_2020821123742.sig'
file2=pd.read_csv(file2_name,delimiter=' ', names=['X','Y','TStamp','Pres','EndPts'], header=None, skiprows=2)
file2_size=len(file2)
# file3_name = prefix + '2020720125531.sig'
file3_name = prefix + '202084182532.sig'
file3=pd.read_csv(file3_name,delimiter=' ', names=['X','Y','TStamp','Pres','EndPts'], header=None, skiprows=2)
file3_size=len(file3)
file_fake_name = prefix + 'verify_2020821123742.sig'
file_fake=pd.read_csv(file_fake_name,delimiter=' ', names=['X','Y','TStamp','Pres','EndPts'], header=None, skiprows=2)
file_fake_size=len(file_fake)
file_sigpad_name = prefix + 'sigpad.sig'
file_sigpad=pd.read_csv(file_sigpad_name,delimiter=' ', names=['X','Y','TStamp','Pres','EndPts'], header=None)
file_sigpad_size=len(file_sigpad)
# Normalize timestamps: TStamp2 is milliseconds elapsed since each
# signature's first sample.
startTime = file['TStamp'][0]
file['TStamp2'] = (file['TStamp'] - startTime) #ms
startTime = file3['TStamp'][0]
file3['TStamp2'] = (file3['TStamp'] - startTime) #ms
# Compared with the online example, this data has about half as many
# sample points, which is still not too bad.
startTime = file_fake['TStamp'][0]
file_fake['TStamp2'] = (file_fake['TStamp'] - startTime) #ms
startTime = file_sigpad['TStamp'][0]
file_sigpad['TStamp2'] = (file_sigpad['TStamp'] - startTime) #ms
# file3
startTime = file2['TStamp'][0]
file2['TStamp2'] = (file2['TStamp'] - startTime) #ms
startTime = file3['TStamp'][0]
file3['TStamp2'] = (file3['TStamp'] - startTime) #ms
fig = plt.figure(figsize=[20,7])
# Compare the two signatures before any cleanup: trajectory (X vs Y) and
# each coordinate against time, for `file` (green) and `file3` (red).
ax1 = fig.add_subplot(2, 3, 1)
file.plot.scatter(x = "Y", y = "X", ax=ax1, marker='o',c='', edgecolors='g', ylim = (0, 1000))
file3.plot.scatter(x = "Y", y = "X", ax=ax1, marker='o',c='', edgecolors='r', ylim = (0, 1000))
ax2 = fig.add_subplot(2, 3, 2)
file.plot.scatter(x='TStamp2', y='X', ax=ax2, marker='o',c='', edgecolors='g')
file3.plot.scatter(x = "TStamp2", y = "X", ax=ax2, marker='o',c='', edgecolors='r')
ax3 = plt.subplot(2, 3, 3)
file.plot.scatter(x='TStamp2', y='Y', ax=ax3, marker='o',c='', edgecolors='g')
ax4 = fig.add_subplot(2, 3, 4)
file3.plot.scatter(x = "Y", y = "X", ax=ax4, marker='o',c='', edgecolors='g', ylim = (0, 1000))
ax5 = fig.add_subplot(2, 3, 5)
file3.plot.scatter(x='TStamp2', y='X', ax=ax5, marker='o',c='', edgecolors='g')
ax6 = plt.subplot(2, 3, 6)
file3.plot.scatter(x='TStamp2', y='Y', ax=ax6, marker='o',c='', edgecolors='g')
# It is clear that there are not enough scanned points to fully
# reproduce the signature image.
fig = plt.figure(figsize=[20, 30])
file_sigpad.plot.scatter(x='Y', y='X', marker='o',c='', edgecolors='g')
fig = plt.figure(figsize=[20, 30])
file.plot.scatter(x='Y', y='X', marker='o',c='', edgecolors='g')
# The commented block below is an abandoned velocity / standard-deviation
# preprocessing experiment, kept for reference only.
# ##Preprocessing
# P=[]
# V=[]
# SDX=[]
# SDY=[]
# fileP = file3
# file_size=len(fileP)
# X=fileP['X']
# Y=fileP['Y']
# TS=fileP['TStamp2']
# BS=fileP['EndPts']
# # AZ=file['AZ']
# # AL=file['AL']
# # P=file['P']
# aX=sum(X)/file_size
# aY=sum(Y)/file_size
# for k in range(0,file_size-1):
#     if TS[k]==TS[k+1]:
#         X[k+1]=(X[k]+X[k+1])/2
#         Y[k+1]=(Y[k]+Y[k+1])/2
#         TS[k+1]=(TS[k]+1)
#         BS[k+1]=(BS[k]+BS[k+1])/2
#         # AZ[k+1]=(AZ[k]+AZ[k+1])/2
#         # AL[k+1]=(AL[k]+AL[k+1])/2
#         # P[k+1]=(P[k]+P[k+1])/2
#     if k<file_size-1:
#         V.append(((math.sqrt((X[k+1]-X[k])**2+(Y[k+1]-Y[k])**2))*(TS[file_size-1]-TS[0]))/(TS[k+1]-TS[k]))
#     SDX.append((X[k]-aX)**2)
#     SDY.append((Y[k]-aY)**2)
# SDX.append((X[file_size-1]-aX)**2)
# SDY.append((Y[file_size-1]-aY)**2)
# V.append(0)
# # data={'X':X,'Y':Y,'TS':TS,'BS':BS,'AZ':AZ,'AL':AL,'P':P,'V':V,'SDX':SDX,'SDY':SDY}
# data={'X':X,'Y':Y,'TStamp2':TS,'EndPts':BS,'P':P,'V':V,'SDX':SDX,'SDY':SDY}
# fig = plt.figure(figsize=[6,4])
# # plt.scatter(x = data["Y"], y = data["V"], marker='o',c='', edgecolors='g')
# plt.scatter(x = list(range(0,len(file3), 1)), y = data["V"], marker='o',c='', edgecolors='g')
# # plt.bar(list(range(0,len(data1), 1)), data['V'])
# # print(data)
# Plot the raw shape of file3 for reference.
fig = plt.figure(figsize=[20, 30])
file3.plot.scatter(x='Y', y='X', marker='o',c='', edgecolors='g')
# Plot the raw sampling intervals to show the sampling rate is uneven.
intervals = []
for index, row in file.iterrows():
    if index == 0:
        intervals.append(0)
        continue
    # print(file['TStamp2'][index-1])
    interval = file['TStamp2'][index]-file['TStamp2'][index-1]
    if interval > 300:
        # Gaps longer than 300 ms are pen-up pauses; exclude them from the chart.
        continue
    intervals.append(interval)
# intervals = np.array(intervals)
fig = plt.figure(figsize=[20, 10])
plt.bar(range(len(intervals)), intervals)
# x-axis label
plt.xlabel("Point sequence")
# y-axis label
plt.ylabel("Time interval(ms)")
# chart title
plt.title("Histogram")
fig.show() # it is clearly shown that the intervals are not the same.
def remove_duplicated_point(df):
    """Collapse consecutive rows that repeat the same (X, Y) coordinate.

    Keeps the first row of each run of identical points.  When a dropped
    duplicate carries EndPts == 1 (pen-up marker), the flag is propagated
    to the kept row so stroke boundaries survive deduplication.

    Parameters
    ----------
    df : pandas.DataFrame with columns X, Y, TStamp, Pres, EndPts, TStamp2.

    Returns
    -------
    pandas.DataFrame with the same columns, re-indexed from 0.
    """
    df_new = df.drop(index=df.index)  # empty frame with the same columns
    if df.empty:
        # Guard: the original crashed on df['X'][0] for empty input.
        return df_new
    old_x = df['X'][0]
    old_y = df['Y'][0]
    for index, row in df.iterrows():
        if row['X'] != old_x or row['Y'] != old_y or index == 0:
            df_new.loc[len(df_new)] = {'X': row['X'], 'Y': row['Y'], 'TStamp': row['TStamp'], 'Pres': row['Pres'], 'EndPts': row['EndPts'],
                                       'TStamp2': row['TStamp2']}
            old_x = row['X']
            old_y = row['Y']
        # A dropped duplicate may be the pen-up sample: keep its flag.
        elif row['X'] == old_x and row['Y'] == old_y and row['EndPts'] == 1:
            # BUGFIX: the original chained assignment
            # df_new.iloc[len(df_new)-1]['EndPts'] = 1 writes into a temporary
            # copy of the row and is silently lost; .loc with the column label
            # updates the frame in place.
            df_new.loc[len(df_new) - 1, 'EndPts'] = 1
    return df_new
"""
The Ramer-Douglas-Peucker algorithm roughly ported from the pseudo-code provided
by http://en.wikipedia.org/wiki/Ramer-Douglas-Peucker_algorithm
"""
from math import sqrt
def distance(a, b):
    """Euclidean distance between 2-D points ``a`` and ``b``."""
    dx = a[0] - b[0]
    dy = a[1] - b[1]
    return sqrt(dx * dx + dy * dy)
def point_line_distance(point, start, end):
    """Perpendicular distance from ``point`` to the line through ``start``
    and ``end``; falls back to the point-to-point distance when the segment
    is degenerate (``start == end``)."""
    if start == end:
        return distance(point, start)
    # |cross product| of (end-start) and (start-point) gives twice the
    # triangle area; dividing by the base length yields the height.
    cross = abs(
        (end[0] - start[0]) * (start[1] - point[1]) - (start[0] - point[0]) * (end[1] - start[1])
    )
    base = sqrt(
        (end[0] - start[0]) ** 2 + (end[1] - start[1]) ** 2
    )
    return cross / base
def rdp(points, epsilon):
    """
    Ramer-Douglas-Peucker simplification: reduces a series of points to a
    coarser version that keeps the general shape, discarding points whose
    perpendicular distance to the chord of the segment is below ``epsilon``.
    """
    max_dist = 0.0
    split_at = 0
    for pos in range(1, len(points) - 1):
        dist = point_line_distance(points[pos], points[0], points[-1])
        if dist > max_dist:
            split_at = pos
            max_dist = dist
    if max_dist < epsilon:
        # Everything between the endpoints is close enough to the chord.
        return [points[0], points[-1]]
    # Recurse on both halves; the split point is shared, so drop it once.
    left = rdp(points[:split_at + 1], epsilon)
    right = rdp(points[split_at:], epsilon)
    return left[:-1] + right
def rdp_precoss(df):
    """Run RDP over a signature DataFrame and rebuild it from the survivors.

    Each row is packed into an (X, Y, TStamp, Pres, EndPts, TStamp2) tuple
    so it can be fed to ``rdp``; the tiny epsilon only removes points that
    are (almost) exactly collinear.
    """
    cols = ['X', 'Y', 'TStamp', 'Pres', 'EndPts', 'TStamp2']
    as_tuples = [tuple(df.loc[i, c] for c in cols) for i in range(df.shape[0])]
    kept = rdp(as_tuples, 0.000000001)
    return pd.DataFrame(kept, columns=cols)
# NOTE(review): this cell duplicates the earlier interval plot (same data,
# plus a point-count printout); consider removing one of the two.
intervals = []
for index, row in file.iterrows():
    if index == 0:
        intervals.append(0)
        continue
    # print(file['TStamp2'][index-1])
    interval = file['TStamp2'][index]-file['TStamp2'][index-1]
    if interval > 300:
        # Gaps longer than 300 ms are pen-up pauses; exclude them from the chart.
        continue
    intervals.append(interval)
# intervals = np.array(intervals)
print(len(intervals))
fig = plt.figure(figsize=[20, 10])
plt.bar(range(len(intervals)), intervals)
# x-axis label
plt.xlabel("Point sequence")
# y-axis label
plt.ylabel("Time interval(ms)")
# chart title
plt.title("Histogram")
fig.show() # it is clearly shown that the intervals are not the same
# Cubic-Spline to add points in stroke and make the curves smooth
def interpolate_points(df):
    """Densify each stroke with cubic B-spline interpolation over time.

    Strokes are delimited by pen-up rows (EndPts == 1); interpolation is
    done per stroke because interpolating across a pen-up gap would invent
    ink between strokes.  For each stroke, X(t) and Y(t) are fitted with
    scipy.interpolate.splrep (k=3, s=1000) against TStamp2 and resampled
    every 7 ms.  Strokes spanning <= 15 ms or <= 6 points are skipped.
    Plots original vs interpolated samples as a side effect.  Returns the
    retained pen-up rows merged with the synthetic rows, sorted by TStamp2
    and re-indexed; synthetic rows carry TStamp/Pres/EndPts = 0.
    """
    plt.rcParams['font.sans-serif']=['SimHei'] # render CJK glyphs in labels
    plt.rcParams['axes.unicode_minus']=False # render the minus sign correctly
    df_new = df.loc[df["EndPts"] == 1]
    # Split at pen-up points; locate the stroke boundaries.
    EndPts = [0]
    EndPts.extend(list(df_new.index))
    EndPts_len = len(EndPts)
    if EndPts_len < 2:
        EndPts.append(len(df)-2) # no pen-up found: treat the last point as one
        EndPts_len += 1
    print(EndPts)
    i = 0
    new_x_all = []
    iy3_x_all = []
    iy3_y_all = []
    while i < EndPts_len - 1:
        #data preparation
        start = EndPts[i]
        end = EndPts[i+1] + 1
        if start == 0:
            X= np.array(df['TStamp2'][0:end].tolist())
            Y= np.array(df['X'][0:end].tolist())
            Y2= np.array(df['Y'][0:end].tolist())
            if df['TStamp2'][start+1:end].max() - df['TStamp2'][start+1:end].min() <=15 or end - start <= 6:
                i+=1
                continue
            new_x=np.arange(0,df['TStamp2'][0:end].max(),7) #define interpolation sample times (every 7 ms)
        else:
            X= np.array(df['TStamp2'][start+1:end].tolist())
            Y= np.array(df['X'][start+1:end].tolist())
            Y2= np.array(df['Y'][start+1:end].tolist())
            if df['TStamp2'][start+1:end].max() - df['TStamp2'][start+1:end].min() <=15 or end - start <= 6:
                i+=1
                continue
            new_x=np.arange(df['TStamp2'][start+1:end].min(),df['TStamp2'][start+1:end].max(),7) #define interpolation sample times (every 7 ms)
        # # linear (k=1) spline variant, kept for reference:
        # ipo1=spi.splrep(X,Y,k=1,s=10)
        # iy1=spi.splev(new_x,ipo1)
        # Cubic B-spline fit of X(t) and Y(t), evaluated on the new time grid.
        ipo3=spi.splrep(X,Y,k=3, s=1000) # fit spline parameters to the samples
        iy3=spi.splev(new_x,ipo3) # evaluate the spline at the new times
        ipo3_y=spi.splrep(X,Y2,k=3, s=1000) # fit spline parameters to the samples
        iy3_y=spi.splev(new_x,ipo3_y) # evaluate the spline at the new times
        new_x_all.extend(new_x)
        iy3_x_all.extend(iy3)
        iy3_y_all.extend(iy3_y)
        i += 1
    print(len(iy3_x_all))
    X_all= np.array(df['TStamp2'].tolist())
    Y_all = np.array(df['X'].tolist())
    Y2_all = np.array(df['Y'].tolist())
    ## plot original samples vs interpolated points
    fig,(ax1,ax2)=plt.subplots(2,1,figsize=(10,12))
    ax1.plot(X_all,Y2_all,'o',label='样本点')
    ax1.plot(new_x_all,iy3_y_all,'*-',label='插值点')
    ax1.set_ylim(Y2_all.min()-20,Y2_all.max()+20)
    ax1.set_ylabel('Y坐标')
    ax1.set_title('Y三次线性插值')
    ax1.legend()
    ax2.plot(X_all,Y_all,'o',label='样本点')
    ax2.plot(new_x_all,iy3_x_all,'*-',label='插值点')
    ax2.set_ylim(Y_all.min()-20,Y_all.max()+20)
    ax2.set_ylabel('X坐标')
    ax2.set_title('X三次样条插值')
    ax2.legend()
    fig.show()
    # df_new = df.loc[df["EndPts"] == 1]
    # result = pd.concat(frames)
    # df_new.index = range(len(df_new.index))
    # Merge the synthetic points with the retained pen-up rows.
    df_new2 = df.drop(index=df.index)
    for i, val in enumerate(new_x_all):
        # if val in df['TStamp2'].tolist():
        #     continue
        # if val in df['TStamp2'].tolist():
        #     continue
        df_new2.loc[len(df_new2)] = {'X': iy3_x_all[i], 'Y': iy3_y_all[i], 'TStamp': 0, 'Pres': 0, 'EndPts': 0,'TStamp2': val}
    df_new = pd.concat([df_new, df_new2])
    df_new = df_new.sort_values(by=['TStamp2'])
    df_new.index = range(len(df_new.index))
    # print(df_new)
    return df_new
# return df
# df_new = remove_duplicated_point(df_new)
# fig = plt.figure(figsize=[10,6])
# plt.plot( df_new['TStamp2'], df_new["X"],'c*-')
# fig.show()
# fig = plt.figure(figsize=[10,6])
# plt.plot( df['TStamp2'], df["X"],'c*-')
# fig.show()
# NOTE(review): the original comments said "RDP algorithm", but
# remove_duplicated_point only drops consecutive identical points; the real
# RDP calls (rdp_precoss) are commented out below.
file = remove_duplicated_point(file) # drop consecutive duplicated points
file3 = remove_duplicated_point(file3) # drop consecutive duplicated points
file_fake = remove_duplicated_point(file_fake) # drop consecutive duplicated points
# file = rdp_precoss(file) # use RDP algorithm to remove duplicated points
# file3 = rdp_precoss(file3) # use RDP algorithm to remove duplicated points
# file_fake = rdp_precoss(file_fake) # use RDP algorithm to remove duplicated points
# Smooth and densify each signature with cubic-spline interpolation.
file = interpolate_points(file)
file3 = interpolate_points(file3)
file_fake = interpolate_points(file_fake)
from decimal import Decimal
def get_gravity_point(points):
    """
    @brief  Centroid (center of gravity) of a polygon, via the shoelace formula
            evaluated in Decimal arithmetic for precision.
    @param  points  sequence of (x, y) pairs; values may be NumPy scalars or
                    plain Python numbers.
    @return [x, y] of the centroid, or an empty list for degenerate input
            (fewer than 3 points, or zero signed area).
    """
    if len(points) <= 2:
        return list()
    area = Decimal(0.0)
    x, y = Decimal(0.0), Decimal(0.0)
    for i in range(len(points)):
        # GENERALIZED: float() accepts both NumPy scalars and plain Python
        # numbers; the original .item() call crashed on plain floats/ints.
        lng = Decimal(float(points[i][0]))
        lat = Decimal(float(points[i][1]))
        # points[i-1] wraps to the last vertex when i == 0, closing the ring.
        nextlng = Decimal(float(points[i-1][0]))
        nextlat = Decimal(float(points[i-1][1]))
        tmp_area = (nextlng*lat - nextlat*lng)/Decimal(2.0)
        area += tmp_area
        x += tmp_area*(lng+nextlng)/Decimal(3.0)
        y += tmp_area*(lat+nextlat)/Decimal(3.0)
    if area == 0:
        # BUGFIX: a degenerate (collinear) polygon has zero area and made the
        # divisions below raise DivisionByZero; treat it like the short-input
        # case and return an empty list.
        return list()
    x = x/area
    y = y/area
    return [float(x), float(y)]
# Angle between two lines, each given by two points.
def get_angle_betw_lines(x1, y1, x2, y2, x3, y3, x4, y4):
    """Return the absolute angle (degrees) between line (x1,y1)-(x2,y2) and
    line (x3,y3)-(x4,y4), plus a fixed 0.5-degree offset (Cobb-style)."""
    slope_a = (y2 - y1) / (float(x2 - x1))
    slope_b = (y4 - y3) / (float(x4 - x3))
    tan_between = (slope_a - slope_b) / (float(1 + slope_a * slope_b))
    return math.fabs(np.arctan(tan_between) * 180 / np.pi) + 0.5
def get_grivity_angle(P):
    """Angle (degrees) between the horizontal axis and the line joining the
    centroids of the left and right halves of polygon ``P`` (split at the
    x coordinate of P's centroid)."""
    halves = {'left': [], 'right': []}
    split_x = P.centroid.x
    for px, py in P.exterior.coords:
        side = 'left' if px <= split_x else 'right'
        halves[side].append([px, py])
    left_centroid = Polygon(halves['left']).centroid
    right_centroid = Polygon(halves['right']).centroid
    # print( P_left.centroid)
    return get_angle_betw_lines(left_centroid.x, left_centroid.y,
                                right_centroid.x, right_centroid.y,
                                0, 0, 10, 0)
# Rotate a shapely Polygon about a point.
def rotate_polygon(polygon, angle, center_point=(0, 0)):
    """Return ``polygon`` rotated counter-clockwise by ``angle`` degrees
    around ``center_point`` (origin by default); corners are (x, y) pairs."""
    corners = [rotate_point(c, angle, center_point) for c in polygon.exterior.coords]
    return Polygon(corners)
def rotate_point(point, angle, center_point=(0, 0)):
    """Rotate ``point`` counter-clockwise by ``angle`` degrees around
    ``center_point`` (origin by default) and return the new (x, y) tuple."""
    theta = radians(angle % 360)
    # Translate so the rotation center sits at the origin.
    dx = point[0] - center_point[0]
    dy = point[1] - center_point[1]
    # Apply the 2-D rotation matrix, then translate back.
    rotated_x = dx * cos(theta) - dy * sin(theta)
    rotated_y = dx * sin(theta) + dy * cos(theta)
    return (rotated_x + center_point[0], rotated_y + center_point[1])
# gravity_x, gravity_y = gravity_normalize(file)
# print(gravity_x, gravity_y)
from shapely.geometry import Polygon
def rotate_graphic(file):
    """Rotate the signature polygon until it is roughly horizontal.

    Builds a shapely Polygon from the (X, Y) rows (coordinates beyond
    +/-100000 are dropped as outliers), then iteratively rotates it in
    theta/24-degree steps around its centroid — in whichever direction an
    initial probe shows reduces the angle — until the line joining the
    centroids of the left and right halves is within 6 degrees of
    horizontal.  Plots before/after as a side effect and writes the rotated
    coordinates back into the X/Y columns of ``file``, which is returned.
    """
    points_left = []
    points_right = []
    points = []
    for index, row in file.iterrows():
        # Skip wild outliers that would distort the polygon shape.
        if abs(row['X']) > 100000 or abs(row['Y']) > 100000:
            continue
        points.append([row['X'], row['Y']])
    P = Polygon(points)
    print(P.centroid)
    plt.figure()
    plt.plot(P.centroid.y,P.centroid.x,"rx")
    plt.show() # if you need...
    theta = get_grivity_angle(P)
    theta_old = theta
    print(theta)
    # Probe one step in each direction; keep rotating the way that shrinks
    # the angle until it drops below the 6-degree tolerance.
    if theta >6 and get_grivity_angle(rotate_polygon(P, theta_old/24, (P.centroid.x, P.centroid.y))) < theta:
        while theta > 6.0 :
            # print("here")
            # if theta <= 90: # clockwise
            P = rotate_polygon(P, theta_old/24, (P.centroid.x, P.centroid.y))
            theta = get_grivity_angle(P)
    elif theta >6 and get_grivity_angle(rotate_polygon(P, -theta_old/24, (P.centroid.x, P.centroid.y))) < theta_old:
        while theta > 6.0 :
            P = rotate_polygon(P, -theta_old/24, (P.centroid.x, P.centroid.y))
            theta = get_grivity_angle(P)
    P_rotated = P
    P_rotated_points_x = []
    P_rotated_points_y = []
    for point in P_rotated.exterior.coords:
        P_rotated_points_x.append(point[0])
        P_rotated_points_y.append(point[1])
    print(P_rotated.centroid)
    # Plot the original (green) vs rotated (blue) point clouds.
    fig = plt.figure(figsize=[6,4])
    plt.scatter(x = file["Y"], y = file["X"], marker='o',c='', edgecolors='g')
    # plt.plot(gravity_y, gravity_x, 'rx')
    # plt.plot(P_left.centroid.y, P_left.centroid.x, 'rx')
    # plt.plot(P_right.centroid.y, P_right.centroid.x, 'rx')
    plt.scatter(x = P_rotated_points_y, y = P_rotated_points_x, marker='o',c='', edgecolors='b')
    points_left = []
    points_right = []
    for point in P_rotated.exterior.coords:
        if point[0] <= P_rotated.centroid.x:
            points_left.append([point[0], point[1]])
        else:
            points_right.append([point[0], point[1]])
    P_left = Polygon(points_left)
    P_right = Polygon(points_right)
    plt.plot(P_left.centroid.y, P_left.centroid.x, 'bx')
    plt.plot(P_right.centroid.y, P_right.centroid.x, 'bx')
    theta = get_angle_betw_lines( P_left.centroid.x, P_left.centroid.y, P_right.centroid.x, P_right.centroid.y,
            0,0, 10,0)
    print(theta)
    fig.show()
    # exterior.coords repeats the first vertex at the end; drop it so lengths
    # match the frame.  NOTE(review): this assumes no rows were skipped as
    # outliers above, otherwise the assignment raises a length mismatch.
    file['X'] = P_rotated_points_x[0:-1]
    file['Y'] = P_rotated_points_y[0:-1]
    return file
# file = rotate_graphic(file)
# file3 = rotate_graphic(file3)
# file_fake = rotate_graphic(file_fake)
# # 求两直线夹角
# def get_angle_betw_lines(x1, y1, x2, y2, x3, y3, x4, y4):
# k1 = (y2-y1)/(float(x2-x1))
# k2 = (y4-y3)/(float(x4-x3))
# Cobb = math.fabs(np.arctan((k1-k2)/(float(1 + k1*k2)))*180/np.pi)+0.5
# return Cobb
# def get_grivity_angle(P):
# points_left = []
# points_right = []
# points_x = []
# for point in P.exterior.coords:
# points_x.append(point[0])
# # points_y.append(point[1])
# for point in P.exterior.coords:
# if point[0] <= np.mean(points_x):
# points_left.append([point[0], point[1]])
# else:
# points_right.append([point[0], point[1]])
# left_xs, left_ys = zip(*points_left) #create lists of x and y values
# left_xs = list(left_xs)
# left_ys = list(left_ys)
# right_xs, right_ys = zip(*points_right) #create lists of x and y values
# right_xs = list(right_xs)
# right_ys = list(right_ys)
# # print( P_left.centroid)
# theta = get_angle_betw_lines( np.mean(left_xs), np.mean(left_ys), np.mean(right_xs), np.mean(right_xs),
# 0,0, 10,0)
# return theta
# #rotate for Polygon
# def rotate_polygon(polygon, angle, center_point=(0, 0)):
# """Rotates the given polygon which consists of corners represented as (x,y)
# around center_point (origin by default)
# Rotation is counter-clockwise
# Angle is in degrees
# """
# rotated_polygon = []
# for corner in polygon.exterior.coords:
# rotated_corner = rotate_point(corner, angle, center_point)
# rotated_polygon.append(rotated_corner)
# rotated_polygon = Polygon(rotated_polygon)
# return rotated_polygon
# def rotate_point(point, angle, center_point=(0, 0)):
# """Rotates a point around center_point(origin by default)
# Angle is in degrees.
# Rotation is counter-clockwise
# """
# angle_rad = radians(angle % 360)
# # Shift the point so that center_point becomes the origin
# new_point = (point[0] - center_point[0], point[1] - center_point[1])
# new_point = (new_point[0] * cos(angle_rad) - new_point[1] * sin(angle_rad),
# new_point[0] * sin(angle_rad) + new_point[1] * cos(angle_rad))
# # Reverse the shifting we have done
# new_point = (new_point[0] + center_point[0], new_point[1] + center_point[1])
# return new_point
# # gravity_x, gravity_y = gravity_normalize(file)
# # print(gravity_x, gravity_y)
# from shapely.geometry import Polygon
# def rotate_graphic(file):
# points_left = []
# points_right = []
# points = []
# for index, row in file.iterrows():
# if abs(row['X']) > 100000 or abs(row['Y']) > 100000:
# continue
# points.append([row['X'], row['Y']])
# P = Polygon(points)
# print(P.centroid)
# xs, ys = zip(*points) #create lists of x and y values
# xs = list(xs)
# ys = list(ys)
# print(max(xs),min(xs),max(ys),min(ys))
# plt.figure()
# plt.plot(ys,xs)
# plt.plot(np.mean(ys),np.mean(xs),"rx")
# plt.show() # if you need...
# print(np.mean(xs),np.mean(ys))
# theta = get_grivity_angle(P)
# theta_old = theta
# print("theta_old:", theta_old)
# if theta >6 and get_grivity_angle(rotate_polygon(P, theta_old/24, (np.mean(xs), np.mean(ys)))) < theta:
# while theta > 6.0 :
# # print("here")
# # if theta <= 90: # clock-wise 顺时针
# P = rotate_polygon(P, theta_old/24, (np.mean(xs), np.mean(ys)))
# theta = get_grivity_angle(P)
# print(theta)
# elif theta >6 and get_grivity_angle(rotate_polygon(P, -theta_old/24, (np.mean(xs), np.mean(ys)))) < theta_old:
# while theta > 6.0 :
# P = rotate_polygon(P, -theta_old/24, (np.mean(xs), np.mean(ys)))
# theta = get_grivity_angle(P)
# P_rotated = P
# P_rotated_points_x = []
# P_rotated_points_y = []
# for point in P_rotated.exterior.coords:
# P_rotated_points_x.append(point[0])
# P_rotated_points_y.append(point[1])
# print(P_rotated.centroid)
# print(theta,"here")
# fig = plt.figure(figsize=[6,4])
# plt.scatter(x = file["Y"], y = file["X"], marker='o',c='', edgecolors='g')
# # plt.plot(gravity_y, gravity_x, 'rx')
# # plt.plot(P_left.centroid.y, P_left.centroid.x, 'rx')
# # plt.plot(P_right.centroid.y, P_right.centroid.x, 'rx')
# plt.scatter(x = P_rotated_points_y, y = P_rotated_points_x, marker='o',c='', edgecolors='b')
# points_left = []
# points_right = []
# for point in P_rotated.exterior.coords:
# if point[0] <= P_rotated.centroid.x:
# points_left.append([point[0], point[1]])
# else:
# points_right.append([point[0], point[1]])
# P_left = Polygon(points_left)
# P_right = Polygon(points_right)
# left_xs, left_ys = zip(*points_left) #create lists of x and y values
# left_xs = list(left_xs)
# left_ys = list(left_ys)
# right_xs, right_ys = zip(*points_right) #create lists of x and y values
# right_xs = list(right_xs)
# right_ys = list(right_ys)
# plt.plot(np.mean(left_xs), np.mean(left_ys), 'rx')
# plt.plot(np.mean(right_xs), np.mean(right_ys), 'rx')
# # theta = get_angle_betw_lines( P_left.centroid.x, P_left.centroid.y, P_right.centroid.x, P_right.centroid.y,
# # 0,0, 10,0)
# # print(theta)
# fig.show()
# # file['X'] = P_rotated_points_x[0:-1]
# # file['Y'] = P_rotated_points_y[0:-1]
# return file
# file = rotate_graphic(file)
# # file3 = rotate_graphic(file3)
# # file_fake = rotate_graphic(file_fake)
# add columns: seed normalX/normalY with the raw coordinates so the
# normalization step below can overwrite them in place.
file['normalX'] = file['X']
file['normalY'] = file['Y']
file3['normalX'] = file3['X']
file3['normalY'] = file3['Y']
file_fake['normalX'] = file_fake['X']
file_fake['normalY'] = file_fake['Y']
# Size normalization and position normalization.
def normalizeFile(normalInputFile):
    """Scale the signature into a 200x500 bounding box, then center it on
    its mean.  Overwrites the normalX/normalY columns in place and returns
    the same DataFrame.
    """
    box_width = 200
    box_height = 500
    x_min, x_max = normalInputFile['X'].min(), normalInputFile['X'].max()
    y_min, y_max = normalInputFile['Y'].min(), normalInputFile['Y'].max()
    # Size normalization: map X into [0, width] and Y into [0, height].
    normalInputFile['normalX'] = box_width * ((normalInputFile['X'] - x_min) / (x_max - x_min))
    normalInputFile['normalY'] = box_height * ((normalInputFile['Y'] - y_min) / (y_max - y_min))
    # Position normalization: shift so the mean lands at the origin.
    normalInputFile['normalX'] = normalInputFile['normalX'] - normalInputFile['normalX'].mean()
    normalInputFile['normalY'] = normalInputFile['normalY'] - normalInputFile['normalY'].mean()
    return normalInputFile
# # Gravity normalization 重心规整
# def gravity_normalize(df):
# points = []
# for index, row in df.iterrows():
# points.append([row['normalX'], row['normalY']])
# return get_centerpoint(points)
# def get_centerpoint(lis):
# area = 0.0
# x,y = 0.0,0.0
# a = len(lis)
# for i in range(a):
# lat = lis[i][0] #weidu
# lng = lis[i][1] #jingdu
# if i == 0:
# lat1 = lis[-1][0]
# lng1 = lis[-1][1]
# else:
# lat1 = lis[i-1][0]
# lng1 = lis[i-1][1]
# fg = (lat*lng1 - lng*lat1)/2.0
# area += fg
# x += fg*(lat+lat1)/3.0
# y += fg*(lng+lng1)/3.0
# x = x/area
# y = y/area
# return x,y
# Normalize size/position of all three signatures.
file = normalizeFile(file)
file3 = normalizeFile(file3)
file_fake = normalizeFile(file_fake)
# Compare the signatures after normalization (green/red/blue overlay).
fig = plt.figure(figsize=[6,4])
plt.scatter(x = file["normalY"], y = file["normalX"], marker='o',c='', edgecolors='g')
plt.scatter(x= file3['normalY'], y= file3['normalX'] , marker='o',c='', edgecolors='r')
plt.scatter(x= file_fake['normalY'], y= file_fake['normalX'] , marker='o',c='', edgecolors='b')
fig.show()
# # 求两直线夹角
# def get_angle_betw_lines(x1, y1, x2, y2, x3, y3, x4, y4):
# k1 = (y2-y1)/(float(x2-x1))
# k2 = (y4-y3)/(float(x4-x3))
# Cobb = math.fabs(np.arctan((k1-k2)/(float(1 + k1*k2)))*180/np.pi)+0.5
# return Cobb
# def get_grivity_angle(P):
# points_left = []
# points_right = []
# for point in P.exterior.coords:
# if point[0] <= P.centroid.x:
# points_left.append([point[0], point[1]])
# else:
# points_right.append([point[0], point[1]])
# P_left = Polygon(points_left)
# P_right = Polygon(points_right)
# # plt.plot(P_left.centroid.y, P_left.centroid.x, 'bx')
# # plt.plot(P_right.centroid.y, P_right.centroid.x, 'bx')
# theta = get_angle_betw_lines( P_left.centroid.x, P_left.centroid.y, P_right.centroid.x, P_right.centroid.y,
# 0,0, 10,0)
# return theta
# #rotate for Polygon
# def rotate_polygon(polygon, angle, center_point=(0, 0)):
# """Rotates the given polygon which consists of corners represented as (x,y)
# around center_point (origin by default)
# Rotation is counter-clockwise
# Angle is in degrees
# """
# rotated_polygon = []
# for corner in polygon.exterior.coords:
# rotated_corner = rotate_point(corner, angle, center_point)
# rotated_polygon.append(rotated_corner)
# rotated_polygon = Polygon(rotated_polygon)
# return rotated_polygon
# def rotate_point(point, angle, center_point=(0, 0)):
# """Rotates a point around center_point(origin by default)
# Angle is in degrees.
# Rotation is counter-clockwise
# """
# angle_rad = radians(angle % 360)
# # Shift the point so that center_point becomes the origin
# new_point = (point[0] - center_point[0], point[1] - center_point[1])
# new_point = (new_point[0] * cos(angle_rad) - new_point[1] * sin(angle_rad),
# new_point[0] * sin(angle_rad) + new_point[1] * cos(angle_rad))
# # Reverse the shifting we have done
# new_point = (new_point[0] + center_point[0], new_point[1] + center_point[1])
# return new_point
# # gravity_x, gravity_y = gravity_normalize(file)
# # print(gravity_x, gravity_y)
# from shapely.geometry import Polygon
# def rotate_graphic(file):
# points_left = []
# points_right = []
# points = []
# for index, row in file.iterrows():
# points.append([row['normalX'], row['normalY']])
# P = Polygon(points)
# print(P.centroid)
# # for index, row in file.iterrows():
# # if row['normalX'] <= P.centroid.x:
# # points_left.append([row['normalX'], row['normalY']])
# # else:
# # points_right.append([row['normalX'], row['normalY']])
# # P_left = Polygon(points_left)
# # P_right = Polygon(points_right)
# # #print(P_left.centroid, P_right.centroid)
# # theta = get_angle_betw_lines( P_left.centroid.x, P_left.centroid.y, P_right.centroid.x, P_right.centroid.y,
# # 0,0, 10,0)
# theta = get_grivity_angle(P)
# theta_old = theta
# print(theta)
# if theta >6 and get_grivity_angle(rotate_polygon(P, theta_old/24, (P.centroid.x, P.centroid.y))) < theta:
# while theta > 6.0 :
# # print("here")
# # if theta <= 90: # clock-wise 顺时针
# P = rotate_polygon(P, theta_old/24, (P.centroid.x, P.centroid.y))
# theta = get_grivity_angle(P)
# elif theta >6 and get_grivity_angle(rotate_polygon(P, -theta_old/24, (P.centroid.x, P.centroid.y))) < theta_old:
# while theta > 6.0 :
# P = rotate_polygon(P, -theta_old/24, (P.centroid.x, P.centroid.y))
# theta = get_grivity_angle(P)
# P_rotated = P
# P_rotated_points_x = []
# P_rotated_points_y = []
# for point in P_rotated.exterior.coords:
# P_rotated_points_x.append(point[0])
# P_rotated_points_y.append(point[1])
# print(P_rotated.centroid)
# fig = plt.figure(figsize=[6,4])
# plt.scatter(x = file["normalY"], y = file["normalX"], marker='o',c='', edgecolors='g')
# # plt.plot(gravity_y, gravity_x, 'rx')
# # plt.plot(P_left.centroid.y, P_left.centroid.x, 'rx')
# # plt.plot(P_right.centroid.y, P_right.centroid.x, 'rx')
# plt.scatter(x = P_rotated_points_y, y = P_rotated_points_x, marker='o',c='', edgecolors='b')
# points_left = []
# points_right = []
# for point in P_rotated.exterior.coords:
# if point[0] <= P_rotated.centroid.x:
# points_left.append([point[0], point[1]])
# else:
# points_right.append([point[0], point[1]])
# P_left = Polygon(points_left)
# P_right = Polygon(points_right)
# plt.plot(P_left.centroid.y, P_left.centroid.x, 'bx')
# plt.plot(P_right.centroid.y, P_right.centroid.x, 'bx')
# theta = get_angle_betw_lines( P_left.centroid.x, P_left.centroid.y, P_right.centroid.x, P_right.centroid.y,
# 0,0, 10,0)
# print(theta)
# # plt.plot(gravity_x, gravity_y, 'bx')
# fig.show()
# file['normalX'] = P_rotated_points_x[0:-1]
# file['normalY'] = P_rotated_points_y[0:-1]
# return file
# file = rotate_graphic(file)
# file3 = rotate_graphic(file3)
# The plot of normalX over time before length normalization.
fig = plt.figure(figsize=[9,6])
plt.plot(file["TStamp2"], file["normalX"],'c*-', )
plt.plot(file3['TStamp2'], file3['normalX'] , 'm.-.')
plt.plot(file_fake['TStamp2'], file_fake['normalX'] , 'r.-.')
# We can see clearly that the lengths of the plots are not the same.
def samelen(data,length=400):
    """Resample a signature to a fixed number of points (default 400).

    For a signature of d points, one sample is drawn every (d-1)/length
    index units; each drawn sample is linearly interpolated between the two
    surrounding original rows.  X/Y/normalX/normalY/TStamp2 are linearly
    interpolated; TStamp and Pres use the midpoint of the two neighbours;
    EndPts is 1 if either neighbour is a pen-up point.  Returns a new
    DataFrame with the same columns, indexed from 0.
    """
    data_size = len(data)
    interval = (data_size-1)/length
    start = 0
    # Empty frame with the same columns, to collect the resampled rows.
    new_data = data.drop(index=data.index)
    for dist in np.arange(start, data_size-1, interval):
        first = math.floor(dist)
        second = math.ceil(dist)
        if second >= data_size:
            second = data_size -1
        # Fractional position of the sample between its two neighbours.
        percent = (dist - first) / 1.0
        #'X','Y','TStamp','Pres','EndPts'
        # print ((data.X[first] + data.X[second])/2)
        # X = (data.at[first,'X'] + data.at[second,'X']) / 2
        # Y = (data.Y[first] + data.Y[second])/2
        TStamp = (data.TStamp[first] + data.TStamp[second])/2
        Pres = (data.Pres[first] + data.Pres[second])/2
        # normalX = (data.normalX[first] + data.normalX[second])/2
        # normalY = (data.normalY[first] + data.normalY[second])/2
        # TStamp2 = (data.TStamp2[first] + data.TStamp2[second])/2
        X = data.at[first,'X'] + (data.at[second,'X'] - data.at[first,'X']) * percent
        Y = data.at[first,'Y'] + (data.at[second,'Y'] - data.at[first,'Y']) * percent
        normalX = data.at[first,'normalX'] + (data.at[second,'normalX'] - data.at[first,'normalX']) * percent
        normalY = data.at[first,'normalY'] + (data.at[second,'normalY'] - data.at[first,'normalY']) * percent
        TStamp2 = data.at[first,'TStamp2'] + (data.at[second,'TStamp2'] - data.at[first,'TStamp2']) * percent
        # Preserve pen-up markers: a resampled point is a stroke end if
        # either neighbour was one.
        if data.EndPts[first] == 1.0 or data.EndPts[second] == 1.0:
            EndPts = 1
        else:
            EndPts = 0
        new_data.loc[len(new_data)] = {'X': X, 'Y': Y, 'TStamp': TStamp, 'Pres': Pres, 'EndPts': EndPts,'normalX': normalX,
                       'normalY': normalY, 'TStamp2': TStamp2}
    return new_data
# d = (data[first] + data[second])/2
# new_data.loc[len(new_data)] = d
# Resample all three signatures to the same fixed length.
data1 = file
data2 = file3
data3 = file_fake
data1 = samelen(file)
data2 = samelen(file3)
data3 = samelen(file_fake)
# print (data1)
# print (data1)
# df = file
# df=df.drop(index=df.index)
# file.loc[len(file)] = 3
# print (list(range(0,len(data1)-1, 1)))
# The plot after length normalization (x axis = point index, not time).
fig = plt.figure(figsize=[8,5])
# plt.plot(data1["TStamp2"], data1["normalX"],'c*-', )
# plt.plot(data2['TStamp2'], data2['normalX'] , 'm.-.')
plt.plot( list(range(0,len(data1), 1)), data1["normalX"],'c*-')
plt.plot( list(range(0,len(data2),1)), data2['normalX'] , 'm.-.')
# plt.plot( list(range(0,len(data3),1)), data3['normalX'] , 'r.-.')
# We can see clearly that the lengths of the plots are now the same.
# def calc_extrme_points(data):
# length=len(data)
# data_extr_points ={}
# data_extr_points['point'] = []
# data_extr_points['value'] = []
# data_extr_points['type'] = []
# for i in range(5,length-5):
# # local maximum point
# local_list = list(range(i-5, i)) #如果之前5个点之内已经记录有点,则不记录
# if data[i]>=data[i-1] and data[i]>=data[i-2] and data[i]>=data[i-3] and data[i]>=data[i-4] and data[i]>=data[i-5] and \
# data[i]>=data[i+1] and data[i]>=data[i+2] and data[i]>=data[i+3] and data[i]>=data[i+4] and data[i]>=data[i+5] and \
# len([j for j in local_list if j in data_extr_points['point'] ]) == 0:
# data_extr_points['point'].append(i)
# data_extr_points['value'].append(data[i])
# data_extr_points['type'].append(1)
# continue
# # local minimum point
# if data[i]<=data[i-1] and data[i]<=data[i-2] and data[i]<=data[i-3] and data[i]<=data[i-4] and data[i]<=data[i-5] and \
# data[i]<=data[i+1] and data[i]<=data[i+2] and data[i]<=data[i+3] and data[i]<=data[i+4] and data[i]<=data[i+5] and \
# len([j for j in local_list if j in data_extr_points['point'] ]) == 0:
# data_extr_points['point'].append(i)
# data_extr_points['value'].append(data[i])
# data_extr_points['type'].append(0)
# #add start,end point
# if data_extr_points['type'][0] == 0:
# start_type = 1
# else:
# start_type = 0
# data_extr_points['point'].insert(0, 0)
# data_extr_points['value'].insert(0, data[0])
# data_extr_points['type'].insert(0, start_type)
# if data_extr_points['type'][-1] == 0:
# end_type = 1
# else:
# end_type = 0
# data_extr_points['point'].append(length-1)
# data_extr_points['value'].append(data[length-1])
# data_extr_points['type'].append(end_type)
# return data_extr_points
def calc_extrme_points(df):
    """Return the start indices of strokes long enough to compare.

    Strokes are delimited by pen-up rows (EndPts == 1); segments of 6
    points or fewer are ignored.  The final break point is appended as a
    closing boundary when it lies more than 6 points past the last recorded
    start.  Returns a dict with a single key ``'point'`` holding the list.
    """
    # Break points: start of the data plus every pen-up row index.
    EndPts = [0]
    EndPts.extend(list(df.loc[df["EndPts"] == 1].index))
    EndPts_len = len(EndPts)
    # print(EndPts)
    if EndPts_len < 2:
        EndPts.append(len(df)-1) # no pen-up found: treat the last row as one
        EndPts_len += 1
    i = 0
    EndPts2 = []
    while i < EndPts_len - 1:
        start = EndPts[i]
        end = EndPts[i+1]
        if end - start <= 6:
            # Segment too short to carry useful shape information.
            i += 1
            continue
        EndPts2.append(start)
        i += 1
    # BUGFIX: when every segment was too short, EndPts2 stayed empty and
    # EndPts2[len(EndPts2)-1] raised an IndexError; guard before indexing.
    if EndPts2 and EndPts[len(EndPts)-1] - EndPts2[len(EndPts2)-1] > 6:
        EndPts2.append(EndPts[len(EndPts)-1])
    re = {}
    re['point'] = EndPts2
    return re
# data1_extr_points = calc_extrme_points(data1['normalX'].tolist())
# data2_extr_points = calc_extrme_points(data2['normalX'].tolist())
# Compute stroke break points for both resampled signatures.
data1_extr_points = calc_extrme_points(data1)
data2_extr_points = calc_extrme_points(data2)
print(len(data1_extr_points['point']), len(data2_extr_points['point']))
print(data1_extr_points['point'])
print(data2_extr_points['point'])
# NOTE(review): hard-coded override of the computed break points so both
# signatures end up with the same stroke count — confirm this is intentional.
data2_extr_points['point'] = [0, 249, 399]
fig = plt.figure(figsize=[8,5])
# print(data1.loc[data1["EndPts"] == 1.0].head())
# print(data1)
plt.plot( list(range(0,len(data1), 1)), data1["normalX"],'c*-')
plt.plot( list(range(0,len(data2),1)), data2['normalX'] , 'm.-.')
# plt.plot( data1_extr_points['point'], data1_extr_points['value'],'rx')
# plt.plot( data2_extr_points['point'], data2_extr_points['value'],'rx')
# Calculate DTW distance stroke by stroke: slice one stroke out of a frame.
def get_stroke(df, extr_points, index):
    """Slice stroke ``index`` out of ``df`` using the break-point list and
    return it both as an (n, 2) float array of (normalX, normalY) pairs
    (for DTW) and as the sliced DataFrame copy (for plotting)."""
    lo = extr_points['point'][index]
    hi = extr_points['point'][index + 1]
    stroke_df = df.loc[lo:hi].copy()
    pairs = zip(np.array(stroke_df['normalX']), np.array(stroke_df['normalY']))
    series = np.array(list(pairs), dtype=float)
    return series, stroke_df
# Accumulate the DTW distance stroke by stroke (only possible when both
# signatures have the same number of break points).
total_distance = 0.0
print(len(data1_extr_points['point']), len(data2_extr_points['point']))
if len(data1_extr_points['point']) == len(data2_extr_points['point']):
    i = 0
    while i< len(data1_extr_points['point']) -1:
        ts_a, df_stroke1 = get_stroke(data1, data1_extr_points, i)
        ts_b, df_stroke2 = get_stroke(data2, data2_extr_points, i)
        # Plot the pair of strokes being compared.
        fig = plt.figure(figsize=[8,5])
        # plt.plot(data1["TStamp2"], data1["normalX"],'c*-', )
        # plt.plot(data2['TStamp2'], data2['normalX'] , 'm.-.')
        plt.plot( list(range(0,len(df_stroke1),1)),df_stroke1["normalX"],'c*-')
        plt.plot( list(range(0,len(df_stroke2),1)),df_stroke2['normalX'],'m.-.')
        fig.show()
        distance, path = fastdtw(ts_a, ts_b, dist=euclidean)
        total_distance += distance
        # break
        i +=1
print("DTW distance by stroke: ", total_distance)
# Build (normalX, normalY) series for whole-signature DTW comparison.
#set ts_a
normalX = np.array(data1['normalX'])
normalY = np.array(data1['normalY'])
list(zip(normalX,normalY)) # no-op: result is discarded
ts_a = np.array(list(zip(normalX,normalY)),dtype=float)
# print(normalX)
#set ts_b
normalX = np.array(data2['normalX'])
normalY = np.array(data2['normalY'])
list(zip(normalX,normalY)) # no-op: result is discarded
ts_b = np.array(list(zip(normalX,normalY)),dtype=float)
#set ts_c
normalX = np.array(data3['normalX'])
normalY = np.array(data3['normalY'])
list(zip(normalX,normalY)) # no-op: result is discarded
ts_c = np.array(list(zip(normalX,normalY)),dtype=float)
# DTW distance and alignment path for the genuine pair (ts_a vs ts_b).
distance, path = fastdtw(ts_a, ts_b, dist=euclidean)
print("genuine signature distance: ", distance)
# print(path)
path = list(path)
xpath = []
ypath = []
for v in path:
    xpath.append(v[0])
    ypath.append(v[1])
fig = plt.figure(figsize=[6,4])
plt.plot(xpath, ypath, color="r",linewidth=1 )
plt.title("genuine signature")
fig.show()
# We can see clearly that the alignment path is smooth.
# DTW distance and alignment path for the forgery pair (ts_b vs ts_c).
distance, path = fastdtw(ts_b, ts_c, dist=euclidean)
print("forgery signature distance: ", distance)
# print(path)
path = list(path)
xpath = []
ypath = []
for v in path:
    xpath.append(v[0])
    ypath.append(v[1])
fig = plt.figure(figsize=[6,4])
plt.plot(xpath, ypath,color="r",linewidth=1 )
plt.title("forgery signature")
fig.show()
# We can see clearly that the alignment path is not smooth.
```
| github_jupyter |
# SentencePiece and BPE
## Introduction to Tokenization
In order to process text in neural network models it is first required to **encode** text as numbers with ids, since the tensor operations act on numbers. Finally, if the output of the network is to be words, it is required to **decode** the predicted tokens ids back to text.
To encode text, the first decision that has to be made is to what level of granularity we are going to consider the text, because ultimately, from these **tokens**, features are going to be created about them. Many different experiments have been carried out using *words*, *morphological units*, *phonemic units*, *characters*. For example,
- Tokens are tricky. (raw text)
- Tokens are tricky . ([words](https://arxiv.org/pdf/1301.3781))
- Token s _ are _ trick _ y . ([morphemes](https://arxiv.org/pdf/1907.02423.pdf))
- t oʊ k ə n z _ ɑː _ ˈt r ɪ k i. ([phonemes](https://www.aclweb.org/anthology/W18-5812.pdf), for STT)
- T o k e n s _ a r e _ t r i c k y . ([character](https://www.aclweb.org/anthology/C18-1139/))
But how to identify these units, such as words, is largely determined by the language they come from. For example, in many European languages a space is used to separate words, while in some Asian languages there are no spaces between words. Compare English and Mandarin.
- Tokens are tricky. (original sentence)
- 标记很棘手 (Mandarin)
- Biāojì hěn jíshǒu (pinyin)
- 标记 很 棘手 (Mandarin with spaces)
So, the ability to **tokenize**, i.e. split text into meaningful fundamental units is not always straight-forward.
Also, there are practical issues of how large our *vocabulary* of words, `vocab_size`, should be, considering memory limitations vs. coverage. A compromise may need to be made between:
* the finest-grained models employing characters which can be memory intensive and
* more computationally efficient *subword* units such as [n-grams](https://arxiv.org/pdf/1712.09405) or larger units.
In [SentencePiece](https://www.aclweb.org/anthology/D18-2012.pdf) unicode characters are grouped together using either a [unigram language model](https://www.aclweb.org/anthology/P18-1007.pdf) (used in this week's assignment) or [BPE](https://arxiv.org/pdf/1508.07909.pdf), **byte-pair encoding**. We will discuss BPE, since BERT and many of its variants use a modified version of BPE and its pseudocode is easy to implement and understand... hopefully!
## SentencePiece Preprocessing
### NFKC Normalization
Unsurprisingly, even using unicode to initially tokenize text can be ambiguous, e.g.,
```
eaccent = '\u00E9'
e_accent = '\u0065\u0301'
print(f'{eaccent} = {e_accent} : {eaccent == e_accent}')
```
SentencePiece uses the Unicode standard normalization form, [NFKC](https://en.wikipedia.org/wiki/Unicode_equivalence), so this isn't an issue. Looking at our example from above but with normalization:
```
from unicodedata import normalize
norm_eaccent = normalize('NFKC', '\u00E9')
norm_e_accent = normalize('NFKC', '\u0065\u0301')
print(f'{norm_eaccent} = {norm_e_accent} : {norm_eaccent == norm_e_accent}')
```
Normalization has actually changed the unicode code point (unicode unique id) for one of these two characters.
```
def get_hex_encoding(s):
    """Return the hex code points of every character in *s*, space-separated."""
    codes = [hex(ord(ch)) for ch in s]
    return ' '.join(codes)
def print_string_and_encoding(s):
    """Print the string alongside its space-separated hex code points."""
    encoded = get_hex_encoding(s)
    print('{} : {}'.format(s, encoded))
for s in [eaccent, e_accent, norm_eaccent, norm_e_accent]:
print_string_and_encoding(s)
```
This normalization has other side effects which may be considered useful such as converting curly quotes “ to " their ASCII equivalent. (<sup>*</sup>Although we *now* lose directionality of the quote...)
### Lossless Tokenization<sup>*</sup>
SentencePiece also ensures that when you tokenize your data and detokenize your data the original position of white space is preserved. <sup>*</sup>However, tabs and newlines are converted to spaces, please try this experiment yourself later below.
To ensure this **lossless tokenization**, SentencePiece replaces white space with _ (U+2581). So that a simple join of the tokens by replace underscores with spaces can restore the white space, even if there are consecutive symbols. But remember first to normalize and then replace spaces with _ (U+2581). As the following example shows.
```
s = 'Tokenization is hard.'
s_ = s.replace(' ', '\u2581')
s_n = normalize('NFKC', 'Tokenization is hard.')
print(get_hex_encoding(s))
print(get_hex_encoding(s_))
print(get_hex_encoding(s_n))
```
So the special unicode underscore was replaced by its ASCII equivalent. Reversing the order of the second and third operations, we see that the special unicode underscore was retained.
```
s = 'Tokenization is hard.'
sn = normalize('NFKC', 'Tokenization is hard.')
sn_ = s.replace(' ', '\u2581')
print(get_hex_encoding(s))
print(get_hex_encoding(sn))
print(get_hex_encoding(sn_))
```
## BPE Algorithm
Now that we have discussed the preprocessing that SentencePiece performs, we will go get our data, preprocess, and apply the BPE algorithm. We will show how this reproduces the tokenization produced by training SentencePiece on our example dataset (from this week's assignment).
### Preparing our Data
First, we get our Squad data and process as above.
```
import ast
def convert_json_examples_to_text(filepath):
    """Read example records, join their texts, normalize, and save them.

    Each line of *filepath* is a Python literal (dict) with a 'text' key
    holding a UTF-8 byte string. The decoded texts are joined with blank
    lines, NFKC-normalized, written to 'example.txt', and returned.
    """
    # Use a context manager so the input file handle is always closed
    # (the original leaked the handle returned by open()).
    with open(filepath) as fr:
        example_jsons = [ast.literal_eval(line) for line in fr]
    # Decode the byte sequences stored under the 'text' key.
    texts = [example_json['text'].decode('utf-8') for example_json in example_jsons]
    text = '\n\n'.join(texts)  # Separate different articles by two newlines
    text = normalize('NFKC', text)  # Normalize to canonical unicode form
    with open('example.txt', 'w') as fw:
        fw.write(text)
    return text
text = convert_json_examples_to_text('./data/data.txt')
print(text[:900])
```
In the algorithm the `vocab` variable is actually a frequency dictionary of the words. Further, those words have been prepended with an *underscore* to indicate that they are the beginning of a word. Finally, the characters have been delimited by spaces so that the BPE algorithm can group the most common characters together in the dictionary in a greedy fashion. We will see how that is done shortly.
```
from collections import Counter
vocab = Counter(['\u2581' + word for word in text.split()])
vocab = {' '.join([l for l in word]): freq for word, freq in vocab.items()}
def show_vocab(vocab, end='\n', limit=20):
    """Show word frequencies in vocab, up to `limit` entries.

    Parameters
    ----------
    vocab : dict
        Mapping of word -> frequency.
    end : str
        Separator passed to print() after each entry.
    limit : int
        Maximum number of entries to show (the original implementation
        had an off-by-one and printed limit + 1 entries).
    """
    shown = 0
    for word, freq in vocab.items():
        # Stop once exactly `limit` entries have been printed.
        if shown >= limit:
            break
        print(f'{word}: {freq}', end=end)
        shown += 1
show_vocab(vocab)
```
We check the size of the vocabulary (frequency dictionary) because this is the one hyperparameter that crucially determines how far BPE breaks a word up into SentencePieces. It turns out that, for our model trained on our small dataset, 60% of the 455 merges of the most frequent characters need to be done to reproduce the upper limit of a 32K `vocab_size` over the entire corpus of examples.
```
print(f'Total number of unique words: {len(vocab)}')
print(f'Number of merges required to reproduce SentencePiece training on the whole corpus: {int(0.60*len(vocab))}')
```
### BPE Algorithm
Directly from the BPE paper we have the following algorithm.
```
import re, collections
def get_stats(vocab):
    """Count the frequency-weighted occurrences of adjacent symbol pairs."""
    pairs = collections.defaultdict(int)
    for word, freq in vocab.items():
        symbols = word.split()
        # Walk consecutive symbol pairs and weight each by the word's count.
        for left, right in zip(symbols, symbols[1:]):
            pairs[left, right] += freq
    return pairs
def merge_vocab(pair, v_in):
    """Return a new vocab in which the given symbol pair is fused into one token."""
    merged = ''.join(pair)
    # Match the space-separated pair only when flanked by whitespace or a
    # word boundary, so partial symbol matches are not merged.
    pattern = re.compile(r'(?<!\S)' + re.escape(' '.join(pair)) + r'(?!\S)')
    return {pattern.sub(merged, word): freq for word, freq in v_in.items()}
def get_sentence_piece_vocab(vocab, frac_merges=0.60):
    """Run BPE on a copy of vocab for int(len(vocab) * frac_merges) merge rounds."""
    sp_vocab = vocab.copy()
    num_merges = int(len(sp_vocab) * frac_merges)
    for _ in range(num_merges):
        # Greedily merge the currently most frequent adjacent pair.
        pair_counts = get_stats(sp_vocab)
        most_frequent = max(pair_counts, key=pair_counts.get)
        sp_vocab = merge_vocab(most_frequent, sp_vocab)
    return sp_vocab
```
To understand what's going on first take a look at the third function `get_sentence_piece_vocab`. It takes in the current `vocab` word-frequency dictionary and the fraction, `frac_merges`, of the total `vocab_size` to merge characters in the words of the dictionary, `num_merges` times. Then for each *merge* operation it `get_stats` on how many of each pair of character sequences there are. It gets the most frequent *pair* of symbols as the `best` pair. Then it merges that pair of symbols (removes the space between them) in each word in the `vocab` that contains this `best` (= `pair`). Consequently, `merge_vocab` creates a new `vocab`, `v_out`. This process is repeated `num_merges` times and the result is the set of SentencePieces (keys of the final `sp_vocab`).
### Additional Discussion of BPE Algorithm
Please feel free to skip the below if the above description was enough.
In a little more detail then, we can see in `get_stats` we initially create a list of bigram (two character sequence) frequencies from our vocabulary. Later, this may include trigrams, quadgrams, etc. Note that the key of the `pairs` frequency dictionary is actually a 2-tuple, which is just shorthand notation for a pair.
In `merge_vocab` we take in an individual `pair` (of character sequences, note this is the most frequency `best` pair) and the current `vocab` as `v_in`. We create a new `vocab`, `v_out`, from the old by joining together the characters in the pair (removing the space), if they are present in a word of the dictionary.
[Warning](https://regex101.com/): the expression `(?<!\S)` means that either a whitespace character follows before the `bigram` or there is nothing before the bigram (it is the beginning of the word), similarly for `(?!\S)` for preceding whitespace or the end of the word.
```
sp_vocab = get_sentence_piece_vocab(vocab)
show_vocab(sp_vocab)
```
## Train SentencePiece BPE Tokenizer on Example Data
### Explore SentencePiece Model
First let us explore the SentencePiece model provided with this week's assignment. Remember you can always use Python's built in `help` command to see the documentation for any object or method.
```
import sentencepiece as spm
sp = spm.SentencePieceProcessor(model_file='./data/sentencepiece.model')
# help(sp)
```
Let's work with the first sentence of our example text.
```
s0 = 'Beginners BBQ Class Taking Place in Missoula!'
# encode: text => id
print(sp.encode_as_pieces(s0))
print(sp.encode_as_ids(s0))
# decode: id => text
print(sp.decode_pieces(sp.encode_as_pieces(s0)))
print(sp.decode_ids([12847, 277]))
```
Notice how SentencePiece breaks the words into seemingly odd parts, but we've seen something similar from our work with BPE. But how close were we to this model trained on the whole corpus of examples with a `vocab_size` of 32,000 instead of 455? Here you can also test what happens to white space, like '\n'.
But first let us note that SentencePiece encodes the SentencePieces, the tokens, and has reserved some of the ids as can be seen in this week's assignment.
```
uid = 15068
spiece = "\u2581BBQ"
unknown = "__MUST_BE_UNKNOWN__"
# id <=> piece conversion
print(f'SentencePiece for ID {uid}: {sp.id_to_piece(uid)}')
print(f'ID for Sentence Piece {spiece}: {sp.piece_to_id(spiece)}')
# returns 0 for unknown tokens (we can change the id for UNK)
print(f'ID for unknown text {unknown}: {sp.piece_to_id(unknown)}')
print(f'Beginning of sentence id: {sp.bos_id()}')
print(f'Pad id: {sp.pad_id()}')
print(f'End of sentence id: {sp.eos_id()}')
print(f'Unknown id: {sp.unk_id()}')
print(f'Vocab size: {sp.vocab_size()}')
```
We can also check what are the ids for the first part and last part of the vocabulary.
```
print('\nId\tSentP\tControl?')
print('------------------------')
# <unk>, <s>, </s> are defined by default. Their ids are (0, 1, 2)
# <s> and </s> are defined as 'control' symbol.
for uid in range(10):
print(uid, sp.id_to_piece(uid), sp.is_control(uid), sep='\t')
# for uid in range(sp.vocab_size()-10,sp.vocab_size()):
# print(uid, sp.id_to_piece(uid), sp.is_control(uid), sep='\t')
```
### Train SentencePiece BPE model with our example.txt
Finally, let's train our own BPE model directly from the SentencePiece library and compare it to the results of our implementation of the algorithm from the BPE paper itself.
```
spm.SentencePieceTrainer.train('--input=example.txt --model_prefix=example_bpe --vocab_size=450 --model_type=bpe')
sp_bpe = spm.SentencePieceProcessor()
sp_bpe.load('example_bpe.model')
print('*** BPE ***')
print(sp_bpe.encode_as_pieces(s0))
show_vocab(sp_vocab, end = ', ')
```
Our implementation of BPE's code from the paper matches up pretty well with the library itself! The differences are probably accounted for by the `vocab_size`. There is also another technical difference in that in the SentencePiece implementation of BPE a priority queue is used to more efficiently keep track of the *best pairs*. Actually, there is a priority queue in the Python standard library called `heapq` if you would like to give that a try below!
## Optionally try to implement BPE using a priority queue below
```
from heapq import heappush, heappop
def heapsort(iterable):
    """Return the items of *iterable* in ascending order using a binary heap."""
    heap = []
    for item in iterable:
        heappush(heap, item)
    ordered = []
    # Popping from a min-heap yields items smallest-first.
    while heap:
        ordered.append(heappop(heap))
    return ordered
a = [1,4,3,1,3,2,1,4,2]
heapsort(a)
```
For a more extensive example consider looking at the [SentencePiece repo](https://github.com/google/sentencepiece/blob/master/python/sentencepiece_python_module_example.ipynb). The last few sections of this code was repurposed from that tutorial. Thanks for your participation! Next stop BERT and T5!
| github_jupyter |
### *IPCC SR15 scenario assessment*
<img style="float: right; height: 80px; padding-left: 20px;" src="../_static/IIASA_logo.png">
<img style="float: right; height: 80px;" src="../_static/IAMC_logo.jpg">
# Characteristics of four illustrative model pathways
## Figure 3b of the *Summary for Policymakers*
This notebook derives the figure panels and indicators for the table in Figure 3b in the Summary for Policymakers
of the IPCC's _"Special Report on Global Warming of 1.5°C"_.
The scenario data used in this analysis can be accessed and downloaded at [https://data.ene.iiasa.ac.at/iamc-1.5c-explorer](https://data.ene.iiasa.ac.at/iamc-1.5c-explorer).
## Load `pyam` package and other dependencies
```
import pandas as pd
import numpy as np
import io
import itertools
import yaml
import math
import matplotlib.pyplot as plt
plt.style.use('style_sr15.mplstyle')
%matplotlib inline
import pyam
```
## Import scenario data, categorization and specifications files
The metadata file with scenario categorisation and quantitative indicators can be downloaded at [https://data.ene.iiasa.ac.at/iamc-1.5c-explorer](https://data.ene.iiasa.ac.at/iamc-1.5c-explorer).
Alternatively, it can be re-created using the notebook `sr15_2.0_categories_indicators`.
The last cell of this section loads and assigns a number of auxiliary lists as defined in the categorization notebook.
```
sr1p5 = pyam.IamDataFrame(data='../data/iamc15_scenario_data_world_r2.0.xlsx')
sr1p5.load_meta('sr15_metadata_indicators.xlsx')
with open("sr15_specs.yaml", 'r') as stream:
specs = yaml.load(stream, Loader=yaml.FullLoader)
rc = pyam.run_control()
for item in specs.pop('run_control').items():
rc.update({item[0]: item[1]})
cats_15 = specs.pop('cats_15')
cats_15_no_lo = specs.pop('cats_15_no_lo')
marker = specs.pop('marker')
```
## Downselect scenario ensemble to categories of interest for this assessment
```
sr1p5.meta.rename(columns={'Kyoto-GHG|2010 (SAR)': 'kyoto_ghg_2010'}, inplace=True)
df = sr1p5.filter(category=cats_15)
```
## Global carbon dioxide emissions in four illustrative pathways
Figure SPM3b shows the contribution to CO2 emissions and removal by three categories in the four illustrative pathways.
This illustration does not use the emissions timeseries as reported by the models. This is because the variable `Emissions|CO2|Energy and Industrial Processes` represents net emissions, incorporating carbon dioxide removal in this sector.
The steps below compute the gross emissions. The long variable names are mapped to short variables for easier readibility.
```
afolu_var = 'Emissions|CO2|AFOLU'
ene_ind_var = 'Emissions|CO2|Energy and Industrial Processes'
beccs_var ='Carbon Sequestration|CCS|Biomass'
```
We downselect the entire data to the four illustrative pathways (`marker` scenarios) and the three variables of interest. For consistency with the figure in the SPM, the units are converted to Gt CO2.
```
pw = df.filter(marker=marker, variable=[afolu_var, ene_ind_var, beccs_var],
year=range(2010, 2101, 10))
pw.convert_unit('Mt CO2/yr', 'Gt CO2/yr', inplace=True)
```
As a first step, we extract the timeseries for the AFOLU emissions and rename the variable for brevity. This data will be used as is in this figure.
```
afolu = (
pw.filter(variable=afolu_var)
.rename(variable={afolu_var: 'AFOLU'})
)
```
The energy-and-industry and BECCS timeseries data needs some processing. It is first separated into two distinct dataframes, and the BECCS variable is renamed for brevity.
```
ene_ind = pw.filter(variable=ene_ind_var)
beccs = (
pw.filter(variable=beccs_var)
.rename(variable={beccs_var: 'BECCS'})
)
```
The variable `Carbon Sequestration|CCS|Biomass` reports removed carbon dioxide as positive values. For use in this figure, the sign needs to be reversed.
```
beccs.data.value = - beccs.data.value
```
The `LED` marker scenario does not use any BECCS by assumption of the scenario design. For this reason, the variable `Carbon Sequestration|CCS|Biomass` was not defined when the MESSAGE team submitted the scenario results to the IAMC 1.5°C Scenario Data ensemble.
For easier computation, we add this data series manually here.
```
years = beccs.timeseries().columns
beccs.append(
pyam.IamDataFrame(
pd.DataFrame([0] * len(years), index=years).T,
model='MESSAGEix-GLOBIOM 1.0', scenario='LowEnergyDemand',
region='World', variable='BECCS', unit='Gt CO2/yr'),
inplace=True
)
```
As a third step, we compute the difference between net CO2 emissions from the energy sector & industry and BECCS to obtain gross CO2 emissions in that sector.
```
def get_value(df):
    """Return the 'value' series of an IamDataFrame, indexed for alignment.

    Indexing on the identifying columns lets two series be subtracted
    row-by-row even if their row order differs.
    """
    index_cols = ['model', 'scenario', 'region', 'year', 'unit']
    long_data = df.data
    return long_data.set_index(index_cols)['value']
diff = get_value(ene_ind) - get_value(beccs)
ene_ind_gross = pyam.IamDataFrame(diff, variable='Fossil fuel and industry')
```
We now combine the three contribution dataframes into one joint dataframe for plotting. Because the `beccs` IamDataFrame was partially altered, concatenating directly causes an issue, so we remove all `meta` columns from that dataframe beforehand.
```
beccs.meta = beccs.meta.drop(columns=beccs.meta.columns)
co2 = pyam.concat([ene_ind_gross, afolu, beccs])
```
We now proceed to plot the four illustrative pathways.
```
fig, ax = plt.subplots(1, 4, figsize=(14, 4), sharey=True)
for i, m in enumerate(['LED', 'S1', 'S2', 'S5']):
co2.filter(marker=m).stack_plot(ax=ax[i], total=True, legend=False)
ax[i].title.set_text(m)
ax[3].legend(loc=1)
```
## Collecting indicators across illustrative pathways
### Initialize a `pyam.Statistics` instance
```
base_year = 2010
compare_years = [2030, 2050]
years = [base_year] + compare_years
stats = pyam.Statistics(df=df, groupby={'marker': ['LED', 'S1', 'S2', 'S5']},
filters=[(('pathways', 'no & lo os 1.5'), {'category': cats_15_no_lo})])
```
### CO2 and Kyoto GHG emissions reductions
```
co2 = (
df.filter(kyoto_ghg_2010='in range', variable='Emissions|CO2', year=years)
.convert_unit('Mt CO2/yr', 'Gt CO2/yr')
.timeseries()
)
for y in compare_years:
stats.add((co2[y] / co2[2010] - 1) * 100,
'CO2 emission reduction (% relative to 2010)',
subheader=y)
kyoto_ghg = (
df.filter(kyoto_ghg_2010='in range', variable='Emissions|Kyoto Gases (SAR-GWP100)', year=years)
.rename(unit={'Mt CO2-equiv/yr': 'Mt CO2e/yr'})
.convert_unit('Mt CO2e/yr','Gt CO2e/yr')
.timeseries()
)
for y in compare_years:
stats.add((kyoto_ghg[y] / kyoto_ghg[base_year] - 1) * 100,
'Kyoto-GHG emission reduction (SAR-GWP100), % relative to {})'.format(base_year),
subheader=y)
```
### Final energy demand reduction relative to 2010
```
fe = df.filter(variable='Final Energy', year=years).timeseries()
for y in compare_years:
stats.add((fe[y] / fe[base_year] - 1) * 100,
'Final energy demand reduction relative to {} (%)'.format(base_year),
subheader=y)
```
### Share of renewables in electricity generation
```
def add_stats_share(stats, var_list, name, total, total_name, years, df=df):
    # Add the share (%) of the summed `var_list` variables in `total` to the
    # `stats` collector, one row per year.
    # NOTE(review): `df=df` binds the notebook-global IamDataFrame at
    # definition time; later reassignments of the global are not picked up.
    _df = df.filter(variable=var_list)
    # Exclude scenarios that do not report every required variable, so the
    # groupby-sum below is not biased by partially reported scenarios.
    for v in var_list:
        _df.require_variable(v, exclude_on_fail=True)
    _df.filter(exclude=False, inplace=True)
    # Sum the component variables per (model, scenario).
    component = (
        _df.timeseries()
        .groupby(['model', 'scenario']).sum()
    )
    share = component / total * 100
    for y in years:
        stats.add(share[y], header='Share of {} in {} (%)'.format(name, total_name),
                  subheader=y)
ele = df.filter(variable='Secondary Energy|Electricity', year=compare_years).timeseries()
ele.index = ele.index.droplevel([2, 3, 4])
ele_re_vars = [
'Secondary Energy|Electricity|Biomass',
'Secondary Energy|Electricity|Non-Biomass Renewables'
]
add_stats_share(stats, ele_re_vars, 'renewables', ele, 'electricity', compare_years)
```
### Changes in primary energy mix
```
mapping = [
('coal', 'Coal'),
('oil', 'Oil'),
('gas', 'Gas'),
('nuclear', 'Nuclear'),
('bioenergy', 'Biomass'),
('non-biomass renewables', 'Non-Biomass Renewables')
]
for (n, v) in mapping:
data = df.filter(variable='Primary Energy|{}'.format(v), year=years).timeseries()
for y in compare_years:
stats.add((data[y] / data[base_year] - 1) * 100,
header='Primary energy from {} (% rel to {})'.format(n, base_year),
subheader=y)
```
### Cumulative carbon capture and sequestration until the end of the century
```
def cumulative_ccs(variable, name, first_year=2016, last_year=2100):
    # Add the cumulative amount of `variable` (integrated from first_year to
    # last_year, in Gt CO2) to the stats collector.
    # NOTE(review): relies on the notebook globals `df` (scenario ensemble)
    # and `stats` (pyam.Statistics) defined in earlier cells.
    data = (
        df.filter(variable=variable)
        .convert_unit('Mt CO2/yr', 'Gt CO2/yr')
        .timeseries()
    )
    stats.add(
        # pyam.cumulative integrates each row's annual timeseries over time.
        data.apply(pyam.cumulative, raw=False, axis=1,
                   first_year=first_year, last_year=last_year),
        header='Cumulative {} until {} (GtCO2)'.format(name, last_year), subheader='')
cumulative_ccs('Carbon Sequestration|CCS', 'CCS')
cumulative_ccs('Carbon Sequestration|CCS|Biomass', 'BECCS')
```
### Land cover for energy crops
Convert unit to SI unit (million square kilometers).
```
# Land cover for energy crops in 2050, converted from million ha to the SI
# unit million km2 (1 million ha = 0.01 million km2).
energy_crops = (
    df.filter(variable='Land Cover|Cropland|Energy Crops', year=2050)
    .convert_unit('million ha', 'million km2', factor=0.01)
    .timeseries()
)
# Fix: header previously read 'Land are' instead of 'Land area'.
stats.add(energy_crops[2050], header='Land area for energy crops (million km2)')
```
### Emissions from land use
```
species = ['CH4', 'N2O']
for n in species:
data = df.filter(kyoto_ghg_2010='in range', variable='Emissions|{}|AFOLU'.format(n), year=years).timeseries()
for y in compare_years:
stats.add((data[y] / data[base_year] - 1) * 100,
header='Agricultural {} emissions (% rel to {})'.format(n, base_year),
subheader=y)
```
## Display summary statistics and export to `xlsx`
```
summary = stats.summarize(interquartile=True, custom_format='{:.0f}').T
summary
summary.to_excel('output/spm_sr15_figure3b_indicators_table.xlsx')
```
| github_jupyter |
<h1 align=center><font size = 6> Crop Yield Prediction. </font></h1>
## import required libraries.
```
import numpy as np #Library to handle data in vectorized manner.
import pandas as pd #library for data analysis.
#Plotting libray matplotlib and associated ploting modules.
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as colors
%matplotlib inline
print('All the required libraries are imported.....!')
```
## Initial data gathering and cleaning.
### read crop production dataset into pandas dataframe.
```
df_crop = pd.read_csv("crop_production.csv")
# Now look at the data frame.
df_crop.head()
```
#### replace blank cells with nan and remove these nan values.
```
nan_value = float("NaN")
df_crop.replace("", nan_value, inplace=True)
df_crop.dropna(subset = ["District_name"], inplace=True)
print("Drop Sucessfull")
#now look at the shape of our data frame.
print("Size of the data frame is :", df_crop.shape)
```
### Crop production data analysis.
```
# assign data in new dataframe for analysis.
df_analysis = df_crop
#Set the district name as index - useful for quickly looking up district using .loc method
df_analysis.set_index('District_name' , inplace = True)
#now see how data frame looks.
df_analysis.head()
length = len(df_analysis)
length
```
### Let's analyze the crop production data of each district, where the district name is chosen by the user.
Please Enter the district name in which you want to analyze crop data in the input field, use the district name from following list.<br>
[Ahemadnagar Akola Amravati Aurangabad Beed Bhandara Buldhana Chandrapur Dhule Gadchiroli Gondia Hingoli Jalgaon Jalna Kolhapur Latur Nagpur Nanded Nandurbar Nashik Osmanabad Parbhani Pune Raigad Ratnagiri Sangli Satara Sindhudurg Solapur Thane Wardha Washim Yavatmal
]<br>
Note : Input field is case sensitive.
```
value = input("Enter the District Name :")
df_data = df_analysis.loc[[value]] #assign the data of district which is selected by user input to the new dataframe.
df_data.reset_index(drop=True, inplace=True) # drop the index.
#transpose dataframe.
df_transposed = df_data.T
#rename the column name.
df_transposed.rename(columns={ 0: 'Production'}, inplace=True)
#now lets plot the dtataframe into bar graph.
df_transposed.plot(kind="bar", figsize = (16,14), fontsize = 12, color = 'rgbkymc')
plt.title("Distrcit Wise Crop Production In Maharashtra State", fontsize = 16)
plt.xlabel("Different Crops of District :" + value, fontsize = 16)
plt.ylabel("Crop Production in Tonns", fontsize = 16)
plt.savefig('yieldanalysis.png', format= 'png')
plt.show()
```
| github_jupyter |
# MaterialsCoord benchmarking – sensitivity to perturbation analysis
This notebook demonstrates how to use MaterialsCoord to benchmark the sensitivity of bonding algorithms to structural perturbations. Perturbations are introduced according to the Einstein crystal test rig, in which each site is perturbed so that the distribution around the equilibrium position yields a normal distribution for each Cartesian component.
The perturbation complies thus with the expectation for an Einstein crystal,
in which the potential is given by $V(\delta r) = 0.5 k_\mathrm{spring} \delta r^2$, where
$k_\mathrm{spring}$ denotes the spring constant with which the sites are tethered to
their equilibrium position, and $\delta r$ is the distance of the site under
consideration from its equilibrium position.
The MaterialsCoord `Benchmark` class accepts a `perturb_sigma` option, which is equal to $(k_\mathrm{B}T/k_\mathrm{spring})^{0.5}$.
*Written using:*
- MaterialsCoord==0.1.0
*Authors: Hillary Pan, Alex Ganose (10/12/19)*
---
First, lets initialize the near neighbor methods we are interested in.
```
from pymatgen.analysis.local_env import BrunnerNN_reciprocal, EconNN, JmolNN, \
MinimumDistanceNN, MinimumOKeeffeNN, MinimumVIRENN, \
VoronoiNN, CrystalNN
nn_methods = [
BrunnerNN_reciprocal(), EconNN(tol=0.5), JmolNN(), CrystalNN(), VoronoiNN(tol=0.5),
MinimumDistanceNN(), MinimumOKeeffeNN(), MinimumVIRENN()
]
```
Next, import the benchmark and choose which structures we are interested in.
```
from materialscoord.core import Benchmark
structure_groups = ["common_binaries", "elemental", "A2BX4", "ABX3", "ABX4"]
```
Choose the initial and final perturbation sigma values to include, as well as the number of steps inbetween.
```
import numpy as np
initial_sigma = 0
final_sigma = 0.2
nsteps = 51
sigmas = np.linspace(initial_sigma, final_sigma, nsteps)
```
Run the benchmark with the perturbation turned on. Note we have disabled symmetry so that each perturbed site is treated separately. Due to the absence of symmetry and the slow speed of `MinimumVIRENN`, this can take a long time (14 hours on a 2017 MacBook Pro).
```
from tqdm import tqdm_notebook
results = []
for sigma in tqdm_notebook(sigmas):
bm = Benchmark.from_structure_group(structure_groups, perturb_sigma=sigma, symprec=None)
sigma_scores = bm.score(nn_methods)
results.append(sigma_scores.iloc[-1].values)
```
Finally, plot the results.
```
%matplotlib inline
import matplotlib.pyplot as plt
from matplotlib import ticker
import os
from scipy.signal import savgol_filter
import seaborn as sns
plt_results = np.array(results).T
# define matplotlib style settings
style = {
"font.sans-serif": ["Helvetica", "Arial"], "axes.labelsize": 16,
"xtick.labelsize": 16, "ytick.labelsize": 16, "xtick.direction": "in",
"ytick.direction": "in", "xtick.major.size": 8, "xtick.minor.size": 4,
"ytick.major.size": 8, "ytick.minor.size": 4, "lines.linewidth": 2.5,
"lines.markersize": 10, "axes.linewidth": 1.2, "xtick.major.width": 1.2,
"xtick.minor.width": 1.2, "ytick.major.width": 1.2, "ytick.minor.width": 1.2,
"pdf.fonttype":42
}
nn_method_mapping = {"BrunnerNN_reciprocal": "BrunnerNN"}
colors = sns.color_palette("deep")
order = [5, 6, 7, 2, 1, 0, 4, 3]
plt.style.use(style)
fig = plt.figure(figsize=(6, 6))
ax = plt.gca()
for i, x in enumerate(order):
method = nn_methods[x]
y_vals = plt_results[x]
name = method.__class__.__name__
c = colors[i]
name = nn_method_mapping.get(name, name)
# smooth the lines with a double pass through a savgol filter
# more ideal would be to take averages accross multiple runs
# but due to the time taken to generate the data this is impractical
y_vals = savgol_filter(y_vals, 27, 2)
y_vals = savgol_filter(y_vals, 27, 2)
ax.plot(sigmas, y_vals, label=name, c=c)
ax.set(ylabel="Benchmark score", xlabel="Sigma (Å)")
ax.set_xlim((0, 0.2))
ax.yaxis.set_major_locator(ticker.MaxNLocator(5))
plt.legend(loc='upper left', bbox_to_anchor=(1, 1), frameon=False, fontsize=15)
plt.savefig(os.path.join("plots", "perturbation-tolerance.pdf"), bbox_inches="tight")
plt.show()
```
| github_jupyter |
<a href="https://colab.research.google.com/github/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_12_02_qlearningreinforcement.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# T81-558: Applications of Deep Neural Networks
**Module 12: Reinforcement Learning**
* Instructor: [Jeff Heaton](https://sites.wustl.edu/jeffheaton/), McKelvey School of Engineering, [Washington University in St. Louis](https://engineering.wustl.edu/Programs/Pages/default.aspx)
* For more information visit the [class website](https://sites.wustl.edu/jeffheaton/t81-558/).
# Module 12 Video Material
* Part 12.1: Introduction to the OpenAI Gym [[Video]](https://www.youtube.com/watch?v=_KbUxgyisjM&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_12_01_ai_gym.ipynb)
* **Part 12.2: Introduction to Q-Learning** [[Video]](https://www.youtube.com/watch?v=A3sYFcJY3lA&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_12_02_qlearningreinforcement.ipynb)
* Part 12.3: Keras Q-Learning in the OpenAI Gym [[Video]](https://www.youtube.com/watch?v=qy1SJmsRhvM&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_12_03_keras_reinforce.ipynb)
* Part 12.4: Atari Games with Keras Neural Networks [[Video]](https://www.youtube.com/watch?v=co0SwPWoZh0&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_12_04_atari.ipynb)
* Part 12.5: Application of Reinforcement Learning [[Video]](https://www.youtube.com/watch?v=1jQPP3RfwMI&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](t81_558_class_12_05_apply_rl.ipynb)
# Google CoLab Instructions
The following code ensures that Google CoLab is running the correct version of TensorFlow.
```
# Detect whether this notebook is running inside Google CoLab.
# The import only succeeds on CoLab; any failure means a local runtime.
try:
    from google.colab import drive
    %tensorflow_version 2.x
    COLAB = True
    print("Note: using Google CoLab")
except:
    print("Note: not using Google CoLab")
    COLAB = False
# On CoLab, install the system packages (virtual framebuffer, video tools)
# and pinned Python packages needed to render and record gym environments.
if COLAB:
    !sudo apt-get install -y xvfb ffmpeg x11-utils
    !pip install -q 'gym==0.10.11'
    !pip install -q 'imageio==2.4.0'
    !pip install -q PILLOW
    !pip install -q 'pyglet==1.3.2'
    !pip install -q pyvirtualdisplay
    !pip install -q tf-agents
```
# Part 12.2: Introduction to Q-Learning
Q-Learning is a foundational technique upon which deep reinforcement learning is based. Before we explore deep reinforcement learning, it is essential to understand Q-Learning. Several components make up any Q-Learning system.
* **Agent** - The agent is an entity that exists in an environment that takes actions to affect the state of the environment, to receive rewards.
* **Environment** - The environment is the universe that the agent exists in. The environment is always in a specific state that is changed by the actions of the agent.
* **Actions** - Steps that can be performed by the agent to alter the environment
* **Step** - A step occurs each time that the agent performs an action and potentially changes the environment state.
* **Episode** - A chain of steps that ultimately culminates in the environment entering a terminal state.
* **Epoch** - A training iteration of the agent that contains some number of episodes.
* **Terminal State** - A state in which further actions do not make sense. In many environments, a terminal state occurs when the agent has won, lost, or the environment has exceeded the maximum number of steps.
Q-Learning works by building a table that suggests an action for every possible state. This approach runs into several problems. First, the environment is usually composed of several continuous numbers, resulting in an infinite number of states. Q-Learning handles continuous states by binning these numeric values into ranges.
Additionally, Q-Learning primarily deals with discrete actions, such as pressing a joystick up or down. Out of the box, Q-Learning does not deal with continuous inputs, such as a car's accelerator that can be in a range of positions from released to fully engaged. Researchers have come up with clever tricks to allow Q-Learning to accommodate continuous actions.
In the next chapter, we will learn more about deep reinforcement learning. Deep neural networks can help to solve the problems of continuous environments and action spaces. For now, we will apply regular Q-Learning to the Mountain Car problem from OpenAI Gym.
### Introducing the Mountain Car
This section will demonstrate how Q-Learning can create a solution to the mountain car gym environment. The Mountain car is an environment where a car must climb a mountain. Because gravity is stronger than the car's engine, even with full throttle, it cannot merely accelerate up the steep slope. The vehicle is situated in a valley and must learn to utilize potential energy by driving up the opposite hill before the car can make it to the goal at the top of the rightmost hill.
First, it might be helpful to visualize the mountain car environment. The following code shows this environment. This code makes use of TF-Agents to perform this render. Usually, we use TF-Agents for the type of deep reinforcement learning that we will see in the next module. However, for now, TF-Agents is just used to render the mountain car environment.
```
import tf_agents
from tf_agents.environments import suite_gym
import PIL.Image
import pyvirtualdisplay
# Start a virtual display so gym can render without a physical screen.
display = pyvirtualdisplay.Display(visible=0, size=(1400, 900)).start()
env_name = 'MountainCar-v0'
env = suite_gym.load(env_name)
env.reset()
# Render one frame of the freshly reset environment as a PIL image.
PIL.Image.fromarray(env.render())
```
The mountain car environment provides the following discrete actions:
* 0 - Apply left force
* 1 - Apply no force
* 2 - Apply right force
The mountain car environment is made up of the following continuous values:
* state[0] - Position
* state[1] - Velocity
The following code shows an agent that applies full throttle to climb the hill. The cart is not strong enough. It will need to use potential energy from the mountain behind it.
```
import gym
from gym.wrappers import Monitor
import glob
import io
import base64
from IPython.display import HTML
from pyvirtualdisplay import Display
from IPython import display as ipythondisplay
# Virtual display so gym rendering works on a headless runtime (e.g. CoLab).
display = Display(visible=0, size=(1400, 900))
display.start()
"""
Utility functions to enable video recording of gym environment
and displaying it.
To enable video, just do "env = wrap_env(env)""
"""
def show_video():
    """Display the first recorded MP4 from ./video inline in the notebook."""
    recordings = glob.glob('video/*.mp4')
    if not recordings:
        print("Could not find video")
        return
    payload = io.open(recordings[0], 'r+b').read()
    encoded = base64.b64encode(payload)
    ipythondisplay.display(HTML(data='''<video alt="test" autoplay
                loop controls style="height: 400px;">
                <source src="data:video/mp4;base64,{0}" type="video/mp4" />
             </video>'''.format(encoded.decode('ascii'))))
def wrap_env(env):
    # Record every episode of this environment to ./video (force=True
    # overwrites recordings from earlier runs).
    env = Monitor(env, './video', force=True)
    return env
import gym
# Wrap with the video recorder on CoLab; render natively elsewhere.
if COLAB:
    env = wrap_env(gym.make("MountainCar-v0"))
else:
    env = gym.make("MountainCar-v0")
env.reset()
done = False
i = 0
# Always push right (action 2): full throttle alone cannot climb the hill,
# so the episode only ends when the environment's step limit is reached.
while not done:
    i += 1
    state, reward, done, _ = env.step(2)
    env.render()
    print(f"Step {i}: State={state}, Reward={reward}")
env.close()
show_video()
```
### Programmed Car
Now we will look at a car that I hand-programmed. This car is straightforward; however, it solves the problem. The programmed car always applies force to one direction or another. It does not break. Whatever direction the vehicle is currently rolling, the agent uses power in that direction. Therefore, the car begins to climb a hill, is overpowered, and turns backward. However, once it starts to roll backward force is immediately applied in this new direction.
The following code implements this preprogrammed car.
```
import gym
if COLAB:
    env = wrap_env(gym.make("MountainCar-v0"))
else:
    env = gym.make("MountainCar-v0")
state = env.reset()
done = False
i = 0
# Hand-coded policy: always apply force in the direction the car is already
# moving (state[1] is velocity), rocking back and forth to build momentum.
while not done:
    i += 1
    if state[1]>0:
        action = 2
    else:
        action = 0
    state, reward, done, _ = env.step(action)
    env.render()
    print(f"Step {i}: State={state}, Reward={reward}")
env.close()
```
We now visualize the preprogrammed car solving the problem.
```
show_video()
```
### Reinforcement Learning
Q-Learning is a system of rewards that the algorithm gives an agent for successfully moving the environment into a state considered successful. These rewards are the Q-values from which this algorithm takes its name. The final output from the Q-Learning algorithm is a table of Q-values that indicate the reward value of every action that the agent can take, given every possible environment state. The agent must bin continuous state values into a fixed finite number of columns.
Learning occurs when the algorithm runs the agent and environment through a series of episodes and updates the Q-values based on the rewards received from actions taken; Figure 12.REINF provides a high-level overview of this reinforcement or Q-Learning loop.
**Figure 12.REINF:Reinforcement/Q Learning**

The Q-values can dictate action by selecting the action column with the highest Q-value for the current environment state. The choice between choosing a random action and a Q-value driven action is governed by the epsilon ($\epsilon$) parameter, which is the probability of random action.
Each time through the training loop, the training algorithm updates the Q-values according to the following equation.
$Q^{new}(s_{t},a_{t}) \leftarrow \underbrace{Q(s_{t},a_{t})}_{\text{old value}} + \underbrace{\alpha}_{\text{learning rate}} \cdot \overbrace{\bigg( \underbrace{\underbrace{r_{t}}_{\text{reward}} + \underbrace{\gamma}_{\text{discount factor}} \cdot \underbrace{\max_{a}Q(s_{t+1}, a)}_{\text{estimate of optimal future value}}}_{\text{new value (temporal difference target)}} - \underbrace{Q(s_{t},a_{t})}_{\text{old value}} \bigg) }^{\text{temporal difference}}$
There are several parameters in this equation:
* alpha ($\alpha$) - The learning rate, how much should the current step cause the Q-values to be updated.
* lambda ($\lambda$) - The discount factor is the percentage of future reward that the algorithm should consider in this update.
This equation modifies several values:
* $Q(s_t,a_t)$ - The Q-table. For each combination of states, what reward would the agent likely receive for performing each action?
* $s_t$ - The current state.
* $r_t$ - The last reward received.
* $a_t$ - The action that the agent will perform.
The equation works by calculating a delta (temporal difference) that the equation should apply to the old state. This learning rate ($\alpha$) scales this delta. A learning rate of 1.0 would fully implement the temporal difference to the Q-values each iteration and would likely be very chaotic.
There are two parts to the temporal difference: the new and old values. The new value is subtracted from the old value to provide a delta; the full amount that we would change the Q-value by if the learning rate did not scale this value. The new value is a summation of the reward received from the last action and the maximum of the Q-values from the resulting state when the client takes this action. It is essential to add the maximum of action Q-values for the new state because it estimates the optimal future values from proceeding with this action.
### Q-Learning Car
We will now use Q-Learning to produce a car that learns to drive itself. Look out, Tesla! We begin by defining two essential functions.
```
import gym
import numpy as np
# This function converts the floating point state values into
# discrete values. This is often called binning. We divide
# the range that the state values might occupy and assign
# each region to a bucket.
def calc_discrete_state(state):
    """Map a continuous observation to a tuple of integer bucket indices.

    Relies on the module-level ``env`` (observation-space lower bound)
    and ``buckets`` (per-dimension bucket width).
    """
    discrete_state = (state - env.observation_space.low)/buckets
    # FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int is the equivalent, supported spelling.
    return tuple(discrete_state.astype(int))
# Run one game. The q_table to use is provided. We also
# provide a flag to indicate if the game should be
# rendered/animated. Finally, we also provide
# a flag to indicate if the q_table should be updated.
def run_game(q_table, render, should_update):
    """Play one MountainCar episode.

    q_table -- Q-values indexed by (discrete state..., action)
    render -- if True, animate each step
    should_update -- if True, apply the Q-learning update rule in place

    Returns True if the car reached the goal position.
    Reads the module-level env, epsilon, LEARNING_RATE and DISCOUNT.
    """
    done = False
    discrete_state = calc_discrete_state(env.reset())
    success = False
    while not done:
        # Exploit or explore
        if np.random.random() > epsilon:
            # Exploit - use q-table to take current best action
            # (and probably refine)
            action = np.argmax(q_table[discrete_state])
        else:
            # Explore - take a uniformly random action
            action = np.random.randint(0, env.action_space.n)
        # Run simulation step
        new_state, reward, done, _ = env.step(action)
        # Convert continuous state to discrete
        new_state_disc = calc_discrete_state(new_state)
        # Have we reached the goal position (have we won?)?
        if new_state[0] >= env.unwrapped.goal_position:
            success = True
        # Update q-table with the standard temporal-difference rule:
        # blend the old value with (reward + discounted best future value).
        if should_update:
            max_future_q = np.max(q_table[new_state_disc])
            current_q = q_table[discrete_state + (action,)]
            new_q = (1 - LEARNING_RATE) * current_q + LEARNING_RATE * \
                (reward + DISCOUNT * max_future_q)
            q_table[discrete_state + (action,)] = new_q
        discrete_state = new_state_disc
        if render:
            env.render()
    return success
```
Several hyperparameters are very important for Q-Learning. These parameters will likely need adjustment as you apply Q-Learning to other problems. Because of this, it is crucial to understand the role of each parameter.
* **LEARNING_RATE** The rate at which previous Q-values are updated based on new episodes run during training.
* **DISCOUNT** The amount of significance to give estimates of future rewards when added to the reward for the current action taken. A value of 0.95 would indicate a discount of 5% to the future reward estimates.
* **EPISODES** The number of episodes to train over. Increase this for more complex problems; however, training time also increases.
* **SHOW_EVERY** How many episodes to allow to elapse before showing an update.
* **DISCRETE_GRID_SIZE** How many buckets to use when converting each of the continuous state variables. For example, [10, 10] indicates that the algorithm should use ten buckets for the first and second state variables.
* **START_EPSILON_DECAYING** Epsilon is the probability that the agent will select a random action over what the Q-Table suggests. This value determines the starting probability of randomness.
* **END_EPSILON_DECAYING** How many episodes should elapse before epsilon goes to zero and no random actions are permitted. For example, EPISODES//10 means only the first 1/10th of the episodes might have random actions.
```
LEARNING_RATE = 0.1            # step size for Q-value updates
DISCOUNT = 0.95                # weight given to estimated future reward
EPISODES = 50000               # total training episodes
SHOW_EVERY = 1000              # progress/render interval (in episodes)
DISCRETE_GRID_SIZE = [10, 10]  # buckets per state dimension (position, velocity)
# NOTE(review): this value is compared against the episode number in the
# training loop, yet it is set to a fraction (0.5) - verify the intent.
START_EPSILON_DECAYING = 0.5
END_EPSILON_DECAYING = EPISODES//10  # episode after which epsilon stops decaying
```
We can now make the environment. If we are running in Google COLAB then we wrap the environment to be displayed inside the web browser. Next create the discrete buckets for state and build Q-table.
```
if COLAB:
    env = wrap_env(gym.make("MountainCar-v0"))
else:
    env = gym.make("MountainCar-v0")
epsilon = 1  # start fully exploratory
# Amount to decrease epsilon by per episode while decaying.
epsilon_change = epsilon/(END_EPSILON_DECAYING - START_EPSILON_DECAYING)
# Width of each discretization bucket per state dimension.
buckets = (env.observation_space.high - env.observation_space.low) \
    /DISCRETE_GRID_SIZE
# Q-table of shape (position buckets, velocity buckets, actions),
# initialized with small random negative values.
q_table = np.random.uniform(low=-3, high=0, size=(DISCRETE_GRID_SIZE \
    + [env.action_space.n]))
success = False
```
We can now run the training loop for the required number of episodes. The loop updates the Q-table as the agent learns, decays epsilon toward zero, and reports the success count at regular intervals.
```
episode = 0
success_count = 0
# Loop through the required number of episodes
while episode < EPISODES:
    episode += 1
    done = False
    # Run the game. If we are local, display render animation at SHOW_EVERY
    # intervals.
    if episode % SHOW_EVERY == 0:
        # BUG FIX: the second string was missing its f-prefix, so the
        # success rate was printed literally instead of being interpolated.
        print(f"Current episode: {episode}, success: {success_count}" +
              f" ({float(success_count)/SHOW_EVERY})")
        success = run_game(q_table, True, False)
        success_count = 0
    else:
        success = run_game(q_table, False, True)
    # Count successes
    if success:
        success_count += 1
    # Move epsilon towards its ending value, if it still needs to move
    if END_EPSILON_DECAYING >= episode >= START_EPSILON_DECAYING:
        epsilon = max(0, epsilon - epsilon_change)
print(success)
```
As you can see, the number of successful episodes generally increases as training progresses. It is not advisable to stop the first time that we observe 100% success over 1,000 episodes. There is a randomness to most games, so it is not likely that an agent would retain its 100% success rate with a new run. Once you observe that the agent has gotten 100% for several update intervals, it might be safe to stop training.
# Running and Observing the Agent
Now that the algorithm has trained the agent, we can observe the agent in action. You can use the following code to see the agent in action.
```
run_game(q_table, True, False)
show_video()
```
# Inspecting the Q-Table
We can also display the Q-table. The following code shows the action that the agent would perform for each environment state. As the weights of a neural network, this table is not straightforward to interpret. Some patterns do emerge in that directions do arise, as seen by calculating the means of rows and columns. The actions seem consistent at upper and lower halves of both velocity and position.
```
import pandas as pd
# Best action (argmax over the action axis) for every discrete state.
df = pd.DataFrame(q_table.argmax(axis=2))
df.columns = [f'v-{x}' for x in range(DISCRETE_GRID_SIZE[0])]
df.index = [f'p-{x}' for x in range(DISCRETE_GRID_SIZE[1])]
df
df.mean(axis=0)  # mean chosen action per velocity bucket
df.mean(axis=1)  # mean chosen action per position bucket
```
| github_jupyter |
# BikeBuyer Regression
```
# importing libraries
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import numpy.random as nr
import math
%matplotlib inline
# loading data
customer_info = pd.read_csv('Data/AdvWorksCusts.csv')
customer_spending = pd.read_csv('Data/AW_AveMonthSpend.csv')
customer_has_bike = pd.read_csv('Data/AW_BikeBuyer.csv')
# checking for duplicate and removing them
print("For customer_info: " + "\n")
print(customer_info.shape)
print(customer_info.CustomerID.unique().shape)
print("\n" + "For customer_spending" + "\n")
print(customer_spending.shape)
print(customer_spending.CustomerID.unique().shape)
print("\n" + "For customer_has_bike" + "\n")
print(customer_has_bike.shape)
print(customer_has_bike.CustomerID.unique().shape)
# dropping duplicate if they exist
customer_info.drop_duplicates(subset='CustomerID',keep='last', inplace=True)
customer_spending.drop_duplicates(subset='CustomerID',keep='last',inplace=True)
customer_has_bike.drop_duplicates(subset='CustomerID',keep='last',inplace=True)
# checking if duplicate are dropped
print("For customer_info: " + "\n")
print(customer_info.shape)
print(customer_info.CustomerID.unique().shape)
print("\n" + "For customer_spending" + "\n")
print(customer_spending.shape)
print(customer_spending.CustomerID.unique().shape)
print("\n" + "For customer_has_bike" + "\n")
print(customer_has_bike.shape)
print(customer_has_bike.CustomerID.unique().shape)
# checking for null or missign values in all datasets
print((customer_info.astype(np.object).isnull()).any())
print((customer_spending.astype(np.object).isnull().any()))
print((customer_has_bike.astype(np.object).isnull().any()))
```
Doing some exploratory analysis once the data is cleaned
```
# summary statistics (rounded to 2 decimals) for each cleaned dataset
print(round(customer_info.describe(),2))
print("\n")
print(round(customer_spending.describe(),2))
print("\n")
print(round(customer_has_bike.describe(),2))
# merging customer_info and customer_spending data for modeling
data = customer_info.merge(customer_spending, on='CustomerID', how='left')
data.head()
```
#### Below function is useful but I prefer you use the other
#### because its simple
```
from datetime import datetime
from dateutil.parser import parse
def generate_age(data, format):
    """Return each customer's age in whole years as of 1998-01-01.

    data -- DataFrame with a 'BirthDate' column of date strings
    format -- strptime format of the BirthDate values

    Leap days are ignored (365-day years), matching the original notebook.
    """
    # FIX: dropped the unused 'birthday' alias from the chained assignment.
    collect_date = datetime(1998, 1, 1, 0, 0, 0)
    age = []
    for index, row in data.iterrows():
        cust_date = datetime.strptime(row['BirthDate'], format)
        age.append(int((collect_date - cust_date).days/365))
    return age
# add the derived Age column and spot-check it against BirthDate
data['Age'] = generate_age(data, '%Y-%m-%d')
data[['BirthDate','Age']].head()
```
#### generating age since we are given the birth date
#### This function for generating age works, but it is not safe since it only works with one form of date format.
from datetime import datetime
from datetime import date
def calcute_age(age):
    """Return whole years between the birth-date string *age* (YYYY-MM-DD)
    and the data-collection date 1998-01-01."""
    born = datetime.strptime(age, "%Y-%m-%d")
    reference = date(1998, 1, 1)
    years = reference.year - born.year
    # Subtract one if the birthday had not yet occurred by the reference date.
    if (reference.month, reference.day) < (born.month, born.day):
        years -= 1
    return years
# apply the parser per BirthDate value and spot-check the result
data['Age'] = data['BirthDate'].apply(calcute_age)
data[['BirthDate','Age']].head()
```
def plot_scatter(auto_prices, cols, col_y= 'AveMonthSpend'):
    """Scatter-plot col_y against each column named in cols."""
    for col in cols:
        fig = plt.figure(figsize=(7,6)) # define plot area
        ax = fig.gca() # define axis
        auto_prices.plot.scatter(x= col, y=col_y, ax= ax)
        ax.set_title('Scatter plot of ' + col_y + ' vs. ' + col) #title of the plot
        ax.set_xlabel(col) #set x axis text
        ax.set_ylabel(col_y) #set y axis text
        plt.show()
cols=['NumberChildrenAtHome','NumberCarsOwned','TotalChildren']
plot_scatter(data,cols)
# pairwise relationships among spending, income and age
cols= ['AveMonthSpend','YearlyIncome','Age']
sns.pairplot(data[cols], palette="Set2", diag_kind="kde", size=2).map_upper(sns.kdeplot,cmap="Blues_d")
def plot_box(auto_prices, cols, col_y='AveMonthSpend'):
    """Box-plot col_y grouped by each categorical column named in cols."""
    for col in cols:
        sns.set_style("whitegrid")
        sns.boxplot(col,col_y, data=auto_prices)
        plt.xlabel(col) #set x axis text
        plt.ylabel(col_y) #set y axis text
        plt.show()
cols= ['Occupation','Gender','MaritalStatus','HomeOwnerFlag']
plot_box(data, cols)
```
After the visualizations above, we selected the following features for the model: Gender, MaritalStatus, HomeOwnerFlag, Occupation, Age, YearlyIncome and NumberChildrenAtHome.
```
# Grouping categorical and numerical feature names selected above
categorical_features= ['Gender','MaritalStatus','HomeOwnerFlag','Occupation']
numeric_features= ['Age','YearlyIncome','NumberChildrenAtHome']
# define encoder for categorical
from sklearn import preprocessing
import sklearn.model_selection as ms
from sklearn import linear_model
import sklearn.metrics as sklm
def encode_string(cat_features):
    """One-hot encode a column of string categories into a dense array."""
    # First map each category to an integer label...
    label_enc = preprocessing.LabelEncoder()
    label_enc.fit(cat_features)
    as_ints = label_enc.transform(cat_features)
    # ...then expand the integer labels to one-hot columns.
    one_hot = preprocessing.OneHotEncoder()
    fitted = one_hot.fit(as_ints.reshape(-1,1))
    return fitted.transform(as_ints.reshape(-1,1)).toarray()
def encode_cat_features(features):
    """One-hot encode the categorical feature columns, stacked column-wise
    (Occupation first, then Gender, MaritalStatus, HomeOwnerFlag)."""
    remaining = ['Gender','MaritalStatus','HomeOwnerFlag']
    encoded = encode_string(features['Occupation'])
    for name in remaining:
        encoded = np.concatenate([encoded, encode_string(features[name])], 1)
    return encoded
# target vector: average monthly spend
labels = np.array(data.AveMonthSpend)
selected = numeric_features + categorical_features
features = data[selected]
print(labels)
print(features.head())
#encoding features
encoded_features= encode_cat_features(features)
print(encoded_features[:,:])
#selecting numeric features and converting them to array
# NOTE(review): this rebinds numeric_features from a list of column names
# to a numpy array - later cells must not reuse it as a name list.
numeric_features= np.array(data[numeric_features])
print(numeric_features[:,:])
# Combining numeric and encoded features into 1 feature
features= np.concatenate([encoded_features,numeric_features],1)
print(features.shape)
print(features[:1,:])
# splitting data into training and test datasets
nr.seed(9988)
indx= range(features.shape[0])
indx= ms.train_test_split(indx, test_size= 300)
X_train= features[indx[0],:]
y_train= np.ravel(labels[indx[0]])
X_test= features[indx[1],:]
y_test= np.ravel(labels[indx[1]])
# Scaling the data to avoid features having different magnitudes
#scalar= preprocessing.MinMaxScaler(feature_range=(-1,1)).fit(X_train[:,11:])
# NOTE(review): there are 3 numeric columns after the 11 one-hot columns,
# so 11:13 scales only two of them - verify whether 11:14 was intended.
scaler = preprocessing.StandardScaler().fit(X_train[:,11:13])
X_train[:,11:13] = scaler.transform(X_train[:,11:13])
X_test[:,11:13] = scaler.transform(X_test[:,11:13])
X_train[:2]
```
Now Features are prepared we try it on models
```
# using the linear regression model to define and fit
lin_mod= linear_model.Ridge(alpha = 0.05)
lin_mod.fit(X_train,y_train)
print(lin_mod.intercept_)
print(lin_mod.coef_)
# tuning the model: grid search for the best regularization strength alpha
alphas = np.array([0.1,0.01,0.001,0.0001,0,0.01,0.05,0.04,0.03,0.02,1,2,3,4,5,6,7,8,9,10])
lin_mod= linear_model.Ridge()
linRidge_clf = ms.GridSearchCV(estimator=lin_mod, param_grid=dict(alpha=alphas))
linRidge_clf.fit(X_train,y_train)
#summarize results of grid search
print(linRidge_clf.best_score_)
print(linRidge_clf.best_estimator_.alpha)
# refit using the best alpha found by the grid search above
lin_mod= linear_model.Ridge(alpha = 3.0)
lin_mod.fit(X_train,y_train)
print(lin_mod.intercept_)
print(lin_mod.coef_)
# function to calculate the regression metrics
def print_metrics(y_true, y_predicted):
    """Print regression error metrics plus R^2 and adjusted R^2.

    NOTE(review): n and p are taken from the module-level X_test, so the
    adjusted R^2 is only correct when y_true comes from that test split.
    """
    # compute R^2 and the adjusted R^2
    r2= sklm.r2_score(y_true,y_predicted)
    n= X_test.shape[0]
    p= X_test.shape[1]-1
    r2_adj= 1-(1-r2)*((n-1)/(n-p-1))
    ## Print the usual metrics and the R^2 values
    print('Mean Square Error = ' + str(sklm.mean_squared_error(y_true, y_predicted)))
    print('Root Mean Square Error = ' + str(math.sqrt(sklm.mean_squared_error(y_true, y_predicted))))
    print('Mean Absolute Error = ' + str(sklm.mean_absolute_error(y_true, y_predicted)))
    print('Median Absolute Error = ' + str(sklm.median_absolute_error(y_true, y_predicted)))
    print('R^2 = ' + str(r2))
    print('Adjusted R^2 = ' + str(r2_adj))
# function to calculate accuracy
def print_evalute(y_true_, y_predicted_):
    """Print the mean absolute error and a MAPE-derived accuracy figure."""
    deviations = abs(y_predicted_ - y_true_)
    mape = 100 * np.mean(deviations / y_true_)
    print('Model Performance')
    print('Average Error: {:0.4f} degrees.'.format(np.mean(deviations)))
    print('Accuracy= {:0.2f}%.'.format(100 - mape))
# predict on the held-out test split and report all metrics
scores= lin_mod.predict(X_test)
print_metrics(y_test, scores)
print_evalute(y_test, scores)
# function to compute and plot the residuals
def hist_residue(y_test, y_score):
    """Plot a histogram of the residuals (y_test - y_score)."""
    ## compute vector of residuals
    residue = np.subtract(y_test.reshape(-1,1), y_score.reshape(-1,1))
    # making a plot
    sns.distplot(residue)
    plt.title('Histogram of residuals')
    plt.xlabel('Residual value')
    plt.ylabel('Count')
    plt.show()
hist_residue(y_test,scores)
def plot_residue(y_test, y_score):
    """Scatter residuals against predicted values to reveal structure."""
    ## compute vector of residuals
    residue = np.subtract(y_test.reshape(-1,1), y_score.reshape(-1,1))
    # making a plot
    sns.regplot(y_score, residue, fit_reg= False)
    plt.title('Residuals vs Predicted values')
    plt.xlabel('Predicted Values')
    plt.ylabel('Residuals')
    plt.show()
plot_residue(y_test,scores)
```
The residuals are not normally distributed as expected. Also, there is a pattern of lower average residuals. This indicates the model does not generalize as expected.
```
# using the polynomial regression to define and fit.
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import PolynomialFeatures
# degree-4 polynomial feature expansion feeding ordinary least squares
poly_mod= make_pipeline(PolynomialFeatures(4),
                        linear_model.LinearRegression())
poly_mod.fit(X_train,y_train)
scores = poly_mod.predict(X_test)
print_metrics(y_test,scores)
print_evalute(y_test, scores)
hist_residue(y_test,scores)
plot_residue(y_test,scores)
```
Comparing the polynomial features to the linear regression, it can be seen that polynomial regression performs better. The R2 and adj. R2 show a good residual distribution, and the histogram has the shape of a normal distribution.
Due to this I will explore other models to see how well they perform.
```
# Running the grid-search CV for the GradientBoostingRegressor
# to choose its best hyperparameters
from sklearn.ensemble import GradientBoostingRegressor
gbrt_mod= GradientBoostingRegressor(random_state=0)
param_grid= {
    'n_estimators': [10,20,30,40,50,100,200,300,500],
    'max_features': ['auto'],
    'max_depth': [1,2,4,6,8,10],
    'learning_rate': [0.1],
    'subsample': [1]
}
gbrt_clf= ms.GridSearchCV(estimator=gbrt_mod,
                          param_grid=param_grid,
                          n_jobs=4,
                          cv=5,
                          scoring='neg_mean_squared_error')
gbrt_clf.fit(X_train,y_train)
print(gbrt_clf.best_score_)
print(gbrt_clf.best_params_)
# Refit the GradientBoostingRegressor with the parameters chosen above
from sklearn.ensemble import GradientBoostingRegressor
gbrt_mod= GradientBoostingRegressor(n_estimators=200,
                                    max_depth=4)
gbrt_mod.fit(X_train,y_train)
scores= gbrt_mod.predict(X_test)
print_metrics(y_test,scores)
print_evalute(y_test, scores)
hist_residue(y_test,scores)
plot_residue(y_test,scores)
# Using Neural network (multi-layer perceptron regressor)
from sklearn.neural_network import MLPRegressor
regressor_mod= MLPRegressor(hidden_layer_sizes= (100,),
                            activation= 'tanh',
                            learning_rate= 'adaptive',
                            max_iter=1000,
                            random_state=9,
                            learning_rate_init=0.001)
regressor_mod.fit(X_train, y_train)
scores= regressor_mod.predict(X_test)
print_metrics(y_test,scores)
print_evalute(y_test, scores)
hist_residue(y_test,scores)
plot_residue(y_test,scores)
# Using Random Forest with default settings as a baseline
from sklearn.ensemble import RandomForestRegressor
rf_Regressor_mod= RandomForestRegressor(n_estimators=40)
rf_Regressor_mod.fit(X_train, y_train)
scores= rf_Regressor_mod.predict(X_test)
print_metrics(y_test,scores)
print_evalute(y_test, scores)
hist_residue(y_test,scores)
plot_residue(y_test,scores)
# tuning Random Forest Regressor via randomized search to get the best
# parameters
n_estimators= [int(x) for x in np.linspace(10,500,10)]# # trees in random forest
max_features= ['auto','sqrt']# # features to consider at every split
max_depth= [int(x) for x in np.linspace(10,100,10)]# # maximum number of levels in tree
max_depth.append(None)
min_samples_split= [2,5,10] # minimum # samples required to split a node
min_samples_leaf= [1,2,4] # minimum # of samples required at each leaf node
bootstrap= [True, False] # Method of selecting sample for training each tree
param_distributions= {'n_estimators': n_estimators,
                      'max_features': max_features,
                      'max_depth': max_depth,
                      'min_samples_split': min_samples_split,
                      'min_samples_leaf': min_samples_leaf,
                      'bootstrap': bootstrap}
rf_Regressor_mod= RandomForestRegressor()
# 100 random parameter draws, 3-fold cross-validation each
rf_Regressor_clf= ms.RandomizedSearchCV(estimator= rf_Regressor_mod,
                                        param_distributions= param_distributions,
                                        n_iter= 100,
                                        cv=3,
                                        random_state=42,
                                        n_jobs=-1)
rf_Regressor_clf.fit(X_train,y_train)
print('\n')
print(rf_Regressor_clf.best_score_)
print(rf_Regressor_clf.best_params_)
from sklearn.ensemble import RandomForestRegressor
# Refit a random forest with the best parameters from the randomized search.
# BUG FIX: bootstrap was passed as the string 'True'; any non-empty string
# is truthy (so even 'False' would enable bootstrapping), and recent
# scikit-learn versions reject non-boolean values outright.
rf_Regressor_mod= RandomForestRegressor(n_estimators= 227,
                                        min_samples_split= 5,
                                        min_samples_leaf= 1,
                                        max_features= 'auto',
                                        max_depth= 10,
                                        bootstrap= True)
rf_Regressor_mod.fit(X_train, y_train)
scores= rf_Regressor_mod.predict(X_test)
print_metrics(y_test,scores)
print_evalute(y_test, scores)
hist_residue(y_test,scores)
plot_residue(y_test,scores)
```
From all the models, it can be seen that the MLP regressor performs well in general compared to the other models.
```
# Testing the model on final test data
# importing the final test data
final= pd.read_csv('Data/AW_test.csv')
# checking if there are duplicates
print(final.shape)
print(final.CustomerID.unique().shape)
# derive Age (the test file uses month/day/year birth dates)
final['Age'] = generate_age(final,'%m/%d/%Y')
final[['Age','BirthDate']].head()
encoded = encode_cat_features(final)
numeric_final_features = np.array(final[['Age','YearlyIncome', 'NumberChildrenAtHome']])
final_test = np.concatenate([encoded,numeric_final_features], 1)
# apply the scaler fitted on the training split to the same numeric columns
final_test[:,11:13]= scaler.transform(final_test[:,11:13])
# predict with the MLP model and write integer predictions to CSV
final_scores= regressor_mod.predict(final_test)
np.savetxt('final_answer_regression.csv', final_scores, delimiter=',',fmt='%i')
```
| github_jupyter |
___
<a href='http://www.pieriandata.com'> <img src='../Pierian_Data_Logo.png' /></a>
___
# NumPy Exercises
Now that we've learned about NumPy let's test your knowledge. We'll start off with a few simple tasks, and then you'll be asked some more complicated questions.
#### Import NumPy as np
```
import numpy as np  # standard NumPy alias used throughout the exercises
#
```
#### Create an array of 10 zeros
```
np.zeros(10)  # 1-D array of ten 0.0 values
```
#### Create an array of 10 ones
```
np.ones(10)  # 1-D array of ten 1.0 values
#
```
#### Create an array of 10 fives
```
np.ones(10)*5  # broadcast multiply: ten 5.0 values
```
#### Create an array of the integers from 10 to 50
```
np.arange(10,51)  # integers 10..50 inclusive (stop is exclusive, hence 51)
```
#### Create an array of all the even integers from 10 to 50
```
np.arange(10,51,2)  # even integers 10..50 (step of 2)
```
#### Create a 3x3 matrix with values ranging from 0 to 8
```
np.arange(0,9).reshape(3,3)  # 0..8 laid out as a 3x3 matrix
```
#### Create a 3x3 identity matrix
```
np.eye(3)  # 3x3 identity matrix
```
#### Use NumPy to generate a random number between 0 and 1
```
np.random.rand(1)  # one uniform sample from [0, 1)
```
#### Use NumPy to generate an array of 25 random numbers sampled from a standard normal distribution
```
np.random.randn(25)  # 25 samples from the standard normal distribution
```
#### Create the following matrix:
```
np.arange(1,101).reshape(10,10)/100  # alternatively: np.linspace(0.01,1,100).reshape(10,10)
```
#### Create an array of 20 linearly spaced points between 0 and 1:
```
np.linspace(0,1,20)  # 20 evenly spaced points from 0 to 1, endpoints included
```
## Numpy Indexing and Selection
Now you will be given a few matrices, and be asked to replicate the resulting matrix outputs:
```
mat = np.arange(1,26).reshape(5,5)  # 5x5 matrix of 1..25
mat
# WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW
# BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T
# BE ABLE TO SEE THE OUTPUT ANY MORE
mat[2:,1:]  # rows 2-4, columns 1-4
# WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW
# BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T
# BE ABLE TO SEE THE OUTPUT ANY MORE
mat[3,4]  # single scalar element (row 3, column 4)
# WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW
# BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T
# BE ABLE TO SEE THE OUTPUT ANY MORE
mat[0:3,1:2]  # column 1 of the first three rows, kept 2-D
# WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW
# BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T
# BE ABLE TO SEE THE OUTPUT ANY MORE
mat[4]  # last row as a 1-D array
# WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW
# BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T
# BE ABLE TO SEE THE OUTPUT ANY MORE
mat[3:5]  # last two rows
```
### Now do the following
#### Get the sum of all the values in mat
```
mat.sum()  # sum over every entry of the matrix
```
#### Get the standard deviation of the values in mat
```
mat.std()  # standard deviation over all entries
```
#### Get the sum of all the columns in mat
```
mat.sum(axis=0)  # per-column sums (collapse the row axis)
```
# Great Job!
| github_jupyter |
```
"""
Update Parameters Here
"""
COLLECTION_NAME = "Quaks"  # collection slug used to locate the rarity CSV
CONTRACT = "0x07bbdaf30e89ea3ecf6cadc80d6e7c4b0843c729"  # ERC-721 contract address
BEFORE_TIME = "2021-09-02T00:00:00" # One day after the last mint (e.g. https://etherscan.io/tx/0x206c846d0d1739faa9835e16ff419d15708a558357a9413619e65dacf095ac7a)
# these should usually stay the same
METHOD = "raritytools"  # rarity-scoring method suffix of the CSV filename
"""
Created on Tue Sep 14 20:17:07 2021
mint data. Doesn't work when Opensea's API is being shitty
@author: nbax1, slight modifications by mdigi14
"""
import pandas as pd
from utils import config
from utils import constants
from utils import opensea
"""
Helper Functions
"""
def get_mint_events(contract, before_time, rarity_db):
    """Download mint events (transfers from the mint address) for a contract
    from OpenSea and join them with the collection's rarity ranking.

    Parameters
    ----------
    contract : str
        Contract address of the collection.
    before_time : str
        ISO-8601 timestamp; only events occurring before this time are fetched.
    rarity_db : pandas.DataFrame (or convertible)
        Rarity table with at least 'TOKEN_ID' and 'Rank' columns.

    Returns
    -------
    pandas.DataFrame
        Columns: txid, to_account, TOKEN_ID, current_owner, rank, time.
    """
    data = opensea.get_opensea_events(
        contract_address=contract,
        account_address=constants.MINT_ADDRESS,
        event_type="transfer",
        occurred_before=before_time,
    )
    df = pd.json_normalize(data)
    # keep only true mints: transfers that originate from the mint address
    df = df.loc[df["from_account.address"] == constants.MINT_ADDRESS]
    # work on a local copy so the caller's rarity table is never mutated
    df_rar = pd.DataFrame(rarity_db)
    os_tokens = df["asset.token_id"].astype(int).tolist()
    rar_tokens = df_rar["TOKEN_ID"].astype(int).tolist()
    # tokens present in the rarity table but absent from the bulk download
    missing_tokens = sorted(set(rar_tokens) - set(os_tokens))
    frames = [df]
    if missing_tokens:
        print(
            f"Missing tokens: {missing_tokens}\nTrying to fetch event for missing tokens..."
        )
        missing_data = []
        for token in missing_tokens:
            missing_data.extend(
                opensea.get_opensea_events(
                    contract_address=contract,
                    account_address=constants.MINT_ADDRESS,
                    event_type="transfer",
                    occurred_before=before_time,
                    token_id=token,
                )
            )
        # merge missing data with the rest of the data; only concat when we
        # actually fetched something (previously df_missing_data was used
        # unconditionally and raised NameError when nothing was missing)
        frames.append(pd.json_normalize(missing_data))
    df_all = pd.concat(frames)
    # make sure token_id is an integer on both sides of the merge
    df_all["asset.token_id"] = df_all["asset.token_id"].astype(int)
    df_rar["TOKEN_ID"] = df_rar["TOKEN_ID"].astype(int)
    # add rarity rank to minting data (use the rarity_db parameter, not the
    # module-level RARITY_DB global)
    df_all = df_all.merge(df_rar, left_on="asset.token_id", right_on="TOKEN_ID")
    # Keep only the columns we want
    df_all = df_all[
        [
            "transaction.transaction_hash",
            "to_account.address",
            "asset.token_id",
            "asset.owner.address",
            "Rank",
            "transaction.timestamp",
        ]
    ]
    # Rename columns
    df_all.columns = [
        "txid",
        "to_account",
        "TOKEN_ID",
        "current_owner",
        "rank",
        "time",
    ]
    print(f"Downloaded {df_all.shape[0]} events")
    return df_all
"""
Gerenerate Dataset
"""
RARITY_CSV = f"{config.RARITY_FOLDER}/{COLLECTION_NAME}_{METHOD}.csv"
RARITY_DB = pd.read_csv(RARITY_CSV)
mint_db = get_mint_events(CONTRACT, BEFORE_TIME, RARITY_DB)
mint_db = mint_db.sort_values(by=["TOKEN_ID"])
mint_db.to_csv(f"{config.MINTING_FOLDER}/{COLLECTION_NAME}_minting.csv", index=False)
```
| github_jupyter |
```
from matplotlib import pyplot as plt
%matplotlib notebook
from matplotlib import animation
import numpy as np
#make a fake galaxy distribution from a MOG
mean1, std1 = (np.random.rand()*2-1, np.random.rand()*2-1), (np.random.rand()*3+0.5, np.random.rand()*3+0.5)
mean2, std2 = (np.random.rand()*2+1, np.random.rand()*2+1), (np.random.rand()*3+0.5, np.random.rand()*3+0.5)
N1, N2 = 500, 500
points = np.zeros((N1+N2, 2))
points[:N1] = np.random.randn(N1, 2)*np.array(std1)+np.array(mean1)
points[N1:] = np.random.randn(N2, 2)*np.array(std2)+np.array(mean2)
plt.scatter(points[:,0], points[:,1])
plt.scatter(mean1[0], mean1[ 1], color = 'r')
plt.scatter(mean2[0], mean2[1], color = 'r')
from itertools import combinations
random_points = np.random.randn(N1+N2, 2)*5
pairs = list(combinations(range(random_points.shape[0]), 2) )
n_bins = 10
hist_bins = np.logspace(-1, 1, n_bins+1)
hbc = (hist_bins[1:]+hist_bins[:-1])/2.0
dists = np.zeros(( len(pairs), ))
for i, pair in enumerate(pairs):
p1, p2 = pairs[i][0], pairs[i][1]
x1, y1 = random_points[p1]
x2, y2 = random_points[p2]
dists[i] = np.sqrt((x2-x1)**2+(y2-y1)**2)
random_hist, _ = np.histogram(dists, bins=hist_bins)
random_hist[random_hist==0] = 1e-3
# First set up the figure, the axis, and the plot element we want to animate
fig = plt.figure(figsize = (8, 5))
ax1 = plt.subplot(1, 2, 1, xlim=(-6, 6), ylim=(-6, 6))
ax2 = plt.subplot(1, 2, 2, xlim=(-1, 1), ylim = (-5, 0))
pairs = list(combinations(range(points.shape[0]), 2) )
np.random.shuffle(pairs)
dist_counts = np.zeros((len(pairs),))
line1, = ax1.plot([], [], lw=2, color = 'r')
line2, = ax2.plot([], [], lw = 2, color = 'g', marker = 'o')
# initialization function: plot the background of each frame
def init():
    """Draw the static background (the full galaxy point cloud) once and
    return the animated artists so FuncAnimation's blitting knows which
    artists to redraw each frame."""
    ax1.scatter(points[:,0], points[:,1], color = 'b', alpha = 0.7)
    return line1, line2
# animation function. This is called sequentially
def animate(i):
    """Animate frame i: draw the segment joining the i-th pair of points
    (left panel) and update the running two-point correlation estimate
    (right panel).

    Closes over module-level state: `pairs`, `points`, `dist_counts`,
    `hist_bins`, `hbc`, `random_hist`, `line1`, `line2`, `N1`, `N2`.
    Mutates `dist_counts` in place, so frames must run in order.
    """
    p1, p2 = pairs[i][0], pairs[i][1]
    x1, y1 = points[p1]
    x2, y2 = points[p2]
    # line segment joining the current pair (left panel)
    x = np.linspace(x1, x2, 100)
    y = np.linspace(y1, y2, 100)
    line1.set_data(x, y)
    # record this pair's separation, then histogram all separations so far
    dist_counts[i] = np.sqrt((x2-x1)**2+(y2-y1)**2)
    data_hist = np.histogram(dist_counts[:i], bins = hist_bins)[0].astype(float)
    data_hist = data_hist*(N1+N2)/(i+1) # reweight
    # floor empty bins so log10 below never sees a zero
    data_hist[data_hist == 0] = data_hist[data_hist==0]+ 1e-3
    #print np.log10(data_hist /random_hist)
    line2.set_data(np.log10(hbc), np.log10(data_hist/random_hist ))
    return line1, line2
# call the animator. blit=True means only re-draw the parts that have changed.
#for i in xrange(100):
# animate(i)
anim = animation.FuncAnimation(fig, animate, init_func=init,
frames=50000, interval=1, blit=True)#, repeat = False)
# save the animation as an mp4. This requires ffmpeg or mencoder to be
# installed. The extra_args ensure that the x264 codec is used, so that
# the video can be embedded in html5. You may need to adjust this for
# your system: for more information, see
# http://matplotlib.sourceforge.net/api/animation_api.html
#anim.save('basic_animation.mp4', fps=30, extra_args=['-vcodec', 'libx264'])
#ax2.xlabel('Log r')
#ax2.ylabel('Log Xi')
#ax1.xlabel('x')
#ax2.ylabel('y')
plt.show()
```
| github_jupyter |
## INTRODUCTION
- It’s a Python based scientific computing package targeted at two sets of audiences:
- A replacement for NumPy to use the power of GPUs
- Deep learning research platform that provides maximum flexibility and speed
- pros:
    - Interactive debugging of PyTorch code. Many users who have used both frameworks would argue that this makes PyTorch significantly easier to debug and visualize.
- Clean support for dynamic graphs
- Organizational backing from Facebook
- Blend of high level and low level APIs
- cons:
- Much less mature than alternatives
- Limited references / resources outside of the official documentation
- I assume you know neural network basics. If you do not, check my tutorial. Because I will not explain neural network concepts in detail, I only explain how to use PyTorch for neural networks.
- Neural Network tutorial: https://www.kaggle.com/kanncaa1/deep-learning-tutorial-for-beginners
- The most important parts of this tutorial from matrices to ANN. If you learn these parts very well, implementing remaining parts like CNN or RNN will be very easy.
<br>
<br>**Content:**
1. Basics of Pytorch, Linear Regression, Logistic Regression, Artificial Neural Network (ANN), Convolutional Neural Network (CNN)
- https://www.kaggle.com/kanncaa1/pytorch-tutorial-for-deep-learning-lovers/code
1. [Recurrent Neural Network (RNN)](#1)
```
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
import os
print(os.listdir("../input"))
# Any results you write to the current directory are saved as output.
```
<a id="1"></a> <br>
### Recurrent Neural Network (RNN)
- An RNN is essentially a repeating ANN, but information gets passed through from the output of the previous non-linear activation function.
- **Steps of RNN:**
1. Import Libraries
1. Prepare Dataset
1. Create RNN Model
- hidden layer dimension is 100
- number of hidden layer is 1
1. Instantiate Model Class
1. Instantiate Loss Class
- Cross entropy loss
- It also has softmax(logistic function) in it.
1. Instantiate Optimizer Class
- SGD Optimizer
1. Training the Model
1. Prediction
```
# Import Libraries
import torch
import torch.nn as nn
import torchvision.transforms as transforms
from torch.autograd import Variable
from sklearn.model_selection import train_test_split
# Prepare Dataset
# load data
train = pd.read_csv(r"../input/train.csv",dtype = np.float32)
# split data into features(pixels) and labels(numbers from 0 to 9)
targets_numpy = train.label.values
features_numpy = train.loc[:,train.columns != "label"].values/255 # normalization
# train test split. Size of train data is 80% and size of test data is 20%.
features_train, features_test, targets_train, targets_test = train_test_split(features_numpy,
targets_numpy,
test_size = 0.2,
random_state = 42)
# create feature and targets tensor for train set. As you remember we need variable to accumulate gradients. Therefore first we create tensor, then we will create variable
featuresTrain = torch.from_numpy(features_train)
targetsTrain = torch.from_numpy(targets_train).type(torch.LongTensor) # data type is long
# create feature and targets tensor for test set.
featuresTest = torch.from_numpy(features_test)
targetsTest = torch.from_numpy(targets_test).type(torch.LongTensor) # data type is long
# batch_size, epoch and iteration
batch_size = 100
n_iters = 10000
num_epochs = n_iters / (len(features_train) / batch_size)
num_epochs = int(num_epochs)
# Pytorch train and test sets
train = torch.utils.data.TensorDataset(featuresTrain,targetsTrain)
test = torch.utils.data.TensorDataset(featuresTest,targetsTest)
# data loader
train_loader = torch.utils.data.DataLoader(train, batch_size = batch_size, shuffle = False)
test_loader = torch.utils.data.DataLoader(test, batch_size = batch_size, shuffle = False)
# visualize one of the images in data set
plt.imshow(features_numpy[10].reshape(28,28))
plt.axis("off")
plt.title(str(targets_numpy[10]))
plt.savefig('graph.png')
plt.show()
# Create RNN Model
class RNNModel(nn.Module):
    """Multi-layer Elman RNN with a linear readout.

    Expects input of shape (batch, seq_len, input_dim); the class scores
    are computed from the hidden state at the final time step.
    """

    def __init__(self, input_dim, hidden_dim, layer_dim, output_dim):
        super(RNNModel, self).__init__()
        # size of each hidden state vector
        self.hidden_dim = hidden_dim
        # number of stacked RNN layers
        self.layer_dim = layer_dim
        # batch_first=True -> tensors are (batch, seq, feature)
        self.rnn = nn.RNN(input_dim, hidden_dim, layer_dim,
                          batch_first=True, nonlinearity='relu')
        # readout layer mapping the last hidden state to class scores
        self.fc = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        # zero initial hidden state of shape (layers, batch, hidden)
        h0 = Variable(torch.zeros(self.layer_dim, x.size(0), self.hidden_dim))
        out, hn = self.rnn(x, h0)
        # classify using only the final time step's output
        return self.fc(out[:, -1, :])
# batch_size, epoch and iteration
batch_size = 100
n_iters = 2500
num_epochs = n_iters / (len(features_train) / batch_size)
num_epochs = int(num_epochs)
# Pytorch train and test sets
train = torch.utils.data.TensorDataset(featuresTrain,targetsTrain)
test = torch.utils.data.TensorDataset(featuresTest,targetsTest)
# data loader
train_loader = torch.utils.data.DataLoader(train, batch_size = batch_size, shuffle = False)
test_loader = torch.utils.data.DataLoader(test, batch_size = batch_size, shuffle = False)
# Create RNN
input_dim = 28 # input dimension
hidden_dim = 100 # hidden layer dimension
layer_dim = 2 # number of hidden layers
output_dim = 10 # output dimension
model = RNNModel(input_dim, hidden_dim, layer_dim, output_dim)
# Cross Entropy Loss
error = nn.CrossEntropyLoss()
# SGD Optimizer
learning_rate = 0.05
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
seq_dim = 28
loss_list = []
iteration_list = []
accuracy_list = []
count = 0
# Training loop: iterate over the training set, periodically evaluating
# accuracy on the test set and recording loss/accuracy for plotting.
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        # reshape the flat 784-pixel rows to (batch, seq_dim, input_dim)
        # so each image is treated as a 28-step sequence of 28 features
        train = Variable(images.view(-1, seq_dim, input_dim))
        labels = Variable(labels)
        # Clear gradients
        optimizer.zero_grad()
        # Forward propagation
        outputs = model(train)
        # Calculate softmax and cross entropy loss
        loss = error(outputs, labels)
        # Calculating gradients
        loss.backward()
        # Update parameters
        optimizer.step()
        count += 1
        if count % 250 == 0:
            # Calculate accuracy over the whole test set
            correct = 0
            total = 0
            for images, labels in test_loader:
                images = Variable(images.view(-1, seq_dim, input_dim))
                # Forward propagation
                outputs = model(images)
                # Get predictions from the maximum value
                predicted = torch.max(outputs.data, 1)[1]
                # Total number of labels
                total += labels.size(0)
                correct += (predicted == labels).sum()
            accuracy = 100 * correct / float(total)
            # store loss (as a Python float) and iteration
            # NOTE: loss.item() replaces loss.data[0], which raises
            # "invalid index of a 0-dim tensor" on PyTorch >= 0.5
            loss_list.append(loss.item())
            iteration_list.append(count)
            accuracy_list.append(accuracy)
            if count % 500 == 0:
                # Print Loss
                print('Iteration: {} Loss: {} Accuracy: {} %'.format(count, loss.item(), accuracy))
# visualization loss
plt.plot(iteration_list,loss_list)
plt.xlabel("Number of iteration")
plt.ylabel("Loss")
plt.title("RNN: Loss vs Number of iteration")
plt.show()
# visualization accuracy
plt.plot(iteration_list,accuracy_list,color = "red")
plt.xlabel("Number of iteration")
plt.ylabel("Accuracy")
plt.title("RNN: Accuracy vs Number of iteration")
plt.savefig('graph.png')
plt.show()
```
### Conclusion
In this tutorial, we learn:
1. Basics of pytorch
1. Linear regression with pytorch
1. Logistic regression with pytorch
1. Artificial neural network with pytorch
1. Convolutional neural network with pytorch
- https://www.kaggle.com/kanncaa1/pytorch-tutorial-for-deep-learning-lovers/code
1. Recurrent neural network with pytorch
<br> If you have any questions or suggestions, I will be happy to hear them.
| github_jupyter |
Random Sampling
=============
Copyright 2016 Allen Downey
License: [Creative Commons Attribution 4.0 International](http://creativecommons.org/licenses/by/4.0/)
```
from __future__ import print_function, division
import numpy
import scipy.stats
import matplotlib.pyplot as pyplot
from ipywidgets import interact, interactive, fixed
import ipywidgets as widgets
# seed the random number generator so we all get the same results
numpy.random.seed(18)
# some nicer colors from http://colorbrewer2.org/
COLOR1 = '#7fc97f'
COLOR2 = '#beaed4'
COLOR3 = '#fdc086'
COLOR4 = '#ffff99'
COLOR5 = '#386cb0'
%matplotlib inline
```
Part One
========
Suppose we want to estimate the average weight of men and women in the U.S.
And we want to quantify the uncertainty of the estimate.
One approach is to simulate many experiments and see how much the results vary from one experiment to the next.
I'll start with the unrealistic assumption that we know the actual distribution of weights in the population. Then I'll show how to solve the problem without that assumption.
Based on data from the [BRFSS](http://www.cdc.gov/brfss/), I found that the distribution of weight in kg for women in the U.S. is well modeled by a lognormal distribution with the following parameters:
```
weight = scipy.stats.lognorm(0.23, 0, 70.8)
weight.mean(), weight.std()
```
Here's what that distribution looks like:
```
xs = numpy.linspace(20, 160, 100)
ys = weight.pdf(xs)
pyplot.plot(xs, ys, linewidth=4, color=COLOR1)
pyplot.xlabel('weight (kg)')
pyplot.ylabel('PDF')
None
```
`make_sample` draws a random sample from this distribution. The result is a NumPy array.
```
def make_sample(n=100):
    """Draw a random sample of size `n` from the population weight
    distribution; the result is a NumPy array."""
    return weight.rvs(n)
```
Here's an example with `n=100`. The mean and std of the sample are close to the mean and std of the population, but not exact.
```
sample = make_sample(n=100)
sample.mean(), sample.std()
```
We want to estimate the average weight in the population, so the "sample statistic" we'll use is the mean:
```
def sample_stat(sample):
    """Sample statistic of interest: the sample mean."""
    return numpy.mean(sample)
```
One iteration of "the experiment" is to collect a sample of 100 women and compute their average weight.
We can simulate running this experiment many times, and collect a list of sample statistics. The result is a NumPy array.
```
def compute_sample_statistics(n=100, iters=1000):
    """Simulate `iters` experiments of sample size `n` and collect the
    resulting sample statistics as a NumPy array."""
    collected = []
    for _ in range(iters):
        collected.append(sample_stat(make_sample(n)))
    return numpy.array(collected)
```
The next line runs the simulation 1000 times and puts the results in
`sample_means`:
```
sample_means = compute_sample_statistics(n=100, iters=1000)
```
Let's look at the distribution of the sample means. This distribution shows how much the results vary from one experiment to the next.
Remember that this distribution is not the same as the distribution of weight in the population. This is the distribution of results across repeated imaginary experiments.
```
pyplot.hist(sample_means, color=COLOR5)
pyplot.xlabel('sample mean (n=100)')
pyplot.ylabel('count')
None
```
The mean of the sample means is close to the actual population mean, which is nice, but not actually the important part.
```
sample_means.mean()
```
The standard deviation of the sample means quantifies the variability from one experiment to the next, and reflects the precision of the estimate.
This quantity is called the "standard error".
```
std_err = sample_means.std()
std_err
```
We can also use the distribution of sample means to compute a "90% confidence interval", which contains 90% of the experimental results:
```
conf_int = numpy.percentile(sample_means, [5, 95])
conf_int
```
The following function takes an array of sample statistics and prints the SE and CI:
```
def summarize_sampling_distribution(sample_stats):
    """Print the standard error (SE) and the 90% confidence interval of an
    array of sample statistics."""
    std_err = sample_stats.std()
    conf_int = numpy.percentile(sample_stats, [5, 95])
    print('SE', std_err)
    print('90% CI', conf_int)
```
And here's what that looks like:
```
summarize_sampling_distribution(sample_means)
```
Now we'd like to see what happens as we vary the sample size, `n`. The following function takes `n`, runs 1000 simulated experiments, and summarizes the results.
```
def plot_sample_stats(n, xlim=None):
    """Run 1000 simulated experiments of sample size `n`, print the SE and
    90% CI, and plot a histogram of the sampling distribution.

    `xlim` pins the x-axis limits so interactive reruns do not rescale.
    """
    stats = compute_sample_statistics(n, iters=1000)
    summarize_sampling_distribution(stats)
    pyplot.hist(stats, color=COLOR2)
    pyplot.xlabel('sample statistic')
    pyplot.xlim(xlim)
```
Here's a test run with `n=100`:
```
plot_sample_stats(100)
```
Now we can use `interact` to run `plot_sample_stats` with different values of `n`. Note: `xlim` sets the limits of the x-axis so the figure doesn't get rescaled as we vary `n`.
```
def sample_stat(sample):
    """Sample statistic: the mean (re-defined so `interact` picks it up)."""
    return numpy.mean(sample)
slider = widgets.IntSlider(min=10, max=1000, value=100)
interact(plot_sample_stats, n=slider, xlim=fixed([55, 95]))
None
```
### Other sample statistics
This framework works with any other quantity we want to estimate. By changing `sample_stat`, you can compute the SE and CI for any sample statistic.
**Exercise 1**: Fill in `sample_stat` below with any of these statistics:
* Standard deviation of the sample.
* Coefficient of variation, which is the sample standard deviation divided by the sample mean.
* Min or Max
* Median (which is the 50th percentile)
* 10th or 90th percentile.
* Interquartile range (IQR), which is the difference between the 75th and 25th percentiles.
NumPy array methods you might find useful include `std`, `min`, `max`, and `percentile`.
Depending on the results, you might want to adjust `xlim`.
```
def sample_stat(sample):
    """Sample statistic: the standard deviation of the sample."""
    # TODO: replace the following line with another sample statistic
    return numpy.std(sample)
slider = widgets.IntSlider(min=10, max=1000, value=100)
interact(plot_sample_stats, n=slider, xlim=fixed([0, 100]))
None
```
STOP HERE
---------
We will regroup and discuss before going on.
Part Two
========
So far we have shown that if we know the actual distribution of the population, we can compute the sampling distribution for any sample statistic, and from that we can compute SE and CI.
But in real life we don't know the actual distribution of the population. If we did, we wouldn't need to estimate it!
In real life, we use the sample to build a model of the population distribution, then use the model to generate the sampling distribution. A simple and popular way to do that is "resampling," which means we use the sample itself as a model of the population distribution and draw samples from it.
Before we go on, I want to collect some of the code from Part One and organize it as a class. This class represents a framework for computing sampling distributions.
```
class Resampler(object):
    """Framework for computing sampling distributions by bootstrapping:
    the observed sample stands in as a model of the population."""

    def __init__(self, sample, xlim=None):
        """Store the observed sample and optional x-axis limits for plots."""
        self.sample = sample
        self.n = len(sample)
        self.xlim = xlim

    def resample(self):
        """Generate a new sample by drawing from the original sample
        with replacement."""
        return numpy.random.choice(self.sample, self.n, replace=True)

    def sample_stat(self, sample):
        """Compute the sample statistic (the mean) for an observed or
        simulated sample; override in subclasses for other statistics."""
        return numpy.mean(sample)

    def compute_sample_statistics(self, iters=1000):
        """Simulate `iters` experiments and collect the resulting sample
        statistics as a NumPy array."""
        collected = []
        for _ in range(iters):
            collected.append(self.sample_stat(self.resample()))
        return numpy.array(collected)

    def plot_sample_stats(self):
        """Run simulated experiments, print the SE and 90% CI, and plot a
        histogram of the sampling distribution."""
        stats = self.compute_sample_statistics()
        summarize_sampling_distribution(stats)
        pyplot.hist(stats, color=COLOR2)
        pyplot.xlabel('sample statistic')
        pyplot.xlim(self.xlim)
```
The following function instantiates a `Resampler` and runs it.
```
def plot_resampled_stats(n=100):
    """Draw `n` weights from the population, then plot the bootstrap
    sampling distribution of the mean."""
    resampler = Resampler(weight.rvs(n), xlim=[55, 95])
    resampler.plot_sample_stats()
```
Here's a test run with `n=100`
```
plot_resampled_stats(100)
```
Now we can use `plot_resampled_stats` in an interaction:
```
slider = widgets.IntSlider(min=10, max=1000, value=100)
interact(plot_resampled_stats, n=slider, xlim=fixed([1, 15]))
None
```
**Exercise 2**: write a new class called `StdResampler` that inherits from `Resampler` and overrides `sample_stat` so it computes the standard deviation of the resampled data.
```
class StdResampler(Resampler):
    """Resampler whose statistic is the standard deviation."""

    def sample_stat(self, sample):
        """Compute the standard deviation of an observed or
        simulated sample."""
        return numpy.std(sample)
```
Test your code using the cell below:
```
def plot_resampled_stats(n=100):
    """Draw `n` weights and plot the bootstrap sampling distribution of
    the standard deviation."""
    StdResampler(weight.rvs(n), xlim=[0, 100]).plot_sample_stats()
plot_resampled_stats()
```
When your `StdResampler` is working, you should be able to interact with it:
```
slider = widgets.IntSlider(min=10, max=1000, value=100)
interact(plot_resampled_stats, n=slider)
None
```
STOP HERE
---------
We will regroup and discuss before going on.
Part Three
==========
We can extend this framework to compute SE and CI for a difference in means.
For example, men are heavier than women on average. Here's the women's distribution again (from BRFSS data):
```
female_weight = scipy.stats.lognorm(0.23, 0, 70.8)
female_weight.mean(), female_weight.std()
```
And here's the men's distribution:
```
male_weight = scipy.stats.lognorm(0.20, 0, 87.3)
male_weight.mean(), male_weight.std()
```
I'll simulate a sample of 100 men and 100 women:
```
female_sample = female_weight.rvs(100)
male_sample = male_weight.rvs(100)
```
The difference in means should be about 17 kg, but will vary from one random sample to the next:
```
male_sample.mean() - female_sample.mean()
```
Here's the function that computes Cohen's $d$ again:
```
def CohenEffectSize(group1, group2):
    """Compute Cohen's d.

    group1: Series or NumPy array
    group2: Series or NumPy array

    returns: float
    """
    mean_diff = group1.mean() - group2.mean()
    n1, n2 = len(group1), len(group2)
    # pooled variance, weighted by group size
    pooled_var = (n1 * group1.var() + n2 * group2.var()) / (n1 + n2)
    return mean_diff / numpy.sqrt(pooled_var)
```
The difference in weight between men and women is about 1 standard deviation:
```
CohenEffectSize(male_sample, female_sample)
```
Now we can write a version of the `Resampler` that computes the sampling distribution of $d$.
```
class CohenResampler(Resampler):
    """Resampler for the sampling distribution of Cohen's d between
    two groups."""

    def __init__(self, group1, group2, xlim=None):
        """Store the two observed groups and optional x-axis limits."""
        self.group1 = group1
        self.group2 = group2
        self.xlim = xlim

    def resample(self):
        """Bootstrap each group independently, with replacement."""
        g1 = numpy.random.choice(self.group1, len(self.group1), replace=True)
        g2 = numpy.random.choice(self.group2, len(self.group2), replace=True)
        return g1, g2

    def sample_stat(self, groups):
        """Compute Cohen's d for a (group1, group2) pair."""
        return CohenEffectSize(*groups)

    # NOTE: The following methods match Resampler and could be inherited;
    # they are kept here for readability.
    def compute_sample_statistics(self, iters=1000):
        collected = [self.sample_stat(self.resample()) for i in range(iters)]
        return numpy.array(collected)

    def plot_sample_stats(self):
        stats = self.compute_sample_statistics()
        summarize_sampling_distribution(stats)
        pyplot.hist(stats, color=COLOR2)
        pyplot.xlabel('sample statistic')
        pyplot.xlim(self.xlim)
```
Now we can instantiate a `CohenResampler` and plot the sampling distribution.
```
resampler = CohenResampler(male_sample, female_sample)
resampler.plot_sample_stats()
```
This example demonstrates an advantage of the computational framework over mathematical analysis. Statistics like Cohen's $d$, which is the ratio of other statistics, are relatively difficult to analyze. But with a computational approach, all sample statistics are equally "easy".
One note on vocabulary: what I am calling "resampling" here is a specific kind of resampling called "bootstrapping". Other techniques that are also considered resampling include permutation tests, which we'll see in the next section, and "jackknife" resampling. You can read more at <http://en.wikipedia.org/wiki/Resampling_(statistics)>.
| github_jupyter |
# Tutorial: CommonRoad Route Planner
This tutorial demonstrates how the CommonRoad Route Planner package can be used to plan high-level routes for planning problems given in CommonRoad scenarios.
## 0. Preparation
* you have gone through the tutorial for **CommonRoad Input-Output**
* you have installed the [route planner](https://gitlab.lrz.de/tum-cps/commonroad-route-planner) package
Let's start with importing relevant modules and classes.
```
%matplotlib inline
%load_ext autoreload
%autoreload 2
import os
import sys
# add the root folder to python path
path_notebook = os.getcwd()
sys.path.append(os.path.join(path_notebook, "../"))
import matplotlib.pyplot as plt
from commonroad.common.file_reader import CommonRoadFileReader
from commonroad_route_planner.route_planner import RoutePlanner
from commonroad_route_planner.utility.visualization import visualize_route
from commonroad.visualization.mp_renderer import MPRenderer
```
## 1. Loading CR Scenario and Planning Problem
In the next step, we load a CommonRoad scenario and its planning problem(s), for which the routes should be planned. The route planner handles **one planning problem** at a time, thus we need to manually specify the planning problem for which the routes should be planned. In our case, we select the first planning problem in the planning problem set. The meaning of the symbols in a scenario are explained as follows:
* **Dot**: initial state of the planning problem projected onto the position domain
* **Blue rectangle**: dynamic obstacle
* **Yellow rectangle**: goal region projected onto the position domain
```
# load scenario
path_scenario = os.path.join(path_notebook, "../../scenarios/tutorial/")
id_scenario = 'USA_Peach-2_1_T-1'
# read in scenario and planning problem set
scenario, planning_problem_set = CommonRoadFileReader(path_scenario + id_scenario + '.xml').open()
# retrieve the first planning problem in the problem set
planning_problem = list(planning_problem_set.planning_problem_dict.values())[0]
# plot the scenario and the planning problem set
renderer = MPRenderer(figsize=(12, 12))
scenario.draw(renderer)
planning_problem.draw(renderer)
renderer.render()
plt.margins(0, 0)
```
## 2. Creating a route planner and planning for routes
### 2.1 Instantiation
A route planner can be easily constructed by passing the **scenario** and the **planning problem** to `RoutePlanner` object. As for the backend, there are currently three supported options:
1. NETWORKX: uses built-in functions from the networkx package, tends to change lane later
2. NETWORKX_REVERSED: uses built-in functions from the networkx package, tends to change lane earlier
3. PRIORITY_QUEUE: uses A-star search to find routes, lane change maneuver depends on the heuristic cost
### 2.2 Planning all possible routes
The route planner plans a route for all possible combinations of start / goal lanelets. E.g. if our initial state is located in two lanes (due to overlapping of lanelets), and the same for our goal state, the route planner will try to plan routes for the four possible combinations.
### 2.3 Retrieving a route
Planned routes can be retrieved by using simple indices, or based on some heuristic functions to determine the best route of all. A route consists of a list of lanelet ids that leads from the initial state to the goal state.
### 2.4 Retrieving reference path
A reference path is automatically generated for each planned routes. The center lines of lanelets of a route is used to construct the reference path. The resulting polyline is then smoothened with Chaikin's corner cutting algorithm.
```
# instantiate a route planner with the scenario and the planning problem
route_planner = RoutePlanner(scenario, planning_problem, backend=RoutePlanner.Backend.NETWORKX_REVERSED)
# plan routes, and save the routes in a route candidate holder
candidate_holder = route_planner.plan_routes()
# option 1: retrieve all routes
list_routes, num_route_candidates = candidate_holder.retrieve_all_routes()
print(f"Number of route candidates: {num_route_candidates}")
# here we retrieve the first route in the list, this is equivalent to: route = list_routes[0]
route = candidate_holder.retrieve_first_route()
# option 2: retrieve the best route by orientation metric
# route = candidate_holder.retrieve_best_route_by_orientation()
# print coordinates of the vertices of the reference path
print("\nCoordinates [x, y]:")
print(route.reference_path)
```
## 3. Visualizing planning results
The planned routes can be easily visualized with the `visualize_route` function. The arguments `draw_route_lanelets` and `draw_reference_path` indicate whether the lanelets of the route and the reference path should be drawn, respectively. The lanelets of the route are colored in green.
```
visualize_route(route, draw_route_lanelets=True, draw_reference_path=False, size_x=6)
```
We now plot the generated reference path as well, which is colored in red.
```
visualize_route(route, draw_route_lanelets=True, draw_reference_path=True, size_x=6)
```
| github_jupyter |
# Porting genome scale metabolic models for metabolomics
**rat-GEM as default rat model, for better compatibility**
https://github.com/SysBioChalmers/rat-GEM
**Use cobra to parse SBML models whereas applicable**
Not all models comply with the formats in cobra. Models from USCD and Thiele labs should comply.
**Base our code on metDataModel**
Each model needs a list of Reactions, list of Pathways, and a list of Compounds.
It's important to include with Compounds with all linked identifiers to other DBs (HMDB, PubChem, etc), and with formulae (usually charged form in these models) when available.
We can always update the data later. E.g., the neutral formulae can be inferred from the charged formulae or retrieved from a public metabolite database (e.g., HMDB) if linked.
Save in Python pickle and in JSON.
**No compartmentalization**
- After decompartmentalization,
- transport reactions can be removed - they are identified by reactants and products being the same.
    - redundant reactions can be merged - the same reactions in different compartments become one.
Shuzhao Li, 2021-10-21|
Minghao Gong, 2022-04-19
```
# !pip install cobra --user --ignore-installed ruamel.yaml
# !pip install --upgrade metDataModel # https://github.com/shuzhao-li/metDataModel/
# !pip install --upgrade numpy pandas
import cobra # https://cobrapy.readthedocs.io/en/latest/io.html#SBML
from metDataModel.core import Compound, Reaction, Pathway, MetabolicModel
import requests
import sys
import re
sys.path.append("/Users/gongm/Documents/projects/mass2chem/")
sys.path.append("/Users/gongm/Documents/projects/JMS/JMS/JMS")
from mass2chem.formula import *
from jms.formula import *
from jms.utils.gems import *
from jms.utils.git_download import *
# download the most updated Rat-GEM.xml
model_name = 'Rat-GEM'
# NOTE(review): this is the GitHub HTML page URL; if git_download_from_file
# does not rewrite it to raw.githubusercontent.com, the downloaded file will
# be an HTML page rather than the SBML XML -- confirm.
xml_url = f'https://github.com/SysBioChalmers/{model_name}/blob/main/model/{model_name}.xml'
local_path = output_fdr = f'../testdata/{model_name}/'
# makedirs(exist_ok=True) replaces the original bare try/except, which
# silently swallowed every error (e.g. permission problems), not just
# "directory already exists"
os.makedirs(local_path, exist_ok=True)
xml_file_name = f'{model_name}.xml'
git_download_from_file(xml_url, local_path, xml_file_name)
# Read the model via cobra
xmlFile = os.path.join(local_path, xml_file_name)
model = cobra.io.read_sbml_model(xmlFile)
model
# metabolite entries, readily convert to list of metabolites
model.metabolites[990]
# reaction entries, readily convert to list of reactions
model.reactions[33]
# groups are similar to pathways? readily convert to list of pathways
model.groups[11].__dict__
```
## Port metabolite
```
def port_metabolite(M):
    """Convert a cobra Metabolite to a metDataModel Compound.

    Decompartmentalizes the source ID, carries over charge and charged
    formula, derives the neutral formula/monoisotopic mass, and collects
    cross-database identifiers (HMDB, PubChem, inchi, ...) from the SBML
    annotation into a flat list of [db_name, id] pairs.
    """
    Cpd = Compound()
    # strip the one-letter compartment suffix from the model ID
    Cpd.src_id = remove_compartment_by_substr(M.id, 1)
    Cpd.id = Cpd.src_id  # temporarily the same as the source id
    Cpd.name = M.name
    Cpd.charge = M.charge
    # models store the charged form; derive the neutral formula/mass from it
    Cpd.neutral_formula = adjust_charge_in_formula(M.formula, M.charge)
    Cpd.neutral_mono_mass = neutral_formula2mass(Cpd.neutral_formula)
    Cpd.charged_formula = M.formula
    # using src_id to also reference Rat-GEM ID in db_ids field
    Cpd.db_ids = [[model_name, Cpd.src_id]]
    for k, v in M.annotation.items():
        if k == 'sbo':
            continue  # SBO terms are ontology labels, not database IDs
        if isinstance(v, list):
            # BUGFIX: was append(), which nested the whole list of pairs as a
            # single db_ids element; extend() keeps db_ids a flat list of
            # [db, id] pairs as the inchi scan below expects
            Cpd.db_ids.extend([[k, x] for x in v])
        elif ":" in v:
            Cpd.db_ids.append([k, v.split(":")[1]])  # drop the 'db:' prefix
        else:
            Cpd.db_ids.append([k, v])
    # assumes inchi values look like 'InChI=...' -- TODO confirm
    inchi_list = [x[1].split('=')[1] for x in Cpd.db_ids if x[0] == 'inchi']
    if len(inchi_list) == 1:
        Cpd.inchi = inchi_list[0]
    elif len(inchi_list) > 1:
        Cpd.inchi = inchi_list
    return Cpd
# convert every cobra metabolite in the model to a Compound
myCpds = []
for i in range(len(model.metabolites)):
    myCpds.append(port_metabolite(model.metabolites[i]))
len(myCpds)
# remove duplicated compounds (same compound seen in several compartments)
myCpds = remove_duplicate_cpd(myCpds)
len(myCpds)
myCpds[100].__dict__
# enrich db_ids with identifiers from the Metabolic Atlas annotation table
fetch_MetabAtlas_GEM_identifiers(compound_list = myCpds,
                                 modelName = model_name,
                                 local_path = local_path,
                                 metab_file_name = 'metabolites.tsv',
                                 overwrite = True)
myCpds[100].__dict__
```
## Port reactions
```
# port reactions, to include genes and enzymes
def port_reaction(R):
    """Convert a cobra Reaction into a metDataModel Reaction.

    Reactant and product IDs are decompartmentalized; gene IDs and EC
    numbers (from the 'ec-code' annotation) are carried along.
    """
    rxn = Reaction()
    rxn.id = R.id
    # drop the compartment suffix so reactions can be merged later
    rxn.reactants = [remove_compartment_by_substr(met.id, 1) for met in R.reactants]
    rxn.products = [remove_compartment_by_substr(met.id, 1) for met in R.products]
    rxn.genes = [gene.id for gene in R.genes]
    ec_codes = R.annotation.get('ec-code', [])
    # this version of Rat-GEM may store a single EC number as a plain string
    rxn.enzymes = ec_codes if isinstance(ec_codes, list) else [ec_codes]
    return rxn
# spot-check the conversion on a single reaction
test99 = port_reaction(model.reactions[199])
[test99.id,
 test99.reactants,
 test99.products,
 test99.genes,
 test99.enzymes
]
## Reactions to port
myRxns = []
for R in model.reactions:
    myRxns.append( port_reaction(R) )
print(len(myRxns))
# remove duplicated reactions after decompartmentalization
# (transport reactions collapse to reactants == products and duplicates merge)
myRxns = remove_duplicate_rxn(myRxns)
len(myRxns)
myRxns[0].__dict__
```
## Port pathway
```
# pathways, using group as pathway. Other models may use subsystem etc.
def port_pathway(P, model_name, version='v1.10.0'):
    """Convert a cobra Group into a metDataModel Pathway.

    Parameters
    ----------
    P : cobra Group whose members are reactions.
    model_name : str, source model name recorded in `source`.
    version : str, source model release tag. Was hard-coded; now a
        defaulted parameter so other releases can be ported without
        editing this function (backward compatible).
    """
    new = Pathway()
    new.id = P.id
    new.source = [f'{model_name} {version}', ]
    new.name = P.name
    new.list_of_reactions = [x.id for x in P.members]
    return new
# demo conversion of a single group
p = port_pathway(model.groups[12],model_name)
[p.id, p.name, p.list_of_reactions[:5]]
## Pathways to port
myPathways = []
for P in model.groups:
    myPathways.append(port_pathway(P,model_name))
len(myPathways)
# retain the valid reactions in list of pathway
# (drops reaction IDs that were removed as duplicates above)
myPathways = retain_valid_Rxns_in_Pathways(myPathways,myRxns)
# test if the length of unique reactions matched with the length of decompartmentalized reaction list
test_list_Rxns = []
for pathway in myPathways:
    for y in pathway.list_of_reactions:
        test_list_Rxns.append(y)
len(set(test_list_Rxns))
```
## Collected data; now output
```
from datetime import datetime
# date stamp used in the model ID, metadata and output file names
today = str(datetime.today()).split(" ")[0]
today
# NOTE(review): the model was decompartmentalized above, so
# 'compartmentalized' here looks like a typo -- confirm before release.
note = """Rat-GEM compartmentalized, with genes and ECs."""
## metabolicModel to export
MM = MetabolicModel()
MM.id = f'az_{model_name}_{today}' #
MM.meta_data = {
    'species': model_name.split('-')[0],
    'version': '',
    'sources': [f'https://github.com/SysBioChalmers/{model_name}, retrieved {today}'], #
    'status': '',
    'last_update': today, #
    'note': note,
}
# serialize to plain dicts so the model pickles/JSON-exports cleanly
MM.list_of_pathways = [P.serialize() for P in myPathways]
MM.list_of_reactions = [R.serialize() for R in myRxns]
MM.list_of_compounds = [C.serialize() for C in myCpds]
# check output
[
    MM.list_of_pathways[2],
    MM.list_of_reactions[:2],
    MM.list_of_compounds[100:102],
]
import pickle
import os
# Write pickle file
export_pickle(os.path.join(output_fdr,f'{MM.id}.pickle'), MM)
# Write json file
export_json(os.path.join(output_fdr,f'{MM.id}.json'), MM)
# Write dataframe
import pandas as pd
export_table(os.path.join(output_fdr,f'{MM.id}_list_of_compounds.csv'),MM, 'list_of_compounds')
export_table(os.path.join(output_fdr,f'{MM.id}_list_of_reactions.csv'),MM, 'list_of_reactions')
export_table(os.path.join(output_fdr,f'{MM.id}_list_of_pathways.csv'),MM, 'list_of_pathways')
```
## Summary
This ports reactions, pathways and compounds. Gene and enzyme information is now included.
The exported pickle can be re-imported and uploaded to Database easily.
This notebook, the pickle file and the JSON file go to GitHub repo (https://github.com/shuzhao-li/Azimuth).
| github_jupyter |
```
#The birth of skynet, always good to start with a joke
print("hello World!")
# Dependencies
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
import os
import scipy.stats as st
import numpy as np
import requests
import time
import gmaps
import json
from pprint import pprint
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.arima_model import ARIMA
from pandas.plotting import register_matplotlib_converters
# Import API key
from config import api_key
from config import g_key
register_matplotlib_converters()  # lets matplotlib plot pandas datetimes
# Study data files
project_path = "Project_df.csv"
# Read the csv file
Project_df = pd.read_csv(project_path)
# Display the data table for preview
Project_df
```
# Part 1 - Statistical Analysis.
#### We will use statistical analysis to understand our data
```
# Filtering the data by year to run aggregate function.
gb_year = Project_df.groupby('Year')
Project_18_df = gb_year.get_group(2018)
Project_19_df = gb_year.get_group(2019)
Project_20_df = gb_year.get_group(2020)
# Working with 2018 Data by Category Sales
# min/max/sum/mean/median/var/std/sem of Total Sales per Category, $-formatted
stats_summary_2018_df = (Project_18_df.groupby("Category")["Total Sales"].agg(["min","max", "sum", "mean","median","var","std","sem"]).style.format('${0:,.2f}'))
stats_summary_2018_df
# Working with 2019 Data by Category Sales
stats_summary_2019_df = (Project_19_df.groupby("Category")["Total Sales"].agg(["min","max", "sum","mean","median","var","std","sem"]).style.format('${0:,.2f}'))
stats_summary_2019_df
# Working with 2020 Data by Category Sales
stats_summary_2020_df = (Project_20_df.groupby("Category")["Total Sales"].agg(["min","max", "sum","mean","median","var","std","sem"]).style.format('${0:,.2f}'))
stats_summary_2020_df
```
# Part 2 - Analyzing Data by Number of Clients.
#### We will try to analyze if the Number of Clients has a direct impact on the Total Sales, regardless of their Category
```
# We will create a Data Frame with the number of different clients we had for each month of the three years
clients_by_month = Project_df.groupby('Year')
# NOTE(review): count() tallies rows (transactions) per month; truly distinct
# clients would need .nunique() on 'Client ID' -- confirm the intent.
client_no_18 = pd.DataFrame(clients_by_month.get_group(2018))
client_no_18 = client_no_18.groupby('Month').count()[["Client ID"]]
client_no_18 = client_no_18.values.tolist()
client_no_19 = pd.DataFrame(clients_by_month.get_group(2019))
client_no_19 = client_no_19.groupby('Month').count()[["Client ID"]]
client_no_19 = client_no_19.values.tolist()
client_no_20 = pd.DataFrame(clients_by_month.get_group(2020))
client_no_20 = client_no_20.groupby('Month').count()[["Client ID"]]
client_no_20 = client_no_20.values.tolist()
# concatenate the three years into a single 36-month series
clients_number = client_no_18 + client_no_19 + client_no_20
months = list(range(1, 37))
clients_by_month = pd.DataFrame(clients_number, index = months, columns =['Number of DIfferent Clients'])
clients_by_month.head()
# Plotting the Data Frame of Clients by Month on a 3 year term
clients_by_month.plot(kind="bar", color="r", figsize=(9,3))
plt.title("Clients by Month")
plt.xlabel("Month")
plt.ylabel("Number of Different Clients")
plt.legend(loc=1, prop={'size': 8})
plt.xticks(rotation = 0)
plt.savefig("plots/Count of different clients by month.png")
plt.show()
# Analyzing total sales behavior per month
grouped_sales_by_month = Project_df.groupby(['Year','Month'])
sales_df = pd.DataFrame(grouped_sales_by_month['Total Sales'].sum())
sales_df['Date']= sales_df.index  # keep the (Year, Month) tuple as a column
sales_df
# Defining axes
xticks = sales_df['Total Sales'].tolist()  # NOTE(review): unused below
x_axis = np.arange(len(sales_df['Total Sales']))
# Plot the line
plt.plot(x_axis, sales_df['Total Sales'])
plt.title('Total Sales in Millons per Month')
plt.xticks(ticks=[0,12,24], labels = ['2018','2019','2020'])
plt.savefig('plots/Total sales per month.png')
plt.show()
# We will try to forecast the sales for the next year using an ARIMA model
# First we convert the columns we need into lists
month_list = list(range(1, 37))
sales_list = sales_df['Total Sales'].tolist()
# With those lists, we will create our Data Frame
arima_df = pd.DataFrame()
arima_df['Month'] = month_list
arima_df['Total Sales'] = sales_list
arima_df = arima_df.set_index('Month')
arima_df.head()
# We now will get the rolling mean and the rolling std and plot it with Total Sales
rolling_mean = arima_df.rolling(window = 3).mean()  # 3-month window
rolling_std = arima_df.rolling(window = 3).std()
plt.plot(arima_df, color = 'blue', label = 'Original')
plt.plot(rolling_mean, color = 'red', label = 'Rolling Mean')
plt.plot(rolling_std, color = 'black', label = 'Rolling Std')
plt.legend(loc = 'best')
plt.title('Rolling mean and Rolling std')
plt.show()
# Plotting the log-transformed series (stabilizes variance before ARIMA)
df_log = np.log(arima_df)
plt.plot(df_log)
# We will define a function that will helps us know if our time series is stationary
def get_stationarity(timeseries):
    """Plot rolling statistics and run an augmented Dickey-Fuller test.

    Parameters
    ----------
    timeseries : DataFrame with a 'Total Sales' column.

    Prints the ADF statistic, p-value and critical values; a small
    p-value indicates the series can be treated as stationary.
    """
    # rolling statistics (3-period window, matching the analysis above)
    rolling_mean = timeseries.rolling(window=3).mean()
    rolling_std = timeseries.rolling(window=3).std()
    # rolling statistics plot; plt.plot return values were bound to unused
    # locals in the original and are simply discarded here
    plt.plot(timeseries, color='blue', label='Original')
    plt.plot(rolling_mean, color='red', label='Rolling Mean')
    plt.plot(rolling_std, color='black', label='Rolling Std')
    plt.legend(loc='best')
    plt.title('Rolling Mean and std')
    plt.show(block=False)
    # Dickey–Fuller test:
    result = adfuller(timeseries['Total Sales'])
    print('ADF Statistic: {}'.format(result[0]))
    print('p-value: {}'.format(result[1]))
    print('Critical Values:')
    for key, value in result[4].items():
        print('\t{}: {}'.format(key, value))
# We will subtract the rolling mean now to render the series stationary
rolling_mean = df_log.rolling(window=3).mean()
df_log_minus_mean = df_log - rolling_mean
df_log_minus_mean.dropna(inplace=True)  # first rows have no rolling mean
get_stationarity(df_log_minus_mean)
# We will now subtract the preceding point (first difference) to look for a better solution
df_log_shift = df_log - df_log.shift()
df_log_shift.dropna(inplace=True)
get_stationarity(df_log_shift)
# We will create an ARIMA model with an autoregressive component
# NOTE(review): statsmodels.tsa.arima_model.ARIMA is deprecated/removed in
# newer statsmodels (replaced by statsmodels.tsa.arima.model.ARIMA) -- confirm
# the pinned statsmodels version.
decomposition = seasonal_decompose(df_log, period=1)
model = ARIMA(df_log, order=(2,1,2))  # (p, d, q)
results = model.fit(disp=-1)
plt.plot(df_log_shift)
plt.plot(results.fittedvalues, color='red')
# We will now compare that model with our original series:
# undo the differencing (cumsum) and the log transform (exp) to get back to sales units
predictions_ARIMA_diff = pd.Series(results.fittedvalues, copy=True)
predictions_ARIMA_diff_cumsum = predictions_ARIMA_diff.cumsum()
predictions_ARIMA_log = pd.Series(df_log['Total Sales'].iloc[0], index=df_log.index)
predictions_ARIMA_log = predictions_ARIMA_log.add(predictions_ARIMA_diff_cumsum, fill_value=0)
predictions_ARIMA = np.exp(predictions_ARIMA_log)
plt.plot(arima_df)
plt.plot(predictions_ARIMA)
# Now we can predict the Total Sales for the next year (months 37-48)
results.plot_predict(1,48)
plt.savefig('plots/ARIMA model for 12 months prediction.png')
# We will compare if Sales and Number of Clients hold a relation
sales_by_month = Project_df.groupby('Year')
total_sales_18 = pd.DataFrame(sales_by_month.get_group(2018))
total_sales_18 = total_sales_18.groupby('Month').sum()[["Total Sales"]]
total_sales_18 = total_sales_18.values.tolist()
total_sales_19 = pd.DataFrame(sales_by_month.get_group(2019))
total_sales_19 = total_sales_19.groupby('Month').sum()[["Total Sales"]]
total_sales_19 = total_sales_19.values.tolist()
total_sales_20 = pd.DataFrame(sales_by_month.get_group(2020))
total_sales_20 = total_sales_20.groupby('Month').sum()[["Total Sales"]]
total_sales_20 = total_sales_20.values.tolist()
# 36-month series of monthly sales, indexed by the client counts built earlier
total_sales_by_month = total_sales_18 + total_sales_19 + total_sales_20
total_sales = pd.DataFrame(total_sales_by_month, index = months, columns =['Total Sales'])
total_sales["Number of DIfferent Clients"] = clients_by_month["Number of DIfferent Clients"]
total_sales = total_sales.set_index("Number of DIfferent Clients")
total_sales.head()
# Converting 'Year' and 'Month' in to a Complete Date Format (m-d-y)
# NOTE(review): the Month != 24 filter looks like a cleanup for a bad row --
# confirm; also date_df is a slice of Project_df, so the assignment below may
# raise pandas' SettingWithCopyWarning.
date_df = Project_df[Project_df['Month']!=24]
date_df['date'] = pd.to_datetime(date_df[['Year','Month']].assign(DAY=1),format="%m-%d-%Y")
# Create a new dataframe based on the new 'date' and create the columns for the count of 'Client ID' and the sum of 'Total Sales'
pct_change_df = date_df.groupby(['date']).agg({'Client ID':'count',
                                               'Total Sales': 'sum'}).reset_index()
# month-over-month percentage changes for both series
pct_change_df['Clients pctChange'] = pct_change_df['Client ID'].pct_change()
pct_change_df['Total Sales pctChange'] = pct_change_df['Total Sales'].pct_change()
# Create combo chart
fig, ax1 = plt.subplots(figsize = (9,3))
sns.set_style('whitegrid')
# Create lineplot for the 'Total Sales Percentage Change'
s = sns.lineplot(data=pct_change_df, x="date", y="Total Sales pctChange",
                 linestyle = 'dashed', marker = 'o', color = 'green', label = 'Total Sales')
# Create lineplot for the 'Clients Volume Percentage Change'
c = sns.lineplot(data=pct_change_df, x="date", y="Clients pctChange",
                 linestyle = 'dashed', marker = 'o', color = 'blue', label = 'Clients Volume')
# Set title, label and legend
plt.title('Percentage Change: Total Sales vs Clients Volume', fontsize = 16)
plt.xlabel('Date', fontsize = 16)
plt.ylabel('Percentage Change %', fontsize = 16)
plt.legend(loc='best')
plt.savefig("plots/Percentage Change Total Sales vs Clients Volume.png")
plt.show()
```
# Part 3 - Analyzing Data by Category.
#### We will try to analyze Sales by Category
```
# Sum the Total Sales by Category
category_sales_df=(pd.DataFrame(Project_df.groupby('Category')[["Total Sales","Points Worth ($)"]].sum()))
# Calculate expense ratio
# We define expense ratio as the cost of the Points Worth divided by the Total Sales
# The objective behind calculating the expense ratio is to compare all categories to see if the percentage of money returned to customer is uniform between categories or if it is different
# Higher ratios indicate more money returned to customers.
category_sales_df["ratio_expense"]=category_sales_df["Points Worth ($)"]/category_sales_df["Total Sales"]*100
category_sales_df
%matplotlib notebook
# Grouping sales by percentage to see how much each category contributes
total_sales=category_sales_df["Total Sales"].sum()
sales_category=category_sales_df["Total Sales"]/total_sales*100
sales_category
names_category= ["C1","C2","C3","C4","C5","C6"]
# The colors of each section of the pie chart
colors = ["grey", "lightgreen", "cornflowerblue", "lightskyblue","red", "pink"]
explode = (0.0, 0.0, 0.0, 0.0, 0.0, 0.1)  # pull out the C6 slice for emphasis
plt.pie(sales_category, explode=explode, labels=names_category, colors=colors,
        autopct="%1.1f%%", shadow=True, startangle=140)
plt.title("Percentage of sales by category")
plt.savefig("plots/Sales percentage by category.png")
plt.show()
# Getting the min Monthly Sales by Category
min_sales_by_category = Project_df.drop_duplicates("Category").sort_values("Category", ascending= True)[["Category", "Min Monthly Sales"]].set_index('Category')
min_sales_by_category
%matplotlib notebook
# Comparing Compliance: mean compliance per (Year, Category)
category_com = Project_df.groupby(['Year','Category'])['Compliance'].mean().reset_index()
# Create line chart for each category, to see changes in compliance per year
sns.set(rc={'axes.facecolor':'white'})
fig, ax1 = plt.subplots(figsize = (9,3))
g = sns.lineplot(data=category_com, x="Year", y="Compliance",hue='Category')
plt.grid(False)
plt.legend(loc=2, prop={'size': 8})
plt.xlabel('Year')
plt.ylabel('Compliance per Category')
g.set(xticks=[2018, 2019, 2020])
plt.savefig("plots/Compliance per Category.png")
plt.show()
# Creating Data Frames for plotting Avg Quarterly Sales vs Avg Min Monthly Sales by Category
gb_category = Project_df.groupby('Category')
Project_c1_df = gb_category.get_group("C1")
Project_c2_df = gb_category.get_group("C2")
Project_c3_df = gb_category.get_group("C3")
Project_c4_df = gb_category.get_group("C4")
Project_c5_df = gb_category.get_group("C5")
Project_c6_df = gb_category.get_group("C6")

def _avg_sales_vs_min_by_quarter(category_df):
    # Mean 'Total Sales' and 'Min Monthly Sales' per (Year, Quarter),
    # re-indexed by a running quarter number over the whole time span.
    # Replaces six copy-pasted blocks; column selection uses a list
    # ([['a','b']]) because the original tuple-style groupby selection
    # ['a','b'] is deprecated/removed in recent pandas.
    avg = (category_df.groupby(['Year', 'Quarter'])[['Min Monthly Sales', 'Total Sales']]
           .mean()
           .reset_index())
    avg = avg[['Total Sales', 'Min Monthly Sales']]
    # running 1..n replaces the hard-coded [1..12], so the code also works
    # if the span is not exactly 12 quarters
    avg["Quarter Number"] = range(1, len(avg) + 1)
    return avg.set_index('Quarter Number')

avgsales_vs_avgmin_c1 = _avg_sales_vs_min_by_quarter(Project_c1_df)
avgsales_vs_avgmin_c2 = _avg_sales_vs_min_by_quarter(Project_c2_df)
avgsales_vs_avgmin_c3 = _avg_sales_vs_min_by_quarter(Project_c3_df)
avgsales_vs_avgmin_c4 = _avg_sales_vs_min_by_quarter(Project_c4_df)
avgsales_vs_avgmin_c5 = _avg_sales_vs_min_by_quarter(Project_c5_df)
avgsales_vs_avgmin_c6 = _avg_sales_vs_min_by_quarter(Project_c6_df)
%matplotlib inline
# one 'Avg Sales vs Avg Min Purchase' line chart per category, saved to plots/
avgsales_vs_avgmin_c1.plot()
plt.savefig("plots/Average Sales vs Average Min Purchase (C1).png")
avgsales_vs_avgmin_c2.plot()
plt.savefig("plots/Average Sales vs Average Min Purchase (C2).png")
avgsales_vs_avgmin_c3.plot()
plt.savefig("plots/Average Sales vs Average Min Purchase (C3).png")
avgsales_vs_avgmin_c4.plot()
plt.savefig("plots/Average Sales vs Average Min Purchase (C4).png")
avgsales_vs_avgmin_c5.plot()
plt.savefig("plots/Average Sales vs Average Min Purchase (C5).png")
avgsales_vs_avgmin_c6.plot()
plt.savefig("plots/Average Sales vs Average Min Purchase (C6).png")
# Heatmap Points Worth ($)
compliance_drop = Project_df.drop(columns =['Min Monthly Sales','Town','State','Distributor','Quarter','Compliance','Total Sales','Total Points','Client ID'])
compliance_p = compliance_drop.groupby(['Month','Year']).mean().reset_index()
# Unable to use points and compliance in one map, scales are too different
# NOTE(review): positional DataFrame.pivot(index, columns, values) arguments
# were removed in pandas 2.0; newer versions require keywords -- confirm the
# pinned pandas version.
compliance_pi2 = compliance_p.pivot('Month','Year','Points Worth ($)')
compliance_pi2
# Heatmap Compliance per Year
compliance_drop2 = Project_df.drop(columns =['Min Monthly Sales','Town','State','Distributor','Quarter','Points Worth ($)','Total Sales','Total Points','Client ID'])
compliance_p3 = compliance_drop2.groupby(['Month','Year']).mean().reset_index()
compliance_pi4 = compliance_p3.pivot('Month','Year','Compliance')
compliance_pi4
# Plotting the compliance
# Changed color and lines for a better visualization
comp_hm1 = sns.heatmap(compliance_pi4,cmap="PuBuGn", linewidth=.5, linecolor ='m')
# Chart for expense ratio by category
total_sales=category_sales_df["Total Sales"].sum()
x_axis=np.arange(0, 6, 1)  # six categories C1..C6
y_axis=category_sales_df["ratio_expense"]
plt.title("Expense ratio by category")
plt.xlabel("Category")
plt.ylabel("Expense Ratio")
tick_locations = [value for value in x_axis]
plt.xticks(tick_locations, ["C1","C2","C3","C4","C5","C6"], rotation="horizontal")
# Have to plot our chart once again as it doesn't stick after being shown
plt.plot(x_axis, y_axis)
plt.ylim(0, 5)
plt.grid(False)
plt.savefig("plots/Expense ratio comparison by category.png")
plt.show()
# Points Worth ($) heatmap
comp_hm = sns.heatmap(compliance_pi2,cmap="PuBuGn", linewidth=.5, linecolor ='m')
```
# Part 3 - Analyzing Data by Location.
#### We will try to analyze Sales by Client's location.
```
# We will create a States list to use it on our API/json requests
unique_locations = Project_df.drop_duplicates("State")
locations = unique_locations['State'].tolist()
locations
# Building the OpenWeatherMap URL (used here only to geocode state names)
url = "http://api.openweathermap.org/data/2.5/weather?"
appid = api_key
units = "metric"
url = f"{url}appid={appid}&units={units}&q="
# Creating a list for storing information
location_info = []
# Look up coordinates for each state name
for x in locations:
    # Generating a unique URL for each location
    location_url = url + x
    # API request for each location
    try:
        location_geo = requests.get(location_url).json()
        # Retrieving data, Lat and Lng for each city in the list
        location_lat = location_geo["coord"]["lat"]
        location_lng = location_geo["coord"]["lon"]
        # Append the retrieved information into city_info
        location_info.append({"Location": x,
                              "Lat": location_lat,
                              "Lng": location_lng})
    # NOTE(review): this bare except silently drops states the API cannot
    # resolve (and hides network errors); the outer-join below fills those
    # states with NaN coordinates.
    except:
        pass
# Creating a new DF with state and their coordinates
locations_df = pd.DataFrame(unique_locations["State"]).reset_index().reset_index()
locations_df = locations_df[["level_0", "State"]]
location_info_df = pd.DataFrame(location_info).reset_index().reset_index()
location_info_df = location_info_df[["level_0", "Lat", "Lng"]]
geo_location_df = pd.merge(locations_df, location_info_df, how="outer", on=["level_0"])
geo_location_df.head()
# Merging the coordinates with each state's Total Sales
totalsales_state = Project_df.groupby("State").sum()
totalsales_state = pd.merge(totalsales_state, geo_location_df, how="outer", on=["State"])
totalsales_state = totalsales_state[["Total Sales", "Lat", "Lng"]]
totalsales_state["Total Sales"] = totalsales_state["Total Sales"].fillna(0)
totalsales_state = totalsales_state.sort_values("Total Sales", ascending= False)
totalsales_state.head()
# Sales heatmap (gmaps layer weighted by each state's total sales)
sales = totalsales_state["Total Sales"].astype(float)
maxsales = sales.max()
location_df = totalsales_state[["Lat", "Lng"]]
figure_layout = {'width': '1000px','height': '600px'}
gmaps.configure(api_key=g_key)
fig = gmaps.figure(layout=figure_layout)
heat_layer = gmaps.heatmap_layer(location_df, weights = sales, dissipating = False, max_intensity = maxsales, point_radius = 1.5)
fig.add_layer(heat_layer)
fig
```
### DIVIDE states by economic region:
### Mexico by region (Banxico's methodology): (https://www.banxico.org.mx/SieInternet/consultarDirectorioInternetAction.do?sector=2&accion=consultarCuadro&idCuadro=CR122&locale=es)
### North zone: Nuevo Leon, Sonora, Tamaulipas, Coahuila, Chihuahua, Baja California
### Northcentre zone: Aguascalientes, Colima, Durango, Jalisco, michoacan, Nayarit, San Luis Potosi, Sinaloa, Zacatecas, Baja California Sur
### Central zone: Mexio City, Estado de Mexico, Guanajuato, Hidalgo, Morelos, Puebla, Queretaro, Tlaxcala
### South zone: Campeche, Chiapas, Guerrero,Oaxaca, Tabasco, Veracruz, Yucatan
```
# NORTH ZONE encompasses the states of:
# COAHUILA, SONORA, TAMAULIPAS, NUEVO LEON. (NO SALES IN B.C AND CHI and sonora)
# Series.isin() replaces the original long chains of OR-ed equality tests:
# the same rows are selected, with far less repetition.
North_Zone = Project_df[Project_df.State.isin(["Nuevo Leon", "Tamaulipas", "Coahuila"])]
North_Zone_II = North_Zone[["Year", "Category", "Total Sales", "Total Points", "Points Worth ($)"]]
north_zone_category = North_Zone_II.groupby(['Year', "Category"]).sum()
# expense ratio = share of sales (percent) returned to customers as points
north_zone_category["ratio_expense"] = north_zone_category["Points Worth ($)"] / north_zone_category["Total Sales"] * 100
north_zone_category.head()
# NORTH CENTRE ZONE encompasses the states of:
# AGUASCALIENTES, COLIMA, DURANGO, JALISCO, MICHOACAN, NAYARIT, SAN LUIS POTOSI, ZACATECAS, SINALOA
Northcentre_Zone = Project_df[Project_df.State.isin(["Aguascalientes", "Colima", "Jalisco", "Nayarit",
                                                     "Michoacan", "San Luis Potosi", "Sinaloa", "Zacatecas"])]
Northcentre_Zone_II = Northcentre_Zone[["Year", "Category", "Total Sales", "Total Points", "Points Worth ($)"]]
northcentre_zone_category = Northcentre_Zone_II.groupby(['Year', "Category"]).sum()
northcentre_zone_category["ratio_expense"] = northcentre_zone_category["Points Worth ($)"] / northcentre_zone_category["Total Sales"] * 100
northcentre_zone_category.head()
# CENTRAL ZONE encompasses the states of:
# CIUDAD DE MEXICO, ESTADO DE MEXICO, GUANAJUATO, HIDALGO, MORELOS, PUEBLA, QUERETARO, TLAXCALA
central_Zone = Project_df[Project_df.State.isin(["Ciudad de Mexico", "Estado de Mexico", "Guanajuato", "Hidalgo",
                                                 "Morelos", "Puebla", "Queretaro", "Tlaxcala"])]
central_Zone_II = central_Zone[["Year", "Category", "Total Sales", "Total Points", "Points Worth ($)"]]
central_zone_category = central_Zone_II.groupby(['Year', "Category"]).sum()
central_zone_category["ratio_expense"] = central_zone_category["Points Worth ($)"] / central_zone_category["Total Sales"] * 100
central_zone_category.head()
# SOUTH ZONE encompasses the states of:
# CHIAPAS, GUERRERO, OAXACA, Q.ROO, TABASCO, VERACRUZ, YUCATAN, CAMPECHE
south_Zone = Project_df[Project_df.State.isin(["Chiapas", "Guerrero", "Oaxaca", "Quintana Roo",
                                               "Tabasco", "Veracruz", "Yucatan", "Campeche"])]
south_Zone_II = south_Zone[["Year", "Category", "Total Sales", "Total Points", "Points Worth ($)"]]
south_zone_category = south_Zone_II.groupby(['Year', "Category"]).sum()
south_zone_category["ratio_expense"] = south_zone_category["Points Worth ($)"] / south_zone_category["Total Sales"] * 100
south_zone_category.head()
# Adding the sales per zone for analysis
NZ=North_Zone["Total Sales"].sum()
NCZ= Northcentre_Zone["Total Sales"].sum()
cZ=central_Zone["Total Sales"].sum()
sZ=south_Zone["Total Sales"].sum()
total_sales_3_years= NZ+NCZ+cZ+sZ
# Creating a list of the zones and their sales
sales_per_zone=[NZ,NCZ,cZ,sZ]
names_zones= ["North Zone","North Centre Zone","Central zone","South Zone"]
percentage_sales_zones={"North Zone":NZ,"North Centre Zone": NCZ,"Central zone":cZ,"South Zone":sZ}
percentage_sales_zones
# (these two lines repeat the definitions above, unchanged)
sales_per_zone=[NZ,NCZ,cZ,sZ]
names_zones= ["North Zone","North Centre Zone","Central zone","South Zone"]
# Plotting the sales of the zones to identify biggest and smallest zone of sales
# The colors of each section of the pie chart
colors = ["grey", "lightgreen", "cornflowerblue", "lightskyblue"]
# Pull the Central zone slice out of the pie for emphasis
explode = (0, 0, 0.1, 0)
plt.pie(sales_per_zone, explode=explode, labels=names_zones, colors=colors,
        autopct="%1.1f%%", shadow=True, startangle=140)
plt.title("Percentage of sales by zone")
plt.savefig("plots/Percentage of sales by zone.png")
plt.show()
# Grouping the sales per zone per year
NZ_2018=North_Zone[North_Zone.Year ==2018]["Total Sales"].sum()
NZ_2019=North_Zone[North_Zone.Year ==2019]["Total Sales"].sum()
NZ_2020=North_Zone[North_Zone.Year ==2020]["Total Sales"].sum()
NCZ_2018=Northcentre_Zone[Northcentre_Zone.Year ==2018]["Total Sales"].sum()
NCZ_2019=Northcentre_Zone[Northcentre_Zone.Year ==2019]["Total Sales"].sum()
NCZ_2020=Northcentre_Zone[Northcentre_Zone.Year ==2020]["Total Sales"].sum()
cZ_2018=central_Zone[central_Zone.Year ==2018]["Total Sales"].sum()
cZ_2019=central_Zone[central_Zone.Year ==2019]["Total Sales"].sum()
cZ_2020=central_Zone[central_Zone.Year ==2020]["Total Sales"].sum()
sZ_2018=south_Zone[south_Zone.Year ==2018]["Total Sales"].sum()
sZ_2019=south_Zone[south_Zone.Year ==2019]["Total Sales"].sum()
sZ_2020=south_Zone[south_Zone.Year ==2020]["Total Sales"].sum()
%matplotlib notebook
# Plotting the zones by sale by year
sales_byzone = [NZ_2018, NZ_2019, NZ_2020,
                NCZ_2018, NCZ_2019,NCZ_2020,
                cZ_2018,cZ_2019,cZ_2020,
                sZ_2018,sZ_2019,sZ_2020]
x_axis = np.arange(len(sales_byzone))
plt.bar(x_axis, sales_byzone, color=['papayawhip',"blanchedalmond","moccasin","azure","lightcyan","paleturquoise","whitesmoke","gainsboro","lightgrey","mistyrose","salmon","tomato"], alpha=0.9, align="edge")
tick_locations = [value for value in x_axis]
plt.xticks(tick_locations, ["North 18","North 19","North 20",
                            "NorthCentre 18","NorthCentre 19","NorthCentre 20",
                            "Central 18","Central 19","Central 20",
                            "South 18","South 19","South 20"], rotation="vertical", fontsize= 6)
plt.xlim(-0.75, len(x_axis)-0.25)
#plt.ylim(0, max(sales_byzone))
plt.title("Total Sales by region and year")
plt.xlabel("Zones")
plt.ylabel("total sales")
plt.savefig("plots/Total Sales by region and year.png")
plt.show()
```
| github_jupyter |
# TTV Retrieval for Kepler-36 (a well-studied, dynamically-interacting system)
In this notebook, we will perform a dynamical retrieval for Kepler-36 = KOI-277. With two neighboring planets of drastically different densities (the inner planet is rocky and the outer planet is gaseous; see [Carter et al. 2012](https://ui.adsabs.harvard.edu/abs/2012Sci...337..556C/abstract)), this is one of the more well-studied TTV systems in existence. First, let's import packages and download data from the Rowe et al. (2015) TTV catalog:
```
%matplotlib inline
import ttvnest
import numpy as np
koi = 277
nplanets = 2
data, errs, epochs = ttvnest.load_data.get_data(koi, nplanets)
```
Now, let's set up the ttvnest system:
```
# uniform mass priors (model units) and Gaussian period priors near the
# known orbital periods (~13.84 d for b, ~16.23 d for c)
# NOTE(review): planet b takes index 1 of the downloaded arrays and planet c
# takes index 0 -- confirm the catalog's planet ordering.
kepler36_b = ttvnest.TTVPlanet(data[1], errs[1], epochs[1], mass_prior = ('Uniform', 0, 100.),
                               period_prior = ('Normal', 13.84, 0.01)
                               )
kepler36_c = ttvnest.TTVPlanet(data[0], errs[0], epochs[0], mass_prior = ('Uniform', 0, 100.),
                               period_prior = ('Normal', 16.23, 0.01)
                               )
kepler36 = ttvnest.TTVSystem(kepler36_b, kepler36_c)
```
Before retrieval, let's plot the data alone to see what they look like:
```
ttvnest.plot_utils.plot_ttv_data(kepler36)
```
Clear, anticorrelated signals! Let's retrieve:
```
results = kepler36.retrieve()
```
Let's check out our results. I'm not going to work out the Carter et al. (2012) posterior distribution on the eccentricity vectors since they use a different basis than I choose here. But it's probably worth converting their mass ratio constraints to what we should expect here. They get a mass ratio sum $q_+ = (M_1 + M_2)/M_\star= 3.51\times10^{-5}$. In ttvnest dynamical masses are normalized by $3\times10^{-6} = M_\mathrm{Earth}/M_\mathrm{Sun}$, so this gives $q_+ = 11.7$ in our units. Their planetary mass ratio is $q_p = M_1/M_2 = 0.55$. Taken together, this gives dynamical masses of $M_1/M_\star = 4.15$ and $M_2/M_\star = 7.55$.
Let's see if we get there...
```
kepler36.posterior_summary()
ttvnest.plot_utils.plot_results(kepler36, uncertainty_curves = 100,
sim_length = 365.25*10, outname = 'kepler36')
```
We are a little on the low side, but that's apparently to be expected from other works like Hadden & Lithwick (2017). Let's make the dynesty plots for good measure:
```
ttvnest.plot_utils.dynesty_plots(kepler36, outname = 'kepler36')
```
Wow, what a nice system. Let's save our results for later:
```
ttvnest.io_utils.save_results(kepler36, 'kepler36.p')
```
| github_jupyter |
```
# group_by_SNR.ipynb
# Many stars that have mulitple APF spectra have some spectra from different nights of observation.
# Calculates the SNR for each group of spectra from one night of observing (calc_SNR combines all observations of one
# star and returns an SNR for the star instead), then finds for each star which group of observations together has the
# highest SNR. Will use only highest SNR group in run of Specmatch-Emp for each star.
# Last modified 8/12/20 by Anna Zuckerman
import os
import pandas as pd
import numpy as np
import astropy.io.fits
import shutil
def get_SNR(path_name, filenames): # Modified from get_SNR in calc_SNR
    """Return the combined SNR for one night's set of spectrum files.

    Sums the counts of echelle order 45 across every file in *filenames*
    (located under *path_name*), then estimates the photon-noise-limited
    SNR as sqrt(median counts).

    Parameters
    ----------
    path_name : str
        Directory containing the spectrum FITS files.
    filenames : iterable of str
        FITS file names belonging to a single observation set.

    Returns
    -------
    float
        Estimated signal-to-noise ratio of the summed spectra.
    """
    # NOTE(review): summing a (4608,) order into this (4608, 1) accumulator
    # broadcasts to (4608, 4608); the median/sqrt still yields a scalar, but
    # confirm the intended accumulation shape.
    order_data = np.zeros([4608,1])
    for spect_file in filenames:
        # Use a context manager so each FITS handle is closed promptly
        # (the original leaked one open file per spectrum).
        with astropy.io.fits.open(path_name + '/' + spect_file) as hdul:
            order_data = np.add(order_data,(hdul[0].data)[45])
    SNR = np.sqrt(np.median(order_data))
    return SNR
# For stars with multiple spectra, keep the observation set with the highest SNR.
big_path = './APF_spectra/all_apf_spectra' # './APF_spectra/additional_spectra'
SNR_filename = 'all_apf_highest_SNRs.csv' # 'additional_apf_highest_SNRs.csv'
new_dir_path = './APF_spectra/all_apf_spectra_highest_SNR/' #'./APF_spectra/additional_apf_spectra_highest_SNR/'
SNR_list = []
names = []
# Each star is a subdirectory of big_path; files sitting directly in
# big_path are single-spectrum stars, handled separately below.
pathlist = [path for path in sorted(os.listdir(big_path)) if os.path.isdir(big_path + '/' + path)]
for star_dir in pathlist:
    # Star name is the directory-name prefix before the first underscore.
    names += [star_dir.split('_')[0]]
    spectlist = os.listdir(big_path + '/' + star_dir)
    # BUGFIX: the original `try: ... except: ValueError` was a *bare* except
    # whose body merely evaluated the name ValueError -- it silently
    # swallowed every exception.  Catch only the expected ValueError raised
    # when '.ipynb_checkpoints' is absent.
    try:
        spectlist.remove('.ipynb_checkpoints')
    except ValueError:
        pass
    # Observation ID is the file-name prefix before the first '.'.
    obslist = [filename.split('.')[0] for filename in spectlist]
    unique_obs = list(dict.fromkeys(obslist)) #list of all observations of that star
    highest_SNR = 0
    highest_SNR_obs = ''
    # Find the observation group with the best combined SNR.
    for obs in unique_obs:
        obs_files = [file for file in spectlist if file.split('.')[0] == obs]
        SNR_obs = get_SNR(big_path + '/' + star_dir, obs_files)
        if SNR_obs > highest_SNR:
            highest_SNR = SNR_obs
            highest_SNR_obs = obs
    SNR_list += [highest_SNR]
    # Copy the winning group's files into the per-star output directory.
    new_dir_name = new_dir_path + star_dir
    highest_SNR_obs_files = [file for file in spectlist if file.split('.')[0] == highest_SNR_obs]
    os.mkdir(new_dir_name)
    for file in highest_SNR_obs_files:
        shutil.copyfile(big_path + '/' + star_dir + '/' + file, new_dir_name + '/' + file)
# Write a CSV mapping star name -> best observation-set SNR.
df = pd.DataFrame(list(zip(names, SNR_list)), columns =['Name (Simbad resolvable)', 'Highest observation set SNR'])
df.to_csv('./' + SNR_filename)
# for stars with only one spectrum, copy that spectrum directly -- not applicable for ./APF_spectra/all_apf_spectra
# NOTE(review): this copies into './APF_spectra/apf_spectra_highest_SNR',
# not new_dir_path -- confirm the intended destination directory.
pathlist_notdir = [path for path in sorted(os.listdir(big_path)) if not(os.path.isdir(big_path + '/' + path))]
for file in pathlist_notdir:
    shutil.copyfile(big_path + '/' + file, './APF_spectra/apf_spectra_highest_SNR' + '/' + file)
# check that all stars were processed
print(len(os.listdir(big_path)))
print(len(os.listdir(new_dir_path)))
```
| github_jupyter |
<a href="https://colab.research.google.com/github/kishkath/Data_Structures-Hashing-/blob/main/Tensorflow_fundamentals_withoutcode.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# **Roadmap of this assignment**
**This assignment is divided into following sections.**
1. Learning about what is tensorflow, its usecases.
2. Learning what is tensor, tensor types.
3. Different tensor formats.
4. Mathematical operations in tensorflow.
5. Gradient operations in tensorflow.
6. Using learning of tensorflow basics to make one simple classifier.
# **NOTE**
Follow along the videos, and links given in the assignment. If you have **any doubt** related to assignment, **contact your mentor.**

##1. **What is Tensorflow?**
TensorFlow is an end-to-end open source platform for machine learning. It has a comprehensive, flexible ecosystem of tools, libraries and community resources that lets researchers push the state-of-the-art in ML and developers easily build and deploy ML powered applications.
video - **why tensorflow ?**- https://www.youtube.com/watch?v=yjprpOoH5c8
## **Must Read These Tensorflow Use Cases**
1. **How Airbnb uses tensorflow to improve their guests experiences?**
Read this medium article to understand - https://medium.com/airbnb-engineering/categorizing-listing-photos-at-airbnb-f9483f3ab7e3
2. **How paypal uses tensorflow for fraud detection?**
Read this to understand what paypal does - https://medium.com/paypal-tech/machine-learning-model-ci-cd-and-shadow-platform-8c4f44998c78
##2. **What is a Tensor?**
**A tensor is a container for data—usually numerical data. tensors are also called generalization of matrices to an arbitrary number of dimensions.**

# **Now we will learn types of tensor with different rank**
**1.Scalars (rank 0 tensor)**
- A tensor that contains only one number is called a scalar.
- a scalar tensor has 0 axes (ndim == 0).
**Go through this video for numpy array methods used in next cell**
```
from IPython.display import YouTubeVideo
YouTubeVideo('a8aDcLk4vRc', width=600, height=400)
# Bring in numpy for building the example tensors.
import numpy as np

# A rank-0 (scalar) tensor: a single value wrapped by np.array.
zero_rank_tensor = np.array(5)

# Report the tensor, its number of axes (0), and its empty shape tuple.
for template, value in (
    ("Tensor with zero rank: {0}", zero_rank_tensor),
    ("Dimensions: {0}", zero_rank_tensor.ndim),
    ("Shape: {0}", zero_rank_tensor.shape),
):
    print(template.format(value))
```
**The above output of a scalar number shows that and array with a single digit is having zero rank as a tensor.**
**Observation from previous output**
- Dimension is 0.
- Shape gives empty parenthesis bracket.
**2.Vectors (rank 1 tensor)**
- An array of numbers is called a vector, or rank-1 tensor, or 1D tensor.
- A rank-1 tensor is said to have exactly one axis.
```
# A rank-1 tensor (vector): a flat list of numbers has exactly one axis.
one_rank_tensor = np.array([1, 2, 3])

# Display the vector, then its axis count (1) and shape (3,).
outputs = (
    ("Tensor with rank 1:{0}", one_rank_tensor),
    ("Dimensions: {0}", one_rank_tensor.ndim),
    ("Shape: {0}", one_rank_tensor.shape),
)
for template, value in outputs:
    print(template.format(value))
```
**The above output shows that whenever there is a single square bracket we see around some numbers separated by comma, we get a tensor of rank 1.**
**Observation**
- As compared to previous output, this time dimension is 1.
- Its shape is (3,) showing no of parameters in the array which is 3.
**3. Matrices (rank 2 tensor)**
- An array of vectors is a matrix, or rank-2 tensor, or 2D tensor.
- A matrix has two axes (often referred to as rows and columns).
```
# A rank-2 tensor (matrix): 3 rows x 5 columns holding the values 1..15.
rank_2_tensor = np.array([[5 * row + col for col in range(1, 6)] for row in range(3)])

# A matrix has two axes (rows and columns), hence ndim == 2, shape (3, 5).
print("Tensor with rank 2: {0}".format(rank_2_tensor))
print("Dimensions: {0}".format(rank_2_tensor.ndim))
print("Shape: {0}".format(rank_2_tensor.shape))
```
**The above output shows that whenever there is a double square bracket we see around some numbers separated by comma, we get a tensor of rank 2.**
**Observation**
- This time we got dimension as 2 since it's a matrix.
- We got shape as (3,5) where 3 is no of rows and 5 points to no of columns.
**4. Cube (rank 3 tensors)**
- If you pack 2-d matrices in a new array, you obtain a rank-3 tensor (or 3D tensor).
- By packing rank-3 tensors in an array, you can create a rank-4 tensor, and so on.
```
# Stack three identical 3x5 matrices into a rank-3 tensor.
# (The variable keeps its original -- slightly misleading -- name
# `rank_4_tensor`; it is really a rank-3 tensor.)
base_matrix = [[5, 78, 2, 34, 0],
               [6, 79, 3, 35, 1],
               [7, 80, 4, 36, 2]]
rank_4_tensor = np.array([base_matrix] * 3)

# Three axes: (number of matrices, rows, columns) == (3, 3, 5).
print("Tensor with rank4 :{0}".format(rank_4_tensor))
print("Dimensions: {0}".format(rank_4_tensor.ndim))
print('Shape: {0}'.format(rank_4_tensor.shape))
```
**The above output shows that whenever there is a triple square bracket we see around some numbers separated by comma, we get a tensor of rank 3.**
**Observation**
- Look at the dimension which outputs 3. Compare it with previous outputs.
- Look at the shape which has 3 values (3,3,5) where first value 3 is no of matrices, 2nd value 3 is no of rows and third value 5 is no of columns.
##3. **Now we will learn tensors of different formats**
**Watch this video for basic understanding on tensor operations in tensorflow**
```
YouTubeVideo('HPjBY1H-U4U', width=600, height=400)
# import tensorflow as tf
import tensorflow as tf
# create tensor of one's with shape (3,1)
x = tf.ones((3,1))
print(x)
# create tensor of zeros (3,1)
y = tf.zeros((3,1))
# Elementwise addition: the + operator and tf.add are equivalent.
print(x+y)
print(tf.add(x,y))
# print x + y
# create tensor of random values using random.uniform with shape (5,1)
x = tf.random.uniform((5,1))
print(x)
# print x
# create tensor of random values using random.uniform with shape (5,1) with a minval=2., and maxval=4.
x = tf.random.uniform((5,1),minval=2,maxval=4)
print(x)
# print x
# create tensor of random values using random.normal with a defined mean = 0., and stddev = 1.0
x = tf.random.normal(shape=(5,1),mean=0,stddev=1.0)
print(x)
# print x
# Do you remember assigning a value in an array ?
# Let's try assigning a value in a tensor (x[0, 0] = 0.)
# NOTE: eager tf.Tensor objects are immutable, so this item assignment
# intentionally raises a TypeError -- it motivates tf.Variable below.
x[0, 0] = 0.
```
**We can see, updating the state of tensor above throw error. So we need to use variables in tensor. tf.Variables is the class meant to manage modifiable state in tensorflow.**
**Watch this video from 45:00 minute to 1:00 hr to understand how tf.variable, tf.assign works.**
```
YouTubeVideo('d9N0IGb5QP0', width=600, height=400)
# Create a tensor using tf.Variable with initial_value = tf.random.normal having shape (3,1)
x = tf.Variable(initial_value=tf.random.normal((3,1)))
print(x)
# print x
# assigning value 1. in the tensor variable x using .assign method at position [0,0]
# Unlike an immutable tf.Tensor, a Variable supports in-place element updates.
x[0,0].assign(1.)
# print x
print(x)
# adding one to each value of the tensor variable x using assign_add method
x.assign_add(tf.ones((3,1)))
```
##4. **Now we will learn mathematical operations in tensorflow**

**Some tensorflow methods**
In TensorFlow the differences between constants and variables are that when you declare some constant, its value can't be changed in the future (also the initialization should be with a value, not with operation).
Nevertheless, when you declare a Variable, you can change its value in the future with tf.assign() method (and the initialization can be achieved with a value or operation).
```
# All eager tf.Tensor values are immutable (in contrast to tf.Variable)
# define a using tf.constant and pass [40., 30., 50.]
a = tf.constant([40.,30.,50.])
# define b using tf.constant and pass [12., 13., 23.]
b = tf.constant([12.,13.,23.])
print("a: {0},{1} , b: {2},{3}".format(a,a.dtype,b,b.dtype))
# add a and b using tf.add
print("Addition: ",tf.add(a,b))
# define x using tf.variable and pass initial value as tf.random.uniform(shape=(2,3), minval=3, maxval=5
x = tf.Variable(tf.random.uniform(shape=(2,3),minval=3,maxval=5))
# define y by squaring x using tf.square
print("X: {0}".format(x))
y = tf.pow(x,2)
print("y: {0}".format(y))
# print x and y
# define z by taking the square root of x using tf.sqrt
z = tf.sqrt(x)
print("z: {0}".format(z))
# print x+z
print("x+z : {0}".format(tf.add(x,z)))
```
## **Numpy Compatibility**
```
import numpy as np
# Demonstrates automatic conversion between numpy arrays and tf tensors.
# define an array with shape (4,3) using np.ones
ndarray = np.ones((4,3))
print("TensorFlow operations convert numpy arrays to Tensors automatically")
# define a variable tensor by multiplying ndarray with value 42 (use tf.multiply)
tensor = tf.multiply(ndarray,42)
# print variable tensor
print("tensor: {0}".format(tensor))
print("And NumPy operations convert Tensors to numpy arrays automatically")
# add one in each value of a tensor using np.add
print(np.add(tensor,1))
print("The .numpy() method explicitly converts a Tensor to a numpy array")
# convert tensor into numpy using tensor.numpy and print it
print(tensor.numpy())
```
##5. **How to do gradient of any differentiable expression?. let's learn how to find it....**
You must be asking yourself, what is the difference between numpy and tensorflow here. Suppose you want to differentiate some expression, numpy can't help you there. Tensorflow comes in handy then.

**Watch this tutorial to understand how gradient works in tensorflow.**
```
YouTubeVideo('ENOycxDU9RY', width=600, height=400)
# Using GradientTape(Sample example)
# taking some input
some_input = tf.Variable(initial_value = 5.)
# defining GradientTape as tape
with tf.GradientTape() as tape:
result = tf.square(some_input)
# using gradient tape to find gradient
gradient = tape.gradient(result, some_input)
# printing some_input and gradient
print(some_input)
print(gradient)
# another example of gradient
# define variable x using tf.variable and pass value as 3.0
x = tf.Variable(initial_value=3.0)
# define GradientTape as tape with y = x**2
with tf.GradientTape() as g:
y = x*x
dy_dx = g.gradient(y,x)
# define dy_dx and take derivative using tape.gradient
print(x)
print(y)
print(dy_dx)
# print x, y and dy_dx
# (Add on example)
# Another example of gradient using equation of
# falling apple along a vertical exis over time
time = tf.Variable(3.)
with tf.GradientTape() as outer:
with tf.GradientTape() as inner:
position = 4.9 * time ** 2
speed = inner.gradient(position, time)
acceleration = outer.gradient(speed, time)
# printing time, position, speed and acceleration
print("time: ", time)
print("position: ", position)
print("speed: ", speed)
print("acceleration: ", acceleration)
# Another example using weights and biases
# define w using tf.Variable and pass random values with shape (3,2) using tf.random.normal
w = tf.Variable(tf.random.normal((3,2)))
# define b using tf.Variable and pass zeros using tf.zeros
# NOTE(review): the assignment asked for shape 2; this uses (2,2), which
# broadcasts x @ w from (1,2) to a (2,2) result -- confirm intent.
b = tf.Variable(tf.zeros((2,2)))
# define x with values [[1., 2., 3.]]
x = tf.Variable([[1.,2.,3.]])
# Record the forward pass; persistent=True allows multiple gradient() calls.
with tf.GradientTape(persistent=True) as tape:
    # define y under it with values as y = x @ w + b (@ is dot product)
    y = x @ w + b
    # define loss using tf.reduce_mean and pass y**2 into it
    loss = tf.reduce_mean(y*y)
# BUGFIX: gradients are now taken *after* the tape context ends (calling
# tape.gradient inside the `with` block wastes resources), and the
# d(y)/d(b) result is no longer misnamed `dy_x`.
dy_dw = tape.gradient(y, w)
dy_db = tape.gradient(y, b)
# print w
print("w: ", w)
# print b
print("b: ", b)
# print x
print("x: ", x)
# print y
print("y: ", y)
# print y**2
print("y**2: ", y**2)
# print loss
print("loss: ", loss)
print("*"*50)
# Now differentiate y w.r.t w and b
print(dy_dw,dy_db)
```
## **Now we will use our learning till now to build a small linear classifier.**
##6. **Beginning of End to End Linear Classifier**
**Before we go to the linear classifier, let me show you how to plot some points on a scatterplot for visualization.**
**Video reference for multivariate normal in method in numpy**
```
YouTubeVideo('mw-svKkGVaI', width=600, height=400)
# ( Sample code for visualization )
# we will use np.random.multivariate_normal to get random points having specific mean and covariance
import numpy as np
import matplotlib.pyplot as plt
# Draw 5000 2-D points around mean (1, 0.5) with a wide, positively
# correlated covariance, then scatter-plot them with equal axis scaling.
x, y = np.random.multivariate_normal([1, 0.5], [[10, 5], [5, 10]], 5000).T
plt.plot(x, y, 'x')
plt.axis('equal')
plt.show()
```
## **Change Mean And Covariance To See The Differences in Plots in Next Cell**
```
# we will use np.random.multivariate_normal to get random points having specific mean and covariance
import matplotlib.pyplot as plt
# Three stacked panels comparing how mean and covariance change the cloud.
fig, (ax1, ax2, ax3)= plt.subplots(3, figsize=(12, 8))
# visualize mean, cov
# tight, correlated spread around (1, 0.5)
x, y = np.random.multivariate_normal(mean = [1, 0.5], cov = [[1, 0.5], [0.5, 1]],size = 5000).T
ax1.plot(x, y, 'x')
plt.axis('equal')
# visualize mean, cov
# wider spread around (2, 3)
a, b = np.random.multivariate_normal(mean = [2, 3], cov = [[10, 5], [5, 10]], size = 5000).T
ax2.plot(a, b, 'bo')
plt.axis('equal')
# visualize mean, cov
# NOTE(review): [[5, 15], [15, 5]] is not positive semi-definite, so NumPy
# warns and the draw is not a valid normal sample -- confirm intent.
c, d = np.random.multivariate_normal(mean = [1, 5], cov = [[5, 15], [15, 5]], size = 5000).T
ax3.plot(c, d, 'r+')
plt.axis('equal')
# Now we are defining two scatterplot, one for negative and one for positive
num_samples_per_class = 1000
# first negative samples: a correlated cloud centred at (0, 3)
negative_samples = np.random.multivariate_normal(mean=[0,3],cov=[[1,0.5],[0.5,1]],size=num_samples_per_class)
# looking at first 5 negative samples
negative_samples[:5]
# defining positive samples: centred at (3, 0).
# BUGFIX: the original reused mean [0, 3] for both classes, making them
# statistically identical and therefore impossible to separate linearly.
positive_samples = np.random.multivariate_normal(mean=[3,0],cov=[[1,0.5],[0.5,1]],size=num_samples_per_class)
# looking at first 5 positive samples
positive_samples[:5]
# Stacking both positive and negative samples using np.vstack
samples = np.vstack((positive_samples,negative_samples))
print(samples)
print('\n\n\n')
print(samples.shape)
# One target column per sample: positives (stacked first) get label 0,
# negatives get label 1.
# BUGFIX: the original built (n, 2) target blocks, which does not match the
# single-output model below (output_dim = 1).
targets = np.vstack((np.zeros((num_samples_per_class,1)),np.ones((num_samples_per_class,1))))
targets.shape
# plot your samples using plt.scatter
# BUGFIX: the original called plt.scatter(positive_samples, negative_samples),
# which plots one class's coordinates against the other's.  Plot the stacked
# samples as (x, y) points, coloured by their class label instead.
plt.scatter(samples[:, 0], samples[:, 1], c=targets[:, 0])
# define input_dim =2 as we have two input variables and output_dim = 1 as we have one target
import tensorflow as tf
input_dim = 2
output_dim = 1
# Trainable weight matrix of shape (input_dim, output_dim).
# BUGFIX: the original passed the bare tuple (input_dim, output_dim) as the
# variable's *value* (an integer vector [2, 1]) instead of using it as a shape.
weights = tf.Variable(initial_value=tf.random.uniform(shape=(input_dim, output_dim)))
# Trainable bias vector of shape (output_dim,), initialised to zeros
# (same tuple-as-value bug fixed here).
bias = tf.Variable(initial_value=tf.zeros(shape=(output_dim,)))
bias
print("Bias: {0}".format(bias))
print("Weights: {0}".format(weights))
# here is our model
# define a function named simple_model which will take inputs(X) and return predictions
def simple_model(inputs):
    """Linear model: predictions = inputs @ weights + bias.

    `inputs` has shape (n, input_dim); the result has shape (n, output_dim).
    BUGFIX: the original returned the elementwise product `inputs * weights`,
    which is not a linear layer and only "ran" via accidental broadcasting.
    """
    # Cast so float64 numpy inputs work against the float32 tf variables.
    inputs = tf.cast(inputs, tf.float32)
    return tf.matmul(inputs, weights) + bias
# returning avg loss from this loss function
# define mean_sq_loss function which will take targets and predictions
def mean_sq_loss(targets, predictions):
    """Return the mean squared error between targets and predictions."""
    # Cast targets to the prediction dtype (numpy float64 vs tf float32).
    targets = tf.cast(targets, predictions.dtype)
    # BUGFIX: the original called tf.subtract(targets - predictions) with a
    # single argument, which raises a TypeError.
    losses = tf.square(targets - predictions)
    # BUGFIX: the original computed tf.reduce_mean but never returned it,
    # so callers always received None.
    return tf.reduce_mean(losses)
# define learning_rate=0.1
learning_rate = 0.1

# One full gradient-descent step over the whole dataset.
def training(inputs, targets):
    """Run one training step and return the scalar loss.

    BUGFIX vs the original: it differentiated w.r.t. undefined names
    (`w`, `b`, `dloss`) and rebound `weights`/`bias` as *locals* -- the
    model was never updated (and the rebinding raised UnboundLocalError).
    Here we differentiate the loss w.r.t. the module-level variables and
    update them in place.
    """
    with tf.GradientTape() as tape:
        # forward pass and loss
        predictions = simple_model(inputs)
        loss = mean_sq_loss(targets, predictions)
    # d(loss)/d(weights), d(loss)/d(bias)
    grad_w, grad_b = tape.gradient(loss, [weights, bias])
    # Gradient descent: step *against* the gradient, in place.
    weights.assign_sub(learning_rate * grad_w)
    bias.assign_sub(learning_rate * grad_b)
    return loss

# running training for multiple epochs using a for loop
for i in range(30):
    # one step per epoch over the full batch
    loss = training(samples, targets)
    # print loss epoch wise
    print("Epoch: {0}, loss: {1}".format(i, loss))
```
## **Hurray.... you finished the assignment.... It's time for the feedback**
## **FEEDBACK FORM**
Please help us in improving by filling this form. https://forms.zohopublic.in/cloudyml/form/CloudyMLDeepLearningFeedbackForm/formperma/VCFbldnXAnbcgAIl0lWv2blgHdSldheO4RfktMdgK7s
| github_jupyter |
<a href="https://colab.research.google.com/github/AmberLJC/FedScale/blob/master/dataset/Femnist_stats.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# **[Jupyter notebook] Understand the heterogeneous FL data.**
# Download the Femnist dataset and FedScale
Follow the download instructions in /content/FedScale/dataset/download.sh
```
# Download Fedscale and femnist dataset
!pwd
!wget -O /content/femnist.tar.gz https://fedscale.eecs.umich.edu/dataset/femnist.tar.gz
!tar -xf /content/femnist.tar.gz -C /content/
!rm -f /content/femnist.tar.gz
!echo -e "${GREEN}FEMNIST dataset downloaded!${NC}"
!git clone https://github.com/AmberLJC/FedScale.git
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import numpy as np
from FedScale.core.utils.femnist import FEMNIST
from FedScale.core.utils.utils_data import get_data_transform
from FedScale.core.utils.divide_data import DataPartitioner
from FedScale.core.argParser import args
```
# Data Loader
```
# Standard MNIST-style transforms reused for the FEMNIST train/test splits.
train_transform, test_transform = get_data_transform('mnist')
# Load the FEMNIST splits downloaded to /content/femnist.
train_dataset = FEMNIST('/content/femnist', dataset='train', transform=train_transform)
test_dataset = FEMNIST('/content/femnist', dataset='test', transform=test_transform)
```
Partition the dataset by the `client_data_mapping` file, which gives the real-world client-level heterogeneity.
```
# Treat this as a computer-vision task and partition the training set across
# clients using the real client-to-data mapping (FEMNIST has 62 classes).
args.task = 'cv'
training_sets = DataPartitioner(data=train_dataset, args=args, numOfClass=62)
training_sets.partition_data_helper(num_clients=None, data_map_file='/content/femnist/client_data_mapping/train.csv')
#testing_sets = DataPartitioner(data=test_dataset, args=args, numOfClass=62, isTest=True)
#testing_sets.partition_data_helper(num_clients=None, data_map_file='/content/femnist/client_data_mapping/train.csv')
```
# Print and plot statistics of the dataset.
```
# Summary statistics of the partitioned dataset.
# BUGFIX: corrected the user-facing "smaples" typo in the printed messages.
print(f'Total number of data samples: {training_sets.getDataLen()}')
print(f'Total number of clients: {training_sets.getClientLen()}')
print(f'The number of data samples of each clients: {training_sets.getSize()}')
print(f'The number of unique labels of each clients: {training_sets.getClientLabel()}')
# Histograms: per-client dataset sizes (left) and label diversity (right).
fig, axs = plt.subplots(1, 2, sharey=True, tight_layout=True)
size_dist = training_sets.getSize()['size']
n_bins = 20
axs[0].hist(size_dist, bins=n_bins)
axs[0].set_title('Client data size distribution')
label_dist = training_sets.getClientLabel()
axs[1].hist(label_dist, bins=n_bins)
axs[1].set_title('Client label distribution')
```
# Visualize the clients' data.
```
# Inspect the data of a single client (rank 1).
rank=1
isTest = False
dropLast = True
# DataPartitioner.use is 0-indexed, hence rank - 1.
partition = training_sets.use(rank - 1, isTest)
num_loaders = min(int(len(partition)/ args.batch_size/2), args.num_loaders)
dataloader = DataLoader(partition, batch_size=16, shuffle=True, pin_memory=True, timeout=60, num_workers=num_loaders, drop_last=dropLast)
# Show the first image of the first batch (transpose CHW -> HWC for imshow).
for data in iter(dataloader):
    plt.imshow(np.transpose(data[0][0].numpy(), (1, 2, 0)))
    break
```
| github_jupyter |
```
# Code ported from laptop onto 10.12.68.72 starting on 8/24/2020 (Gregory Rouze)
# To-do:
# 1) need to separate user functions and main code - I have done this successfully in the offline version, but I'm having a
# little more trouble in the cloud version
# 2) Add comments on putpose of individual user functions
# 3) Run a long term experiment this weekend to see if I run into VPN problems (processing will take a while)
# v2 differs from v1 in that excess code that I used to prototype this in the cloud (from my local laptop)
# was removed.
'''Import relevant packages, functions, and user functions used in this reference ET derivation'''
import boto3
from contextlib import contextmanager
import earthpy.spatial as es
import fsspec
from math import e
import rasterio as rio
import xarray as xr
from osgeo.gdalnumeric import *
from osgeo.gdalconst import *
import os
from osgeo import gdal, osr, gdal_array, gdalconst
import pandas as pd
import re
import numpy as np
import sys
import ogr
from rasterio import Affine, MemoryFile
from rasterio.enums import Resampling
import rioxarray
from shapely.geometry import Point, Polygon
import geopandas as gpd
from shapely.geometry import box
from fiona.crs import from_epsg
from matplotlib import pyplot as plt
from rasterio.plot import plotting_extent
import earthpy.plot as ep
import math
from itertools import chain
from ipynb.fs.full.ProjectedReferenceET_Classes_Functions import ET0_PM, aggregate_raster_inmem, resample_raster_write, \
reproject_raster, grepfxn, rastermath, lapply_brick, write_geotiff, atmospheric_pressure, relative_fromspecific, unique, s3_push_delete_local
from ipynb.fs.full.ProjectedReferenceET_Classes_Functions import *
import boto3
'''Set home path if not done so already'''
os.getcwd()
os.chdir('/home/jupyter-rouze')
'''Read configuration file and parse out the inputs line by line'''
# Note that the difference between historical and future outputs in cloud are based on these 2 configuration files.
configurationfile = 'configurationfile_referenceET_test_future.ini'
# configurationfile = 'configurationfile_referenceET_test_historical.ini'
# Note: if you want run rcp 8.5, then all you have to do is change the rcp_source parameter from within config file
# It only affects grepfxn(rcp_source,all_files) below
# Parse "key = value" lines into a dict.
# NOTE(review): assumes every line contains exactly one ' = ' separator; a
# malformed or blank line would raise ValueError here.
with open(configurationfile) as f:
    data = {}
    for line in f:
        key, value = line.strip().split(' = ')
        data[key] = value
print(data)
# Unpack the configuration values used throughout the pipeline.
model_files = data['model_files']
data_source = data['data_source']
output_folder = data['output_folder']
elevfile = data['elevfile']
tiffolder = data['tiffolder']
ET0_method = data['ET0_method']
ET0_winddat = data['ET0_winddat']
ET0_crop = data['ET0_crop']
to_clip = data['to_clip']
model = data['model']
# Bounding-box edges and padding are numeric.
northmost = float(data['northmost'])
southmost = float(data['southmost'])
westmost = float(data['westmost'])
eastmost = float(data['eastmost'])
pad_factor = float(data['pad_factor'])
rcp_source = data['rcp_source']
MACA_start_bucket = data['MACA_start_bucket']
'''This is needed to retrieve the netCDF files from the dev-et-data AWS bucket'''
# os.chdir(model_files)
fs = fsspec.filesystem(model_files, anon=False, requester_pays=True)
all_files = fs.find(MACA_start_bucket)
# This prints all of the files in dev-et-data/in/DelawareRiverBasin/ or MACA_start_bucket...a big set of outputs, so skipped
# print(all_files)
# THE CODE BELOW IS PARSED FROM THE CONDITION WHEN DEALING WITH METDATA
# Split models apart that are to be used for ensemble averaging
models_parsed = [x.strip() for x in model.split(',')]
# Whittle down the number of files if the folder contains both rcp 4.5 and rcp 8.5 files
# Right now, the code can only handle one model of METDATA output (8/21/2020)
rcp_all_files = [grepfxn(rcp_source,all_files)][0]
# Iterate the files by each specified model
models_list=[]
for i in range(len(models_parsed)):
    model_files_loop = [grepfxn(models_parsed[i],rcp_all_files)][0]
    models_list.append(model_files_loop)
# Flatten series of lists into one list
rcp_all_files = list(chain(*models_list))
# prints all netCDF files from 1950-2100 from MACA (radiation, precipitation, wind etc.)
print(rcp_all_files)
# Find and compile the year blocks into a list
dfis=[]
for out in rcp_all_files:
    a=out.split('_')
    # Year-range token (e.g. "2006_2010") from the MACA file naming scheme.
    dfi = a[5]+'_'+a[6]
    dfis.append(dfi)
# print(dfis)
# Distill the above list into unique year blocks, as there will be duplicates from multiple climate inputs
year_all=unique(dfis);print(year_all)
# For prototyping only
year_block=0
# print(year_all)
# Print the first entry in the year list
print(year_all[year_block])
# Take out the components of the for loop below for showcasing to other members of the ET group
# loop by each block associated with the MACA netCDF file naming structure
# Outer loop: one iteration per unique MACA year block (e.g. "2006_2010").
for year_block in range(0,len(year_all)):
    year_block_files = grepfxn(year_all[year_block],rcp_all_files)
    print(year_block_files)
    # Bounding box used to clip every raster brick.
    bounds=[southmost,northmost,westmost,eastmost]
    # precipitation
    rcp_pr = lapply_brick(grepfxn("pr",year_block_files), 'precipitation', model_files,tiffolder,data_source,to_clip=to_clip,bounds=bounds,pad_factor=pad_factor)
    # downwelling shortwave radiation
    rcp_rsds = lapply_brick(grepfxn("rsds",year_block_files), 'surface_downwelling_shortwave_flux_in_air', model_files,tiffolder,data_source,to_clip=to_clip,bounds=bounds,pad_factor=pad_factor)
    # maximum air temperature
    rcp_tasmax = lapply_brick(grepfxn("tasmax",year_block_files), 'air_temperature', model_files,tiffolder,data_source,to_clip=to_clip,bounds=bounds,pad_factor=pad_factor)
    # minimum air temperature
    rcp_tasmin = lapply_brick(grepfxn("tasmin",year_block_files), 'air_temperature', model_files,tiffolder,data_source,to_clip=to_clip,bounds=bounds,pad_factor=pad_factor)
    # Now repeat above for the rcp 8.5 model outputs below
    # METDATA additionally provides wind components and relative humidity.
    if(data_source == 'METDATA'):
        rcp_uas = lapply_brick(grepfxn("uas",year_block_files), 'eastward_wind', model_files,tiffolder,data_source,to_clip=to_clip,bounds=bounds,pad_factor=pad_factor)
        rcp_vas = lapply_brick(grepfxn("vas",year_block_files), 'northward_wind', model_files,tiffolder,data_source,to_clip=to_clip,bounds=bounds,pad_factor=pad_factor)
        rcp_rhsmax = lapply_brick(grepfxn("rhsmax",year_block_files), 'relative_humidity', model_files,tiffolder,data_source,to_clip=to_clip,bounds=bounds,pad_factor=pad_factor)
        rcp_rhsmin = lapply_brick(grepfxn("rhsmin",year_block_files), 'relative_humidity', model_files,tiffolder,data_source,to_clip=to_clip,bounds=bounds,pad_factor=pad_factor)
    # The section below is meant to convert netCDF files into geoTIFFs
    # Aggregate, resample and reproject the elevation grid to match the
    # climate rasters, then read it as a numpy array.
    src = rio.open(elevfile)
    # elevation_full_aggregate = aggregate_raster_inmem(src,scale=0.5)
    aggoutput_name='elevation_aggregated.tif'
    resample_raster_write(src, name= aggoutput_name,scale=0.5)
    dst_filename='elevation_aggregated_resampled.tif'
    match_filename=rcp_pr[0][0].name
    reproject_raster(aggoutput_name, match_filename,dst_filename)
    elevation_array=rio.open(dst_filename).read(1)
    # from datetime import datetime - will need to update to make start/end adaptive (7/28/2020)
    # start_year=year_all[year_block][0:4]
    # end_year=year_all[year_block][5:9]
    # Daily calendar spanning the year block, used to derive DOY/year per band.
    start_year=year_all[year_block][0:4]
    end_year=year_all[year_block][5:9]
    start=start_year+'-01-01'
    end=end_year+'-12-31'
    datetimes = pd.date_range(start=start,end=end)
    # i=10
    # Inner loop: one iteration per day (raster band) in the year block.
    for i in range(0,rcp_pr[0][0].count):
        doy_loop = pd.Period(datetimes[i],freq='D').dayofyear
        year_loop = pd.Period(datetimes[i],freq='D').year
        # step 1: extract ith band from the raster stack
        # step 2: stack those ith bands together
        # step 3: do raster mean math from step 2
        pr_stack=[]
        # Purpose: create stacks of variables individually - this is like brick in R
        # NOTE(review): these accumulators (and j) are initialised but never
        # used below -- likely leftovers from the R port.
        pr_ensemble = []
        rsds_ensemble = []
        tasmax_ensemble = []
        tasmin_ensemble = []
        j = 0
        # should be 1 array for each variable (mean of x ensembles for a given doy)
        # rcp_pr[0][0].read(1, masked=False).shape
        rcp_pr_doy = rastermath(rcp_pr[0], i)
        rcp_rsds_doy = rastermath(rcp_rsds[0], i)
        rcp_tasmax_doy = rastermath(rcp_tasmax[0], i)
        rcp_tasmin_doy = rastermath(rcp_tasmin[0], i)
        dims = np.shape(rcp_pr_doy[0])
        rows = dims[0]
        cols = dims[1]
        # Constant rasters for the Tetens saturation-vapour-pressure formula.
        constant_1_dat = np.full((rows,cols), 17.27)
        constant_2_dat = np.full((rows,cols), 0.6108)
        constant_3_dat = np.full((rows,cols), 273.15)
        constant_4_dat = np.full((rows,cols), 237.3)
        rcp_vs_tmax_array = constant_2_dat * np.exp(constant_1_dat * (rcp_tasmax_doy[0]-constant_3_dat) / ( (rcp_tasmax_doy[0]-constant_3_dat) + constant_4_dat)) # Equation S2.5
        rcp_vs_tmin_array = constant_2_dat * np.exp(constant_1_dat * (rcp_tasmin_doy[0]-constant_3_dat) / ( (rcp_tasmin_doy[0]-constant_3_dat) + constant_4_dat)) # Equation S2.5
        rcp_saturatedvapor_doy = (rcp_vs_tmax_array + rcp_vs_tmin_array)/2
        if(data_source == 'METDATA'): # line 180 from R script
            # All of these are arrays by the way
            rcp_rhsmax_doy = rastermath(rcp_rhsmax[0], i)
            rcp_rhsmin_doy = rastermath(rcp_rhsmin[0], i)
            rcp_uas_doy = rastermath(rcp_uas[0], i)
            rcp_vas_doy = rastermath(rcp_vas[0], i)
            # was below are just arrays, not metadata profiles
            rcp_was_doy_10m = np.sqrt(rcp_uas_doy[0]**2 + rcp_vas_doy[0]**2 )
            rcp_actualvapor_doy = (rcp_vs_tmin_array * rcp_rhsmax_doy[0]/100 + rcp_vs_tmax_array * rcp_rhsmin_doy[0]/100)/2
        # Latitude grid (radians) feeds the radiation terms of Penman-Monteith.
        da = xr.open_rasterio(rcp_pr[1])
        da_r = rio.open(rcp_pr[1])
        ny, nx = len(da['y']), len(da['x'])
        longitude_array, latitude_array = np.meshgrid(da['x'], da['y'])
        latitude_array_rad = latitude_array * (math.pi/180)
        # Wind speed at 2 meters
        z = np.full((rows,cols), 10)
        array_487 = np.full((rows,cols), 4.87)
        array_678 = np.full((rows,cols), 67.8)
        array_542 = np.full((rows,cols), 5.42)
        # NOTE(review): when data_source != 'METDATA', rcp_was_doy_10m and
        # rcp_actualvapor_doy were never defined above, so the else branch
        # would raise NameError -- confirm non-METDATA inputs are handled
        # upstream.
        if (data_source == 'METDATA'):
            rcp_was_doy_2m = rcp_was_doy_10m * array_487 / np.log(array_678*z - array_542) # Equation S5.20 for PET formulations other than Penman
        else:
            rcp_was_doy_2m = rcp_was_doy_10m[0] * array_487 / np.log(array_678*z - array_542) # Equation S5.20 for PET formulations other than Penman
        doy_array = np.full((rows,cols), i+1)
        # Force single-band metadata for the daily output rasters.
        rcp_pr_doy[1]['count']=1
        rcp_tasmin_doy[1]['count']=1
        rcp_tasmax_doy[1]['count']=1
        # To-do: go ahead and developed ET0 directly as opposed to the R implementation(7/29)
        ET0_inputarrays_rcp = [rcp_pr_doy[0], rcp_rsds_doy[0], rcp_tasmin_doy[0],
        rcp_tasmax_doy[0],rcp_was_doy_2m,rcp_saturatedvapor_doy,
        rcp_actualvapor_doy,elevation_array,latitude_array_rad,doy_array]
        # NameError: name 'ET0_method' is not defined
        # Choose reference-crop constants (albedo alpha, roughness height z0).
        if ET0_method == "yes":
            if ET0_crop != "short" and ET0_crop != "tall":
                # NOTE(review): `stop` is an R builtin; in Python this line
                # raises NameError -- should likely be `raise ValueError(...)`.
                stop("Please enter 'short' or 'tall' for the desired reference crop type")
            else:
                alpha = 0.23 # albedo for both short and tall crop
                if (ET0_crop == "short"):
                    z0 = 0.02 # roughness height for short grass
                else:
                    z0 = 0.1 # roughness height for tall grass
        else:
            z0 = 0.02 # roughness height for short grass
            alpha = 0.25 # semi-desert short grass - will not be used for calculation - just informative
        constants=[alpha, z0]
        # Run the Penman-Monteith reference-ET pipeline for this day.
        ET0_rcp = ET0_PM(ET0_inputarrays_rcp,ET0_method,ET0_winddat,ET0_crop,constants)
        ET0_rcp.incoming_shortwave()
        ET0_rcp.outgoing_shortwave()
        ET0_rcp.outgoing_longwave()
        ET0_rcp.net_radiation()
        ET0_rcp_array_from_class = ET0_rcp.ET0_calcs()
        ET0_rcp_array_final = ET0_rcp_array_from_class.astype('float32')
        rcp_pr_doy[1]['count']=1
        os.chdir('/home/jupyter-rouze')
        # Write the daily ET0 GeoTIFF locally, push it to S3, then delete
        # the local copy.
        gTIFF_filename = write_geotiff(data=ET0_rcp_array_final,meta=rcp_pr_doy[1],var_name='reference_evapotranspiration',
        doy=doy_loop,year=year_loop,folder=output_folder)
        local_file = output_folder+'/' + 'reference_evapotranspiration' + '/' + gTIFF_filename
        bucket = 'dev-et-data'
        bucket_filepath = 'in/DelawareRiverBasin/ETo/'+ str(year_loop) + '/' + gTIFF_filename
        os.chdir('/home/jupyter-rouze')
        s3_push_delete_local(local_file, bucket, bucket_filepath)
##################### Break down for loop by components for showcasing
# range(0,len(year_all))
year_block_files = grepfxn(year_all[0],rcp_all_files)
print(year_block_files)
bounds=[southmost,northmost,westmost,eastmost]
'''For a given input netCDF file, lapplybrick() creates a rasterio object that is clipped specifically for Delaware'''
rcp_pr = lapply_brick(grepfxn("pr",year_block_files), 'precipitation', model_files,tiffolder,data_source,to_clip=to_clip,bounds=bounds,pad_factor=pad_factor)
# downwelling shortwave radiation
rcp_rsds = lapply_brick(grepfxn("rsds",year_block_files), 'surface_downwelling_shortwave_flux_in_air', model_files,tiffolder,data_source,to_clip=to_clip,bounds=bounds,pad_factor=pad_factor)
# maximum air temperature
rcp_tasmax = lapply_brick(grepfxn("tasmax",year_block_files), 'air_temperature', model_files,tiffolder,data_source,to_clip=to_clip,bounds=bounds,pad_factor=pad_factor)
# minimum air temperature
rcp_tasmin = lapply_brick(grepfxn("tasmin",year_block_files), 'air_temperature', model_files,tiffolder,data_source,to_clip=to_clip,bounds=bounds,pad_factor=pad_factor)
# Now repeat above for the rcp 8.5 model outputs below
if(data_source == 'METDATA'):
rcp_uas = lapply_brick(grepfxn("uas",year_block_files), 'eastward_wind', model_files,tiffolder,data_source,to_clip=to_clip,bounds=bounds,pad_factor=pad_factor)
rcp_vas = lapply_brick(grepfxn("vas",year_block_files), 'northward_wind', model_files,tiffolder,data_source,to_clip=to_clip,bounds=bounds,pad_factor=pad_factor)
rcp_rhsmax = lapply_brick(grepfxn("rhsmax",year_block_files), 'relative_humidity', model_files,tiffolder,data_source,to_clip=to_clip,bounds=bounds,pad_factor=pad_factor)
rcp_rhsmin = lapply_brick(grepfxn("rhsmin",year_block_files), 'relative_humidity', model_files,tiffolder,data_source,to_clip=to_clip,bounds=bounds,pad_factor=pad_factor)
# The section below is meant to convert netCDF files into geoTIFFs
print(rcp_pr)
'''Read and resample elevation to match raster characteristics from all other MACA inputs needed for reference ET'''
src = rio.open(elevfile)
# elevation_full_aggregate = aggregate_raster_inmem(src,scale=0.5)
aggoutput_name='elevation_aggregated.tif'
resample_raster_write(src, name= aggoutput_name,scale=0.5)
dst_filename='elevation_aggregated_resampled.tif'
match_filename=rcp_pr[0][0].name
reproject_raster(aggoutput_name, match_filename,dst_filename)
elevation_array=rio.open(dst_filename).read(1)
# from datetime import datetime - will need to update to make start/end adaptive (7/28/2020)
start_year=year_all[year_block][0:4]
end_year=year_all[year_block][5:9]
start=start_year+'-01-01'
end=end_year+'-12-31'
datetimes = pd.date_range(start=start,end=end)
# i=10
print(start)
print(end)
# for i in range(0,rcp_pr[0][0].count):
doy_loop = pd.Period(datetimes[0],freq='D').dayofyear
year_loop = pd.Period(datetimes[0],freq='D').year
print(doy_loop)
print(year_loop)
# step 1: extract ith band from the raster stack
# step 2: stack those ith bands together
# step 3: do raster mean math from step 2
pr_stack=[]
# Purpose: create stacks of variables individually - this is like brick in R
pr_ensemble = []
rsds_ensemble = []
tasmax_ensemble = []
tasmin_ensemble = []
# Here we are using index = 0 or January 1 of the first year in the given netCDF file (e.g. 2021, 2026 2031, 2036 etc.)
# should be 1 array for each variable (mean of x ensembles for a given doy)
# rcp_pr[0][0].read(1, masked=False).shape
print(rcp_pr)
print(rcp_pr[0]) # rasterio opened geoTIFF of precipitation
print(rcp_pr[0][0])
'''rastermath() averages across all models...however, since we are only dealing with one model for now (i.e. MIROC5),
the rastermath() APPEARS to be redundant. However, future iterations of this code, such as rastermath(), are expecting
an ensemble average of inputs, which are better for to gauge model uncertainty.'''
rcp_pr_doy = rastermath(rcp_pr[0], i)
# print(rcp_pr_doy)
# print(rcp_pr_doy[0]) # array
# print(rcp_pr_doy[1]) # geoTIFF metadata
'''Repeat rastermath() for all other inputs needed for reference ET'''
rcp_rsds_doy = rastermath(rcp_rsds[0], i)
rcp_tasmax_doy = rastermath(rcp_tasmax[0], i)
rcp_tasmin_doy = rastermath(rcp_tasmin[0], i)
dims = np.shape(rcp_pr_doy[0])
rows = dims[0]
cols = dims[1]
'''Derive saturated vapor pressure for Penman-Monteith Approximation'''
constant_1_dat = np.full((rows,cols), 17.27)
constant_2_dat = np.full((rows,cols), 0.6108)
constant_3_dat = np.full((rows,cols), 273.15)
constant_4_dat = np.full((rows,cols), 237.3)
rcp_vs_tmax_array = constant_2_dat * np.exp(constant_1_dat * (rcp_tasmax_doy[0]-constant_3_dat) / ( (rcp_tasmax_doy[0]-constant_3_dat) + constant_4_dat)) # Equation S2.5
rcp_vs_tmin_array = constant_2_dat * np.exp(constant_1_dat * (rcp_tasmin_doy[0]-constant_3_dat) / ( (rcp_tasmin_doy[0]-constant_3_dat) + constant_4_dat)) # Equation S2.5
rcp_saturatedvapor_doy = (rcp_vs_tmax_array + rcp_vs_tmin_array)/2 # s2.6
if(data_source == 'METDATA'): # line 180 from R script
# All of these are arrays by the way
rcp_rhsmax_doy = rastermath(rcp_rhsmax[0], i)
rcp_rhsmin_doy = rastermath(rcp_rhsmin[0], i)
rcp_uas_doy = rastermath(rcp_uas[0], i)
rcp_vas_doy = rastermath(rcp_vas[0], i)
# was below are just arrays, not metadata profiles
rcp_was_doy_10m = np.sqrt(rcp_uas_doy[0]**2 + rcp_vas_doy[0]**2 )
# inputs: min/max saturated vapor pressure from air temp., min/max relative humidity (assuming relative humidity is present)
rcp_actualvapor_doy = (rcp_vs_tmin_array * rcp_rhsmax_doy[0]/100 + rcp_vs_tmax_array * rcp_rhsmin_doy[0]/100)/2 # s2.7
da = xr.open_rasterio(rcp_pr[1])
da_r = rio.open(rcp_pr[1])
ny, nx = len(da['y']), len(da['x'])
longitude_array, latitude_array = np.meshgrid(da['x'], da['y'])
# Latitude (needed for Extraterrestrial radiation or Ra), in radians
latitude_array_rad = latitude_array * (math.pi/180)
# Convert from wind speed at 10 meters to wind speed at 2 meters
z = np.full((rows,cols), 10)
array_487 = np.full((rows,cols), 4.87)
array_678 = np.full((rows,cols), 67.8)
array_542 = np.full((rows,cols), 5.42)
if (data_source == 'METDATA'):
rcp_was_doy_2m = rcp_was_doy_10m * array_487 / np.log(array_678*z - array_542) # Equation S5.20 for PET formulations other than Penman
else:
rcp_was_doy_2m = rcp_was_doy_10m[0] * array_487 / np.log(array_678*z - array_542) # Equation S5.20 for PET formulations other than Penman
doy_array = np.full((rows,cols), i+1)
rcp_pr_doy[1]['count']=1
rcp_tasmin_doy[1]['count']=1
rcp_tasmax_doy[1]['count']=1
# To-do: go ahead and developed ET0 directly as opposed to the R implementation(7/29)
# To-do: go ahead and developed ET0 directly as opposed to the R implementation(7/29)
# Combine all of the inputs into an list of arrays
ET0_inputarrays_rcp = [rcp_pr_doy[0], rcp_rsds_doy[0], rcp_tasmin_doy[0],
rcp_tasmax_doy[0],rcp_was_doy_2m,rcp_saturatedvapor_doy,
rcp_actualvapor_doy,elevation_array,latitude_array_rad,doy_array]
# NameError: name 'ET0_method' is not defined
if ET0_method == "yes":
if ET0_crop != "short" and ET0_crop != "tall":
stop("Please enter 'short' or 'tall' for the desired reference crop type")
else:
alpha = 0.23 # albedo for both short and tall crop
if (ET0_crop == "short"):
z0 = 0.02 # roughness height for short grass
else:
z0 = 0.1 # roughness height for tall grass
else:
z0 = 0.02 # roughness height for short grass
alpha = 0.25 # semi-desert short grass - will not be used for calculation - just informative
constants=[alpha, z0]
'''Initialize the ET0 class from the imported Jupyter notebook external to this one (see top for imports)'''
ET0_rcp = ET0_PM(ET0_inputarrays_rcp,ET0_method,ET0_winddat,ET0_crop,constants)
ET0_rcp.incoming_shortwave()
ET0_rcp.outgoing_shortwave()
ET0_rcp.outgoing_longwave()
ET0_rcp.net_radiation()
ET0_rcp_array_from_class = ET0_rcp.ET0_calcs()
ET0_rcp_array_final = ET0_rcp_array_from_class.astype('float32')
print(ET0_rcp_array_final)
result = ET0_rcp_array_final.ravel()
cleanedList = [x for x in result if str(x) != 'nan']
from scipy import stats
stats.describe(cleanedList)
import matplotlib.pyplot as plt
plt.imshow(ET0_rcp_array_final)
plt.show()
plt.title("Histogram with 'auto' bins")
plt.hist(ET0_rcp_array_final)
plt.show()
rcp_pr_doy[1]['count']=1
os.chdir('/home/jupyter-rouze')
gTIFF_filename = write_geotiff(data=ET0_rcp_array_final,meta=rcp_pr_doy[1],var_name='reference_evapotranspiration',
doy=doy_loop,year=year_loop,folder=output_folder)
local_file = output_folder+'/' + 'reference_evapotranspiration' + '/' + gTIFF_filename
bucket = 'dev-et-data'
bucket_filepath = 'in/DelawareRiverBasin/ETo/'+ str(year_loop) + '/' + gTIFF_filename
os.chdir('/home/jupyter-rouze')
reread = rio.open(local_file)
reread.meta
print(round(reread.meta['transform'][0],4),round(reread.meta['transform'][4],4))
'''Push newly created geoTIFF into specified bucket and its filepath'''
s3_push_delete_local(local_file, bucket, bucket_filepath)
```
| github_jupyter |
## Code for policy section
```
# Load libraries
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mlp
# Ensure type 1 fonts are used
mlp.rcParams['ps.useafm'] = True
mlp.rcParams['pdf.use14corefonts'] = True
mlp.rcParams['text.usetex'] = True
import seaborn as sns
import pandas as pd
import pickle
import itertools as it
```
## Solve for the final size of the outbreak in Lombardy, Italy
```
# Estimate the final epidemic size from the best-fit basic reproduction number.
# Final-size relation: log(s_inf) = R0 * (s_inf - 1), where s_inf is the fraction
# of the population still susceptible when the outbreak ends.
# For formula, see here: https://web.stanford.edu/~jhj1/teachingdocs/Jones-on-R0.pdf
from sympy import Symbol, solve, log
x = Symbol('x')
r0 = 3.16  # best-fit R0 for Lombardy, Italy
# Fix: the original referenced an undefined name `r0_max` (NameError); the
# fitted value is stored in `r0` above.
s_inf = solve(log(x)-r0*(x-1),x)[0]
print("% of the population that is still susceptible by the end of the outbreak in Lombardy, Italy: {0:10.4f}".format(s_inf*100))
print("% of the population that has ever been infected by the end of the outbreak in Lombardy, Italy: {0:10.4f}".format(100-s_inf*100))
# Set of colors
# For age group policies
color_list_shahin = ['orange','green','blue','purple','black']
# For additional baseline policies (50% or 100% of the population being asked to shelter-in-place)
color_list_add = ['dodgerblue','hotpink']
# Number of distinct ages in the UN age distribution
# Currently ages 0-100, with each age counted separately
n_ages = 101
# Shelter-in-place probabilities per age group, equivalent to 1 million of the considered generation in each case
age_ranges = [(0,14), (15,29), (30,49), (50,69), (70,100)]
isolation_rates_by_age = [0.803689, 0.713332, 0.380842, 0.358301, 0.516221]
# Learn about the structure of the folder containing the simulation results:
# one length-n_ages stay-home probability vector per age group.
all_possible_combos = []
for a, iso_rate in zip(age_ranges, isolation_rates_by_age):
    combo = np.zeros(n_ages)
    combo[a[0]:a[1]+1] = iso_rate
    all_possible_combos.append(combo)
# Two possibilities for mean time to isolation: either 4.6 days (default value) or a large number to mimic no isolation in place
mean_time_to_isolations = [4.6, 10000]
# NOTE: itertools.product varies the rightmost factor fastest, so the first
# len(age_ranges) entries all carry mtti=4.6 and the rest carry mtti=10000.
all_possible_combos = list(it.product(mean_time_to_isolations, all_possible_combos))
NUM_COMBOS = len(all_possible_combos)
print("NUM COMBOS:",NUM_COMBOS)
mtti_val_even = all_possible_combos[0][0]
combo_frac_stay_home_even = all_possible_combos[0][1]
mtti_val_odd = all_possible_combos[1][0]
combo_frac_stay_home_odd = all_possible_combos[1][1]
print("Value of mean time to isolation - even index: ", mtti_val_even)
print("Combo fraction stay home - even index", combo_frac_stay_home_even)
print("Value of mean time to isolation - odd index: ", mtti_val_odd)
print("Combo fraction stay home - odd index: ", combo_frac_stay_home_odd)
# Rebuild the combo list for the additional baseline policies: for each age
# group, either 50% or 100% of that group is asked to shelter-in-place.
all_possible_combos = []
for a in age_ranges:
    # Either 50% or 100% of the population in each age group is asked to shelter-in-place
    for val in [0.5, 1.0]:
        combo = np.zeros(n_ages)
        combo[a[0]:a[1]+1]=val
        all_possible_combos.append(combo)
# Two possibilities for mean time to isolation: either 4.6 days (default value) or a large number to mimic no isolation in place
mean_time_to_isolations = [4.6, 10000]
all_possible_combos = list(it.product(mean_time_to_isolations, all_possible_combos))
NUM_COMBOS = len(all_possible_combos)
print("NUM COMBOS:",NUM_COMBOS)
mtti_val_even = all_possible_combos[0][0]
combo_frac_stay_home_even = all_possible_combos[0][1]
mtti_val_odd = all_possible_combos[1][0]
combo_frac_stay_home_odd = all_possible_combos[1][1]
print("Value of mean time to isolation - even index: ", mtti_val_even)
print("Combo fraction stay home - even index: ", combo_frac_stay_home_even)
# Fix: the original printed the *even*-index variables here under "odd index"
# labels (copy-paste bug); print the odd-index values as labeled.
print("Value of mean time to isolation - odd index: ", mtti_val_odd)
print("Combo fraction stay home - odd index: ", combo_frac_stay_home_odd)
# Set font sizes for plots
legend_fontsize = 13
title_fontsize = 15
xlab_fontsize = 23
ylab_fontsize = 23
xtick_fontsize = 17
ytick_fontsize = 17
```
## Functions to be used to plot four subgraphs in Figure 8
### Function to be used to plot the projected percentage of infected people in the population over time, in the absence of physical distancing
### Figures 8(a) and 8(b)
```
def perc_infected_age_group_node_removal(pop_size, group_vec_age, t_lockdown_vec, n_sims, sim_end, today, combo_start, combo_end, folder1, folder2, filename1, filename2, option, specific_title):
    """Plot the projected percentage of ever-infected people over time in the
    absence of physical distancing (Figures 8(a) and 8(b)).

    On the current matplotlib figure this draws:
      * a gray dash-dot "no intervention" baseline curve,
      * one curve per age-group shelter-in-place policy (colors from the
        module-level ``color_list_shahin``),
      * two extra baselines where 50%/100% of all ages shelter in place
        (colors from ``color_list_add``),
      * a vertical red dashed line on the lockdown day ``t_lockdown_vec[0]``.

    ``option`` selects which policy combo indices are plotted: 2 -> even
    indices, 1 -> odd indices, anything else -> every index in
    [combo_start, combo_end).  Per-seed results are read as space-delimited
    CSVs; seeds 0-49 come from ``folder1``/``filename1`` and seeds 50+ from
    ``folder2``/``filename2``.  "Ever infected" is Mild+Severe+Critical+
    Recovered+Deaths, averaged over seeds and scaled to percent of
    ``pop_size``.

    Returns the ``matplotlib.pyplot`` module so the caller can show/save.
    """
    if option == 2:
        nb = 0
        # baseline
        base_filename = 'lombardy_distributed_agepolicy_nolockdown_baseline_0_paramsweep_n10000000.0_i0_N'
        base_folder = 'nolockdown_noage/'
        Infected_Trials = np.zeros((n_sims,sim_end+1))
        for i in range(n_sims):
            Mild = pd.read_csv(base_folder + base_filename + str(i) + '_p0.029_m4_s22_mild.csv',delimiter=' ',header=None)
            Severe = pd.read_csv(base_folder + base_filename + str(i) + '_p0.029_m4_s22_severe.csv',delimiter=' ',header=None)
            Critical = pd.read_csv(base_folder + base_filename + str(i) + '_p0.029_m4_s22_critical.csv',delimiter=' ',header=None)
            R = pd.read_csv(base_folder + base_filename + str(i) + '_p0.029_m4_s22_recovered.csv',delimiter=' ',header=None)
            D = pd.read_csv(base_folder + base_filename + str(i) + '_p0.029_m4_s22_deaths.csv',delimiter=' ',header=None)
            Infected_Trials[i,:] = Mild+Severe+Critical+R+D
        Infected_Trials = Infected_Trials.mean(axis=0)
        Infected_Trials = Infected_Trials/pop_size*100.
        print("Baseline 0: No intervention")
        print("% infected on lockdown day: ", Infected_Trials[t_lockdown_vec[0]])
        print("% infected today: ", Infected_Trials[today])
        print("% infected at the end of the simulation: ", Infected_Trials[sim_end])
        plt.plot(Infected_Trials,color='gray',linestyle='-.')
        # Even combo indices only (step 2), one age-group policy per index.
        for j in range(combo_start,combo_end,2):
            nb +=1
            Infected_Trials = np.zeros((n_sims,sim_end+1))
            for i in range(n_sims):
                if i < 50:
                    folder = folder1
                    filename = filename1
                else:
                    folder = folder2
                    filename = filename2
                Mild = pd.read_csv( folder + filename + str(j) + '_N' + str(i%50) + '_p0.029_m4_s22_mild.csv',delimiter=' ',header=None)
                Severe = pd.read_csv(folder + filename + str(j) + '_N' + str(i%50) + '_p0.029_m4_s22_severe.csv',delimiter=' ',header=None)
                Critical = pd.read_csv(folder + filename + str(j) + '_N' + str(i%50) + '_p0.029_m4_s22_critical.csv',delimiter=' ',header=None)
                R = pd.read_csv(folder + filename + str(j) + '_N' + str(i%50) + '_p0.029_m4_s22_recovered.csv',delimiter=' ',header=None)
                D = pd.read_csv(folder + filename + str(j) + '_N' + str(i%50) + '_p0.029_m4_s22_deaths.csv',delimiter=' ',header=None)
                Infected_Trials[i,:] = Mild+Severe+Critical+R+D
            Infected_Trials = Infected_Trials.mean(axis=0)
            Infected_Trials = Infected_Trials/pop_size*100.
            print("Age group: ", group_vec_age[nb-1])
            print("% infected on lockdown day: ", Infected_Trials[t_lockdown_vec[0]])
            print("% infected today: ", Infected_Trials[today])
            print("% infected at the end of the simulation: ", Infected_Trials[sim_end])
            plt.plot(Infected_Trials,color=color_list_shahin[nb-1])
        # new baseline - 50% population is isolated
        base2_filename = 'lombardy_distributed_agepolicy_nolockdown_baseline2_0_paramsweep_n10000000.0_i'
        base2_folder = 'nolockdown_fullisolation/'
        for j in range(2,4):
            Infected_Trials = np.zeros((n_sims,sim_end+1))
            for i in range(n_sims):
                Mild = pd.read_csv( base2_folder + base2_filename + str(j) + '_N' + str(i) + '_p0.029_m4_s22_mild.csv',delimiter=' ',header=None)
                Severe = pd.read_csv(base2_folder + base2_filename + str(j) + '_N' + str(i) + '_p0.029_m4_s22_severe.csv',delimiter=' ',header=None)
                Critical = pd.read_csv(base2_folder + base2_filename + str(j) + '_N' + str(i) + '_p0.029_m4_s22_critical.csv',delimiter=' ',header=None)
                R = pd.read_csv(base2_folder + base2_filename + str(j) + '_N' + str(i) + '_p0.029_m4_s22_recovered.csv',delimiter=' ',header=None)
                D = pd.read_csv(base2_folder + base2_filename + str(j) + '_N' + str(i) + '_p0.029_m4_s22_deaths.csv',delimiter=' ',header=None)
                Infected_Trials[i,:] = Mild+Severe+Critical+R+D
            Infected_Trials = Infected_Trials.mean(axis=0)
            Infected_Trials = Infected_Trials/pop_size*100.
            print("Baseline: ", j-1)
            print("% infected on lockdown day: ", Infected_Trials[t_lockdown_vec[0]])
            print("% infected today: ", Infected_Trials[today])
            print("% infected at the end of the simulation: ", Infected_Trials[sim_end])
            plt.plot(Infected_Trials,color=color_list_add[j-2],linestyle='-.')
        plt.axvline(t_lockdown_vec[0], 0,color='red',linestyle='--')
        plt.legend(['Absence of\n intervention']+['Ages ' + str(group_vec_age[i]) for i in range(len(group_vec_age))]+['All ages\n50\% confined','All ages\n100\% confined'], fontsize = 13)
        plt.ylim(0,100)
        plt.title(specific_title,fontsize=15)
        plt.xticks(fontsize=17)
        plt.yticks(fontsize=17)
        plt.ylabel('Percentage of infected', fontsize=23)
        plt.xlabel('Days since patient zero', fontsize=23)
        return(plt)
    elif option == 1:
        nb = 0
        # baseline
        base_filename = 'lombardy_distributed_agepolicy_nolockdown_baseline_0_paramsweep_n10000000.0_i0_N'
        base_folder = 'nolockdown_noage/'
        Infected_Trials=np.zeros((n_sims,sim_end+1))
        for i in range(n_sims):
            Mild = pd.read_csv( base_folder + base_filename + str(i) + '_p0.029_m4_s22_mild.csv',delimiter=' ',header=None)
            Severe = pd.read_csv( base_folder + base_filename + str(i) + '_p0.029_m4_s22_severe.csv',delimiter=' ',header=None)
            Critical = pd.read_csv( base_folder + base_filename + str(i) + '_p0.029_m4_s22_critical.csv',delimiter=' ',header=None)
            R = pd.read_csv( base_folder + base_filename + str(i) + '_p0.029_m4_s22_recovered.csv',delimiter=' ',header=None)
            D = pd.read_csv( base_folder + base_filename + str(i) + '_p0.029_m4_s22_deaths.csv',delimiter=' ',header=None)
            Infected_Trials[i,:] = Mild+Severe+Critical+R+D
        Infected_Trials = Infected_Trials.mean(axis=0)
        Infected_Trials = Infected_Trials/pop_size*100.
        print("Baseline 0: No intervention")
        print("% infected on lockdown day: ", Infected_Trials[t_lockdown_vec[0]])
        print("% infected today: ", Infected_Trials[today])
        print("% infected at the end of the simulation: ", Infected_Trials[sim_end])
        plt.plot(Infected_Trials,color='gray',linestyle='-.')
        # Odd combo indices only (start offset by 1, step 2).
        for j in range(combo_start+1,combo_end,2):
            nb +=1
            Infected_Trials = np.zeros((n_sims,sim_end+1))
            for i in range(n_sims):
                if i < 50:
                    folder = folder1
                    filename = filename1
                else:
                    folder = folder2
                    filename = filename2
                Mild = pd.read_csv( folder + filename + str(j) + '_N' + str(i%50) + '_p0.029_m4_s22_mild.csv',delimiter=' ',header=None)
                Severe = pd.read_csv(folder + filename + str(j) + '_N' + str(i%50) + '_p0.029_m4_s22_severe.csv',delimiter=' ',header=None)
                Critical = pd.read_csv(folder + filename + str(j) + '_N' + str(i%50) + '_p0.029_m4_s22_critical.csv',delimiter=' ',header=None)
                R = pd.read_csv(folder + filename + str(j) + '_N' + str(i%50) + '_p0.029_m4_s22_recovered.csv',delimiter=' ',header=None)
                D = pd.read_csv(folder + filename + str(j) + '_N' + str(i%50) + '_p0.029_m4_s22_deaths.csv',delimiter=' ',header=None)
                Infected_Trials[i,:] = Mild+Severe+Critical+R+D
            Infected_Trials = Infected_Trials.mean(axis=0)
            Infected_Trials = Infected_Trials/pop_size*100.
            print("Age group: ", group_vec_age[nb-1])
            print("% infected on lockdown day: ", Infected_Trials[t_lockdown_vec[0]])
            print("% infected today: ", Infected_Trials[today])
            print("% infected at the end of the simulation: ", Infected_Trials[sim_end])
            plt.plot(Infected_Trials,color=color_list_shahin[nb-1])
        # new baseline - 50% population is isolated
        base2_filename = 'lombardy_distributed_agepolicy_nolockdown_baseline2_0_paramsweep_n10000000.0_i'
        base2_folder = 'nolockdown_fullisolation/'
        for j in range(2,4):
            Infected_Trials = np.zeros((n_sims,sim_end+1))
            for i in range(n_sims):
                Mild = pd.read_csv( base2_folder + base2_filename + str(j) + '_N' + str(i) + '_p0.029_m4_s22_mild.csv',delimiter=' ',header=None)
                Severe = pd.read_csv(base2_folder + base2_filename + str(j) + '_N' + str(i) + '_p0.029_m4_s22_severe.csv',delimiter=' ',header=None)
                Critical = pd.read_csv(base2_folder + base2_filename + str(j) + '_N' + str(i) + '_p0.029_m4_s22_critical.csv',delimiter=' ',header=None)
                R = pd.read_csv(base2_folder + base2_filename + str(j) + '_N' + str(i) + '_p0.029_m4_s22_recovered.csv',delimiter=' ',header=None)
                D = pd.read_csv(base2_folder + base2_filename + str(j) + '_N' + str(i) + '_p0.029_m4_s22_deaths.csv',delimiter=' ',header=None)
                Infected_Trials[i,:] = Mild+Severe+Critical+R+D
            Infected_Trials = Infected_Trials.mean(axis=0)
            Infected_Trials = Infected_Trials/pop_size*100.
            print("Baseline: ", j-1)
            print("% infected on lockdown day: ", Infected_Trials[t_lockdown_vec[0]])
            print("% infected today: ", Infected_Trials[today])
            print("% infected at the end of the simulation: ", Infected_Trials[sim_end])
            plt.plot(Infected_Trials,color=color_list_add[j-2],linestyle='-.')
        plt.axvline(t_lockdown_vec[0], 0,color='red',linestyle='--')
        plt.legend(['Absence of\nintervention']+['Ages ' + str(group_vec_age[i]) for i in range(len(group_vec_age))]+['All ages\n50\% confined','All ages\n100\% confined'], fontsize = 13)
        plt.ylim(0,100)
        plt.title(specific_title,fontsize=15)
        plt.ylabel('Percentage of infected', fontsize=23)
        plt.xticks(fontsize=17)
        plt.yticks(fontsize=17)
        plt.xlabel('Days since patient zero', fontsize=23)
        return(plt)
    else:
        nb = 0
        # baseline
        base_filename = 'lombardy_distributed_agepolicy_nolockdown_baseline_0_paramsweep_n10000000.0_i0_N'
        base_folder = 'nolockdown_noage/'
        # NOTE(review): this branch averages the baseline over 100 seeds
        # regardless of n_sims — confirm that is intended.
        Infected_Trials=np.zeros((100,sim_end+1))
        for i in range(100):
            Mild = pd.read_csv( base_folder + base_filename + str(i) + '_p0.029_m4_s22_mild.csv',delimiter=' ',header=None)
            Severe = pd.read_csv( base_folder + base_filename + str(i) + '_p0.029_m4_s22_severe.csv',delimiter=' ',header=None)
            Critical = pd.read_csv( base_folder + base_filename + str(i) + '_p0.029_m4_s22_critical.csv',delimiter=' ',header=None)
            R = pd.read_csv( base_folder + base_filename + str(i) + '_p0.029_m4_s22_recovered.csv',delimiter=' ',header=None)
            D = pd.read_csv( base_folder + base_filename + str(i) + '_p0.029_m4_s22_deaths.csv',delimiter=' ',header=None)
            Infected_Trials[i,:] = Mild+Severe+Critical+R+D
        Infected_Trials = Infected_Trials.mean(axis=0)
        Infected_Trials = Infected_Trials/pop_size*100.
        print("Baseline 0: No intervention")
        print("% infected on lockdown day: ", Infected_Trials[t_lockdown_vec[0]])
        print("% infected today: ", Infected_Trials[today])
        print("% infected at the end of the simulation: ", Infected_Trials[sim_end])
        plt.plot(Infected_Trials,color='gray',linestyle='-.')
        # Every combo index in [combo_start, combo_end).
        for j in range(combo_start,combo_end):
            nb +=1
            Infected_Trials = np.zeros((n_sims,sim_end+1))
            for i in range(n_sims):
                if i < 50:
                    folder = folder1
                    filename = filename1
                else:
                    folder = folder2
                    filename = filename2
                Mild = pd.read_csv( folder + filename + str(j) + '_N' + str(i%50) + '_p0.029_m4_s22_mild.csv',delimiter=' ',header=None)
                Severe = pd.read_csv(folder + filename + str(j) + '_N' + str(i%50) + '_p0.029_m4_s22_severe.csv',delimiter=' ',header=None)
                Critical = pd.read_csv(folder + filename + str(j) + '_N' + str(i%50) + '_p0.029_m4_s22_critical.csv',delimiter=' ',header=None)
                R = pd.read_csv(folder + filename + str(j) + '_N' + str(i%50) + '_p0.029_m4_s22_recovered.csv',delimiter=' ',header=None)
                D = pd.read_csv(folder + filename + str(j) + '_N' + str(i%50) + '_p0.029_m4_s22_deaths.csv',delimiter=' ',header=None)
                Infected_Trials[i,:] = Mild+Severe+Critical+R+D
            Infected_Trials = Infected_Trials.mean(axis=0)
            # Fix: scale to percent like every other branch (the original
            # divided by pop_size only, so the "%" prints were off by 100x).
            Infected_Trials = Infected_Trials/pop_size*100.
            print("Age group: ", group_vec_age[nb-1])
            print("% infected on lockdown day: ", Infected_Trials[t_lockdown_vec[0]])
            print("% infected today: ", Infected_Trials[today])
            print("% infected at the end of the simulation: ", Infected_Trials[sim_end])
            plt.plot(Infected_Trials,color=color_list_shahin[nb-1])
        # new baseline - 50% population is isolated
        base2_filename = 'lombardy_distributed_agepolicy_nolockdown_baseline2_0_paramsweep_n10000000.0_i'
        base2_folder = 'nolockdown_fullisolation/'
        for j in range(2,4):
            Infected_Trials = np.zeros((n_sims,sim_end+1))
            for i in range(n_sims):
                Mild = pd.read_csv( base2_folder + base2_filename + str(j) + '_N' + str(i) + '_p0.029_m4_s22_mild.csv',delimiter=' ',header=None)
                Severe = pd.read_csv(base2_folder + base2_filename + str(j) + '_N' + str(i) + '_p0.029_m4_s22_severe.csv',delimiter=' ',header=None)
                Critical = pd.read_csv(base2_folder + base2_filename + str(j) + '_N' + str(i) + '_p0.029_m4_s22_critical.csv',delimiter=' ',header=None)
                R = pd.read_csv(base2_folder + base2_filename + str(j) + '_N' + str(i) + '_p0.029_m4_s22_recovered.csv',delimiter=' ',header=None)
                D = pd.read_csv(base2_folder + base2_filename + str(j) + '_N' + str(i) + '_p0.029_m4_s22_deaths.csv',delimiter=' ',header=None)
                Infected_Trials[i,:] = Mild+Severe+Critical+R+D
            Infected_Trials = Infected_Trials.mean(axis=0)
            Infected_Trials = Infected_Trials/pop_size*100.
            print("Baseline: ", j-1)
            print("% infected on lockdown day: ", Infected_Trials[t_lockdown_vec[0]])
            print("% infected today: ", Infected_Trials[today])
            print("% infected at the end of the simulation: ", Infected_Trials[sim_end])
            plt.plot(Infected_Trials,color=color_list_add[j-2],linestyle='-.')
        plt.axvline(t_lockdown_vec[0], 0,color='red',linestyle='--')
        plt.ylim(0,100)
        plt.legend(['Absence of\nintervention']+['Ages ' + str(group_vec_age[i]) for i in range(5)]+['All ages\n50\% confined','All ages\n100\% confined'], fontsize = 13)
        plt.title(specific_title, fontsize=23)
        plt.xticks(fontsize=17)
        plt.yticks(fontsize=17)
        plt.ylabel('Percentage of infected', fontsize=23)
        plt.xlabel('Days since patient zero', fontsize=23)
        return(plt)
```
### Function to be used to plot the projected number of deaths over time, in the absence of physical distancing
### Figures 8(c) and 8(d)
```
def death_age_group_node_removal(group_vec_age, t_lockdown_vec, n_sims, sim_end, today, combo_start, combo_end,
                                 folder1, folder2, filename1, filename2, option, specific_title):
    """Plot the projected cumulative deaths over time in the absence of
    physical distancing (Figures 8(c) and 8(d)).

    On the current matplotlib figure this draws:
      * a gray dash-dot "no intervention" baseline curve,
      * one curve per age-group shelter-in-place policy (colors from the
        module-level ``color_list_shahin``),
      * two extra baselines where 50%/100% of all ages shelter in place
        (colors from ``color_list_add``),
      * a vertical red dashed line on the lockdown day ``t_lockdown_vec[0]``.

    ``option`` selects which policy combo indices are plotted: 2 -> even
    indices, 1 -> odd indices, anything else -> every index in
    [combo_start, combo_end).  Per-seed death counts are read from
    space-delimited CSVs (seeds 0-49 from ``folder1``/``filename1``, seeds
    50+ from ``folder2``/``filename2``), averaged over seeds, and plotted
    in thousands.

    Returns the ``matplotlib.pyplot`` module so the caller can show/save.
    """
    if option == 2:
        nb = 0
        # Baseline - No intervention
        base_filename = 'lombardy_distributed_agepolicy_nolockdown_baseline_0_paramsweep_n10000000.0_i0_N'
        base_folder = 'nolockdown_noage/'
        D=np.zeros((n_sims,sim_end+1))
        for i in range(n_sims):
            Deaths = pd.read_csv(base_folder + base_filename + str(i) + '_p0.029_m4_s22_deaths.csv', delimiter=' ',header=None)
            D[i,:]=Deaths
        D = D.mean(axis=0)
        print("Baseline 0: No intervention")
        print("# of deaths on lockdown day: ", D[t_lockdown_vec[0]])
        print("# of deaths today: ", D[today])
        print("# of deaths at the end of the simulation: ", D[sim_end])
        D = D/1000.
        plt.plot(D,color='gray',linestyle='-.')
        # Even combo indices only (step 2), one age-group policy per index.
        for j in range(combo_start,combo_end,2):
            nb +=1
            D = np.zeros((n_sims,sim_end+1))
            for i in range(n_sims):
                if i < 50:
                    folder = folder1
                    filename = filename1
                else:
                    folder = folder2
                    filename = filename2
                Deaths = pd.read_csv(folder + filename + str(j) + '_N' + str(i%50) + '_p0.029_m4_s22_deaths.csv', delimiter=' ',header=None)
                D[i,:] = Deaths
            D = D.mean(axis=0)
            print("Age group: ", group_vec_age[nb-1])
            print("# of deaths on lockdown day: ", D[t_lockdown_vec[0]])
            print("# of deaths today : ", D[today])
            print("# of deaths at the end of the simulation: ", D[sim_end])
            D = D/1000.
            plt.plot(D,color=color_list_shahin[nb-1])
        # Additional baselines - 50% and 100% of population stays home
        base2_filename = 'lombardy_distributed_agepolicy_nolockdown_baseline2_0_paramsweep_n10000000.0_i'
        base2_folder = 'nolockdown_fullisolation/'
        for j in range(2,4):
            D = np.zeros((n_sims,sim_end+1))
            for i in range(n_sims):
                Deaths = pd.read_csv(base2_folder + base2_filename + str(j) + '_N' + str(i) + '_p0.029_m4_s22_deaths.csv', delimiter=' ',header=None)
                D[i,:]=Deaths
            D = D.mean(axis=0)
            print("Baseline: ", j-1)
            print("# of deaths on lockdown day: ", D[t_lockdown_vec[0]])
            print("# of deaths today: ", D[today])
            print("# of deaths at the end of the simulation: ", D[sim_end])
            D = D/1000.
            plt.plot(D,color=color_list_add[j-2], linestyle='-.')
        plt.axvline(t_lockdown_vec[0], 0, color='red', linestyle='--')
        plt.legend(['Absence of\n intervention']+['Ages ' + str(group_vec_age[i]) for i in range(len(group_vec_age))]+['All ages\n50\% confined','All ages\n100\% confined'], fontsize=legend_fontsize)
        plt.ylim(0,400)
        plt.title(specific_title, fontsize=title_fontsize)
        plt.xlabel('Days since patient zero', fontsize=xlab_fontsize)
        plt.ylabel('Total deaths (thousands)', fontsize=ylab_fontsize)
        plt.xticks(fontsize=xtick_fontsize)
        plt.yticks(fontsize=ytick_fontsize)
        return(plt)
    elif option == 1:
        nb = 0
        # Baseline - No intervention
        base_filename = 'lombardy_distributed_agepolicy_nolockdown_baseline_0_paramsweep_n10000000.0_i0_N'
        base_folder = 'nolockdown_noage/'
        D = np.zeros((n_sims,sim_end+1))
        for i in range(n_sims):
            Deaths = pd.read_csv(base_folder + base_filename + str(i) + '_p0.029_m4_s22_deaths.csv', delimiter=' ', header=None)
            D[i,:]=Deaths
        D = D.mean(axis=0)
        print("Baseline 0: No intervention")
        print("# of deaths on lockdown day: ", D[t_lockdown_vec[0]])
        print("# of deaths today: ", D[today])
        print("# of deaths at the end of the simulation: ", D[sim_end])
        D = D/1000.
        plt.plot(D,color='gray',linestyle='-.')
        # Average simulations per age group over n_sims random seeds
        # (odd combo indices only: start offset by 1, step 2).
        for j in range(combo_start+1,combo_end,2):
            nb +=1
            D = np.zeros((n_sims,sim_end+1))
            for i in range(n_sims):
                if i < 50:
                    folder = folder1
                    filename = filename1
                else:
                    folder = folder2
                    filename = filename2
                Deaths = pd.read_csv(folder + filename + str(j) + '_N' + str(i%50) + '_p0.029_m4_s22_deaths.csv', delimiter=' ',header=None)
                D[i,:] = Deaths
            D = D.mean(axis=0)
            print("Age group: ", group_vec_age[nb-1])
            print("# of deaths on lockdown day: ", D[t_lockdown_vec[0]])
            print("# of deaths today: ", D[today])
            # Fix: the original printed D[today] here under an "end of the
            # simulation" label (and misspelled "simulation").
            print("# of deaths at the end of the simulation: ", D[sim_end])
            D = D/1000.
            plt.plot(D,color=color_list_shahin[nb-1])
        # Additional baselines - 50% and 100% of population stays home
        base2_filename = 'lombardy_distributed_agepolicy_nolockdown_baseline2_0_paramsweep_n10000000.0_i'
        base2_folder = 'nolockdown_fullisolation/'
        for j in range(2,4):
            D = np.zeros((n_sims,sim_end+1))
            for i in range(n_sims):
                Deaths = pd.read_csv(base2_folder + base2_filename + str(j) + '_N' + str(i) + '_p0.029_m4_s22_deaths.csv',delimiter=' ',header=None)
                D[i,:]=Deaths
            D = D.mean(axis=0)
            print("Baseline: ", j-1)
            print("# of deaths on lockdown day: ", D[t_lockdown_vec[0]])
            print("# of deaths today: ", D[today])
            print("# of deaths at the end of the simulation: ", D[sim_end])
            D = D/1000.
            plt.plot(D,color=color_list_add[j-2],linestyle='-.')
        plt.axvline(t_lockdown_vec[0], 0,color='red',linestyle='--')
        plt.legend(['Absence of\nintervention']+['Ages ' + str(group_vec_age[i]) for i in range(5)]+['All ages\n50\% confined','All ages\n100\% confined'], fontsize = 13)
        plt.ylim(0,400)
        plt.title(specific_title,fontsize=15)
        plt.ylabel('Total deaths (thousands)', fontsize=23)
        plt.xticks(fontsize=17)
        plt.yticks(fontsize=17)
        plt.xlabel('Days since patient zero', fontsize=23)
        return(plt)
    else:
        nb = 0
        # baseline
        base_filename = 'lombardy_distributed_agepolicy_nolockdown_baseline_0_paramsweep_n10000000.0_i0_N'
        base_folder = 'nolockdown_noage/'
        D=np.zeros((n_sims,sim_end+1))
        for i in range(n_sims):
            Deaths = pd.read_csv( base_folder + base_filename + str(i) + '_p0.029_m4_s22_deaths.csv',delimiter=' ',header=None)
            D[i,:]=Deaths
        D = D.mean(axis=0)
        print("Baseline 0: No intervention")
        # Fix: the original referenced `Infected_Trials`, which is never
        # defined in this function (NameError); the averaged series is `D`.
        print("# of deaths on lockdown day: ", D[t_lockdown_vec[0]])
        print("# of deaths today: ", D[today])
        print("# of deaths at the end of the simulation: ", D[sim_end])
        D = D/1000.
        plt.plot(D,color='gray',linestyle='-.')
        # Every combo index in [combo_start, combo_end).
        for j in range(combo_start,combo_end):
            nb = nb+1
            D = np.zeros((n_sims,sim_end+1))
            for i in range(n_sims):
                if i < 50:
                    folder = folder1
                    filename = filename1
                else:
                    folder = folder2
                    filename = filename2
                Deaths = pd.read_csv(folder + filename + str(j) + '_N' + str(i%50) + '_p0.029_m4_s22_deaths.csv',delimiter=' ',header=None)
                D[i,:] = Deaths
            D = D.mean(axis=0)
            print("Age group: ", group_vec_age[nb-1])
            print("# of deaths on lockdown day: ", D[t_lockdown_vec[0]])
            print("# of deaths today: ", D[today])
            print("# of deaths at the end of the simulation: ", D[sim_end])
            D = D/1000.
            plt.plot(D,color=color_list_shahin[nb-1])
        # new baseline - 50% population is isolated
        base2_filename = 'lombardy_distributed_agepolicy_nolockdown_baseline2_0_paramsweep_n10000000.0_i'
        base2_folder = 'nolockdown_fullisolation/'
        for j in range(2,4):
            D = np.zeros((n_sims,sim_end+1))
            for i in range(n_sims):
                Deaths = pd.read_csv(base2_folder + base2_filename + str(j) + '_N' + str(i) + '_p0.029_m4_s22_deaths.csv',delimiter=' ',header=None)
                D[i,:]=Deaths
            D = D.mean(axis=0)
            print("Baseline: ", j-1)
            # Fix: the original label said "% infected" in this deaths plot.
            print("# of deaths on lockdown day: ", D[t_lockdown_vec[0]])
            print("# of deaths today: ", D[today])
            print("# of deaths at the end of the simulation: ", D[sim_end])
            D = D/1000.
            plt.plot(D,color=color_list_add[j-2],linestyle='-.')
        plt.axvline(t_lockdown_vec[0], 0,color='red',linestyle='--')
        plt.ylim(0,400)
        plt.legend(['Absence of\nintervention']+['Ages ' + str(group_vec_age[i]) for i in range(5)]+['All ages\n50\% confined','All ages\n100\% confined'], fontsize = 13)
        plt.title(specific_title, fontsize=23)
        plt.xticks(fontsize=17)
        plt.yticks(fontsize=17)
        plt.ylabel('Total deaths (thousands)', fontsize=23)
        plt.xlabel('Days since patient zero', fontsize=23)
        return(plt)
```
## Functions to be used to plot four subgraphs in Figure 9
### Function to be used to plot the projected percentage of infected people in the population over time, when physical distancing is in place
### Figures 9(a) and 9(b)
```
def perc_infected_age_group_node_removal_lockdown(pop_size, group_vec_age, t_lockdown_vec, n_sims, sim_end, today, combo_start, combo_end, folder, filename, option, specific_title):
    """Plot the projected percentage of ever-infected people over time when a
    lockdown (physical distancing) is in place, per confined age group.

    Curves drawn, in order: the "no intervention" baseline, one curve per
    age-group confinement scenario selected by ``option``, and two extra
    baselines where 50% / 100% of the whole population is confined.

    Parameters
    ----------
    pop_size : int          total population, used to turn counts into percentages
    group_vec_age : list    age-group labels for the legend and console output
    t_lockdown_vec : list   the lockdown day index is taken from element 0
    n_sims : int            number of Monte-Carlo runs per scenario
    sim_end : int           last simulated day (each series has sim_end+1 points)
    today : int             day index reported as "today"
    combo_start, combo_end : range of scenario ids in the result files
    folder, filename : str  location / prefix of the per-scenario CSV files
    option : int            2 -> even scenario ids (50% of the age group confined),
                            1 -> odd scenario ids (100% confined),
                            anything else -> every id in the range
    specific_title : str    plot title

    Returns
    -------
    The matplotlib.pyplot module with the figure drawn on the current axes.
    """
    # Disease states whose counts add up to "ever infected".
    states = ('mild', 'severe', 'critical', 'recovered', 'deaths')
    suffix = '_p0.029_m4_s22_'

    def mean_perc_infected(stem):
        # Average "ever infected" percentage over n_sims runs; `stem` is the
        # file-path prefix up to (and excluding) the run index.
        totals = np.zeros((n_sims, sim_end + 1))
        for i in range(n_sims):
            counts = [pd.read_csv(stem + str(i) + suffix + state + '.csv',
                                  delimiter=' ', header=None)
                      for state in states]
            totals[i, :] = sum(counts)
        return totals.mean(axis=0) / pop_size * 100.

    def report(perc):
        print("% infected on lockdown day: ", perc[t_lockdown_vec[0]])
        print("% infected today: ", perc[today])
        print("% infected at the end of the simulation: ", perc[sim_end])

    # Scenario ids to plot: even ids are the 50%-confined runs, odd ids the
    # 100%-confined ones; any other option plots the whole range.
    # (This also fixes the original else branch, which crashed on the
    # undefined name `sim+end` and mislabeled groups via group_vec_age[j-1].)
    if option == 2:
        combos = range(combo_start, combo_end, 2)
    elif option == 1:
        combos = range(combo_start + 1, combo_end, 2)
    else:
        combos = range(combo_start, combo_end)

    # Baseline 0 - "no intervention" scenario.
    base_filename = 'lombardy_distributed_agepolicy_nolockdown_baseline_0_paramsweep_n10000000.0_i0_N'
    base_folder = 'nolockdown_noage/'
    perc = mean_perc_infected(base_folder + base_filename)
    print("Baseline 0: No intervention")
    report(perc)
    plt.plot(perc, color='gray', linestyle='-.')

    # One curve per confined age group.
    for nb, j in enumerate(combos, start=1):
        perc = mean_perc_infected(folder + filename + str(j) + '_N')
        print("Age group: ", group_vec_age[nb - 1])
        report(perc)
        plt.plot(perc, color=color_list_shahin[nb - 1])

    # Baselines 1 and 2 - 50% / 100% of the whole population confined.
    base2_filename = 'lombardy_distributed_agepolicy_nolockdown_baseline2_0_paramsweep_n10000000.0_i'
    base2_folder = 'nolockdown_fullisolation/'
    for j in range(2, 4):
        perc = mean_perc_infected(base2_folder + base2_filename + str(j) + '_N')
        print("Baseline: ", j - 1)
        report(perc)
        plt.plot(perc, color=color_list_add[j - 2], linestyle='-.')

    plt.axvline(t_lockdown_vec[0], 0, linestyle='--', color='red')
    plt.legend(['Absence of\nintervention']
               + ['Ages ' + str(group_vec_age[i]) for i in range(len(group_vec_age))]
               + ['All ages\n50\% confined', 'All ages\n100\% confined'],
               fontsize=13)
    plt.ylim(0, 100)
    plt.title(specific_title)
    plt.ylabel('Percentage of infected', fontsize=23)
    plt.xticks(fontsize=17)
    plt.yticks(fontsize=17)
    plt.xlabel('Days since patient zero', fontsize=23)
    return plt
```
### Function to be used to plot the projected number of deaths over time, when physical distancing is in place
### Figures 9(c) and 9(d)
```
def death_age_group_node_removal_lockdown(group_vec_age, t_lockdown_vec, n_sims, sim_end, today, combo_start, combo_end, folder, filename, option, specific_title):
    """Plot the projected cumulative number of deaths over time when a
    lockdown (physical distancing) is in place, per confined age group.

    Curves drawn, in order: the "no intervention" baseline, one curve per
    age-group confinement scenario selected by ``option``, and two extra
    baselines where 50% / 100% of the whole population is confined.

    Parameters
    ----------
    group_vec_age : list    age-group labels for the legend and console output
    t_lockdown_vec : list   the lockdown day index is taken from element 0
    n_sims : int            number of Monte-Carlo runs per scenario
    sim_end : int           last simulated day (each series has sim_end+1 points)
    today : int             day index reported as "today"
    combo_start, combo_end : range of scenario ids in the result files
    folder, filename : str  location / prefix of the per-scenario CSV files
    option : int            2 -> even scenario ids (50% of the age group confined),
                            1 -> odd scenario ids (100% confined),
                            anything else -> every id in the range
    specific_title : str    plot title

    Returns
    -------
    The matplotlib.pyplot module with the figure drawn on the current axes.
    """

    def mean_deaths(stem):
        # Average the cumulative-death series over n_sims runs; `stem` is the
        # file-path prefix up to (and excluding) the run index.
        totals = np.zeros((n_sims, sim_end + 1))
        for i in range(n_sims):
            totals[i, :] = pd.read_csv(stem + str(i) + '_p0.029_m4_s22_deaths.csv',
                                       delimiter=' ', header=None)
        return totals.mean(axis=0)

    def report(deaths):
        # FIX: the original option-0 branch printed the lockdown *day index*
        # (t_lockdown_vec[0]) instead of the death count on that day.
        print("# of deaths on lockdown day: ", deaths[t_lockdown_vec[0]])
        print("# of deaths today: ", deaths[today])
        print("# of deaths at the end of the simulation: ", deaths[sim_end])

    # Scenario ids to plot: even ids are the 50%-confined runs, odd ids the
    # 100%-confined ones; any other option plots the whole range.
    if option == 2:
        combos = range(combo_start, combo_end, 2)
    elif option == 1:
        combos = range(combo_start + 1, combo_end, 2)
    else:
        combos = range(combo_start, combo_end)

    # Baseline 0 - "no intervention" scenario.
    base_filename = 'lombardy_distributed_agepolicy_nolockdown_baseline_0_paramsweep_n10000000.0_i0_N'
    base_folder = 'nolockdown_noage/'
    deaths = mean_deaths(base_folder + base_filename)
    print("Baseline 0: No intervention")
    report(deaths)
    plt.plot(deaths / 1000., color='gray', linestyle='-.')  # plotted in thousands

    # One curve per confined age group.
    for nb, j in enumerate(combos, start=1):
        deaths = mean_deaths(folder + filename + str(j) + '_N')
        print("Age group: ", group_vec_age[nb - 1])
        report(deaths)
        plt.plot(deaths / 1000., color=color_list_shahin[nb - 1])

    # Baselines 1 and 2 - 50% / 100% of the whole population confined.
    base2_filename = 'lombardy_distributed_agepolicy_nolockdown_baseline2_0_paramsweep_n10000000.0_i'
    base2_folder = 'nolockdown_fullisolation/'
    for j in range(2, 4):
        deaths = mean_deaths(base2_folder + base2_filename + str(j) + '_N')
        print("Baseline: ", j - 1)
        report(deaths)
        plt.plot(deaths / 1000., color=color_list_add[j - 2], linestyle='-.')

    plt.axvline(t_lockdown_vec[0], 0, linestyle='--', color='red')
    plt.legend(['Absence of\nintervention']
               + ['Ages ' + str(group_vec_age[i]) for i in range(len(group_vec_age))]
               + ['All ages\n50\% confined', 'All ages\n100\% confined'],
               fontsize=13)
    plt.ylim(0, 400)
    plt.title(specific_title)
    plt.ylabel('Total deaths (thousands)', fontsize=23)
    plt.xticks(fontsize=17)
    plt.yticks(fontsize=17)
    plt.xlabel('Days since patient zero', fontsize=23)
    return plt
```
## Figure 8(a)
```
# Figure 8(a): mean time to isolation of 4.6 days, 50% of each age
# category confined (even scenario ids, option = 2).
t_lockdown_vec = [46]
# FIX: n_sims was not assigned in this cell and silently relied on a value
# left over from a previously-run cell (NameError if this cell runs first);
# every sibling figure cell uses 100.
n_sims = 100
sim_end = 119
today = 67
group_vec_age = ['0-14','15-29','30-49','50-69','70+']
combo_start = 0
combo_end = 10
pop_size = 10000000
filename1 = 'lombardy_distributed_agepolicy_0_paramsweep_n10000000.0_i'
filename2 = 'lombardy_distributed_agepolicy_1_paramsweep_n10000000.0_i'
folder1 = 'perc_policy_results/run1/'
folder2 = 'perc_policy_results/run2/'
option = 2
specific_title = ''
perc_infected_age_group_node_removal(pop_size, group_vec_age, t_lockdown_vec, n_sims, sim_end, today, combo_start, combo_end, folder1, folder2, filename1, filename2, option, specific_title)
```
## Figure 8(c)
```
# Figure 8(c): mean time to isolation of 4.6 days, 50% of each age
# category confined (even scenario ids, option = 2).
t_lockdown_vec = [46]
# FIX: n_sims was not assigned in this cell and silently relied on a value
# left over from a previously-run cell (NameError if this cell runs first);
# every sibling figure cell uses 100.
n_sims = 100
sim_end = 119
today = 67
group_vec_age = ['0-14','15-29','30-49','50-69','70+']
combo_start = 0
combo_end = 10
pop_size = 10000000
filename1 = 'lombardy_distributed_agepolicy_0_paramsweep_n10000000.0_i'
filename2 = 'lombardy_distributed_agepolicy_1_paramsweep_n10000000.0_i'
folder1 = 'perc_policy_results/run1/'
folder2 = 'perc_policy_results/run2/'
option = 2
#specific_title = 'Mean Time to Isolation = 4.6 days for all' + '\n50% stay home, per age group'
specific_title = ''
death_age_group_node_removal( group_vec_age, t_lockdown_vec, n_sims, sim_end, today, combo_start, combo_end, folder1, folder2, filename1, filename2, option, specific_title)
```
## Figure 8(b)
```
# Figure 8(b): mean time to isolation of 4.6 days, 100% of each age
# category confined (odd scenario ids are selected via option = 1).
# --- simulation timeline ---
t_lockdown_vec = [46]    # lockdown starts on day 46
sim_end = 119            # last simulated day
today = 67               # day index reported as "today"
n_sims = 100             # Monte-Carlo runs per scenario
# --- population / scenario selection ---
pop_size = 10000000
group_vec_age = ['0-14','15-29','30-49','50-69','70+']
combo_start = 0
combo_end = 10
option = 1               # odd ids: 100% of the age group stays home
# --- result-file locations ---
folder1 = 'perc_policy_results/run1/'
folder2 = 'perc_policy_results/run2/'
filename1 = 'lombardy_distributed_agepolicy_0_paramsweep_n10000000.0_i'
filename2 = 'lombardy_distributed_agepolicy_1_paramsweep_n10000000.0_i'
specific_title = ''
perc_infected_age_group_node_removal(pop_size, group_vec_age, t_lockdown_vec, n_sims, sim_end, today, combo_start, combo_end, folder1, folder2, filename1, filename2, option, specific_title)
```
## Figure 8(d)
```
# Figure 8(d): mean time to isolation of 4.6 days, 100% of each age
# category confined (odd scenario ids are selected via option = 1).
# --- simulation timeline ---
t_lockdown_vec = [46]    # lockdown starts on day 46
sim_end = 119            # last simulated day
today = 67               # day index reported as "today"
n_sims = 100             # Monte-Carlo runs per scenario
# --- population / scenario selection ---
pop_size = 10000000
group_vec_age = ['0-14','15-29','30-49','50-69','70+']
combo_start = 0
combo_end = 10
option = 1               # odd ids: 100% of the age group stays home
# --- result-file locations ---
folder1 = 'perc_policy_results/run1/'
folder2 = 'perc_policy_results/run2/'
filename1 = 'lombardy_distributed_agepolicy_0_paramsweep_n10000000.0_i'
filename2 = 'lombardy_distributed_agepolicy_1_paramsweep_n10000000.0_i'
specific_title = ''
death_age_group_node_removal(group_vec_age,t_lockdown_vec, n_sims, sim_end, today, combo_start, combo_end, folder1, folder2, filename1, filename2, option, specific_title)
```
## Figure 9(a)
```
# Figure 9(a): lockdown in place, mean time to isolation of 4.6 days,
# 50% of each age category confined (even scenario ids, option = 2).
# --- simulation timeline ---
t_lockdown_vec = [46]    # lockdown starts on day 46
sim_end = 119            # last simulated day
today = 67               # "today" = March 29 of 2020
n_sims = 100             # Monte-Carlo runs per scenario
# --- population / scenario selection ---
pop_size = 10000000
group_vec_age = ['0-14','15-29','30-49','50-69','70+']
combo_start = 0
combo_end = 10
option = 2               # even ids: 50% of the age group stays home
# --- result-file location ---
folder = 'lockdown_perc_policy_results/'
filename = 'lombardy_distributed_agepolicy_yeslockdown_0_paramsweep_n10000000.0_i'
specific_title = ''
perc_infected_age_group_node_removal_lockdown(pop_size, group_vec_age, t_lockdown_vec, n_sims, sim_end, today, combo_start, combo_end, folder, filename, option, specific_title)
```
## Figure 9(c)
```
# Figure 9(c): lockdown in place, mean time to isolation of 4.6 days,
# 50% of each age category confined (even scenario ids, option = 2).
# --- simulation timeline ---
t_lockdown_vec = [46]    # lockdown starts on day 46
sim_end = 119            # last simulated day
today = 67               # "today" = March 29 of 2020
n_sims = 100             # Monte-Carlo runs per scenario
# --- population / scenario selection ---
pop_size = 10000000
group_vec_age = ['0-14','15-29','30-49','50-69','70+']
combo_start = 0
combo_end = 10
option = 2               # even ids: 50% of the age group stays home
# --- result-file location ---
folder = 'lockdown_perc_policy_results/'
filename = 'lombardy_distributed_agepolicy_yeslockdown_0_paramsweep_n10000000.0_i'
specific_title = ''
death_age_group_node_removal_lockdown(group_vec_age, t_lockdown_vec, n_sims, sim_end, today, combo_start, combo_end, folder, filename, option, specific_title)
```
## Figure 9(b)
```
# Figure 9(b): lockdown in place, mean time to isolation of 4.6 days,
# 100% of each age category confined (odd scenario ids, option = 1),
# plus social distance increased by a factor of 2.
# --- simulation timeline ---
t_lockdown_vec = [46]    # lockdown starts on day 46
sim_end = 119            # last simulated day
today = 67               # day index reported as "today"
n_sims = 100             # Monte-Carlo runs per scenario
# --- population / scenario selection ---
pop_size = 10000000
group_vec_age = ['0-14','15-29','30-49','50-69','70+']
combo_start = 0
combo_end = 10
option = 1               # odd ids: 100% of the age group stays home
# --- result-file location ---
folder = 'lockdown_perc_policy_results/'
filename = 'lombardy_distributed_agepolicy_yeslockdown_0_paramsweep_n10000000.0_i'
specific_title = ''
perc_infected_age_group_node_removal_lockdown(pop_size, group_vec_age, t_lockdown_vec, n_sims, sim_end, today, combo_start, combo_end, folder, filename, option, specific_title)
```
## Figure 9(d)
```
# Figure 9(d): lockdown in place, mean time to isolation of 4.6 days,
# 100% of each age category confined (odd scenario ids, option = 1).
# --- simulation timeline ---
t_lockdown_vec = [46]    # lockdown starts on day 46
sim_end = 119            # last simulated day
today = 67               # day index reported as "today"
n_sims = 100             # Monte-Carlo runs per scenario
# --- population / scenario selection ---
pop_size = 10000000
group_vec_age = ['0-14','15-29','30-49','50-69','70+']
combo_start = 0
combo_end = 10
option = 1               # odd ids: 100% of the age group stays home
# --- result-file location ---
folder = 'lockdown_perc_policy_results/'
filename = 'lombardy_distributed_agepolicy_yeslockdown_0_paramsweep_n10000000.0_i'
specific_title = ''
death_age_group_node_removal_lockdown(group_vec_age, t_lockdown_vec, n_sims, sim_end, today, combo_start, combo_end, folder, filename, option, specific_title)
```
| github_jupyter |
```
# IPython magics: re-import modified modules before every cell execution so
# library edits are picked up without restarting the kernel.
%load_ext autoreload
%autoreload 2
```
# Sampling from a Bayesian network: an open problem
A Bayesian network encodes a probability distribution. It is often desirable to be able to sample from a Bayesian network. The most common way to do this is via forward sampling (also called prior sampling). It's a really simple algorithm that is trivial to implement: you just loop over the nodes in breadth-first order and sample a value for each node, conditioning on its parents (which have already been sampled).
The problem with forward sampling is that impossible situations can arise for some networks. Basically, forward sampling doesn't ensure that the produced samples are *valid*. The easiest way to grok this is via some examples.
## Example 1
```
import hedgehog as hh
import pandas as pd
def example_1():
    """Build a tiny Bayesian network where C has parents A and B, fitted on
    two rows in which A and B only ever appear perfectly correlated."""
    observations = pd.DataFrame(
        {
            'A': [True, False],
            'B': [True, False],
            'C': [True, False],
        }
    )
    net = hh.BayesNet(
        (['A', 'B'], 'C')
    )
    net.fit(observations)
    return net
# Fit the toy network, then display its structure and full joint distribution.
bn = example_1()
bn
bn.full_joint_dist()
```
The problem with forward sampling in this case is that if we sample from A and then B independently, then we can end up sampling pairs (A, B) that don't exist. This will raise an error when we condition P(C) on its parents.
In `hedgehog`, this will raise a `KeyError` when `sample` is called because the distribution that corresponds to `(A=False, B=True)` doesn't exist.
```
# Draw forward samples until the sampler trips over a parent combination
# that was never observed during fitting, which surfaces as a KeyError.
keep_sampling = True
while keep_sampling:
    try:
        bn.sample()
    except KeyError:
        print('Yep, told you.')
        keep_sampling = False
```
## Example 2
```
import hedgehog as hh
import pandas as pd
def example_2():
    """Build a chain A -> B -> C plus a collider (A, C) -> D, fitted on
    two rows of observations."""
    observations = pd.DataFrame(
        {
            'A': [1, 2],
            'B': [1, 1],
            'C': [1, 2],
            'D': [1, 1],
        }
    )
    net = hh.BayesNet(
        ('A', 'B'),
        ('B', 'C'),
        (['A', 'C'], 'D')
    )
    net.fit(observations)
    return net
# Fit the chain-plus-collider network and display its structure.
bn = example_2()
bn
```
In this case, a problem will occur if we sample `(A, 1)`, then `(B, 1)`, then `(C, 2)`. Indeed, the combination `(A, 1)` together with `(C, 2)` has never been seen, so there's no way of sampling `D`.
```
# Keep drawing forward samples until an unseen (A, C) parent combination
# makes sampling D impossible, which surfaces as a KeyError.
keep_sampling = True
while keep_sampling:
    try:
        bn.sample()
    except KeyError:
        print('Yep, told you.')
        keep_sampling = False
```
One way to circumvent these issues would be to sample from the full joint distribution. But this is too costly. Another way is to add a prior distribution by supposing that every combination occurred once, but that's not elegant.
Ideally we would like to have some way of doing forward sampling that only produces valid data. This is still an open question for me.
| github_jupyter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.