code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Adversarial-Robustness-Toolbox for scikit-learn ExtraTreesClassifier
# +
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.datasets import load_iris
import numpy as np
from matplotlib import pyplot as plt
from art.estimators.classification import SklearnClassifier
from art.attacks.evasion import ZooAttack
from art.utils import load_mnist
import warnings
warnings.filterwarnings('ignore')
# -
# ## 1 Training scikit-learn ExtraTreesClassifier and attacking with ART Zeroth Order Optimization attack
def get_adversarial_examples(x_train, y_train):
    """Fit an ExtraTreesClassifier on (x_train, y_train) and craft adversarial
    examples for x_train with ART's Zeroth Order Optimization (ZOO) attack.

    Returns:
        tuple: (x_train_adv, model) — the adversarial samples and the fitted
        scikit-learn classifier.
    """
    # Create and fit ExtraTreesClassifier
    model = ExtraTreesClassifier()
    model.fit(X=x_train, y=y_train)
    # Create ART classifier for scikit-learn ExtraTreesClassifier
    art_classifier = SklearnClassifier(model=model)
    # Create ART Zeroth Order Optimization attack
    # (untargeted black-box attack; nb_parallel=1, batch_size=1 keep it cheap
    # for this small 2-feature demo)
    zoo = ZooAttack(classifier=art_classifier, confidence=0.0, targeted=False, learning_rate=1e-1, max_iter=20,
                    binary_search_steps=10, initial_const=1e-3, abort_early=True, use_resize=False,
                    use_importance=False, nb_parallel=1, batch_size=1, variable_h=0.2)
    # Generate adversarial samples with ART Zeroth Order Optimization attack
    x_train_adv = zoo.generate(x_train)
    return x_train_adv, model
# ## 1.1 Utility functions
def get_data(num_classes):
    """Load the first two Iris features for classes 0..num_classes-1,
    spread classes 0 and 2 apart, and rescale both features toward [0, 1].

    Returns:
        tuple: (features, labels) as NumPy arrays.
    """
    features, labels = load_iris(return_X_y=True)
    keep = labels < num_classes
    features = features[keep][:, [0, 1]]
    labels = labels[keep]
    # Shift class 0 along feature 0 and class 2 along feature 1 so the
    # classes do not overlap as much in the 2-D projection.
    features[:, 0][labels == 0] = features[:, 0][labels == 0] * 2 - 3
    features[:, 1][labels == 2] = features[:, 1][labels == 2] * 2 - 2
    # Fixed-bound min/max style rescaling of each feature.
    features[:, 0] = (features[:, 0] - 4) / (9 - 4)
    features[:, 1] = (features[:, 1] - 1) / (6 - 1)
    return features, labels
def plot_results(model, x_train, y_train, x_train_adv, num_classes):
    """Per class, plot benign samples, adversarial samples, the
    benign->adversarial displacement vectors, and the model's predicted
    probability for that class as a contour background over the unit square.
    """
    fig, axs = plt.subplots(1, num_classes, figsize=(num_classes * 5, 5))
    colors = ['orange', 'blue', 'green']
    for i_class in range(num_classes):
        # Plot difference vectors (only for samples the attack actually moved)
        for i in range(y_train[y_train == i_class].shape[0]):
            x_1_0 = x_train[y_train == i_class][i, 0]
            x_1_1 = x_train[y_train == i_class][i, 1]
            x_2_0 = x_train_adv[y_train == i_class][i, 0]
            x_2_1 = x_train_adv[y_train == i_class][i, 1]
            if x_1_0 != x_2_0 or x_1_1 != x_2_1:
                axs[i_class].plot([x_1_0, x_2_0], [x_1_1, x_2_1], c='black', zorder=1)
        # Plot benign samples
        for i_class_2 in range(num_classes):
            axs[i_class].scatter(x_train[y_train == i_class_2][:, 0], x_train[y_train == i_class_2][:, 1], s=20,
                                 zorder=2, c=colors[i_class_2])
        axs[i_class].set_aspect('equal', adjustable='box')
        # Show predicted probability as contour plot over the unit square
        h = .01
        x_min, x_max = 0, 1
        y_min, y_max = 0, 1
        xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
        Z_proba = model.predict_proba(np.c_[xx.ravel(), yy.ravel()])
        Z_proba = Z_proba[:, i_class].reshape(xx.shape)
        im = axs[i_class].contourf(xx, yy, Z_proba, levels=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0],
                                   vmin=0, vmax=1)
        if i_class == num_classes - 1:
            # One shared colorbar to the right of the last subplot
            cax = fig.add_axes([0.95, 0.2, 0.025, 0.6])
            plt.colorbar(im, ax=axs[i_class], cax=cax)
        # Plot adversarial samples (red crosses, moved samples only)
        for i in range(y_train[y_train == i_class].shape[0]):
            x_1_0 = x_train[y_train == i_class][i, 0]
            x_1_1 = x_train[y_train == i_class][i, 1]
            x_2_0 = x_train_adv[y_train == i_class][i, 0]
            x_2_1 = x_train_adv[y_train == i_class][i, 1]
            if x_1_0 != x_2_0 or x_1_1 != x_2_1:
                axs[i_class].scatter(x_2_0, x_2_1, zorder=2, c='red', marker='X')
        axs[i_class].set_xlim((x_min, x_max))
        axs[i_class].set_ylim((y_min, y_max))
        axs[i_class].set_title('class ' + str(i_class))
        axs[i_class].set_xlabel('feature 1')
        axs[i_class].set_ylabel('feature 2')
# # 2 Example: Iris dataset
# ### legend
# - colored background: probability of class i
# - orange circles: class 1
# - blue circles: class 2
# - green circles: class 3
# - red crosses: adversarial samples for class i
# Two-class example: load, attack, visualize.
num_classes = 2
x_train, y_train = get_data(num_classes=num_classes)
x_train_adv, model = get_adversarial_examples(x_train, y_train)
plot_results(model, x_train, y_train, x_train_adv, num_classes)
# Three-class example: same pipeline with all three Iris classes.
num_classes = 3
x_train, y_train = get_data(num_classes=num_classes)
x_train_adv, model = get_adversarial_examples(x_train, y_train)
plot_results(model, x_train, y_train, x_train_adv, num_classes)
# # 3 Example: MNIST
# ## 3.1 Load and transform MNIST dataset
# +
# Load MNIST, flatten images to feature vectors, convert one-hot labels to
# class indices, and subsample to keep the (expensive) ZOO attack fast.
(x_train, y_train), (x_test, y_test), min_, max_ = load_mnist()
n_samples_train = x_train.shape[0]
n_features_train = x_train.shape[1] * x_train.shape[2] * x_train.shape[3]
n_samples_test = x_test.shape[0]
n_features_test = x_test.shape[1] * x_test.shape[2] * x_test.shape[3]
# Flatten each image into a single feature vector
x_train = x_train.reshape(n_samples_train, n_features_train)
x_test = x_test.reshape(n_samples_test, n_features_test)
# One-hot -> integer class labels
y_train = np.argmax(y_train, axis=1)
y_test = np.argmax(y_test, axis=1)
# Keep only the first 200 samples of each split
n_samples_max = 200
x_train = x_train[0:n_samples_max]
y_train = y_train[0:n_samples_max]
x_test = x_test[0:n_samples_max]
y_test = y_test[0:n_samples_max]
# -
# ## 3.2 Train ExtraTreesClassifier classifier
# Fix: `min_impurity_split` was removed in scikit-learn 1.0 and
# `max_features='auto'` was removed in 1.3. For classifiers 'auto' meant
# 'sqrt', so the replacement below is behavior-preserving while allowing
# this cell to run on current scikit-learn releases.
model = ExtraTreesClassifier(n_estimators=10, criterion='gini', max_depth=None, min_samples_split=2,
                             min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features='sqrt',
                             max_leaf_nodes=None, min_impurity_decrease=0.0,
                             bootstrap=False, oob_score=False, n_jobs=None, random_state=None, verbose=0,
                             warm_start=False, class_weight=None)
model.fit(X=x_train, y=y_train)
# ## 3.3 Create and apply Zeroth Order Optimization Attack with ART
# Wrap the fitted model for ART, then run the untargeted ZOO attack on both
# the training and test subsets.
art_classifier = SklearnClassifier(model=model)
zoo = ZooAttack(classifier=art_classifier, confidence=0.0, targeted=False, learning_rate=1e-1, max_iter=100,
                binary_search_steps=20, initial_const=1e-3, abort_early=True, use_resize=False,
                use_importance=False, nb_parallel=10, batch_size=1, variable_h=0.25)
x_train_adv = zoo.generate(x_train)
x_test_adv = zoo.generate(x_test)
# ## 3.4 Evaluate ExtraTreesClassifier on benign and adversarial samples
# For each of the four {train,test} x {benign,adversarial} combinations:
# accuracy, a sample image, and the predicted label of that sample.
score = model.score(x_train, y_train)
print("Benign Training Score: %.4f" % score)
plt.matshow(x_train[0, :].reshape((28, 28)))
plt.clim(0, 1)
prediction = model.predict(x_train[0:1, :])[0]
print("Benign Training Predicted Label: %i" % prediction)
score = model.score(x_train_adv, y_train)
print("Adversarial Training Score: %.4f" % score)
plt.matshow(x_train_adv[0, :].reshape((28, 28)))
plt.clim(0, 1)
prediction = model.predict(x_train_adv[0:1, :])[0]
print("Adversarial Training Predicted Label: %i" % prediction)
score = model.score(x_test, y_test)
print("Benign Test Score: %.4f" % score)
plt.matshow(x_test[0, :].reshape((28, 28)))
plt.clim(0, 1)
prediction = model.predict(x_test[0:1, :])[0]
print("Benign Test Predicted Label: %i" % prediction)
score = model.score(x_test_adv, y_test)
print("Adversarial Test Score: %.4f" % score)
plt.matshow(x_test_adv[0, :].reshape((28, 28)))
plt.clim(0, 1)
prediction = model.predict(x_test_adv[0:1, :])[0]
print("Adversarial Test Predicted Label: %i" % prediction)
| notebooks/classifier_scikitlearn_ExtraTreesClassifier.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %load_ext autoreload
# %autoreload 2
from childes_mi.utils.paths import DATA_DIR, FIGURE_DIR, ensure_dir
from childes_mi.utils.general import flatten,save_fig
from childes_mi.information_theory import model_fitting as mf
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from tqdm.autonotebook import tqdm
# Load precomputed mutual-information results for the longest sequences.
MI_DF = pd.read_pickle(DATA_DIR/'mi/phonbank_longest_seqs.pickle')
MI_DF[:3]
MI_DF.age_cohort_low.unique()
# Age cohorts in years; the last bin collects everything from 3 years up.
age_cohorts = [[0.5, 1],[1, 1.5], [1.5, 2], [2, 2.5], [2.5, 3], [3, 20]]
# Keep at most 10 rows per cohort (matches the 5-column figures below).
MI_DF_list = []
for [ac_low, ac_high] in age_cohorts:
    MI_DF_list.append(MI_DF[MI_DF.age_cohort_low == ac_low][:10])
MI_DF = pd.concat(MI_DF_list)
len(MI_DF)
# Pre-create the result columns that the fitting loop fills per row.
MI_DF = MI_DF.assign(**{i:np.nan for i in ['exp_results', 'pow_results', 'concat_results',
                                           'R2_exp', 'R2_concat', 'R2_power', 'AICc_exp',
                                           'AICc_concat', 'AICc_power', 'bestfitmodel', 'curvature', 'min_peak']})
# 'curvature' will hold arrays, so it must be an object column.
MI_DF['curvature'] = MI_DF['curvature'].astype(object)
n = 100  # max distance for computation
# Fit three decay models (power, exponential, power*exponential) to each
# row's shuffle-corrected MI signal and store fit statistics back on MI_DF.
for idx, row in tqdm(MI_DF.iterrows(), total=len(MI_DF)):
    # get signal: MI minus the shuffled-control MI
    sig = np.array(row.MI-row.shuff_MI)
    distances = row.distances
    sig = sig
    # NOTE(review): hard-coded 100 here; presumably should be `n` — confirm.
    dist_mask = distances < 100
    distances = distances[dist_mask]
    sig = sig[dist_mask]
    # fit models
    results_power, results_exp, results_pow_exp, best_fit_model = mf.fit_models(distances, sig)
    # get fit results
    R2_exp, R2_concat, R2_power, AICc_exp, \
        AICc_pow, AICc_concat = mf.fit_results(sig, distances,
                                               results_exp, results_power,
                                               results_pow_exp)
    # get model y on a log-spaced grid from 1 to n
    distances_mod = np.logspace(0,np.log10(n), base=10, num=1000)
    if best_fit_model == 'pow_exp':
        y_model = mf.get_y(mf.pow_exp_decay, results_pow_exp, distances_mod)
    elif best_fit_model == 'exp':
        y_model = mf.get_y(mf.exp_decay, results_exp, distances_mod)
    elif best_fit_model == 'pow':
        y_model = mf.get_y(mf.powerlaw_decay, results_power, distances_mod)
    # get curvature of model_y (computed on the log of the model curve)
    curvature_model = mf.curvature(np.log(y_model))
    # if the best fit model is pow_exp, then grab the min peak
    if best_fit_model == 'pow_exp':
        # get peaks of curvature: local minima found by comparing each point
        # with both neighbors; take the first one
        peaks = np.where((
            (curvature_model[:-1] < curvature_model[1:])[1:] & (curvature_model[1:] < curvature_model[:-1])[:-1]
        ))
        min_peak = peaks[0][0]
    else:
        min_peak = np.nan
    # save model fit results to MI_DF
    MI_DF.loc[idx, np.array(['exp_results', 'pow_results', 'concat_results',
                             'R2_exp', 'R2_concat', 'R2_power', 'AICc_exp',
                             'AICc_concat', 'AICc_power', 'bestfitmodel', 'curvature', 'min_peak'])] = [
        results_exp, results_power, results_pow_exp,
        R2_exp, R2_concat, R2_power, AICc_exp,
        AICc_concat, AICc_pow, best_fit_model,
        curvature_model, min_peak
    ]
# ### plot fits
from matplotlib import gridspec
yoff=-.20  # horizontal offset for the y-axis label (axes coordinates)
# One figure per age cohort, one subplot per recording (up to 5 columns),
# showing the MI signal and its best-fit decay model on log-log axes.
for [ac_low, ac_high] in age_cohorts:
    print(ac_low, ac_high)
    fig, axs = plt.subplots(ncols=5, nrows=1, figsize=(24, 4))
    MI_DF_cohort = MI_DF[MI_DF.age_cohort_low == ac_low]
    for axi, (idx, row) in enumerate(MI_DF_cohort.iterrows()):
        ax = axs.flatten()[axi]
        color = "k"
        # Shuffle-corrected MI signal, limited to distances < 100
        sig = np.array(row.MI - row.shuff_MI)
        distances = row.distances
        dist_mask = distances < 100
        distances = distances[dist_mask]
        sig = sig[dist_mask]
        # Log-space y-limits padded by 10% of the range
        sig_lims = np.log([np.min(sig[sig>0]), np.nanmax(sig)])
        sig_lims = [sig_lims[0] - (sig_lims[1]-sig_lims[0])/10,
                    sig_lims[1] + (sig_lims[1]-sig_lims[0])/10]
        if axi in [0, 5]:
            ax.set_ylabel("MI (bits)", labelpad=5, fontsize=24)
            ax.yaxis.set_label_coords(yoff, 0.5)
        distances_model = np.logspace(0, np.log10(distances[-1]), base=10, num=1000)
        # plot real data
        ax.scatter(distances, sig, alpha=1, s=40, color=color)
        # Evaluate the row's best-fitting model at the observed distances
        if row.bestfitmodel == "pow_exp":
            y_model = mf.get_y(mf.pow_exp_decay, row.concat_results, distances)
        elif row.bestfitmodel == "exp":
            y_model = mf.get_y(mf.exp_decay, row.exp_results, distances)
        elif row.bestfitmodel == "pow":
            y_model = mf.get_y(mf.powerlaw_decay, row.pow_results, distances)
        # plot modelled data
        ax.plot(distances, y_model, alpha=0.25, lw=10, color=color)
        ax.set_xlim([distances[0], distances[-1]])
        sig_lims[0] = np.log(10e-6)  # fixed lower bound, overrides the padding above
        ax.set_ylim([1e-4, 1])
        ax.tick_params(which="both", direction="in", labelsize=14, pad=10)
        ax.tick_params(which="major", length=10, width=3)
        ax.tick_params(which="minor", length=5, width=2)
        # NOTE(review): `basex`/`basey` were deprecated in matplotlib 3.3 and
        # removed in 3.5 (use `base=`); this cell needs an older matplotlib —
        # confirm the environment pin.
        ax.set_xscale("log", basex=10)
        ax.set_yscale("log", basey=10)
        ax.set_xticks([])
        for axis in ["top", "bottom", "left", "right"]:
            ax.spines[axis].set_linewidth(3)
            ax.spines[axis].set_color("k")
        if axi != 0:
            ax.set_yticks([])
        # Title: last three path components (without extension) + model name
        ax.set_title(
            "{} ({})".format(
                '/'.join(row.xml_loc.as_posix().split("/")[-3:])[:-4], row.bestfitmodel
            ),
            fontsize=14,
        )
        # Only the oldest cohort's figure gets x tick labels and an x label.
        if ac_low == 3:
            ax.set_xticks([1,10,100])
            ax.set_xticklabels(['1','10','100'])
            ax.set_xlabel("Distance (phones)", labelpad=5, fontsize=24)
        else:
            ax.set_xticks([1,10,100])
            ax.set_xticklabels([])
    fig.subplots_adjust(wspace = 0.05)
    if ac_low == 3:
        fig.suptitle("36+ months", fontsize=36, y=1.1)
    else:
        fig.suptitle("{}-{} months".format(int(ac_low*12), int(12*ac_high)), fontsize=36, y=1.1)
    ensure_dir(FIGURE_DIR/ 'mi' / 'indv')
    save_fig(FIGURE_DIR/ 'mi' / 'indv' /('phonbank_mi_longest_' + str(ac_low) + '_' + str(ac_high)))
    plt.show()
| notebooks/phonbank2/6.1-phonbank-MI_fit-plot_longest-seqs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
# Air-traffic data indexed by (airline, period YYYYMM, activity type);
# sorting the MultiIndex enables fast, unambiguous .loc lookups.
air = pd.read_csv("./Data/Air_Traffic_Passenger_Statistics.csv")
air.set_index(["Operating Airline","Activity Period","Activity Type Code"],inplace=True)
air.sort_index(inplace=True)
air.head(10)
# Full three-level key -> single row
air.loc[( 'Air France',200507,'Enplaned')]
# Partial keys -> sub-frames
air.loc[( 'Air France',200507)]
air.loc[('Air France')].head(10)
# Key plus column label -> scalar value
air.loc[('Air France',200507,'Enplaned'),'Passenger Count']
air.loc[('Air France',200508,'Deplaned'),'Passenger Count']
# Positional access by row number (and by column position 8)
air.iloc[20:25]
air.iloc[20:25,8]
| Multiindex_tabele_przestawne/Wyszukanie_Dannych_Po_Multiindexie_lab.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
# Count the files in each dataset split.
train_set_dir = "train_set/"
val_set_dir = "val_set/"
test_set_dir = "test_set/"
print(len(os.listdir(train_set_dir)))
# Fix: the original printed test_set_dir twice and never counted the
# validation split (copy-paste bug).
print(len(os.listdir(val_set_dir)))
print(len(os.listdir(test_set_dir)))
# -
# cd /data-output/Data/
# +
import os
# Dataset root plus per-split image directories and label CSVs.
bird_dir = "/data-output/Data"
x_train_path = os.path.join(bird_dir,"train_set")
x_test_path = os.path.join(bird_dir,"test_set")
x_valid_path = os.path.join(bird_dir,"val_set")
y_train_path = os.path.join(bird_dir,"train_pname_to_index.csv")
y_valid_path = os.path.join(bird_dir,"val_pname_to_index.csv")
# +
import pandas as pd
# Load the label CSVs (columns: img_path, label).
y_train = pd.read_csv(y_train_path,skiprows=0)
y_valid = pd.read_csv(y_valid_path,skiprows=0)
# -
y_train.head()
y_valid.head()
# +
# Labels are 1-based in the CSV; shift to 0-based class indices.
x_train_img_path = y_train["img_path"]
y_train = y_train["label"] - 1
x_valid_img_path = y_valid["img_path"]
y_valid = y_valid["label"] -1
print(x_train_img_path[:5])
print(y_train[:5])
print(x_valid_img_path[:5])
print(y_valid[:5])
# +
# Image-loading helper
import cv2
import numpy as np
def get_img(file_path, img_rows, img_cols):
    """Read an image from file_path, resize to (img_rows, img_cols), and
    return it as a float32 RGB array.

    Fix: cv2.resize takes dsize as (width, height) = (cols, rows); the
    original passed (img_rows, img_cols), which transposes the output for
    non-square targets. Harmless at 224x224 but wrong in general.
    """
    img = cv2.imread(file_path)
    img = cv2.resize(img, (img_cols, img_rows))
    if img.shape[2] == 1:
        # Replicate a single channel to three. (cv2.imread returns 3-channel
        # BGR by default, so this branch is normally not taken.)
        img = np.dstack([img, img, img])
    else:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = img.astype(np.float32)
    return img
# +
# Load the training set
x_train = []
for img_name in x_train_img_path:
    img = get_img(os.path.join(x_train_path,img_name),224,224)
    x_train.append(img)
x_train = np.array(x_train,np.float32)
# +
# Load the validation set
x_valid = []
for img_name in x_valid_img_path:
    img = get_img(os.path.join(x_valid_path,img_name),224,224)
    x_valid.append(img)
x_valid = np.array(x_valid,np.float32)
# +
# Load the prediction (test) set; sort file names by their numeric prefix
# so predictions line up with the expected submission order.
import re
x_test_img_path = os.listdir(x_test_path)
x_test_img_path = sorted(x_test_img_path,key = lambda i:int(re.match(r"(\d+)",i).group()))
print(x_test_img_path)
x_test = []
for img_name in x_test_img_path:
    img = get_img(os.path.join(x_test_path,img_name),224,224)
    x_test.append(img)
x_test = np.array(x_test,np.float32)
# +
print(x_train.shape)
print(y_train.shape)
print(x_valid.shape)
print(y_valid.shape)
print(x_test.shape)
# +
import matplotlib.pyplot as plt
# %matplotlib inline
# Sanity check: show the first training image (scaled to [0, 1]) and its label
plt.imshow(x_train[0]/255)
print(y_train[0])
# +
# Merge train and validation sets (they are re-split below with
# train_test_split)
X_train = np.concatenate((x_train,x_valid),axis=0)
Y_train = np.concatenate((y_train,y_valid),axis=0)
print(X_train.shape)
print(Y_train.shape)
print(x_test.shape)
# -
# Number of distinct classes in the training labels.
# Fix: the original bound this array to `sum`, shadowing the builtin.
unique_labels = np.unique(y_train)
n_classes = len(unique_labels)
# +
# Histogram showing the per-class frequency of the training labels
def plot_y_train_hist():
    """Plot a histogram of Y_train class frequencies; return the hist tuple."""
    fig = plt.figure(figsize=(15,5))
    ax = fig.add_subplot(1,1,1)
    hist = ax.hist(Y_train,bins=n_classes)
    ax.set_title("the frequentcy of each category sign")
    ax.set_xlabel("bird")
    ax.set_ylabel("frequency")
    plt.show()
    return hist
hist = plot_y_train_hist()
# +
# One-hot encode the labels
from keras.utils import np_utils
Y_train = np_utils.to_categorical(Y_train,n_classes)
print("Shape after one-hot encoding:",Y_train.shape)
# +
# Re-split the merged data into train/validation sets (70/30, fixed seed)
from sklearn.model_selection import train_test_split
x_train,x_valid,y_train,y_valid = train_test_split(X_train,Y_train,test_size=0.3,random_state=2019)
print(x_train.shape)
print(y_train.shape)
print(x_valid.shape)
print(y_valid.shape)
print(x_test.shape)
# +
# 导入开发需要的库
from keras import optimizers, Input
from keras.applications import imagenet_utils
from keras.preprocessing.image import ImageDataGenerator
from keras.models import *
from keras.layers import *
from keras.optimizers import *
from keras.callbacks import *
from keras.applications import *
from sklearn.preprocessing import *
from sklearn.model_selection import *
from sklearn.metrics import *
# +
# Plot the loss and accuracy curves recorded during training
import matplotlib.pyplot as plt
# %matplotlib inline
def history_plot(history_fit):
    """Plot accuracy (left) and loss (right) curves from a Keras History.

    Fixes: the loss plot's legend said "test" although it shows validation
    data, and plt.show() was called twice in a row.

    NOTE(review): newer Keras versions name these history keys
    "accuracy"/"val_accuracy" instead of "acc"/"val_acc" — confirm version.
    """
    plt.figure(figsize=(12,6))
    # summarize history for accuracy
    plt.subplot(121)
    plt.plot(history_fit.history["acc"])
    plt.plot(history_fit.history["val_acc"])
    plt.title("model accuracy")
    plt.ylabel("accuracy")
    plt.xlabel("epoch")
    plt.legend(["train", "valid"], loc="upper left")
    # summarize history for loss
    plt.subplot(122)
    plt.plot(history_fit.history["loss"])
    plt.plot(history_fit.history["val_loss"])
    plt.title("model loss")
    plt.ylabel("loss")
    plt.xlabel("epoch")
    plt.legend(["train", "valid"], loc="upper left")
    plt.show()
# -
# Fine-tune a pre-trained model
def fine_tune_model(model, optimizer, batch_size, epochs, freeze_num):
    '''
    Fine-tune the given pre-trained model in two stages and save the best
    weights to "<model.name>.hdf5".

    model: the model to tune (VGG16, ResNet50, ...)
    optimizer: optimizer used for the fine-tune-all-layers stage
    batch_size: batch size, typically 32/64/128
    epochs: number of epochs for the fine-tune-all-layers stage
    freeze_num: number of leading layers frozen during the first stage
    '''
    # datagen = ImageDataGenerator(
    #     rescale=1.255,
    #     # shear_range=0.2,
    #     # zoom_range=0.2,
    #     # horizontal_flip=True,
    #     # vertical_flip=True,
    #     # fill_mode="nearest"
    # )
    # datagen.fit(X_train)
    # Stage 1: train only the (randomly initialized) dense head,
    # with the first freeze_num layers frozen.
    for layer in model.layers[:freeze_num]:
        layer.trainable = False
    model.compile(optimizer=optimizer,
                  loss="categorical_crossentropy",
                  metrics=["accuracy"])
    # model.fit_generator(datagen.flow(x_train,y_train,batch_size=batch_size),
    #                     steps_per_epoch=len(x_train)/32,
    #                     epochs=3,
    #                     shuffle=True,
    #                     verbose=1,
    #                     datagen.flow(x_valid, y_valid))
    model.fit(x_train,
              y_train,
              batch_size=batch_size,
              epochs=3,
              shuffle=True,
              verbose=1,
              validation_data=(x_valid,y_valid)
              )
    print('Finish step_1')
    # Stage 2: unfreeze everything and fine-tune all layers, with LR
    # reduction on plateau, best-weights checkpointing, and early stopping
    # (all monitoring validation loss).
    for layer in model.layers[freeze_num:]:
        layer.trainable = True
    rc = ReduceLROnPlateau(monitor="val_loss",
                           factor=0.2,
                           patience=3,
                           verbose=1,
                           mode='min')
    model_name = model.name + ".hdf5"
    mc = ModelCheckpoint(model_name,
                         monitor="val_loss",
                         save_best_only=True,
                         verbose=1,
                         mode='min')
    el = EarlyStopping(monitor="val_loss",
                       min_delta=0,
                       patience=5,
                       verbose=1,
                       restore_best_weights=True)
    model.compile(optimizer=optimizer,
                  loss='categorical_crossentropy',
                  metrics=["accuracy"])
    # history_fit = model.fit_generator(datagen.flow(x_train,y_train,batch_size=32),
    #                                   steps_per_epoch=len(x_train)/32,
    #                                   epochs=epochs,
    #                                   shuffle=True,
    #                                   verbose=1,
    #                                   callbacks=[mc,rc,el],
    #                                   datagen.flow(x_valid, y_valid))
    history_fit = model.fit(x_train,
                            y_train,
                            batch_size=batch_size,
                            epochs=epochs,
                            shuffle=True,
                            verbose=1,
                            validation_data=(x_valid,y_valid),
                            callbacks=[mc,rc,el])
    print('Finish fine-tune')
    return history_fit
# +
# Bilinear VGG16 model definition
from keras import backend as K
def batch_dot(cnn_ab):
    # Batch-wise dot over the spatial axis: produces the channel x channel
    # Gram matrix used for bilinear pooling.
    return K.batch_dot(cnn_ab[0], cnn_ab[1], axes=[1, 1])
def sign_sqrt(x):
    # Signed square root; the small epsilon avoids a zero-gradient at 0.
    return K.sign(x) * K.sqrt(K.abs(x) + 1e-10)
def l2_norm(x):
    # L2-normalize along the feature axis.
    return K.l2_normalize(x, axis=-1)
def bilinear_vgg16(img_rows,img_cols):
    """Build a bilinear-pooling VGG16 classifier over n_classes categories."""
    input_tensor = Input(shape=(img_rows,img_cols,3))
    input_tensor = Lambda(imagenet_utils.preprocess_input)(input_tensor)
    model_vgg16 = VGG16(include_top=False, weights="imagenet",
                        input_tensor=input_tensor,pooling="avg")
    # Take the last conv feature map (layer -2, before the global pooling)
    cnn_out_a = model_vgg16.layers[-2].output
    cnn_out_shape = model_vgg16.layers[-2].output_shape  # (?, H, W, C)
    # Flatten spatial dims: (?, H*W, C)
    cnn_out_a = Reshape([cnn_out_shape[1]*cnn_out_shape[2],
                         cnn_out_shape[-1]])(cnn_out_a)
    cnn_out_b = cnn_out_a
    # Bilinear pooling of the feature map with itself, then flatten to C*C
    cnn_out_dot = Lambda(batch_dot)([cnn_out_a, cnn_out_b])
    cnn_out_dot = Reshape([cnn_out_shape[-1]*cnn_out_shape[-1]])(cnn_out_dot)
    sign_sqrt_out = Lambda(sign_sqrt)(cnn_out_dot)
    l2_norm_out = Lambda(l2_norm)(sign_sqrt_out)
    fc1 = Dense(1024,activation="relu",name="fc1")(l2_norm_out)
    dropout = Dropout(0.5)(fc1)
    output = Dense(n_classes, activation="softmax",name="output")(dropout)
    bvgg16_model = Model(inputs=model_vgg16.input, outputs=output,name="bvgg16")
    return bvgg16_model
# -
# Build the bilinear VGG16 model and inspect its layers
img_rows,img_cols = 224,224
bvgg16_model = bilinear_vgg16(img_rows,img_cols)
for i,layer in enumerate(bvgg16_model.layers):
    print(i,layer.name)
bvgg16_model.summary()
# +
# Import the EfficientNet module
from efficientnet.keras import EfficientNetB3
import keras.backend as K
# -
# Define an EfficientNet-based model
def efficient_model(img_rows,img_cols):
    """Build an EfficientNetB3 backbone with a dense head for n_classes."""
    K.clear_session()
    x = Input(shape=(img_rows,img_cols,3))
    x = Lambda(imagenet_utils.preprocess_input)(x)
    base_model = EfficientNetB3(input_tensor=x,weights="imagenet",include_top=False,pooling="avg")
    x = base_model.output
    x = Dense(1024,activation="relu",name="fc1")(x)
    x = Dropout(0.5)(x)
    predictions = Dense(n_classes,activation="softmax",name="predictions")(x)
    eB_model = Model(inputs=base_model.input,outputs=predictions,name="eB3")
    return eB_model
# Create the EfficientNet model and inspect its layers
img_rows,img_cols=224,224
eB_model = efficient_model(img_rows,img_cols)
for i,layer in enumerate(eB_model.layers):
    print(i,layer.name)
eB_model.summary()
# Fine-tune EfficientNetB3 (first stage freezes the leading 379 layers)
optimizer = optimizers.Adam(lr=0.0001)
batch_size = 32
epochs = 15
freeze_num = 379
eB_model_history = fine_tune_model(eB_model,optimizer,batch_size,epochs,freeze_num)
# Fine-tune bilinear VGG16 (first stage freezes the leading 26 layers)
optimizer = optimizers.Adam(lr=0.0001)
batch_size = 32
epochs = 6
freeze_num = 26
bvgg16_history = fine_tune_model(bvgg16_model,optimizer,batch_size,epochs,freeze_num)
history_plot(bvgg16_history)
# +
# Test section: load the saved bilinear VGG16 and predict a single image
# -
# cd /data-output
# ls
# cd Data
# ls
from keras.models import load_model
from keras.models import Model
import keras
import numpy as np
filepath = "bvgg16.hdf5"
# NOTE(review): this model contains Lambda layers (batch_dot, sign_sqrt,
# l2_norm); load_model may need custom_objects for them — confirm.
model = load_model(filepath)
model.summary()
for i in model.layers:
    print(i.name)
# Load one test image and run it through the model
imgpath = "test_set/1231.jpg"
test_img = keras.preprocessing.image.load_img(imgpath,target_size=(224,224,3))
test_img = keras.preprocessing.image.img_to_array(test_img)
test_img = test_img
# Add a batch dimension: (1, 224, 224, 3)
test_img = np.expand_dims(test_img, 0)
pred = model.predict(test_img)
# Highest probability and the corresponding class index
result = max(pred[0])
count = pred.argmax()
print(result)
print(count)
| IKB.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="copyright"
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] id="title"
# # Vertex client library: AutoML tabular classification model for batch prediction
#
# <table align="left">
# <td>
# <a href="https://colab.research.google.com/github/GoogleCloudPlatform/vertex-ai-samples/blob/master/notebooks/community/gapic/automl/showcase_automl_tabular_classification_batch.ipynb">
# <img src="https://cloud.google.com/ml-engine/images/colab-logo-32px.png" alt="Colab logo"> Run in Colab
# </a>
# </td>
# <td>
# <a href="https://github.com/GoogleCloudPlatform/vertex-ai-samples/blob/master/notebooks/community/gapic/automl/showcase_automl_tabular_classification_batch.ipynb">
# <img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo">
# View on GitHub
# </a>
# </td>
# </table>
# <br/><br/><br/>
# + [markdown] id="overview:automl"
# ## Overview
#
#
# This tutorial demonstrates how to use the Vertex client library for Python to create tabular classification models and do batch prediction using Google Cloud's [AutoML](https://cloud.google.com/vertex-ai/docs/start/automl-users).
# + [markdown] id="dataset:iris,lcn"
# ### Dataset
#
# The dataset used for this tutorial is the [Iris dataset](https://www.tensorflow.org/datasets/catalog/iris) from [TensorFlow Datasets](https://www.tensorflow.org/datasets/catalog/overview). This dataset does not require any feature engineering. The version of the dataset you will use in this tutorial is stored in a public Cloud Storage bucket. The trained model predicts the type of Iris flower species from a class of three species: setosa, virginica, or versicolor.
# + [markdown] id="objective:automl,training,batch_prediction"
# ### Objective
#
# In this tutorial, you create an AutoML tabular classification model from a Python script, and then do a batch prediction using the Vertex client library. You can alternatively create and deploy models using the `gcloud` command-line tool or online using the Google Cloud Console.
#
# The steps performed include:
#
# - Create a Vertex `Dataset` resource.
# - Train the model.
# - View the model evaluation.
# - Make a batch prediction.
#
# There is one key difference between using batch prediction and using online prediction:
#
# * Prediction Service: Does an on-demand prediction for the entire set of instances (i.e., one or more data items) and returns the results in real-time.
#
# * Batch Prediction Service: Does a queued (batch) prediction for the entire set of instances in the background and stores the results in a Cloud Storage bucket when ready.
# + [markdown] id="costs"
# ### Costs
#
# This tutorial uses billable components of Google Cloud (GCP):
#
# * Vertex AI
# * Cloud Storage
#
# Learn about [Vertex AI
# pricing](https://cloud.google.com/vertex-ai/pricing) and [Cloud Storage
# pricing](https://cloud.google.com/storage/pricing), and use the [Pricing
# Calculator](https://cloud.google.com/products/calculator/)
# to generate a cost estimate based on your projected usage.
# + [markdown] id="install_aip"
# ## Installation
#
# Install the latest version of Vertex client library.
# + id="install_aip"
import os
import sys
# Google Cloud Notebook requires --user installs into user site-packages
if os.path.exists("/opt/deeplearning/metadata/env_version"):
    USER_FLAG = "--user"
else:
    USER_FLAG = ""
# ! pip3 install -U google-cloud-aiplatform $USER_FLAG
# + [markdown] id="install_storage"
# Install the latest GA version of *google-cloud-storage* library as well.
# + id="install_storage"
# ! pip3 install -U google-cloud-storage $USER_FLAG
# + [markdown] id="restart"
# ### Restart the kernel
#
# Once you've installed the Vertex client library and Google *cloud-storage*, you need to restart the notebook kernel so it can find the packages.
# + id="restart"
if not os.getenv("IS_TESTING"):
    # Automatically restart kernel after installs
    import IPython
    app = IPython.Application.instance()
    app.kernel.do_shutdown(True)
# + [markdown] id="before_you_begin"
# ## Before you begin
#
# ### GPU runtime
#
# *Make sure you're running this notebook in a GPU runtime if you have that option. In Colab, select* **Runtime > Change Runtime Type > GPU**
#
# ### Set up your Google Cloud project
#
# **The following steps are required, regardless of your notebook environment.**
#
# 1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs.
#
# 2. [Make sure that billing is enabled for your project.](https://cloud.google.com/billing/docs/how-to/modify-project)
#
# 3. [Enable the Vertex APIs and Compute Engine APIs.](https://console.cloud.google.com/flows/enableapi?apiid=ml.googleapis.com,compute_component)
#
# 4. [The Google Cloud SDK](https://cloud.google.com/sdk) is already installed in Google Cloud Notebook.
#
# 5. Enter your project ID in the cell below. Then run the cell to make sure the
# Cloud SDK uses the right project for all the commands in this notebook.
#
# **Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands.
# + id="set_project_id"
PROJECT_ID = "[your-project-id]"  # @param {type:"string"}
# + id="autoset_project_id"
# Fall back to the gcloud default project when no ID was entered above.
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
    # Get your GCP project id from gcloud
    # shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null
    PROJECT_ID = shell_output[0]
    print("Project ID:", PROJECT_ID)
# + id="set_gcloud_project_id"
# ! gcloud config set project $PROJECT_ID
# + [markdown] id="region"
# #### Region
#
# You can also change the `REGION` variable, which is used for operations
# throughout the rest of this notebook. Below are regions supported for Vertex. We recommend that you choose the region closest to you.
#
# - Americas: `us-central1`
# - Europe: `europe-west4`
# - Asia Pacific: `asia-east1`
#
# You may not use a multi-regional bucket for training with Vertex. Not all regions provide support for all Vertex services. For the latest support per region, see the [Vertex locations documentation](https://cloud.google.com/vertex-ai/docs/general/locations)
# + id="region"
REGION = "us-central1"  # @param {type: "string"}
# + [markdown] id="timestamp"
# #### Timestamp
#
# If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append onto the name of resources which will be created in this tutorial.
# + id="timestamp"
from datetime import datetime
# Suffix appended to resource names to avoid collisions between runs/users.
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
# + [markdown] id="gcp_authenticate"
# ### Authenticate your Google Cloud account
#
# **If you are using Google Cloud Notebook**, your environment is already authenticated. Skip this step.
#
# **If you are using Colab**, run the cell below and follow the instructions when prompted to authenticate your account via oAuth.
#
# **Otherwise**, follow these steps:
#
# In the Cloud Console, go to the [Create service account key](https://console.cloud.google.com/apis/credentials/serviceaccountkey) page.
#
# **Click Create service account**.
#
# In the **Service account name** field, enter a name, and click **Create**.
#
# In the **Grant this service account access to project** section, click the Role drop-down list. Type "Vertex" into the filter box, and select **Vertex Administrator**. Type "Storage Object Admin" into the filter box, and select **Storage Object Admin**.
#
# Click Create. A JSON file that contains your key downloads to your local environment.
#
# Enter the path to your service account key as the GOOGLE_APPLICATION_CREDENTIALS variable in the cell below and run the cell.
# + id="gcp_authenticate"
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
# If on Google Cloud Notebook, then don't execute this code
if not os.path.exists("/opt/deeplearning/metadata/env_version"):
    if "google.colab" in sys.modules:
        # Colab: interactive OAuth flow
        from google.colab import auth as google_auth
        google_auth.authenticate_user()
    # If you are running this notebook locally, replace the string below with the
    # path to your service account key and run this cell to authenticate your GCP
    # account.
    elif not os.getenv("IS_TESTING"):
        # %env GOOGLE_APPLICATION_CREDENTIALS ''
        # Fix: the branch body is only a commented-out notebook magic, which
        # is a SyntaxError when this file runs as plain Python; `pass` keeps
        # the branch valid without changing behavior.
        pass
# + [markdown] id="bucket:batch_prediction"
# ### Create a Cloud Storage bucket
#
# **The following steps are required, regardless of your notebook environment.**
#
# This tutorial is designed to use training data that is in a public Cloud Storage bucket and a local Cloud Storage bucket for your batch predictions. You may alternatively use your own training data that you have stored in a local Cloud Storage bucket.
#
# Set the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization.
# + id="bucket"
BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"}
# + id="autoset_bucket"
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
# + [markdown] id="create_bucket"
# **Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
# + id="create_bucket"
# ! gsutil mb -l $REGION $BUCKET_NAME
# + [markdown] id="validate_bucket"
# Finally, validate access to your Cloud Storage bucket by examining its contents:
# + id="validate_bucket"
# ! gsutil ls -al $BUCKET_NAME
# + [markdown] id="setup_vars"
# ### Set up variables
#
# Next, set up some variables used throughout the tutorial.
# ### Import libraries and define constants
# + [markdown] id="import_aip:protobuf"
# #### Import Vertex client library
#
# Import the Vertex client library into our Python environment.
# + id="import_aip:protobuf"
import time
from google.cloud.aiplatform import gapic as aip
from google.protobuf import json_format
from google.protobuf.json_format import MessageToJson, ParseDict
from google.protobuf.struct_pb2 import Struct, Value
# + [markdown] id="aip_constants"
# #### Vertex constants
#
# Setup up the following constants for Vertex:
#
# - `API_ENDPOINT`: The Vertex API service endpoint for dataset, model, job, pipeline and endpoint services.
# - `PARENT`: The Vertex location root path for dataset, model, job, pipeline and endpoint resources.
# + id="aip_constants"
# API service endpoint
API_ENDPOINT = "{}-aiplatform.googleapis.com".format(REGION)
# Vertex location root path for your dataset, model and endpoint resources
PARENT = "projects/" + PROJECT_ID + "/locations/" + REGION
# + [markdown] id="automl_constants"
# #### AutoML constants
#
# Set constants unique to AutoML datasets and training:
#
# - Dataset Schemas: Tells the `Dataset` resource service which type of dataset it is.
# - Data Labeling (Annotations) Schemas: Tells the `Dataset` resource service how the data is labeled (annotated).
# - Dataset Training Schemas: Tells the `Pipeline` resource service the task (e.g., classification) to train the model for.
# + id="automl_constants:lcn"
# Tabular Dataset type
DATA_SCHEMA = "gs://google-cloud-aiplatform/schema/dataset/metadata/tables_1.0.0.yaml"
# Tabular Labeling type
LABEL_SCHEMA = (
"gs://google-cloud-aiplatform/schema/dataset/ioformat/table_io_format_1.0.0.yaml"
)
# Tabular Training task
TRAINING_SCHEMA = "gs://google-cloud-aiplatform/schema/trainingjob/definition/automl_tables_1.0.0.yaml"
# + [markdown] id="accelerators:prediction"
# #### Hardware Accelerators
#
# Set the hardware accelerators (e.g., GPU), if any, for prediction.
#
# Set the variable `DEPLOY_GPU/DEPLOY_NGPU` to use a container image supporting a GPU and the number of GPUs allocated to the virtual machine (VM) instance. For example, to use a GPU container image with 4 Nvidia Tesla K80 GPUs allocated to each VM, you would specify:
#
# (aip.AcceleratorType.NVIDIA_TESLA_K80, 4)
#
# For GPU, available accelerators include:
# - aip.AcceleratorType.NVIDIA_TESLA_K80
# - aip.AcceleratorType.NVIDIA_TESLA_P100
# - aip.AcceleratorType.NVIDIA_TESLA_P4
# - aip.AcceleratorType.NVIDIA_TESLA_T4
# - aip.AcceleratorType.NVIDIA_TESLA_V100
#
# Otherwise specify `(None, None)` to use a container image to run on a CPU.
# + id="accelerators:prediction"
if os.getenv("IS_TESTING_DEPOLY_GPU"):
DEPLOY_GPU, DEPLOY_NGPU = (
aip.AcceleratorType.NVIDIA_TESLA_K80,
int(os.getenv("IS_TESTING_DEPOLY_GPU")),
)
else:
DEPLOY_GPU, DEPLOY_NGPU = (aip.AcceleratorType.NVIDIA_TESLA_K80, 1)
# + [markdown] id="container:automl"
# #### Container (Docker) image
#
# For AutoML batch prediction, the container image for the serving binary is pre-determined by the Vertex prediction service. More specifically, the service will pick the appropriate container for the model depending on the hardware accelerator you selected.
# + [markdown] id="machine:prediction"
# #### Machine Type
#
# Next, set the machine type to use for prediction.
#
# - Set the variable `DEPLOY_COMPUTE` to configure the compute resources for the VM you will use for prediction.
# - `machine type`
# - `n1-standard`: 3.75GB of memory per vCPU.
# - `n1-highmem`: 6.5GB of memory per vCPU
# - `n1-highcpu`: 0.9 GB of memory per vCPU
# - `vCPUs`: number of \[2, 4, 8, 16, 32, 64, 96 \]
#
# *Note: You may also use n2 and e2 machine types for training and deployment, but they do not support GPUs*
# + id="machine:prediction"
if os.getenv("IS_TESTING_DEPLOY_MACHINE"):
MACHINE_TYPE = os.getenv("IS_TESTING_DEPLOY_MACHINE")
else:
MACHINE_TYPE = "n1-standard"
VCPU = "4"
DEPLOY_COMPUTE = MACHINE_TYPE + "-" + VCPU
print("Deploy machine type", DEPLOY_COMPUTE)
# + [markdown] id="tutorial_start:automl"
# # Tutorial
#
# Now you are ready to start creating your own AutoML tabular classification model.
# + [markdown] id="clients:automl,batch_prediction"
# ## Set up clients
#
# The Vertex client library works as a client/server model. On your side (the Python script) you will create a client that sends requests and receives responses from the Vertex server.
#
# You will use different clients in this tutorial for different steps in the workflow. So set them all up upfront.
#
# - Dataset Service for `Dataset` resources.
# - Model Service for `Model` resources.
# - Pipeline Service for training.
# - Job Service for batch prediction and custom training.
# + id="clients:automl,batch_prediction"
# client options same for all services
client_options = {"api_endpoint": API_ENDPOINT}
def create_dataset_client():
client = aip.DatasetServiceClient(client_options=client_options)
return client
def create_model_client():
client = aip.ModelServiceClient(client_options=client_options)
return client
def create_pipeline_client():
client = aip.PipelineServiceClient(client_options=client_options)
return client
def create_job_client():
client = aip.JobServiceClient(client_options=client_options)
return client
clients = {}
clients["dataset"] = create_dataset_client()
clients["model"] = create_model_client()
clients["pipeline"] = create_pipeline_client()
clients["job"] = create_job_client()
for client in clients.items():
print(client)
# + [markdown] id="dataset:tabular"
# ## Dataset
#
# Now that your clients are ready, your first step is to create a `Dataset` resource instance. This step differs from Vision, Video and Language. For those products, after the `Dataset` resource is created, one then separately imports the data, using the `import_data` method.
#
# For tabular, importing of the data is deferred until the training pipeline starts training the model. What do we do different? Well, first you won't be calling the `import_data` method. Instead, when you create the dataset instance you specify the Cloud Storage location of the CSV file or BigQuery location of the data table, which contains your tabular data as part of the `Dataset` resource's metadata.
#
# #### Cloud Storage
#
# `metadata = {"input_config": {"gcs_source": {"uri": [gcs_uri]}}}`
#
# The format for a Cloud Storage path is:
#
# gs://[bucket_name]/[folder(s)/[file]
#
# #### BigQuery
#
# `metadata = {"input_config": {"bigquery_source": {"uri": [gcs_uri]}}}`
#
# The format for a BigQuery path is:
#
# bq://[collection].[dataset].[table]
#
# Note that the `uri` field is a list, whereby you can input multiple CSV files or BigQuery tables when your data is split across files.
# + [markdown] id="data_preparation:tabular,u_dataset"
# ### Data preparation
#
# The Vertex `Dataset` resource for tabular has a couple of requirements for your tabular data.
#
# - Must be in a CSV file or a BigQuery query.
# + [markdown] id="data_import_format:lcn,u_dataset,csv"
# #### CSV
#
# For tabular classification, the CSV file has a few requirements:
#
# - The first row must be the heading -- note how this is different from Vision, Video and Language where the requirement is no heading.
# - All but one column are features.
# - One column is the label, which you will specify when you subsequently create the training pipeline.
# + [markdown] id="import_file:u_dataset,csv"
# #### Location of Cloud Storage training data.
#
# Now set the variable `IMPORT_FILE` to the location of the CSV index file in Cloud Storage.
# + id="import_file:iris,csv,lcn"
IMPORT_FILE = "gs://cloud-samples-data/tables/iris_1000.csv"
# + [markdown] id="quick_peek:tabular"
# #### Quick peek at your data
#
# You will use a version of the Iris dataset that is stored in a public Cloud Storage bucket, using a CSV index file.
#
# Start by doing a quick peek at the data. You count the number of examples by counting the number of rows in the CSV index file (`wc -l`) and then peek at the first few rows.
#
# You also need for training to know the heading name of the label column, which is saved as `label_column`. For this dataset, it is the last column in the CSV file.
# + id="quick_peek:tabular"
count = ! gsutil cat $IMPORT_FILE | wc -l
print("Number of Examples", int(count[0]))
print("First 10 rows")
# ! gsutil cat $IMPORT_FILE | head
heading = ! gsutil cat $IMPORT_FILE | head -n1
label_column = str(heading).split(",")[-1].split("'")[0]
print("Label Column Name", label_column)
if label_column is None:
raise Exception("label column missing")
# + [markdown] id="create_aip_dataset:tabular"
# ## Dataset
#
# Now that your clients are ready, your first step in training a model is to create a managed dataset instance, and then upload your labeled data to it.
#
# ### Create `Dataset` resource instance
#
# Use the helper function `create_dataset` to create the instance of a `Dataset` resource. This function does the following:
#
# 1. Uses the dataset client service.
# 2. Creates a Vertex `Dataset` resource (`aip.Dataset`), with the following parameters:
# - `display_name`: The human-readable name you choose to give it.
# - `metadata_schema_uri`: The schema for the dataset type.
# - `metadata`: The Cloud Storage or BigQuery location of the tabular data.
# 3. Calls the client dataset service method `create_dataset`, with the following parameters:
# - `parent`: The Vertex location root path for your `Database`, `Model` and `Endpoint` resources.
# - `dataset`: The Vertex dataset object instance you created.
# 4. The method returns an `operation` object.
#
# An `operation` object is how Vertex handles asynchronous calls for long running operations. While this step usually goes fast, when you first use it in your project, there is a longer delay due to provisioning.
#
# You can use the `operation` object to get status on the operation (e.g., create `Dataset` resource) or to cancel the operation, by invoking an operation method:
#
# | Method | Description |
# | ----------- | ----------- |
# | result() | Waits for the operation to complete and returns a result object in JSON format. |
# | running() | Returns True/False on whether the operation is still running. |
# | done() | Returns True/False on whether the operation is completed. |
# | canceled() | Returns True/False on whether the operation was canceled. |
# | cancel() | Cancels the operation (this may take up to 30 seconds). |
# + id="create_aip_dataset:tabular"
TIMEOUT = 90
def create_dataset(name, schema, src_uri=None, labels=None, timeout=TIMEOUT):
start_time = time.time()
try:
if src_uri.startswith("gs://"):
metadata = {"input_config": {"gcs_source": {"uri": [src_uri]}}}
elif src_uri.startswith("bq://"):
metadata = {"input_config": {"bigquery_source": {"uri": [src_uri]}}}
dataset = aip.Dataset(
display_name=name,
metadata_schema_uri=schema,
labels=labels,
metadata=json_format.ParseDict(metadata, Value()),
)
operation = clients["dataset"].create_dataset(parent=PARENT, dataset=dataset)
print("Long running operation:", operation.operation.name)
result = operation.result(timeout=TIMEOUT)
print("time:", time.time() - start_time)
print("response")
print(" name:", result.name)
print(" display_name:", result.display_name)
print(" metadata_schema_uri:", result.metadata_schema_uri)
print(" metadata:", dict(result.metadata))
print(" create_time:", result.create_time)
print(" update_time:", result.update_time)
print(" etag:", result.etag)
print(" labels:", dict(result.labels))
return result
except Exception as e:
print("exception:", e)
return None
result = create_dataset("iris-" + TIMESTAMP, DATA_SCHEMA, src_uri=IMPORT_FILE)
# + [markdown] id="dataset_id:result"
# Now save the unique dataset identifier for the `Dataset` resource instance you created.
# + id="dataset_id:result"
# The full unique ID for the dataset
dataset_id = result.name
# The short numeric ID for the dataset
dataset_short_id = dataset_id.split("/")[-1]
print(dataset_id)
# + [markdown] id="train_automl_model"
# ## Train the model
#
# Now train an AutoML tabular classification model using your Vertex `Dataset` resource. To train the model, do the following steps:
#
# 1. Create an Vertex training pipeline for the `Dataset` resource.
# 2. Execute the pipeline to start the training.
# + [markdown] id="create_pipeline:automl"
# ### Create a training pipeline
#
# You may ask, what do we use a pipeline for? You typically use pipelines when the job (such as training) has multiple steps, generally in sequential order: do step A, do step B, etc. By putting the steps into a pipeline, we gain the benefits of:
#
# 1. Being reusable for subsequent training jobs.
# 2. Can be containerized and ran as a batch job.
# 3. Can be distributed.
# 4. All the steps are associated with the same pipeline job for tracking progress.
#
# Use this helper function `create_pipeline`, which takes the following parameters:
#
# - `pipeline_name`: A human readable name for the pipeline job.
# - `model_name`: A human readable name for the model.
# - `dataset`: The Vertex fully qualified dataset identifier.
# - `schema`: The dataset labeling (annotation) training schema.
# - `task`: A dictionary describing the requirements for the training job.
#
# The helper function calls the `Pipeline` client service's method `create_pipeline`, which takes the following parameters:
#
# - `parent`: The Vertex location root path for your `Dataset`, `Model` and `Endpoint` resources.
# - `training_pipeline`: the full specification for the pipeline training job.
#
# Let's look now deeper into the *minimal* requirements for constructing a `training_pipeline` specification:
#
# - `display_name`: A human readable name for the pipeline job.
# - `training_task_definition`: The dataset labeling (annotation) training schema.
# - `training_task_inputs`: A dictionary describing the requirements for the training job.
# - `model_to_upload`: A human readable name for the model.
# - `input_data_config`: The dataset specification.
# - `dataset_id`: The Vertex dataset identifier only (non-fully qualified) -- this is the last part of the fully-qualified identifier.
# - `fraction_split`: If specified, the percentages of the dataset to use for training, test and validation. Otherwise, the percentages are automatically selected by AutoML.
# + id="create_pipeline:automl"
def create_pipeline(pipeline_name, model_name, dataset, schema, task):
dataset_id = dataset.split("/")[-1]
input_config = {
"dataset_id": dataset_id,
"fraction_split": {
"training_fraction": 0.8,
"validation_fraction": 0.1,
"test_fraction": 0.1,
},
}
training_pipeline = {
"display_name": pipeline_name,
"training_task_definition": schema,
"training_task_inputs": task,
"input_data_config": input_config,
"model_to_upload": {"display_name": model_name},
}
try:
pipeline = clients["pipeline"].create_training_pipeline(
parent=PARENT, training_pipeline=training_pipeline
)
print(pipeline)
except Exception as e:
print("exception:", e)
return None
return pipeline
# + [markdown] id="task_requirements:automl,tabular"
# ### Construct the task requirements
#
# Next, construct the task requirements. Unlike other parameters which take a Python (JSON-like) dictionary, the `task` field takes a Google protobuf Struct, which is very similar to a Python dictionary. Use the `json_format.ParseDict` method for the conversion.
#
# The minimal fields you need to specify are:
#
# - `prediction_type`: Whether we are doing "classification" or "regression".
# - `target_column`: The CSV heading column name for the column we want to predict (i.e., the label).
# - `train_budget_milli_node_hours`: The maximum time to budget (billed) for training the model, where 1000 = 1 hour.
# - `disable_early_stopping`: Whether True/False to let AutoML use its judgement to stop training early or train for the entire budget.
# - `transformations`: Specifies the feature engineering for each feature column.
#
# For `transformations`, the list must have an entry for each column. The outer key field indicates the type of feature engineering for the corresponding column. In this tutorial, you set it to `"auto"` to tell AutoML to automatically determine it.
#
# Finally, create the pipeline by calling the helper function `create_pipeline`, which returns an instance of a training pipeline object.
# + id="task_transformations:automl,tabular,iris"
TRANSFORMATIONS = [
{"auto": {"column_name": "sepal_width"}},
{"auto": {"column_name": "sepal_length"}},
{"auto": {"column_name": "petal_length"}},
{"auto": {"column_name": "petal_width"}},
]
# + id="task_requirements:automl,tabular,transformations"
PIPE_NAME = "iris_pipe-" + TIMESTAMP
MODEL_NAME = "iris_model-" + TIMESTAMP
task = Value(
struct_value=Struct(
fields={
"target_column": Value(string_value=label_column),
"prediction_type": Value(string_value="classification"),
"train_budget_milli_node_hours": Value(number_value=1000),
"disable_early_stopping": Value(bool_value=False),
"transformations": json_format.ParseDict(TRANSFORMATIONS, Value()),
}
)
)
response = create_pipeline(PIPE_NAME, MODEL_NAME, dataset_id, TRAINING_SCHEMA, task)
# + [markdown] id="pipeline_id:response"
# Now save the unique identifier of the training pipeline you created.
# + id="pipeline_id:response"
# The full unique ID for the pipeline
pipeline_id = response.name
# The short numeric ID for the pipeline
pipeline_short_id = pipeline_id.split("/")[-1]
print(pipeline_id)
# + [markdown] id="get_training_pipeline"
# ### Get information on a training pipeline
#
# Now get pipeline information for just this training pipeline instance. The helper function gets the job information for just this job by calling the the job client service's `get_training_pipeline` method, with the following parameter:
#
# - `name`: The Vertex fully qualified pipeline identifier.
#
# When the model is done training, the pipeline state will be `PIPELINE_STATE_SUCCEEDED`.
# + id="get_training_pipeline"
def get_training_pipeline(name, silent=False):
response = clients["pipeline"].get_training_pipeline(name=name)
if silent:
return response
print("pipeline")
print(" name:", response.name)
print(" display_name:", response.display_name)
print(" state:", response.state)
print(" training_task_definition:", response.training_task_definition)
print(" training_task_inputs:", dict(response.training_task_inputs))
print(" create_time:", response.create_time)
print(" start_time:", response.start_time)
print(" end_time:", response.end_time)
print(" update_time:", response.update_time)
print(" labels:", dict(response.labels))
return response
response = get_training_pipeline(pipeline_id)
# + [markdown] id="wait_training_complete"
# # Deployment
#
# Training the above model may take upwards of 30 minutes time.
#
# Once your model is done training, you can calculate the actual time it took to train the model by subtracting `end_time` from `start_time`. For your model, you will need to know the fully qualified Vertex Model resource identifier, which the pipeline service assigned to it. You can get this from the returned pipeline instance as the field `model_to_deploy.name`.
# + id="wait_training_complete"
while True:
response = get_training_pipeline(pipeline_id, True)
if response.state != aip.PipelineState.PIPELINE_STATE_SUCCEEDED:
print("Training job has not completed:", response.state)
model_to_deploy_id = None
if response.state == aip.PipelineState.PIPELINE_STATE_FAILED:
raise Exception("Training Job Failed")
else:
model_to_deploy = response.model_to_upload
model_to_deploy_id = model_to_deploy.name
print("Training Time:", response.end_time - response.start_time)
break
time.sleep(60)
print("model to deploy:", model_to_deploy_id)
# + [markdown] id="model_information"
# ## Model information
#
# Now that your model is trained, you can get some information on your model.
# + [markdown] id="evaluate_the_model:automl"
# ## Evaluate the Model resource
#
# Now find out how good the model service believes your model is. As part of training, some portion of the dataset was set aside as the test (holdout) data, which is used by the pipeline service to evaluate the model.
# + [markdown] id="list_model_evaluations:automl,lcn"
# ### List evaluations for all slices
#
# Use this helper function `list_model_evaluations`, which takes the following parameter:
#
# - `name`: The Vertex fully qualified model identifier for the `Model` resource.
#
# This helper function uses the model client service's `list_model_evaluations` method, which takes the same parameter. The response object from the call is a list, where each element is an evaluation metric.
#
# For each evaluation (you probably only have one) we then print all the key names for each metric in the evaluation, and for a small set (`logLoss` and `auPrc`) you will print the result.
# + id="list_model_evaluations:automl,lcn"
def list_model_evaluations(name):
response = clients["model"].list_model_evaluations(parent=name)
for evaluation in response:
print("model_evaluation")
print(" name:", evaluation.name)
print(" metrics_schema_uri:", evaluation.metrics_schema_uri)
metrics = json_format.MessageToDict(evaluation._pb.metrics)
for metric in metrics.keys():
print(metric)
print("logloss", metrics["logLoss"])
print("auPrc", metrics["auPrc"])
return evaluation.name
last_evaluation = list_model_evaluations(model_to_deploy_id)
# + [markdown] id="deploy:batch_prediction"
# ## Model deployment for batch prediction
#
# Now deploy the trained Vertex `Model` resource you created for batch prediction. This differs from deploying a `Model` resource for on-demand prediction.
#
# For online prediction, you:
#
# 1. Create an `Endpoint` resource for deploying the `Model` resource to.
#
# 2. Deploy the `Model` resource to the `Endpoint` resource.
#
# 3. Make online prediction requests to the `Endpoint` resource.
#
# For batch-prediction, you:
#
# 1. Create a batch prediction job.
#
# 2. The job service will provision resources for the batch prediction request.
#
# 3. The results of the batch prediction request are returned to the caller.
#
# 4. The job service will unprovision the resources for the batch prediction request.
# + [markdown] id="make_prediction"
# ## Make a batch prediction request
#
# Now do a batch prediction to your deployed model.
# + [markdown] id="make_test_items:automl,batch_prediction"
# ### Make test items
#
# You will use synthetic data as a test data items. Don't be concerned that we are using synthetic data -- we just want to demonstrate how to make a prediction.
# + id="make_test_items:automl,tabular,iris"
HEADING = "petal_length,petal_width,sepal_length,sepal_width"
INSTANCE_1 = "1.4,1.3,5.1,2.8"
INSTANCE_2 = "1.5,1.2,4.7,2.4"
# + [markdown] id="make_batch_file:automl,tabular"
# ### Make the batch input file
#
# Now make a batch input file, which you will store in your local Cloud Storage bucket. Unlike image, video and text, the batch input file for tabular is only supported for CSV. For CSV file, you make:
#
# - The first line is the heading with the feature (fields) heading names.
# - Each remaining line is a separate prediction request with the corresponding feature values.
#
# For example:
#
# "feature_1", "feature_2". ...
# value_1, value_2, ...
# + id="make_batch_file:automl,tabular"
import tensorflow as tf
gcs_input_uri = BUCKET_NAME + "/test.csv"
with tf.io.gfile.GFile(gcs_input_uri, "w") as f:
f.write(HEADING + "\n")
f.write(str(INSTANCE_1) + "\n")
f.write(str(INSTANCE_2) + "\n")
print(gcs_input_uri)
# ! gsutil cat $gcs_input_uri
# + [markdown] id="instance_scaling"
# ### Compute instance scaling
#
# You have several choices on scaling the compute instances for handling your batch prediction requests:
#
# - Single Instance: The batch prediction requests are processed on a single compute instance.
# - Set the minimum (`MIN_NODES`) and maximum (`MAX_NODES`) number of compute instances to one.
#
# - Manual Scaling: The batch prediction requests are split across a fixed number of compute instances that you manually specified.
# - Set the minimum (`MIN_NODES`) and maximum (`MAX_NODES`) number of compute instances to the same number of nodes. When a model is first deployed to the instance, the fixed number of compute instances are provisioned and batch prediction requests are evenly distributed across them.
#
# - Auto Scaling: The batch prediction requests are split across a scaleable number of compute instances.
#     - Set the minimum (`MIN_NODES`) number of compute instances to provision when a model is first deployed and to de-provision, and set the maximum (`MAX_NODES`) number of compute instances to provision, depending on load conditions.
#
# The minimum number of compute instances corresponds to the field `min_replica_count` and the maximum number of compute instances corresponds to the field `max_replica_count`, in your subsequent deployment request.
# + id="instance_scaling"
MIN_NODES = 1
MAX_NODES = 1
# + [markdown] id="make_batch_request:automl,lcn"
# ### Make batch prediction request
#
# Now that your batch of two test items is ready, let's do the batch request. Use this helper function `create_batch_prediction_job`, with the following parameters:
#
# - `display_name`: The human readable name for the prediction job.
# - `model_name`: The Vertex fully qualified identifier for the `Model` resource.
# - `gcs_source_uri`: The Cloud Storage path to the input file -- which you created above.
# - `gcs_destination_output_uri_prefix`: The Cloud Storage path that the service will write the predictions to.
# - `parameters`: Additional filtering parameters for serving prediction results.
#
# The helper function calls the job client service's `create_batch_prediction_job` method, with the following parameters:
#
# - `parent`: The Vertex location root path for Dataset, Model and Pipeline resources.
# - `batch_prediction_job`: The specification for the batch prediction job.
#
# Let's now dive into the specification for the `batch_prediction_job`:
#
# - `display_name`: The human readable name for the prediction batch job.
# - `model`: The Vertex fully qualified identifier for the `Model` resource.
# - `dedicated_resources`: The compute resources to provision for the batch prediction job.
# - `machine_spec`: The compute instance to provision. Use the variable you set earlier `DEPLOY_GPU != None` to use a GPU; otherwise only a CPU is allocated.
# - `starting_replica_count`: The number of compute instances to initially provision, which you set earlier as the variable `MIN_NODES`.
# - `max_replica_count`: The maximum number of compute instances to scale to, which you set earlier as the variable `MAX_NODES`.
# - `model_parameters`: Additional filtering parameters for serving prediction results. *Note*, image segmentation models do not support additional parameters.
# - `input_config`: The input source and format type for the instances to predict.
# - `instances_format`: The format of the batch prediction request file: `csv` only supported.
# - `gcs_source`: A list of one or more Cloud Storage paths to your batch prediction requests.
# - `output_config`: The output destination and format for the predictions.
# - `prediction_format`: The format of the batch prediction response file: `csv` only supported.
# - `gcs_destination`: The output destination for the predictions.
#
# This call is an asychronous operation. You will print from the response object a few select fields, including:
#
# - `name`: The Vertex fully qualified identifier assigned to the batch prediction job.
# - `display_name`: The human readable name for the prediction batch job.
# - `model`: The Vertex fully qualified identifier for the Model resource.
# - `generate_explanations`: Whether True/False explanations were provided with the predictions (explainability).
# - `state`: The state of the prediction job (pending, running, etc).
#
# Since this call will take a few moments to execute, you will likely get `JobState.JOB_STATE_PENDING` for `state`.
# + id="make_batch_request:automl,lcn"
BATCH_MODEL = "iris_batch-" + TIMESTAMP
def create_batch_prediction_job(
display_name,
model_name,
gcs_source_uri,
gcs_destination_output_uri_prefix,
parameters=None,
):
if DEPLOY_GPU:
machine_spec = {
"machine_type": DEPLOY_COMPUTE,
"accelerator_type": DEPLOY_GPU,
"accelerator_count": DEPLOY_NGPU,
}
else:
machine_spec = {
"machine_type": DEPLOY_COMPUTE,
"accelerator_count": 0,
}
batch_prediction_job = {
"display_name": display_name,
# Format: 'projects/{project}/locations/{location}/models/{model_id}'
"model": model_name,
"model_parameters": json_format.ParseDict(parameters, Value()),
"input_config": {
"instances_format": IN_FORMAT,
"gcs_source": {"uris": [gcs_source_uri]},
},
"output_config": {
"predictions_format": OUT_FORMAT,
"gcs_destination": {"output_uri_prefix": gcs_destination_output_uri_prefix},
},
"dedicated_resources": {
"machine_spec": machine_spec,
"starting_replica_count": MIN_NODES,
"max_replica_count": MAX_NODES,
},
}
response = clients["job"].create_batch_prediction_job(
parent=PARENT, batch_prediction_job=batch_prediction_job
)
print("response")
print(" name:", response.name)
print(" display_name:", response.display_name)
print(" model:", response.model)
try:
print(" generate_explanation:", response.generate_explanation)
except:
pass
print(" state:", response.state)
print(" create_time:", response.create_time)
print(" start_time:", response.start_time)
print(" end_time:", response.end_time)
print(" update_time:", response.update_time)
print(" labels:", response.labels)
return response
IN_FORMAT = "csv"
OUT_FORMAT = "csv" # [csv]
response = create_batch_prediction_job(
BATCH_MODEL, model_to_deploy_id, gcs_input_uri, BUCKET_NAME, None
)
# + [markdown] id="batch_job_id:response"
# Now get the unique identifier for the batch prediction job you created.
# + id="batch_job_id:response"
# The full unique ID for the batch job
batch_job_id = response.name
# The short numeric ID for the batch job
batch_job_short_id = batch_job_id.split("/")[-1]
print(batch_job_id)
# + [markdown] id="get_batch_prediction_job"
# ### Get information on a batch prediction job
#
# Use this helper function `get_batch_prediction_job`, with the following parameter:
#
# - `job_name`: The Vertex fully qualified identifier for the batch prediction job.
#
# The helper function calls the job client service's `get_batch_prediction_job` method, with the following parameter:
#
# - `name`: The Vertex fully qualified identifier for the batch prediction job. In this tutorial, you will pass it the Vertex fully qualified identifier for your batch prediction job -- `batch_job_id`
#
# The helper function will return the Cloud Storage path to where the predictions are stored -- `gcs_destination`.
# + id="get_batch_prediction_job"
def get_batch_prediction_job(job_name, silent=False):
    """Fetch a batch prediction job and report its status.

    Args:
        job_name: Fully qualified Vertex AI identifier of the batch
            prediction job.
        silent: When True, skip all printing and just return the result.

    Returns:
        Tuple of (Cloud Storage output URI prefix, job state).
    """
    response = clients["job"].get_batch_prediction_job(name=job_name)
    if silent:
        return response.output_config.gcs_destination.output_uri_prefix, response.state
    print("response")
    print(" name:", response.name)
    print(" display_name:", response.display_name)
    print(" model:", response.model)
    try:  # not all data types support explanations
        print(" generate_explanation:", response.generate_explanation)
    except Exception:
        # FIX: was a bare `except:`, which also swallows KeyboardInterrupt
        # and SystemExit; only field-access errors should be ignored here.
        pass
    print(" state:", response.state)
    print(" error:", response.error)
    gcs_destination = response.output_config.gcs_destination
    print(" gcs_destination")
    print(" output_uri_prefix:", gcs_destination.output_uri_prefix)
    return gcs_destination.output_uri_prefix, response.state
predictions, state = get_batch_prediction_job(batch_job_id)
# + [markdown] id="get_the_predictions:automl,lcn"
# ### Get Predictions
#
# When the batch prediction is done processing, the job state will be `JOB_STATE_SUCCEEDED`.
#
# Finally you view the predictions stored at the Cloud Storage path you set as output. The predictions will be in a CSV format, which you indicated at the time you made the batch prediction job, under a subfolder starting with the name `prediction`, and under that folder will be a file called `predictions*.csv`.
#
# Now display (cat) the contents. You will see multiple rows, one for each prediction.
#
# For each prediction:
#
# - The first four fields are the values (features) you did the prediction on.
# - The remaining fields are the confidence values, between 0 and 1, for each prediction.
# + id="get_the_predictions:automl,tabular"
def get_latest_predictions(gcs_out_dir):
    """Return the newest "prediction-*" subfolder under *gcs_out_dir*.

    The subfolder names embed a timestamp, so lexicographic comparison of
    the subfolder names picks the most recent run.

    NOTE(review): `folders` is produced by the gsutil cell magic below when
    run as a notebook; it is undefined when this file runs as a plain script.
    """
    # folders = !gsutil ls $gcs_out_dir
    latest = ""
    latest_subfolder = ""
    for folder in folders:
        subfolder = folder.split("/")[-2]
        # FIX: the original compared the bare subfolder name against `latest`,
        # which held a full gs:// path after the first match, so every later
        # folder won the comparison regardless of timestamp. Compare subfolder
        # names with each other instead.
        if subfolder.startswith("prediction-") and subfolder > latest_subfolder:
            latest_subfolder = subfolder
            latest = folder[:-1]  # drop the trailing "/" from the listing
    return latest
# Poll the batch prediction job once a minute until it reaches a terminal state.
while True:
    predictions, state = get_batch_prediction_job(batch_job_id, True)
    if state != aip.JobState.JOB_STATE_SUCCEEDED:
        print("The job has not completed:", state)
        # A failed job is terminal: stop polling and surface the failure.
        if state == aip.JobState.JOB_STATE_FAILED:
            raise Exception("Batch Job Failed")
    else:
        # Job succeeded: locate the newest prediction subfolder and
        # display its CSV output (via notebook shell magics).
        folder = get_latest_predictions(predictions)
        # ! gsutil ls $folder/prediction*.csv
        # ! gsutil cat $folder/prediction*.csv
        break
    time.sleep(60)
# + [markdown] id="cleanup"
# # Cleaning up
#
# To clean up all GCP resources used in this project, you can [delete the GCP
# project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial.
#
# Otherwise, you can delete the individual resources you created in this tutorial:
#
# - Dataset
# - Pipeline
# - Model
# - Endpoint
# - Batch Job
# - Custom Job
# - Hyperparameter Tuning Job
# - Cloud Storage Bucket
# + id="cleanup"
# Flags controlling which resources the cleanup pass removes.
delete_dataset = True
delete_pipeline = True
delete_model = True
delete_endpoint = True
delete_batchjob = True
delete_customjob = True
delete_hptjob = True
delete_bucket = True
# Each deletion is wrapped in try/except so a missing or already-deleted
# resource does not abort the rest of the cleanup; the `in globals()` guard
# skips identifiers that were never assigned in this session.
# Delete the dataset using the Vertex fully qualified identifier for the dataset
try:
    if delete_dataset and "dataset_id" in globals():
        clients["dataset"].delete_dataset(name=dataset_id)
except Exception as e:
    print(e)
# Delete the training pipeline using the Vertex fully qualified identifier for the pipeline
try:
    if delete_pipeline and "pipeline_id" in globals():
        clients["pipeline"].delete_training_pipeline(name=pipeline_id)
except Exception as e:
    print(e)
# Delete the model using the Vertex fully qualified identifier for the model
try:
    if delete_model and "model_to_deploy_id" in globals():
        clients["model"].delete_model(name=model_to_deploy_id)
except Exception as e:
    print(e)
# Delete the endpoint using the Vertex fully qualified identifier for the endpoint
try:
    if delete_endpoint and "endpoint_id" in globals():
        clients["endpoint"].delete_endpoint(name=endpoint_id)
except Exception as e:
    print(e)
# Delete the batch job using the Vertex fully qualified identifier for the batch job
try:
    if delete_batchjob and "batch_job_id" in globals():
        clients["job"].delete_batch_prediction_job(name=batch_job_id)
except Exception as e:
    print(e)
# Delete the custom job using the Vertex fully qualified identifier for the custom job
try:
    if delete_customjob and "job_id" in globals():
        clients["job"].delete_custom_job(name=job_id)
except Exception as e:
    print(e)
# Delete the hyperparameter tuning job using the Vertex fully qualified identifier for the hyperparameter tuning job
try:
    if delete_hptjob and "hpt_job_id" in globals():
        clients["job"].delete_hyperparameter_tuning_job(name=hpt_job_id)
except Exception as e:
    print(e)
if delete_bucket and "BUCKET_NAME" in globals():
# ! gsutil rm -r $BUCKET_NAME
| notebooks/community/gapic/automl/showcase_automl_tabular_classification_batch.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="z9ssb4OZX7XE" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="e8bac45e-715c-4a14-fa85-2fc271a731b8"
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

# Load the life-expectancy dataset from Google Drive.
data = pd.read_csv("/content/drive/MyDrive/LifeExpectancyData.csv")

# Feature columns start at index 4; earlier columns are identifiers.
x_cols = list(data.columns[4:])

# Target values, hoisted out of the loop: they are identical for every plot,
# so there is no reason to rebuild the array per column.
# NOTE(review): assumes the CSV's life-expectancy column is named "lf" -- confirm.
y = np.array(data["lf"].to_list())

# Plot life expectancy against every feature column, one figure per column.
for con in x_cols:
    x = np.array(data[con].to_list())
    plt.xlabel(con)
    plt.ylabel("Life Expectancy")
    plt.plot(x, y)
    print("\n")
    plt.show()
| assignment1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Test de extraccion de datos
# ## Utilizare como prueba la pagina Lonza: https://www.lonza.com/careers/job-search para la extraccion de los datos.
#
# Definimos nuestra URL seed aplicando los criterios de filtrado:
# Seed: https://www.lonza.com/careers/job-search?q=data,%20biotech,%20Biostatistics,%20%20clinical%20research&pg=1&rows=100&job_location_facet_sm=Switzerland%2c+Basel
# +
# IMPORTAMOS LAS LIBRERIAS QUE VAMOS UTILIZAR
from lxml import html
import requests
import pandas as pd
import datetime
import os # PARA CREAR EL DIRECTORIO
# +
# USER AGENT PARA PROTEGERNOS DE BANEOS
headers = {
"user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/71.0.3578.80 Chrome/71.0.3578.80 Safari/537.36",
}
# +
# URL SEED
seed = "https://www.lonza.com/careers/job-search?q=data,%20biotech,%20Biostatistics,%20%20clinical%20research&pg=1&rows=100&job_location_facet_sm=Switzerland%2c+Basel"
# -
# REQUERIMIENTO AL SERVIDOR
respuesta = requests.get(seed, headers=headers)
# +
respuesta
# -
# PARSEO DEL ARBOL HTML QUE RECIBO COMO RESPUESTA CON LXML
parser = html.fromstring(respuesta.text)
# + tags=[]
# Parse every job-title element by its CSS class and print its text.
list_job_names = parser.find_class('search-result-title')
for job_name in list_job_names:
    print(job_name.text_content())
len(list_job_names)
# + tags=[]
# Parse every location element by its CSS class and print its text.
list_locations = parser.find_class('search-result-content')
for location in list_locations:
    print(location.text_content())
len(list_locations)
# + tags=[]
# Convert the lxml elements to plain strings so the lists are easy to handle.
# text_content() pulls the element's text directly; list comprehensions
# replace the original manual append loops (same results, idiomatic form).
list_str_job_names = [str(job_name.text_content()) for job_name in list_job_names]
list_str_locations = [str(location.text_content()) for location in list_locations]
print(list_str_job_names, len(list_str_job_names))
print()
print(list_str_locations, len(list_str_locations))
# + tags=[]
# Collect every job posting's href and build absolute URLs.
list_links = parser.xpath("//div[@class='col-12 col-lg-6']/a/@href")
url_lonza = "https://www.lonza.com"
# str() turns the lxml smart-string into a plain str before concatenating;
# comprehension replaces the original manual append loop.
list_str_links = [url_lonza + str(link) for link in list_links]
print(list_str_links)
len(list_str_links)
# +
# CREANDO VARIABLE DE FECHA
date = datetime.date.today()
date
# + tags=[]
# CREANDO DATAFRAME CON LOS DATOS RECOPILADOS
lonza_dataframe = pd.DataFrame(data = { "Company": "LONZA",
"Data_Time": date,
"Job_Name": list_str_job_names,
"Location": list_str_locations,
"Links": list_str_links})
lonza_dataframe
# +
# Create an output directory named after today's date (YYYY-MM-DD).
# The long base path is now a single constant instead of being repeated
# in both branches (the two literals could silently drift apart).
date_dir = str(datetime.date.today())
base_dir = ('C:/Users/lecun/Dropbox/Mi PC (LAPTOP-URCP74CR)/Desktop/'
            'Job_Bot_CH/job_bot_ch/Victor Lecuna/Pruebas_Job_Bot_CH/')
directorio = base_dir + date_dir
if os.path.isdir(directorio):
    print('La carpeta existe.')
else:
    try:
        os.mkdir(directorio)
    except OSError:
        print("La creación del directorio %s falló" % directorio)
    else:
        print("Se ha creado el directorio: %s" % directorio)
# +
# CREANDO ARCHIVO CSV CON EL DATAFRAME
lonza_file = lonza_dataframe.to_csv('C:/Users/lecun/Dropbox/Mi PC (LAPTOP-URCP74CR)/Desktop/Job_Bot_CH/job_bot_ch/<NAME>/Pruebas_Job_Bot_CH/'
+ date_dir + '/LONZA_' + date_dir + '.csv')
# -
| Victor Lecuna/Pruebas_Job_Bot_CH/Script_Lonza.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import sys
import os
import sys
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
sys.path.append(module_path)
sys.path.append(os.path.join(os.path.join('..'), 'data/'))
from src.features.build import Provider, Dataset
import spacy
from spacy import displacy
import markovify
# _d = Dataset()
# -
_d = Dataset()
aesop = _d.df[_d.df['artist'] == 'aesop-rock']
aesop.head()
nlp = spacy.load('en_core_web_md')
# +
aesop.loc[:,'lyrics_split'] = aesop['lyrics_transform'].apply(lambda x: x.split('\n'))
# aesop.loc[:,'nlp'] = aesop['lyrics_split'].apply(lambda x: [nlp(i) for i in x])
song = aesop[aesop['song'] == 'abandon-all-hope']
lyrics = song['lyrics_transform']
# -
lyrics = [i for i in song['lyrics'].tolist()[0].split('\n')]
lyrics_join = "\n".join(lyrics)
doc = nlp(lyrics_join)
for token in doc:
print(token.text, token.lemma_, token.pos_, token.tag_, token.dep_,
token.shape_, token.is_alpha, token.is_stop)
for token in doc:
print(token.lemma_)
song_p = nlp(song['lyrics'].tolist()[0])
doc = song_p
for token in doc:
print(token.text, token.lemma_, token.pos_, token.tag_, token.dep_,
token.shape_, token.is_alpha, token.is_stop)
foo = markovify.NewlineText(song['lyrics'].tolist()[0])
help(foo.chain)
dir(song_p.vocab)
len([i for i in song_p.vocab.strings if i.isalpha])
dir(song_p)
from tensorflow.contrib import rnn
# FIX: `help(rnn.)` was a SyntaxError (dangling attribute access);
# inspect the module object itself instead.
help(rnn)
rnn.BidirectionalGridLSTMCell(128, num_frequency_blocks=[1, 1])
| notebooks/nlp_analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # List Comprehensions
#
#
# List comprehension is a pythonic way of writing code that can make your code more readable.
#
# Although they are a bit tricky to get used to, list comprehensions provide a
# concise way of generating lists and they can be very useful to speed up coding.
#
# +
nums = [1, 2, 3, 4, 5]

# square every element in nums
print([n ** 2 for n in nums])

# square every element in nums, keeping only the even elements
print([n ** 2 for n in nums if n % 2 == 0])
# -
# ## Simple Examples
# +
# Numbers between 1-50 divisible by 7: an arithmetic progression, so a
# stepped range produces the same list as the filtered comprehension.
print(list(range(0, 51, 7)))

# Numbers between 1-30 whose decimal representation contains the digit 3
print([n for n in range(0, 31) if '3' in str(n)])

# Count the whitespace characters in a string
sentence = "This is my sentence and it has many spaces"
print(sum(1 for ch in sentence if ch.isspace()))

# Words in the sentence with fewer than 4 letters (as a set)
print({w for w in sentence.split() if len(w) < 4})

# Length of each word in the sentence
print({w: len(w) for w in sentence.split()})
# -
# ## Less Simple Examples
# +
# Highest single digit (2-9) that each number between 1-10 is divisible by,
# built first with explicit nested loops (ascending divisors, so later
# overwrites leave the largest divisor in place) ...
dicta = dict()
for d in range(2, 10):
    for n in range(1, 11):
        if n % d == 0:
            dicta[str(n)] = d
print(dicta)
# ... and then as a single dict comprehension over the same ranges.
ans = {n: max(d for d in range(2, 10) if n % d == 0)
       for n in range(1, 11)
       if [d for d in range(2, 10) if n % d == 0]}
print(ans)
# +
# Convert every element of the nested lists to float.
l = [['40', '20', '10', '30'],
     ['100', '100', '100', '100']]

# map() inside a comprehension keeps the 2-D shape ...
print([list(map(float, row)) for row in l])
# ... while a double-loop comprehension yields one flat list.
print([float(item) for row in l for item in row])
# -
| .ipynb_checkpoints/List Comprehensions-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# ---
# ### Universidad de Costa Rica
# #### IE0405 - Modelos Probabilísticos de Señales y Sistemas
# ---
#
# # `Py2` - *Librerías de computación científica*
#
# > Con librerías externas es posible acceder a poderosas herramientas computacionales que hacen a Python comparable con otros programas de cálculo numérico, como Matlab, R, Mathematica y otros.
#
# ---
# ## Librerías especializadas
#
# Fuera de las librerías de ejemplos anteriores, que pertenecen a [The Python Standard Library](https://docs.python.org/3/library/), otras personas y organizaciones han creado poderosas librerías de aplicación específica. Entre ellas, algunas útiles para el estudio de la probabilidad, la estadística y el análisis de datos, entre otros.
# ---
# ## 2.1 - NumPy
#
# Según su [página oficial](https://numpy.org/),
#
# > NumPy es el paquete fundamental para la computación científica con Python; es una biblioteca de Python utilizada para trabajar con matrices.
#
# NumPy generalmente se importa bajo el np alias.
#
# alias: en Python los alias son un nombre alternativo para referirse a la misma cosa.
#
#
#
# **Nota 1**: La función `print()` muestra el resultado...
#
# **Nota 2**: Cree un alias con la palabra clave mientras importa: `import numpy as np`
# +
import numpy as np

# Build an ndarray from a plain Python list and display it.
arr = np.array([1, 2, 3, 4, 5])
print(arr)
# -
# ### Crear un objeto NumPy `ndarray`
# NumPy se usa para trabajar con matrices. Se llama al objeto de matriz en NumPy `ndarray`.
#
# Podemos crear un `ndarray` objeto NumPy utilizando la `array()`función
#
# **Nota 1:** `type ()`: esta función incorporada de Python nos dice el tipo de objeto que se le pasó. Al igual que en el código anterior, muestra que `arr` es `numpy.ndarray` tipo.
# +
import numpy as np

arr = np.array([1, 3, 5, 7, 9])
print(arr)
# type() confirms that np.array() produced a numpy.ndarray object.
print(type(arr))
# -
# ### Matrices tridimensionales
# Se pueden crear matrices en 3D, que podrían ser útiles para muchos casos, por ejemplo gracias a esta se puede representar un tensor.
#
# **Nota 1:** NumPy Arrays proporciona el `ndim` atributo que devuelve un entero que nos dice cuántas dimensiones tiene la matriz. Es decir, el número de dimensiones (ejes) de la matriz, no el número de filas.
# +
import numpy as np

# Arrays of increasing dimensionality: 0-D scalar through 3-D tensor.
a = np.array(42)
b = np.array([1, 2, 3, 4, 5])
c = np.array([[7, 7, 7], [6, 6, 6]])
d = np.array([[[24, 12, 86], [98, 9, 6]], [[1, 2, 3], [4, 5, 6]]])

# ndim reports how many dimensions (axes) each array has.
for nd_array in (a, b, c, d):
    print(nd_array.ndim)
# -
# ### Acceso a elementos de una matriz
# La indexación de matriz es lo mismo que acceder a un elemento de matriz.
#
# Puede acceder a un elemento de matriz haciendo referencia a su número de índice.
#
# Los índices en las matrices NumPy comienzan con 0, lo que significa que el primer elemento tiene índice 0, y el segundo tiene índice 1, etc.
#
# En el siguiente ejemplo se desea acceder al tercer elemento de la segunda fila.
#
# **Nota 1:** El primer número representa la primera dimensión, que contiene dos matrices, Desde seleccionamos 0, nos quedamos con la primera matriz.
#
# **Nota 2:** El segundo número representa la segunda dimensión, que también contiene dos matrices, Como seleccionamos 1, nos queda la segunda matriz.
#
# **Nota 3:** El tercer número representa la tercera dimensión, que contiene tres valores, Como seleccionamos 2, terminamos con el tercer valor.
# +
import numpy as np

# Same 2 x 2 x 3 array as before, built from a range instead of a literal.
# Index [0, 1, 2] selects: first block, second row, third value.
arr = np.arange(1, 13).reshape(2, 2, 3)
print(arr[0, 1, 2])
# -
# ### Tipos de datos en NumPy
# NumPy tiene algunos tipos de datos adicionales, y se refiere a los tipos de datos con un carácter, como `i` para enteros, `u` para enteros sin signo, etc.
#
# A continuación se muestra una lista de todos los tipos de datos en NumPy y los caracteres utilizados para representarlos.
# `i `- entero
#
# `b ` - booleano
#
# `u `- entero sin signo
#
# `f` - flotante
#
# `c` - flotante complejo
#
# `m` - timedelta
#
# `M` - fecha y hora
#
# `O` - objeto
#
# `S` - cadena
#
# `U` - cadena unicode
#
# `V` - fragmento de memoria fijo para otro tipo (nulo)
#
# **Nota 1:** El objeto de matriz NumPy tiene una propiedad llamada `dtype` que devuelve el tipo de datos de la matriz:
#
#
# +
import numpy as np

arr1 = np.array([[1, 2, 3, 4], [9, 8, 7, 6]])
arr2 = np.array(['apple', 'banana', 'cherry'])

# dtype: platform default integer for arr1, fixed-width unicode for arr2.
for sample in (arr1, arr2):
    print(sample.dtype)
# -
# ### Conversión de tipo de datos en matrices existentes
# La mejor manera de cambiar el tipo de datos de una matriz existente es hacer una copia de la matriz con el `astype()` método.
#
# La `astype()` función crea una copia de la matriz y le permite especificar el tipo de datos como parámetro.
#
# El tipo de datos se puede especificar usando una cadena, como `'f'`para flotante, `'i'`para entero, etc. o puede usar el tipo de datos directamente como `float`para flotante y intpara entero.
#
# **Nota1:** Para el ejemplo cambie el tipo de datos de flotante a entero utilizando `int` como valor de parámetro
#
# +
import numpy as np

# astype() copies the data into a new array with the requested dtype;
# each float is truncated toward zero.
source = np.array([1.1, 2.1, 3.1])
newarr = source.astype(int)
print(newarr)
print(newarr.dtype)
# -
# ### Arreglos iterativos
# Iterar significa pasar por elementos uno por uno.
#
# Como tratamos con matrices multidimensionales en numpy, podemos hacer esto usando el `for` bucle básico de python.
#
# **Nota:** En el siguiente ejemplo se debe iterar hasta los escalares:
# +
import numpy as np

arr = np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])

# ravel() yields every scalar of the 3-D array in row-major order,
# replacing the three nested loops with a single one.
for scalar in arr.ravel():
    print(scalar)
# -
# ---
# ## 2.2 - SciPy
#
# Según su [página oficial](https://www.scipy.org/),
#
# > SciPy es un ecosistema de software de código abierto basado en Python para matemáticas, ciencias e ingeniería.
#
# Y son parte de este ecosistema, de hecho, NumPy, Matplotlib y Pandas, que estudiaremos por separado.
#
# > SciPy se basa en NumPy, y para todas las necesidades básicas de manejo de arreglos puede usar las funciones de NumPy
#
# **Nota 1:** En primera instancia se debe importar scipy
#
# ` from scipy import some_module`
#
# ` some_module.some_function()`
# ### Polinomios
# Existen dos formas de tratar con polinomios 1-D en SciPy. El primero es usar la `poly1d` clase de NumPy. Esta clase acepta coeficientes o raíces polinómicas para inicializar un polinomio. El objeto polinomial puede manipularse en expresiones algebraicas, integrarse, diferenciarse y evaluarse. Incluso se imprime como un polinomio:
#
# La otra forma de manejar polinomios es como una matriz de coeficientes con el primer elemento de la matriz dando el coeficiente de la potencia más alta. Hay funciones explícitas para sumar, restar, multiplicar, dividir, integrar, diferenciar y evaluar polinomios representados como secuencias de coeficientes.
#
# **Notas:** En el siguiente ejemplo, se utilizan algunas funciones como integrar y derivar.
# FIX: this cell was pasted as a ">>> " REPL transcript, which is a
# SyntaxError when the notebook cell actually runs. Execute the statements
# directly and print each result instead.
from numpy import poly1d

p = poly1d([3, 4, 5])          # 3x^2 + 4x + 5
print(p)
print(p * p)                   # polynomial product
print(p.integ(k=6))            # antiderivative with integration constant 6
print(p.deriv())               # first derivative
# ### Funciones de vectorización
# Una de las características que proporciona NumPy es una clase **vectorize** para convertir una función Python ordinaria que acepta escalares y devuelve escalares en una "función vectorizada" con las mismas reglas de transmisión que otras funciones NumPy (es decir, las funciones universales o ufuncs). Por ejemplo, suponga que tiene una función Python denominada `addsubtract` definida como:
# FIX: the original cell was a ">>> "/"... " doctest transcript, which is a
# SyntaxError in a code cell -- so addsubtract was never actually defined
# and the np.vectorize cell below failed with a NameError.
def addsubtract(a, b):
    """Return a - b when a > b, otherwise a + b (scalar in, scalar out)."""
    if a > b:
        return a - b
    else:
        return a + b
# que define una función de dos variables escalares y devuelve un resultado escalar. La clase vectorize se puede usar para "vectorizar" esta función para que
import numpy as np

# np.vectorize lets the scalar function broadcast over array inputs.
vec_addsubtract = np.vectorize(addsubtract)
# print() so the result is visible when this runs as a script as well
# as in a notebook (where the bare expression would be displayed).
print(vec_addsubtract([0, 3, 6, 9], [1, 3, 5, 7]))
# ### Otras funciones útiles
# También hay varias otras funciones útiles que deben mencionarse. Para hacer el procesamiento de fase, las funciones **angle** y **unwrap** son útiles. Además, las funciones **linspace** y **logspace** devuelven muestras igualmente espaciadas en una escala lineal o logarítmica. Finalmente, es útil estar al tanto de las capacidades de indexación de NumPy. Debe mencionarse la función **select** que amplía la funcionalidad de **where** incluye múltiples condiciones y múltiples opciones. La convención de convocatoria es una forma vectorizada de la declaración múltiple *if*. Permite la construcción rápida de una función que devuelve una serie de resultados basados en una lista de condiciones.
#
# Cada elemento de la matriz de retorno se toma de la matriz en un correspondiente a la primera condición en `select(condlist, choicelist, default=0)numpy.selectchoicelistcondlist` eso es verdad. Por ejemplo:
import numpy as np

# FIX: the ">>> " REPL transcript was a SyntaxError in a code cell;
# run the statements directly and print the result.
x = np.arange(10)
condlist = [x < 3, x > 5]
choicelist = [x, x**2]
# np.select picks per element: x where x<3, x**2 where x>5, else 0.
print(np.select(condlist, choicelist))
# ### Integración
#
#
# El **scipy.integrate** subpaquete proporciona varias técnicas de integración, incluido un integrador de ecuaciones diferenciales ordinarias.
#
# **Nota 1**
# La función quadse proporciona para integrar una función de una variable entre dos puntos.
#
# Si la función para integrar toma parámetros adicionales, se pueden proporcionar en el argumento `args` .
#
# Suponga que se calcula la siguiente integral:
# I(a,b) = int (aX^2 + b) dx, evaluada de 0 a 1.
# FIX: ">>> " transcript converted to real code; the original also left the
# body of integrand() un-indented, so the cell could never run.
from scipy.integrate import quad

def integrand(x, a, b):
    """Integrand a*x**2 + b."""
    return a * x**2 + b

a = 2
b = 1
# I(a, b) = integral of (a*x^2 + b) dx from 0 to 1;
# quad returns (value, absolute-error-estimate).
I = quad(integrand, 0, 1, args=(a, b))
print(I)
# ### Variable aleatoria
#
# Hay dos clases de distribución general que se han implementado para encapsular variables aleatorias continuas y variables aleatorias discretas . Se han implementado más de 80 variables aleatorias continuas (RV) y 10 variables aleatorias discretas utilizando estas clases. Además de esto, el usuario final puede agregar fácilmente nuevas rutinas y distribuciones. (Si crea uno, por favor contribuya).
#
# Todas las funciones estadísticas se encuentran en el subpaquete **scipy.stats** y se puede obtener una lista bastante completa de estas funciones `info(stats)`. La lista de las variables aleatorias disponibles también se puede obtener de la cadena de documentación para el subpaquete de estadísticas.
#
# En la discusión a continuación, nos centramos principalmente en vehículos recreativos continuos. Casi todo también se aplica a variables discretas, pero señalamos algunas diferencias aquí: Puntos específicos para distribuciones discretas .
#
# En los ejemplos de código a continuación, asumimos que el **scipy.stats** paquete se importa como
#
# **Nota 1:**
#
# El siguiente ejemplo se calculará una función de distribución acumulativa se pueden calcular mediante `from scipy import stats` y `from scipy.stats import norm`
#
# **Nota 2:**
#
# Para el ejemplo dos se calcula una distribución uniforme usando `from scipy.stats import uniform`
#
# FIX: ">>> " REPL transcript converted to runnable code with printed results.
import numpy as np
from scipy import stats
from scipy.stats import norm

# CDF of the standard normal at -1, 0 and 1.
print(norm.cdf(np.array([-1., 0, 1])))

from scipy.stats import uniform
# CDF of a uniform distribution on [1, 5] (loc=1, scale=4).
print(uniform.cdf([0, 1, 2, 3, 4, 5], loc=1, scale=4))
# ---
# ## 2.3 - Matplotlib
#
# Según su [página oficial](https://matplotlib.org/),
#
# > Matplotlib es una biblioteca completa para crear visualizaciones estáticas, animadas e interactivas en Python.
#
# En esa primera aproximación a Matplotlib, estudiaremos gráficas bidimensionales estáticas.
#
# **Nota 1:** Para generar gráficas de funciones se debe utilizar:
#
# `import matplotlib.pyplot as plt`
#
# `import numpy as np`
# Plot sin(x) over two periods with a grid and labeled axes.
import matplotlib.pyplot as plt
from numpy import arange,sin,pi
# x samples from 0 to 4*pi in steps of 0.01
x=arange(0.0,4*pi,0.01)
y=sin(x)
plt.plot(x,y)
plt.grid(axis='both')
plt.ylabel('seno(x)')
plt.xlabel('X')
plt.show()
# +
import numpy as np
import matplotlib.pyplot as plt
x = np.linspace(0, 2, 100)
plt.plot(x, x, label='lineal') # Plot some data on the (implicit) axes.
plt.plot(x, x**2, label='cuadratica') # etc.
plt.plot(x, x**3, label='cubica')
plt.xlabel('x ')
plt.ylabel('y ')
plt.title("Algunas funciones")
plt.legend()
plt.show()
# -
# Plot of the exponential function on [0, 10].
import matplotlib.pyplot as plt
import numpy as np

def f(x):
    """Element-wise e**x."""
    return np.exp(x)

xs = np.linspace(0, 10, 100)
plt.plot(xs, f(xs))
plt.show()
# Plot of the natural logarithm on [1, 10].
import matplotlib.pyplot as plt
import numpy as np

def f(x):
    """Element-wise natural logarithm."""
    return np.log(x)

xs = np.linspace(1, 10, 100)
plt.plot(xs, f(xs))
plt.show()
# ### Coordenada de datos
# Comencemos con la coordenada más utilizada, el sistema de coordenadas de datos . Cada vez que agrega datos a los ejes, Matplotlib actualiza los límites de datos, más comúnmente actualizados con los métodos `set_xlim()` y `set_ylim()`. Por ejemplo, en la figura siguiente, los límites de datos se extienden de 0 a 10 en el eje xy de -1 a 1 en el eje y
# +
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
x = np.arange(0, 10, 0.005)
y = np.exp(-x/2.) * np.sin(2*np.pi*x)
fig, ax = plt.subplots()
ax.plot(x, y)
ax.set_xlim(0, 10)
ax.set_ylim(-1, 1)
plt.show()
# -
# ### Transformaciones combinadas
#
# Dibujar en espacios de coordenadas combinadas que mezclan ejes con coordenadas de datos es extremadamente útil, por ejemplo, para crear un tramo horizontal que resalta alguna región de los datos y pero se extiende a través del eje x independientemente de los límites de datos, nivel de panorámica o zoom, etc. . de hecho, estas líneas combinadas y vanos son tan útiles, que hemos construido en funciones para que sean fáciles de trama (véase `axhline(), axvline(), axhspan(), axvspan()`) pero con fines didácticos vamos a implementar la extensión horizontal aquí usando una transformación mezclado. Este truco solo funciona para transformaciones separables, como se ve en los sistemas de coordenadas cartesianas normales, pero no en transformaciones inseparables como el `PolarTransform`.
# +
import matplotlib.transforms as transforms
fig, ax = plt.subplots()
x = np.random.randn(1000)
ax.hist(x, 30)
ax.set_title(r'$\sigma=1 \/ \dots \/ \sigma=2$', fontsize=16)
# the x coords of this transformation are data, and the
# y coord are axes
trans = transforms.blended_transform_factory(
ax.transData, ax.transAxes)
# highlight the 1..2 stddev region with a span.
# We want x to be in data coordinates and y to
# span from 0..1 in axes coords
rect = mpatches.Rectangle((1, 0), width=1, height=1,
transform=trans, color='yellow',
alpha=0.5)
ax.add_patch(rect)
plt.show()
# -
# **Nota**
#
# Las transformaciones combinadas donde x está en coordenadas de datos e y en coordenadas de ejes es tan útil que tenemos métodos auxiliares para devolver las versiones que Matplotlib usa internamente para dibujar ticks, ticklabels, etc. Los métodos son `matplotlib.axes.Axes.get_xaxis_transform()y matplotlib.axes.Axes.get_yaxis_transform()`. Entonces, en el ejemplo anterior, la llamada a `blended_transform_factory()` se puede reemplazar por `get_xaxis_transform`.
# ---
# ### Más información
#
# * [Página web](https://www.google.com/)
# * Libro o algo
# * Tutorial [w3schools](https://www.w3schools.com/python/)
# ---
# ---
#
# **Universidad de Costa Rica**
#
# Facultad de Ingeniería
#
# Escuela de Ingeniería Eléctrica
#
# ---
| Py2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Training a RL Agent with Stable-Baselines3 Using a GEM Environment
#
# This notebook serves as an educational introduction to the usage of Stable-Baselines3 using a gym-electric-motor (GEM) environment. The goal of this notebook is to give an understanding of what Stable-Baselines3 is and how to use it to train and evaluate a reinforcement learning agent that can solve a current control problem of the GEM toolbox.
# ## 1. Installation
# Before you can start you need to make sure that you have both gym-electric-motor and Stable-Baselines3 installed. You can install both easily using pip:
#
# - ```pip install gym-electric-motor```
# - ```pip install stable-baselines3```
#
# Alternatively, you can install them and their latest developer version directly from GitHub:
#
# - https://github.com/upb-lea/gym-electric-motor
# - https://github.com/DLR-RM/stable-baselines3
#
# For this notebook, the following cell will do the job:
# !pip install -q git+https://github.com/upb-lea/gym-electric-motor.git git+https://github.com/DLR-RM/stable-baselines3.git
# ## 2. Setting up a GEM Environment
#
# The basic idea behind reinforcement learning is to create a so-called agent, that should learn by itself to solve a specified task in a given environment.
# This environment gives the agent feedback on its actions and reinforces the targeted behavior.
# In this notebook, the task is to train a controller for the current control of a *permanent magnet synchronous motor* (*PMSM*).
#
# In the following, the used GEM-environment is briefly presented, but this notebook does not focus directly on the detailed usage of GEM. If you are new to the used environment and interested in finding out what it does and how to use it, you should take a look at the [GEM cookbook](https://colab.research.google.com/github/upb-lea/gym-electric-motor/blob/master/examples/example_notebooks/GEM_cookbook.ipynb).
#
# The basic idea of the control setup from the GEM-environment is displayed in the following figure.
#
# 
#
# The agent controls the converter who converts the supply currents to the currents flowing into the motor - for the *PMSM*: $i_{sq}$ and $i_{sd}$
#
# In the continuous case, the agent's action equals a duty cycle which will be modulated into a corresponding voltage.
#
# In the discrete case, the agent's actions denote switching states of the converter at the given instant. Here, only a discrete amount of options are available. In this notebook, for the PMSM the *discrete B6 bridge converter* with six switches is utilized per default. This converter provides a total of eight possible actions.
#
# 
#
# The motor schematic is the following:
#
#
# 
#
# And the electrical ODEs for that motor are:
#
# <h3 align="center">
#
# <!-- $\frac{\mathrm{d}i_{sq}}{\mathrm{d}t} = \frac{u_{sq}-pL_d\omega_{me}i_{sd}-R_si_{sq}}{L_q}$
#
# $\frac{\mathrm{d}i_{sd}}{\mathrm{d}t} = \frac{u_{sd}-pL_q\omega_{me}i_{sq}-R_si_{sd}}{L_d}$
#
# $\frac{\mathrm{d}\epsilon_{el}}{\mathrm{d}t} = p\omega_{me}$
# -->
#
# $ \frac{\mathrm{d}i_{sd}}{\mathrm{d}t}=\frac{u_{sd} + p\omega_{me}L_q i_{sq} - R_s i_{sd}}{L_d} $ <br><br>
# $\frac{\mathrm{d} i_{sq}}{\mathrm{d} t}=\frac{u_{sq} - p \omega_{me} (L_d i_{sd} + \mathit{\Psi}_p) - R_s i_{sq}}{L_q}$ <br><br>
# $\frac{\mathrm{d}\epsilon_{el}}{\mathrm{d}t} = p\omega_{me}$
#
# </h3>
#
# The target for the agent is now to learn to control the currents. For this, a reference generator produces a trajectory that the agent has to follow.
# Therefore, it has to learn a function (policy) from given states, references and rewards to appropriate actions.
#
# For a deeper understanding of the used models behind the environment see the [documentation](https://upb-lea.github.io/gym-electric-motor/).
# Comprehensive learning material to RL is also [freely available](https://github.com/upb-lea/reinforcement_learning_course_materials).
# %matplotlib notebook
import numpy as np
from pathlib import Path
import gym_electric_motor as gem
from gym_electric_motor.reference_generators import \
MultipleReferenceGenerator,\
WienerProcessReferenceGenerator
from gym_electric_motor.visualization import MotorDashboard
from gym_electric_motor.core import Callback
from gym.spaces import Discrete, Box
from gym.wrappers import FlattenObservation, TimeLimit
from gym import ObservationWrapper
# +
# helper functions and classes
class RewardLogger(Callback):
    """Callback that tracks the reward collected during training.

    Per-step rewards are buffered, averaged into one value at every episode
    reset and finally written to ``rl_frameworks/saved_agents/EpisodeRewards.npy``
    when the environment is closed.
    """
    def __init__(self):
        self._step_rewards = []
        self._mean_episode_rewards = []
        out_dir = Path.cwd() / "rl_frameworks" / "saved_agents"
        out_dir.mkdir(parents=True, exist_ok=True)
        # public attribute: read later to load the saved reward curve
        self.fpath = out_dir / "EpisodeRewards.npy"
    def on_step_end(self):
        """Buffer the reward obtained in the current step."""
        # NOTE(review): reads the private attribute ``_reward`` of the wrapped
        # GEM environment -- confirm it stays available across versions.
        self._step_rewards.append(self._env._reward)
    def on_reset_begin(self):
        """Collapse the buffered step rewards into one episode mean."""
        self._mean_episode_rewards.append(np.mean(self._step_rewards))
        self._step_rewards = []
    def on_close(self):
        """Persist the per-episode mean rewards as a NumPy file."""
        np.save(self.fpath, np.array(self._mean_episode_rewards))
class FeatureWrapper(ObservationWrapper):
    """
    Wrapper class which wraps the environment to change its observation. Serves
    the purpose to improve the agent's learning speed.
    It changes epsilon to cos(epsilon) and sin(epsilon). This serves the purpose
    to have the angles -pi and pi close to each other numerically without losing
    any information on the angle.
    Additionally, this wrapper adds a new observation i_sd**2 + i_sq**2. This should
    help the agent to easier detect incoming limit violations.
    """
    def __init__(self, env, epsilon_idx, i_sd_idx, i_sq_idx):
        """
        Changes the observation space to fit the new features
        Args:
            env(GEM env): GEM environment to wrap
            epsilon_idx(integer): Epsilon's index in the observation array
            i_sd_idx(integer): I_sd's index in the observation array
            i_sq_idx(integer): I_sq's index in the observation array
        """
        super().__init__(env)
        self.EPSILON_IDX = epsilon_idx
        self.I_SQ_IDX = i_sq_idx
        self.I_SD_IDX = i_sd_idx
        low = self.env.observation_space.low
        high = self.env.observation_space.high
        # epsilon is replaced by (cos, sin) in [-1, 1]; the appended squared
        # current feature lives in [0, 1]
        new_low = np.concatenate(
            (low[:self.EPSILON_IDX], [-1.], low[self.EPSILON_IDX:], [0.]))
        new_high = np.concatenate(
            (high[:self.EPSILON_IDX], [1.], high[self.EPSILON_IDX:], [1.]))
        self.observation_space = Box(new_low, new_high)
    def observation(self, observation):
        """
        Gets called at each return of an observation. Adds the new features to the
        observation and removes original epsilon.
        """
        # epsilon is normalised to [-1, 1], hence the rescaling by pi
        eps = observation[self.EPSILON_IDX] * np.pi
        currents_squared = observation[self.I_SQ_IDX] ** 2 + observation[self.I_SD_IDX] ** 2
        return np.concatenate((
            observation[:self.EPSILON_IDX],
            [np.cos(eps), np.sin(eps)],
            observation[self.EPSILON_IDX + 1:],
            [currents_squared],
        ))
# +
# define motor arguments
# Electrical parameters of the simulated PMSM (see the motor ODEs above).
motor_parameter = dict(
    p=3,  # [p] = 1, nb of pole pairs
    r_s=17.932e-3,  # [r_s] = Ohm, stator resistance
    l_d=0.37e-3,  # [l_d] = H, d-axis inductance
    l_q=1.2e-3,  # [l_q] = H, q-axis inductance
    psi_p=65.65e-3,  # [psi_p] = Vs, magnetic flux of the permanent magnet
)
# supply voltage
u_supply = 350
# nominal and absolute state limitations
# Nominal values describe the regular operating range; the (larger) limit
# values are the hard bounds used by the environment.
nominal_values=dict(
    omega=4000*2*np.pi/60,  # rad/s, from 4000 rpm
    i=230,
    u=u_supply
)
limit_values=dict(
    omega=4000*2*np.pi/60,
    i=1.5*230,  # currents may exceed the nominal value by 50 %
    u=u_supply
)
# sampling interval
tau = 1e-5
# define maximal episode steps
max_eps_steps = 10000
# random uniform start for (i_sd-like, i_sq-like, epsilon) -- broadens the
# range of operating points the agent sees during training
motor_initializer={'random_init': 'uniform', 'interval': [[-230, 230], [-230, 230], [-np.pi, np.pi]]}
# reward = weighted sum of the current-tracking errors for i_sq and i_sd
reward_function=gem.reward_functions.WeightedSumOfErrors(
    reward_weights={'i_sq': 10, 'i_sd': 10},
    gamma=0.99,  # discount rate
    reward_power=1
)
# creating gem environment
env = gem.make(  # define a PMSM with discrete action space
    "Finite-CC-PMSM-v0",
    # visualize the results
    visualization=MotorDashboard(state_plots=['i_sq', 'i_sd'], reward_plot=True),
    # parameterize the PMSM and update limitations
    motor=dict(
        motor_parameter=motor_parameter,
        limit_values=limit_values,
        nominal_values=nominal_values,
        motor_initializer=motor_initializer,
    ),
    # define the random initialisation for load and motor
    load=dict(
        load_initializer={'random_init': 'uniform', },
    ),
    reward_function=reward_function,
    supply=dict(u_sup=u_supply),
    # define the duration of one sampling step
    tau=tau,
    ode_solver='euler',
)
# remove one action from the action space to help the agent speed up its training
# this can be done as both switching states (1,1,1) and (-1,-1,-1) - which are encoded
# by action 0 and 7 - both lead to the same zero voltage vector in alpha/beta-coordinates
env.action_space = Discrete(7)
# applying wrappers
# look up the observation indices needed by the FeatureWrapper
eps_idx = env.physical_system.state_names.index('epsilon')
i_sd_idx = env.physical_system.state_names.index('i_sd')
i_sq_idx = env.physical_system.state_names.index('i_sq')
# flatten the observation, add the engineered features and cap episode length
env = TimeLimit(
    FeatureWrapper(
        FlattenObservation(env),
        eps_idx, i_sd_idx, i_sq_idx
    ),
    max_eps_steps
)
# -
# ## 3. Training an Agent with Stable-Baselines3
# Stable-Baselines3 collects Reinforcement Learning algorithms implemented in Pytorch.
#
# Stable-Baselines3 is still a very new library with its current release being 0.9. That is why its collection of algorithms is not very large yet and most algorithms lack more advanced variants. However, its authors planned to broaden the available algorithms in the future. For currently available algorithms see their [documentation](https://stable-baselines3.readthedocs.io/en/master/guide/rl.html).
#
# To use an agent provided by Stable-Baselines3 your environment has to have a [gym interface](https://stable-baselines3.readthedocs.io/en/master/guide/custom_env.html).
# ### 3.1 Imports
# The environment in this control problem poses a discrete action space. Therefore, the [Deep Q-Network (DQN)](https://arxiv.org/abs/1312.5602) is a suitable agent.
# For the specific implementation of the DQN you can refer to [Stable-Baslines3's docs](https://stable-baselines3.readthedocs.io/en/master/modules/dqn.html).
#
# In this tutorial a multi-layer perceptron (MLP) is used. For this you have to import the DQN and the MlpPolicy. You can also see in the docs which gym spaces for the observation and the actions are supported. You might have to take that into account for your environment.
import matplotlib.pyplot as plt
from stable_baselines3 import DQN
from stable_baselines3.dqn import MlpPolicy
from pathlib import Path
# ### 3.2 Setting the parameters
# For the DQN algorithm you have to define a set of parameters. The policy_kwargs dictionary is a parameter which is directly given to the MlpPolicy. The net_arch key defines the network architecture of the MLP.
buffer_size = 200000  # number of old observation steps saved in the replay buffer
learning_starts = 10000  # memory warmup: steps collected before learning begins
train_freq = 1  # prediction network gets an update each train_freq's step
batch_size = 25  # mini batch size drawn at each update step
policy_kwargs = {
    'net_arch': [64, 64]  # hidden layer sizes of the MLP
}
exploration_fraction = 0.1  # fraction of training steps over which epsilon decays
target_update_interval = 1000  # target network gets updated each target_update_interval's step
gamma = 0.99  # discount factor
verbose = 1  # verbosity of stable-baselines' prints
# Additionally, you have to define how long your agent shall train. You can just set a concrete number of steps or use knowledge of the environment's temporal resolution to define an in-simulation training time. In this example, the agent is trained for five seconds which translates in this environment's case to 500000 steps.
tau = 1e-5  # environment sampling time in seconds
simulation_time = 5  # training duration in simulated seconds
# Round before truncating: the original ``int(simulation_time // tau)`` gave
# 499999 instead of the intended 500000, because 1e-5 has no exact binary
# floating point representation and 5 // 1e-5 floors to 499999.0.
nb_steps = int(round(simulation_time / tau))
# ### 3.3 Starting the training
# Once you've setup the environment and defined your parameters starting the training is nothing more than a one-liner. For each algorithm all you have to do is call its ```learn()``` function. However, you should note that the execution of the training can take a long time. Currently, Stable-Baselines3 does not provide any means of saving the training reward for later visualization. Therefore, a ```RewardLogger``` callback is used for this environment (see code a few cells above).
# Assemble the DQN agent with the hyperparameters defined above and train it.
# NOTE(review): no callback (e.g. an instance of the RewardLogger defined
# earlier) is registered here, so episode rewards are not logged by this
# cell as written -- verify how the logger is attached before running.
model = DQN(MlpPolicy, env, buffer_size=buffer_size, learning_starts=learning_starts, train_freq=train_freq,
            batch_size=batch_size, gamma=gamma, policy_kwargs=policy_kwargs,
            exploration_fraction=exploration_fraction, target_update_interval=target_update_interval,
            verbose=verbose)
model.learn(total_timesteps=nb_steps)
# ### 3.4 Saving the Model
# When the training has finished you can save the model your DQN has learned to reuse it later, e.g. for evaluation or if you want to continue your training. For this, each Stable-Baselines3 algorithm has a ```.save()``` function where you only have to specify your path.
# Persist the trained agent next to this notebook for later reuse.
log_path = Path.cwd() / "saved_agents"
log_path.mkdir(parents=True, exist_ok=True)
model.save(str(log_path / "TutorialAgent"))
# ## 4. Evaluating an Agent
# After you have trained your agent you would like to see how well it does on your control problem. For this you can look at a visual representation of your currents in a test trajectory or see how well your agent does in a test scenario.
# ### 4.1 Loading a Model (Optional)
# First, before you start your evaluation you have to load a trained agent or take the trained agent from above which is still saved in the variable ```model```. To load a trained agent you simply have to call the ```load()``` function of your algorithm with the respective path.
model = DQN.load(log_path / 'TutorialAgent')  # your agent
# ### 4.2 Taking a Look at the Mean Reward per Episode During Training
# The ```RewardLogger``` callback saved the mean reward per episode during training. There you can observe how the training reward grew over time and whether any problems occurred during training.
# NOTE(review): ``reward_logger`` is not instantiated anywhere in this
# notebook as shown -- a RewardLogger instance has to be created and
# registered with the environment for this file to exist. Verify before
# running this cell.
rewards = np.load(reward_logger.fpath)[1:]  # your training rewards (first entry skipped)
plt.grid(True)
plt.xlim(0, len(rewards))
plt.ylim(min(rewards), 0)
plt.yticks(np.arange(min(rewards), 1, 1.0))
# hide the y tick labels: the absolute scale is only qualitative here
plt.tick_params(axis='y', left=False, labelleft=False)
plt.xticks(np.arange(0, len(rewards), 10))
plt.xlabel('#Episode')
plt.ylabel('Mean Reward per Episode (Qualitative)')
plt.plot(rewards)
plt.show()
# ### 4.3 Taking a Look at a Test Trajectory
# You can take a look at a test trajectory to see how well your trained agent is able to control the currents to follow the test trajectory. For the agent to decide for an action given an observation you can just call its ```predict()``` function. The key deterministic is important so that the agent is not using a stochastic policy like epsilon greedy but is instead choosing an action greedily. The ```env.render()``` will then visualize the agent's and reference generator's trajectories as well as the reward.
# Roll out the greedy policy and render the motor dashboard live.
visualization_steps = int(9e4)  # currently this crashes for larger values
obs = env.reset()
for i in range(visualization_steps):
    # deterministic=True: act greedily instead of sampling epsilon-greedily
    action, _states = model.predict(obs, deterministic=True)
    obs, reward, done, _ = env.step(action)
    env.render()
    if done:
        obs = env.reset()
# ### 4.4 Calculating further evaluation parameters
# With the knowledge you acquired in the previous sections you are now able to train and evaluate any in Stable-Baselines3 available reinforcement learning algorithm. The code below should give you an example how to use the trained agent to calculate a mean reward and mean episode length over a specific amount of steps. For further questions you can always have a look at the documentation of gym-electric-motor and Stable-Baselines3 or raise an issue in their respective GitHub repositories.
# Estimate the mean reward per step and the mean episode length over a
# fixed number of greedy test steps.
test_steps = int(1e5)
cum_rew = 0
episode_step = 0
episode_lengths = []
# NOTE(review): ``obs`` carries over from the rendering cell above; consider
# an explicit env.reset() here for an independent evaluation run.
for i in range(test_steps):
    print(f"{i+1}", end='\r')  # lightweight progress indicator
    episode_step += 1
    action, _states = model.predict(obs, deterministic=True)
    obs, reward, done, _ = env.step(action)
    cum_rew += reward
    if done:
        episode_lengths.append(episode_step)
        episode_step = 0
        obs = env.reset()
print(f"The reward per step with {test_steps} steps was: {cum_rew/test_steps:.4f} ")
print(f"The average Episode length was: {round(np.mean(episode_lengths))} ")
| examples/reinforcement_learning_controllers/stable_baselines3_dqn_disc_pmsm_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="u41hQ8ysr2eO" colab_type="code" colab={}
import pandas as pd
import ast
import numpy as np
import tensorflow.compat.v1 as tf
import matplotlib.pyplot as plt
tf.disable_v2_behavior()
# + id="fPkwwf1zgCqz" colab_type="code" outputId="bc6ca78a-95d7-408a-8299-6ffeae2ed6e1" colab={"base_uri": "https://localhost:8080/", "height": 54}
from nltk.stem.snowball import SnowballStemmer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.model_selection import train_test_split
from google.colab import drive
drive.mount('/content/drive')
# + [markdown] id="TGD14q4agKHG" colab_type="text"
# **1. Load Data**
# + id="yp4Dov6JgHUs" colab_type="code" outputId="d4a0b4bf-5fd9-4dae-9b8a-c9a5128766b7" colab={"base_uri": "https://localhost:8080/", "height": 255}
# Load "The Movies Dataset": metadata, external id links and user ratings.
movies = pd. read_csv('/content/drive/My Drive/SWM/the-movies-dataset/movies_metadata.csv')
extlinks = pd.read_csv('/content/drive/My Drive/SWM/the-movies-dataset/links_small.csv')
ratings = pd.read_csv('/content/drive/My Drive/SWM/the-movies-dataset/ratings_small.csv')
extlinks.head()
# + id="BFYqy1CIoPF4" colab_type="code" outputId="f47a77a0-1185-49bf-aa19-629587c1b9b0" colab={"base_uri": "https://localhost:8080/", "height": 204}
credits = pd.read_csv('/content/drive/My Drive/SWM/the-movies-dataset/credits.csv')
keywords = pd.read_csv('/content/drive/My Drive/SWM/the-movies-dataset/keywords.csv')
keywords.head()
# + id="XBACoPaRjvhY" colab_type="code" colab={}
# drop three rows (presumably malformed entries in this dump) so the numeric
# id cast below succeeds -- TODO confirm against the dataset version used
movies = movies.drop([19730, 29503, 35587])
# keep only movies with a valid TMDB id, as plain ints
extlinks = extlinks[extlinks['tmdbId'].notnull()]['tmdbId'].astype('int')
# + id="DMfZbhlD2wWK" colab_type="code" colab={}
# 'genres' is stored as a stringified list of dicts; parse it and keep only
# the genre names.
movies['genres'] = movies['genres'].fillna('[]').apply(ast.literal_eval).apply(lambda genres: [genre['name'] for genre in genres] if isinstance(genres, list) else [])
# + id="Kig6rw9apH-p" colab_type="code" colab={}
# align the id dtypes, then attach cast/crew and keywords to each movie
movies['id'] = movies['id'].astype('int')
keywords['id'] = keywords['id'].astype('int')
credits['id'] = credits['id'].astype('int')
movies = movies.merge(credits, on='id')
movies = movies.merge(keywords, on='id')
# + id="lOoKE2ABj0po" colab_type="code" outputId="dfdf74c1-b4d5-47e1-a23d-4be341ece266" colab={"base_uri": "https://localhost:8080/", "height": 34}
# restrict to the "small" subset of movies referenced by links_small.csv
movies_filtered = movies[movies['id'].isin(extlinks)]
movies_filtered.head()
movies_filtered.shape
# + [markdown] id="bukAbWmp80JN" colab_type="text"
# **2. Extract Meta Data (Cast, Crew and Keywords)**
# + id="jccg94aYrYaG" colab_type="code" outputId="afbd7329-b977-4bd8-fc80-36a026eb6358" colab={"base_uri": "https://localhost:8080/", "height": 323}
# cast/crew/keywords are stringified Python literals -> parse to real lists
movies_filtered['cast'] = movies_filtered['cast'].apply(ast.literal_eval)
movies_filtered['crew'] = movies_filtered['crew'].apply(ast.literal_eval)
movies_filtered['keywords'] = movies_filtered['keywords'].apply(ast.literal_eval)
# + id="F44e8tsTsBOp" colab_type="code" outputId="46a1976f-a124-4b08-a0a3-f46487e6f957" colab={"base_uri": "https://localhost:8080/", "height": 221}
# simple size features (number of credited cast and crew members)
movies_filtered['cast_size'] = movies_filtered['cast'].apply(lambda x: len(x))
movies_filtered['crew_size'] = movies_filtered['crew'].apply(lambda x: len(x))
# + [markdown] id="E-Ve4DDh9En0" colab_type="text"
# **3. Extract Director Name From Crew**
# + id="9581pM4Ns483" colab_type="code" outputId="a795f37a-e5db-4b6f-fec5-48573599830c" colab={"base_uri": "https://localhost:8080/", "height": 119}
def getDirector(crew):
    """Return the director's name from a TMDB crew list, or NaN if absent.

    ``crew`` is a list of dicts with at least 'job' and 'name' keys; the
    first entry whose job is 'Director' wins.
    """
    _missing = object()
    found = next((member['name'] for member in crew if member['job'] == 'Director'), _missing)
    return np.nan if found is _missing else found
movies_filtered['director'] = movies_filtered['crew'].apply(getDirector)
# + id="kky8AM-yvAti" colab_type="code" outputId="3035572d-0ec6-4ffb-ec17-5401102896c4" colab={"base_uri": "https://localhost:8080/", "height": 425}
# keep only actor names, truncated to the three top-billed cast members
movies_filtered['cast'] = movies_filtered['cast'].apply(lambda cast: [person['name'] for person in cast] if isinstance(cast, list) else [])
movies_filtered['cast'] = movies_filtered['cast'].apply(lambda cast: cast[:3] if len(cast) >=3 else cast)
movies_filtered['cast']
# + id="K615VtFFvXKO" colab_type="code" outputId="529eb824-18b5-43fc-c109-1ecb8edeaddc" colab={"base_uri": "https://localhost:8080/", "height": 119}
movies_filtered['keywords'] = movies_filtered['keywords'].apply(lambda keywords: [keyword['name'] for keyword in keywords] if isinstance(keywords, list) else [])
# + id="o90F4i2kw8r_" colab_type="code" outputId="a21dbf4d-58c0-4393-e09d-9f7a18f3de92" colab={"base_uri": "https://localhost:8080/", "height": 221}
# normalise director names (lowercase, no spaces) and repeat them three
# times -- presumably to weight the director like three cast members in the
# bag-of-words document; verify against the modelling intent
movies_filtered['director'] = movies_filtered['director'].astype('str').apply(lambda name: str.lower(name.replace(" ", "")))
movies_filtered['director'] = movies_filtered['director'].apply(lambda name: [name, name, name])
# + id="EYBcksQQyQ4F" colab_type="code" outputId="75d0c36c-85f1-4915-eb73-6bcfd59a3052" colab={"base_uri": "https://localhost:8080/", "height": 71}
# count keyword occurrences across the whole corpus (note: this rebinds the
# name ``keywords`` from a DataFrame to a Series of counts)
keywords = movies_filtered.apply(lambda movie: pd.Series(movie['keywords']),axis=1).stack().reset_index(level=1, drop=True)
keywords = keywords.value_counts()
# + id="xHiTRs1izbPQ" colab_type="code" outputId="d229a45c-419e-4984-8cdc-c74b4721a246" colab={"base_uri": "https://localhost:8080/", "height": 119}
keywords = keywords[keywords>1]  # drop keywords that occur only once
keywords.head()
# + id="UgEdQYPyztdg" colab_type="code" colab={}
def filterKeywords(words):
    """Return the subset of ``words`` present in the global ``keywords`` index.

    ``keywords`` (built above) retains only keywords occurring more than once,
    so one-off keywords that carry no similarity signal are dropped here.
    """
    # The original parameter was named ``list``, shadowing the builtin; the
    # function is only ever called positionally (via Series.apply), so the
    # rename is safe for callers. The manual append loop is now a
    # comprehension.
    return [word for word in words if word in keywords]
# + id="dWqX-Lm90QfH" colab_type="code" outputId="1927c8f4-c25a-4bb5-a3a5-5bc35109c997" colab={"base_uri": "https://localhost:8080/", "height": 119}
# keep only keywords that occur more than once in the corpus
movies_filtered['keywords'] = movies_filtered['keywords'].apply(filterKeywords)
# + id="y9CYnahR0TrQ" colab_type="code" outputId="bac2d209-532e-4882-8f19-93e12d6a4396" colab={"base_uri": "https://localhost:8080/", "height": 119}
# Build the stemmer once: the original constructed a new SnowballStemmer
# inside the list comprehension, i.e. one instance per keyword, which is
# needlessly slow. The stemmed output is identical.
stemmer = SnowballStemmer('english')
movies_filtered['keywords'] = movies_filtered['keywords'].apply(lambda movieKeywords: [stemmer.stem(word) for word in movieKeywords])
# + id="1bcXPMyC0xIY" colab_type="code" outputId="59df9214-8490-4bc5-9773-89a4b3039738" colab={"base_uri": "https://localhost:8080/", "height": 119}
# lowercase and strip spaces so multi-word keywords become single tokens
movies_filtered['keywords'] = movies_filtered['keywords'].apply(lambda movieKeywords: [str.lower(word.replace(" ", "")) for word in movieKeywords])
# + [markdown] id="hmZWIYL19ZYf" colab_type="text"
# **4. Create Meta Data Dump**
# + id="3LfYC-Lq1SDb" colab_type="code" outputId="930c391b-ffe6-4b1e-fa45-0b1008f0cdc9" colab={"base_uri": "https://localhost:8080/", "height": 425}
# Concatenate keywords, top cast, (tripled) director and genres into one
# space-separated "document" per movie for bag-of-words vectorisation.
movies_filtered['document'] = movies_filtered['keywords'] + movies_filtered['cast'] + movies_filtered['director'] + movies_filtered['genres']
movies_filtered['document'] = movies_filtered['document'].apply(lambda x: ' '.join(x))
movies_filtered['document']
# + id="gdgy_S6C31vb" colab_type="code" colab={}
# Bag-of-words (uni+bi-grams, English stop words removed) over the metadata
# documents, then pairwise cosine similarity between all movies.
# min_df=1 keeps every term, exactly like the original min_df=0, but recent
# scikit-learn releases reject the integer value 0 during parameter
# validation.
count_vector = CountVectorizer(analyzer='word', ngram_range=(1, 2), min_df=1, stop_words='english')
count_matrix = count_vector.fit_transform(movies_filtered['document'])
similarity = cosine_similarity(count_matrix, count_matrix)
# + id="96a_wAVI353x" colab_type="code" colab={}
# reset to positional indices and build a title -> row-index lookup used by
# the recommendation functions below
movies_filtered = movies_filtered.reset_index()
titles = movies_filtered['title']
indices = pd.Series(movies_filtered.index, index=movies_filtered['title'])
# + [markdown] id="Q3tytT8I9kxG" colab_type="text"
# **5. Get Content Based Recommendations**
# + id="q8jGcD-t6Fja" colab_type="code" colab={}
def get_recommendations(title):
    """Return the 30 movie titles most content-similar to ``title``.

    Relies on the globals built above: ``indices`` (title -> row index),
    ``similarity`` (cosine similarity matrix) and ``titles``.
    """
    movie_idx = indices[title]
    ranked = sorted(enumerate(similarity[movie_idx]), key=lambda pair: pair[1], reverse=True)
    # rank 0 is the query movie itself, so take ranks 1..30
    top_positions = [position for position, _score in ranked[1:31]]
    return titles.iloc[top_positions]
# + id="hzuHSZA96IyF" colab_type="code" colab={}
content_based_recommendations = get_recommendations('Batman Begins').head(10)
# + id="c6thmiWS6J-x" colab_type="code" outputId="79395990-f307-494c-ea7d-5de8d2fbded1" colab={"base_uri": "https://localhost:8080/", "height": 102}
ratings.count()
# + [markdown] id="JlVDXvy6hiCZ" colab_type="text"
# **6. Convert String to numbers**
# + id="7uOfKqrxhnJp" colab_type="code" outputId="265dd559-dab6-48bd-870f-b7f6715dd79a" colab={"base_uri": "https://localhost:8080/", "height": 204}
# normalise rating-table dtypes; timestamps become proper datetimes
ratings.userId = ratings.userId.astype(str).astype(int)
ratings.movieId = ratings.movieId.astype(str).astype(int)
ratings.rating = ratings.rating.astype(str).astype(float)
ratings.timestamp = pd.to_datetime(ratings.timestamp.astype(int), unit='s')
ratings.head()
# + id="5YPaAO6yqIZr" colab_type="code" outputId="3e35c8ca-9735-4174-c197-a4a3540d131e" colab={"base_uri": "https://localhost:8080/", "height": 34}
len(movies_filtered)
# + id="3jHBWFgXVW2b" colab_type="code" colab={}
# positional index per movie, used as the column index of the user-item matrix
movies_filtered['List Index']=movies_filtered.index.astype(str).astype(int)
# + id="7EfOqDbCWzfS" colab_type="code" colab={}
# NOTE(review): 'id' is a TMDB id while 'movieId' is a MovieLens id; these are
# different id spaces -- verify this join key is intended.
data_combined=pd.merge(movies_filtered, ratings, left_on='id' , right_on='movieId')
# + id="jvbDE_sVz5Ic" colab_type="code" colab={}
def getCombinedData():
    """Accessor for the merged movie/rating DataFrame built above."""
    return data_combined
# + id="6X1YR-bKYSSv" colab_type="code" outputId="60679d79-b2e7-4849-b683-1bc7c5db9f22" colab={"base_uri": "https://localhost:8080/", "height": 204}
# keep only the columns the RBM needs
data_combined = data_combined[['movieId', 'List Index', 'userId', 'rating']]
data_combined['List Index']=data_combined['List Index'].astype(str).astype(int)
data_combined.head()
# + [markdown] id="4XLapn77q6uU" colab_type="text"
# **7. Group By UserId**
# + id="s59xmV1kqdUL" colab_type="code" colab={}
user_group = data_combined.groupby('userId')
# + id="-c9pp063xf28" colab_type="code" outputId="0a81b0dc-25d5-4c39-9e49-8651a4765100" colab={"base_uri": "https://localhost:8080/", "height": 34}
# number of distinct users with at least one rating
n_users = user_group.first().shape[0]
n_users
# + [markdown] id="v57mljz0-AMS" colab_type="text"
# **8. Create User Movie Rating matrix**
# + id="sg_QbwTaqXhZ" colab_type="code" colab={}
# Build a dense user x movie matrix of ratings scaled to [0, 1]
# (0 = unrated).
totalUsers=n_users
user_movie = [None]* n_users
for userId, curUser in user_group:
    temp = [0]* len(movies_filtered)
    for num, movie in curUser.iterrows():
        temp[int(movie['List Index'])] = movie['rating']/5.0
    # NOTE(review): assumes userIds are consecutive starting at 1 -- a gap
    # would leave None rows and an out-of-range id would raise; verify.
    user_movie[userId-1]=temp
    # NOTE(review): this countdown can never reach 0 before the groupby loop
    # ends on its own, so the break is effectively dead code.
    if totalUsers == 0:
        break
    totalUsers-=1
# + id="H8S9j9sDemtd" colab_type="code" outputId="4fccde20-91e3-4da1-a007-d6e9c4ec961f" colab={"base_uri": "https://localhost:8080/", "height": 34}
len(user_movie)
# + [markdown] id="nIJGuil7-MfC" colab_type="text"
# **9. Create RBM Layers**
# + id="tcgjdUHwcvlY" colab_type="code" colab={}
# RBM dimensions: one visible unit per movie, 256 hidden units.
hidden_units=256
visible_units = len(movies_filtered)
# placeholders for the visible bias, hidden bias and the weight matrix
# (parameters are fed in, not tf.Variables -- updates happen in NumPy)
visible = tf.placeholder("float", [visible_units])
hidden = tf.placeholder("float", [hidden_units])
w= tf.placeholder("float", [visible_units, hidden_units])
# + [markdown] id="pABhKr7ciXPM" colab_type="text"
# **10. Forward pass**
# + id="0qdWPCIXl-D_" colab_type="code" colab={}
# batch of visible (rating) vectors
v0 = tf.placeholder("float", [None, visible_units])
# hidden activation probabilities, then a binary sample via the sign trick
_h0 = tf.nn.sigmoid(tf.matmul(v0, w)+hidden)
h0 = tf.nn.relu(tf.sign(_h0 - tf.random_uniform(tf.shape(_h0))))
# + [markdown] id="FZG4FDS6jKj9" colab_type="text"
# **11. Backward pass**
# + id="Am_T7zWHjJhR" colab_type="code" colab={}
# reconstruct the visible layer and re-infer the hidden layer (one Gibbs
# step, i.e. CD-1)
_v1 = tf.nn.sigmoid(tf.matmul(h0, tf.transpose(w))+visible)
v1 = tf.nn.relu(tf.sign(_v1 - tf.random_uniform(tf.shape(_v1))))
h1 = tf.nn.sigmoid(tf.matmul(v1, w)+ hidden)
# + id="_KiGao2woLJo" colab_type="code" colab={}
# contrastive-divergence update ops with learning rate alpha
alpha = 0.5
positive_phase = tf.matmul(tf.transpose(v0), h0)
negative_phase = tf.matmul(tf.transpose(v1), h1)
contrastive_divergernce = positive_phase - negative_phase
# normalise by the batch size
contrastive_divergernce = contrastive_divergernce/tf.to_float(tf.shape(v0)[0])
update_w = w+ alpha * contrastive_divergernce
update_vb = visible + alpha * tf.reduce_mean(v0-v1, 0)
update_hb = hidden + alpha * tf.reduce_mean(h0-h1, 0)
# + id="RH8C5GKtoU_C" colab_type="code" colab={}
# mean squared reconstruction error, tracked once per epoch
err = v0-v1
err_sum = tf.reduce_mean(err * err)
# + id="4teF2A-cotTm" colab_type="code" colab={}
# zero-initialised current/previous parameter buffers (weights and biases);
# the training loop below shuttles these through the update ops
cur_w = np.zeros([visible_units, hidden_units], np.float32)
cur_vb = np.zeros([visible_units], np.float32)
cur_hb = np.zeros([hidden_units], np.float32)
prev_w = np.zeros([visible_units, hidden_units], np.float32)
prev_vb = np.zeros([visible_units], np.float32)
prev_hb = np.zeros([hidden_units], np.float32)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# + [markdown] id="MKVjv2sQ-UXE" colab_type="text"
# **12. Train the model**
# + id="fTsFIl_Ep75h" colab_type="code" outputId="fb2fb5f0-7c8c-4402-dc2d-08d83828ad0c" colab={"base_uri": "https://localhost:8080/", "height": 357}
# Mini-batch CD-1 training: run the update ops with the previous parameters
# fed in, then promote the results to "previous" for the next batch.
epochs=20
batchsize=20
errors=[]
for i in range(epochs):
    for start, end in zip(range(0, len(user_movie), batchsize), range(batchsize, len(user_movie), batchsize)):
        batch = user_movie[start:end]
        cur_w = sess.run(update_w, feed_dict={v0 : batch, w : prev_w, visible: prev_vb , hidden : prev_hb})
        cur_vb = sess.run(update_vb, feed_dict={v0 : batch, w : prev_w, visible: prev_vb , hidden : prev_hb})
        cur_hb = sess.run(update_hb, feed_dict={v0 : batch, w : prev_w, visible: prev_vb , hidden : prev_hb})
        prev_w = cur_w
        # BUG FIX: the original assigned ``prev_vb = prev_vb`` and
        # ``prev_hb = prev_hb`` (self-assignments), so the visible and hidden
        # bias vectors computed by update_vb/update_hb were thrown away and
        # the biases stayed at zero for the whole training run.
        prev_vb = cur_vb
        prev_hb = cur_hb
    # epoch-level reconstruction error on the full user-movie matrix
    errors.append(sess.run(err_sum, feed_dict={v0 : user_movie, w : cur_w, visible: cur_vb , hidden : cur_hb}))
    print(errors[-1])
# + id="W7XIe_8m9-E7" colab_type="code" outputId="3759c051-7b24-4cb8-8e4b-184a41179ad6" colab={"base_uri": "https://localhost:8080/", "height": 279}
# training curve; note that err_sum above is a mean *squared* error, so the
# legend label "RMSE" overstates -- the shape of the curve is unaffected
plt.plot(errors, label="train RMSE")
plt.ylabel('Errors')
plt.legend()
plt.xlabel('Epoch')
plt.show()
# + [markdown] id="cUs12yZ6-j0i" colab_type="text"
# **13. Predict For Test User**
# + id="MbjnBcg9sdd3" colab_type="code" colab={}
# pick one user's rating vector (userId-1 indexing, as in the matrix build)
testUserId =21
testUser = [user_movie[testUserId-1]]
# + id="RrMFM_2Gs20B" colab_type="code" colab={}
# single forward (visible -> hidden) then backward (hidden -> visible) pass
# with the trained parameters; the reconstruction is the per-movie score
hh0 = tf.nn.sigmoid(tf.matmul(v0, w)+hidden)
vv1 = tf.nn.sigmoid(tf.matmul(hh0, tf.transpose(w))+visible)
feed = sess.run(hh0, feed_dict={v0 : testUser, w: prev_w, hidden : prev_hb})
rec = sess.run(vv1, feed_dict={hh0 : feed, w: prev_w, visible : prev_vb})
# + id="c9_vxhDfAOy4" colab_type="code" colab={}
# NOTE(review): ``scored_movies`` aliases (does not copy) ``movies_filtered``,
# so the score column is added to the original DataFrame too.
scored_movies=movies_filtered
scored_movies['Recommended Scores']=rec[0]
# + id="MMCMY5lsCMR4" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="011c7306-91fd-4be7-d8c1-8bc4324f6c0b"
scored_movies.sort_values(['Recommended Scores'], ascending=False).head(10)
# + id="Imm4rXXmLC8n" colab_type="code" colab={}
def predict(userId):
    """Score every movie for ``userId`` with the trained RBM.

    Returns the global ``scored_movies`` DataFrame with a refreshed
    'Recommended Scores' column. Relies on the globals built above
    (user_movie, sess, v0, w, visible, hidden, prev_w, prev_vb, prev_hb).
    """
    testUser = [user_movie[userId-1]]
    # NOTE(review): these two lines add new ops to the TF graph on every
    # call; reusing the module-level hh0/vv1 ops would avoid graph growth.
    hh0 = tf.nn.sigmoid(tf.matmul(v0, w)+hidden)
    vv1 = tf.nn.sigmoid(tf.matmul(hh0, tf.transpose(w))+visible)
    feed = sess.run(hh0, feed_dict={v0 : testUser, w: prev_w, hidden : prev_hb})
    rec = sess.run(vv1, feed_dict={hh0 : feed, w: prev_w, visible : prev_vb})
    # mutates the shared DataFrame in place, then returns it
    scored_movies['Recommended Scores']=rec[0]
    return scored_movies
# + id="o4X3qNpwr-RN" colab_type="code" colab={}
def convert_int(x):
    """Cast ``x`` to int, returning NaN when the value is not numeric.

    Used to sanitise the ``tmdbId`` column, which contains missing/NaN
    entries that ``int()`` cannot convert.
    """
    try:
        return int(x)
    except (TypeError, ValueError):
        # Narrowed from a bare ``except:``, which also swallowed
        # KeyboardInterrupt/SystemExit; int() only raises these two for
        # unconvertible inputs (None, NaN, non-numeric strings).
        return np.nan
# + [markdown] id="nR7wnP2Z-oyi" colab_type="text"
# **14. Hybrid Model**
# + id="ZuazNe3OSOuB" colab_type="code" colab={}
# map MovieLens movieId <-> TMDB id, indexed by movie title
id_map = pd.read_csv('/content/drive/My Drive/SWM/the-movies-dataset/links_small.csv')[['movieId', 'tmdbId']]
id_map['tmdbId'] = id_map['tmdbId'].apply(convert_int)
id_map.columns = ['movieId', 'id']
id_map = id_map.merge(movies_filtered[['title', 'id']], on='id').set_index('title')
# + id="jc9wv1GTShKQ" colab_type="code" colab={}
# same mapping, indexed by TMDB id instead of title
indices_map = id_map.set_index('id')
# + id="eMtmyki0TvSh" colab_type="code" colab={}
def hybrid(userId, title):
    """Hybrid recommendation: content similarity re-ranked by the RBM.

    Takes the 25 movies most content-similar to ``title`` and orders them by
    the RBM's predicted scores for ``userId``. Relies on the globals built
    above (``indices``, ``similarity``, ``movies_filtered``, ``predict``).
    Returns the top 10 as a DataFrame.
    """
    # NOTE(review): if a title is duplicated, indices[title] returns a Series
    # and int(idx) raises -- confirm titles are unique in this subset.
    # (Two unused lookups of id_map for tmdbId/movie_id were removed; their
    # results were never read.)
    idx = indices[title]
    sim_scores = list(enumerate(similarity[int(idx)]))
    sim_scores = sorted(sim_scores, key=lambda x: x[1], reverse=True)
    sim_scores = sim_scores[1:26]  # skip rank 0: the query movie itself
    movie_indices = [i[0] for i in sim_scores]
    movies = movies_filtered.iloc[movie_indices][['title', 'id']]
    # re-rank the content-based candidate set by the user's RBM scores
    recommendations = predict(userId)
    recommendations1 = recommendations[recommendations['id'].isin(movies['id'])]
    recommendations1 = recommendations1.sort_values('Recommended Scores', ascending=False)
    return recommendations1[['title', 'id', 'genres', 'Recommended Scores']].head(10)
# + id="3FrW9pmjOBpm" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 376} outputId="0afd5b3d-6113-4867-be9f-e472584812d8"
# demo: hybrid recommendations for user 4 seeded by a Harry Potter title
print("Movie Recommendations From Hybrid Model : ")
hybrid(4,"Harry Potter and the Chamber of Secrets")
| notebook/rbm_based_recommender.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Lesson 1 : Generate Gaussian Processes & Get prediction in 1D / 2D
#
# Below some packages to import that will be used for this lesson
#
# The cell just below prevents scrolling when a plot is created within the IPython notebook
# + language="javascript"
# IPython.OutputArea.prototype._should_scroll = function(lines){
# return false;
# }
# +
# Classical package for manipulating
# array, for plotting and interactiv plots.
import pylab as plt
from matplotlib import gridspec
import numpy as np
import ipywidgets as widgets
from ipywidgets import interact
import itertools
from treegp import AnisotropicRBF, eval_kernel
import treegp
# -
# ## Exercise 1: Play with Gaussian random field realizations (1D)
# +
#################################################################
# EXERCICE 1: Play with gaussian random fields realization (1D) #
#################################################################
# function to generate a 1D gaussian random field for a
# given scikit-learn kernel.
def make_1d_grf(kernel, noise=None, seed=42, N_points=40):
    """Draw one realization of a 1D Gaussian random field.

    Parameters
    ----------
    kernel : callable
        scikit-learn style kernel; called on the coordinates to build
        the covariance matrix.
    noise : float, optional
        If given, i.i.d. Gaussian noise of this scale is added and a
        per-point error array is returned as well.
    seed : int
        Seed for NumPy's global RNG, making the draw reproducible.
    N_points : int
        Number of random sample locations in [-10, 10).

    Returns
    -------
    (x, y) when ``noise`` is None, otherwise (x, y, y_err).
    """
    np.random.seed(seed)
    # random sample positions, shaped (N, 1) as the kernel expects
    x = np.random.uniform(-10, 10, N_points).reshape((N_points, 1))
    # covariance of the field at the sampled positions
    K = kernel(x)
    # one zero-mean draw with covariance K
    y = np.random.multivariate_normal(np.zeros(N_points), K)
    if noise is None:
        return x, y
    # add white measurement noise and report its (constant) scale
    y = y + np.random.normal(scale=noise, size=N_points)
    y_err = np.full_like(y, noise)
    return x, y, y_err
# function to interactively plot gaussian random field realizations.
# Widgets: number of realizations, kernel amplitude sigma, correlation
# length l and the kernel family (RBF or Matern).
@interact(n_real = widgets.IntSlider(value=5, min=1, max=20, step=1,
                                     description='N real:', disabled=False,
                                     continuous_update=False, orientation='horizontal',
                                     readout=True,
                                     readout_format='d'),
          sigma = widgets.FloatSlider(value=1., min=0.01, max=5.0, step=0.01, description='$\sigma$:',
                                      disabled=False,
                                      continuous_update=False,
                                      orientation='horizontal',
                                      readout=True,
                                      readout_format='.2f'),
          l = widgets.FloatSlider(value=1, min=0.01, max=10.0, step=0.01, description='$l$:',
                                  disabled=False,
                                  continuous_update=False,
                                  orientation='horizontal',
                                  readout=True,
                                  readout_format='.2f'),
          kernel = widgets.Dropdown(options=['RBF', 'Matern'],
                                    value='RBF',
                                    description='Kernel:',
                                    disabled=False,))
def plot_generate_grf(n_real, sigma, l, kernel):
    """Scatter ``n_real`` GRF realizations (left panel) and show the kernel's
    correlation function (right panel) for the selected hyperparameters."""
    gs = gridspec.GridSpec(1, 2, width_ratios=[1.5, 1])
    plt.figure(figsize=(20,8))
    plt.subplot(gs[0])
    for i in range(n_real):
        # build the scikit-learn kernel from its string form, e.g.
        # "1.000000 * RBF(1.000000)" (loop-invariant, but cheap to rebuild)
        Kernel = "%f * %s(%f)"%((sigma**2, kernel, l))
        Kernel = eval_kernel(Kernel)
        # small noise level and a distinct seed per realization
        x, y, y_err = make_1d_grf(Kernel, noise = sigma*0.01,
                                  seed=62+i, N_points=80)
        plt.scatter(x, y, label = 'data')
        plt.errorbar(x, y, linestyle='', yerr=y_err,
                     alpha=0.7,marker='.',zorder=0)
    plt.plot([-10,10], [0, 0],'k--')
    plt.xlim(-10,10)
    plt.ylim(-8.,8.)
    plt.xlabel('X', fontsize=20)
    plt.ylabel('Y', fontsize=20)
    plt.title('Number of realizations: %i'%(n_real), fontsize=16)
    plt.subplot(gs[1])
    # evaluate the (last) kernel along one axis: xi(|x_i - x_j|) vs distance
    distance = np.linspace(0, 8, 30)
    coord = np.array([distance, np.zeros_like(distance)]).T
    pcf = Kernel.__call__(coord, Y=np.zeros_like(coord))[:,0]
    plt.plot(distance, pcf, 'k', lw=3)
    plt.ylim(0, 25)
    plt.xlim(0, 8)
    plt.ylabel('$\\xi(|x_i-x_j|)$', fontsize=20)
    plt.xlabel('$|x_i-x_j|$', fontsize=20)
    plt.title('Used correlation function (%s)'%(kernel), fontsize=16)
# -
# ## Exercise 2: Play with Gaussian random field realizations (2D)
# +
################################################################
# EXERCICE 2: Play with gaussian random fields realization (2D)#
################################################################
# function to generate a 1D gaussian random field for a
# given scikit-learn kernel.
def get_correlation_length_matrix(size, g1, g2):
    """Build the 2x2 anisotropic correlation-length matrix.

    size sets the overall correlation length; g1, g2 are shear-like
    anisotropy parameters (components with |g| > 1 are silently reset
    to 0, i.e. ignored). Returns the rotated, scaled 2x2 matrix.
    """
    # Drop out-of-range anisotropy components instead of raising.
    g1 = g1 if abs(g1) <= 1 else 0
    g2 = g2 if abs(g2) <= 1 else 0
    g = np.sqrt(g1**2 + g2**2)
    # Axis ratio of the correlation ellipse and its position angle.
    axis_ratio = (1 - g) / (1 + g)
    angle = 0.5 * np.arctan2(g2, g1)
    c, s = np.cos(angle), np.sin(angle)
    rotation = np.array([[c, s],
                         [-s, c]])
    scales = np.diag([size**2, (size * axis_ratio)**2])
    # Rotate the diagonal length-squared matrix into the sheared frame.
    return rotation.T @ scales @ rotation
def make_2d_grf(kernel, noise=None, seed=42, N_points=40):
    """Draw one realization of a 2D Gaussian random field.

    Samples N_points coordinates uniformly in [-10, 10]^2, evaluates the
    kernel to get the covariance matrix, and draws a zero-mean
    multivariate-normal field. If `noise` is given, iid Gaussian noise of
    that scale is added and (x, y, y_err) is returned; otherwise (x, y).
    """
    # Seed the global RNG so every call with the same seed is reproducible.
    np.random.seed(seed)
    # Random scattered coordinates; the two uniform draws happen in order.
    coords = np.column_stack((np.random.uniform(-10, 10, N_points),
                              np.random.uniform(-10, 10, N_points)))
    # Covariance of the field between all pairs of sampled points.
    cov = kernel(coords)
    field = np.random.multivariate_normal(np.zeros(N_points), cov)
    if noise is None:
        return coords, field
    # Add measurement noise and report its scale as per-point error bars.
    field = field + np.random.normal(scale=noise, size=N_points)
    return coords, field, np.ones_like(field) * noise
# function to interactively plot Gaussian random field realizations.
@interact(sigma = widgets.FloatSlider(value=2., min=0.01, max=5.0, step=0.01, description='$\sigma$:',
                                      disabled=False,
                                      continuous_update=False,
                                      orientation='horizontal',
                                      readout=True,
                                      readout_format='.2f'),
          size = widgets.FloatSlider(value=1, min=0.01, max=10.0, step=0.01, description='$l$:',
                                     disabled=False,
                                     continuous_update=False,
                                     orientation='horizontal',
                                     readout=True,
                                     readout_format='.2f'),
          g1 = widgets.FloatSlider(value=0, min=-0.99, max=0.99, step=0.01, description='$g_1$:',
                                   disabled=False,
                                   continuous_update=False,
                                   orientation='horizontal',
                                   readout=True,
                                   readout_format='.2f'),
          g2 = widgets.FloatSlider(value=0, min=-0.99, max=0.99, step=0.01, description='$g_2$:',
                                   disabled=False,
                                   continuous_update=False,
                                   orientation='horizontal',
                                   readout=True,
                                   readout_format='.2f'),
          kernel = widgets.Dropdown(options=['AnisotropicRBF'],
                                    value='AnisotropicRBF',
                                    description='Kernel:',
                                    disabled=False,))
def plot_generate_grf_2d(sigma, size, g1, g2, kernel):
    """Interactively draw one 2D GRF realization (left panel) and the
    anisotropic correlation function used to generate it (right panel).

    sigma: field amplitude; size: correlation length; g1, g2: shear-like
    anisotropy parameters of the correlation-length matrix.
    """
    # The anisotropic kernel is parametrized by the INVERSE length matrix.
    L = get_correlation_length_matrix(size, g1, g2)
    inv_L = np.linalg.inv(L)
    gs = gridspec.GridSpec(1, 2, width_ratios=[1.5, 1])
    plt.figure(figsize=(20,8))
    plt.subplot(gs[0])
    # {0!r} embeds repr(inv_L) into the kernel string; presumably eval_kernel
    # parses the array literal back -- TODO confirm against eval_kernel.
    Kernel = "%f"%(sigma**2) + " * AnisotropicRBF(invLam={0!r})".format(inv_L)
    Kernel = eval_kernel(Kernel)
    x, y, y_err = make_2d_grf(Kernel, noise=sigma*0.01, seed=42, N_points=500)
    plt.scatter(x[:,0], x[:,1], c=y, s=80, cmap=plt.cm.seismic, vmin=-5, vmax=5)
    cb = plt.colorbar()
    cb.set_label('Y', fontsize=20)
    plt.xlabel('$\\theta_{X}$', fontsize=20)
    plt.ylabel('$\\theta_{Y}$', fontsize=20)
    plt.title('Generated 2D Gaussian random fields', fontsize=20)
    plt.subplot(gs[1])
    # Evaluate the kernel between a pixel grid and the origin to image the
    # correlation function xi(dtheta_x, dtheta_y).
    pixel_squareroot = 25
    npixels = pixel_squareroot**2
    x = np.linspace(-5, 5, pixel_squareroot)
    x1, x2 = np.meshgrid(x, x)
    coord = np.array([x1.reshape(npixels), x2.reshape(npixels)]).T
    pcf = Kernel.__call__(coord, Y=np.zeros_like(coord))[:,0]
    pcf = pcf.reshape((pixel_squareroot, pixel_squareroot))
    plt.imshow(pcf, extent=[x[0], x[-1], x[0], x[-1]],
               origin='lower', cmap=plt.cm.Blues, vmin=0, vmax=5)
    cb = plt.colorbar()
    cb.set_label(r'$\xi (\Delta \theta_{X}, \Delta \theta_{Y})$', fontsize=20)
    plt.ylabel('$\Delta \\theta_{Y} = \\theta_{Y,i} - \\theta_{Y,j}$', fontsize=20)
    plt.xlabel('$\Delta \\theta_{X} = \\theta_{X,i} - \\theta_{X,j}$', fontsize=20)
    plt.title('Used correlation function (%s)'%(kernel), fontsize=16)
# -
# ## Exercise 3): GP prediction and what are the best hyperparameters and kernel (1D)
# +
###################################################################################
# EXERCICE 3: GP prediction and what are the best hyperparameters and kernel (1D) #
###################################################################################
def gp_regression(x, new_x, y, kernel, y_err=None):
    """Interpolate y(x) onto new_x with a treegp Gaussian Process.

    kernel is a treegp/scikit-learn kernel (or kernel string); y_err are
    per-point measurement errors. Returns (gp, y_predict, y_std) where
    y_std is the square root of the diagonal of the predicted covariance.
    """
    if y_err is None:
        # Essentially noiseless data: a tiny jitter keeps the GP solve stable.
        y_err = 1e-10 * np.ones_like(y)
    # Hyperparameters are taken as-is (optimizer='none'), no fitting is done.
    interp = treegp.GPInterpolation(kernel=kernel, optimizer='none',
                                    normalize=False, white_noise=0., p0=[3000., 0., 0.],
                                    n_neighbors=4, average_fits=None, nbins=20,
                                    min_sep=None, max_sep=None)
    interp.initialize(x, y, y_err=y_err)
    mean, cov = interp.predict(new_x, return_cov=True)
    return interp, mean, np.sqrt(np.diag(cov))
# Load the pre-generated 1D GRF data set; columns: x, y, y_err.
data = np.loadtxt('data/data_1d_grf.txt')
# Reshape x into an (n, 1) column of coordinates, as gp_regression expects.
x = data[:,0].reshape((len(data[:,0]),1))
y = data[:,1]
y_err = data[:,2]
@interact(sigma = widgets.FloatSlider(value=2., min=0.01, max=5.0, step=0.01, description='$\sigma$:',
                                      disabled=False,
                                      continuous_update=False,
                                      orientation='horizontal',
                                      readout=True,
                                      readout_format='.2f'),
          l = widgets.FloatSlider(value=1., min=0.01, max=10.0, step=0.01, description='$l$:',
                                  disabled=False,
                                  continuous_update=False,
                                  orientation='horizontal',
                                  readout=True,
                                  readout_format='.2f'),
          kernel = widgets.Dropdown(options=['RBF', 'Matern'],
                                    value='RBF',
                                    description='Kernel:',
                                    disabled=False,))
def plot_predict_1D(sigma, l, kernel):
    """Interactive GP prediction on the 1D data set (module-level x, y, y_err)
    for the chosen kernel and hyperparameters sigma (amplitude) and l (length).
    """
    # Dense prediction grid, wider than the data to show extrapolation.
    new_x = np.linspace(-24,24, 400).reshape((400,1))
    Kernel = "%f * %s(%f)"%((sigma**2, kernel, l))
    gp, y_pred, y_std = gp_regression(x, new_x, y, Kernel, y_err=y_err)
    gs = gridspec.GridSpec(1, 2, width_ratios=[1.5, 1])
    plt.figure(figsize=(20,8))
    plt.subplot(gs[0])
    # Data
    plt.scatter(x, y, c='b', label = 'data')
    plt.errorbar(x, y, linestyle='', yerr=y_err, ecolor='b',
                 alpha=0.7,marker='.',zorder=0)
    # GP prediction, with the +/- 1 sigma band from the predicted covariance.
    plt.plot(new_x, y_pred, 'r', lw =3, label = 'GP prediction')
    plt.fill_between(new_x.T[0], y_pred-y_std, y_pred+y_std, color='r', alpha=0.3)
    plt.plot(new_x, np.zeros_like(new_x),'k--')
    plt.xlim(-24,24)
    plt.ylim(-3.,3.)
    plt.xticks(fontsize=14)
    plt.yticks(fontsize=14)
    plt.xlabel('X', fontsize=20)
    plt.ylabel('Y', fontsize=20)
    plt.legend(fontsize=18)
    plt.subplot(gs[1])
    # Right panel: the correlation function actually used by the GP.
    distance = np.linspace(0, 8, 60)
    coord = np.array([distance, np.zeros_like(distance)]).T
    pcf = gp.kernel.__call__(coord, Y=np.zeros_like(coord))[:,0]
    plt.plot(distance, pcf, 'k', lw=3)
    plt.ylim(0, 10)
    plt.xlim(0, 8)
    plt.ylabel('$\\xi(|x_i-x_j|)$', fontsize=20)
    plt.xlabel('$|x_i-x_j|$', fontsize=20)
    plt.title('Used correlation function (%s)'%(kernel), fontsize=16)
# -
# ## Exercise 4) GP prediction and what are the best hyperparameters (2D)
# +
########################################################################
# EXERCICE 4: GP prediction and what are the best hyperparameters (2D) #
########################################################################
# Load the pre-generated 2D GRF data set; columns: theta_x, theta_y, y, y_err.
data = np.loadtxt('data/data_2d_grf.txt')
theta_x = data[:,0]
theta_y = data[:,1]
# Pack the two coordinates into an (n, 2) array for the GP.
XXX = np.array([theta_x, theta_y]).T
YYY = data[:,2]
YYY_err = data[:,3]
# function to interactively plot the 2D Gaussian Process prediction.
@interact(sigma = widgets.FloatSlider(value=2., min=0.01, max=5.0, step=0.01, description='$\sigma$:',
                                      disabled=False,
                                      continuous_update=False,
                                      orientation='horizontal',
                                      readout=True,
                                      readout_format='.2f'),
          size = widgets.FloatSlider(value=1, min=0.01, max=10.0, step=0.01, description='$l$:',
                                     disabled=False,
                                     continuous_update=False,
                                     orientation='horizontal',
                                     readout=True,
                                     readout_format='.2f'),
          g1 = widgets.FloatSlider(value=0, min=-0.99, max=0.99, step=0.01, description='$g_1$:',
                                   disabled=False,
                                   continuous_update=False,
                                   orientation='horizontal',
                                   readout=True,
                                   readout_format='.2f'),
          g2 = widgets.FloatSlider(value=0, min=-0.99, max=0.99, step=0.01, description='$g_2$:',
                                   disabled=False,
                                   continuous_update=False,
                                   orientation='horizontal',
                                   readout=True,
                                   readout_format='.2f'),
          kernel = widgets.Dropdown(options=['AnisotropicRBF'],
                                    value='AnisotropicRBF',
                                    description='Kernel:',
                                    disabled=False,))
def plot_predict_2D(sigma, size, g1, g2, kernel):
    """Interactive 2D GP prediction on the loaded data set (XXX, YYY, YYY_err).

    Three panels: the scattered data, the GP prediction on a regular grid,
    and the anisotropic correlation function implied by the sliders.
    """
    gs = gridspec.GridSpec(1, 3, width_ratios=[1.3, 1.3, 1])
    plt.figure(figsize=(22,6))
    plt.subplots_adjust(wspace=0.2)
    plt.subplot(gs[0])
    plt.scatter(XXX[:,0], XXX[:,1], c=YYY, s=80, cmap=plt.cm.seismic, vmin=-5, vmax=5)
    cb = plt.colorbar()
    cb.set_label('Y', fontsize=20)
    plt.xlabel('$\\theta_{X}$', fontsize=20)
    plt.ylabel('$\\theta_{Y}$', fontsize=20)
    plt.title('Data', fontsize=20)
    # Build the anisotropic kernel from the slider parameters (inverse matrix).
    L = get_correlation_length_matrix(size, g1, g2)
    inv_L = np.linalg.inv(L)
    Kernel = "%f"%(sigma**2) + " * AnisotropicRBF(invLam={0!r})".format(inv_L)
    plt.subplot(gs[1])
    # Predict on a regular NPOINT x NPOINT grid covering the data range.
    NPOINT = 31
    new_x = np.linspace(-10,10, NPOINT)
    new_x, new_y = np.meshgrid(new_x, new_x)
    coord = np.array([new_x.reshape(NPOINT**2), new_y.reshape(NPOINT**2)]).T
    gp, y_pred, y_std = gp_regression(XXX, coord, YYY, Kernel, y_err=YYY_err)
    y_pred = y_pred.reshape((NPOINT, NPOINT))
    plt.imshow(y_pred, extent=[-10, 10, -10, 10],
               origin='lower', cmap=plt.cm.seismic, vmin=-5, vmax=5)
    cb = plt.colorbar()
    cb.set_label('Y', fontsize=20)
    plt.xlabel('$\\theta_{X}$', fontsize=20)
    plt.ylabel('$\\theta_{Y}$', fontsize=20)
    plt.title('Gaussian Process prediction', fontsize=20)
    plt.subplot(gs[2])
    # Image the correlation function by evaluating the kernel against the origin.
    pixel_squareroot = 25
    npixels = pixel_squareroot**2
    x = np.linspace(-5, 5, pixel_squareroot)
    x1, x2 = np.meshgrid(x, x)
    coord = np.array([x1.reshape(npixels), x2.reshape(npixels)]).T
    pcf = gp.kernel.__call__(coord, Y=np.zeros_like(coord))[:,0]
    pcf = pcf.reshape((pixel_squareroot, pixel_squareroot))
    plt.imshow(pcf, extent=[x[0], x[-1], x[0], x[-1]],
               origin='lower', cmap=plt.cm.Blues, vmin=0, vmax=5)
    cb = plt.colorbar()
    cb.set_label(r'$\xi (\Delta \theta_{X}, \Delta \theta_{Y})$', fontsize=20)
    plt.ylabel('$\Delta \\theta_{Y} = \\theta_{Y,i} - \\theta_{Y,j}$', fontsize=20)
    plt.xlabel('$\Delta \\theta_{X} = \\theta_{X,i} - \\theta_{X,j}$', fontsize=20)
    plt.title('Used correlation function \n (%s)'%(kernel), fontsize=16)
# -
| notebooks/example_1_gaussian_random_fields_and_gp_interp.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Environment (conda_tensorflow_p36)
# language: python
# name: conda_tensorflow_p36
# ---
# +
import keras
from keras.models import Sequential, Model, load_model
from keras.layers import Dense, Dropout, Activation, Flatten, Input, Lambda
from keras.layers import Conv2D, MaxPooling2D, Conv1D, MaxPooling1D, LSTM, ConvLSTM2D, GRU, BatchNormalization, LocallyConnected2D, Permute
from keras.layers import Concatenate, Reshape, Softmax, Conv2DTranspose, Embedding, Multiply
from keras.callbacks import ModelCheckpoint, EarlyStopping, Callback
from keras import regularizers
from keras import backend as K
import keras.losses
import tensorflow as tf
from tensorflow.python.framework import ops
import isolearn.keras as iso
import numpy as np
import tensorflow as tf
import logging
logging.getLogger('tensorflow').setLevel(logging.ERROR)
import pandas as pd
import os
import pickle
import numpy as np
import scipy.sparse as sp
import scipy.io as spio
import matplotlib.pyplot as plt
import isolearn.io as isoio
import isolearn.keras as isol
from genesis.visualization import *
from genesis.generator import *
from genesis.predictor import *
from genesis.optimizer import *
from definitions.generator.aparent_gan_seed_generator_tanh import get_load_generator_network_func
from definitions.predictor.aparent import load_saved_predictor
import sklearn
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from scipy.stats import pearsonr
import seaborn as sns
from matplotlib import colors
def initialize_sequence_templates(generator, sequence_templates) :
    """Load fixed sequence templates and editable-position masks into the
    generator's 'template_dense' and 'mask_dense' layers and freeze them.

    Template letters: 'N' = freely optimizable position, 'X' = suppressed
    position, any other letter = hard-coded nucleotide.
    """
    embedding_templates = []
    embedding_masks = []
    for k in range(len(sequence_templates)) :
        sequence_template = sequence_templates[k]
        onehot_template = isol.OneHotEncoder(seq_length=len(sequence_template))(sequence_template).reshape((1, len(sequence_template), 4))
        for j in range(len(sequence_template)) :
            if sequence_template[j] not in ['N', 'X'] :
                # Fixed base: large positive logit on the template base and
                # negative elsewhere -- presumably pins the position through a
                # downstream softmax; confirm against the generator definition.
                nt_ix = np.argmax(onehot_template[0, j, :])
                onehot_template[:, j, :] = -4.0
                onehot_template[:, j, nt_ix] = 10.0
            elif sequence_template[j] == 'X' :
                onehot_template[:, j, :] = -1.0
        # Mask is 1 only at 'N' positions, so only those can be optimized.
        onehot_mask = np.zeros((1, len(sequence_template), 4))
        for j in range(len(sequence_template)) :
            if sequence_template[j] == 'N' :
                onehot_mask[:, j, :] = 1.0
        embedding_templates.append(onehot_template.reshape(1, -1))
        embedding_masks.append(onehot_mask.reshape(1, -1))
    embedding_templates = np.concatenate(embedding_templates, axis=0)
    embedding_masks = np.concatenate(embedding_masks, axis=0)
    # Templates/masks are constants, not trainable weights: freeze the layers.
    generator.get_layer('template_dense').set_weights([embedding_templates])
    generator.get_layer('template_dense').trainable = False
    generator.get_layer('mask_dense').set_weights([embedding_masks])
    generator.get_layer('mask_dense').trainable = False
# +
from keras.layers import Layer, InputSpec
from keras import initializers, regularizers, constraints
class InstanceNormalization(Layer):
    """Instance normalization layer.
    Normalize the activations of the previous layer at each step,
    i.e. applies a transformation that maintains the mean activation
    close to 0 and the activation standard deviation close to 1.
    # Arguments
        axis: Integer, the axis that should be normalized
            (typically the features axis).
            For instance, after a `Conv2D` layer with
            `data_format="channels_first"`,
            set `axis=1` in `InstanceNormalization`.
            Setting `axis=None` will normalize all values in each
            instance of the batch.
            Axis 0 is the batch dimension. `axis` cannot be set to 0 to avoid errors.
        epsilon: Small float added to variance to avoid dividing by zero.
        center: If True, add offset of `beta` to normalized tensor.
            If False, `beta` is ignored.
        scale: If True, multiply by `gamma`.
            If False, `gamma` is not used.
            When the next layer is linear (also e.g. `nn.relu`),
            this can be disabled since the scaling
            will be done by the next layer.
        beta_initializer: Initializer for the beta weight.
        gamma_initializer: Initializer for the gamma weight.
        beta_regularizer: Optional regularizer for the beta weight.
        gamma_regularizer: Optional regularizer for the gamma weight.
        beta_constraint: Optional constraint for the beta weight.
        gamma_constraint: Optional constraint for the gamma weight.
    # Input shape
        Arbitrary. Use the keyword argument `input_shape`
        (tuple of integers, does not include the samples axis)
        when using this layer as the first layer in a Sequential model.
    # Output shape
        Same shape as input.
    # References
        - [Layer Normalization](https://arxiv.org/abs/1607.06450)
        - [Instance Normalization: The Missing Ingredient for Fast Stylization](
        https://arxiv.org/abs/1607.08022)
    """
    def __init__(self,
                 axis=None,
                 epsilon=1e-3,
                 center=True,
                 scale=True,
                 beta_initializer='zeros',
                 gamma_initializer='ones',
                 beta_regularizer=None,
                 gamma_regularizer=None,
                 beta_constraint=None,
                 gamma_constraint=None,
                 **kwargs):
        super(InstanceNormalization, self).__init__(**kwargs)
        # Masks pass through this layer unchanged.
        self.supports_masking = True
        self.axis = axis
        self.epsilon = epsilon
        self.center = center
        self.scale = scale
        # Resolve string identifiers into Keras objects up front.
        self.beta_initializer = initializers.get(beta_initializer)
        self.gamma_initializer = initializers.get(gamma_initializer)
        self.beta_regularizer = regularizers.get(beta_regularizer)
        self.gamma_regularizer = regularizers.get(gamma_regularizer)
        self.beta_constraint = constraints.get(beta_constraint)
        self.gamma_constraint = constraints.get(gamma_constraint)
    def build(self, input_shape):
        ndim = len(input_shape)
        # Axis 0 is the batch dimension; normalizing over it would mix samples.
        if self.axis == 0:
            raise ValueError('Axis cannot be zero')
        # A rank-2 input (batch, features) has nothing left to normalize over
        # once a feature axis is singled out.
        if (self.axis is not None) and (ndim == 2):
            raise ValueError('Cannot specify axis for rank 1 tensor')
        self.input_spec = InputSpec(ndim=ndim)
        # One scalar gamma/beta when normalizing everything; one per feature
        # when a feature axis is given.
        if self.axis is None:
            shape = (1,)
        else:
            shape = (input_shape[self.axis],)
        if self.scale:
            self.gamma = self.add_weight(shape=shape,
                                         name='gamma',
                                         initializer=self.gamma_initializer,
                                         regularizer=self.gamma_regularizer,
                                         constraint=self.gamma_constraint)
        else:
            self.gamma = None
        if self.center:
            self.beta = self.add_weight(shape=shape,
                                        name='beta',
                                        initializer=self.beta_initializer,
                                        regularizer=self.beta_regularizer,
                                        constraint=self.beta_constraint)
        else:
            self.beta = None
        self.built = True
    def call(self, inputs, training=None):
        input_shape = K.int_shape(inputs)
        # Reduce over every axis except the batch axis (and the feature axis,
        # when one was specified) -- per-instance statistics.
        reduction_axes = list(range(0, len(input_shape)))
        if self.axis is not None:
            del reduction_axes[self.axis]
        del reduction_axes[0]
        mean = K.mean(inputs, reduction_axes, keepdims=True)
        stddev = K.std(inputs, reduction_axes, keepdims=True) + self.epsilon
        normed = (inputs - mean) / stddev
        # Reshape gamma/beta so they broadcast along the feature axis only.
        broadcast_shape = [1] * len(input_shape)
        if self.axis is not None:
            broadcast_shape[self.axis] = input_shape[self.axis]
        if self.scale:
            broadcast_gamma = K.reshape(self.gamma, broadcast_shape)
            normed = normed * broadcast_gamma
        if self.center:
            broadcast_beta = K.reshape(self.beta, broadcast_shape)
            normed = normed + broadcast_beta
        return normed
    def get_config(self):
        # Serialize constructor arguments so the layer can be reloaded with
        # load_model(..., custom_objects={'InstanceNormalization': ...}).
        config = {
            'axis': self.axis,
            'epsilon': self.epsilon,
            'center': self.center,
            'scale': self.scale,
            'beta_initializer': initializers.serialize(self.beta_initializer),
            'gamma_initializer': initializers.serialize(self.gamma_initializer),
            'beta_regularizer': regularizers.serialize(self.beta_regularizer),
            'gamma_regularizer': regularizers.serialize(self.gamma_regularizer),
            'beta_constraint': constraints.serialize(self.beta_constraint),
            'gamma_constraint': constraints.serialize(self.gamma_constraint)
        }
        base_config = super(InstanceNormalization, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))
def make_gen_resblock(n_channels=64, window_size=8, dilation_rate=1, group_ix=0, layer_ix=0) :
    """Build a pre-activation residual block for the GAN generator.

    Returns a closure applying: IN -> ReLU -> Conv -> IN -> ReLU -> Conv,
    then adding the block input back (skip connection). Layer names encode
    group_ix/layer_ix so pre-trained weights can be matched by name.
    """
    prefix = 'gan_generator_resblock_' + str(group_ix) + '_' + str(layer_ix) + '_'
    #Layers are created once here and reused by the closure below.
    norm_a = InstanceNormalization(axis=-1, name=prefix + 'batch_norm_0')
    act_a = Lambda(lambda t: K.relu(t))
    conv_a = Conv2D(n_channels, (1, window_size), dilation_rate=dilation_rate, strides=(1, 1), padding='same', activation='linear', kernel_initializer='glorot_normal', name=prefix + 'conv_0')
    norm_b = InstanceNormalization(axis=-1, name=prefix + 'batch_norm_1')
    act_b = Lambda(lambda t: K.relu(t))
    conv_b = Conv2D(n_channels, (1, window_size), dilation_rate=dilation_rate, strides=(1, 1), padding='same', activation='linear', kernel_initializer='glorot_normal', name=prefix + 'conv_1')
    residual_add = Lambda(lambda t: t[0] + t[1], name=prefix + 'skip_1')
    def _resblock_func(input_tensor, training=True) :
        out = norm_a(input_tensor, training=training)
        out = act_a(out)
        out = conv_a(out)
        out = norm_b(out, training=training)
        out = act_b(out)
        out = conv_b(out)
        return residual_add([out, input_tensor])
    return _resblock_func
#GAN Generator Model definition
def load_generator_network_4_resblocks(batch_size, seq_length=205) :
    """Define the GAN-seeded sequence generator (4 residual blocks).

    Returns (_generator_func, _post_compile_func): the first maps a latent
    seed tensor to sequence logits; the second copies weights from a saved
    GAN generator into the built model (matching layers by name) and
    freezes them.
    """
    #Generator network parameters
    latent_size = 100
    n_resblocks = 4
    n_channels = 32
    #Policy network definition: dense stem, three transposed convolutions to
    #upsample to the sequence length, then a res-block trunk.
    policy_dense_0 = Dense(21 * 384, activation='linear', kernel_initializer='glorot_uniform', name='gan_generator_dense_0')
    policy_dense_0_reshape = Reshape((1, 21, 384))
    policy_deconv_0 = Conv2DTranspose(256, (1, 7), strides=(1, 2), padding='valid', activation='linear', kernel_initializer='glorot_normal', name='gan_generator_deconv_0')
    policy_deconv_1 = Conv2DTranspose(192, (1, 8), strides=(1, 2), padding='valid', activation='linear', kernel_initializer='glorot_normal', name='gan_generator_deconv_1')
    policy_deconv_2 = Conv2DTranspose(128, (1, 7), strides=(1, 2), padding='valid', activation='linear', kernel_initializer='glorot_normal', name='gan_generator_deconv_2')
    policy_conv_3 = Conv2D(n_channels, (1, 1), strides=(1, 1), padding='same', activation='linear', kernel_initializer='glorot_normal', name='gan_generator_conv_3')
    batch_norm_dense_0 = InstanceNormalization(axis=-1, name='gan_generator_batch_norm_dense_0')
    relu_dense_0 = Lambda(lambda x: K.relu(x))
    batch_norm_0 = InstanceNormalization(axis=-1, name='gan_generator_batch_norm_0')
    relu_0 = Lambda(lambda x: K.relu(x))
    batch_norm_1 = InstanceNormalization(axis=-1, name='gan_generator_batch_norm_1')
    relu_1 = Lambda(lambda x: K.relu(x))
    batch_norm_2 = InstanceNormalization(axis=-1, name='gan_generator_batch_norm_2')
    relu_2 = Lambda(lambda x: K.relu(x))
    skip_conv_0 = Conv2D(n_channels, (1, 1), strides=(1, 1), padding='same', activation='linear', kernel_initializer='glorot_normal', name='gan_generator_skip_conv_0')
    resblocks = []
    for layer_ix in range(n_resblocks) :
        resblocks.append(make_gen_resblock(n_channels=n_channels, window_size=8, dilation_rate=1, group_ix=0, layer_ix=layer_ix))
    last_block_conv = Conv2D(n_channels, (1, 1), strides=(1, 1), padding='same', activation='linear', kernel_initializer='glorot_normal', name='gan_generator_last_block_conv')
    skip_add = Lambda(lambda x: x[0] + x[1], name='gan_generator_skip_add')
    #Project to 4 channels (one per nucleotide) and move them to the last axis.
    final_conv = Conv2D(4, (1, 1), strides=(1, 1), padding='same', activation='linear', kernel_initializer='glorot_normal', name='gan_generator_final_conv')
    final_reshape = Lambda(lambda x: K.permute_dimensions(x, (0, 2, 3, 1)))
    def _generator_func(seed_input) :
        relu_dense_0_out = relu_dense_0(batch_norm_dense_0(policy_dense_0_reshape(policy_dense_0(seed_input)), training=True))
        policy_relu_0_out = relu_0(batch_norm_0(policy_deconv_0(relu_dense_0_out), training=True))
        policy_relu_1_out = relu_1(batch_norm_1(policy_deconv_1(policy_relu_0_out), training=True))
        policy_relu_2_out = relu_2(batch_norm_2(policy_deconv_2(policy_relu_1_out), training=True))
        policy_conv_3_out = policy_conv_3(policy_relu_2_out)
        #Connect group of res blocks
        output_tensor = policy_conv_3_out
        #Res block group 0
        skip_conv_0_out = skip_conv_0(output_tensor)
        for layer_ix in range(n_resblocks) :
            output_tensor = resblocks[layer_ix](output_tensor, training=True)
        #Last res block extr conv
        last_block_conv_out = last_block_conv(output_tensor)
        skip_add_out = skip_add([last_block_conv_out, skip_conv_0_out])
        #Final conv out
        final_conv_out = final_conv(skip_add_out)
        return final_reshape(final_conv_out)
    def _post_compile_func(generator_model, gan_path) :
        # Load the saved GAN and copy weights into the matching
        # 'gan_generator_*' layers ('policy_generator' prefix in the saved
        # file), then freeze them so GENESIS training cannot modify the GAN.
        saved_model = load_model(gan_path, custom_objects={'st_sampled_softmax': st_sampled_softmax, 'st_hardmax_softmax': st_hardmax_softmax, '<lambda>': lambda y_true, y_pred: y_pred, 'InstanceNormalization': InstanceNormalization})
        #generator_model.load_weights(gan_path)
        for layer in generator_model.layers :
            if "gan_generator" in layer.name :
                if len(layer.get_weights()) > 0 :
                    layer.set_weights(saved_model.get_layer('policy_generator' + layer.name[13:]).get_weights())
                layer.trainable = False
    return _generator_func, _post_compile_func
# +
#Define target isoform loss function
def get_isoform_loss(target_isos, isoform_start=80, isoform_end=115, use_start=0, use_end=70, cse_start=70, cse_end=76, dse_start=76, dse_end=125, similarity_weight=0.0, similarity_margin=0.5, punish_dn_cse=0.0, punish_up_c=0.0, punish_dn_c=0.0, punish_up_g=0.0, punish_dn_g=0.0, punish_up_aa=0.0, punish_dn_aa=0.0) :
    """Build the GENESIS loss function for target-isoform optimization.

    Combines (i) a symmetric sigmoid-KL term pulling the predicted isoform
    proportion towards the per-class target, (ii) weighted sequence
    composition penalties over the upstream (USE) and downstream (DSE)
    windows, and (iii) a margin-based similarity penalty between sampled
    sequence pairs weighted by similarity_weight.

    Returns a function mapping the GENESIS loss tensors to a scalar loss.
    """
    #Column vector of per-class target isoform proportions.
    target_iso = np.zeros((len(target_isos), 1))
    for i, t_iso in enumerate(target_isos) :
        target_iso[i, 0] = t_iso
    #Per-region composition penalties ('up' = USE window, 'dn' = DSE window).
    punish_dn_cse_func = get_punish_cse(pwm_start=74, pwm_end=dse_end)
    punish_up_c_func = get_punish_c(pwm_start=use_start, pwm_end=use_end)
    punish_dn_c_func = get_punish_c(pwm_start=dse_start, pwm_end=dse_end)
    punish_up_g_func = get_punish_g(pwm_start=use_start, pwm_end=use_end)
    #BUGFIX: this previously used the upstream window (use_start/use_end),
    #making punish_dn_g a duplicate of punish_up_g; every other *_dn_*
    #penalty targets the downstream (DSE) window.
    punish_dn_g_func = get_punish_g(pwm_start=dse_start, pwm_end=dse_end)
    punish_up_aa_func = get_punish_aa(pwm_start=use_start, pwm_end=use_end)
    punish_dn_aa_func = get_punish_aa(pwm_start=dse_start, pwm_end=dse_end)
    pwm_sample_entropy_func = get_pwm_margin_sample_entropy_masked(pwm_start=20, pwm_end=200, margin=similarity_margin, shift_1_nt=True)
    #Exclude positions 66-76 (around the default CSE hexamer) from the
    #similarity mask so the shared motif does not count as similarity.
    extra_sim = np.ones((len(target_isos), 1, 205, 4, 1))
    for i in range(len(target_isos)) :
        extra_sim[i, 0, 70-4:76, :, 0] = 0.0
    def loss_func(loss_tensors) :
        _, _, _, sequence_class, pwm_logits_1, pwm_logits_2, pwm_1, pwm_2, sampled_pwm_1, sampled_pwm_2, mask, sampled_mask, seed_1, seed_2, iso_pred, cut_pred, iso_score_pred, cut_score_pred = loss_tensors
        #Create target isoform with sample axis
        iso_targets = K.constant(target_iso)
        iso_true = K.gather(iso_targets, sequence_class[:, 0])
        iso_true = K.tile(K.expand_dims(iso_true, axis=-1), (1, K.shape(sampled_pwm_1)[1], 1))
        #Specify costs
        iso_loss = 2.0 * K.mean(symmetric_sigmoid_kl_divergence(iso_true, iso_pred), axis=1)
        seq_loss = 0.0
        seq_loss += punish_dn_cse * K.mean(punish_dn_cse_func(sampled_pwm_1), axis=1)
        seq_loss += punish_up_c * K.mean(punish_up_c_func(sampled_pwm_1), axis=1)
        seq_loss += punish_dn_c * K.mean(punish_dn_c_func(sampled_pwm_1), axis=1)
        seq_loss += punish_up_g * K.mean(punish_up_g_func(sampled_pwm_1), axis=1)
        seq_loss += punish_dn_g * K.mean(punish_dn_g_func(sampled_pwm_1), axis=1)
        seq_loss += punish_up_aa * K.mean(punish_up_aa_func(sampled_pwm_1), axis=1)
        seq_loss += punish_dn_aa * K.mean(punish_dn_aa_func(sampled_pwm_1), axis=1)
        #Per-class similarity mask, tiled across the sample axis.
        extra_sims = K.constant(extra_sim)
        extra_sim_mask = K.gather(extra_sims, sequence_class[:, 0])
        extra_sim_mask = K.tile(extra_sim_mask, (1, K.shape(sampled_pwm_1)[1], 1, 1, 1))
        entropy_loss = similarity_weight * K.mean(pwm_sample_entropy_func(sampled_pwm_1, sampled_pwm_2, sampled_mask * extra_sim_mask), axis=1)
        #Compute total loss
        total_loss = iso_loss + seq_loss + entropy_loss
        return total_loss
    return loss_func
class EpochVariableCallback(Callback):
    """Keras callback that updates a backend variable after every epoch.

    my_variable: Keras backend variable to update.
    my_func: callable (current_value, epoch) -> new_value.
    """
    def __init__(self, my_variable, my_func):
        # Initialize the Callback base class (sets up model/params bookkeeping).
        super(EpochVariableCallback, self).__init__()
        self.my_variable = my_variable
        self.my_func = my_func
    def on_epoch_end(self, epoch, logs=None):
        # logs=None instead of a mutable {} default, per Keras convention.
        K.set_value(self.my_variable, self.my_func(K.get_value(self.my_variable), epoch))
#Function for running GENESIS
def run_genesis(sequence_templates, loss_func, library_contexts, model_path, get_gan_func, gan_path, batch_size=32, n_samples=1, n_epochs=10, steps_per_epoch=100) :
    """Assemble and train the GENESIS pipeline for the given templates.

    Builds the GAN-seeded generator, loads pre-trained GAN weights, hooks
    the saved APARENT predictor onto the generator output, wraps both in a
    self-minimizing loss model, and trains it. Returns
    (generator, sample_predictor, train_history).
    """
    #Build the generator network function and its GAN-weight-loading hook.
    gan_func, post_compile_gan_func = get_gan_func(batch_size, seq_length=len(sequence_templates[0]))
    load_generator_network = get_load_generator_network_func(gan_func)
    #Build Generator Network
    _, generator = build_generator(batch_size, len(sequence_templates[0]), load_generator_network, n_classes=len(sequence_templates), n_samples=n_samples, sequence_templates=sequence_templates, batch_normalize_pwm=False)
    #Post compile generator, load pre-trained GAN weights
    post_compile_gan_func(generator, gan_path)
    #Build Predictor Network and hook it on the generator PWM output tensor
    _, sample_predictor = build_predictor(generator, load_saved_predictor(model_path, library_contexts=library_contexts), batch_size, n_samples=n_samples, eval_mode='sample')
    #Build Loss Model (In: Generator seed, Out: Loss function)
    _, loss_model = build_loss_model(sample_predictor, loss_func)
    #Specify Optimizer to use
    opt = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999)
    #Compile Loss Model (Minimize self): the model's output IS the loss.
    loss_model.compile(loss=lambda true, pred: pred, optimizer=opt)
    #Fit Loss Model (dummy target; the generator samples its own inputs)
    train_history = loss_model.fit(
        [], np.ones((1, 1)),
        epochs=n_epochs,
        steps_per_epoch=steps_per_epoch
    )
    return generator, sample_predictor, train_history
# +
#Specify file path to the pre-trained APARENT predictor network
save_dir = os.path.join(os.getcwd(), '../../../../aparent/saved_models')
saved_predictor_model_name = 'aparent_plasmid_iso_cut_distalpas_all_libs_no_sampleweights_sgd.h5'
saved_predictor_model_path = os.path.join(save_dir, saved_predictor_model_name)
#Specify file path to the pre-trained GAN generator network
save_dir = os.path.join(os.getcwd(), '../gan/saved_models')
saved_gan_name = 'gan_simple_gen_resnet_4_resblocks_disc_conv_max_pool_hl_wgan_multisample_instancenorm_generator.h5'
saved_gan_path = os.path.join(save_dir, saved_gan_name)
# +
#Maximize isoform proportions for all native minigene libraries
#One entry per optimization objective: fixed flanks, two AATAAA hexamers,
#and free ('N') regions the generator may optimize.
sequence_templates = [
    'TCCCTACACGACGCTCTTCCGATCTNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNAATAAANNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNAATAAANNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNCGCCTAACCCTAAGCAGATTCTTCATGCAATTG'
]
library_contexts = [
    'simple'
]
#Target proximal isoform proportion per objective.
target_isos = [
    1.0
]
#Margin used by the sample-similarity (diversity) penalty.
margin_similarities = [
    0.5
]
# +
#Train APA Cleavage GENESIS Network
print("Training GENESIS")
#Number of PWMs to generate per objective
batch_size = 32
#Number of One-hot sequences to sample from the PWM at each grad step
n_samples = 10
#Number of epochs per objective to optimize
n_epochs = 10
#Number of steps (grad updates) per epoch
steps_per_epoch = 500
get_gan_func = load_generator_network_4_resblocks
gan_save_suffix = '_simple_gen_resnet_4_resblocks_disc_conv_max_pool_hl_wgan_multisample_instancenorm_multisample_descent'
train_history = None
for class_i in range(len(sequence_templates)) :
    print("Target iso = " + str(target_isos[class_i]))
    lib_name = library_contexts[class_i].split("_")[0]
    print("Library context = " + str(lib_name))
    #Start each objective from a fresh TF graph/session.
    K.clear_session()
    loss = get_isoform_loss(
        [target_isos[class_i]],
        use_start=25,
        use_end=70,
        cse_start=70,
        cse_end=76,
        dse_start=76,
        dse_end=172,
        similarity_weight=5.0,
        similarity_margin=margin_similarities[class_i],
        punish_dn_cse=1.0,
        punish_up_c=0.0015,
        punish_dn_c=0.0001,
        punish_up_g=0.0001,
        punish_dn_g=0.0001,
        punish_up_aa=0.00025,
        punish_dn_aa=0.005
    )
    genesis_generator, genesis_predictor, train_history = run_genesis([sequence_templates[class_i]], loss, [library_contexts[class_i]], saved_predictor_model_path, get_gan_func, saved_gan_path, batch_size, n_samples, n_epochs, steps_per_epoch)
    #Replace the random-sampling lambda layers with identity so the saved
    #models consume externally supplied classes/seeds at inference time
    #-- NOTE(review): relies on these layer names existing in both models.
    genesis_generator.get_layer('lambda_rand_sequence_class').function = lambda inp: inp
    genesis_generator.get_layer('lambda_rand_input_1').function = lambda inp: inp
    genesis_generator.get_layer('lambda_rand_input_2').function = lambda inp: inp
    genesis_predictor.get_layer('lambda_rand_sequence_class').function = lambda inp: inp
    genesis_predictor.get_layer('lambda_rand_input_1').function = lambda inp: inp
    genesis_predictor.get_layer('lambda_rand_input_2').function = lambda inp: inp
    # Save model and weights
    save_dir = 'saved_models'
    if not os.path.isdir(save_dir):
        os.makedirs(save_dir)
    model_name = 'genesis_target_isoform_' + str(target_isos[class_i]).replace(".", "") + '_gan' + gan_save_suffix + '_' + str(lib_name) + '_marginsim_' + str(margin_similarities[class_i]).replace(".", "") + '_generator.h5'
    model_path = os.path.join(save_dir, model_name)
    genesis_generator.save(model_path)
    print('Saved trained model at %s ' % model_path)
    model_name = 'genesis_target_isoform_' + str(target_isos[class_i]).replace(".", "") + '_gan' + gan_save_suffix + '_' + str(lib_name) + '_marginsim_' + str(margin_similarities[class_i]).replace(".", "") + '_predictor.h5'
    model_path = os.path.join(save_dir, model_name)
    genesis_predictor.save(model_path)
    print('Saved trained model at %s ' % model_path)
# +
#Plot training loss as a function of training epochs
#(uses train_history from the last objective trained above)
f = plt.figure(figsize=(6, 4))
l1 = plt.plot(np.arange(n_epochs), train_history.history['loss'], linewidth=2, color='darkgreen', linestyle='--')
plt.xlim(0, n_epochs - 1)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.xlabel("Epochs", fontsize=14)
plt.ylabel("Training Loss", fontsize=14)
plt.tight_layout()
plt.show()
# +
#Load GENESIS models and predict sample sequences
model_names = [
    'genesis_target_isoform_10_gan_simple_gen_resnet_4_resblocks_disc_conv_max_pool_hl_wgan_multisample_instancenorm_multisample_descent_simple_marginsim_05',
]
sequence_templates = [
    'TCCCTACACGACGCTCTTCCGATCTNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNAATAAANNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNAATAAANNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNCGCCTAACCCTAAGCAGATTCTTCATGCAATTG'
]
library_contexts = [
    'simple'
]
target_isos = [
    1.0
]
class_i = 0
print("Target iso = " + str(target_isos[class_i]))
save_dir = os.path.join(os.getcwd(), 'saved_models')
model_name = model_names[class_i] + '_predictor.h5'
model_path = os.path.join(save_dir, model_name)
#Custom objects are needed to deserialize the straight-through sampling ops
#and the custom InstanceNormalization layer.
predictor = load_model(model_path, custom_objects={'st_sampled_softmax': st_sampled_softmax, 'st_hardmax_softmax': st_hardmax_softmax, 'InstanceNormalization': InstanceNormalization})#, '<lambda>': lambda y_true, y_pred: y_pred
n = 32
sequence_class = np.array([0] * n).reshape(-1, 1) #np.random.uniform(-6, 6, (n, 1)) #
noise_1 = np.random.uniform(-1, 1, (n, 100))
noise_2 = np.random.uniform(-1, 1, (n, 100))
pred_outputs = predictor.predict([sequence_class, noise_1, noise_2], batch_size=32)
_, _, _, optimized_pwm, _, _, _, _, _, gan_seeds, _, iso_pred, cut_pred, _, _ = pred_outputs
#Plot one PWM sequence logo per optimized objective (Experiment 'Punish A-runs')
#Isoform proportion = summed cleavage probability in the window 80:115.
for pwm_index in range(5) :
    sequence_template = sequence_templates[class_i]
    pwm = np.expand_dims(optimized_pwm[pwm_index, :, :, 0], axis=0)
    cut = np.expand_dims(cut_pred[pwm_index, 0, :], axis=0)
    iso = np.expand_dims(np.sum(cut[:, 80: 115], axis=-1), axis=-1)
    plot_seqprop_logo(pwm, iso, cut, annotate_peaks='max', sequence_template=sequence_template, figsize=(12, 1.5), width_ratios=[1, 8], logo_height=0.8, usage_unit='fraction', plot_start=70-49, plot_end=76+49, save_figs=False, fig_name='target_isoform_genesis_simple_' + str(target_isos[class_i]).replace(".", "") + "_pwm_index_" + str(pwm_index), fig_dpi=150)
    plot_seqprop_logo(pwm, iso, cut, annotate_peaks='max', sequence_template=sequence_template, figsize=(12, 1), width_ratios=[1, 7], logo_height=0.8, usage_unit='fraction', plot_start=20, plot_end=177, save_figs=False, fig_name='target_isoform_genesis_simple_' + str(target_isos[class_i]).replace(".", "") + "_pwm_index_" + str(pwm_index), fig_dpi=150)
# +
#Load GAN generator
# aparent is the APARENT isoform predictor used to score generated sequences below.
aparent = load_model(saved_predictor_model_path)
gan_generator = load_model(saved_gan_path, custom_objects={'st_sampled_softmax': st_sampled_softmax, 'st_hardmax_softmax': st_hardmax_softmax, '<lambda>': lambda y_true, y_pred: y_pred, 'InstanceNormalization': InstanceNormalization})
sequence_templates_gan = [
'TCCCTACACGACGCTCTTCCGATCTNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNCGCCTAACCCTAAGCAGATTCTTCATGCAATTG'
]
# NOTE(review): presumably fixes the non-N template bases inside the generator so
# only the N positions remain free — confirm in initialize_sequence_templates.
initialize_sequence_templates(gan_generator, sequence_templates_gan)
# +
n = int(1000 / 32) * 32 + 32  # round up to a multiple of the batch size
fake_libs = np.zeros((n, 13))
fake_libs[:, 5] = 1.
fake_dpas = np.ones((n, 1))
#Generate non-conditioned sequences from GAN
# fix: np.int was removed in NumPy 1.24; plain int is the equivalent dtype.
gan_classes = np.zeros((n, 1), dtype=int)
gan_seeds = np.random.uniform(low=-1., high=1., size=(n, 100))
_, _, gan_sequences = gan_generator.predict([gan_classes, gan_seeds], batch_size=32)
# Proximal isoform proportion = cleavage probability mass in positions 80-115.
gan_isoform_preds = np.sum(aparent.predict([np.moveaxis(gan_sequences, 1, -1), fake_libs, fake_dpas], batch_size=32)[1][:, 80: 115], axis=1)
gan_isoform_logits = np.log(gan_isoform_preds / (1. - gan_isoform_preds))
#Generate DEN-conditioned sequences from GAN
sequence_classes = np.zeros((n, 1), dtype=int)
noise_1 = np.random.uniform(low=-1., high=1., size=(n, 100))
noise_2 = np.random.uniform(low=-1., high=1., size=(n, 100))
# Output index 9 of the predictor is the GAN seed tensor (see unpacking above).
den_seeds = predictor.predict([sequence_classes, noise_1, noise_2], batch_size=32)[9]
# NOTE(review): gan_classes (not sequence_classes) is passed here — both are
# all-zero so the result is identical, but confirm intent.
_, _, den_sequences = gan_generator.predict([gan_classes, den_seeds], batch_size=32)
den_isoform_preds = np.sum(aparent.predict([np.moveaxis(den_sequences, 1, -1), fake_libs, fake_dpas], batch_size=32)[1][:, 80: 115], axis=1)
den_isoform_logits = np.log(den_isoform_preds / (1. - den_isoform_preds))
#Calculate histogram statistics
logit_range = [-4, 8]
gan_logit_hist, bin_edges = np.histogram(gan_isoform_logits, bins=50, range=logit_range, density=True)
den_logit_hist, _ = np.histogram(den_isoform_logits, bins=50, range=logit_range, density=True)
f = plt.figure(figsize=(6, 4))
bar_width = np.abs(bin_edges[1] - bin_edges[0])
# Bars are centered on each histogram bin.
plt.bar(bin_edges[1:] - bar_width / 2., gan_logit_hist, width=bar_width, color='darkorange', edgecolor='black', linewidth=2, label='Uniform Seeds')
plt.bar(bin_edges[1:] - bar_width / 2., den_logit_hist, width=bar_width, color='darkgreen', edgecolor='black', linewidth=2, label='DEN Seeds')
plt.xlim(logit_range[0], logit_range[1])
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.xlabel("Predicted Isoform Logit", fontsize=14)
plt.ylabel("Sequences", fontsize=14)
plt.legend(fontsize=14, framealpha=0.5)
plt.tight_layout()
plt.show()
# +
#Plot diversity grid
# Collapse the one-hot channel axis to integer nucleotide codes (argmax + 1)
# in a single vectorized pass instead of an O(n*205) Python double loop.
# assumes den_sequences is (n, 1, >=205, 4), matching the original loop bounds.
flat_sequences = np.argmax(den_sequences[:, 0, :205, :], axis=-1).astype(np.float64) + 1
flat_sequences = flat_sequences[:200, 20: 177]
# One color per nucleotide code (1-4); 0 is unused.
cmap = colors.ListedColormap(['red', 'blue', 'orange', 'darkgreen'])
bounds=[0, 1, 2, 3, 4, 5]
norm = colors.BoundaryNorm(bounds, cmap.N)
f = plt.figure(figsize=(8, 12))
plt.imshow(flat_sequences[:200, :], aspect='equal', interpolation='nearest', origin='lower', cmap=cmap, norm=norm)
plt.xticks([], [])
plt.yticks([], [])
plt.xlim(0, 177 - 20)
plt.ylim(0, 200)
plt.tight_layout()
plt.show()
# -
| analysis/apa/ganception/apa_max_isoform_genesis_wgan_simple_multisample_descent.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Tensorflow
# %matplotlib inline
import warnings
warnings.simplefilter('ignore', RuntimeWarning)
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import seaborn as sns
from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
# ## Working with tensors
#
# Almost exactly like numpy arrays.
tf.constant([1., 2., 3.])
x = tf.Variable([[1.,2.,3.], [4.,5.,6.]])
x.shape
x.dtype
# ### Conversion to numpy
x.numpy()
# ### Indexing
x[:, :2]
# ### Assignment
# Variables are mutated in place with .assign (plain = would rebind the name).
x[0,:].assign([3.,2.,1.])
x
# ### Reductions
tf.reduce_mean(x, axis=0)
tf.reduce_sum(x, axis=1)
# ### Broadcasting
x + 10
x * 10
x - tf.reduce_mean(x, axis=1)[:, tf.newaxis]
# ### Matrix operations
x @ tf.transpose(x)
# ### Ufuncs
tf.exp(x)
tf.sqrt(x)
# ### Random numbers
X = tf.random.normal(shape=(10,4))
y = tf.random.normal(shape=(10,1))
# ### Linear algebra
tf.linalg.lstsq(X, y)
# ### Vectorization
# lstsq broadcasts over the leading batch axis: 1000 independent solves.
X = tf.random.normal(shape=(1000,10,4))
y = tf.random.normal(shape=(1000,10,1))
tf.linalg.lstsq(X, y)
# ### Automatic differentiation
def f(x, y):
    """Quadratic demo function x^2 + 2*y^2 + 3*x*y used for the autodiff examples."""
    squared_x = x**2
    squared_y = 2*y**2
    cross_term = 3*x*y
    return squared_x + squared_y + cross_term
# #### Gradient
x, y = tf.Variable(1.0), tf.Variable(2.0)
with tf.GradientTape() as tape:
    z = f(x, y)
# dz/dx = 2x + 3y, dz/dy = 4y + 3x -> [8., 11.] at (1, 2)
tape.gradient(z, [x,y])
# #### Hessian
# persistent=True allows multiple gradient() calls — one per first derivative.
with tf.GradientTape(persistent=True) as H_tape:
    with tf.GradientTape() as J_tape:
        z = f(x, y)
    Js = J_tape.gradient(z, [x,y])
Hs = [H_tape.gradient(J, [x,y]) for J in Js]
# Persistent tapes hold resources until explicitly deleted.
del H_tape
np.array(Hs)
# ## Tensorflow Data
#
# Tensorflow provides a data API to allow it to work seamlessly with large data sets that may not fit into memory. This results in `Tensorflow Dataset (TFDS)` objects that handle multi-threading, queuing, batching and pre-fetching.
#
# You can think of TFDS as being a smart generator from data. Generally, you first create a TFDS from data using `from_tensor_slices` or from data in the file system or a relational database. Then you apply `transforms` to the data to process it, before handing it off to, say, a deep learning method.
# ### Using `from_tensor_slices`
#
# You can pass in a list, dict, `numpy` array, or Tensorflow tensor.
x = np.arange(6)
# One dataset element per entry of x.
ds = tf.data.Dataset.from_tensor_slices(x)
ds
for item in ds.take(3):
    print(item)
# ### Transformations
#
# Once you have a TFDS, you can chain its transformation methods to process the data.
ds = ds.map(lambda x: x**2).repeat(3)
for item in ds.take(3):
    print(item)
# shuffle draws from a 4-element buffer; batch groups 5 elements per tensor.
ds = ds.shuffle(buffer_size=4, seed=0).batch(5)
for item in ds.take(3):
    print(item)
# #### Prefetching is an optimization to preload data in parallel
ds.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
# ### Reading from files
#
# You can also read from CSV, text files or SQLite database and transform in the same way.
# All 10 CSV columns are parsed as float32; the header row is skipped.
ds = tf.data.experimental.CsvDataset(
    'data/X_train_unscaled.csv',
    record_defaults=[tf.float32]*10,
    header=True
)
for item in ds.take(1):
    print(item)
| notebooks/T10A_TF2_Building_Blocks.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2 (SageMath)
# language: python
# name: python2
# ---
# # Lesson 2: In-class exercises
# ---
# **Instructions: For each problem, write code in the provided code block. Don't forget to run your code to make sure it works.**
# ---
# **1. Warm up review for lesson 2**
#
# Write code to accomplish the following tasks using the concepts from Lesson 2. These exercises will use the following variables (make sure to run this block of code first to initiate these variables):
# RUN THIS BLOCK FIRST!
# Shared inputs for all exercises below.
num1 = 3.14159
num2 = 6
num3 = 100
motif = "GTAAGTC"
# **(A)** Check if `num1` is greater than twice `num2`. Print a different message depending on the outcome.
# **(B)** Print a random integer between `num2` and `num3`.
# **(C)** Check if the length of the string stored in `motif` is less than or equal to 5. Print a different message depending on the outcome.
# **(D)** Round `num1` to the nearest hundredth.
# ---
# **2. Quadratic formula: checking for negative roots**
#
# Recall that when calculating the quadratic formula, you will get an error if `b^2-4ac` is negative, since you can't take the square root of a negative number.
#
# Edit your quadratic formula program from Lesson 1 so that it checks for this potential error before it occurs. If the error is going to occur, print an informative message saying "non-real answer" or similar, and do not calculate the values of x. If the error is not going to occur, continue on to calculating and printing the values of x as you did before.
# ---
# # Homework exercise (10 Points)
#
# ---
#
# **Motif Checker**
#
# Prompt the user for a DNA sequence and a motif to search for using `raw_input()` (the sequence and motif can be any string of A/T/G/C's; see below for examples).
#
# **(A)** Find the length of the motif and length of the DNA sequence and make sure the motif is shorter than the sequence. If it is not, print an informative error message. **(5 Points)**
# **(B)** Adding to your code from part (A): If and only if the motif is shorter than the sequence, go on to check if the motif can be found somewhere within the sequence (hint: use "`in`"). Print a message saying whether it was found or not. **(5 Points)**
# > **Example input:**
#
# > **Sequence:** AGCTAGCCTGCTAGAAATCGATTGGCTAGCAATCTTATTGTGTTCTACG <br>
# > **Motif:** ATG (This should pass the check from part (a) but not part (b))
#
# > **Sequence:** AGCTAGCCTGCTAGAAATCGATTGGCTAGCAATCTTATTGTGTTCTACG <br>
# > **Motif:** ATCGA (should pass both checks)
#
# > **Sequence:** CTAGCC <br>
# > **Motif:** ATGGCTAGCTA (code should not pass the check from part (a))
| 98_Archive/24_Python-II/Lesson2_exercises.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3
# kernelspec:
# display_name: Python 3.9.6 64-bit
# name: python3
# ---
# # Data Setup
#
# In this notebook, we demonstrate how to:
#
# setup time series data for this module
# visualize the data
# The data in this example is taken from the GEFCom2014 forecasting competition1. It consists of 3 years of hourly electricity load and temperature values between 2012 and 2014.
#
# 1<NAME>, <NAME>, <NAME>, <NAME>, <NAME> and <NAME>, "Probabilistic energy forecasting: Global Energy Forecasting Competition 2014 and beyond", International Journal of Forecasting, vol.32, no.3, pp 896-913, July-September, 2016.
import os
import matplotlib.pyplot as plt
from common.utils import load_data
# # %matplotlib inline
data_dir = './data'
# GEFCom2014 hourly electricity data; keep only the 'load' column.
energy = load_data(data_dir)[['load']]
energy.head()
# Full three-year series.
energy.plot(y='load', subplots=True, figsize=(25, 8), fontsize=12)
plt.xlabel('timestamp', fontsize=12)
plt.ylabel('load', fontsize=12)
plt.show()
# Zoom into a single week in July 2014.
energy['2014-07-01':'2014-07-07'].plot(y='load', subplots=True, figsize=(25, 8), fontsize=12)
plt.xlabel('timestamp', fontsize=12)
plt.ylabel('load', fontsize=12)
plt.show()
| 7-TimeSeries/1-Introduction/working/notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # OpenCV Filters Webcam
#
# In this notebook, several filters will be applied to webcam images.
#
# Those input sources and applied filters will then be displayed either directly in the notebook or on HDMI output.
#
# To run all cells in this notebook a webcam and HDMI output monitor are required.
# ## 1. Start HDMI output
# ### Step 1: Load the overlay
from pynq import Overlay
# Program the FPGA with the base overlay (required before using its peripherals).
Overlay("base.bit").download()
# ### Step 2: Initialize HDMI I/O
from pynq.drivers.video import HDMI
hdmi_out = HDMI('out')
hdmi_out.start()
# ## 2. Applying OpenCV filters on Webcam input
# ### Step 1: Initialize Webcam and set HDMI Out resolution
# monitor configuration: 640*480 @ 60Hz
hdmi_out.mode(HDMI.VMODE_640x480)
hdmi_out.start()
# monitor (output) frame buffer size
frame_out_w = 1920
frame_out_h = 1080
# camera (input) configuration
frame_in_w = 640
frame_in_h = 480
# ### Step 2: Initialize camera from OpenCV
# +
from pynq.drivers.video import Frame
import cv2
# Device 0 is the first attached USB webcam.
videoIn = cv2.VideoCapture(0)
videoIn.set(cv2.CAP_PROP_FRAME_WIDTH, frame_in_w);
videoIn.set(cv2.CAP_PROP_FRAME_HEIGHT, frame_in_h);
print("capture device is open: " + str(videoIn.isOpened()))
# -
# ### Step 3: Send webcam input to HDMI output
# +
import numpy as np
ret, frame_vga = videoIn.read()
if (ret):
    # Blit the 640x480 camera frame into the top-left of a black 1080p buffer.
    frame_1080p = np.zeros((1080,1920,3)).astype(np.uint8)
    frame_1080p[0:480,0:640,:] = frame_vga[0:480,0:640,:]
    hdmi_out.frame_raw(bytearray(frame_1080p.astype(np.int8)))
else:
    raise RuntimeError("Error while reading from camera.")
# -
# ### Step 4: Edge detection
# Detecting edges on webcam input and display on HDMI out.
# +
import time
# Time 20 frames end-to-end: capture -> Laplacian filter -> HDMI output.
frame_1080p = np.zeros((1080,1920,3)).astype(np.uint8)
num_frames = 20
readError = 0
start = time.time()
for i in range (num_frames):
    # read next image
    ret, frame_vga = videoIn.read()
    if (ret):
        laplacian_frame = cv2.Laplacian(frame_vga, cv2.CV_8U)
        # copy to frame buffer / show on monitor reorder RGB (HDMI = GBR)
        frame_1080p[0:480,0:640,[0,1,2]] = laplacian_frame[0:480,0:640,[1,0,2]]
        hdmi_out.frame_raw(bytearray(frame_1080p.astype(np.int8)))
    else:
        readError += 1
end = time.time()
# Failed reads are excluded from the FPS count.
print("Frames per second: " + str((num_frames-readError) / (end - start)))
print("Number of read errors: " + str(readError))
# -
# ### Step 5: Canny edge detection
# Detecting edges on webcam input and display on HDMI out.
#
# Any edges with intensity gradient more than maxVal are sure to be edges and those below minVal are sure to be non-edges, so discarded. Those who lie between these two thresholds are classified edges or non-edges based on their connectivity. If they are connected to “sure-edge” pixels, they are considered to be part of edges. Otherwise, they are also discarded.
# +
# Time 20 frames end-to-end: capture -> Canny edge detection -> HDMI output.
frame_1080p = np.zeros((1080,1920,3)).astype(np.uint8)
num_frames = 20
# fix: readError was never initialized in this cell — it raised NameError if the
# Laplacian cell was skipped, and carried over stale counts otherwise.
readError = 0
start = time.time()
for i in range (num_frames):
    # read next image
    ret, frame_webcam = videoIn.read()
    if (ret):
        frame_canny = cv2.Canny(frame_webcam,100,110)
        # Replicate the single-channel edge map into all three color channels.
        frame_1080p[0:480,0:640,0] = frame_canny[0:480,0:640]
        frame_1080p[0:480,0:640,1] = frame_canny[0:480,0:640]
        frame_1080p[0:480,0:640,2] = frame_canny[0:480,0:640]
        # copy to frame buffer / show on monitor
        hdmi_out.frame_raw(bytearray(frame_1080p.astype(np.int8)))
    else:
        readError += 1
end = time.time()
print("Frames per second: " + str((num_frames-readError) / (end - start)))
print("Number of read errors: " + str(readError))
# -
# ### Step 6: Show results
# Now use matplotlib to show filtered webcam input inside notebook
# +
# %matplotlib inline
from matplotlib import pyplot as plt
import numpy as np
plt.figure(1, figsize=(10, 10))
# Replicate the last Canny edge map into an RGB image for display.
frame_vga = np.zeros((480,640,3)).astype(np.uint8)
frame_vga[0:480,0:640,0] = frame_canny[0:480,0:640]
frame_vga[0:480,0:640,1] = frame_canny[0:480,0:640]
frame_vga[0:480,0:640,2] = frame_canny[0:480,0:640]
# Channel reorder [2,1,0]: OpenCV's BGR -> matplotlib's RGB.
plt.imshow(frame_vga[:,:,[2,1,0]])
plt.show()
# -
# ### Step 7: Release camera and HDMI
# Release the camera and HDMI resources when done.
videoIn.release()
hdmi_out.stop()
del hdmi_out
| Pynq-Z1/notebooks/examples/opencv_filters_webcam.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
import pandas as pd
import matplotlib.pyplot as plt
cwd = os.getcwd()
files = os.listdir(cwd)
files
bigdf = pd.read_csv('./TAZ.csv')
bigdf.drop(columns=['Unnamed: 0'], inplace=True)
bigdf.tail()
# Drop rows whose speaker field contains annotation characters (brackets,
# punctuation, "and", episode labels, ...); unary '-' negates the boolean mask.
bigdf = bigdf[-bigdf.speaker.str.contains('\[|\{|\,|\.|\(|\"|and|Adventure|\&|TAZ|\*|1|/')]
bigdf.speaker = bigdf.speaker.str.strip()
filter_df = bigdf[bigdf.groupby('speaker')['speaker'].transform('count').ge(2)].groupby('speaker').count().sort_values(by='lines', ascending=False)
#filter out all of the lines from speakers that have < 2 lines
bigdf = bigdf[bigdf.groupby('speaker')['speaker'].transform('count').ge(2)]
filter_df.lines.to_csv('./line_count.csv')
bigdf = bigdf.sort_values(by=['episode', 'line_num'])
bigdf.fillna('', inplace=True)
# Episodes with non-numeric ids are specials ("nontrad"); the 2016/2017 episodes
# are also treated as specials below.
nontrad = bigdf[-bigdf.episode.str.isnumeric()]
nontrad.groupby('episode').line_num.count().mean()
normal = bigdf[bigdf.episode.str.isnumeric()]
nontrad = nontrad.append(bigdf[(bigdf.episode == '2017') | (bigdf.episode == '2016')])
normal = normal[-((normal.episode == '2016') | (normal.episode == '2017'))]
normal.episode = normal.episode.astype(int)
normal = normal.sort_values(by=['episode', 'line_num'])
# +
# Raw line counts per episode for each of the four hosts.
glines = normal[normal.speaker == 'Griffin'].groupby('episode').lines.count()
tlines = normal[normal.speaker == 'Travis'].groupby('episode').lines.count()
jlines = normal[normal.speaker == 'Justin'].groupby('episode').lines.count()
clines = normal[normal.speaker == 'Clint'].groupby('episode').lines.count()
plt.plot(glines)
plt.plot(tlines)
plt.plot(jlines)
plt.plot(clines)
plt.legend(['Griffin', 'Travis', 'Justin', 'Clint'])
# -
sumlines = normal.groupby('episode').lines.count()
# +
# Same plot, normalized by each episode's total line count.
g_normal = glines/sumlines
t_normal = tlines/sumlines
j_normal = jlines/sumlines
c_normal = clines/sumlines
plt.plot(g_normal)
plt.plot(t_normal)
plt.plot(j_normal)
plt.plot(c_normal)
plt.legend(['Griffin', 'Travis', 'Justin', 'Clint'])
# -
normal.groupby(['episode', 'speaker']).lines.count().unstack().plot(kind='line')
# Restrict to speakers with at least 100 lines overall.
normal[normal.groupby('speaker')['speaker'].transform('count').ge(100)].groupby(['episode', 'speaker']).lines.count().unstack().plot(kind='line')
normal.groupby('episode').lines.count().plot(kind='line')
# Flag in-fiction character lines vs. the four hosts speaking as themselves.
characters = normal[-normal.speaker.isin(['Griffin', 'Travis', 'Justin', 'Clint'])]
normal['character'] = True
normal.loc[normal.speaker.isin(['Griffin', 'Travis', 'Justin', 'Clint']),'character'] = False
normal.head()
# Ratio of character lines to host lines per episode.
(normal[normal.character].groupby('episode').lines.count()/normal[-normal.character].groupby('episode').lines.count()).plot(kind='line')
normal.groupby(['episode'])
words = normal.groupby('speaker').lines.sum()
words = words.str.split()
normal['speaker'].value_counts().head(20).plot(kind='bar')
plt.title('Number of lines per character')
top30 = normal.speaker.value_counts().head(30).index
top4 = normal.speaker.value_counts().head(4).index
top_lines = normal[normal.speaker.isin(top4)]
# Concatenate each top speaker's lines into one string per speaker.
top_lines = top_lines.groupby('speaker').lines.sum()
top_lines.head()
import spacy
nlp = spacy.load('en_core_web_sm')
# NOTE(review): top_lines[0] positional-indexes a string-indexed Series (deprecated),
# and iterating the selected string feeds single characters to nlp() — confirm intent.
docdf = pd.Series([nlp(instance) for instance in top_lines[0]])
docdf.head()
# All-pairs document similarity.
for doc in docdf:
    for doc2 in docdf:
        print(doc.similarity(doc2))
top_lines[:5]
| TAZ/TAZ_analyze.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # AoC Day 3
#
# <NAME>
#
# 3 December 2021
#
# ## Prompt
#
# --- Day 3: Binary Diagnostic ---
#
# The submarine has been making some odd creaking noises, so you ask it to produce a diagnostic report just in case.
#
# ### Part 1
#
# The diagnostic report (your puzzle input) consists of a list of binary numbers which, when decoded properly, can tell you many useful things about the conditions of the submarine. The first parameter to check is the power consumption.
#
# You need to use the binary numbers in the diagnostic report to generate two new binary numbers (called the gamma rate and the epsilon rate). The power consumption can then be found by multiplying the gamma rate by the epsilon rate.
#
# Each bit in the gamma rate can be determined by finding the most common bit in the corresponding position of all numbers in the diagnostic report. For example, given the following diagnostic report:
#
# ```
# 00100
# 11110
# 10110
# 10111
# 10101
# 01111
# 00111
# 11100
# 10000
# 11001
# 00010
# 01010
# ```
#
# Considering only the first bit of each number, there are five 0 bits and seven 1 bits. Since the most common bit is 1, the first bit of the gamma rate is 1.
#
# The most common second bit of the numbers in the diagnostic report is 0, so the second bit of the gamma rate is 0.
#
# The most common value of the third, fourth, and fifth bits are 1, 1, and 0, respectively, and so the final three bits of the gamma rate are 110.
#
# So, the gamma rate is the binary number 10110, or 22 in decimal.
#
# The epsilon rate is calculated in a similar way; rather than use the most common bit, the least common bit from each position is used. So, the epsilon rate is 01001, or 9 in decimal. Multiplying the gamma rate (22) by the epsilon rate (9) produces the power consumption, 198.
#
# Use the binary numbers in your diagnostic report to calculate the gamma rate and epsilon rate, then multiply them together. What is the power consumption of the submarine? (Be sure to represent your answer in decimal, not binary.)
# +
# get input
with open("inputs/day3.txt") as file:
    inputs = file.read().splitlines()
# -
inputs[0:5]
len(inputs[0])
# +
# Transpose the report: pos_list[i] accumulates every number's bit at position i.
pos_list = list(inputs[0])
for i in inputs[1:]:
    for pos, bit in enumerate(i):
        pos_list[pos] += bit
gamma = ''
epsilon = ''
# gamma takes the most common bit per position; epsilon the least common.
for pos in pos_list:
    count_0 = 0
    count_1 = 0
    for char in pos:
        if char == '1':
            count_1 += 1
        elif char == '0':
            count_0 += 1
    if count_0 > count_1:
        gamma += '0'
        epsilon += '1'
    else:
        gamma += '1'
        epsilon += '0'
# Power consumption = gamma * epsilon, both interpreted as binary.
power_consumption = int(gamma, base=2) * int(epsilon, base=2)
print(power_consumption)
# -
# #### Part 1 Solution
#
# 1025636 - Correct!
# ### Part 2
#
# --- Part Two ---
#
# Next, you should verify the life support rating, which can be determined by multiplying the oxygen generator rating by the CO2 scrubber rating.
#
# Both the oxygen generator rating and the CO2 scrubber rating are values that can be found in your diagnostic report - finding them is the tricky part. Both values are located using a similar process that involves filtering out values until only one remains. Before searching for either rating value, start with the full list of binary numbers from your diagnostic report and consider just the first bit of those numbers. Then:
#
# - Keep only numbers selected by the bit criteria for the type of rating value for which you are searching. Discard numbers which do not match the bit criteria.
# - If you only have one number left, stop; this is the rating value for which you are searching.
# - Otherwise, repeat the process, considering the next bit to the right.
#
# The bit criteria depends on which type of rating value you want to find:
#
# - To find oxygen generator rating, determine the most common value (0 or 1) in the current bit position, and keep only numbers with that bit in that position. If 0 and 1 are equally common, keep values with a 1 in the position being considered.
# - To find CO2 scrubber rating, determine the least common value (0 or 1) in the current bit position, and keep only numbers with that bit in that position. If 0 and 1 are equally common, keep values with a 0 in the position being considered.
#
#
# ```
# 00100
# 11110
# 10110
# 10111
# 10101
# 01111
# 00111
# 11100
# 10000
# 11001
# 00010
# 01010
# ```
#
# For example, to determine the oxygen generator rating value using the same example diagnostic report from above:
#
# - Start with all 12 numbers and consider only the first bit of each number. There are more 1 bits (7) than 0 bits (5), so keep only the 7 numbers with a 1 in the first position: 11110, 10110, 10111, 10101, 11100, 10000, and 11001.
# - Then, consider the second bit of the 7 remaining numbers: there are more 0 bits (4) than 1 bits (3), so keep only the 4 numbers with a 0 in the second position: 10110, 10111, 10101, and 10000.
# - In the third position, three of the four numbers have a 1, so keep those three: 10110, 10111, and 10101.
# - In the fourth position, two of the three numbers have a 1, so keep those two: 10110 and 10111.
# - In the fifth position, there are an equal number of 0 bits and 1 bits (one each). So, to find the oxygen generator rating, keep the number with a 1 in that position: 10111.
# - As there is only one number left, stop; the oxygen generator rating is 10111, or 23 in decimal.
#
# Then, to determine the CO2 scrubber rating value from the same example above:
#
# - Start again with all 12 numbers and consider only the first bit of each number. There are fewer 0 bits (5) than 1 bits (7), so keep only the 5 numbers with a 0 in the first position: 00100, 01111, 00111, 00010, and 01010.
# - Then, consider the second bit of the 5 remaining numbers: there are fewer 1 bits (2) than 0 bits (3), so keep only the 2 numbers with a 1 in the second position: 01111 and 01010.
# - In the third position, there are an equal number of 0 bits and 1 bits (one each). So, to find the CO2 scrubber rating, keep the number with a 0 in that position: 01010.
# - As there is only one number left, stop; the CO2 scrubber rating is 01010, or 10 in decimal.
#
# Finally, to find the life support rating, multiply the oxygen generator rating (23) by the CO2 scrubber rating (10) to get 230.
#
# Use the binary numbers in your diagnostic report to calculate the oxygen generator rating and CO2 scrubber rating, then multiply them together. What is the life support rating of the submarine? (Be sure to represent your answer in decimal, not binary.)
# +
from collections import Counter

def find_most_common_char(input_list, position, if_tie):
    """Return the most common character at `position` across the strings in
    input_list; return `if_tie` when the top two characters are tied."""
    counts = Counter(elem[position] for elem in input_list)
    ranked = counts.most_common(2)
    if len(ranked) < 2:
        # Only one distinct character at this position — no tie possible.
        return ranked[0][0]
    (top_char, top_count), (_, runner_up_count) = ranked
    return top_char if top_count > runner_up_count else if_tie

def find_last_remaining_number(input_list, rating_type):
    """Filter input_list position by position per the AoC day 3 bit criteria
    and return the single remaining binary string (None if never narrowed to one).

    rating_type "O2 Generator" keeps the most common bit per position;
    "CO2 Scrubber" keeps the least common. Ties resolve toward '1', so the
    CO2 branch keeps '0' on a tie, matching the puzzle spec.
    """
    candidates = list(input_list)
    for position in range(len(candidates[0])):
        top_char = find_most_common_char(candidates, position, if_tie="1")
        # Single-pass filter replaces the original O(n^2) remove-by-value loop.
        if rating_type == "O2 Generator":
            candidates = [num for num in candidates if num[position] == top_char]
        elif rating_type == "CO2 Scrubber":
            candidates = [num for num in candidates if num[position] != top_char]
        if len(candidates) == 1:
            return candidates[0]
# -
o2_generator_value = find_last_remaining_number(inputs, "O2 Generator")
print(o2_generator_value)
co2_scrubber_value = find_last_remaining_number(inputs, "CO2 Scrubber")
print(co2_scrubber_value)
# Life support rating = product of the two ratings, interpreted as binary.
life_support = int(o2_generator_value, base=2) * int(co2_scrubber_value, base=2)
print(life_support)
# #### Part 2 Solution
#
# 793873 - CORRECT!
#
# At first I submitted 821674 - then I realized that instead of having the tie be conditional, the tie should always be "1". With that fixed, the answer was correct.
#
# 821674 - WRONG!
#
# idk what's going wrong here.
| 2021/Day_03.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import json
# Refactoring census age brackets to add 18, 19, 20 and 21 into their respective 5 year buckets, overwrites original file so this would only be run once
age = pd.read_csv("2010censuscountyagesexbrackets.csv")
pd.unique(age['age'])
# Merge the 15-17 and 18-19 rows into a single 15-19 bucket per county/sex.
to1519 = age[(age['age'] == '15-17') | (age['age'] == '18-19')]
to1519 = to1519.copy()
to1519 = to1519[['county', 'sex','people']].groupby(['county','sex'], as_index=False).sum()
to1519['age'] = '15-19'
to1519.head()
# Merge the 20, 21 and 22-24 rows into a single 20-24 bucket.
to2024 = age[(age['age'] == '20') | (age['age'] == '21')| (age['age'] == '22-24')]
to2024 = to2024.copy()
to2024 = to2024[['county', 'sex','people']].groupby(['county','sex'], as_index=False).sum()
to2024['age'] = '20-24'
to2024.head()
# +
# Keep the already-correct brackets, then re-attach the two rebuilt buckets.
agesub = ['0-5', '11-15', '25-29', '30-34', '35-39', '40-44', '45-49', '50-54', '55-59', '06-10', '60-61', '62-64', '65-66', '67-69', '70-74', '75-79', '80-84', '85+']
age = age[age['age'].isin(agesub)]
# NOTE(review): DataFrame.append was removed in pandas 2.0; pd.concat is the modern equivalent.
age = age.append(to1519)
age = age.append(to2024)
# -
pd.unique(age['age'])
age.tail()
age.to_csv("2010censuscountyagesexbrackets.csv", index=False)
| data/unprepped/json file creation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Make a figure of the deformation
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import cartopy.feature as cfeature
# We need these two classes to set proper ticklabels for Cartopy maps
from cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter
import numpy as np
import pyproj
import verde as vd
from vectorspline3d import VectorSpline3D
from vectorspline3d.vector import greens_func_3d
# +
# 100 km x 100 km grid at 0.5 km spacing, centered on the origin.
region = (-50e3, 50e3, -50e3, 50e3)
spacing = 0.5e3
coords = vd.grid_coordinates(region, spacing=spacing)
# A vertical force couple: +1/eps and -1/eps separated by eps along northing.
eps = 1e3
force = np.array([
    [0, 0],
    [0, 0],
    [1/eps, -1/eps],
]).ravel()
force_coords = ([0, 0], [-0.5*eps, 0.5*eps])
#force = np.array([
#    [-1/eps, 1/eps],
#    [0, 0],
#    [0, 0],
#]).ravel()
#force_coords = ([-0.5*eps, 0.5*eps], [0, 0])
# Bypass fitting: set the forces directly and just evaluate the Green's functions.
spline = VectorSpline3D(poisson=0.5, depth=3e3, force_coords=force_coords, coupling=1)
spline._depth_scale = np.zeros_like(force_coords[0])
spline.force_ = force
pred = spline.predict(coords)
east, north, up = pred
# -
# Vertical deformation as a color map, horizontal deformation as arrows.
plt.figure(figsize=(10, 8.2))
maxabs = np.abs([up.min(), up.max()]).max()
plt.pcolormesh(coords[0] - spacing/2, coords[1] - spacing/2, up,
               vmin=-maxabs, vmax=maxabs, cmap="seismic")
plt.colorbar(pad=0.01, aspect=50)
s = 4
plt.quiver(coords[0][::s,::s], coords[1][::s,::s],
           east[::s,::s], north[::s,::s],
           scale=1e-7)
plt.axis('scaled')
plt.xlabel('easting')
plt.ylabel('northing')
plt.xlim(region[:2])
plt.ylim(region[2:])
plt.tight_layout()
# Sanity check: Jacobian dotted with the forces must reproduce predict().
jac = spline.jacobian(coords, force_coords)
pred_jac = [i.reshape(coords[0].shape)
            for i in jac.dot(force).reshape((3, jac.shape[0]//3))]
print(np.allclose(pred, pred_jac))
| code/notebooks/greens-functions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pickle
import glob
import matplotlib.pyplot as plt
import pandas as pd
# +
infos = []
idx_tups = []
# for file in glob.glob("../../data/purchase/purchase_std*"): for old purchase data

def _load_run(path):
    """Load one pickled purchase-100 run.

    Keeps the epoch with the best accuracy (plus its Yeom/Merlin advantage,
    each advantage = TPR - FPR), and decodes the run's hyper-parameters from
    the filename. Returns (var, fd) where
    var = [method, width, epsilon, throw-out threshold, batch size].
    """
    # `with` closes the file handle (the original leaked it via an inline open()).
    with open(path, 'rb') as handle:
        fd = pickle.load(handle)
    var = path.split("_")[-5:]
    print(var)
    a, y, m = max([(a, yt - yf, mt - mf) for a, yt, yf, mt, mf in zip(fd['acc'], fd['yeom_tpr'], fd['yeom_fpr'], fd['merlin_tpr'], fd['merlin_fpr'])])
    fd['acc'] = a
    fd['adv'] = y
    fd['merlin_adv'] = m
    # 'ub' marks the DP (gradient-clipping) runs; everything else is standard.
    if var[-5] == 'ub':
        var[-5] = 'dp'
    else:
        var[-5] = 'std'
    var[-4] = int(var[-4])
    var[-1] = int(var[-1].split(".")[0])
    var[-3] = int(var[-3])
    var[-2] = float(var[-2]) if var[-2] != 'False' else False
    return var, fd

# The two patterns were previously handled by two copy-pasted ~18-line loops.
# IDX tups follow the format (epsilon, throw out threshold, batch size)
for pattern in ("../../data/purchase/purchase_u*", "../../data/purchase/purchase_s_*"):
    for file in glob.glob(pattern):
        var, fd = _load_run(file)
        idx_tups.append(var)
        infos.append(fd)
# -
# Flatten each run into one row of scalars. Some older result files stored
# 'adv' / 'merlin_adv' as per-epoch lists, so fall back to the final entry
# in that case. (isinstance replaces the non-idiomatic `type(x) == list`.)
inf_scalars = [
    (fd['acc'],
     fd['adv'][-1] if isinstance(fd['adv'], list) else fd['adv'],
     fd['merlin_adv'][-1][0] if isinstance(fd['merlin_adv'], list) else fd['merlin_adv'],
     *params)
    for fd, params in zip(infos, idx_tups)]
ds = pd.DataFrame(inf_scalars)
ds.columns = ['acc', 'yeom', 'merlin', 'method', 'width', 'epsilon', 'throw out', 'batch_size']
# Accuracy/advantage trade-off; drop degenerate runs (acc <= .1).
dp = ds[(ds['method'] == 'dp') & (ds['acc'] > .1)].sort_values('acc')
sd = ds[(ds['method'] == 'std') & (ds['acc'] > .1)].sort_values('acc')
plt.scatter(dp['acc'], dp['yeom'], label='Gradient Clipping', alpha=.5)
plt.scatter(sd['acc'], sd['yeom'], c='tab:orange', label='Immediate Sensitivity', alpha=.5)
plt.xlabel('Accuracy')
plt.ylabel('Advantage')
plt.title('Purchase-100')
plt.legend()
ds[ds['method'] == 'std'].sort_values('acc', ascending=False)
# One advantage-vs-accuracy curve per throw-out threshold.
throws = ds['throw out'].unique()
for t in throws:
    db = ds[ds['throw out'] == t].sort_values('acc')
    plt.plot(db['acc'], db['yeom'], alpha=.5, label=f'{t}')
plt.legend()
da = ds[ds['acc'] > .65]
da.sort_values('yeom')
# +
import torchvision.datasets as datasets
import torchvision
from torch.utils.data import DataLoader
# NOTE(review): despite the `cifar_` names these load MNIST — the names look
# like leftovers from a CIFAR experiment; confirm before reusing elsewhere.
cifar_trainset = datasets.MNIST(root='./data', train=True, download=True, transform=torchvision.transforms.ToTensor()) #target_transform=one_hot_label)
cifar_testset = datasets.MNIST(root='./data', train=False, download=True, transform=torchvision.transforms.ToTensor())
# -
from sklearn.model_selection import train_test_split
# Keep a random 18% subsample of the training set (the larger split is discarded).
_, training = train_test_split(cifar_trainset, test_size=.18, shuffle=True)
# +
#training, testing = train_test_split(training, test_size=.3, shuffle=True)
# -
train_loader = DataLoader(training, batch_size=64, shuffle=True, drop_last=True)
len(training)
| experiments/immediate_sensitivity/purchase_data_analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] colab_type="text" id="5rmpybwysXGV"
# ##### Copyright 2019 The TensorFlow Authors.
# + cellView="form" colab_type="code" id="m8y3rGtQsYP2" colab={}
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] colab_type="text" id="hrXv0rU9sIma"
# # Custom training: basics
# + [markdown] colab_type="text" id="7S0BwJ_8sLu7"
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://www.tensorflow.org/beta/tutorials/eager/custom_training"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/r2/tutorials/eager/custom_training.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/r2/tutorials/eager/custom_training.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
# </td>
# <td>
# <a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/r2/tutorials/eager/custom_training.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
# </td>
# </table>
# + [markdown] colab_type="text" id="k2o3TTG4TFpt"
# In the previous tutorial, you covered the TensorFlow APIs for automatic differentiation—a basic building block for machine learning.
# In this tutorial, you will use the TensorFlow primitives introduced in the prior tutorials to do some simple machine learning.
#
# TensorFlow also includes `tf.keras`—a high-level neural network API that provides useful abstractions to reduce boilerplate and makes TensorFlow easier to use without sacrificing flexibility and performance. We strongly recommend the [tf.Keras API](../../guide/keras/overview.ipynb) for development. However, in this short tutorial you will learn how to train a neural network from first principles to establish a strong foundation.
# + [markdown] colab_type="text" id="3LXMVuV0VhDr"
# ## Setup
# + colab_type="code" id="NiolgWMPgpwI" colab={}
from __future__ import absolute_import, division, print_function, unicode_literals
try:
    # The %tensorflow_version magic only exists in Colab; jupytext comments
    # it out, which previously left this `try` with an empty body — a
    # SyntaxError when the script is run as plain Python. `pass` keeps the
    # cell valid everywhere while the magic still round-trips to Colab.
    # %tensorflow_version 2.x # Colab only.
    pass
except Exception:
    pass
import tensorflow as tf
# + [markdown] colab_type="text" id="eMAWbDJFVmMk"
# ## Variables
#
# Tensors in TensorFlow are immutable stateless objects. Machine learning models, however, must have changing state: as your model trains, the same code to compute predictions should behave differently over time (hopefully with a lower loss!). To represent this state, which needs to change over the course of your computation, you can choose to rely on the fact that Python is a stateful programming language:
#
# + colab_type="code" id="VkJwtLS_Jbn8" colab={}
# Using plain Python state: tensors are immutable, so `+=` rebinds the name.
x = tf.zeros([10, 10])
x += 2  # Equivalent to x = x + 2 — creates a new tensor, the original
        # value of x is not mutated
print(x)
# + [markdown] colab_type="text" id="wfneTXy7JcUz"
# TensorFlow has stateful operations built-in, and these are often easier than using low-level Python representations for your state. Use `tf.Variable` to represent weights in a model.
#
# A `tf.Variable` object stores a value and implicitly reads from this stored value. There are operations (`tf.assign_sub`, `tf.scatter_update`, etc.) that manipulate the value stored in a TensorFlow variable.
# + colab_type="code" id="itxmrMil6DQi" colab={}
# A tf.Variable holds mutable state, in contrast to immutable tensors.
v = tf.Variable(1.0)
# Use Python's `assert` as a debugging statement to test the condition
assert v.numpy() == 1.0
# Reassign the value of `v` in place with assign()
v.assign(3.0)
assert v.numpy() == 3.0
# Use `v` in a TensorFlow `tf.square()` operation and reassign
v.assign(tf.square(v))
assert v.numpy() == 9.0
# + [markdown] colab_type="text" id="-paSaeq1JzwC"
# Computations using `tf.Variable` are automatically traced when computing gradients. For variables that represent embeddings, TensorFlow will do sparse updates by default, which are more computation and memory efficient.
#
# A `tf.Variable` is also a way to show a reader of your code that a piece of state is mutable.
# + [markdown] colab_type="text" id="BMiFcDzE7Qu3"
# ## Fit a linear model
#
# Let's use the concepts you have learned so far—`Tensor`, `Variable`, and `GradientTape`—to build and train a simple model. This typically involves a few steps:
#
# 1. Define the model.
# 2. Define a loss function.
# 3. Obtain training data.
# 4. Run through the training data and use an "optimizer" to adjust the variables to fit the data.
#
# Here, you'll create a simple linear model, `f(x) = x * W + b`, which has two variables: `W` (weights) and `b` (bias). You'll synthesize data such that a well trained model would have `W = 3.0` and `b = 2.0`.
# + [markdown] colab_type="text" id="gFzH64Jn9PIm"
# ### Define the model
#
# Let's define a simple class to encapsulate the variables and the computation:
# + colab_type="code" id="_WRu7Pze7wk8" colab={}
class Model(object):
    """Minimal linear model f(x) = W * x + b with trainable variables."""
    def __init__(self):
        # Start away from the true values (W=3, b=2) on purpose so the
        # training loop below has visible progress to make; real code
        # would initialize randomly (for example with `tf.random.normal`).
        self.W = tf.Variable(5.0)
        self.b = tf.Variable(0.0)
    def __call__(self, x):
        # Evaluate the linear map at `x`.
        return x * self.W + self.b
model = Model()
assert model(3.0).numpy() == 15.0
# + [markdown] colab_type="text" id="xa6j_yXa-j79"
# ### Define a loss function
#
# A loss function measures how well the output of a model for a given input matches the target output. The goal is to minimize this difference during training. Let's use the standard L2 loss, also known as the least square errors:
# + colab_type="code" id="Y0ysUFGY924U" colab={}
def loss(predicted_y, target_y):
    """Return the mean squared error (standard L2 loss) between predictions and targets."""
    squared_error = tf.square(predicted_y - target_y)
    return tf.reduce_mean(squared_error)
# + [markdown] colab_type="text" id="qutT_fkl_CBc"
# ### Obtain training data
#
# First, synthesize the training data by adding random Gaussian (Normal) noise to the inputs:
# + colab_type="code" id="gxPTb-kt_N5m" colab={}
TRUE_W = 3.0  # ground-truth slope the model should recover
TRUE_b = 2.0  # ground-truth intercept
NUM_EXAMPLES = 1000
inputs = tf.random.normal(shape=[NUM_EXAMPLES])
noise = tf.random.normal(shape=[NUM_EXAMPLES])
# Linear data with additive Gaussian noise
outputs = inputs * TRUE_W + TRUE_b + noise
# + [markdown] colab_type="text" id="-50nq-wPBsAW"
# Before training the model, visualize the loss value by plotting the model's predictions in red and the training data in blue:
# + colab_type="code" id="_eb83LtrB4nt" colab={}
import matplotlib.pyplot as plt
plt.scatter(inputs, outputs, c='b')   # training data
plt.scatter(inputs, model(inputs), c='r')  # untrained model's predictions
plt.show()
print('Current loss: %1.6f' % loss(model(inputs), outputs).numpy())
# + [markdown] colab_type="text" id="sSDP-yeq_4jE"
# ### Define a training loop
#
# With the network and training data, train the model using [gradient descent](https://en.wikipedia.org/wiki/Gradient_descent) to update the weights variable (`W`) and the bias variable (`b`) to reduce the loss. There are many variants of the gradient descent scheme that are captured in `tf.train.Optimizer`—our recommended implementation. But in the spirit of building from first principles, here you will implement the basic math yourself with the help of `tf.GradientTape` for automatic differentiation and `tf.assign_sub` for decrementing a value (which combines `tf.assign` and `tf.sub`):
# + colab_type="code" id="MBIACgdnA55X" colab={}
def train(model, inputs, outputs, learning_rate):
    """Take one gradient-descent step, updating `model.W` and `model.b` in place."""
    with tf.GradientTape() as tape:
        current_loss = loss(model(inputs), outputs)
    # The tape remains usable after the `with` block exits.
    grad_W, grad_b = tape.gradient(current_loss, [model.W, model.b])
    model.W.assign_sub(learning_rate * grad_W)
    model.b.assign_sub(learning_rate * grad_b)
# + [markdown] colab_type="text" id="RwWPaJryD2aN"
# Finally, let's repeatedly run through the training data and see how `W` and `b` evolve.
# + colab_type="code" id="XdfkR223D9dW" colab={}
model = Model()
# Collect the history of W-values and b-values to plot later
Ws, bs = [], []
epochs = range(10)
for epoch in epochs:
    # Record parameters *before* the step so epoch 0 shows the
    # initial values (W=5, b=0).
    Ws.append(model.W.numpy())
    bs.append(model.b.numpy())
    current_loss = loss(model(inputs), outputs)
    train(model, inputs, outputs, learning_rate=0.1)
    print('Epoch %2d: W=%1.2f b=%1.2f, loss=%2.5f' %
          (epoch, Ws[-1], bs[-1], current_loss))
# Let's plot it all: estimates in solid lines, true values dashed
plt.plot(epochs, Ws, 'r',
         epochs, bs, 'b')
plt.plot([TRUE_W] * len(epochs), 'r--',
         [TRUE_b] * len(epochs), 'b--')
plt.legend(['W', 'b', 'True W', 'True b'])
plt.show()
# + [markdown] colab_type="text" id="vPnIVuaSJwWz"
# ## Next steps
#
# This tutorial used `tf.Variable` to build and train a simple linear model.
#
# In practice, the high-level APIs—such as `tf.keras`—are much more convenient to build neural networks. `tf.keras` provides higher level building blocks (called "layers"), utilities to save and restore state, a suite of loss functions, a suite of optimization strategies, and more. Read the [TensorFlow Keras guide](https://www.tensorflow.org/beta/guide/keras/overview) to learn more.
#
| site/en/r2/tutorials/eager/custom_training.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# + [markdown] origin_pos=0
# # Fine-Tuning
# :label:`sec_fine_tuning`
#
# In earlier chapters, we discussed how to train models on the Fashion-MNIST training dataset with only 60000 images. We also described ImageNet, the most widely used large-scale image dataset in academia, which has more than 10 million images and 1000 objects. However, the size of the dataset that we usually encounter is between those of the two datasets.
#
#
# Suppose that we want to recognize different types of chairs from images, and then recommend purchase links to users.
# One possible method is to first identify
# 100 common chairs,
# take 1000 images of different angles for each chair,
# and then train a classification model on the collected image dataset.
# Although this chair dataset may be larger than the Fashion-MNIST dataset,
# the number of examples is still less than one-tenth of
# that in ImageNet.
# This may lead to overfitting of complicated models
# that are suitable for ImageNet on this chair dataset.
# Besides, due to the limited amount of training examples,
# the accuracy of the trained model
# may not meet practical requirements.
#
#
# In order to address the above problems,
# an obvious solution is to collect more data.
# However, collecting and labeling data can take a lot of time and money.
# For example, in order to collect the ImageNet dataset, researchers have spent millions of dollars from research funding.
# Although the current data collection cost has been significantly reduced, this cost still cannot be ignored.
#
#
# Another solution is to apply *transfer learning* to transfer the knowledge learned from the *source dataset* to the *target dataset*.
# For example, although most of the images in the ImageNet dataset have nothing to do with chairs, the model trained on this dataset may extract more general image features, which can help identify edges, textures, shapes, and object composition.
# These similar features may
# also be effective for recognizing chairs.
#
#
# ## Steps
#
#
# In this section, we will introduce a common technique in transfer learning: *fine-tuning*. As shown in :numref:`fig_finetune`, fine-tuning consists of the following four steps:
#
#
# 1. Pretrain a neural network model, i.e., the *source model*, on a source dataset (e.g., the ImageNet dataset).
# 1. Create a new neural network model, i.e., the *target model*. This copies all model designs and their parameters on the source model except the output layer. We assume that these model parameters contain the knowledge learned from the source dataset and this knowledge will also be applicable to the target dataset. We also assume that the output layer of the source model is closely related to the labels of the source dataset; thus it is not used in the target model.
# 1. Add an output layer to the target model, whose number of outputs is the number of categories in the target dataset. Then randomly initialize the model parameters of this layer.
# 1. Train the target model on the target dataset, such as a chair dataset. The output layer will be trained from scratch, while the parameters of all the other layers are fine-tuned based on the parameters of the source model.
#
# 
# :label:`fig_finetune`
#
# When target datasets are much smaller than source datasets, fine-tuning helps to improve models' generalization ability.
#
#
# ## Hot Dog Recognition
#
# Let us demonstrate fine-tuning via a concrete case:
# hot dog recognition.
# We will fine-tune a ResNet model on a small dataset,
# which was pretrained on the ImageNet dataset.
# This small dataset consists of
# thousands of images with and without hot dogs.
# We will use the fine-tuned model to recognize
# hot dogs from images.
#
# + origin_pos=2 tab=["pytorch"]
# %matplotlib inline
import os
import torch
import torchvision
from torch import nn
from d2l import torch as d2l
# + [markdown] origin_pos=3
# ### Reading the Dataset
#
# [**The hot dog dataset we use was taken from online images**].
# This dataset consists of
# 1400 positive-class images containing hot dogs,
# and as many negative-class images containing other foods.
# 1000 images of both classes are used for training and the rest are for testing.
#
#
# After unzipping the downloaded dataset,
# we obtain two folders `hotdog/train` and `hotdog/test`. Both folders have `hotdog` and `not-hotdog` subfolders, either of which contains images of
# the corresponding class.
#
# + origin_pos=4 tab=["pytorch"]
#@save
# Register the hot dog dataset with d2l's download hub (the SHA-1 checksum
# verifies the archive) and download/unzip it locally.
d2l.DATA_HUB['hotdog'] = (d2l.DATA_URL + 'hotdog.zip',
                          'fba480ffa8aa7e0febbb511d181409f899b9baa5')
data_dir = d2l.download_extract('hotdog')
# + [markdown] origin_pos=5
# We create two instances to read all the image files in the training and testing datasets, respectively.
#
# + origin_pos=7 tab=["pytorch"]
# ImageFolder infers labels from the `hotdog` / `not-hotdog` subfolders.
train_imgs = torchvision.datasets.ImageFolder(os.path.join(data_dir, 'train'))
test_imgs = torchvision.datasets.ImageFolder(os.path.join(data_dir, 'test'))
# + [markdown] origin_pos=8
# The first 8 positive examples and the last 8 negative images are shown below. As you can see, [**the images vary in size and aspect ratio**].
#
# + origin_pos=9 tab=["pytorch"]
# Positives sit at the front of the dataset; negatives are taken from the
# end (hence the reversed indexing).
hotdogs = [train_imgs[i][0] for i in range(8)]
not_hotdogs = [train_imgs[-i - 1][0] for i in range(8)]
d2l.show_images(hotdogs + not_hotdogs, 2, 8, scale=1.4);
# + [markdown] origin_pos=10
# During training, we first crop a random area of random size and random aspect ratio from the image,
# and then scale this area
# to a $224 \times 224$ input image.
# During testing, we scale both the height and width of an image to 256 pixels, and then crop a central $224 \times 224$ area as input.
# In addition,
# for the three RGB (red, green, and blue) color channels
# we *standardize* their values channel by channel.
# Concretely,
# the mean value of a channel is subtracted from each value of that channel and then the result is divided by the standard deviation of that channel.
#
# [~~Data augmentations~~]
#
# + origin_pos=12 tab=["pytorch"]
# Standardize each RGB channel with the ImageNet channel means and
# standard deviations — the statistics the pretrained backbone expects.
normalize = torchvision.transforms.Normalize(
    [0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
# Training pipeline: random crop/rescale to 224x224 plus horizontal flips.
train_augs = torchvision.transforms.Compose([
    torchvision.transforms.RandomResizedCrop(224),
    torchvision.transforms.RandomHorizontalFlip(),
    torchvision.transforms.ToTensor(),
    normalize,
])
# Test pipeline: deterministic resize to 256 then a central 224x224 crop.
test_augs = torchvision.transforms.Compose([
    torchvision.transforms.Resize(256),
    torchvision.transforms.CenterCrop(224),
    torchvision.transforms.ToTensor(),
    normalize,
])
# + [markdown] origin_pos=13
# ### [**Defining and Initializing the Model**]
#
# We use ResNet-18, which was pretrained on the ImageNet dataset, as the source model. Here, we specify `pretrained=True` to automatically download the pretrained model parameters.
# If this model is used for the first time,
# Internet connection is required for download.
#
# + origin_pos=15 tab=["pytorch"]
# Download ImageNet-pretrained ResNet-18 weights to use as the source model.
pretrained_net = torchvision.models.resnet18(pretrained=True)
# + [markdown] origin_pos=17 tab=["pytorch"]
# The pretrained source model instance contains a number of feature layers and an output layer `fc`.
# The main purpose of this division is to facilitate the fine-tuning of model parameters of all layers but the output layer. The member variable `fc` of source model is given below.
#
# + origin_pos=19 tab=["pytorch"]
# Inspect the final fully-connected layer (the 1000-way ImageNet classifier).
pretrained_net.fc
# + [markdown] origin_pos=20
# As a fully-connected layer, it transforms ResNet's final global average pooling outputs into 1000 class outputs of the ImageNet dataset.
# We then construct a new neural network as the target model. It is defined in the same way as the pretrained source model except that
# its number of outputs in the final layer
# is set to
# the number of classes in the target dataset (rather than 1000).
#
#
#
#
# In the following code, the model parameters in the member variable features of the target model instance finetune_net are initialized to the model parameters of the corresponding layer of the source model. Since the model parameters in the features are pre-trained on the ImageNet data set and are good enough, generally only a small learning rate is needed to fine-tune these parameters.
#
# The model parameters in the member variable output are initialized randomly, and generally require a larger learning rate to train from scratch. Assuming that the learning rate in the Trainer instance is η, we set the learning rate of the model parameters in the member variable output to be 10η in the iteration.
#
#
# In the code below, the model parameters before the output layer of the target model instance `finetune_net` are initialized to model parameters of the corresponding layers from the source model.
# Since these model parameters were obtained via pretraining on ImageNet,
# they are effective.
# Therefore, we can only use
# a small learning rate to *fine-tune* such pretrained parameters.
# In contrast, model parameters in the output layer are randomly initialized and generally require a larger learning rate to be learned from scratch.
# Let the base learning rate be $\eta$, a learning rate of $10\eta$ will be used to iterate the model parameters in the output layer.
#
# + origin_pos=22 tab=["pytorch"]
# Target model: same pretrained backbone, but the output layer is replaced
# by a fresh 2-way (hotdog / not-hotdog) linear layer.
finetune_net = torchvision.models.resnet18(pretrained=True)
finetune_net.fc = nn.Linear(finetune_net.fc.in_features, 2)
# Xavier initialization for the new output layer, which trains from scratch.
nn.init.xavier_uniform_(finetune_net.fc.weight);
# + [markdown] origin_pos=23
# ### [**Fine-Tuning the Model**]
#
# First, we define a training function `train_fine_tuning` that uses fine-tuning so it can be called multiple times.
#
# + origin_pos=25 tab=["pytorch"]
# With `param_group=True` the freshly initialized output layer is
# optimized with ten times the base learning rate.
def train_fine_tuning(net, learning_rate, batch_size=128, num_epochs=5,
                      param_group=True):
    """Fine-tune `net` on the hot dog dataset using d2l's training loop."""
    train_iter = torch.utils.data.DataLoader(
        torchvision.datasets.ImageFolder(
            os.path.join(data_dir, 'train'), transform=train_augs),
        batch_size=batch_size, shuffle=True)
    test_iter = torch.utils.data.DataLoader(
        torchvision.datasets.ImageFolder(
            os.path.join(data_dir, 'test'), transform=test_augs),
        batch_size=batch_size)
    devices = d2l.try_all_gpus()
    loss = nn.CrossEntropyLoss(reduction="none")
    if param_group:
        # All pretrained parameters (everything except `fc`) keep the
        # base learning rate; the new output layer gets 10x.
        base_params = [param for name, param in net.named_parameters()
                       if name not in ["fc.weight", "fc.bias"]]
        trainer = torch.optim.SGD(
            [{'params': base_params},
             {'params': net.fc.parameters(), 'lr': learning_rate * 10}],
            lr=learning_rate, weight_decay=0.001)
    else:
        # Single parameter group: every parameter uses the base rate.
        trainer = torch.optim.SGD(net.parameters(), lr=learning_rate,
                                  weight_decay=0.001)
    d2l.train_ch13(net, train_iter, test_iter, loss, trainer, num_epochs,
                   devices)
# + [markdown] origin_pos=26
# We [**set the base learning rate to a small value**]
# in order to *fine-tune* the model parameters obtained via pretraining. Based on the previous settings, we will train the output layer parameters of the target model from scratch using a learning rate ten times greater.
#
# + origin_pos=28 tab=["pytorch"]
# Small base learning rate: the pretrained parameters only need gentle
# adjustment (the output layer still trains at 10x this rate).
train_fine_tuning(finetune_net, 5e-5)
# + [markdown] origin_pos=29
# [**For comparison,**] we define an identical model, but (**initialize all of its model parameters to random values**). Since the entire model needs to be trained from scratch, we can use a larger learning rate.
#
# + origin_pos=31 tab=["pytorch"]
# Baseline trained from scratch: no pretrained weights, a single parameter
# group, and a 10x larger base learning rate.
scratch_net = torchvision.models.resnet18()
scratch_net.fc = nn.Linear(scratch_net.fc.in_features, 2)
train_fine_tuning(scratch_net, 5e-4, param_group=False)
# + [markdown] origin_pos=32
# As we can see, the fine-tuned model tends to perform better for the same epoch
# because its initial parameter values are more effective.
#
#
# ## Summary
#
# * Transfer learning transfers knowledge learned from the source dataset to the target dataset. Fine-tuning is a common technique for transfer learning.
# * The target model copies all model designs with their parameters from the source model except the output layer, and fine-tunes these parameters based on the target dataset. In contrast, the output layer of the target model needs to be trained from scratch.
# * Generally, fine-tuning parameters uses a smaller learning rate, while training the output layer from scratch can use a larger learning rate.
#
#
# ## Exercises
#
# 1. Keep increasing the learning rate of `finetune_net`. How does the accuracy of the model change?
# 2. Further adjust hyperparameters of `finetune_net` and `scratch_net` in the comparative experiment. Do they still differ in accuracy?
# 3. Set the parameters before the output layer of `finetune_net` to those of the source model and do *not* update them during training. How does the accuracy of the model change? You can use the following code.
#
# + origin_pos=34 tab=["pytorch"]
# Exercise 3: freeze every parameter — with requires_grad=False no
# gradients are computed or applied during training.
for param in finetune_net.parameters():
    param.requires_grad = False
# + [markdown] origin_pos=35
# 4. In fact, there is a "hotdog" class in the `ImageNet` dataset. Its corresponding weight parameter in the output layer can be obtained via the following code. How can we leverage this weight parameter?
#
# + origin_pos=37 tab=["pytorch"]
# Split the 1000-way classifier weights into per-class rows and grab the
# row for the ImageNet "hotdog" class (index 934, per the exercise text).
weight = pretrained_net.fc.weight
hotdog_w = torch.split(weight.data, 1, dim=0)[934]
hotdog_w.shape
# + [markdown] origin_pos=39 tab=["pytorch"]
# [Discussions](https://discuss.d2l.ai/t/1439)
#
| d2l/pytorch/chapter_computer-vision/fine-tuning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Field sampling tutorial
# The particle trajectories allow us to study fields like temperature, plastic concentration or chlorophyll from a Lagrangian perspective.
#
# In this tutorial we will go through how particles can sample `Fields`, using temperature as an example. Along the way we will get to know the parcels class `Variable` (see [here](https://oceanparcels.org/gh-pages/html/#parcels.particle.Variable) for the documentation) and some of its methods. This tutorial covers several applications of a sampling setup:
# * [**Basic along trajectory sampling**](#Basic-sampling)
# * [**Sampling initial conditions**](#Sampling-initial-values)
# * [**Sampling initial and along-trajectory values with repeated release**](#Sampling-with-repeatdt)
# ### Basic sampling
# We import the `Variable` class as well as the standard modules needed to set up a simulation.
# +
# Modules needed for the Parcels simulation
from parcels import Variable, FieldSet, ParticleSet, JITParticle, AdvectionRK4
import numpy as np
from datetime import timedelta as delta
# To open and look at the temperature data
import xarray as xr
import matplotlib as mpl
import matplotlib.pyplot as plt
# -
# Suppose we want to study the environmental temperature for plankton drifting around a peninsula. We have a dataset with surface ocean velocities and the corresponding sea surface temperature stored in netcdf files in the folder `"Peninsula_data"`. Besides the velocity fields, we load the temperature field using `extra_fields={'T': 'T'}`. The particles are released on the left hand side of the domain.
# +
# Velocity and temperature fields
fieldset = FieldSet.from_parcels("Peninsula_data/peninsula", extra_fields={'T': 'T'}, allow_time_extrapolation=True)
# Particle locations and initial time
npart = 10  # number of particles to be released
lon = 3e3 * np.ones(npart)
lat = np.linspace(3e3, 45e3, npart, dtype=np.float32)
time = np.arange(0, npart) * delta(hours=2).total_seconds()  # release each particle two hours later
# Plot temperature field and initial particle locations
T_data = xr.open_dataset("Peninsula_data/peninsulaT.nc")
plt.figure()
ax = plt.axes()
T_contour = ax.contourf(T_data.x.values, T_data.y.values, T_data.T.values[0, 0], cmap=plt.cm.inferno)
ax.scatter(lon, lat, c='w')
plt.colorbar(T_contour, label='T [$^{\circ} C$]')
plt.show()
# -
# To sample the temperature field, we need to create a new class of particles where temperature is a `Variable`. As an argument for the `Variable` class, we need to provide the initial values for the particles. The easiest option is to access `fieldset.T`, but this option has some drawbacks.
# +
class SampleParticle(JITParticle):  # Define a new particle class
    # Initialising from fieldset.T forces a (slow) SciPy-mode field lookup
    temperature = Variable('temperature', initial=fieldset.T)
pset = ParticleSet(fieldset=fieldset, pclass=SampleParticle, lon=lon, lat=lat, time=time)
# -
# Using `fieldset.T` leads to the `WARNING` displayed above because `Variable` accesses the fieldset in the slower SciPy mode. Another problem can occur when using the repeatdt argument instead of time:
# <a id='repeatdt_error'></a>
# + tags=["raises-exception"]
# Deliberately raises: with repeatdt there is no release time at which the
# Variable's fieldset.T initial value could be evaluated.
repeatdt = delta(hours=3)
pset = ParticleSet(fieldset=fieldset, pclass=SampleParticle, lon=lon, lat=lat, repeatdt=repeatdt)
# -
# Since the initial time is not defined, the `Variable` class does not know at what time to access the temperature field.
# The solution to this initialisation problem is to leave the initial value zero and sample the initial condition in JIT mode with the sampling Kernel:
# +
class SampleParticleInitZero(JITParticle):  # Define a new particle class
    temperature = Variable('temperature', initial=0)  # Variable 'temperature' initially zero
pset = ParticleSet(fieldset=fieldset, pclass=SampleParticleInitZero, lon=lon, lat=lat, time=time)
# Kernel function: parcels translates this body to C for JIT particles, so
# keep it to simple field-indexing expressions.
def SampleT(particle, fieldset, time):
    particle.temperature = fieldset.T[time, particle.depth, particle.lat, particle.lon]
sample_kernel = pset.Kernel(SampleT)  # Casting the SampleT function to a kernel.
# -
# To sample the initial values we can execute the Sample kernel over the entire particleset with dt = 0 so that time does not increase
# +
pset.execute(sample_kernel, dt=0)  # by only executing the sample kernel we record the initial temperature of the particles
output_file = pset.ParticleFile(name="InitZero.nc", outputdt=delta(hours=1))
# Advect and sample together so temperature is recorded along the track.
pset.execute(AdvectionRK4 + sample_kernel, runtime=delta(hours=30), dt=delta(minutes=5),
             output_file=output_file)
output_file.export()  # export the trajectory data to a netcdf file
output_file.close()
# -
# The particle dataset now contains the particle trajectories and the corresponding environmental temperature
# +
Particle_data = xr.open_dataset("InitZero.nc")
plt.figure()
ax = plt.axes()
ax.set_ylabel('Y')
ax.set_xlabel('X')
ax.set_ylim(1000, 49000)
ax.set_xlim(1000, 99000)
# Black trajectories underneath, temperature-coloured samples on top.
ax.plot(Particle_data.lon.transpose(), Particle_data.lat.transpose(), c='k', zorder=1)
T_scatter = ax.scatter(Particle_data.lon, Particle_data.lat, c=Particle_data.temperature,
                       cmap=plt.cm.inferno, norm=mpl.colors.Normalize(vmin=0., vmax=20.),
                       edgecolor='k', zorder=2)
plt.colorbar(T_scatter, label='T [$^{\circ} C$]')
plt.show()
# -
# ### Sampling initial values
# In some simulations only the particles' initial value within the field is of interest: the variable does not need to be known along the entire trajectory. To reduce computation we can specify the `to_write` argument to the temperature `Variable`. This argument can have three values: `True`, `False` or `'once'`. It determines whether to write the `Variable` to the output file. If we want to know only the initial value, we can enter `'once'` and only the first value will be written to the output file.
# +
class SampleParticleOnce(JITParticle):  # Define a new particle class
    # to_write='once' writes only the first recorded value of this variable
    # to the output file, instead of one value per output step.
    temperature = Variable('temperature', initial=0, to_write='once')  # Variable 'temperature'
pset = ParticleSet(fieldset=fieldset, pclass=SampleParticleOnce, lon=lon, lat=lat, time=time)
# +
pset.execute(sample_kernel, dt=0) # by only executing the sample kernel we record the initial temperature of the particles
output_file = pset.ParticleFile(name="WriteOnce.nc", outputdt=delta(hours=1))
# Advect WITHOUT the sampling kernel: only the initial temperature is kept
# (the variable was declared with to_write='once').
pset.execute(AdvectionRK4, runtime=delta(hours=24), dt=delta(minutes=5),
             output_file=output_file)
output_file.close()
# -
# Since all the particles are released at the same x-position and the temperature field is invariant in the y-direction, all particles have an initial temperature of 0.4$^\circ$C
# +
Particle_data = xr.open_dataset("WriteOnce.nc")
plt.figure()
ax = plt.axes()
ax.set_ylabel('Y')
ax.set_xlabel('X')
ax.set_ylim(1000, 49000)
ax.set_xlim(1000, 99000)
ax.plot(Particle_data.lon.transpose(), Particle_data.lat.transpose(), c='k', zorder=1)
# temperature was written once per particle; tile it across all output times so
# the colour array matches the shape of the lon/lat arrays.
T_scatter = ax.scatter(Particle_data.lon, Particle_data.lat,
                       c=np.tile(Particle_data.temperature, (Particle_data.lon.shape[1], 1)).T,
                       cmap=plt.cm.inferno, norm=mpl.colors.Normalize(vmin=0., vmax=1.),
                       edgecolor='k', zorder=2)
# Raw string: avoids the invalid "\c" escape-sequence warning; rendered label unchanged.
plt.colorbar(T_scatter, label=r'Initial T [$^{\circ} C$]')
plt.show()
# -
# ### Sampling with repeatdt
# Some experiments require large sets of particles to be released repeatedly on the same locations. The [`particleset`](https://oceanparcels.org/gh-pages/html/#module-parcels.particleset) object has the option `repeatdt` for this, but when you want to sample the initial values this introduces some problems as we have seen [here](#repeatdt_error). For more advanced control over the repeated release of particles, you can manually write a for-loop using the function `particleset.add()`. Note that this for-loop is very similar to the one that `repeatdt` would execute under the hood in `particleset.execute()`.
#
# Adding particles to the `particleset` during the simulation reduces the memory used compared to specifying the delayed particle release times upfront, which improves the computational speed. In the loop, we want to initialise new particles and sample their initial temperature. If we want to write both the initialised particles with the sampled temperature and the older particles that have already been advected, we have to make sure both sets of particles find themselves at the same moment in time. The initial conditions must be written to the output file before advecting them, because during advection the `particle.time` will increase.
#
# We do not specify the `outputdt` argument for the `output_file` and instead write the data with `output_file.write(pset, time)` on each iteration. A new particleset is initialised whenever time is a multiple of `repeatdt`. Because the particles are advected after being written, the last displacement must be written once more after the loop.
# +
outputdt = delta(hours=1).total_seconds()  # write the particle data every hour
repeatdt = delta(hours=6).total_seconds()  # release each set of particles six hours later
runtime = delta(hours=24).total_seconds()
pset = ParticleSet(fieldset=fieldset, pclass=SampleParticleInitZero, lon=[], lat=[], time=[]) # Using SampleParticleInitZero
kernels = AdvectionRK4 + sample_kernel
output_file = pset.ParticleFile(name="RepeatLoop.nc")  # Do not specify the outputdt yet, so we can manually write the output
# Manual release loop: new particles are added (and their initial temperature
# sampled) whenever `time` is a multiple of repeatdt. The write happens BEFORE
# the advection step, so the initial conditions end up in the output file.
for time in np.arange(0, runtime, outputdt):
    if np.isclose(np.fmod(time, repeatdt), 0):  # time is a multiple of repeatdt
        pset_init = ParticleSet(fieldset=fieldset, pclass=SampleParticleInitZero, lon=lon, lat=lat, time=time)
        pset_init.execute(sample_kernel, dt=0)  # record the initial temperature of the particles
        pset.add(pset_init)  # add the newly released particles to the total particleset
    output_file.write(pset,time)  # write the initialised particles and the advected particles
    pset.execute(kernels, runtime=outputdt, dt=delta(minutes=5))
    print('Length of pset at time %d: %d' % (time, len(pset)))
# The last advection step has not been written yet; write it once more.
output_file.write(pset, time+outputdt)
output_file.close()
# -
# In each iteration of the loop, spanning six hours, we have added ten particles.
Particle_data = xr.open_dataset("RepeatLoop.nc")
# Release hour per particle: the assert below expects 10 particles per release,
# one release every 6 hours (40 particles over 4 releases).
print(Particle_data.time[:,0].values / np.timedelta64(1, 'h')) # The initial hour at which each particle is released
assert np.allclose(Particle_data.time[:,0].values / np.timedelta64(1, 'h'), [int(k/10)*6 for k in range(40)])
# Let's check if the initial temperatures were sampled correctly for all particles
print(Particle_data.temperature[:,0].values)
# All particles are released at the same locations, so every initial
# temperature should match the first one.
assert np.allclose(Particle_data.temperature[:,0].values, Particle_data.temperature[:,0].values[0])
# And see if the sampling of the temperature field is done correctly along the trajectories
# +
Release0 = Particle_data.where(Particle_data.time[:,0]==np.timedelta64(0, 's')) # the particles released at t = 0
plt.figure()
ax = plt.axes()
ax.set_ylabel('Y')
ax.set_xlabel('X')
ax.set_ylim(1000, 49000)
ax.set_xlim(1000, 99000)
ax.plot(Release0.lon.transpose(), Release0.lat.transpose(), c='k', zorder=1)
T_scatter = ax.scatter(Release0.lon, Release0.lat, c=Release0.temperature,
                       cmap=plt.cm.inferno, norm=mpl.colors.Normalize(vmin=0., vmax=20.),
                       edgecolor='k', zorder=2)
# Raw string: avoids the invalid "\c" escape-sequence warning; rendered label unchanged.
plt.colorbar(T_scatter, label=r'T [$^{\circ} C$]')
plt.show()
| parcels/examples/tutorial_sampling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction
# - nb68で調べた特徴量を使ってみる
# - 標準化なしのsumのみ
# - ridge
# # Import everything I need :)
import warnings
warnings.filterwarnings('ignore')
import time
import multiprocessing
import glob
import gc
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
from plotly.offline import init_notebook_mode, iplot
import plotly.graph_objs as go
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.model_selection import KFold, train_test_split
from sklearn.metrics import mean_absolute_error
from sklearn import linear_model
from fastprogress import progress_bar
# # Preparation
nb = 69               # notebook number, used in output file names
isSmallSet = False    # when True, work on only the first `length` rows
length = 100000
model_name = 'ridge'  # used in output file names
pd.set_option('display.max_columns', 200)
# use atomic numbers to recode atomic names
ATOMIC_NUMBERS = {
    'H': 1,
    'C': 6,
    'N': 7,
    'O': 8,
    'F': 9
}
file_path = '../input/champs-scalar-coupling/'
glob.glob(file_path + '*')
# +
# train
path = file_path + 'train.csv'
if isSmallSet:
    train = pd.read_csv(path)[:length]
else:
    train = pd.read_csv(path)
type_train = train.type.values  # raw type labels, re-attached after scaling
# -
# test
path = file_path + 'test.csv'
if isSmallSet:
    test = pd.read_csv(path)[:length]
else:
    test = pd.read_csv(path)
type_test = test.type.values
# structure
path = file_path + 'structures.csv'
structures = pd.read_csv(path)
# fc_train — 'fc' values exported by notebook 47; presumably the
# Fermi-contact contribution — TODO confirm against nb47.
path = file_path + 'nb47_fc_train.csv'
if isSmallSet:
    fc_train = pd.read_csv(path)[:length]
else:
    fc_train = pd.read_csv(path)
# fc_test
path = file_path + 'nb47_fc_test.csv'
if isSmallSet:
    fc_test = pd.read_csv(path)[:length]
else:
    fc_test = pd.read_csv(path)
# ob charge train — Open Babel partial-charge estimates; the CSV's index
# column and its 'error' column are dropped immediately.
path = file_path + 'train_ob_charges_V7EstimatioofMullikenChargeswithOpenBabel.csv'
if isSmallSet:
    ob_charge_train = pd.read_csv(path)[:length].drop(['Unnamed: 0', 'error'], axis=1)
else:
    ob_charge_train = pd.read_csv(path).drop(['Unnamed: 0', 'error'], axis=1)
# ob charge test
path = file_path + 'test_ob_charges_V7EstimatioofMullikenChargeswithOpenBabel.csv'
if isSmallSet:
    ob_charge_test = pd.read_csv(path)[:length].drop(['Unnamed: 0', 'error'], axis=1)
else:
    ob_charge_test = pd.read_csv(path).drop(['Unnamed: 0', 'error'], axis=1)
len(test), len(fc_test)
len(train), len(fc_train)
# +
# Quick sanity summary of the loaded data.
if isSmallSet:
    print('using SmallSet !!')
print('-------------------')
print(f'There are {train.shape[0]} rows in train data.')
print(f'There are {test.shape[0]} rows in test data.')
print(f"There are {train['molecule_name'].nunique()} distinct molecules in train data.")
print(f"There are {test['molecule_name'].nunique()} distinct molecules in test data.")
print(f"There are {train['atom_index_0'].nunique()} unique atoms.")
print(f"There are {train['type'].nunique()} unique types.")
# -
# ---
# ## myFunc
# **metrics**
def kaggle_metric(df, preds):
    """Competition score: mean over coupling types of log(MAE).

    Side effect: stores `preds` in df["prediction"] (callers rely on this).
    """
    df["prediction"] = preds
    log_maes = []
    for coupling_type in df.type.unique():
        subset = df[df.type == coupling_type]
        abs_err = np.abs(subset.scalar_coupling_constant.values - subset.prediction.values)
        log_maes.append(np.log(abs_err.mean()))
    return np.mean(log_maes)
# ---
# **momory**
def reduce_mem_usage(df, verbose=True):
    """Downcast each numeric column to the smallest dtype that holds its values.

    Integer columns shrink to the first of int8/int16/int32/int64 whose range
    strictly contains [min, max]; float columns shrink only when the column's
    observed `np.finfo` precision matches the target float type exactly.
    Mutates and returns `df`; optionally prints the memory saving.
    """
    numeric_dtypes = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
    mem_before = df.memory_usage().sum() / 1024**2
    for column in df.columns:
        dtype = df[column].dtypes
        if dtype not in numeric_dtypes:
            continue
        lo = df[column].min()
        hi = df[column].max()
        if str(dtype).startswith('int'):
            for candidate in (np.int8, np.int16, np.int32, np.int64):
                info = np.iinfo(candidate)
                if lo > info.min and hi < info.max:
                    df[column] = df[column].astype(candidate)
                    break
        else:
            # precision observed in the data decides whether a narrower float is lossless
            prec = df[column].apply(lambda v: np.finfo(v).precision).max()
            if lo > np.finfo(np.float16).min and hi < np.finfo(np.float16).max and prec == np.finfo(np.float16).precision:
                df[column] = df[column].astype(np.float16)
            elif lo > np.finfo(np.float32).min and hi < np.finfo(np.float32).max and prec == np.finfo(np.float32).precision:
                df[column] = df[column].astype(np.float32)
            else:
                df[column] = df[column].astype(np.float64)
    mem_after = df.memory_usage().sum() / 1024**2
    if verbose: print('Mem. usage decreased to {:5.2f} Mb ({:.1f}% reduction)'.format(mem_after, 100 * (mem_before - mem_after) / mem_before))
    return df
class permutation_importance():
    """Permutation importance for an already-fitted model.

    Shuffles one validation column at a time and records the metric on the
    permuted data; `score_diff` is `base_score - permuted_score`.
    """
    def __init__(self, model, metric):
        # metric(y_true, y_pred) -> scalar
        self.is_computed = False
        self.n_feat = 0
        self.base_score = 0
        self.model = model
        self.metric = metric
        self.df_result = []
    def compute(self, X_valid, y_valid):
        """Fill self.df_result with the permuted score per feature."""
        self.n_feat = len(X_valid.columns)
        self.base_score = self.metric(y_valid, self.model.predict(X_valid))
        self.df_result = pd.DataFrame({'feat': X_valid.columns,
                                       'score': np.zeros(self.n_feat),
                                       'score_diff': np.zeros(self.n_feat)})
        # predict with each feature permuted in turn
        for i, col in enumerate(X_valid.columns):
            df_perm = X_valid.copy()
            np.random.seed(1)  # fixed seed so the permutation is reproducible
            df_perm[col] = np.random.permutation(df_perm[col])
            # fix: use the stored model; the original called the global `model`
            y_valid_pred = self.model.predict(df_perm)
            score = self.metric(y_valid, y_valid_pred)
            # .loc instead of chained indexing (avoids SettingWithCopyWarning)
            self.df_result.loc[self.df_result['feat'] == col, 'score'] = score
            self.df_result.loc[self.df_result['feat'] == col, 'score_diff'] = self.base_score - score
        self.is_computed = True
    def get_negative_feature(self):
        """Features with score_diff < 0 (permuted score exceeded the baseline)."""
        assert self.is_computed!=False, 'compute メソッドが実行されていません'
        idx = self.df_result['score_diff'] < 0
        return self.df_result.loc[idx, 'feat'].values.tolist()
    def get_positive_feature(self):
        """Features with score_diff > 0 (permuted score fell below the baseline)."""
        assert self.is_computed!=False, 'compute メソッドが実行されていません'
        idx = self.df_result['score_diff'] > 0
        return self.df_result.loc[idx, 'feat'].values.tolist()
    def show_permutation_importance(self, score_type='loss'):
        """Bar plot of score_diff; score_type is 'loss' or 'accuracy'."""
        assert self.is_computed!=False, 'compute メソッドが実行されていません'
        if score_type=='loss':
            ascending = True
        elif score_type=='accuracy':
            ascending = False
        else:
            ascending = ''
        plt.figure(figsize=(15, int(0.25*self.n_feat)))
        sns.barplot(x="score_diff", y="feat", data=self.df_result.sort_values(by="score_diff", ascending=ascending))
        plt.title('base_score - permutation_score')
# # Feature Engineering
# <br>
# <br>
# basic
# +
def map_atom_info(df_1,df_2, atom_idx):
    """Left-join per-atom columns of df_2 onto df_1 for endpoint `atom_idx`.

    Matches (molecule_name, atom_index_{atom_idx}) against df_2's
    (molecule_name, atom_index) and drops the redundant key column.
    """
    merged = df_1.merge(
        df_2,
        how='left',
        left_on=['molecule_name', f'atom_index_{atom_idx}'],
        right_on=['molecule_name', 'atom_index'],
    )
    return merged.drop('atom_index', axis=1)
# structure and ob_charges
# Join atom coordinates onto the combined Open Babel charge table, then attach
# the per-atom columns to both pair endpoints, suffixing them with _0 / _1.
ob_charge = pd.concat([ob_charge_train, ob_charge_test])
merge = pd.merge(ob_charge, structures, how='left',
                 left_on = ['molecule_name', 'atom_index'],
                 right_on = ['molecule_name', 'atom_index'])
for atom_idx in [0,1]:
    train = map_atom_info(train, merge, atom_idx)
    test = map_atom_info(test, merge, atom_idx)
    train = train.rename(columns={
        'atom': f'atom_{atom_idx}',
        'x': f'x_{atom_idx}',
        'y': f'y_{atom_idx}',
        'z': f'z_{atom_idx}',
        'eem': f'eem_{atom_idx}',
        'mmff94': f'mmff94_{atom_idx}',
        'gasteiger': f'gasteiger_{atom_idx}',
        'qeq': f'qeq_{atom_idx}',
        'qtpie': f'qtpie_{atom_idx}',
        'eem2015ha': f'eem2015ha_{atom_idx}',
        'eem2015hm': f'eem2015hm_{atom_idx}',
        'eem2015hn': f'eem2015hn_{atom_idx}',
        'eem2015ba': f'eem2015ba_{atom_idx}',
        'eem2015bm': f'eem2015bm_{atom_idx}',
        'eem2015bn': f'eem2015bn_{atom_idx}',})
    test = test.rename(columns={
        'atom': f'atom_{atom_idx}',
        'x': f'x_{atom_idx}',
        'y': f'y_{atom_idx}',
        'z': f'z_{atom_idx}',
        'eem': f'eem_{atom_idx}',
        'mmff94': f'mmff94_{atom_idx}',
        'gasteiger': f'gasteiger_{atom_idx}',
        'qeq': f'qeq_{atom_idx}',
        'qtpie': f'qtpie_{atom_idx}',
        'eem2015ha': f'eem2015ha_{atom_idx}',
        'eem2015hm': f'eem2015hm_{atom_idx}',
        'eem2015hn': f'eem2015hn_{atom_idx}',
        'eem2015ba': f'eem2015ba_{atom_idx}',
        'eem2015bm': f'eem2015bm_{atom_idx}',
        'eem2015bn': f'eem2015bn_{atom_idx}'})
    # test = test.rename(columns={'atom': f'atom_{atom_idx}',
    #                             'x': f'x_{atom_idx}',
    #                             'y': f'y_{atom_idx}',
    #                             'z': f'z_{atom_idx}'})
# ob_charges
# train = map_atom_info(train, ob_charge_train, 0)
# test = map_atom_info(test, ob_charge_test, 0)
# train = map_atom_info(train, ob_charge_train, 1)
# test = map_atom_info(test, ob_charge_test, 1)
# -
# <br>
# <br>
# type0
def create_type0(df):
    """Add `type_0`: the first character of the coupling type, e.g. '2JHC' -> '2'."""
    df['type_0'] = df['type'].str[0]
    return df
# train['type_0'] = train['type'].apply(lambda x: x[0])
# test['type_0'] = test['type'].apply(lambda x: x[0])
# <br>
# <br>
# distances
# +
def distances(df):
    """Add the Euclidean pair distance and per-axis SQUARED differences.

    Note: dist_x/dist_y/dist_z hold squared deltas, not distances.
    """
    dx = df['x_0'] - df['x_1']
    dy = df['y_0'] - df['y_1']
    dz = df['z_0'] - df['z_1']
    df['dist'] = np.sqrt(dx ** 2 + dy ** 2 + dz ** 2)
    df['dist_x'] = dx ** 2
    df['dist_y'] = dy ** 2
    df['dist_z'] = dz ** 2
    return df
# train = distances(train)
# test = distances(test)
# -
# <br>
# <br>
# 統計量
def create_features(df):
    """Add groupby-transform aggregates of `dist` and `fc`.

    Each feature is a statistic (count/mean/min/max/std) grouped by molecule,
    by (molecule, atom index), by (molecule, atom symbol) or by
    (molecule, type), plus its diff/div against the row's own value.
    Requires the columns added by distances(), create_type0() and the 'fc'
    column. Mutates and returns `df`.
    """
    df['molecule_couples'] = df.groupby('molecule_name')['id'].transform('count')
    df['molecule_dist_mean'] = df.groupby('molecule_name')['dist'].transform('mean')
    df['molecule_dist_min'] = df.groupby('molecule_name')['dist'].transform('min')
    df['molecule_dist_max'] = df.groupby('molecule_name')['dist'].transform('max')
    df['atom_0_couples_count'] = df.groupby(['molecule_name', 'atom_index_0'])['id'].transform('count')
    df['atom_1_couples_count'] = df.groupby(['molecule_name', 'atom_index_1'])['id'].transform('count')
    # coordinate statistics of the partner atom, per (molecule, atom_index_0)
    df[f'molecule_atom_index_0_x_1_std'] = df.groupby(['molecule_name', 'atom_index_0'])['x_1'].transform('std')
    df[f'molecule_atom_index_0_y_1_mean'] = df.groupby(['molecule_name', 'atom_index_0'])['y_1'].transform('mean')
    df[f'molecule_atom_index_0_y_1_mean_diff'] = df[f'molecule_atom_index_0_y_1_mean'] - df['y_1']
    df[f'molecule_atom_index_0_y_1_mean_div'] = df[f'molecule_atom_index_0_y_1_mean'] / df['y_1']
    df[f'molecule_atom_index_0_y_1_max'] = df.groupby(['molecule_name', 'atom_index_0'])['y_1'].transform('max')
    df[f'molecule_atom_index_0_y_1_max_diff'] = df[f'molecule_atom_index_0_y_1_max'] - df['y_1']
    df[f'molecule_atom_index_0_y_1_std'] = df.groupby(['molecule_name', 'atom_index_0'])['y_1'].transform('std')
    df[f'molecule_atom_index_0_z_1_std'] = df.groupby(['molecule_name', 'atom_index_0'])['z_1'].transform('std')
    # distance statistics per (molecule, atom_index_0)
    df[f'molecule_atom_index_0_dist_mean'] = df.groupby(['molecule_name', 'atom_index_0'])['dist'].transform('mean')
    df[f'molecule_atom_index_0_dist_mean_diff'] = df[f'molecule_atom_index_0_dist_mean'] - df['dist']
    df[f'molecule_atom_index_0_dist_mean_div'] = df[f'molecule_atom_index_0_dist_mean'] / df['dist']
    df[f'molecule_atom_index_0_dist_max'] = df.groupby(['molecule_name', 'atom_index_0'])['dist'].transform('max')
    df[f'molecule_atom_index_0_dist_max_diff'] = df[f'molecule_atom_index_0_dist_max'] - df['dist']
    df[f'molecule_atom_index_0_dist_max_div'] = df[f'molecule_atom_index_0_dist_max'] / df['dist']
    df[f'molecule_atom_index_0_dist_min'] = df.groupby(['molecule_name', 'atom_index_0'])['dist'].transform('min')
    df[f'molecule_atom_index_0_dist_min_diff'] = df[f'molecule_atom_index_0_dist_min'] - df['dist']
    df[f'molecule_atom_index_0_dist_min_div'] = df[f'molecule_atom_index_0_dist_min'] / df['dist']
    df[f'molecule_atom_index_0_dist_std'] = df.groupby(['molecule_name', 'atom_index_0'])['dist'].transform('std')
    df[f'molecule_atom_index_0_dist_std_diff'] = df[f'molecule_atom_index_0_dist_std'] - df['dist']
    df[f'molecule_atom_index_0_dist_std_div'] = df[f'molecule_atom_index_0_dist_std'] / df['dist']
    # distance statistics per (molecule, atom_index_1)
    df[f'molecule_atom_index_1_dist_mean'] = df.groupby(['molecule_name', 'atom_index_1'])['dist'].transform('mean')
    df[f'molecule_atom_index_1_dist_mean_diff'] = df[f'molecule_atom_index_1_dist_mean'] - df['dist']
    df[f'molecule_atom_index_1_dist_mean_div'] = df[f'molecule_atom_index_1_dist_mean'] / df['dist']
    df[f'molecule_atom_index_1_dist_max'] = df.groupby(['molecule_name', 'atom_index_1'])['dist'].transform('max')
    df[f'molecule_atom_index_1_dist_max_diff'] = df[f'molecule_atom_index_1_dist_max'] - df['dist']
    df[f'molecule_atom_index_1_dist_max_div'] = df[f'molecule_atom_index_1_dist_max'] / df['dist']
    df[f'molecule_atom_index_1_dist_min'] = df.groupby(['molecule_name', 'atom_index_1'])['dist'].transform('min')
    df[f'molecule_atom_index_1_dist_min_diff'] = df[f'molecule_atom_index_1_dist_min'] - df['dist']
    df[f'molecule_atom_index_1_dist_min_div'] = df[f'molecule_atom_index_1_dist_min'] / df['dist']
    df[f'molecule_atom_index_1_dist_std'] = df.groupby(['molecule_name', 'atom_index_1'])['dist'].transform('std')
    df[f'molecule_atom_index_1_dist_std_diff'] = df[f'molecule_atom_index_1_dist_std'] - df['dist']
    df[f'molecule_atom_index_1_dist_std_div'] = df[f'molecule_atom_index_1_dist_std'] / df['dist']
    # distance statistics per (molecule, partner atom symbol) and per type
    df[f'molecule_atom_1_dist_mean'] = df.groupby(['molecule_name', 'atom_1'])['dist'].transform('mean')
    df[f'molecule_atom_1_dist_min'] = df.groupby(['molecule_name', 'atom_1'])['dist'].transform('min')
    df[f'molecule_atom_1_dist_min_diff'] = df[f'molecule_atom_1_dist_min'] - df['dist']
    df[f'molecule_atom_1_dist_min_div'] = df[f'molecule_atom_1_dist_min'] / df['dist']
    df[f'molecule_atom_1_dist_std'] = df.groupby(['molecule_name', 'atom_1'])['dist'].transform('std')
    df[f'molecule_atom_1_dist_std_diff'] = df[f'molecule_atom_1_dist_std'] - df['dist']
    df[f'molecule_type_0_dist_std'] = df.groupby(['molecule_name', 'type_0'])['dist'].transform('std')
    df[f'molecule_type_0_dist_std_diff'] = df[f'molecule_type_0_dist_std'] - df['dist']
    df[f'molecule_type_dist_mean'] = df.groupby(['molecule_name', 'type'])['dist'].transform('mean')
    df[f'molecule_type_dist_mean_diff'] = df[f'molecule_type_dist_mean'] - df['dist']
    df[f'molecule_type_dist_mean_div'] = df[f'molecule_type_dist_mean'] / df['dist']
    df[f'molecule_type_dist_max'] = df.groupby(['molecule_name', 'type'])['dist'].transform('max')
    df[f'molecule_type_dist_min'] = df.groupby(['molecule_name', 'type'])['dist'].transform('min')
    df[f'molecule_type_dist_std'] = df.groupby(['molecule_name', 'type'])['dist'].transform('std')
    df[f'molecule_type_dist_std_diff'] = df[f'molecule_type_dist_std'] - df['dist']
    # fc
    df[f'molecule_type_fc_max'] = df.groupby(['molecule_name', 'type'])['fc'].transform('max')
    df[f'molecule_type_fc_min'] = df.groupby(['molecule_name', 'type'])['fc'].transform('min')
    df[f'molecule_type_fc_std'] = df.groupby(['molecule_name', 'type'])['fc'].transform('std')
    df[f'molecule_type_fc_std_diff'] = df[f'molecule_type_fc_std'] - df['fc']
    df[f'molecule_atom_index_0_fc_mean'] = df.groupby(['molecule_name', 'atom_index_0'])['fc'].transform('mean')
    df[f'molecule_atom_index_0_fc_mean_diff'] = df[f'molecule_atom_index_0_fc_mean'] - df['fc']
    # NOTE(review): this _div divides by 'dist', not 'fc' — inconsistent with
    # every other fc *_div feature here; possibly unintended. Left unchanged.
    df[f'molecule_atom_index_0_fc_mean_div'] = df[f'molecule_atom_index_0_fc_mean'] / df['dist']
    df[f'molecule_atom_index_0_fc_max'] = df.groupby(['molecule_name', 'atom_index_0'])['fc'].transform('max')
    df[f'molecule_atom_index_0_fc_max_diff'] = df[f'molecule_atom_index_0_fc_max'] - df['fc']
    df[f'molecule_atom_index_0_fc_max_div'] = df[f'molecule_atom_index_0_fc_max'] / df['fc']
    df[f'molecule_atom_index_0_fc_min'] = df.groupby(['molecule_name', 'atom_index_0'])['fc'].transform('min')
    df[f'molecule_atom_index_0_fc_min_diff'] = df[f'molecule_atom_index_0_fc_min'] - df['fc']
    df[f'molecule_atom_index_0_fc_min_div'] = df[f'molecule_atom_index_0_fc_min'] / df['fc']
    df[f'molecule_atom_index_0_fc_std'] = df.groupby(['molecule_name', 'atom_index_0'])['fc'].transform('std')
    df[f'molecule_atom_index_0_fc_std_diff'] = df[f'molecule_atom_index_0_fc_std'] - df['fc']
    df[f'molecule_atom_index_0_fc_std_div'] = df[f'molecule_atom_index_0_fc_std'] / df['fc']
    df[f'molecule_atom_index_1_fc_mean'] = df.groupby(['molecule_name', 'atom_index_1'])['fc'].transform('mean')
    df[f'molecule_atom_index_1_fc_mean_diff'] = df[f'molecule_atom_index_1_fc_mean'] - df['fc']
    df[f'molecule_atom_index_1_fc_mean_div'] = df[f'molecule_atom_index_1_fc_mean'] / df['fc']
    df[f'molecule_atom_index_1_fc_max'] = df.groupby(['molecule_name', 'atom_index_1'])['fc'].transform('max')
    df[f'molecule_atom_index_1_fc_max_diff'] = df[f'molecule_atom_index_1_fc_max'] - df['fc']
    df[f'molecule_atom_index_1_fc_max_div'] = df[f'molecule_atom_index_1_fc_max'] / df['fc']
    df[f'molecule_atom_index_1_fc_min'] = df.groupby(['molecule_name', 'atom_index_1'])['fc'].transform('min')
    df[f'molecule_atom_index_1_fc_min_diff'] = df[f'molecule_atom_index_1_fc_min'] - df['fc']
    df[f'molecule_atom_index_1_fc_min_div'] = df[f'molecule_atom_index_1_fc_min'] / df['fc']
    df[f'molecule_atom_index_1_fc_std'] = df.groupby(['molecule_name', 'atom_index_1'])['fc'].transform('std')
    df[f'molecule_atom_index_1_fc_std_diff'] = df[f'molecule_atom_index_1_fc_std'] - df['fc']
    df[f'molecule_atom_index_1_fc_std_div'] = df[f'molecule_atom_index_1_fc_std'] / df['fc']
    return df
# angle features
# +
def map_atom_info(df_1,df_2, atom_idx):
    """Merge per-atom columns of df_2 onto df_1, keyed on endpoint `atom_idx`.

    (Redefinition of the earlier helper with identical behaviour.)
    """
    join_left = ['molecule_name', f'atom_index_{atom_idx}']
    join_right = ['molecule_name', 'atom_index']
    out = pd.merge(df_1, df_2, how='left', left_on=join_left, right_on=join_right)
    out = out.drop('atom_index', axis=1)
    return out
def create_closest(df):
    """Attach, for each pair endpoint, the index and coordinates of that
    atom's nearest neighbour in the molecule (columns *_closest_0 / *_closest_1).
    """
    df_temp=df.loc[:,["molecule_name","atom_index_0","atom_index_1","dist","x_0","y_0","z_0","x_1","y_1","z_1"]].copy()
    df_temp_=df_temp.copy()
    # Mirror every pair so each atom also appears on the atom_index_0 side.
    df_temp_= df_temp_.rename(columns={'atom_index_0': 'atom_index_1',
                                       'atom_index_1': 'atom_index_0',
                                       'x_0': 'x_1',
                                       'y_0': 'y_1',
                                       'z_0': 'z_1',
                                       'x_1': 'x_0',
                                       'y_1': 'y_0',
                                       'z_1': 'z_0'})
    df_temp=pd.concat(objs=[df_temp,df_temp_],axis=0)
    # Keep only the row(s) where this atom's pair distance is minimal.
    df_temp["min_distance"]=df_temp.groupby(['molecule_name', 'atom_index_0'])['dist'].transform('min')
    df_temp= df_temp[df_temp["min_distance"]==df_temp["dist"]]
    df_temp=df_temp.drop(['x_0','y_0','z_0','min_distance', 'dist'], axis=1)
    # NOTE(review): the 'distance' -> 'distance_closest' mapping is a no-op —
    # the column is named 'dist' and was dropped above, so no
    # distance_closest_{0,1} columns are ever produced downstream.
    df_temp= df_temp.rename(columns={'atom_index_0': 'atom_index',
                                     'atom_index_1': 'atom_index_closest',
                                     'distance': 'distance_closest',
                                     'x_1': 'x_closest',
                                     'y_1': 'y_closest',
                                     'z_1': 'z_closest'})
    for atom_idx in [0,1]:
        df = map_atom_info(df,df_temp, atom_idx)
        df = df.rename(columns={'atom_index_closest': f'atom_index_closest_{atom_idx}',
                                'distance_closest': f'distance_closest_{atom_idx}',
                                'x_closest': f'x_closest_{atom_idx}',
                                'y_closest': f'y_closest_{atom_idx}',
                                'z_closest': f'z_closest_{atom_idx}'})
    return df
def add_cos_features(df):
    """Append distance-to-closest-atom columns and three bond-angle cosines.

    cos_0_1 is between the two closest-atom directions; cos_0 / cos_1 are
    between each closest-atom direction and the pair axis. Unit vectors are
    kept as locals, so no intermediate vec_* columns appear in `df`.
    """
    for i in (0, 1):
        df[f"distance_{i}"] = (
            (df[f"x_{i}"] - df[f"x_closest_{i}"]) ** 2
            + (df[f"y_{i}"] - df[f"y_closest_{i}"]) ** 2
            + (df[f"z_{i}"] - df[f"z_closest_{i}"]) ** 2
        ) ** (1 / 2)
    u0 = [(df[f"{ax}_0"] - df[f"{ax}_closest_0"]) / df["distance_0"] for ax in "xyz"]
    u1 = [(df[f"{ax}_1"] - df[f"{ax}_closest_1"]) / df["distance_1"] for ax in "xyz"]
    axis = [(df[f"{ax}_1"] - df[f"{ax}_0"]) / df["dist"] for ax in "xyz"]
    df["cos_0_1"] = sum(a * b for a, b in zip(u0, u1))
    df["cos_0"] = sum(a * b for a, b in zip(u0, axis))
    df["cos_1"] = sum(a * b for a, b in zip(u1, axis))
    return df
# +
# %%time
# Run the feature-engineering steps in order; row counts are printed before
# each step as a guard against unexpected row growth from the merges.
print('add fc')
print(len(train), len(test))
# .values assignment assumes fc_train/fc_test are row-aligned with
# train/test — TODO confirm ordering of the nb47 export.
train['fc'] = fc_train.values
test['fc'] = fc_test.values
print('type0')
print(len(train), len(test))
train = create_type0(train)
test = create_type0(test)
print('distances')
print(len(train), len(test))
train = distances(train)
test = distances(test)
print('create_featueres')
print(len(train), len(test))
train = create_features(train)
test = create_features(test)
# print('create_closest')
# print(len(train), len(test))
# train = create_closest(train)
# test = create_closest(test)
# train.drop_duplicates(inplace=True, subset=['id']) # なぜかtrainの行数が増えるバグが発生
# train = train.reset_index(drop=True)
# print('add_cos_features')
# print(len(train), len(test))
# train = add_cos_features(train)
# test = add_cos_features(test)
# -
# ---
# <br>
# <br>
# <br>
# nanがある特徴量を削除
# Drop every column that contains any NaN. Train is the reference; the same
# columns are removed from test to keep the schemas aligned.
drop_feats = train.columns[train.isnull().sum(axis=0) != 0].values
drop_feats
# +
train = train.drop(drop_feats, axis=1)
test = test.drop(drop_feats, axis=1)
assert sum(train.isnull().sum(axis=0))==0, f'train に nan があります。'
assert sum(test.isnull().sum(axis=0))==0, f'test に nan があります。'
# -
# <br>
# <br>
# <br>
# エンコーディング
# +
cat_cols = ['atom_1']
# Numeric columns: everything except identifiers, the target, the type label
# and the raw atom-symbol columns.
num_cols = list(set(train.columns) - set(cat_cols) - set(['type', "scalar_coupling_constant", 'molecule_name', 'id',
                                                          'atom_0', 'atom_1','atom_2', 'atom_3', 'atom_4', 'atom_5', 'atom_6', 'atom_7', 'atom_8', 'atom_9']))
print(f'カテゴリカル: {cat_cols}')
print(f'数値: {num_cols}')
# -
# <br>
# <br>
# LabelEncode
#
# - `atom_1` = {H, C, N}
# - `type_0` = {1, 2, 3}
# - `type` = {2JHC, ...}
# Label-encode the type columns; the encoder is fit on train+test combined so
# both splits share one label space.
for f in ['type_0', 'type']:
    if f in train.columns:
        lbl = LabelEncoder()
        lbl.fit(list(train[f].values) + list(test[f].values))
        train[f] = lbl.transform(list(train[f].values))
        test[f] = lbl.transform(list(test[f].values))
# ## nb68で決めた特徴量を作成
# Feature set selected in notebook 68.
top10_feats = [
    'molecule_atom_index_0_dist_mean_div',
    'molecule_atom_index_0_dist_min_div',
    'molecule_atom_index_0_dist_max_div',
    'molecule_atom_1_dist_min_div',
    'molecule_atom_index_1_dist_mean_div',
    'molecule_type_dist_mean_div',
    'eem2015bm_0',
    'molecule_dist_min',
    'eem_0',
    'eem2015hm_0']
y = train['scalar_coupling_constant']
train = train[top10_feats+['fc']]
test = test[top10_feats+['fc']]
# - sum
# compute
# Replace each selected feature with its sum with fc. The loop iterates the
# columns Index captured before any insertion, so the new 'fc+...' columns are
# presumably not revisited — confirm; note 'fc' itself yields 'fc+fc' = 2*fc.
for feat in progress_bar(train.columns):
    train[f'fc+{feat}'] = train[feat].values + train.fc.values
    test[f'fc+{feat}'] = test[feat].values + test.fc.values
train = train.drop(top10_feats, axis=1)
test = test.drop(top10_feats, axis=1)
# <br>
# <br>
# <br>
# 標準化
# Standardise all remaining feature columns (fit on train, apply to test).
scaler = StandardScaler()
train[train.columns] = scaler.fit_transform(train)
test[test.columns] = scaler.transform(test)
# <br>
# <br>
# LabelEncode
#
# - `atom_1` = {H, C, N}
# - `type_0` = {1, 2, 3}
# - `type` = {2JHC, ...}
# Re-attach the raw coupling-type labels (saved right after loading) and
# label-encode them; done after scaling so 'type' itself is not standardised.
train['type'] = type_train
test['type'] = type_test
for f in ['type']:
    if f in train.columns:
        lbl = LabelEncoder()
        lbl.fit(list(train[f].values) + list(test[f].values))
        train[f] = lbl.transform(list(train[f].values))
        test[f] = lbl.transform(list(test[f].values))
# # create train, test data
# +
# train = train.drop(['id', 'molecule_name', 'atom_0', 'scalar_coupling_constant'], axis=1)
# test = test.drop(['id', 'molecule_name', 'atom_0'], axis=1)
# train = reduce_mem_usage(train)
# test = reduce_mem_usage(test)
X = train.copy()
X_test = test.copy()
# -
# Free the originals; X / X_test are the working copies from here on.
del train, test
gc.collect()
# # Training model
# **params**
# +
# Configuration
MAX_ITER = 10000
RANDOM_STATE = 0
# One Ridge alpha per coupling type, indexed by the label-encoded type id
# (see model_params_list[t] in the training loop below).
model_params_list = [
    {'alpha': 0.016490786888660246},
    {'alpha': 0.01948297047624237},
    {'alpha': 0.01181445773799895},
    {'alpha': 0.01},
    {'alpha': 0.17022936642711317},
    {'alpha': 0.9019291300867383},
    {'alpha': 1.2589254117941673},
    {'alpha': 0.5469291042181513}]
# -
n_folds = 6
# NOTE(review): shuffle=True without random_state makes the fold splits
# non-reproducible across runs — consider passing a random_state.
folds = KFold(n_splits=n_folds, shuffle=True)
def train_model(X, X_test, y, folds, model_params):
    """Train a Ridge model with K-fold cross-validation for one coupling type.

    Parameters
    ----------
    X, X_test : feature DataFrames for the train / test rows of one type.
    y : target array aligned with X.
    folds : a KFold (or compatible) splitter.
    model_params : dict of Ridge keyword arguments (e.g. {'alpha': ...}).

    Returns
    -------
    dict with 'oof' (out-of-fold predictions for X), 'prediction'
    (test predictions averaged over all folds) and 'scores' (per-fold MAE).
    """
    model = linear_model.Ridge(**model_params, max_iter=MAX_ITER, random_state=RANDOM_STATE) # <=================
    scores = []
    oof = np.zeros(len(X))
    # fix: sized to X_test (was len(X)) and accumulated as a fold average —
    # the original overwrote it each fold, keeping only the last fold's model.
    prediction = np.zeros(len(X_test))
    result_dict = {}
    for fold_n, (train_idx, valid_idx) in enumerate(folds.split(X)):
        print(f'Fold {fold_n + 1} started at {time.ctime()}')
        model.fit(X.iloc[train_idx, :], y[train_idx])
        y_valid_pred = model.predict(X.iloc[valid_idx, :])
        y_train_pred = model.predict(X.iloc[train_idx, :])
        prediction += model.predict(X_test) / folds.n_splits
        oof[valid_idx] = y_valid_pred
        score = mean_absolute_error(y[valid_idx], y_valid_pred)
        score_train = mean_absolute_error(y[train_idx], y_train_pred)
        scores.append(score)
        print(f'fold {fold_n+1} train:{score_train :.5f} \t valid: {score :.5f}')
    print('')
    print('CV mean score : {0:.4f}, std: {1:.4f}.'.format(np.mean(scores), np.std(scores)))
    print('kaggle mean score: {0:.4f}'.format(np.log(np.mean(scores))))
    print('')
    result_dict['oof'] = oof
    result_dict['prediction'] = prediction
    result_dict['scores'] = scores
    return result_dict
# +
# %%time
# Train one model per coupling type.
X_short = pd.DataFrame({'ind': list(X.index), 'type': X['type'].values, 'oof': [0] * len(X), 'target': y.values})
X_short_test = pd.DataFrame({'ind': list(X_test.index), 'type': X_test['type'].values, 'prediction': [0] * len(X_test)})
for t in X['type'].unique():
    model_params = model_params_list[t]  # alphas are indexed by the encoded type id
    print('*'*80)
    print(f'Training of type {t}')
    print('*'*80)
    X_t = X.loc[X['type'] == t]
    X_test_t = X_test.loc[X_test['type'] == t]
    y_t = X_short.loc[X_short['type'] == t, 'target'].values
    result_dict = train_model(X_t, X_test_t, y_t, folds, model_params)
    X_short.loc[X_short['type'] == t, 'oof'] = result_dict['oof']
    X_short_test.loc[X_short_test['type'] == t, 'prediction'] = result_dict['prediction']
print('')
print('===== finish =====')
# kaggle_metric needs the target column on X and also adds a 'prediction'
# column as a side effect — both are dropped again right after scoring.
X['scalar_coupling_constant'] = y
metric = kaggle_metric(X, X_short['oof'])
X = X.drop(['scalar_coupling_constant', 'prediction'], axis=1)
print('CV mean score(group log mae): {0:.4f}'.format(metric))
prediction = X_short_test['prediction']
# -
# # Save
# **submission**
# path_submittion = './output/' + 'nb{}_submission_lgb_{}.csv'.format(nb, metric)
path_submittion = f'../output/nb{nb}_submission_{model_name}_{metric :.5f}.csv'
print(f'save pash: {path_submittion}')  # NOTE(review): "pash" looks like a typo for "path"
submittion = pd.read_csv('../input/champs-scalar-coupling/sample_submission.csv')
# submittion = pd.read_csv('./input/champs-scalar-coupling/sample_submission.csv')[::100]
submittion['scalar_coupling_constant'] = prediction
# Skip writing files when running on the reduced data set.
if isSmallSet:
    pass
else:
    submittion.to_csv(path_submittion, index=False)
# ---
# **result**
# Save the out-of-fold predictions alongside the submission.
path_oof = f'../output/nb{nb}_oof_{model_name}_{metric :.5f}.csv'
print(f'save pash: {path_oof}')
oof = pd.DataFrame(X_short['oof'])
if isSmallSet:
    pass
else:
    oof.to_csv(path_oof, index=False)
# # analysis
# +
# Assemble truth / OOF prediction / decoded type label for the diagnostic
# plots below (lbl still holds the 'type' encoder fitted above).
plot_data = pd.DataFrame(y)
plot_data.index.name = 'id'
plot_data['yhat'] = X_short['oof']
plot_data['type'] = lbl.inverse_transform(X['type'])
def plot_oof_preds(ctype, llim, ulim):
    """Scatter OOF predictions against truth for one coupling type.

    Reads the module-level `plot_data`; llim/ulim bound both axes, and the
    diagonal marks perfect predictions.
    """
    subset = plot_data.loc[plot_data['type'] == ctype,
                           ['scalar_coupling_constant', 'yhat']]
    plt.figure(figsize=(6, 6))
    sns.scatterplot(x='scalar_coupling_constant', y='yhat', data=subset)
    bounds = (llim, ulim)
    plt.xlim(bounds)
    plt.ylim(bounds)
    plt.plot([llim, ulim], [llim, ulim])
    plt.xlabel('scalar_coupling_constant')
    plt.ylabel('predicted')
    plt.title(f'{ctype}', fontsize=18)
    plt.show()
# One truth-vs-prediction scatter per coupling type, with axis limits chosen
# per type.
plot_oof_preds('1JHC', 0, 250)
plot_oof_preds('1JHN', 0, 100)
plot_oof_preds('2JHC', -50, 50)
plot_oof_preds('2JHH', -50, 50)
plot_oof_preds('2JHN', -25, 25)
plot_oof_preds('3JHC', -25, 60)
plot_oof_preds('3JHH', -20, 20)
plot_oof_preds('3JHN', -10, 15)
# -
| src/69_Create_Submission_39_ridge.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/kapilkn/Advanced-Data-Science-Capstone/blob/master/Survival_of_Titanic_Passenger_Kapil_.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="sW06DLzCsuCY" colab_type="code" colab={}
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from pandas.plotting import scatter_matrix
# %matplotlib inline
import seaborn as sns
sns.set(style="white",color_codes=True)
sns.set(font_scale=1.5)
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score, precision_score, recall_score, f1_score
from sklearn import metrics
# + id="3eqwnkestuyc" colab_type="code" colab={}
df_train =pd.read_csv('train.csv')
# + id="TSGF4LRLtwnL" colab_type="code" outputId="a757fa8f-a7fd-40ce-bbf3-ae82bc9f4d8f" colab={"base_uri": "https://localhost:8080/", "height": 33}
df_train.shape
# + id="5R9UIqexyKEZ" colab_type="code" outputId="4270578f-8e59-4cfd-cda1-377eb0fce415" colab={"base_uri": "https://localhost:8080/", "height": 67}
df_train.Survived.value_counts()
# + id="zPxon4X-yS4R" colab_type="code" outputId="49116c8f-8223-4bea-cb99-d990b77ecdf9" colab={"base_uri": "https://localhost:8080/", "height": 67}
df_train.Sex.value_counts()
# + id="gfpuTPOJya97" colab_type="code" outputId="d09a6859-50a7-4a55-aa73-e0c9095250c0" colab={"base_uri": "https://localhost:8080/", "height": 84}
df_train.Embarked.value_counts()
# + id="uBP772GTyfsV" colab_type="code" outputId="7e297095-ddd9-4486-d133-3e87b4985635" colab={"base_uri": "https://localhost:8080/", "height": 392}
df_train.isnull()
# + id="qU9IvOShymeO" colab_type="code" outputId="0545f6db-09e2-4522-c28e-a55fc616ec84" colab={"base_uri": "https://localhost:8080/", "height": 235}
df_train.isnull().sum()
# + id="WD1ZR0IJytyi" colab_type="code" outputId="4ddf6066-1634-4e03-8171-1c0094ac3a2f" colab={"base_uri": "https://localhost:8080/", "height": 33}
df_train.shape
# + id="zsIM8gPIy1zV" colab_type="code" outputId="76865a0b-6ecd-48c7-dd8f-b3535cc889a3" colab={"base_uri": "https://localhost:8080/", "height": 190}
df_train = df_train.drop(['PassengerId','Name','Ticket','Cabin'], axis =1)
df_train.head()
# + id="qTG1_Ntv11mK" colab_type="code" colab={}
def age_approx(cols):
    """Impute a missing Age using a per-class typical age.

    `cols` is an (Age, Pclass) pair, e.g. a row of df_train[['Age', 'Pclass']].
    A known age is returned unchanged; a missing one is replaced by 37 for
    first class, 29 for second class and 24 otherwise.
    """
    age, pclass = cols[0], cols[1]
    if not pd.isnull(age):
        return age
    # Fallback ages per passenger class; any other class value maps to 24.
    return {1: 37, 2: 29}.get(pclass, 24)
# + id="1jlU2MZJ2nSf" colab_type="code" outputId="c6cb9f6d-7584-48e9-c760-cc1d10aa6d5f" colab={"base_uri": "https://localhost:8080/", "height": 162}
df_train.groupby(['Pclass']).mean()
# + id="GO5VbQIO20Od" colab_type="code" colab={}
df_train["Age"] = df_train[['Age','Pclass']].apply(age_approx,axis=1)
# + id="b0sJZVfx3GSG" colab_type="code" outputId="fa391251-061e-40be-a28f-f9ee661856ef" colab={"base_uri": "https://localhost:8080/", "height": 168}
df_train.isnull().sum()
# + id="4ihAusG83LNf" colab_type="code" outputId="10c46549-9b67-4cdf-9449-a14267a73ef3" colab={"base_uri": "https://localhost:8080/", "height": 168}
df_train.dropna(inplace=True)
df_train.isnull().sum()
# + id="WBrsfiFo4nwj" colab_type="code" outputId="80d0cac4-f926-448b-d210-318a6763abe5" colab={"base_uri": "https://localhost:8080/", "height": 168}
df_train.dtypes
# + id="Bx0CcH-s4tKB" colab_type="code" colab={}
df_train_dummies = pd.get_dummies(df_train,columns=['Sex'])
# + id="YpthZi_t434r" colab_type="code" colab={}
df_train_dummies = pd.get_dummies(df_train_dummies,columns=['Embarked'])
# + id="6JLpJFL95FB1" colab_type="code" outputId="7257ec91-9ea1-4d1e-8f51-ca53e1dbd40a" colab={"base_uri": "https://localhost:8080/", "height": 190}
df_train_dummies.head()
# + id="KuNi3qfH5IL-" colab_type="code" outputId="e35fd1fd-8cd8-4840-8c37-1e2c89cc72f1" colab={"base_uri": "https://localhost:8080/", "height": 565}
plt.figure(figsize=(12,8))
sns.heatmap(df_train_dummies.corr())
# + id="3S_kYIVt5cyb" colab_type="code" colab={}
use_features = ['Pclass', 'Age', 'SibSp', 'Parch', 'Sex_female', 'Sex_male', 'Embarked_C', 'Embarked_Q', 'Embarked_S']
X = df_train_dummies[use_features].values
y = df_train_dummies['Survived']
# + id="_-b93s5p6TxX" colab_type="code" colab={}
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.3,random_state=1)
# + id="vgq75MUk6qP9" colab_type="code" outputId="5bbbb0e6-3ae9-4476-917c-ac3dca43963e" colab={"base_uri": "https://localhost:8080/", "height": 84}
print(X_train.shape)
print(X_test.shape)
print(y_train.shape)
print(y_test.shape)
# + id="Na3Slw8I7KqW" colab_type="code" colab={}
LR = LogisticRegression()
# + id="OYZNogzN7mnb" colab_type="code" outputId="18f1550a-1171-46b4-dc01-07ee981e893d" colab={"base_uri": "https://localhost:8080/", "height": 255}
LR.fit(X_train,y_train)
# + id="daWO527E7sUg" colab_type="code" colab={}
y_pred =LR.predict(X_test)
# + id="vgsJspnr74RC" colab_type="code" outputId="b2c33911-775f-44a9-95c9-8ceddfb51c6a" colab={"base_uri": "https://localhost:8080/", "height": 50}
metrics.confusion_matrix(y_test,y_pred)
# + id="jn5tcnF37-7R" colab_type="code" outputId="b0c4b9bf-fcb1-4042-a0d3-4372447616e5" colab={"base_uri": "https://localhost:8080/", "height": 33}
len(X_test)
# + id="uzelaE8n8Ed0" colab_type="code" outputId="168488d0-4dc0-441e-f32f-d9915431d9b1" colab={"base_uri": "https://localhost:8080/", "height": 168}
print(classification_report(y_test,y_pred))
# + id="tSWlqrNd8MIo" colab_type="code" outputId="1f19cadf-2456-4415-bc9d-9720910408f6" colab={"base_uri": "https://localhost:8080/", "height": 50}
LR.coef_
# + id="AXHd4TeX8Py5" colab_type="code" outputId="aa450da5-686e-421e-a686-a988db62a17e" colab={"base_uri": "https://localhost:8080/", "height": 33}
LR.intercept_
# + id="njU1iOyc8Rmr" colab_type="code" outputId="fdb69cb1-915c-4aff-ad4a-099cdb83f6ab" colab={"base_uri": "https://localhost:8080/", "height": 67}
df_train_dummies[use_features].columns
# + id="IivfrC5o8bYg" colab_type="code" outputId="36d8d6d2-6253-412f-ccdc-6e9452f9cd62" colab={"base_uri": "https://localhost:8080/", "height": 309}
plt.plot(LR.predict_proba(X_test)) # Survive vs non survive
# + id="frZaXCyJ8jzr" colab_type="code" outputId="ae98b52a-bf17-433a-b040-29f7267b6822" colab={"base_uri": "https://localhost:8080/", "height": 1000}
LR.predict_proba(X_test)
# + id="FHxR6srW9EuP" colab_type="code" colab={}
| Survival_of_Titanic_Passenger_Kapil_.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# **Note**: Click on "*Kernel*" > "*Restart Kernel and Clear All Outputs*" in [JupyterLab](https://jupyterlab.readthedocs.io/en/stable/) *before* reading this notebook to reset its output. If you cannot run this file on your machine, you may want to open it [in the cloud <img height="12" style="display: inline-block" src="../static/link/to_mb.png">](https://mybinder.org/v2/gh/webartifex/intro-to-data-science/main?urlpath=lab/tree/00_python_in_a_nutshell/00_content_arithmetic.ipynb).
# # Chapter 0: Python in a Nutshell (Part 1)
# Python itself is a so-called **general purpose** programming language. That means it does *not* know about any **scientific algorithms** "out of the box."
#
# The purpose of this notebook is to summarize anything that is worthwhile knowing about Python and programming on a "high level" and lay the foundation for working with so-called **third-party libraries**, some of which we see in subsequent chapters.
# ## Basic Arithmetic
# Any computer can always be viewed as some sort of a "fancy calculator" and Python is no exception from that. The following code snippet, for example, does exactly what we expect it would, namely *addition*.
1 + 2
# In terms of **syntax** (i.e., "grammatical rules"), digits are interpreted as plain numbers (i.e., a so-called **numerical literal**) and the `+` symbol constitutes a so-called **operator** that is built into Python.
#
# Other common operators are `-` for *subtraction*, `*` for *multiplication*, and `**` for *exponentiation*. In terms of arithmetic, Python allows the **chaining** of operations and adheres to conventions from math, namely the [PEMDAS rule <img height="12" style="display: inline-block" src="../static/link/to_wiki.png">](https://en.wikipedia.org/wiki/Order_of_operations#Mnemonics).
87 - 42
3 * 5
2 ** 3
2 * 2 ** 3
# To change the **order of precedence**, parentheses may be used for grouping. Syntactically, they are so-called **delimiters** that mark the beginning and the end of a **(sub-)expression** (i.e., a group of symbols that are **evaluated** together).
(2 * 2) ** 3
# We must beware that some operators do *not* do what we expect. So, the following code snippet is *not* an example of exponentiation.
2 ^ 3
# *Division* is also not as straightforward as we may think!
#
# While the `/` operator does *ordinary division*, we must note the subtlety of the `.0` in the result.
8 / 2
# Whereas both `4` and `4.0` have the *same* **semantic meaning** to us humans, they are two *different* "things" for a computer!
#
# Instead of using a single `/`, we may divide with a double `//` just as well.
8 // 2
# However, then we must be certain that the result is not a number with decimals other than `.0`. As we can guess from the result below, the `//` operator does *integer division* (i.e., "whole number" division).
7 // 2
# On the contrary, the `%` operator implements the so-called *modulo division* (i.e., "rest" division). Here, a result of `0` indicates that a number is divisible by another one whereas any result other than `0` shows the opposite.
7 % 2
8 % 2
# What makes Python such an intuitive and thus beginner-friendly language, is the fact that it is a so-called **[interpreted language <img height="12" style="display: inline-block" src="../static/link/to_wiki.png">](https://en.wikipedia.org/wiki/Interpreter_%28computing%29)**. In layman's terms, this means that we can go back up and *re-execute* any of the code cells in *any order*: That allows us to built up code *incrementally*. So-called **[compiled languages <img height="12" style="display: inline-block" src="../static/link/to_wiki.png">](https://en.wikipedia.org/wiki/Compiler)**, on the other hand, would require us to run a program in its entirety even if only one small part has been changed.
#
# Instead of running individual code cells "by hand" and taking the result as it is, Python offers us the usage of **variables** to store "values." A variable is created with the single `=` symbol, the so-called **assignment statement**.
a = 1
b = 2
# After assignment, we can simply ask Python about the values of `a` and `b`.
a
b
# Similarly, we can use a variable in place of, for example, a numerical literal within an expression.
a + b
# Also, we may combine several lines of code into a single code cell, adding as many empty lines as we wish to group the code. Then, all of the lines are executed from top to bottom in linear order whenever we execute the cell as a whole.
# +
a = 1
b = 2
a + b
# -
# Something that fools many beginners is the fact that the `=` statement is *not* to be confused with the concept of an *equation* from math! An `=` statement is *always* to be interpreted from right to left.
#
# The following code snippet, for example, takes the "old" value of `a`, adds the value of `b` to it, and then stores the resulting `3` as the "new" value of `a`. After all, a variable is called a variable as its value is indeed variable!
a = a + b
a
# In general, the result of some expression involving variables is often stored in yet another variable for further processing. This is how more realistic programs are built up.
# +
a = 1
b = 2
c = a + b
c
| 00_python_in_a_nutshell/00_content_arithmetic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:.conda-py36] *
# language: python
# name: conda-env-.conda-py36-py
# ---
# +
## pneumothorax heatmap
## Use the feature maps of a classification network trained on the
## ChestX-ray8 dataset to inspect heatmaps on the pneumothorax dataset and
## compare them against the pneumothorax segmentation masks.
# +
from glob import glob
import os
import pandas as pd
import pydicom
import matplotlib.pyplot as plt
# checking the input files
# print(os.listdir("../input/siim-acr-pneumothorax-segmentation-data"))
# collect all dcm file paths for the train and test splits
train = sorted(glob("pneumothorax/dicom-images-train/*/*/*.dcm"))
test = sorted(glob("pneumothorax/dicom-images-test/*/*/*.dcm"))
print("train files: ", len(train))
print("test files: ", len(test))
pd.reset_option('max_colwidth')
# reading the csv with the run-length-encoded masks
print("the csv with the labels: -1 means no Pneumothorax, othervise there is an encoding for the place of Pneumothorax")
masks = pd.read_csv("pneumothorax/train-rle.csv", delimiter=",")
masks.head()
# +
# Build one record per training DICOM so image metadata and mask labels are
# reachable from a single DataFrame.
patients = []
missing = 0
pd.reset_option('max_colwidth')
for t in train:
    data = pydicom.dcmread(t)
    patient = {}
    patient["UID"] = data.SOPInstanceUID
    try:
        # train-rle.csv may have no row for this image; `.values[0]` then
        # raises IndexError and the image is counted as unlabeled.
        encoded_pixels = masks[masks["ImageId"] == patient["UID"]].values[0][1]
        patient["EncodedPixels"] = encoded_pixels
    except IndexError:  # was a bare `except:`, which also hid real errors
        missing += 1
    patient["Age"] = data.PatientAge
    patient["Sex"] = data.PatientSex
    patient["Modality"] = data.Modality
    patient["BodyPart"] = data.BodyPartExamined
    patient["ViewPosition"] = data.ViewPosition
    # Reconstruct the on-disk path from the DICOM UID hierarchy.
    patient["path"] = "pneumothorax/dicom-images-train/" + data.StudyInstanceUID + "/" + data.SeriesInstanceUID + "/" + data.SOPInstanceUID + ".dcm"
    patients.append(patient)
print("missing labels: ", missing)
#pd.set_option('display.max_colwidth', -1)
df_patients = pd.DataFrame(patients, columns=["UID", "EncodedPixels", "Age", "Sex", "Modality", "BodyPart", "ViewPosition", "path"])
print("images with labels: ", df_patients.shape[0])
df_patients.head()
# +
#mask functions from sample dataset
import numpy as np
def mask2rle(img, width, height):
    """Encode a binary mask (pixel values 0 / 255) as a relative RLE string.

    The mask is scanned as img[x][y] with x over `width` and y over `height`.
    The output is space-separated (start, length) pairs where each start is
    counted from the end of the previous run (the pixel counter is reset
    after every emitted run) — the inverse of `rle2mask`, which accumulates
    the starts.
    """
    rle = []
    last_color = 0
    current_pixel = 0
    run_start = -1
    run_length = 0
    for x in range(width):
        for y in range(height):
            current_color = img[x][y]
            if current_color != last_color:
                if current_color == 255:
                    # A run of mask pixels begins at the current relative offset.
                    run_start = current_pixel
                    run_length = 1
                else:
                    # The run just ended: emit it and restart relative counting.
                    rle.append(str(run_start))
                    rle.append(str(run_length))
                    run_start = -1
                    run_length = 0
                    current_pixel = 0
            elif run_start > -1:
                run_length += 1
            last_color = current_color
            current_pixel += 1
    # Bug fix: flush a run that extends to the very last pixel; the original
    # sample code silently dropped it.
    if run_start > -1:
        rle.append(str(run_start))
        rle.append(str(run_length))
    return " ".join(rle)
def rle2mask(rle, width, height):
    """Decode a relative run-length-encoded string into a (width, height) mask.

    `rle` holds space-separated (offset, length) pairs where each offset is
    counted from the end of the previous run. Run pixels are set to 255;
    the background stays 0. An empty string yields an all-zero mask.
    """
    flat = np.zeros(width * height)
    numbers = [int(token) for token in rle.split()]
    cursor = 0
    # Pairs: even positions are relative offsets, odd positions are lengths.
    for offset, run_len in zip(numbers[0::2], numbers[1::2]):
        cursor += offset
        flat[cursor:cursor + run_len] = 255
        cursor += run_len
    return flat.reshape(width, height)
# +
import cv2

# Keep only the images that actually have a pneumothorax mask.
df_pneumo = df_patients[df_patients["EncodedPixels"] != '-1']


def show_pneumo_case(row, save_path=None):
    """Plot one case as three panels: x-ray, decoded mask, and the overlay.

    `row` is one row of df_pneumo.values (row[1] is the RLE mask, row[-1]
    the DICOM path). If `save_path` is given, the raw pixel array is also
    written to that file with cv2.
    """
    mask = rle2mask(row[1], 1024, 1024)
    mask = np.rot90(mask, 3)  # rotate three times by 90° into place
    mask = np.flip(mask, axis=1)
    img = pydicom.read_file(row[-1]).pixel_array
    fig = plt.figure(figsize=(15, 10))
    a = fig.add_subplot(1, 3, 1)
    plt.imshow(img, cmap='bone')  # original x-ray
    a.set_title("Original x-ray image")
    plt.grid(False)
    plt.axis("off")
    a = fig.add_subplot(1, 3, 2)
    plt.imshow(mask, cmap='binary')
    a.set_title("The mask")
    plt.grid(False)
    plt.xticks([])
    plt.yticks([])
    a = fig.add_subplot(1, 3, 3)
    plt.imshow(img, cmap='bone')
    plt.imshow(mask, cmap='binary', alpha=0.3)
    a.set_title("Mask on the x-ray: air in the pleura")
    if save_path is not None:
        cv2.imwrite(save_path, img)
    plt.axis("off")
    plt.grid(False)


print(df_pneumo.values[3][2], df_pneumo.values[3][3])
show_pneumo_case(df_pneumo.values[3], save_path='./test.png')
show_pneumo_case(df_pneumo.values[6])
# -
| pneumothorax/notebook/0.plot_penumothorax_heatmap.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (bl_tf)
# language: python
# name: ml
# ---
# # Efficient RAW file generation by subsampling
# This tutorial discusses generating high resolution synthetic data with smaller volumes by using lower sample rates.
# If you have access to a GPU, it is highly recommended to install CuPy, which performs the equivalent NumPy array operations on the GPU (https://docs.cupy.dev/en/stable/install.html). This is not necessary to run raw voltage generation, but will highly accelerate the pipeline. Once you have CuPy installed, to enable GPU acceleration you must set `SETIGEN_ENABLE_GPU` to '1' in the shell or in Python via `os.environ`. It can also be useful to set `CUDA_VISIBLE_DEVICES` to specify which GPUs to use.
# +
# # !pip install cupy-cuda110
# -
import os
# Per the note above: enable setigen's CuPy/GPU path and pin the GPU.
# These must be set before `import setigen` to take effect.
os.environ['SETIGEN_ENABLE_GPU'] = '1'
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from astropy import units as u
import blimpy as bl
import setigen as stg
# -
# Sometimes it can be necessary to re-run this command for plots to show automatically
# %matplotlib inline
# Especially when using a high sampling rate like 3 GHz, `setigen`'s raw voltage pipeline can produce a *lot* of data, most of which is empty. Using the standard synthesis method, you can throw out information after it's generated, but that still requires generating an unnecessarily large amount of data beforehand. How can we cut down on unnecessary computation, but maintain time and frequency resolutions, not to mention signal processing conversions?
#
# It turns out that as long as we're careful, we can reduce the sampling rate and the number of PFB branches and maintain the same overall structure of the data:
# +
# Shrink the sample rate and the number of PFB branches by the same factor,
# which keeps the overall channel structure while generating far less data.
subsample_factor = 128
sample_rate = 3e9 // subsample_factor
num_taps = 8
num_branches = 1024 // subsample_factor
# Only half the branches are usable coarse channels (real-sampled input).
print(f'Max "num_chans" is {num_branches // 2}.')
# -
# Note that the number of coarse channels to record `num_chans` has a reduced maximal value, since we are subsampling. An additional effect when reducing the data volume by a significant factor is that if you keep the `block_size` the same, you may end up writing more data when recording in `num_blocks` mode. This might be the desired behavior, but it's important to keep in mind. Since `record()` will generate enough data to fill out full blocks, if you additionally want to reduce the amount of data recorded, be sure to accordingly reduce `block_size`.
#
# So to test this out, let's add the same kinds of signals as we did in `05_raw_file_gen_snr.ipynb`, so that we can directly compare synthetic data products.
#
# Setting up backend elements, then adding noise and a few signals:
# +
# Fine channelization (FFT length) and time-integration factor for rawspec.
fftlength = 1024
int_factor = 4
num_blocks = 1
# 8-bit quantization before the PFB (real) and after it (complex).
digitizer = stg.voltage.RealQuantizer(target_fwhm=32,
                                      num_bits=8)
filterbank = stg.voltage.PolyphaseFilterbank(num_taps=num_taps,
                                             num_branches=num_branches)
requantizer = stg.voltage.ComplexQuantizer(target_fwhm=32,
                                           num_bits=8)
antenna = stg.voltage.Antenna(sample_rate=sample_rate,
                              fch1=6*u.GHz,
                              ascending=True,
                              num_pols=2)
# Keep 128 time samples per channel per block; see the note above about
# reducing block_size along with the data volume.
block_size = stg.voltage.get_block_size(num_antennas=1,
                                        tchans_per_block=128,
                                        num_bits=8,
                                        num_pols=2,
                                        num_branches=num_branches,
                                        num_chans=num_branches//2,
                                        fftlength=fftlength,
                                        int_factor=int_factor)
rvb = stg.voltage.RawVoltageBackend(antenna,
                                    digitizer=digitizer,
                                    filterbank=filterbank,
                                    requantizer=requantizer,
                                    start_chan=0,
                                    num_chans=num_branches//2,
                                    block_size=block_size,
                                    blocks_per_file=128,
                                    num_subblocks=32)
# Add noise
for stream in antenna.streams:
    stream.add_noise(v_mean=0,
                     v_std=1)
# Add signals: target SNR of 10 for the chosen fftlength/num_blocks.
signal_level = stg.voltage.get_level(10,
                                     rvb,
                                     num_blocks=num_blocks,
                                     length_mode='num_blocks',
                                     fftlength=fftlength)
for f_start in np.linspace(6003.1e6, 6003.9e6, 9):
    # Compensate for spectral leakage so each tone lands at the target SNR.
    leakage_factor = stg.voltage.get_leakage_factor(f_start, rvb, fftlength)
    for stream in antenna.streams:
        level = stream.get_total_noise_std() * leakage_factor * signal_level
        stream.add_constant_signal(f_start=f_start,
                                   drift_rate=0*u.Hz/u.s,
                                   level=level)
# Write the GUPPI RAW file(s), then reduce them with rawspec.
rvb.record(output_file_stem='example_subsample',
           num_blocks=num_blocks,
           length_mode='num_blocks',
           header_dict={'HELLO': 'test_value',
                        'TELESCOP': 'GBT'},
           verbose=False)
# !rawspec -f $fftlength -t $int_factor -d . example_subsample
# -
# Plotting the results:
# +
# Load the rawspec filterbank product; plot only the 1 MHz window that
# contains the nine injected tones.
wf = bl.Waterfall('example_subsample.rawspec.0000.fil',
                  f_start=6003.0,
                  f_stop=6004.0)
plt.figure(figsize=(10, 6))
wf.plot_waterfall()
plt.show()
# Integrate over time into a normalized spectrum (SNR units).
frame = stg.Frame(wf)
spectrum = stg.integrate(frame, normalize=True)
plt.figure(figsize=(8, 6))
plt.plot(spectrum)
plt.xlabel('Frequency bins')
plt.ylabel('SNR')
plt.show()
# -
# This looks very similar to the plot in `05_raw_file_gen_snr.ipynb`, as expected! We can also check the frame resolution:
frame.get_params()
| jupyter-notebooks/voltage/07_efficient_gen_by_subsampling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#default_exp dzi
# -
#hide
# %load_ext autoreload
# %autoreload 2
# +
#hide
# Work in the folder that holds the large source TIFFs used by this demo.
import os
os.chdir('/home/frank/Work/Projecten/DoRe/data/hasselblad/RP-T-1898-A-3689')
# B2 credentials are loaded from a private module so the keys themselves
# never appear in the notebook.
from myb2keys import Deepzoom_demo as mykeys
application_key_id = mykeys.application_key_id
application_key = mykeys.application_key
bucket_name = mykeys.bucket_name
# -
# # Uploading your own deep zoom images
#
# > Three steps
# Once you have managed to register a Backblaze cloud storage account and created a public bucket, the hard work is done!
#
# Upload your deep zoom image by executing the following code in a Jupyter notebook:
# **Step 1**
#
# First, change your working directory to the folder on your computer with the large image files that you would like to upload as deep zoom images. In my case I have two very large tif images from the same drawing.
# 'RP-T-1898-A-3689_Recto.tif' (2.3 GB)
# 'RP-T-1898-A-3689_Trans-Recto.tif' (2.3 GB)
# **Step 2**
#
# Establish a connection to the Backblaze bucket that you would like to upload to by creating a `DeepZoomBucket()` object. **You will need to fill in your own bucket credentials and bucket name.**
# You need to copy your own personal B2 bucket application-key, application-key-id and bucket-name in a notebook cell and run the cell (by pressing Shift-Enter).
#
# ```Python
# application_key = "<KEY>"
# application_key_id = "<KEY>"
# bucket_name = "my-first-bucket"
# ```
#
# +
from deepzoomup import DeepZoomBucket
# Connect once; the object remembers the bucket and its download base URL.
dzb = DeepZoomBucket(application_key_id, application_key, bucket_name)
# -
# **Step 3**
#
# Convert and upload your images with the `.upload_as_dzi()` method.
dzb.upload_as_dzi('RP-T-1898-A-3689_Recto.tif')
dzb.upload_as_dzi('RP-T-1898-A-3689_Trans-Recto.tif')
# **That is it! You have successfully created and uploaded your cloud stored deep zoom image tiles.**
# Curious to see the result? See the next section.
# # API
# +
#export
import pyvips
import b2sdk.v1 as b2
import time
import sys
import os
import re
from IPython.display import HTML
import textwrap
# CONSTANTS
PREFIX = 'dzp_'  # folder-name prefix that marks a deep zoom pyramid folder
ROOTDIR = 'deepzoom'  # local and bucket-side root folder for all pyramids
VIEWERSDIR = 'viewers'  # subfolder reserved for generated viewer pages
def _dzp_save(fname):
    '''Convert image file *fname* to a local deepzoom pyramid folder; return the bare image name.'''
    name = os.path.splitext(os.path.basename(fname))[0]
    target_dir = os.path.join('.', ROOTDIR, f'{PREFIX}{name}')
    dzi_marker = os.path.join(target_dir, f'{name}.dzi')
    tile_base = os.path.join(target_dir, f'{name}')
    os.makedirs(target_dir, exist_ok=True)
    print('CREATING DEEPZOOM IMAGE TILES...')
    if os.path.exists(dzi_marker):
        # Tiles were generated earlier; skip the expensive conversion.
        print(f'Skipping local deepzoom creation for: {fname}\n(No need to overwrite already existing tiles)')
    else:
        pyvips.Image.new_from_file(fname).dzsave(tile_base)
    return name
def _connect_b2_bucket(application_key_id, application_key, bucket_name):
    '''Authorize against Backblaze B2 and return (api, base download url) for *bucket_name*.'''
    api = b2.B2Api(b2.InMemoryAccountInfo())
    api.authorize_account("production", application_key_id, application_key)
    download_base = api.get_bucket_by_name(bucket_name).get_download_url('')
    return api, download_base
def _upload(b2_api, bucket_name):
    '''Upload newly created files in local directory './deepzoom/' to b2 bucket.'''
    # Sync the local ROOTDIR folder against the same folder in the bucket.
    source = os.path.join('.', ROOTDIR)
    destination = f'b2://{bucket_name}/{ROOTDIR}'
    source = b2.parse_sync_folder(source, b2_api)
    destination = b2.parse_sync_folder(destination, b2_api)
    policies_manager = b2.ScanPoliciesManager(exclude_all_symlinks=True)
    synchronizer = b2.Synchronizer(
        max_workers=10,
        policies_manager=policies_manager,
        dry_run=False,
        allow_empty_source=True,
    )
    no_progress = False
    # sync_folders does the actual transfer; SyncReport prints progress to stdout
    print('SYNC LOCAL FOLDER WITH CLOUD BUCKET...')
    with b2.SyncReport(sys.stdout, no_progress) as reporter:
        synchronizer.sync_folders(
            source_folder=source,
            dest_folder=destination,
            now_millis=int(round(time.time() * 1000)),
            reporter=reporter,
        )
    print('')
def _dzp_thumbnail(fname, tn_height=1000):
    '''Create a `tn_<name>.png` thumbnail (default height 1000 px) in the image's dzp_ folder.'''
    # create directory if needed
    name, ext = os.path.splitext(os.path.basename(fname))
    dzp_fname = os.path.join('.', ROOTDIR, f'{PREFIX}{name}')
    os.makedirs(dzp_fname, exist_ok=True)
    # construct standard thumbnail filename
    thumb_fname = os.path.join('.', ROOTDIR, f'{PREFIX}{name}', f'tn_{name}.png')
    # resize proportionally so the thumbnail is tn_height pixels tall
    v_img = pyvips.Image.new_from_file(fname)
    scale = tn_height / v_img.height
    v_thumb = v_img.resize(scale)
    # write to file
    v_thumb.write_to_file(thumb_fname)
def _list_names(application_key_id, application_key, bucket_name, verbose=True):
    '''Return the image names of all uploaded deep zoom pyramids in the bucket.'''
    # connect
    b2_api, base_url = _connect_b2_bucket(application_key_id, application_key, bucket_name)
    bucket = b2_api.get_bucket_by_name(bucket_name)
    base_url = bucket.get_download_url('')
    # list file names in deepzoom bucket folder
    filenames = [file_info.file_name for file_info, _ in
                 bucket.ls(folder_to_list=ROOTDIR,
                           show_versions=False, recursive=False)]
    # extract the bare image name from each 'deepzoom/dzp_<name>...' entry
    ptrn = f'{ROOTDIR}/{PREFIX}(?P<name>[^/]+).*'
    names = []
    for f in filenames:
        m = re.match(ptrn, f)
        if m:
            names.append(m.groups()[0])
    if verbose:
        # print the public base URL of the deepzoom folder for convenience
        print(f'{base_url}{ROOTDIR}/')
    return names
def _make_html_snippet(deepzoombucket, image_names):
    '''Create multi image viewer html snippet for list of `image_names`. '''
    # header: load OpenSeadragon from the CDN and open the viewer config
    header = textwrap.dedent('''\
    <script src="https://cdnjs.cloudflare.com/ajax/libs/openseadragon/2.4.2/openseadragon.min.js"
    integrity="sha512-qvQYH6mPuE46uFcWLI8BdGaJpB5taX4lltbSIw5GF4iODh2xIgyz5ii1WpuzPFUknHCps0mi4mFGR44bjdZlZg=="
    crossorigin="anonymous">
    </script>
    <script type="text/javascript">
    var viewer = OpenSeadragon({
    id: "openseadragon1_dzi",
    prefixUrl: "https://cdnjs.cloudflare.com/ajax/libs/openseadragon/2.4.2/images/",
    ''')
    # extra options that only make sense for a multi-image (filmstrip) viewer
    filmstrip = textwrap.dedent('''\
    sequenceMode: true,
    preserveViewport: true,
    showReferenceStrip: true,
    showNavigator: true
    ''')
    # construct tilesources section: one .dzi URL per image in the bucket
    base_url = deepzoombucket.base_url
    tilesources_list = [f'    \"{base_url}{ROOTDIR}/{PREFIX}{im_name}/{im_name}.dzi\"' for im_name in image_names]
    if len(tilesources_list) == 1:
        # single image: plain tileSources entry, no filmstrip options
        tilesources_str = f'tileSources: {tilesources_list[0]},\n'
    else:
        tilesources_str = 'tileSources: [\n' + ',\n'.join(tilesources_list) + '],\n' + filmstrip
    # footer: close the config and emit the viewer's target div
    footer = textwrap.dedent('''\
    });
    </script>
    <div id="openseadragon1_dzi" style="width: 800px; height: 500px; background-color: snow"></div>
    ''')
    # combine
    html_snippet = header + tilesources_str + footer
    return html_snippet
# Class
class DeepZoomBucket:
    '''Upload and list deep zoom pyramids in a Backblaze B2 bucket. '''
    def __init__(self, application_key_id, application_key, bucket_name):
        '''Connect to the B2 bucket and prepare the local ./deepzoom/ folder structure.'''
        # create local directory structure in current working directory
        store_dir = os.path.join('.', ROOTDIR)
        os.makedirs(store_dir, exist_ok=True)
        viewers_dir = os.path.join('.', ROOTDIR, VIEWERSDIR)
        os.makedirs(viewers_dir, exist_ok=True)
        # connect to b2 bucket (credentials kept for later reconnects in list_names)
        self.application_key_id = application_key_id
        self.application_key = application_key
        self.bucket_name = bucket_name
        self.b2_api, self.base_url = _connect_b2_bucket(application_key_id, application_key, bucket_name)
        # names of images uploaded during this session (the bucket itself is
        # queried via list_names, so this is session-local bookkeeping only)
        self.images = []
    def upload_as_dzi(self, fname):
        # convert the image file to a local deepzoom pyramid directory
        name = _dzp_save(fname)
        self.images.append(name)
        # also make a thumbnail
        _dzp_thumbnail(fname)
        # create viewer pages
        #dzi_to_js(name, self.base_url, self.bucket_name)
        #make_html(name, self.bucket_name)
        # and sync new files to b2 bucket
        _upload(self.b2_api, self.bucket_name)
    def list_names(self, verbose=False):
        '''List all image names in bucket'''
        names = _list_names(self.application_key_id, self.application_key, self.bucket_name, verbose=verbose)
        return names
    def make_html_snippet(self, image_names):
        # delegate to the module-level helper; `self` supplies base_url
        html_snippet = _make_html_snippet(self, image_names)
        return html_snippet
| notebooks/20_uploading-your-own-deep-zoom-images.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (dev)
# language: python
# name: dev
# ---
# # Backtest of Model-Based Strategies | Price Lags
# ---
#
# __Authors:__ <NAME>, <NAME>, <NAME>
# __Data Source:__ Yahoo Finance accessed by `pandas-datareader` and `yfinance`
# __Strategy:__ Vectorized Price Lags
# __Instrument(s):__ SPY
#
# __Intended Use:__
# The primary objective of this notebook is to determine the optimal ML model(s) for a strategy based on lagged percent changes. The model(s) chosen at the end of the notebook can be set as a constant, along with the strategy, allowing for further exploration of feature engineering and hyperparameter tuning.
# <br>
#
# ## Imports & Functions
# ---
# __Imports__
# + tags=[]
# Data Objects
import numpy as np
import pandas as pd
from datetime import datetime as dt, timedelta as td
# Plotting
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
# Data Collection
import pandas_datareader.data as pdr
import yfinance as yf
# Preprocessing and Pipeline
from sklearn.preprocessing import RobustScaler
from sklearn.pipeline import Pipeline
# Models
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.discriminant_analysis import (
LinearDiscriminantAnalysis as LDA,
QuadraticDiscriminantAnalysis as QDA
)
from sklearn.svm import LinearSVC, SVC
# Evaluation
from sklearn.metrics import classification_report
# Settings
# Route pandas-datareader's get_data_yahoo through yfinance.
# NOTE(review): pdr_override is deprecated/removed in recent yfinance
# releases — confirm the pinned yfinance version still provides it.
yf.pdr_override()
# -
# <br>
#
# __Functions__
# + tags=[]
def create_lagged_series(symbol, start_date, end_date, lags=5):
    """Create a DataFrame of daily percentage returns with lagged columns.

    This creates a pandas DataFrame that stores the percentage returns
    of the adjusted closing value of a stock obtained from Yahoo
    Finance via pandas-datareader, along with a number of lagged
    returns from the prior trading days (lags default to 5 days).
    Trading volume, as well as the Direction (sign of today's return),
    are also included.

    Parameters
    ----------
    symbol : str
        The ticker symbol to obtain from Yahoo Finance.
    start_date : datetime
        The starting date of the series to obtain.
    end_date : datetime
        The ending date of the series to obtain.
    lags : int, optional
        The number of days to 'lag' the series by.

    Returns
    -------
    pd.DataFrame
        Contains the Adjusted Closing Price returns and lags.
    """
    # Obtain stock pricing from pandas-datareader; fetch an extra year
    # of history so that lagged values exist at `start_date`.
    adj_start_date = start_date - td(days=365)
    ts = pdr.get_data_yahoo(
        symbol,
        start=adj_start_date.strftime('%Y-%m-%d'),
        end=end_date.strftime('%Y-%m-%d')
    )
    # Use plain dates (no time component) as the index.
    ts.index = ts.index.date
    # Create the new lagged DataFrame
    tslag = pd.DataFrame(index=ts.index)
    tslag['today'] = ts['Adj Close']
    tslag['volume'] = ts['Volume']
    # Create shifted lag series of prior trading period close values
    for i in range(0, lags):
        tslag[f"lag_{i+1}"] = ts['Adj Close'].shift(i+1)
    # Create returns DataFrame
    tsret = pd.DataFrame(index=tslag.index)
    tsret['volume'] = tslag['volume']
    tsret['today'] = tslag['today'].pct_change()
    # If any of the percentage returns are (near) zero in absolute
    # value, set them to a small number (stops issues with the QDA
    # model in scikit-learn, and keeps np.sign from producing 0 below).
    tsret.loc[tsret['today'].abs() < 0.0001, ['today']] = 0.0001
    # Create lagged percentage returns columns
    for i in range(0, lags):
        tsret[f"lag_{i+1}"] = tslag[f"lag_{i+1}"].pct_change()
    # Create "Direction" column (+1 or -1) indicating an up/down day
    tsret['direction'] = np.sign(tsret['today'])
    # Trim off the warm-up year that was fetched only to populate lags.
    tsret = tsret[tsret.index >= start_date.date()]
    return tsret
# -
# <br>
#
# ## Retrieve Data and Generate Features
# ---
# + tags=[]
# Set start and end dates of time series
start_date = dt(2016, 1, 10)
end_date = dt(2017, 12, 31)
# Create a DataFrame with daily returns, target, and
# the features that will be used to train the models.
# SPY is the instrument named in the notebook header.
returns_df = create_lagged_series('SPY', start_date, end_date)
returns_df.head()
# -
# <br>
#
# ## Train and Evaluate Models
# ---
# + tags=[]
# Split returns_df into features and target.
# Columns 2:4 are lag_1 and lag_2 (the first two lagged returns);
# the target is the +1/-1 direction of today's return.
X = returns_df.iloc[:, 2:4].copy()
y = returns_df['direction'].copy()
# Split data into training and testing sets.
# Chronological split: train on 2016, test on 2017. No shuffling,
# which would leak future information into the training set.
start_test = dt(2017, 1, 1)
X_train = X[X.index < start_test.date()]
X_test = X[X.index >= start_test.date()]
y_train = y[y.index < start_test.date()]
y_test = y[y.index >= start_test.date()]
# + tags=[]
# Create the results DataFrame where each
# model's returns will be stored.
# The index restricts 'Actual' to the test period automatically.
results_df = pd.DataFrame(index=X_test.index)
results_df['Actual'] = returns_df['today']
# + tags=[]
# Instantiate the models that will be tested.
# Each entry is a (short name, estimator) pair; the short name is used
# as the strategy's column label in results_df.
models = [
    ('LR', LogisticRegression(solver='liblinear')),
    ('LDA', LDA(solver='svd')),
    ('QDA', QDA()),
    ('LSVC', LinearSVC(max_iter=10000)),
    ('RSVC', SVC(
        C=1000000.0, cache_size=200, class_weight=None,
        coef0=0.0, degree=3, gamma=0.0001, kernel='rbf',
        max_iter=-1, probability=False, random_state=None,
        shrinking=True, tol=0.001, verbose=False
        )
    ),
    ('RF', RandomForestClassifier(
        n_estimators=1000, criterion='gini',
        max_depth=None, min_samples_split=2,
        # 'sqrt' is exactly what the old 'auto' setting meant for
        # classifiers; the 'auto' alias was removed in scikit-learn 1.3.
        min_samples_leaf=1, max_features='sqrt',
        bootstrap=True, oob_score=False, n_jobs=-1,
        random_state=None, verbose=0
        )
    )
]
# Train and test the models.
print('Hit Rates & Evaluation Report:\n')
for name, estimator in models:
    # Build pipeline: robust scaling (median/IQR) then the classifier.
    clf = Pipeline(steps=[
        ('scaler', RobustScaler()),
        ('classifier', estimator)
    ])
    # Train model on the 2016 data.
    clf.fit(X_train, y_train)
    # Create predictions for the 2017 hold-out period.
    pred = clf.predict(X_test)
    # Add the model returns to the returns DataFrame: actual return
    # times the predicted +1/-1 direction gives the strategy's
    # daily return.
    results_df[name] = results_df['Actual'] * pred
    # Output hit-rate (accuracy) and the per-class evaluation report.
    print(f"{name}:\n{clf.score(X_test, y_test):.3f}")
    print(f"{classification_report(y_test, pred)}\n")
# -
# <br>
#
# ## Evaluate Financial Metrics
# ---
# __Print total returns, annual returns, and Sharpe ratios__
# Print total returns, annualised returns, and Sharpe ratio for the
# buy-and-hold benchmark ('Actual') and each model strategy.
# DataFrame.iteritems() was removed in pandas 2.0 -> use items();
# positional Series indexing with [-1] is deprecated -> use iloc[-1].
for col_name, col_data in results_df.items():
    # Total compounded return over the whole test period.
    tot = (1 + col_data).cumprod().iloc[-1] - 1
    # Annualised arithmetic return, assuming 252 trading days.
    ann = col_data.mean() * 252
    # Annualised Sharpe ratio (risk-free rate assumed zero).
    sharpe = ann / (col_data.std() * np.sqrt(252))
    print(
        f"{col_name} yields:" +
        f"\n\t{tot * 100:.2f}% total returns" +
        f"\n\t{ann * 100:.2f}% annual returns" +
        f"\n\t{sharpe:.2f} Sharpe Ratio"
    )
# __Plot cumulative returns, annual returns, and Sharpe ratios__
# +
# Create figure layout: cumulative returns across the top (full
# width), annual returns bottom-left, Sharpe ratios bottom-right.
fig = plt.figure(figsize=(12, 10))
gs = fig.add_gridspec(4, 4)
ax0 = fig.add_subplot(gs[:2, :4])
ax1 = fig.add_subplot(gs[2:, :2])
ax2 = fig.add_subplot(gs[2:, 2:])
# Create a dictionary to capture per-strategy performance.
perf_dict = {
    'tot_ret': {},
    'ann_ret': {},
    'sharpe': {}
}
# Populate performance dictionary and plot cumulative returns.
# DataFrame.iteritems() was removed in pandas 2.0 -> use items();
# positional Series[-1] indexing is deprecated -> use iloc[-1].
for col_name, col_data in results_df.items():
    # Populate the performance dictionary for each column.
    perf_dict['tot_ret'][col_name] = (1 + col_data).cumprod().iloc[-1] - 1
    perf_dict['ann_ret'][col_name] = col_data.mean() * 252
    perf_dict['sharpe'][col_name] = perf_dict['ann_ret'][col_name] /\
        (col_data.std() * np.sqrt(252))
    # Plot cumulative returns for each column.
    ax0.plot((1 + col_data).cumprod(), label=col_name)
# ax0 chart labeling
ax0.set_ylabel('Returns (%)')
ax0.set_xlabel('Date')
ax0.set_title('Cumulative Returns')
ax0.grid()
ax0.legend()
# Plot annual returns, one bar per strategy.
# (Explicit loop instead of a throwaway list comprehension used
# purely for its side effects.)
for i, v in enumerate(perf_dict['ann_ret'].values()):
    ax1.bar(i, v * 100)
ax1.set_xticks([i for i, k in enumerate(perf_dict['ann_ret'])])
ax1.set_xticklabels(
    [
        f'{k} Model' if k != 'Actual' else 'Actual'
        for k in perf_dict['ann_ret'].keys()
    ],
    rotation=45
)
ax1.grid()
ax1.set_ylabel('Returns (%)')
ax1.set_xlabel('Strategy')
ax1.set_title('Annual Returns')
# Plot Sharpe ratios, one bar per strategy.
for i, v in enumerate(perf_dict['sharpe'].values()):
    ax2.bar(i, v)
ax2.set_xticks([i for i, k in enumerate(perf_dict['sharpe'])])
ax2.set_xticklabels(
    [
        # Same key set as 'sharpe'; reusing 'ann_ret' keys keeps the
        # label order identical across both bar charts.
        f'{k} Model' if k != 'Actual' else 'Actual'
        for k in perf_dict['ann_ret'].keys()
    ],
    rotation=45
)
ax2.grid()
ax2.set_ylabel('Sharpe Ratio')
ax2.set_xlabel('Strategy')
ax2.set_title('Sharpe Ratio')
# Save and display the figure.
plt.tight_layout()
plt.savefig('financialMetrics_mbs_lags.png')
plt.show()
# -
| backtest_mbs_lags.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/mattselph/ml-notebooks/blob/master/4_ML_KNN.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="i8Iivr5H2bjH" colab_type="text"
# # *K* Nearest Neighbors (KNN)
# *K* Nearest Neighbors (KNN) is an algorithm that can be used in either classification or regression. It works by polling the nearest values (neighbors) of a point to ascertain what category the point is in. The *K* variable is how many neighbors it will poll to determine the category.
# + [markdown] id="M65yTG7B23lW" colab_type="text"
# ## Explanation
# In the scatter plot below, which class would the star point belong to?
# 
#
# It could belong to either the blue or the orange class. It depends on the value you give K. If you give it K=3, then it will find the closest 3 neighbors and figure out what the majority is (2 orange, 1 blue, so it belongs to orange).
#
# If you give it K=5, it will likely circle the two orange and one blue like it has already, but then circle the two blue dots that are close to the existing circle. In that case, it would classify it as a blue dot.
# + [markdown] id="B6s1hAO19NYV" colab_type="text"
# # Data
# Use the [Halloween candy dataset](https://raw.githubusercontent.com/mattselph/datasets/master/candy-data.csv) that was used in the Logistic Regression notebook to see if there's a more accurate result.
# + [markdown] id="ZmWJ4DTA9lGv" colab_type="text"
# # Question to Answer
# Using the percentage of sugar and the cost, can you determine if the candy has chocolate in it?
# + [markdown] id="3-s6plmH9yIS" colab_type="text"
# ## Preprocessing
# + id="zlEhQIca9urt" colab_type="code" outputId="5d489a27-c578-4359-b7ae-4c45d645f608" colab={"base_uri": "https://localhost:8080/", "height": 369}
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
df = pd.read_csv('https://raw.githubusercontent.com/mattselph/datasets/master/candy-data.csv')
# Keep only the two predictors and the binary target.
candy = df[['sugarpercent', 'pricepercent', 'chocolate']]
X = candy[['sugarpercent', 'pricepercent']]
y = candy['chocolate']
# Scatter plot of the two features, colored by the target.
# seaborn >= 0.12 removed positional x/y arguments to lmplot, so pass
# them as keywords (this also works on older seaborn versions).
sns.lmplot(x='sugarpercent', y='pricepercent', data=candy, hue='chocolate', fit_reg=False)
plt.show()
# + [markdown] id="l-KCjlkNLUxE" colab_type="text"
# So KNN is going to try and classify points using their nearest neighbor. When it encounters a new variable, it will look at the closest *k* neighbors and figure out which class it belongs in based on that.
# + id="vUme4HqaHyDI" colab_type="code" colab={}
# Hold out 80% of the rows for testing, leaving only 20% for training;
# train_test_split shuffles before splitting, so each run differs.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.8)
# + [markdown] id="zHZdzaaF-Vpn" colab_type="text"
# ## Train the Model
# Use ```sklearn.neighbors.KNeighborsClassifier``` for this algorithm. By default, it uses a *K* value of 5. The value of k is set in its own cell below so you can play with that parameter a bit and see how it changes the result.
# + id="R7BlM1ld-26H" colab_type="code" colab={}
# Number of neighbors the classifier will poll; tweak this value to
# see how the accuracy score below changes.
k = 9
# + id="9nP4w_hP-d97" colab_type="code" outputId="e1b8ab00-34db-41e1-b751-e674e6177c1b" colab={"base_uri": "https://localhost:8080/", "height": 68}
from sklearn.neighbors import KNeighborsClassifier
# Fit a k-nearest-neighbors classifier on the training split.
knn = KNeighborsClassifier(n_neighbors=k)
knn.fit(X_train, y_train)
# + [markdown] id="H48Ia3qgAG-R" colab_type="text"
# ## Test the Model
# + id="4zKuaNZwAIYv" colab_type="code" outputId="0a922983-3ae5-4211-dd75-3a3b200653ea" colab={"base_uri": "https://localhost:8080/", "height": 85}
# Predict chocolate/no-chocolate for the held-out rows.
y_pred = knn.predict(X_test)
y_pred
# + [markdown] id="dPBqFLfsAU2Z" colab_type="text"
# ## Evaluate the Model
# + id="dmX-Q46aAWGw" colab_type="code" outputId="772d86f2-546d-4b8d-c06a-c632c459b162" colab={"base_uri": "https://localhost:8080/", "height": 34}
from sklearn.metrics import accuracy_score
# Fraction of test predictions that match the true labels.
score = accuracy_score(y_test, y_pred)
print("Accuracy score: %.2f" % score)
# + [markdown] id="MPmxkvfYD4uD" colab_type="text"
# Play with the k value above to see different results. If you run the preprocessing code again, you'll also get different results because the ```test_train_split``` function shuffles the data before splitting it.
# + [markdown] id="g1q9whcsI6mi" colab_type="text"
# # Conclusion
# Again, you want a high accuracy score.
| 4_ML_KNN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# __GSoC 2016 Project Proposal__
#
# __Idea 6: Mobile-based blood sample image analysis__
# + [markdown] toc="true"
# # Table of Contents
# <p><div class="lev1"><a href="#Personal-Information-1"><span class="toc-item-num">1 </span>Personal Information</a></div><div class="lev1"><a href="#Background-2"><span class="toc-item-num">2 </span>Background</a></div><div class="lev2"><a href="#Education-2.1"><span class="toc-item-num">2.1 </span>Education</a></div><div class="lev2"><a href="#Relevant-Experience-2.2"><span class="toc-item-num">2.2 </span>Relevant Experience</a></div><div class="lev1"><a href="#Programming-interests-and-choice-3"><span class="toc-item-num">3 </span>Programming interests and choice</a></div><div class="lev2"><a href="#Language-skills-3.1"><span class="toc-item-num">3.1 </span>Language skills</a></div><div class="lev2"><a href="#Prior-open-source-development-3.2"><span class="toc-item-num">3.2 </span>Prior open source development</a></div><div class="lev1"><a href="#Interest-in-biology-4"><span class="toc-item-num">4 </span>Interest in biology</a></div><div class="lev1"><a href="#Project-Idea-5"><span class="toc-item-num">5 </span>Project Idea</a></div><div class="lev2"><a href="#Image-capture-5.1"><span class="toc-item-num">5.1 </span>Image capture</a></div><div class="lev2"><a href="#Preprocessing-5.2"><span class="toc-item-num">5.2 </span>Preprocessing</a></div><div class="lev2"><a href="#Detection-5.3"><span class="toc-item-num">5.3 </span>Detection</a></div><div class="lev2"><a href="#Measurement-5.4"><span class="toc-item-num">5.4 </span>Measurement</a></div><div class="lev2"><a href="#Data-collection/-Error-reporting-5.5"><span class="toc-item-num">5.5 </span>Data collection/ Error reporting</a></div><div class="lev1"><a href="#Timeline-6"><span class="toc-item-num">6 </span>Timeline</a></div><div class="lev1"><a href="#Commitments-7"><span class="toc-item-num">7 </span>Commitments</a></div><div class="lev1"><a href="#Proposal-Task-8"><span class="toc-item-num">8 </span>Proposal Task</a></div>
# -
# # Personal Information #
# * Name: <NAME>
# * Email: <EMAIL>
# # Background
#
# ## Education
# * B.Tech and M.Tech in Electrical Engineering, Indian Institute of Technology (IIT) Bombay (2012-2017)
#
# ## Relevant Experience
# Android:<br>
# * Student Intern, Sony Corporations, Tokyo: Development of automated testing framework for Android apps. (2015)
# * Winner of the Ron-Mehta Android app development contest for IIT Bombay: Developed a platform for segregation and consumption of educational videos (2013)
# # Programming interests and choice
#
# ## Language skills
# Comfortable with:
# * Java (including Android)
# * MATLAB
# * Python (including scientific computing tools like numpy)
#
# Basic familiarity with:
# * C/C++
# * OpenCV
#
#
# ## Prior open source development
# * Scilab Signal Processing Toolbox: Part-time contributor to the development of the signal processing toolbox in Scilab, which is the equivalent of its MATLAB counterpart. The project is undertaken by [Free and Open Source Software for Education (FOSSEE)](http://fossee.in/) . [Github repo](https://github.com/ayushbaid/sptoolbox)
# # Interest in biology #
# I am passionate about working in the field of medical image processing. I have been working on Bayesian Inference for *Joint Desmoking, Denoising, and Specularity Removal for Laparoscopy Images* ( [link](https://drive.google.com/file/d/0B-KDtD7BARvnQ1hJVUtBMm5RZFk/view?usp=sharing) )
#
# As a part of the curriculum of the Medical Image Processing course offered by my university, IIT Bombay, I have studied and implemented imaging techniques like CT and MRI and have worked with segmentation and denoising.
#
# The project idea proposed by Helikar Lab excites me due to two reasons. First, it involves image processing. Second is the implications of this project. It taps the high quality cameras which have a very high penetration in the developing countries, and provides affordable and fast tests.
#
# I would be willing to learn more about the techniques and science behind cancer biomarker detection. This will broaden my knowledge, and I will be doing my part for the community.
# # Project Idea
#
# I am applying to work on the Idea-6: <b>Mobile-based blood sample image analysis</b>
#
# The complete project can be divided into 5 categories
# 1. Image Capture
# 2. Preprocessing
# 3. Detection
# 4. Measurement
# 5. Data collection/Error reporting
#
# For the android app, I propose to use OpenCV for Android SDK (<a href = "http://opencv.org/platforms/android.html">link</a>). I propose the use of the Android SDK over C++ code as I am more comfortable with JAVA code. However, I think I will be able to gain some experience using the C++ code till the start of the GSoC coding schedule.
#
# There is also a proposed Idea 7: Web-Based Blood-Sample Image Analysis. The image processing tasks are the same for both the tasks and can be implemented in Python for rapid development and ease of debugging. After that, the code can be written for Android/web-server.
#
# For the Android app, the UI specific components are the image capture along with the display of overlay cues, display the segmented blobs to the user, and display the appropriate end results
#
# In the following sections, I suggest ideas about performing each of the 5 tasks.
# ## Image capture
#
# The app should capture images at the highest possible resolution and avoid preprocessing which can interfere with our algorithm. However, most Android devices do not support raw image capture. Use of the OpenCV image capture API is preferred for better integration. More research has to be done on the possibility of capturing images with as high a resolution and as little preprocessing as possible using OpenCV.
#
# <br>
# The app should have a overlay which provides cue for the grid. There are two ways to achieve this.
#
# <b>1) Static method</b><br>
# The overlay is fixed and the user has to manually adjust the phone so as to get the blobs as cued by the overlay, which will then be evaluated for correctness after the image has been captured. This method can be used only if there are only a few possible grid layout structure.
#
# * Pros: simple to implement, faster as sample layouts are packed with the app
# * Cons: flexibility in terms of data capture
#
# The implementation for this method can be done using one of the two possible approaches.
#
# 1. Use of Android layout design to have two views on top of each other. One view will have the feed from the camera and the other view will have the sample overlay.
# 2. Use of OpenCV APIs to create a new stream by combining the camera feed with the overlay. This method provides more control over the display of the overlays.
#
# <b>2) Dynamic method</b><br>
# The overlay can be fixed or generated in real time, but the app keeps on checking the position of the blobs on the image against the overlay. If the blobs correspond with the overlay, then the image can be automatically captured.
#
# * Pros: user friendly
# * Cons: computationally expensive
#
#
# I consider the static method to be suitable for the project as it is less computationally expensive and we only have a few grid layouts to consider.
#
# Also, if there are multiple images to be captured for each sample, the blood sample outlines detected after one image can be used to generate a highly specific mask for the subsequent images so as to obtain a good amount of registration beforehand.
# ## Preprocessing
#
# Requirements: Noise removal algorithm has to take care as to not disturb the intensities of the blood samples. There are two choices of noise removal. One is to use a bilateral filter, as we want the blood samples to be sharply demarcated and minimally affected by the background. The other choice is to do noise removal after blood blob detection and masking. Bilateral filtering will not be required as we do not have to preserve the edges.
#
# The detection algorithm uses thresholding and does not require background subtraction as such (will be explained below). If required, the corner of the captured image will be used as a background template and subtracted from the original image.
# ## Detection
#
# Assumption: For the purpose of presenting, I have considered the sample image in the last year's code. Any measure of the hue of the image can be used for detection
#
# In the task accompanying the proposal, I have used a\* component of the <a href = "https://en.wikipedia.org/wiki/Lab_color_space">l\*a\*b colorspace</a>. This component take large positive values for red/magenta colors. Hence, it can be used to identify the regions of blood in the untreated image. It can also be used on the images captured after fluorescent quenching (included in last year's code).
#
# The a\* component is then processed using Otsu's thresholding algorithm. The blood samples will have higher values than the paper, and hence the thresholding will generate a binary image. We can than find connected components and filter them out by their areas and location according to the overlay cue.
#
# <i>a\* component</i>:
# 
#
# <i>Otsu's thresholded result</i>
# 
#
#
# Note that there are some regions in the thresholded image which do not correspond to blood samples. Such regions can be filtered out as they will not be present in the valid region determined by the overlay.
#
# Also, the blobs detected by thresholding are larger than they actually are, as evident in the images. Here, we can use the 1st mask obtained by Otsu's thresholding and apply Otsu's thresholding on the grayscale image to obtain a better mask.
# ## Measurement
#
# Assumption: The analysis is not performed pixel by pixel, and hence eliminating the need for registration. If registration is required, it will be performed on the thresholded image.
#
# Once the blobs are detected, the intensity can be estimated and further processing can be done to get the results. The framework developed in the task accompanying the proposal will help in this part.
#
# FRET efficiency analysis will then be performed using a suitable numerical computing library (e.g. Colt, apache commons-math )
# ## Data collection/ Error reporting #
#
# The user will be displayed the identified regions on which the measurements are taken, they can report it so that the algorithm can be further improved. Downsampled images and results can be a part of the data collection.
#
# In case of incorrect processing, the user has an option to report the error to the developers. We can also provide an option to manually adjust the mask and recalculate the result.
#
# # Timeline #
# Community Bonding Period: Follow up on the algorithms for processing. Read more about fluorescence microscopy and the science behind the biomarker detection.
#
# Each task will be implemented first in Python, for ease of debugging and rapid prototyping, and then in Java.
#
# * Week 01: May 23 - May 29
# > - Image capture interface along with overlay cue
# * Week 02: May 30 - Jun 05
# > - Denoising/Background removal algorithms as required
# * Week 03: Jun 06 - Jun 12
# > - Blood samples segmentation algorithms
# * Week 04: Jun 13 - Jun 19
# > - UI enhancements for the Android app for the functionality developed till now
# > - Comparison of the results obtained with the ImageJ plugin
# * Week 05: Jun 20 - Jun 26
# > - Midterm review submission
# * Week 06: Jun 27 - Jul 03
# > - Associating the detected blobs with their position in the rectangular grid (partial)
# > - Using the rectangular grid to get location on blobs missed by the detection algorithm
# * Week 07: Jul 04 - Jul 10
# > - Intensity measurement from blobs
# > - Proper UI for the functionality implemented till now
# * Week 08: Jul 11 - Jul 17
# > - Comparison of the results obtained with the ImageJ plugin
# * Week 09: Jul 18 - Jul 24
# > - FRET efficiency calculation
# * Week 10: Jul 25 - Jul 31
# > - Compiling data to be reported and develop reporting mechanism
# * Week 11: Aug 01 - Aug 07
# > - User assisted improvement of blob detection (time permitting)
# * Week 12: Aug 08 - Aug 14
# > - Buffer week
# * Week 13 and beyond: Aug 14 - Aug 24
# > - Buffer period, documentation
# # Commitments
#
# * My classes finish on May 02, 2016. I will be having summer holidays. Classes resume from the 3rd week of July.
# * I do not have any internships or course-commitment during the summer holidays. I will be staying at my university and will be doing literature survey as a prequel to my Master's thesis. I am confident that I will be able to work on both simultaneously as I will be working over the weekend.
# * I will be available to commit 40-45 hours per week during the coding period.
# # Proposal Task
#
# The proposal task has been implemented in both Python and Android. OpenCV version 3.1.0 has been used.
#
# Note: The Android app does its processing in the background, and thus does not hog the UI thread.
#
# [Link](https://github.com/ayushbaid/bloodsample-proposal/blob/master/python/Assignment.html) to the html file containing the explanation, python code and results for the proposal task
#
# [Link](https://github.com/ayushbaid/bloodsample-proposal/tree/master/app-debug.apk) to the Android APK
#
# [Link](https://github.com/ayushbaid/bloodsample-proposal) to the github repo hosting both Android and Python code
| proposal.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# This notebook was prepared by [<NAME>](http://donnemartin.com). Source and license info is on [GitHub](https://github.com/donnemartin/interactive-coding-challenges).
# # Solution Notebook
# ## Problem: Implement merge sort.
#
# * [Constraints](#Constraints)
# * [Test Cases](#Test-Cases)
# * [Algorithm](#Algorithm)
# * [Code](#Code)
# * [Unit Test](#Unit-Test)
# ## Constraints
#
# * Is a naive solution sufficient?
# * Yes
# * Are duplicates allowed?
# * Yes
# ## Test Cases
#
# * None -> None
# * Empty input -> []
# * One element -> [element]
# * Two or more elements
# * Left and right subarrays of different lengths
# ## Algorithm
#
# Wikipedia's animation:
# 
#
# * Recursively split array into left and right halves
# * Merge split arrays
# * Using two pointers, one for each half starting at index 0
# * Add the smaller element to the result array
# * Increment the pointer where the smaller element exists
# * Copy remaining elements to the result array
# * Return result array
#
# Complexity:
# * Time: O(n log(n))
# * Space: O(n+m), n = number of elements, m = recursion depth
#
# Most implementations are stable.
# ## Code
# +
from __future__ import division
def merge(left, right):
    """Merge two sorted lists into one sorted list.

    Uses a two-pointer scan; on equal elements the one from `right`
    is taken first (strict `<` comparison on the left element).
    """
    merged = []
    i, j = 0, 0
    # Repeatedly append the smaller of the two current heads.
    while i < len(left) and j < len(right):
        if left[i] < right[j]:
            merged.append(left[i])
            i += 1
        else:
            merged.append(right[j])
            j += 1
    # At most one of these slices is non-empty; both are sorted tails.
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged
def merge_sort(data):
    """Sort `data` with recursive merge sort, returning a new list.

    None and sequences shorter than two elements are returned as-is,
    since they are already trivially sorted.
    """
    if data is None or len(data) < 2:
        return data
    middle = len(data) // 2
    # Sort each half independently, then merge the sorted halves.
    return merge(merge_sort(data[:middle]), merge_sort(data[middle:]))
# -
# ## Unit Test
#
#
# +
# %%writefile test_merge_sort.py
from nose.tools import assert_equal
class TestMergeSort(object):
    """Unit tests for merge_sort.

    Uses plain assert statements instead of nose.tools.assert_equal:
    the nose project is unmaintained and fails on modern Python
    versions, and the checks are equivalent.
    """

    def test_merge_sort(self):
        print('None input')
        # None must pass through unchanged.
        assert merge_sort(None) is None

        print('Empty input')
        assert merge_sort([]) == []

        print('One element')
        assert merge_sort([5]) == [5]

        print('Two or more elements')
        # Includes duplicates and negatives; compare against the
        # built-in sorted() as the reference implementation.
        data = [5, 1, 7, 2, 6, -3, 5, 7, -1]
        assert merge_sort(data) == sorted(data)

        print('Success: test_merge_sort')
def main():
    """Run the merge-sort test suite."""
    TestMergeSort().test_merge_sort()


if __name__ == '__main__':
    main()
# -
# %run -i test_merge_sort.py
| sorting_searching/merge_sort/merge_sort_solution.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import os

# Raw string: a Windows path contains backslashes that Python would
# otherwise try to interpret as escape sequences (e.g. '\O', '\P'
# are invalid escapes and a future syntax error).
myDirectory = r'E:\OneDrive\Python\PythonMegaCourse\Jupyter\DataAnalysisPandas'
os.listdir(myDirectory)

import pandas

# Load the same sample data from three different formats.
dfcsv = pandas.read_csv('supermarkets.csv')
dfcsv
dfjson = pandas.read_json('supermarkets.json')
dfjson
dfjson.Address[0]
dftxt = pandas.read_csv('supermarkets-commas.txt')
dftxt

df5 = pandas.read_csv('supermarkets-semi-colons.txt', sep=';')
# set_index returns a re-indexed copy; df5 itself is unchanged.
df5.set_index('ID')
dir(df5.Name)
df5.Name.add_prefix('test')
df5

# Label-based and position-based selection.
df5.loc[2:3, 'Address':'City']
list(df5.iloc[3])
# .ix was removed in pandas 1.0; .iloc gives the same position-based
# row selection on this default integer index.
df5.iloc[1, :]
df5['Continent'] = ['North America'] * 6
df5
df5['Address']
df5.loc[:, 'Address']
df5
# The positional axis argument to drop() was removed in pandas 2.0;
# use the explicit keyword (axis=0 drops the row labelled 0).
# drop() returns a copy; df5 itself is unchanged.
df5.drop(0, axis=0)
df5
| DataAnalysisPandas/DataAnalysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/GabrielCatanaoan/OOP1_2/blob/main/Activity_2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="TndYPsn1YpR7" colab={"base_uri": "https://localhost:8080/"} outputId="861771b8-0c4f-423c-d5ef-a240a252c8c8"
#Write a python program that converts the temperature Celsius to Farenheit.Create a class name Temperature
#Create Celsius as attribute name,Temp() as method, and temp1 as object name. F=1.8xC+32
class Temperature:
    """Converts a Celsius temperature to Fahrenheit via F = 1.8*C + 32."""

    def __init__(self, Celsius):
        # Temperature in degrees Celsius.
        self.Celsius = Celsius

    def Temp(self):
        """Return the conversion result as a 'Farenheit=<value>' string."""
        fahrenheit = 1.8 * self.Celsius + 32
        return f'Farenheit={fahrenheit}'
# Prompt the user for a Celsius value and print the conversion.
temp1=Temperature(float(input('Celsius=')))
print(temp1.Temp())
# + colab={"base_uri": "https://localhost:8080/"} id="FOzpY-xEYUc4" outputId="bb5dd3e2-e369-4231-f848-5aa79a9da8c4"
#Define a Perimeter() method of the class which allows you to calculate the perimeter of the circle.
#Define an Area() method of the class that calculates the circle’s area.
import math
class Circle:
    """A circle defined by its radius, with area and perimeter helpers."""

    def __init__(self, radius):
        # Radius of the circle.
        self.radius = radius

    def Area(self):
        """Return 'Area of the Circle=<value>' rounded to 2 decimals."""
        area = math.pi * self.radius ** 2
        return f'Area of the Circle={round(area, 2)}'

    def Perimeter(self):
        """Return 'Perimeter of the Circle=<value>' rounded to 2 decimals."""
        perimeter = 2 * math.pi * self.radius
        return f'Perimeter of the Circle={round(perimeter, 2)}'
# Prompt the user for a radius and print the circle's area and perimeter.
radius1=Circle(float(input('input radius=')))
print(radius1.Area())
print(radius1.Perimeter())
| Activity_2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# +
import numpy as np
import matplotlib.pyplot as plt
import cv2

# NOTE(review): this notebook uses Python 2 print statements
# (e.g. `print 'Image shape ', img.shape`), so it will not run under
# Python 3 without converting them to print() calls.

# Load an image; the -1 flag (IMREAD_UNCHANGED) loads it as stored.
img = cv2.imread('../images/1.jpg', -1)
print 'Image shape ', img.shape
# cv2.imshow('Image ',img)
# Show what is going on (matplotlib expects RGB, OpenCV stores BGR).
plt.axis("off")
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
plt.show()
# +
# Convert to HSV color space; this will produce better color filtering
# because red occupies two hue bands that are easy to threshold.
imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
# Threshold on low range of HSV red (hue 0-15, near the wrap-around).
low_redl = np.array([0, 135, 135])
low_redh = np.array([15, 255, 255])
imgThreshLow = cv2.inRange(imgHSV, low_redl, low_redh)
# Threshold on high range of HSV red (hue 159-179).
high_redl = np.array([159, 135, 135])
high_redh = np.array([179, 255, 255])
imgThreshHigh = cv2.inRange(imgHSV, high_redl, high_redh)
# Combine low range red thresh and high range red thresh.
imgThresh = cv2.bitwise_or(imgThreshLow, imgThreshHigh)
# Show the combined binary mask.
plt.axis("off")
plt.imshow(imgThresh, cmap='Greys_r')
plt.show()
# +
imgThreshSmoothed = imgThresh.copy()
# Open the mask (erode, then dilate) to remove small speckles.
kernel = np.ones((3, 3), np.uint8)
imgThreshSmoothed = cv2.erode(imgThresh, kernel, iterations=1)
imgThreshSmoothed = cv2.dilate(imgThreshSmoothed, kernel, iterations=1)
# Gaussian blur to soften the mask edges before edge detection.
imgThreshSmoothed = cv2.GaussianBlur(imgThreshSmoothed, (5, 5), 0)
plt.axis("off")
plt.imshow(imgThreshSmoothed, cmap='Greys_r')
plt.show()
# +
# Canny edge detection on the smoothed mask.
imgCanny = cv2.Canny(imgThreshSmoothed,160,80)
plt.axis("off")
plt.imshow(imgCanny, cmap='Greys_r')
plt.show()
# +
# NOTE(review): the 3-value return of findContours is OpenCV 3.x API;
# OpenCV 4.x returns only (contours, hierarchy) -- confirm the version.
image, contours, hierarchy = cv2.findContours(imgCanny,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
print 'len(contours)',len(contours)
plt.axis("off")
plt.imshow(imgCanny, cmap='Greys_r')
plt.show()
# +
# Approximate each contour with a simpler polygon and draw the results.
listOfContours = []
if len(contours) != 0:
    for cnt in contours:
        # Approximation tolerance in pixels for approxPolyDP;
        # something to play with to fine tune.
        epsilon = 6.7
        # print'epsilon',epsilon
        listOfContours.append(cv2.approxPolyDP(cnt, epsilon, True))
imghull2 = cv2.drawContours(img, listOfContours, -1, (0,255,0), 3)
# Show the original image with the approximated contours overlaid.
plt.axis("off")
plt.imshow(cv2.cvtColor(imghull2, cv2.COLOR_BGR2RGB))
plt.show()
# -
| notebook/.ipynb_checkpoints/Finding cones note book-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Desafio 3
#
# Neste desafio, iremos praticar nossos conhecimentos sobre distribuições de probabilidade. Para isso,
# dividiremos este desafio em duas partes:
#
# 1. A primeira parte contará com 3 questões sobre um *data set* artificial com dados de uma amostra normal e
# uma binomial.
# 2. A segunda parte será sobre a análise da distribuição de uma variável do _data set_ [Pulsar Star](https://archive.ics.uci.edu/ml/datasets/HTRU2), contendo 2 questões.
#
# > Obs.: Por favor, não modifique o nome das funções de resposta.
# ## _Setup_ geral
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as sct
import seaborn as sns
from statsmodels.distributions.empirical_distribution import ECDF
# +
# #%matplotlib inline
# Default figure size and seaborn styling for all plots below.
from IPython.core.pylabtools import figsize
figsize(12, 8)
sns.set()
# -
# ## Parte 1
# ### _Setup_ da parte 1
# +
# Artificial dataset: 10k draws from N(20, 4) and from Binomial(100, 0.2).
np.random.seed(42)
dataframe = pd.DataFrame({"normal": sct.norm.rvs(20, 4, size=10000),
                          "binomial": sct.binom.rvs(100, 0.2, size=10000)})
# -
# ## Inicie sua análise a partir da parte 1 a partir daqui
# Sua análise da parte 1 começa aqui.
df = dataframe
df.head()
descr = df.describe()
descr
# ## Questão 1
#
# Qual a diferença entre os quartis (Q1, Q2 e Q3) das variáveis `normal` e `binomial` de `dataframe`? Responda como uma tupla de três elementos arredondados para três casas decimais.
#
# Em outra palavras, sejam `q1_norm`, `q2_norm` e `q3_norm` os quantis da variável `normal` e `q1_binom`, `q2_binom` e `q3_binom` os quantis da variável `binom`, qual a diferença `(q1_norm - q1 binom, q2_norm - q2_binom, q3_norm - q3_binom)`?
def q1():
    """Return the (Q1, Q2, Q3) differences between `normal` and `binomial`,
    each rounded to three decimal places, as a tuple."""
    quartiles = [0.25, 0.5, 0.75]
    diff = df['normal'].quantile(quartiles) - df['binomial'].quantile(quartiles)
    return tuple(diff.round(3))
# +
## Another way, much less elegant...
# descr = df.describe()
# q1_norm = descr.loc['25%']['normal']
# q2_norm = descr.loc['50%']['normal']
# q3_norm = descr.loc['75%']['normal']
# q1_binom = descr.loc['25%']['binomial']
# q2_binom = descr.loc['50%']['binomial']
# q3_binom = descr.loc['75%']['binomial']
# return ( round(q1_norm - q1_binom, 3), round(q2_norm - q2_binom, 3), round(q3_norm - q3_binom, 3) )
# -
# Display the answer to question 1.
q1()
# Para refletir:
#
# * Você esperava valores dessa magnitude?
#
# * Você é capaz de explicar como distribuições aparentemente tão diferentes (discreta e contínua, por exemplo) conseguem dar esses valores?
# Nao esperava,
# mas acredito que seja pelo fato da distribuicoes nao serem tao distintas, vide histogramas abaixo
# Overlay the two distributions for visual comparison.
sns.distplot(df['normal']);
sns.distplot(df['binomial']);
# ## Questão 2
#
# Considere o intervalo $[\bar{x} - s, \bar{x} + s]$, onde $\bar{x}$ é a média amostral e $s$ é o desvio padrão. Qual a probabilidade nesse intervalo, calculada pela função de distribuição acumulada empírica (CDF empírica) da variável `normal`? Responda como uma único escalar arredondado para três casas decimais.
def q2():
    """Empirical probability mass of the one-standard-deviation interval
    [mean - s, mean + s] of `normal`, rounded to three decimals."""
    serie = df['normal']
    mu, sd = serie.mean(), serie.std()
    cdf = ECDF(serie)
    return float((cdf(mu + sd) - cdf(mu - sd)).round(3))
q2()
# Para refletir:
#
# * Esse valor se aproxima do esperado teórico?
# * Experimente também para os intervalos $[\bar{x} - 2s, \bar{x} + 2s]$ e $[\bar{x} - 3s, \bar{x} + 3s]$.
# ## Questão 3
#
# Qual é a diferença entre as médias e as variâncias das variáveis `binomial` e `normal`? Responda como uma tupla de dois elementos arredondados para três casas decimais.
#
# Em outras palavras, sejam `m_binom` e `v_binom` a média e a variância da variável `binomial`, e `m_norm` e `v_norm` a média e a variância da variável `normal`. Quais as diferenças `(m_binom - m_norm, v_binom - v_norm)`?
def q3():
    """Return (m_binom - m_norm, v_binom - v_norm) rounded to three decimals."""
    norm, binom = df['normal'], df['binomial']
    # np.var computes the population variance (ddof=0), unlike Series.var (ddof=1).
    diff_mean = binom.mean() - norm.mean()
    diff_var = np.var(binom) - np.var(norm)
    return (round(diff_mean, 3), round(diff_var, 3))
q3()
# +
# Interesting: pandas' default variance (sample, ddof=1) and NumPy's
# (population, ddof=0) differ slightly — a difference lost after rounding.
# Sample variance (ddof=1):
m_norm = df['normal'].mean()
v_norm = df['normal'].var()
m_binom = df['binomial'].mean()
v_binom = df['binomial'].var()
diff = ( m_binom - m_norm, v_binom - v_norm )
print(diff)
# Population variance (ddof=0):
m_norm = df['normal'].mean()
v_norm = np.var(df['normal'])
m_binom = df['binomial'].mean()
v_binom = np.var(df['binomial'])
diff = ( m_binom - m_norm, v_binom - v_norm )
print(diff)
# -
# -
# Para refletir:
#
# * Você esperava valore dessa magnitude?
# * Qual o efeito de aumentar ou diminuir $n$ (atualmente 100) na distribuição da variável `binomial`?
# ## Parte 2
# ### _Setup_ da parte 2
# +
# Load the HTRU2 pulsar dataset and give the columns short names.
stars = pd.read_csv("pulsar_stars.csv")
stars.rename({old_name: new_name
              for (old_name, new_name)
              in zip(stars.columns,
                     ["mean_profile", "sd_profile", "kurt_profile", "skew_profile", "mean_curve", "sd_curve", "kurt_curve", "skew_curve", "target"])
             },
             axis=1, inplace=True)
# target: True when the observation is a real pulsar.
stars.loc[:, "target"] = stars.target.astype(bool)
# -
# ## Inicie sua análise da parte 2 a partir daqui
# Sua análise da parte 2 começa aqui.
stars.head()
stars.shape
# ## Questão 4
#
# Considerando a variável `mean_profile` de `stars`:
#
# 1. Filtre apenas os valores de `mean_profile` onde `target == 0` (ou seja, onde a estrela não é um pulsar).
# 2. Padronize a variável `mean_profile` filtrada anteriormente para ter média 0 e variância 1.
#
# Chamaremos a variável resultante de `false_pulsar_mean_profile_standardized`.
#
# Encontre os quantis teóricos para uma distribuição normal de média 0 e variância 1 para 0.80, 0.90 e 0.95 através da função `norm.ppf()` disponível em `scipy.stats`.
#
# Quais as probabilidade associadas a esses quantis utilizando a CDF empírica da variável `false_pulsar_mean_profile_standardized`? Responda como uma tupla de três elementos arredondados para três casas decimais.
# Non-pulsar observations only, standardized to mean 0 and (sample) std 1.
mprof = stars.query('target == 0')['mean_profile']
false_pulsar_mean_profile_standardized = (mprof - mprof.mean()) / mprof.std()
def q4():
    """ECDF values of the standardized profile at the N(0,1) 80/90/95% quantiles,
    rounded to three decimals."""
    theoretical_q = sct.norm.ppf([0.80, 0.90, 0.95])
    empirical_cdf = ECDF(false_pulsar_mean_profile_standardized)
    return tuple(empirical_cdf(theoretical_q).round(3))
q4()
# Para refletir:
#
# * Os valores encontrados fazem sentido?
# * O que isso pode dizer sobre a distribuição da variável `false_pulsar_mean_profile_standardized`?
sns.distplot(false_pulsar_mean_profile_standardized);
# ## Questão 5
#
# Qual a diferença entre os quantis Q1, Q2 e Q3 de `false_pulsar_mean_profile_standardized` e os mesmos quantis teóricos de uma distribuição normal de média 0 e variância 1? Responda como uma tupla de três elementos arredondados para três casas decimais.
def q5():
    """Differences between the sample quartiles of the standardized profile
    and the N(0,1) quartiles, rounded to three decimals."""
    probs = [0.25, 0.5, 0.75]
    sample_q = false_pulsar_mean_profile_standardized.quantile(probs)
    normal_q = sct.norm.ppf(probs)
    return tuple((sample_q - normal_q).round(3))
q5()
# Para refletir:
#
# * Os valores encontrados fazem sentido?
# * O que isso pode dizer sobre a distribuição da variável `false_pulsar_mean_profile_standardized`?
# * Curiosidade: alguns testes de hipóteses sobre normalidade dos dados utilizam essa mesma abordagem.
| main.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Model prediction using LogisticRegressionCV with SMOTE and using liblinear with predict proba
# Import all necessary packages
import pandas as pd
import numpy as np
from sklearn.linear_model import LogisticRegressionCV
from sklearn.metrics import mean_squared_error as mse
from sklearn.metrics import mean_absolute_error as mae
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import f1_score
from sklearn.metrics import recall_score
from imblearn.over_sampling import SMOTE
# -
# Loading the files as DataFrame and standardise
sample = pd.read_csv("sample_submission.csv")  # NOTE(review): loaded but never used below
test = pd.read_csv("test.csv")
train = pd.read_csv("train.csv")
target = train.pop("TARGET_5Yrs")
scaler = StandardScaler()
# NOTE(review): fit_transform's return value is printed and then discarded —
# `train` itself is NOT standardized before modelling; confirm this is intended.
print(scaler.fit_transform(train), end="\n\n----\n\n")
print(scaler.mean_)
# check for any null values on train
train.isnull().sum()
# check for any null values on test
test.isnull().sum()
# Prove that the target variable is imbalance - 83% is "1" with 6669 occurences out of 8000 entries
print(target.describe(),end="\n\n-------\n\n")
print(target.value_counts())
# +
# resampling using SMOTE
# SMOTE synthesizes new minority-class samples so both classes are balanced.
sm = SMOTE(random_state=123)
train_resampled, target_resampled = sm.fit_resample(train, target)
# -
# Check distribution after SMOTE
print(target_resampled.describe(),end="\n\n-------\n\n")
print(target_resampled.value_counts())
# split train into train and validation
X_train, X_val, y_train, y_val = train_test_split(train_resampled, target_resampled, test_size=0.2, random_state=8)
# +
# Instantiate LogisticRegression Class into reg
reg = LogisticRegressionCV(solver='liblinear')
# fitting
reg.fit(X_train,y_train)
# -
# predictions probability
# Keep only the probability of the positive class (column 1).
y_train_preds = reg.predict_proba(X_train)[:,1]
y_val_preds = reg.predict_proba(X_val)[:,1]
# +
#RMSE and MAE scores for this model on training set and validation set
print("Train RMSE - " + str(mse(y_train, y_train_preds, squared=False)))
print("Train MAE - " + str(mae(y_train, y_train_preds)))
print("Val RMSE - " + str(mse(y_val, y_val_preds, squared=False)))
print("Val MAE - " + str(mae(y_val, y_val_preds)))
# AUROC scores
print('Train AUROC score:',roc_auc_score(y_train,y_train_preds))
print('Validation AUROC score:', roc_auc_score(y_val,y_val_preds))
# -
# prepare submission
y_test_preds = reg.predict_proba(test)[:,1]
# check if there is out of binary values
print(list(y_test_preds[y_test_preds > 1]))
print(list(y_test_preds[y_test_preds < 0]))
#printout prediction into a file
submission = pd.DataFrame({'Id':test['Id'],'TARGET_5Yrs':y_test_preds})
submission.to_csv('submission_week_2_14.csv',index=False)
# +
# save model
from joblib import dump
dump(reg, 'LogisticRegressionCV_SMOTE_liblinear_predict_proba_1.joblib')
# -
| notebooks/primananda_charles-11210325-week2_LogisticRegressionCV_Best.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from psycopg2 import connect
import os
import json
import math
import os
import json
import math
import psycopg2
json_data = '/home/erick/Downloads/distros.json'

# Load the JSON file with the distro records.
with open(json_data, 'r') as f:
    distros_dict = json.load(f)

# Report the file size in bytes.
file_size = os.path.getsize(json_data)
print(file_size)

for i in distros_dict:
    print(i)

# Single connection to the database.  (BUG FIX: the original also opened a
# second connection via psycopg2.connect() whose cursor was immediately
# shadowed and which was never closed — a connection leak.)
conn = connect(
    dbname = "datamodel",
    user = "postgres",
    host = "localhost",
    password = "<PASSWORD>"
)
cur = conn.cursor()

# Create the target table if it does not exist yet.
q = """CREATE TABLE IF NOT EXISTS datamodel.distros (
	Name varchar(20),
	Version varchar(20),
	Install varchar(20),
	Owner varchar(20),
	Kernel varchar(20)
	);
"""
cur.execute(q)
# BUG FIX: psycopg2 opens a transaction implicitly; without an explicit
# commit the CREATE TABLE is rolled back when the connection closes.
conn.commit()
# +
# close the cursor object to avoid memory leaks
cur.close()
# close the connection as well
conn.close()
| psycopg2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import os
# Merged, normalized per-match player stats (rows grouped by team and match).
players_df = pd.read_csv(".././Data/player_info_merged_normalized.csv")
players_df.head(60)
players_df.shape
players_df[players_df.isna().any(axis=1)]
# Collect and drop the positions of coach rows (column 1 holds the team field).
idx = []
for i in range(players_df.shape[0]):
    if "COACH" in players_df.iloc[i,1]:
        idx.append(i)
print(idx)
players_df = players_df.drop(idx)
# Repair one-off team-label glitches: when a row's team differs from two
# identical neighbours, assume a typo and copy the neighbours' team.
for i in range(1,players_df.shape[0]-1):
    if players_df.iloc[i-1,1] == players_df.iloc[i+1,1] and players_df.iloc[i-1,1] != players_df.iloc[i,1]:
        players_df.iloc[i,1] = players_df.iloc[i-1,1]
players_df.reset_index(drop=True,inplace = True)
# Hand-picked bad rows to discard (indices found by manual inspection).
drop_idxs = [4185, 4186, 4189, 4193, 4196
#             , 4209, 4213, 4214, 4125, 4218
            ]
players_df = players_df.drop(drop_idxs)
players_df.reset_index(drop=True,inplace = True)
drop_idxs = [4203, 4204, 4208, 4209, 4213
#             , 4209, 4213, 4214, 4125, 4218
            ]
players_df = players_df.drop(drop_idxs)
players_df.reset_index(drop=True,inplace = True)
# +
# Insert two all-zero "Filler" players at fractional indices so that, after
# sort_index(), they land inside the squads they pad out to 36 rows per match.
# BUG FIX: DataFrame.append was removed in pandas 2.0 — use pd.concat instead.
def _insert_filler(frame, team, frac_index):
    """Return `frame` with a zeroed filler player for `team` inserted at frac_index."""
    filler = pd.DataFrame({"Players": "Filler", "Teams": team, "Goals scored": 0.0, "Total shots": 0.0,
                           "Shots on Target": 0.0, "Assists": 0.0, "Interceptions": 0.0, "Crosses": 0.0,
                           "Fouls committed": 0.0, "Offsides": 0.0, "Time Played": 0.0}, index=[frac_index])
    return pd.concat([frame, filler]).sort_index().reset_index(drop=True)

players_df = _insert_filler(players_df, "Dortmund", 2500.5)
players_df = _insert_filler(players_df, "Arsenal", 2788.5)
# -
# Count players per team block: a new block starts at each goalkeeper "(GK)"
# row whose team differs from the previous row's team.
last_team = 'Juventus'
team_idx = 0
cnt = 0
team_players_dict = {}
for i in range(players_df.shape[0]):
    if "(GK)" in players_df.iloc[i,0] and players_df.iloc[i,1] != last_team:
        team_players_dict[team_idx] = cnt
        # Flag blocks that already have the expected 36 players.
        if cnt == 36:
            print(i,players_df.iloc[i,1],team_idx)
        cnt = 1
        team_idx += 1
    else:
        cnt += 1
    last_team = players_df.iloc[i,1]
team_players_dict
players_df.shape
# The two bare literals below are leftover notebook display lines; no effect.
2501
2789
# Mark the two manually inserted filler rows as home-game rows.
# NOTE(review): assumes a 'Home_Game' column already exists in the CSV — confirm.
players_df.at[2501,'Home_Game'] = 1
players_df.at[2789,'Home_Game'] = 1
players_df.iloc[2501,:]
players_df[players_df['Home_Game'].isnull()]
# +
# For each player row, store either the raw stats (first appearance) or the
# average of all of that player's earlier rows (later appearances).  The last
# column (Home_Game) is always taken from the current row, not averaged.
player_dict_flag = {}
player_dict_idx = {}
player_dict_data = {}
for i in range(players_df.shape[0]):
    player_name = players_df.iloc[i,0]
    player_dict_idx[player_name] = []
    player_dict_flag[player_name] = 0
for i in range(players_df.shape[0]):
    player_name = players_df.iloc[i,0]
    if player_dict_flag[player_name] == 1:
        # Seen before: average every earlier row of this player (O(n^2) rescan).
        temp = []
        for j in range(i):
            if players_df.iloc[j,0] == player_name:
                temp.append(players_df.iloc[j,2:-1].values)
        list_avg = np.sum(temp,axis = 0) / len(temp)
        # Debug tracing for one specific player.
        if player_name == "Vidal":
            print(list_avg)
            print("added here")
        list_avg2 = np.append(list_avg,players_df.iloc[i,-1])
        player_dict_data[i] = list_avg2
    else:
        player_dict_data[i] = players_df.iloc[i,2:].values
        if player_name == "Vidal":
            print("lol here")
    player_dict_idx[player_name].append(i)
    player_dict_flag[player_name] = 1
# -
player_dict_idx
player_dict_data
len(player_dict_data)
# Sanity check: every stored vector should have the same shape.
for key,value in player_dict_data.items():
    print(player_dict_data[key].shape)
# Stack the per-player vectors into matches of 36 players each (18 + 18).
X = []
for i in range(0,len(player_dict_data),36):
    temp = []
    for j in range(36):
        temp.append(player_dict_data[i+j])
        # Debug tracing of one specific (match, player) slot.
        if j == 17 and len(X) == 77:
            print(i+j)
            print(player_dict_data[i+j])
    X.append(temp)
X_input = np.asarray(X)
X_input.shape
X_input[77][17]
with open('players_X_final.npy', 'wb') as f:
    np.save(f, X_input)
# Per-player target stats, flattened per match (36 players x 8 stats each).
distilled_rows = []
for i in range(0,players_df.shape[0],36):
    temp = []
    for j in range(36):
        temp.append(players_df.loc[i+j,['Goals scored']].values[0])
        temp.append(players_df.loc[i+j,['Assists']].values[0])
        temp.append(players_df.loc[i+j,['Fouls committed']].values[0])
        temp.append(players_df.loc[i+j,['Total shots']].values[0])
        temp.append(players_df.loc[i+j,['Shots on Target']].values[0])
        temp.append(players_df.loc[i+j,['Interceptions']].values[0])
        temp.append(players_df.loc[i+j,['Crosses']].values[0])
        temp.append(players_df.loc[i+j,['Time Played']].values[0])
#         temp.append(players_df.loc[i+j,['Goals scored','Assists','Fouls committed','Total shots','Shots on Target','Interceptions','Fouls committed','Time Played']].values.reshape((8,1)))
    distilled_rows.append(temp)
matches_df = pd.read_csv(".././Data/match_info_merged_normalized.csv")
# +
# Match-level features.  Rows of matches_df alternate home team (even index)
# and away team (odd index), so de-interleave each column into _1 (home) and
# _2 (away) series.
blocks = matches_df['Blocks'].values
possession = matches_df['Possession'].values
total_passes = matches_df['Passes'].values
passing_accuracy = matches_df['Passing Accuracy'].values
corners = matches_df['Corners'].values
blocks_1 = list(blocks[0::2])
blocks_2 = list(blocks[1::2])
possession_1 = list(possession[0::2])
total_passes_1 = list(total_passes[0::2])
total_passes_2 = list(total_passes[1::2])
passing_accuracy_1 = list(passing_accuracy[0::2])
passing_accuracy_2 = list(passing_accuracy[1::2])
corners_1 = list(corners[0::2])
corners_2 = list(corners[1::2])
match_features = []
for i in range(len(corners_1)):
    # BUG FIX: the original indexed the raw interleaved array (possession[i])
    # instead of the de-interleaved home series possession_1[i], so the first
    # half of the matches contributed possession values from the wrong rows.
    match_features.append([blocks_1[i], blocks_2[i], possession_1[i], total_passes_1[i], total_passes_2[i],
                           passing_accuracy_1[i], passing_accuracy_2[i], corners_1[i], corners_2[i]])
# -
# Concatenate match-level features with the distilled player stats to form Y.
output = []
for i in range(len(match_features)):
    output.append(match_features[i] + distilled_rows[i])
Y = np.asarray(output)
Y.shape
with open('Y_final.npy', 'wb') as f:
    np.save(f, Y)
# +
# remaining matches;
# Juventus(H) vs Lyon
# Manchester City(H) vs Real madrid
# Bayern(H) vs Chelsea
# Barcelona(H) vs Napoli
# -
import pickle
# Team-level embeddings computed elsewhere in the pipeline.
with open('teams_last_embedding.p', 'rb') as fp:
    data = pickle.load(fp)
data.keys()
# Latest 18-man squad per team (last 18 player rows of each team).
last_team_squads = {}
for elem in data.keys():
    last_team_squads[elem] = players_df[players_df['Teams'] == elem]['Players'][-18:].values
last_team_squads['Juventus']
players_df[players_df['Teams'] == 'Juventus']['Players'][-18:].values
# +
# Most recent embedding vector for every player in each squad.
last_embedding_players = {}
for elem in last_team_squads.keys():
    team_squad = last_team_squads[elem]
    team_embedding = []
    for player in team_squad:
        # player_dict_idx holds all row positions of the player; take the last.
        last_idx = player_dict_idx[player][-1]
        team_embedding.append(player_dict_data[last_idx])
    last_embedding_players[elem] = team_embedding
# +
# Persist the per-team player embeddings (the cPickle fallback is Python 2 legacy).
try:
    import cPickle as pickle
except ImportError:  # python 3.x
    import pickle
with open('players_last_embedding.p', 'wb') as fp:
    pickle.dump(last_embedding_players, fp, protocol=pickle.HIGHEST_PROTOCOL)
# -
last_embedding_players['Lyon']
def get_teamwise_list(team_name,home_flag):
    """Return the team's player embeddings as an array, with the last feature
    of each vector replaced by `home_flag` (1 = home side, 0 = away side)."""
    rows = [np.append(vec[:-1], home_flag) for vec in last_embedding_players[team_name]]
    return np.asarray(rows)
# +
# Build and save the player-feature tensors for the four remaining fixtures.
# (The original repeated the same four-line cell once per fixture; this is the
# same computation driven by data.)  The first team listed is the home side.
fixtures = [
    ('Juventus', 'Lyon', 'Juventus_Lyon_players.npy'),
    ('Man. City', 'Real Madrid', 'City_Madrid_players.npy'),
    ('Bayern', 'Chelsea', 'Bayern_Chelsea_players.npy'),
    ('Barcelona', 'Napoli', 'Barca_Napoli_players.npy'),
]
for home, away, fname in fixtures:
    # Shape: (2 teams, 18 players, n_features).
    match_npy = np.asarray([get_teamwise_list(home, 1), get_teamwise_list(away, 0)])
    with open(fname, 'wb') as f:
        np.save(f, match_npy)
# -
| Code/.ipynb_checkpoints/get_X_players_and_Y-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="V0xH8XNEelKI"
# Cloning the repository
# !git clone https://github.com/misbah4064/Real-Time-Voice-Cloning.git
# + id="syA0yeyQeosa"
# Changing the current directory to the repository's directory
# %cd Real-Time-Voice-Cloning/
# + id="Cgn7dHe1e5It"
# Installing the dependencies
# !pip install -q -r requirements.txt
# !apt-get install -qq libportaudio2
# + id="wq6gAjdxe5x9"
# Downloading pretrained data and unzipping it
# !gdown https://drive.google.com/uc?id=1n1sPXvT34yXFLT47QZA6FIRGrwMeSsZc
# !unzip pretrained.zip
# + id="OOA_HDrke8fv"
# Initializing all the encoder libraries
from IPython.display import Audio
from IPython.utils import io
from synthesizer.inference import Synthesizer
from encoder import inference as encoder
from vocoder import inference as vocoder
from pathlib import Path
import numpy as np
import librosa
# Paths to the pretrained encoder / synthesizer / vocoder weights.
encoder_weights = Path("encoder/saved_models/pretrained.pt")
vocoder_weights = Path("vocoder/saved_models/pretrained/pretrained.pt")
syn_dir = Path("synthesizer/saved_models/logs-pretrained/taco_pretrained")
encoder.load_model(encoder_weights)
synthesizer = Synthesizer(syn_dir)
vocoder.load_model(vocoder_weights)
# + id="PEU1RADbwcaB"
# Sentence to be spoken in the cloned voice.
text = "I’m running away from my responsibilities, and it feels good."
# + id="UKyRu-9XgYlT" colab={"base_uri": "https://localhost:8080/", "height": 92} executionInfo={"status": "ok", "timestamp": 1627403661522, "user_tz": -60, "elapsed": 15987, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjnoHvOS0BBvcc6SDIunSK8XV91TT7FUr2OQoSC=s64", "userId": "14882853818545771059"}} outputId="dac9599f-da81-4a73-8277-7a7e1a8e2c10"
# Reference recording of the voice to clone.
in_fpath = Path("trump10.wav")
# NOTE(review): this first preprocess result is never used; the file is
# re-loaded with librosa and preprocessed again just below.
reprocessed_wav = encoder.preprocess_wav(in_fpath)
original_wav, sampling_rate = librosa.load(in_fpath)
preprocessed_wav = encoder.preprocess_wav(original_wav, sampling_rate)
# Speaker embedding that conditions the synthesizer on the target voice.
embed = encoder.embed_utterance(preprocessed_wav)
with io.capture_output() as captured:
    specs = synthesizer.synthesize_spectrograms([text], [embed])
generated_wav = vocoder.infer_waveform(specs[0])
# Pad one second of silence at the end so playback is not cut off.
generated_wav = np.pad(generated_wav, (0, synthesizer.sample_rate), mode="constant")
display(Audio(generated_wav, rate=synthesizer.sample_rate))
# + id="loustTnozKpX"
| DeepFake_AUDIO.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Humans by group
# Author: <NAME> <<EMAIL>>
#
# This notebook is designed to estimate the number of humans who are in a particular user group. It can only run in the internal analytics cluster, as it requires access to unredacted `user` table, as that is where system-ness of an account can be easily determined.
#
# The "is human" estimation is done by:
#
# * ignoring known system accounts
# * ignoring bot accounts
#
# This is based on [a report made in April 2021](https://people.wikimedia.org/~urbanecm/growth-team/sysops-per-wiki-20210429.html) for the Growth team.
# +
from wmfdata import mariadb, utils
import pandas as pd
# Show every row when displaying DataFrames (the wiki list is long).
pd.set_option('display.max_rows', None)
# +
# Query every public wiki for user-group membership counts, excluding system
# accounts and bots, then combine the per-wiki results into one DataFrame.
dfs = []
private = utils.get_dblist('private')
closed = utils.get_dblist('closed')
for wiki in utils.get_dblist('all'):
    if wiki in private or wiki in closed:
        continue
    if wiki == 'labtestwiki':
        continue
    try:
        dfs.append(mariadb.run('''
        SELECT
            DATABASE() AS wiki,
            ug_expiry IS NULL AS permanent,
            ug_group,
            COUNT(*) AS users
        FROM user_groups
        -- we need the user table, to be able to exclude system accounts
        JOIN user ON user_id=ug_user
        WHERE
            -- Ignore system accounts
            user_token NOT LIKE "%INVALID%" AND
            -- Ignore bots
            ug_user NOT IN (
                SELECT ug_user FROM user_groups WHERE ug_group="bot"
            )
        GROUP BY
            ug_expiry IS NULL,
            ug_group
        ''', wiki))
    except ValueError:
        # Best-effort: skip wikis the query runner cannot handle.
        pass
df = pd.concat(dfs).reset_index(drop=True).fillna(0)
# -
# Permanent (no-expiry) group memberships, pivoted to wiki x group.
dfPivot = df.loc[df.permanent==1][['wiki', 'ug_group', 'users']].pivot_table(index='wiki', columns=['ug_group'], values='users', fill_value=0)
dfPivot.to_json('/home/urbanecm/Documents/steward/2021-wikimedia-humans-per-group/data/permanent-users-per-group.json')
# Temporary (expiring) group memberships.
dfPivot = df.loc[df.permanent==0][['wiki', 'ug_group', 'users']].pivot_table(index='wiki', columns=['ug_group'], values='users', fill_value=0)
dfPivot.to_json('/home/urbanecm/Documents/steward/2021-wikimedia-humans-per-group/data/temporary-users-per-group.json')
# Permanent and temporary combined.
dfPivot = df[['wiki', 'ug_group', 'users']].groupby(['wiki', 'ug_group']).sum().pivot_table(index='wiki', columns=['ug_group'], values='users', fill_value=0)
dfPivot.to_json('/home/urbanecm/Documents/steward/2021-wikimedia-humans-per-group/data/total-users-per-group.json')
| humans_by_group.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Who's responsible?
#
# Archives New Zealand's [Archway](https://archway.archives.govt.nz/) database divides government activities up into 303 functions. Over time, different agencies have been made responsible for these functions, and it can be interesting to track how these responsibilities have shifted.
#
# This notebook uses [data about functions harvested from Archway](harvesting_functions_from_archway.ipynb) to create a a simple visualisation of the agencies responsible for a selected function.
# +
# %%capture
import ipywidgets as widgets
from IPython.display import display, HTML
import json
import altair as alt
import pandas as pd
alt.renderers.enable('notebook')
# -
# Load the harvested functions data from a JSON file
with open('data/functions.json', 'r') as json_file:
    data = json.load(json_file)
# Create a dictionary to use in the dropdown
# Maps each function's name ('term') to its full harvested record.
options = {f['term']: f for f in data}
# +
def make_chart(change):
    """Render a Gantt-style chart of the agencies responsible for the selected
    function.

    Called by ipywidgets whenever the dropdown value changes; change['new']
    holds the newly selected function record.
    """
    # Clear current output
    out.clear_output(wait=True)
    # Get the currently selected term from the dropdown
    term = change['new']
    # Get the agencies responsible for the selected function
    agencies = term['agencies_responsible']
    if agencies:
        # Convert to a dataframe
        df = pd.DataFrame(agencies)
        # Set some defaults for missing dates
        df['start_date'] = df['start_date'].replace('', '1853')
        df['end_date'] = df['end_date'].replace('', '2019')
        # Create a Gannt style chart
        # NOTE(review): Altair's conventional spelling is x2=alt.X2('end_date:T');
        # alt.X works with this Altair version but confirm before upgrading.
        chart = alt.Chart(df).mark_bar(size=20).encode(
            x=alt.X('start_date:T', axis=alt.Axis(format='%Y', title='Dates agency was responsible for function'), scale=alt.Scale(nice=True)),
            x2=alt.X('end_date:T'),
            y=alt.Y('entity', scale=alt.Scale(rangeStep=30), title='Agency'),
            color=alt.Color('entity', legend=None),
            tooltip=[alt.Tooltip('entity', title='Agency'), alt.Tooltip('start_date', title='From', timeUnit='year'), alt.Tooltip('end_date', title='To', timeUnit='year')]
        ).properties(
            width=700
        )
        with out:
            display(HTML('<h3>Agencies responsible for ‘{}’</h3>'.format(term['term'])))
            display(chart)
    else:
        with out:
            display(HTML('<p>No agencies responsible for ‘{}’</p>'.format(term['term'])))
# This is where the chart will be displayed
out = widgets.Output()
# Create the dropdown
term = widgets.Dropdown(
    options=options,
    value=None,
    disabled=False,
)
# Making a selection from the dropdown will automatically run 'make_chart'
term.observe(make_chart, names='value')
display(widgets.HBox([widgets.Label('Select a function:'), term]))
display(out)
# -
| display_agencies_responsible_for_function.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Federated Learning - SMS spam prediction with a GRU model
# In this tutorial you are going to see how you can leverage on PySyft and PyTorch to train a 1-layer GRU model using Federated Learning.
#
# The data used for this project was the [SMS Spam Collection Data Set](https://archive.ics.uci.edu/ml/datasets/sms+spam+collection) available on the [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/index.php). The dataset consists of c. 5500 SMS messages, of which around 13% are spam messages.
#
# The objective here is to simulate two remote machines (that we will call Bob and Anne), where each machine have a similar number of labeled datapoints (SMS labeled as spam or not).
#
# **Author**: <NAME>. Github: [@andrelmfarias](https://github.com/andrelmfarias) | Twitter: [@andrelmfarias](https://twitter.com/andrelmfarias)
#
# *I also wrote a blogpost about this tutorial and PySyft, feel free to check it out: [Private AI — Federated Learning with PySyft and PyTorch](https://towardsdatascience.com/private-ai-federated-learning-with-pysyft-and-pytorch-954a9e4a4d4e)*
# ## Useful imports
# +
import numpy as np
from sklearn.metrics import roc_auc_score
import torch
from torch import nn, optim
from torch.utils.data import TensorDataset, DataLoader
import warnings
warnings.filterwarnings("ignore")
# -
# ## Loading data
# As we are most interested in the usage of PySyft and Federated Learning, I will skip the text-preprocessing part of the project. If you are interested in how I performed the preprocessing of the raw dataset you can take a look on the script [preprocess.py](https://github.com/OpenMined/PySyft/tree/dev/examples/data/SMS-spam/preprocess.py).
#
# Each data point of the `inputs.npy` dataset correspond to an array of 30 tokens obtained form each message (padded at left or truncated at right)
#
# The `label.npy` dataset has the following unique values: `1` for `spam` and `0` for `non-spam`
# Token-id sequences (length 30, padded/truncated) and binary spam labels.
inputs = np.load('./data/inputs.npy')
labels = np.load('./data/labels.npy')
# Vocabulary size = largest token id + 1 (ids are 0-based).
VOCAB_SIZE = int(inputs.max()) + 1
# ## Training model with Federated learning
# ### Training and model hyperparameters
# +
# Training params
EPOCHS = 15
CLIP = 5 # gradient clipping - to avoid gradient explosion (frequent in RNNs)
lr = 0.1
BATCH_SIZE = 32
# Model params
EMBEDDING_DIM = 50
HIDDEN_DIM = 10
DROPOUT = 0.2
# -
# ### Initiating virtual workers with Pysyft
# In this part we are going to separate the dataset in training and test sets following the ratio 80/20. Each of these datasets will be split in two and will be sent to "Bob's" and "Anne's" machines in order to **simulate remote and private data**.
#
# Please note that in a real case, such datasets will be already in the remote machines and the preprocessing will be performed before hand by their own devices.
import syft as sy
# +
labels = torch.tensor(labels)
inputs = torch.tensor(inputs)
# splitting training and test data
# 80/20 split: the last 20% of rows form the test set.
pct_test = 0.2
train_labels = labels[:-int(len(labels)*pct_test)]
train_inputs = inputs[:-int(len(labels)*pct_test)]
test_labels = labels[-int(len(labels)*pct_test):]
test_inputs = inputs[-int(len(labels)*pct_test):]
# +
# Hook that extends the Pytorch library to enable all computations with pointers of tensors sent to other workers
hook = sy.TorchHook(torch)
# Creating 2 virtual workers
bob = sy.VirtualWorker(hook, id="bob")
anne = sy.VirtualWorker(hook, id="anne")
# threshold indexes for dataset split (one half for Bob, other half for Anne)
train_idx = int(len(train_labels)/2)
test_idx = int(len(test_labels)/2)
# Sending toy datasets to virtual workers
bob_train_dataset = sy.BaseDataset(train_inputs[:train_idx], train_labels[:train_idx]).send(bob)
anne_train_dataset = sy.BaseDataset(train_inputs[train_idx:], train_labels[train_idx:]).send(anne)
bob_test_dataset = sy.BaseDataset(test_inputs[:test_idx], test_labels[:test_idx]).send(bob)
anne_test_dataset = sy.BaseDataset(test_inputs[test_idx:], test_labels[test_idx:]).send(anne)
# Creating federated datasets, an extension of Pytorch TensorDataset class
federated_train_dataset = sy.FederatedDataset([bob_train_dataset, anne_train_dataset])
federated_test_dataset = sy.FederatedDataset([bob_test_dataset, anne_test_dataset])
# Creating federated dataloaders, an extension of Pytorch DataLoader class
federated_train_loader = sy.FederatedDataLoader(federated_train_dataset, shuffle=True, batch_size=BATCH_SIZE)
federated_test_loader = sy.FederatedDataLoader(federated_test_dataset, shuffle=False, batch_size=BATCH_SIZE)
# -
# ### Creating simple GRU (1-layer) model with sigmoid activation for classification task
# For educational purposes, we built a handcrafted GRU with linear layers whose architecture and code you can check on [handcrafted_GRU.py](https://github.com/OpenMined/PySyft/blob/dev/examples/tutorials/advanced/Federated%20SMS%20Spam prediction/handcrafted_GRU.py)
#
# As the focus of this notebook is the usage of Federated Learning with PySyft, we not show the construction of the model here.
from handcrafted_GRU import GRU
# Initiating the model (hand-crafted GRU defined in handcrafted_GRU.py;
# hyperparameters come from constants defined earlier in the notebook).
model = GRU(vocab_size=VOCAB_SIZE, hidden_dim=HIDDEN_DIM, embedding_dim=EMBEDDING_DIM, dropout=DROPOUT)
# ### Training and validation
# Defining loss and optimizer. BCELoss expects probabilities in [0, 1],
# matching the sigmoid output of the GRU classifier.
criterion = nn.BCELoss()
optimizer = optim.SGD(model.parameters(), lr=lr)
# For each epoch we are going to compute the training and validation losses, as well as the [Area Under the ROC Curve](https://scikit-learn.org/stable/modules/model_evaluation.html#roc-metrics) score due to the fact that the target dataset is unbalanced (only 13% of labels are positive).
# + code_folding=[]
# One full pass per epoch: train on every (remote) batch, then evaluate on the
# federated test set and report AUC plus mean train/validation losses.
# NOTE(review): the hidden state `h` is allocated with BATCH_SIZE rows; this
# assumes every batch (including the last one) has exactly BATCH_SIZE samples
# — confirm the loader's drop-last behavior.
for e in range(EPOCHS):
    ######### Training ##########
    losses = []
    # Batch loop
    for inputs, labels in federated_train_loader:
        # Location of current batch
        worker = inputs.location
        # Initialize hidden state and send it to worker
        h = torch.Tensor(np.zeros((BATCH_SIZE, HIDDEN_DIM))).send(worker)
        # Send model to current worker
        model.send(worker)
        # Setting accumulated gradients to zero before backward step
        optimizer.zero_grad()
        # Output from the model
        output, _ = model(inputs, h)
        # Calculate the loss and perform backprop (all remotely, via pointers)
        loss = criterion(output.squeeze(), labels.float())
        loss.backward()
        # Clipping the gradient to avoid explosion
        nn.utils.clip_grad_norm_(model.parameters(), CLIP)
        # Backpropagation step
        optimizer.step()
        # Get the model back to the local worker
        model.get()
        losses.append(loss.get())  # .get() retrieves the scalar loss from the remote worker
    ######## Evaluation ##########
    # Model in evaluation mode
    model.eval()
    with torch.no_grad():
        test_preds = []
        test_labels_list = []
        eval_losses = []
        for inputs, labels in federated_test_loader:
            # get current location
            worker = inputs.location
            # Initialize hidden state and send it to worker
            h = torch.Tensor(np.zeros((BATCH_SIZE, HIDDEN_DIM))).send(worker)
            # Send model to worker
            model.send(worker)
            output, _ = model(inputs, h)
            loss = criterion(output.squeeze(), labels.float())
            eval_losses.append(loss.get())
            # Retrieve predictions locally to accumulate them for the AUC score
            preds = output.squeeze().get()
            test_preds += list(preds.numpy())
            test_labels_list += list(labels.get().numpy().astype(int))
            # Get the model back to the local worker
            model.get()
        score = roc_auc_score(test_labels_list, test_preds)
        print("Epoch {}/{}... \
        AUC: {:.3%}... \
        Training loss: {:.5f}... \
        Validation loss: {:.5f}".format(e+1, EPOCHS, score, sum(losses)/len(losses), sum(eval_losses)/len(eval_losses)))
    model.train()  # back to training mode for the next epoch
# -
# ## Well Done!
# Et voilà! You have just trained a model for a real world application (SMS spam classifier) using Federated Learning!
# ## Conclusion
# You can see that with the PySyft library and its PyTorch extension, you can perform operations with tensor pointers such as you can do with PyTorch API.
#
# Thanks to this, you were able to train spam detector model without having any access to the remote and private data: for each batch you sent the model to the current remote worker and got it back to the local machine before sending it to the worker of the next batch.
#
# You can also notice that this federated training did not harm the performance of the model as both losses reduced at each epoch as expected and the final AUC score on the test data was above 97.5%.
#
# There is however one limitation of this method: by getting the model back we can still have access to some private information.
# Let's say Bob had only one SMS on his machine. When we get the model back, we can just check which embeddings of the model changed and we will know which were the tokens (words) of the SMS.
#
# In order to address this issue, there are two solutions: Differential Privacy and Secured Multi-Party Computation (SMPC).
#
# Differential Privacy would be used to make sure the model does not give access to some private information.
#
# SMPC, which is one kind of Encrypted Computation, in return allows you to send the model privately so that the remote workers which have the data cannot see the weights you are using.
# # Congratulations!!! - Time to Join the Community!
#
# Congratulations on completing this notebook tutorial! If you enjoyed this and would like to join the movement toward privacy preserving, decentralized ownership of AI and the AI supply chain (data), you can do so in the following ways!
#
#
# ### Star PySyft on GitHub
#
# The easiest way to help our community is just by starring the repositories! This helps raise awareness of the cool tools we're building.
#
# - [Star PySyft](https://github.com/OpenMined/PySyft)
#
# ### Pick our tutorials on GitHub!
#
# We made really nice tutorials to get a better understanding of what Federated and Privacy-Preserving Learning should look like and how we are building the bricks for this to happen.
#
# - [Checkout the PySyft tutorials](https://github.com/OpenMined/PySyft/tree/master/examples/tutorials)
#
#
# ### Join our Slack!
#
# The best way to keep up to date on the latest advancements is to join our community!
#
# - [Join slack.openmined.org](http://slack.openmined.org)
#
# ### Join a Code Project!
#
# The best way to contribute to our community is to become a code contributor! If you want to start "one off" mini-projects, you can go to PySyft GitHub Issues page and search for issues marked `Good First Issue`.
#
# - [Good First Issue Tickets](https://github.com/OpenMined/PySyft/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22)
#
# ### Donate
#
# If you don't have time to contribute to our codebase, but would still like to lend support, you can also become a Backer on our Open Collective. All donations go toward our web hosting and other community expenses such as hackathons and meetups!
#
# - [Donate through OpenMined's Open Collective Page](https://opencollective.com/openmined)
| examples/tutorials/advanced/Federated SMS Spam prediction/Federated SMS Spam prediction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Moving Average Model (MA)
# # Persistence Model
#
# Again we are going to create a Persistence Model the same way as we did in the AR example. The difference here is that we are wanting to calculate residual errors (the difference between the predicted and the observed values).
# +
from pandas import read_csv
from pandas import DataFrame
from pandas import concat
from sklearn.metrics import mean_squared_error
from math import sqrt
# Calculate residual errors for a persistence forecast model.
# Load data; read_csv's `squeeze=True` was removed in pandas 2.0 —
# DataFrame.squeeze("columns") is the supported replacement and collapses
# the single value column into a Series.
series = read_csv('daily-total-female-births.csv', header=0, index_col=0,
                  parse_dates=True).squeeze("columns")
# Create lagged dataset: 't' is the previous observation, 't+1' the current one.
values = DataFrame(series.values)
dataframe = concat([values.shift(1), values], axis=1)
dataframe.columns = ['t', 't+1']
# Split into train and test sets (row 0 dropped: its lag is NaN).
X = dataframe.values
train_size = int(len(X) * 0.66)
train, test = X[1:train_size], X[train_size:]
train_X, train_y = train[:,0], train[:,1]
test_X, test_y = test[:,0], test[:,1]
# Persistence model: forecast t+1 as the value observed at t.
predictions = list(test_X)
# -
# +
from pandas import read_csv
from pandas import DataFrame
from pandas import concat
from sklearn.metrics import mean_squared_error
from math import sqrt
# Calculate residual errors for a persistence forecast model.
# Load data; read_csv's `squeeze=True` was removed in pandas 2.0 —
# DataFrame.squeeze("columns") is the supported replacement.
series = read_csv('daily-total-female-births.csv', header=0, index_col=0,
                  parse_dates=True).squeeze("columns")
# Create lagged dataset: 't' is the previous observation, 't+1' the current one.
values = DataFrame(series.values)
dataframe = concat([values.shift(1), values], axis=1)
dataframe.columns = ['t', 't+1']
# Split into train and test sets (row 0 dropped: its lag is NaN).
X = dataframe.values
train_size = int(len(X) * 0.66)
train, test = X[1:train_size], X[train_size:]
train_X, train_y = train[:,0], train[:,1]
test_X, test_y = test[:,0], test[:,1]
# Persistence model: forecast t+1 as the value observed at t.
predictions = list(test_X)
# Skill of the persistence model.
rmse = sqrt(mean_squared_error(test_y, predictions))
print('Test RMSE: %.3f' % rmse)
# Residuals = observed - predicted: the series the MA approach will model.
residuals = [test_y[i]-predictions[i] for i in range(len(predictions))]
residuals = DataFrame(residuals)
print(residuals.head())
# -
# # Autoregression of Residual Errors
#
# Building off our knowledge of AR, we are going to model the residual errors from the above example. This example will create a persistence model, calculate the residuals, and then fit an AR model to those residuals.
#
# We will get a list of residual coefficients that will be used in the next example.
# +
from pandas import read_csv
from pandas import DataFrame
from pandas import concat
from statsmodels.tsa.ar_model import AutoReg
# Load data; read_csv's `squeeze=True` was removed in pandas 2.0 —
# DataFrame.squeeze("columns") is the supported replacement.
series = read_csv('daily-total-female-births.csv', header=0, index_col=0,
                  parse_dates=True).squeeze("columns")
# Create lagged dataset: 't' is the previous observation, 't+1' the current one.
values = DataFrame(series.values)
dataframe = concat([values.shift(1), values], axis=1)
dataframe.columns = ['t', 't+1']
# Split into train and test sets (row 0 dropped: its lag is NaN).
X = dataframe.values
train_size = int(len(X) * 0.66)
train, test = X[1:train_size], X[train_size:]
train_X, train_y = train[:,0], train[:,1]
test_X, test_y = test[:,0], test[:,1]
# Persistence model on the training set (np.array to list).
train_pred = list(train_X)
# Training residuals the AR model is fitted on.
train_resid = [train_y[i]-train_pred[i] for i in range(len(train_pred))]
# Model the training set residuals with a 15-lag autoregression.
# The transitional `old_names=False` flag was removed from statsmodels and is
# the default behavior now, so it is no longer passed.
model = AutoReg(train_resid, lags=15)
model_fit = model.fit()
print('Coef=%s' % (model_fit.params))
# -
# # Residual Errors (Predicted)
#
# Our goal now is to predict the residuals for the days we "do not know" (we actually know them). These forecasted residuals will be used later on to create more accurate predictions.
# +
# Forecast the residual errors of the persistence model with the fitted AR coefficients.
from pandas import read_csv
from pandas import DataFrame
from pandas import concat
from statsmodels.tsa.ar_model import AutoReg
from matplotlib import pyplot
# Load data; read_csv's `squeeze=True` was removed in pandas 2.0 —
# DataFrame.squeeze("columns") is the supported replacement.
series = read_csv('daily-total-female-births.csv', header=0, index_col=0,
                  parse_dates=True).squeeze("columns")
# Create lagged dataset: 't' is the previous observation, 't+1' the current one.
values = DataFrame(series.values)
dataframe = concat([values.shift(1), values], axis=1)
dataframe.columns = ['t', 't+1']
# Split into train and test sets (row 0 dropped: its lag is NaN).
X = dataframe.values
train_size = int(len(X) * 0.66)
train, test = X[1:train_size], X[train_size:]
train_X, train_y = train[:,0], train[:,1]
test_X, test_y = test[:,0], test[:,1]
# Persistence model on the training set (np.array to list).
train_pred = list(train_X)
# Training residuals the AR model is fitted on.
train_resid = [train_y[i]-train_pred[i] for i in range(len(train_pred))]
# Model the training set residuals with a `window`-lag autoregression.
window = 15
model = AutoReg(train_resid, lags=window)
model_fit = model.fit()
coef = model_fit.params
# Walk forward over the time steps in the test set, seeding the lag window
# with the last `window` training residuals.
history = list(train_resid[len(train_resid)-window:])
predictions = list()
expected_error = list()
for t in range(len(test_y)):
    # Persistence forecast and its actual error.
    yhat = test_X[t]
    error = test_y[t] - yhat
    expected_error.append(error)
    # Predict the next error: intercept + sum of AR coefficients applied to
    # the most recent `window` residuals (most recent lag first).
    length = len(history)
    lag = [history[i] for i in range(length-window, length)]
    pred_error = coef[0]
    for d in range(window):
        pred_error += coef[d+1] * lag[window-d-1]
    predictions.append(pred_error)
    history.append(error)
    print('predicted error=%f, expected error=%f' % (pred_error, error))
# Plot predicted vs. expected error.
pyplot.plot(expected_error)
pyplot.plot(predictions, color='red')
pyplot.show()
print("Blue: Actual error\nRed: Predicted error")
# -
# # Increasing Accuracy with Modeled Residuals
#
# Time to tie it all together. We will build off of our original persistence model, but we are going to use the predicted residual errors to bring the forecasts closer to the observed data.
# +
# Correct the persistence forecasts with a model of the forecast residual errors.
from pandas import read_csv
from pandas import DataFrame
from pandas import concat
from statsmodels.tsa.ar_model import AutoReg
from matplotlib import pyplot
from sklearn.metrics import mean_squared_error
from math import sqrt
# Load data; read_csv's `squeeze=True` was removed in pandas 2.0 —
# DataFrame.squeeze("columns") is the supported replacement.
series = read_csv('daily-total-female-births.csv', header=0, index_col=0,
                  parse_dates=True).squeeze("columns")
# Create lagged dataset: 't' is the previous observation, 't+1' the current one.
values = DataFrame(series.values)
dataframe = concat([values.shift(1), values], axis=1)
dataframe.columns = ['t', 't+1']
# Split into train and test sets (row 0 dropped: its lag is NaN).
X = dataframe.values
train_size = int(len(X) * 0.66)
train, test = X[1:train_size], X[train_size:]
train_X, train_y = train[:,0], train[:,1]
test_X, test_y = test[:,0], test[:,1]
# Persistence model on the training set (np.array to list).
train_pred = list(train_X)
# Training residuals the AR model is fitted on.
train_resid = [train_y[i]-train_pred[i] for i in range(len(train_pred))]
# Model the training set residuals; pass `window` for the lag count so the fit
# stays consistent with the walk-forward loop below (was a hard-coded 15).
window = 15
model = AutoReg(train_resid, lags=window)
model_fit = model.fit()
coef = model_fit.params
# Walk forward over the test set, seeding the lag window with the last
# `window` training residuals.
history = list(train_resid[len(train_resid)-window:])
predictions = list()
for t in range(len(test_y)):
    # Persistence forecast and its actual error.
    yhat = test_X[t]
    error = test_y[t] - yhat
    # Predict the error from the last `window` residuals.
    length = len(history)
    lag = [history[i] for i in range(length-window, length)]
    pred_error = coef[0]
    for d in range(window):
        pred_error += coef[d+1] * lag[window-d-1]
    # Correct the persistence prediction with the forecasted error.
    yhat = yhat + pred_error
    predictions.append(yhat)
    history.append(error)
    # print('predicted=%f, expected=%f' % (yhat, test_y[t]))
# -
# +
# Correct the persistence forecasts with a model of the forecast residual
# errors, then score and plot the corrected forecasts.
from pandas import read_csv
from pandas import DataFrame
from pandas import concat
from statsmodels.tsa.ar_model import AutoReg
from matplotlib import pyplot
from sklearn.metrics import mean_squared_error
from math import sqrt
# Load data; read_csv's `squeeze=True` was removed in pandas 2.0 —
# DataFrame.squeeze("columns") is the supported replacement.
series = read_csv('daily-total-female-births.csv', header=0, index_col=0,
                  parse_dates=True).squeeze("columns")
# Create lagged dataset: 't' is the previous observation, 't+1' the current one.
values = DataFrame(series.values)
dataframe = concat([values.shift(1), values], axis=1)
dataframe.columns = ['t', 't+1']
# Split into train and test sets (row 0 dropped: its lag is NaN).
X = dataframe.values
train_size = int(len(X) * 0.66)
train, test = X[1:train_size], X[train_size:]
train_X, train_y = train[:,0], train[:,1]
test_X, test_y = test[:,0], test[:,1]
# Persistence model on the training set (np.array to list).
train_pred = list(train_X)
# Training residuals the AR model is fitted on.
train_resid = [train_y[i]-train_pred[i] for i in range(len(train_pred))]
# Model the training set residuals; pass `window` for the lag count so the fit
# stays consistent with the walk-forward loop below (was a hard-coded 15).
window = 15
model = AutoReg(train_resid, lags=window)
model_fit = model.fit()
coef = model_fit.params
# Walk forward over the test set, seeding the lag window with the last
# `window` training residuals.
history = list(train_resid[len(train_resid)-window:])
predictions = list()
for t in range(len(test_y)):
    # Persistence forecast and its actual error.
    yhat = test_X[t]
    error = test_y[t] - yhat
    # Predict the error from the last `window` residuals.
    length = len(history)
    lag = [history[i] for i in range(length-window, length)]
    pred_error = coef[0]
    for d in range(window):
        pred_error += coef[d+1] * lag[window-d-1]
    # Correct the persistence prediction with the forecasted error.
    yhat = yhat + pred_error
    predictions.append(yhat)
    history.append(error)
    print('predicted=%f, expected=%f' % (yhat, test_y[t]))
# Overall skill of the corrected forecasts.
rmse = sqrt(mean_squared_error(test_y, predictions))
print('Test RMSE: %.3f' % rmse)
# Plot observed values vs. corrected predictions.
pyplot.plot(test_y)
pyplot.plot(predictions, color='red')
pyplot.show()
print("Blue: Actual # births. \nRed: Predicted # births.")
# -
| Student Teaching/ARMA/MA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Inleiding Jupyter Notebooks
# Voor het oefenen met MongoDB in Python gebruiken we [Jupyter Notebooks](https://jupyter.org).
# Hiermee kun je op een interactieve manier aan de slag, waarbij je alle vrijheden hebt om je eigen varianten uit te proberen.
# Een notebook bestaat uit cellen.
#
# Je selecteert een cel door op de cel te klikken, of in de kantlijn links van de cel.
# Je selecteert de volgende/vorige cel met de pijltjestoetsen up/down.
# **Selecteer nu deze cel.**
#
# Er zijn twee soorten cellen: **Code** en **Markdown**.
# Het type van de geselecteerde cel vind je in de interface-balk bovenin.
# Met dit menu kun je ook het type van de cel veranderen
# Deze cel is een **Markdown** cel, de volgende cel is een (Python) **Code** cel. **Controleer dit.**
2 + 3 * 4
# ## Code-cellen
#
# **Uitvoeren van een cel**. De cel hierboven is een Code-cel, met daarin een Python opdracht. **Voer deze cel uit (Run)**:
#
# 1. Selecteer de cel
# 2. Voer de cel uit, via Run (pijltje) in het menu hierboven, of via SHIFT-RETURN.
#
# Zoals je ziet verschijnt het resultaat van de Python-opdracht onder de cel.
#
# > Bij het uitvoeren van een Code-cel verschijnt er in de kantlijn een getal tussen rechte haken. Dit geeft de volgorde van de uitgevoerde cellen aan. Als je de cel nogmaals uitvoert, zie je een volgend getal.
# **Aanpassen van een cel.** Je kunt de code in een Code-cel eenvoudig aanpassen: selecteer de cel, en verander de tekst.
#
# **Verander de Python-code in de cel hieronder in:** `2 + 3 * 5` en **voer de cel uit**.
# 2 + 3
# ## Code: samengestelde opdrachten
#
# Je kunt in een cel willekeurige stukken Python-code opnemen, inclusief import-opdrachten, functie-definities, enz.
# De definities en declaraties kun je in de volgende cellen weer gebruiken:
# op die manier kun je een groter programma stap voor stap opbouwen.
#
# Als je een cel uitvoert, wordt de laatste expressie van die cel daaronder als resultaat getoond.
# +
def succ(x):
    """Return the successor of *x*, i.e. x + 1."""
    return x+1
succ(3)
# -
# **Opeenvolgende cellen.** Je kunt alle Code-cellen in een Notebook beschouwen als één groot Python-programma, dat je stukje bij beetje (cel voor cel) kunt uitvoeren.
# Bij het uitvoeren van een cel wordt de waarde van de laatste expressie in de cel afgedrukt:
# dit gebruik je meestal om tussenresultaten te kunnen controleren.
#
# **Voer de onderstaande 2 cellen achtereenvolgens uit**
# State persists between cells: `a` and `b` defined here are reused below.
a = 13
b = a * 2
b  # the last expression of a cell is shown as its output: 26
c = 14
2 * a + b + c  # uses `a` and `b` from the cell above: 66
# Een Code-cel bevat een stukje Python-programma. Dat kan ook een functie-defintie zijn.
# Deze functie kan dan in dezelfde of in een volgende cel gebruikt worden.
#
# **Voer de onderstaande 2 code-cellen achtereenvolgens uit**
# +
def sqr (x):
    """Return the square of *x*."""
    return x * x
sqr(12)  # shown as cell output: 144
# -
num = 12
sqr(num + 1)  # reuses sqr() from the previous cell: 169
# Je kunt de tekst in een Markdown-cel aanpassen door deze in **edit-mode** te brengen.
# Dit doe je door een dubbel-klik op de cel.
# Je herkent deze mode aan het gebruik van een ander lettertype.
#
# **Pas de inhoud van deze cel aan**
# Een Markdown-cell breng je van edit-mode naar de geformatteerde tekst-mode door deze cel uit te voeren.
# Je *voert een cel uit* (*Run*) door op het pijl-symbool bovenin te klikken, of door SHIFT-RETURN.
#
# **Voer de cel hierboven uit om deze weer in geformatteerde tekst-mode te brengen.**
# * Het uitvoeren van een Markdown-cel resulteert in het formatteren van de tekst.
# * Het uitvoeren van een Code-cel resulteert in het uitvoeren van de code.
#
# ## Opdrachten
#
# 1. pas de tekst in deze cel aan (edit-mode) en voer deze cel uit.
# 2. pas de tekst van de cel hieronder aan en voer die cel uit.
# 3. verander het type van de cel hieronder en voer die cel uit.
# Let op het verschil tussen (2) en (3).
#
# 2 + 3 * 4
# ## Code werkt over meerdere cellen
#
# Code die je uitvoert kan een zij-effect: bijvoorbeeld het definiëren van een functie of een variabele.
# Het resultaat van dit zij-effect blijft bewaard en kun je gebruiken bij de uitvoering van een volgende cel.
# We gebruiken dit later om een groter programma in kleinere delen te presenteren.
#
# De onderstaande expressie gebruikt de functie die je hierboven gedefinieerd hebt:
x = succ(5)  # succ() was defined in an earlier cell; definitions persist
x  # shown as cell output: 6
# En de cel hieronder gebruikt de variabele die je hiervoor gedefinieerd hebt:
succ(x)
# ## Code: shell-opdrachten in Python
#
# In Python-code (in Jupyter Notebooks) kun je ook shell-opdrachten uitvoeren.
# Deze worden dan uitgevoerd in het onderliggende operating system.
# Het resultaat van zo'n opdracht kun je weer in Python verwerken.
#
# Een shell-opdracht geef je aan met een `!`, bijvoorbeeld `!ls`
# (voor het opvragen van de namen van de bestanden in de huidige map).
# Shell commands are prefixed with `!` in the notebook; jupytext escapes them
# as comments in this .py representation.
# !ls
# The output of a shell command can be captured in a Python variable:
# files = !ls
files  # list of file names produced by the escaped `!ls` assignment above
len(files)
# ## Kernel
#
# De code-cellen worden uitgevoerd door een *Kernel*-proces op de server.
# In ons geval is dat een Python-Kernel.
# Er zijn ook kernels voor andere programmeertalen beschikbaar.
#
# De toestand van de kernel wordt bepaald door de cellen die je uitvoert,
# in de volgorde zoals je die uitvoert.
# Dit is niet altijd de volgorde waarin ze in het Notebook staan.
#
# ### Herstarten van de Kernel
#
# Als je allerlei experimenten uitgevoerd hebt, wil je soms weer "met een schone lei beginnen".
# Hiervoor kun je de Kernel herstarten via het menu bovenin.
# In het bijzonder is het handig om daarbij de uitvoer van alle cellen ook te verwijderen:
# dan is duidelijk wat er tot nu toe uitgevoerd is, en wat niet.
#
# ## Learn Python - the hard way
#
# De cursus [Learn Python - the hard way](https://learnpythonthehardway.org) bevat allerlei opdrachten die letterlijk overgetypt moeten worden.
# De auteur legt er de nadruk op dat je die voorbeelden zelf moet intikken: van copy-paste leer je veel minder.
# Het intikken vraagt meer aandacht, en je maakt mogelijk leerzame fouten.
#
# In het materiaal gebruiken we zo nu en dan stukjes Python om over te tikken.
# Je zou gebruik kunnen maken van Copy-Paste, maar dan leer je er veel minder van.
#
# Tik de onderstaande code over in de volgende cel, en voer dan de cel uit
#
# ```Python
# def pred(x):
# return x-1
#
# pred(succ(succ(3)))
# ```
# ## Volgorde
#
# Bij de opzet van de Notebooks proberen we de volgende regels te gebruiken.
# Deze verminderen de kans op fouten en verwarring.
#
# 1. De volgorde van de cellen in het notebook is de natuurlijke volgorde: in principe voer je deze van voor naar achter uit.
# 2. De code in een cel is zoveel mogelijk *idempotent*: het maakt niet uit of je deze één keer of vaker uitvoert.
#
# Regel (1) betekent dat als je een cel in het midden van een reeks cellen opnieuw uitvoert,
# dat je dan in principe de cellen daarna ook opnieuw moet uitvoeren.
#
# Regel (2) betekent dat je opdrachten van de vorm `x = x + 1` moet vermijden. Voer in zo'n geval een nieuwe variabele in: `x1 = x + 1` o.i.d.
# ## Tussenvoegen en verwijderen van cellen
#
# Je kunt een cel toevoegen na de huidge cel met het `+`-symbool in het interface bovenin.
#
# De huidige cel verwijder je met het schaar-symbool in het interface bovenin.
# (Met Z -undo cell operation- maak je dat weer ongedaan.)
#
# Je kunt een cel verplaatsen met behulp van het handvat in de linker kantlijn.
# (Dit werkt alleen voor de nieuwere JupyterLab, niet voor Jupyter Notebook.)
# ## Samenvatting
#
# * een notebook bestaat uit cellen; een cel kan *tekst* bevatten (Markdown), zoals deze cel, of (Python) *code*, zoals de cellen hieronder.
# * je voert een cel uit door deze te selecteren (cursor in de cel), en vervolgens SHIFT-RETURN in te toetsen.
# Ook het pijltje in de opdrachtenbalk hierboven kun je gebruiken.
# * onder de cel zie je dan de uitvoer van deze opdracht.
# * alle variabelen enz. die je introduceert in de code van een cel kun je in de volgende cellen gebruiken.
# * met een `!` kun je een shell-opdracht uitvoeren; het resultaat kun je in Python gebruiken (als string).
# * om problemen te voorkomen voer je cellen alleen uit *in de volgorde in het notebook*.
# * je kunt eventueel opnieuw beginnen door de "Kernel" opnieuw te starten (via het cirkeltje, als bij een reload in de browser).
# * zie voor meer informatie: help, en [tutorial](https://www.dataquest.io/blog/jupyter-notebook-tutorial/)
#
# De meeste code-cellen kun je zo uitvoeren; probeer de code en de uitvoer te begrijpen.
# Bij sommige opdrachten moet je de code aanpassen, en dan de cel uitvoeren.
# ## Shortcuts
#
#
# ## Volgende Notebooks
#
# Ga nu verder met:
#
# * [MongoDB-intro](MongoDB-intro.ipynb) - met mongo-shell voorbeelden, of
# * [Connect](Connect.ipynb) - begin van MongoDB in Python
#
# Zie ook: [Inhoudsopgave](Inhoud.ipynb)
| Inleiding-Jupyter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/aubricot/computer_vision_with_eol_images/blob/master/generate_drive_fileid.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="uBkj1wGxU9Yw"
# # Generate Google Drive fileid to include in notebooks for others to easily download your files to their VM
# ---
# *Last Updated 23 Feb 2021*
# File share settings must be set to "anyone with the link." After you generate a fileid, files can be downloaded in future notebooks using the following code:
# # # !gdown --id yourfileid
# + id="GLT3wUub8BTD"
# Mount google drive to import/export files (prompts for authorization in Colab)
from google.colab import drive
drive.mount('/content/drive', force_remount=True)
# + id="ZnBAZcy476jU"
# Install xattr, used below to read Drive's per-file metadata attributes
# !apt-get install -qq xattr
# + id="eaWKm3Bg8Jqk"
# TO DO: Enter path to the file you want to generate a Google Drive fileid to using form field to right
path_to_file = "/content/drive/My Drive/EOL_19_20_21/spr21" #@param {type:"string"}
filename = "yolo-2c_slowlr_anch.cfg" #@param {type:"string"}
# %cd $path_to_file
print("Google Drive fileid:")
# Read the file's Drive id from its extended attributes
# !xattr -p 'user.drive.id' $filename
# + id="ugj3kda38vz2"
# Test download with generated id, replace fileid with output from cell above
# !gdown --id 1wgKwWsnmJDOWzrimp3GTPtpKLoBGoyMg
| generate_drive_fileid.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
plt.rcParams['figure.figsize'] = [15, 15]
# Load all first-year course meetings and keep only tutorial sections
# that actually have seats.
all_courses = pd.read_csv("../data/uoft-first-year-data-2020-07-28.csv")
tutorials = all_courses[all_courses.teachingMethod.eq('TUT')]
# .copy() so the column assignment below targets an independent frame instead
# of a view of `tutorials` (avoids pandas' SettingWithCopyWarning and silently
# lost assignments).
tutorials_with_enrollment = tutorials[tutorials.enrollmentCapacity > 0].copy()
# Vectorized division replaces the row-wise apply: identical values, one C-level pass.
tutorials_with_enrollment['enrollmentPercentage'] = (
    tutorials_with_enrollment.actualEnrolment / tutorials_with_enrollment.enrollmentCapacity
)
tutorials_with_enrollment
sorted_tutorials = tutorials_with_enrollment.sort_values('enrollmentPercentage')
def group_by_delivery_mode_and_sum(df):
    """Aggregate enrollment figures per delivery mode.

    Sums `enrollmentCapacity` and `actualEnrolment` over each `deliveryMode`
    and derives `enrollmentPercentage` = actual / capacity * 100.

    Parameters
    ----------
    df : pandas.DataFrame
        Must contain the columns 'deliveryMode', 'enrollmentCapacity'
        and 'actualEnrolment'.

    Returns
    -------
    pandas.DataFrame
        One row per delivery mode (index), with the two summed columns and
        the derived percentage column.
    """
    frame = df.groupby(['deliveryMode']).agg({'enrollmentCapacity': 'sum', 'actualEnrolment': 'sum'})
    # Vectorized column arithmetic replaces the per-row apply: identical
    # values, computed in a single C-level pass.
    frame['enrollmentPercentage'] = frame['actualEnrolment'] / frame['enrollmentCapacity'] * 100
    return frame
# Drop asynchronous-online tutorials, keep only course sections that offer
# more than one delivery mode, then aggregate those per mode.
tutorials_minus_onlasync = tutorials_with_enrollment.loc[tutorials_with_enrollment['deliveryMode'] != 'ONLASYNC']
sections_with_multiple_delivery_modes = tutorials_minus_onlasync.groupby(['code', 'section']).filter(
    lambda grp: len(grp['deliveryMode'].unique()) > 1
)
enrollments_with_multiple_modes = group_by_delivery_mode_and_sum(sections_with_multiple_delivery_modes)
enrollments_with_multiple_modes
# PEP 8 (E731): use a def instead of binding a lambda to a name — the function
# gets a proper __name__ for tracebacks and can carry a docstring.
def plot(df):
    """Bar-plot enrollment percentage, with actual enrolment on a secondary y-axis."""
    return df[['enrollmentPercentage', 'actualEnrolment']].plot(secondary_y=['actualEnrolment'], kind='bar', rot=0)
plot(enrollments_with_multiple_modes)
# Focus on MAT137Y1 tutorials, aggregate them per delivery mode, and prepare a
# percentage-sorted copy (scaled to 0-100) for the plot below.
mat137_sections = sections_with_multiple_delivery_modes.loc[sections_with_multiple_delivery_modes['code'] == 'MAT137Y1']
mat137_sections.head(100)
mat137_by_delivery_mode = group_by_delivery_mode_and_sum(mat137_sections)
mat137_by_delivery_mode
mat137_sorted = mat137_sections.sort_values(by='enrollmentPercentage')
mat137_sorted
mat137_sorted['enrollmentPercentage'] = mat137_sorted['enrollmentPercentage'].mul(100)
# +
# Bar chart of MAT137 tutorial fill rates, one bar per tutorial,
# colored by delivery mode.
fig, ax = plt.subplots()
# Colors are derived from the *sorted* frame so they line up with the bar order.
colors = tuple(np.where(mat137_sorted['deliveryMode'] == 'CLASS', 'r', 'b'))
mat137_sorted.plot(kind='bar', y='enrollmentPercentage', color=colors, ax=ax)
from matplotlib.lines import Line2D
# Manual legend: the single bar series would otherwise produce only one entry.
custom_lines = [Line2D([0], [0], color='r', lw=4),
                Line2D([0], [0], color='b', lw=4)]
ax.legend(custom_lines, ['In person', 'Online'])
plt.xticks([])  # per-tutorial tick labels would be unreadable
plt.ylabel("Percentage enrolled")
plt.xlabel("Tutorials")
plt.title("MAT137 Tutorial Enrolment")
# -
# Same breakdown for MAT135H1: per-mode aggregation plus a sorted,
# 0-100 scaled copy used by the plot below.
mat135_sections = sections_with_multiple_delivery_modes.loc[sections_with_multiple_delivery_modes['code'] == 'MAT135H1']
mat135_sections
mat135_by_delivery_mode = group_by_delivery_mode_and_sum(mat135_sections)
mat135_by_delivery_mode
mat135_sorted = mat135_sections.sort_values(by=['enrollmentPercentage', 'deliveryMode'])
mat135_sorted['enrollmentPercentage'] = mat135_sorted['enrollmentPercentage'].mul(100)
# +
# Bar chart of MAT135 tutorial fill rates, colored by delivery mode.
fig, ax = plt.subplots()
# BUG FIX: the colors must be derived from the plotted (sorted) frame, not
# from the unsorted `mat135_sections` — otherwise the color of each bar does
# not correspond to that bar's delivery mode (compare the MAT137 cell above,
# which correctly uses the sorted frame).
bar_colors = tuple(np.where(mat135_sorted['deliveryMode'] == 'CLASS', 'r', 'b'))
mat135_sorted.plot(kind='bar', y='enrollmentPercentage', color=bar_colors, ax=ax)
from matplotlib.lines import Line2D
# Manual legend: the single bar series would otherwise produce only one entry.
custom_lines = [Line2D([0], [0], color='r', lw=4),
                Line2D([0], [0], color='b', lw=4)]
ax.legend(custom_lines, ['In person', 'Online'])
plt.xticks([])  # per-tutorial tick labels would be unreadable
plt.ylabel("Percentage enrolled")
plt.xlabel("Tutorials")
plt.title("MAT135 Tutorial Enrolment")
# -
| notebooks/tutorialsByOnline.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# We need a TFR Dataset with A-values rather than Q-values. ```TournamentData.ipynb``` should provide either another "feature" or another TFR file set for A-values. - DONE
#
# Then RunQVT.ipynb should be used to run the A-values. - DONE
#
# It should be possible to choose the number of games or records in RunQVT. Well, it is, I guess - through the options.
# The A-values are derived by subtracting the default value, which can be found beyond the border on a 22x22 board from the q value. - DONE
#
# This should then be divided by 100.0 and the learned A-value should eventually be multiplied by 100, if ever to be re-used in order to recreate the Q-Function. - DONE
#
# The mask is used to not have the beyond-the-border fields influence the loss function. - DONE
# HeuristicGomokuPolicy's defense_options are buggy. - DONE
# ---
# ## Imitation learning
#
# We'll have a $\pi$-$A$ network consisting of a common residual network with
# - 5-8 layers for line recognition
# - multiple residual blocks
# - residual helps convergence and also reflects the stone-meets-line paradigm
# - network design must allow for an stone-to-field influence across the entire board
# - because the remotest stone could have a role in a threat sequence
#
# Trajectory production is super-slow. Need mass production for imitation training, because the heuristic threat search is slow.
#
# During imitation training, a single combined loss function should help $\pi$ and $A$ network to initialize to efficient networks.
#
# Imitation learning means $\pi$ network learns to predict the moves of the heuristic policy, while the $A$ network learns the advantage from the QFunction that's based on the same heuristics.
#
# We need the self-play recordings to generate the training data for imitation learning.
#
# Least-significant-move baseline LSMB: May take any other. Has the advantage of creating zeros for all but the relevant fields during the imitation learning phase.
#
# ---
# ## Deep RL
# There's only one type of reward: Win or loss. The reward is going to be discounted continuously back to the beginning of the trajectory and provided for $A$-learning.
#
# It's not clear how the different training phases ($\pi$ and $A$) might influence each other. The network might get pushed forth and back.
#
# Maybe it's better to initialize a single network and RL-train $\pi$ and $A$ independently thereafter.
#
# The weights (the critiques) are given by $[r_e - \tilde{A}(s_t)]$, where $r_e$ is episode's final reward and $\tilde{A}$ is the current advantage estimate for the least significant move after $s_t$
# # Thinking fast and slow
# I want to skip RL in favour of the Alpha-Zero approach with a policy-advised tree search, possibly considering RL for some side-line improvement later. However, I'm still going to initialize the network with Imitation learning from my heuristic policy.
#
# Interestingly, a pretty similar approach has been suggested by [Anthony 2017](https://arxiv.org/pdf/1705.08439.pdf), independent of the research done by Deepmind. Would be interesting to compare the approaches.
#
# [This essay](http://www.moderndescartes.com/essays/deep_dive_mcts/) is the most concise and comprehensible piece on UCT. It refers to a *NeuralNet* to provide value estimates for child nodes. I want to start from that, as it also advises an approach to vectorization to massively improve the performance of the search algorithm. The above algorithm takes a single policy evaluation at the child nodes to estimate a parent's value. It'd be interesting to consider some fast policy to chase down four or five more moves and average their results. Another thing is the formula used for evaluation of the UCB. That's adding 1 to the denominators for stability (precondition for vectorizing the UCB calculation. But it's also omitting the exploration parameter and doesn't take the logarithm on the parent's number of simulations. [This Medium blog](https://medium.com/@quasimik/monte-carlo-tree-search-applied-to-letterpress-34f41c86e238) has the correct formula and some more helpful explanations.
#
# We'll have the architecture derived in [LinesOfFive.ipynb](LinesOfFive.ipynb) learn by imitating the [HeuristicGomokuPolicy](HeuristicPolicy.py). The latter needs to have some function that maps the logic of method ```suggest``` into a learnable distribution.
#
# That should already create a pretty strong player. Additional steps would possibly include ideas from [Anthony 2017](https://arxiv.org/pdf/1705.08439.pdf) to have system 1 (the policy network) and system 2 (the UCT algorithm) learn from each other.
#
# [HeuristicPolicy.ipynb](HeuristicPolicy.ipynb) is now the starting point for creating initial training data. I still need to find out how to effectively reflect the results of the threat sequence search in the resulting action (move) distribution.
#
# I could start with implementing UCT with the heuristic policy and see how it does.
#
# Another hard thing is then the full documentation and operationalization of the entire quest. Providing an interactive interface to play with the algo. A web version of GO-UI also being able to run tournaments. Also, benchmarking my algo against the available players at the official Gomoku tournament site is to be considered.
#
# Last, not least, the entire thing should be presentable on various occasions, meetups, conferences, whatever.
| other_stuff/DeepGomoku/new/TODO.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Klassifikation
# Wir nutzen ein Datenset, das handschriftliche Ziffern in Form von 8x8 Feldern mit Werten der Farbstärke darstellt. Eine Beschreibung des Datensets gibt es bei [scikit-learn](https://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_digits.html#sklearn.datasets.load_digits) und im
# [UC Irvine Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets/Optical+Recognition+of+Handwritten+Digits).
#
# Wir importieren eine Funktion zu laden des Datensets und rufen dieses auf.
from sklearn.datasets import load_digits
digits = load_digits()
# Die Daten und Metadaten sind in einem sogenannten "Bunch"-Objekt organisiert
type(digits)
# Dieser Bunch hat folgende Attribute.
dir(digits)
# Schauen wir uns mal die Beschreibung an
print(digits.DESCR)
# Die eigentlichen Daten sind in einem numpy-Array abgelegt.
type(digits.data)
# Schauen wir es uns mal an.
digits.data
# Schauen wir uns die Dimension der Matrix an - es handelt
# sich um eine zweidimentionsale Matrix mit 1797 Zeilen und 64 Spalten.
# Es sind 1797 Bilder und 64 (8x8 Felder) Features.
digits.data.shape
# Das Target-Attribute ist ebenfalls ein numpy-array ...
type(digits.target)
# ... allerdings mit nur einer Dimension.
digits.target.shape
# Jeder Wert entspricht der geschriebenen Nummer
digits.target
# Das Bunch-Objekt hat noch das Attribut "target_names".
# Normalerweise wird jeder Zahl in "target" hier ein Name zugeordnet.
# Da es sich aber tatsächlich um Ziffern von 0 - 9 handelt, ist das in diesem
# Fall nicht wirklich nötig.
digits.target_names
# In diesem Datenset gibt es zusätzlich noch ein Attribute "images".
# Es enthält für jede geschriebene Ziffer die Farbwerte in ein 8x8 Matrix.
len(digits.images)
# Schauen wir uns zum Beispiel das erst Bild an ...
digits.images[0]
# ... oder das zehnte Bild
digits.images[9]
# Wir können die in dieser Form gespeicherten Farbintensitäten
# auch mit matplotlib anzeigen lassen. Hier zum Beispiel für die
# ersten 30 Bilder (wenn man mehr haben möchte muss man in subplot
# mehr als 3 Zeilen angeben.)
import matplotlib.pyplot as plt
fig, axes = plt.subplots(3, 10, figsize=(10, 5))
for ax, img in zip(axes.ravel(), digits.images):
ax.imshow(img, cmap=plt.cm.gray_r)
# +
# Um einen Klassifikator für ein Klassifikation zu trainieren
# und dann später seine Güte zu bewerten, wird das Datenset
# (genauer gesagt die Attribute "data" und "target") in
# ein Trainingsset (75%) und Testset (25%) aufgeteilt. Die Konvention
# ist hier eine großes X für den Variablen der Datenmatrix und ein kleines y
# für den Target-Vektor zu nutzen.
# Anmerkung - bei einigen der folgenden Schritte wird
# von bestimmten zufälligen Zuständen ausgegagen. Um diese
# fest zu setzen und somit die Analyse reproduzierbar zu machen
# kann man den Parameter random_state nutzen und mit einer Zahl
# versehen.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
digits['data'], digits['target'], random_state=1)
# -
# Die Maße der zweidimensionalen Trainigs-Daten-Matrix
X_train.shape
# Die Maße der zweidimensionalen Test-Daten-Matrix
X_test.shape
# Die Länge des Trainingsvektors entspricht der Anzahl an
# Zeilen der Trainingsmatrix.
y_train.shape
# Die Länge des Testsvektors entspricht der Anzahl an
# Zeilen der Testsmatrix.
y_test.shape
# Wir werden zuerst mit einem k-Nearest-Neighbor-Klassifizierer Arbeiten
# und laden dazu die Klasse ...
from sklearn.neighbors import KNeighborsClassifier
# ... und erzeugen ein Objekt davon. Hierbei können wird die Anzahl an
# zu betrachteten Nachbarn angeben:
knn_clf = KNeighborsClassifier(n_neighbors=1)
# Jetzt trainieren wir den Klassifikator mit den Trainingsdaten.
# Dafür wird in scikit-learn unabhängig von Klassifikator die
# Methode "fit" genutzt.
knn_clf.fit(X_train, y_train)
# Herzlichen Glückwunsch - wir haben unser aller erstes
# Klassifikator-Modell gebaut und trainiert.
# Jetzt kann mit diesem neue Daten (also Vektoren der Länger 64, die
# die 8x8 Bilder darstellen) klassifizieren - in diesem
# Fall also Vorauszusagen, welche Ziffer dargestellt wurde.
#
# Wir haben unsere Testdaten noch verfügbar und können die Methode "predict"
# des trainierten Klassifiers nutzen und erhalten die Voraussagen.
knn_clf.predict(X_test)
# Da wir für das Testset aber auch wissen welche Ziffern tatsächlich
# herauskommen sollte, können wir die Methode "score" des Klassifiers
# nutzen. Diese führt die Voraussage durch und vergleicht sie mit den
# tatsächlichen Target-Werten. Am Ende bekommen wir einen Wert zwischen
# 0 (schlecht) und 1 (gut).
knn_clf.score(X_test, y_test)
# Jetzt führen wir das gleiche Verfahren (Erstellen, Trainieren und Testen)
# dieses Classifiers mit 3 Nachbarn als Parameter durch.
knn_clf_3 = KNeighborsClassifier(n_neighbors=3)
knn_clf_3.fit(X_train, y_train)
knn_clf_3.score(X_test, y_test)
# Das schöne an scikit-learn ist, dass alle Klassifikatoren
# die gleichen Methoden besitzten. Sprich anderen Klassifikatoren
# nutzen auch fit, predict und score.
#
# Machen wir eine Klassifikation mit einem Random-Forest-Klassifikator:
from sklearn.ensemble import RandomForestClassifier
random_forest_cfl = RandomForestClassifier(random_state=1)
random_forest_cfl.fit(X_train, y_train)
random_forest_cfl.score(X_test, y_test)
# Das gleiche machen wir nur für eine Klassifikation mit einem
# künstlichen, neuralen Netz (Multi-Layer-Perceptron). Standardmäßig
# hat das Netz ein eine Hidden-Layer mit 100 Nodes.
from sklearn.neural_network import MLPClassifier
mlpc = MLPClassifier(random_state=1)
mlpc.fit(X_train, y_train)
mlpc.score(X_test, y_test)
# Wir können die Anzahl an Hidden-Layer und Anzahl an Nodes in diesen
# als Parameter setzen (hier 3 Schichten mit 200, 100 und 20 Nodes).
# Man kann das Ganze kondensiert schreiben, indem man die
# Methodenaufrufe direkt verknüpft.
MLPClassifier(random_state=1, hidden_layer_sizes=(200, 100, 20)).fit(
X_train, y_train).score(X_test, y_test)
| content/machine_learning_methods/solution_jupyter_notebooks/Klassifikation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from nltk.corpus import stopwords
stopwords.words('english')
import nltk
entries = nltk.corpus.cmudict.entries()
len(entries)
for entry in entries[10000:10025]:
print(entry)
# +
#Benz is credited with the invention of the motorcar
#Benz is credited with the invention of the automobile
# -
from nltk.corpus import wordnet as wn
wn.synsets('motorcar')
wn.synset('car.n.01').lemma_names()
| Section01/Walkthrough_Section 1-4_Example of a Lexical Resource.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 3002, "status": "ok", "timestamp": 1578451401023, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "18429685187145851841"}, "user_tz": -120} id="uS-nw_DBw3e1" outputId="802081f9-dd9a-4de7-e8ac-6c05cd67106d"
from google.colab import drive
drive.mount('/content/drive')
# + colab={} colab_type="code" id="NXV6pOpRw6Th"
import os
os.chdir('/content/drive/My Drive/data_augmentation_techniques')
# + [markdown] colab_type="text" id="DanWPaiAxMFs"
# ### Train without implicit semantic data augmentation (isda)
# + colab={"base_uri": "https://localhost:8080/", "height": 335} colab_type="code" id="uchecAejxCdh" outputId="16b1acdb-2178-4499-f8ef-3e793f8d87f3"
# Train baseline model. Thus, with no data augmentation
# !python main.py --[]
# + colab={} colab_type="code" id="RWmcAlDIxFlw"
# Train baseline model with cutout data augmentation
# !python main.py --cutout --path 'cutout'
# + colab={} colab_type="code" id="Lqs1k3MQxCBv"
# Train baseline model with autoaug data augmentation
# !python main.py --autoaug --path 'autoaug'
# + colab={} colab_type="code" id="OVKn_Sm5xBsz"
# Train baseline model with randaug data augmentation
# !python main.py --randaug --path 'randaug'
# + colab={} colab_type="code" id="Tx0mtq8QxBee"
# Train baseline model with augmix data augmentation
# !python main.py --augmix --path 'augmix'
# + [markdown] colab_type="text" id="zp7q618oxq5w"
# ### Train with implicit semantic data augmentation
# + colab={} colab_type="code" id="5mMzQIp9xBRw"
# Train baseline model with only isda data augmentation
# !python main_isda.py
# + colab={} colab_type="code" id="RpaJBRmsxAWZ"
# Train baseline model with isda and cutout data augmentation
# !python main_isda.py --cutout --path 'cutout'
# + colab={} colab_type="code" id="4tGhrpU_w8Ks"
# Train baseline model with isda and autoaug data augmentation
# !python main_isda.py --autoaug --path 'autoaug'
# + colab={} colab_type="code" id="7-pG5uN_w7UT"
# Train baseline model with isda and randaug data augmentation
# !python main_isda.py --randaug --path 'randaug'
# + colab={} colab_type="code" id="xr7NNJW9w7FZ"
# Train baseline model with isda and augmix data augmentation
# !python main_isda.py --augmix --path 'augmix'
| train.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <p style="font-family: Arial; font-size:3.70em;color:purple; font-style:bold">
# Creating Sankey Using Plotly
# </p>
# <br>
#
# <p>Here, I am going to build sankey diagram/sankey plot using plotly.</p>
# <p>There are few points to keep in mind while creating Sankey using plotly.</p>
#
# 1. plotly's sankey representation accept values of source and target as an **integers** not **strings.**
# 2. So, we have to come up with representation to describe our source and target with unique values associated with them. In other words, we have to prepare some kind of mapping of our source and target, which can be mapped back to our dataframe and which in turn can be used as source and target value.
# 3. Labels of nodes in plotly must contains source and target both labels. (which is same in any other library or framework too, I guess.)
# 4. Remember, source values start at 0, and the very first label in the label list is assigned to that source. Similarly, the second label is assigned to source value 1, and so on. This is the general convention; I found it while exploring examples and other resources on plotly sankey.
# <br><br>
# <b>Even if you don't fully understands above points. I am going to elaborate them while doing Data Transformation.</b>
import plotly
import pandas as pd
import numpy as np
from plotly.graph_objs import Sankey
data = pd.read_csv('FDU_Data.csv', sep =",")
print(type(data))
print("Shape will show number of rows and columns in DataFrame (rows,columns):",data.shape)
data.head()
# <p>We are going to need course and grade columns only to generate sankey plot. In which, <b>Course Title</b> will be our <b>Source</b>, <b>Grade</b> will be our <b>Target</b> and <b>Value</b> will be number of students who got A, A-, B+.. grades in the Source Subject.</p>
df = data[['REC_ID','CTITLE','Grade']]
print(df.shape)
df[:3]
# ## Performing Data Cleaning operations as I did in first Jupyter Notebook.
df.isnull().any()
df.dtypes
df['CTITLE'].str.contains(' ').any()
df['Grade'].str.contains(' ').any()
df = df[['CTITLE','Grade']].replace(' ',np.nan)
df = df.dropna()
df.shape
# ### We will need unique course title and grade list to create mapping for our final data frame.
Ctitle = df['CTITLE'].unique().tolist()
print(len(Ctitle))
Ctitle.sort()
print(Ctitle[:10])
grade = df['Grade'].unique().tolist()
del grade[-3:]
grade.sort()
print(grade)
print(len(grade))
# ## Pandas GroupBy abilities
# <br>
# <p>GroupBy performs the same operation as it does in Database. It is mainly used to perform three operation on data.</p>
# * **Splitting** the data into groups based on some criteria
# * **Applying** a function to each group independently
# * **Combining** the results into a data structure
# <br><p>For detailed information click the link below.</p>
# <a href = "http://pandas.pydata.org/pandas-docs/stable/groupby.html?highlight=groupby">Pandas GroupBy</a>
# <br><br>
# <b style = "font-size: 16px">Here, groupby(['CTITLE','Grade']) will split the data into buckets by Course Title and then splitting the record inside each Course Title bucket by Grade.</b>
grp_by = df.groupby(['CTITLE','Grade'])
grp_by
# ## What and How to display?
#
# <p>Here, We want to count number of rows in each grouping. For that <b>size()</b> is used on group by. So, you will see number of grades received in each course.</p>
#
# ### For more detailed information about our next two steps in group by click the link below.
# <a href ="https://community.modeanalytics.com/python/tutorial/pandas-groupby-and-python-lambda-functions/">GroupBy size() and unstack() functions.</a>
grp_by.size()
# ## GroupBy unstack() will convert the result into more readable format.
count_grades = grp_by.size().unstack().reset_index()
count_grades[:3]
# Removing unnecessary columns from count_grades.
del count_grades['NC']
del count_grades['P']
del count_grades['I']
# <p>As we can observe, we have NaN values. We have dealt with NaN before too. The only difference is that before, we removed the records from the dataframe. But here we cannot do that: it would be logically incorrect and would yield nothing. So, what we can do is <b>fill the null values with 0</b>, which is more appropriate in our case.</p>
# <br>
# <p>There are two ways to do it.</p>
#
# 1. Using **replace(np.nan, 0, inplace = True)**
# 2. Or using **fillna(np.nan, 0, inplace = True)**
#
# <p>They both are doing the same task.</p>
count_grades.replace(np.nan, 0, inplace = True)
count_grades.tail()
# # Reshaping Data using Pandas melt
# <br>
# <p>Types of Table formats.</p>
#
# * stacked format: The individual observations are stacked on top of each other.
# * record format: Each row is a single record, i.e. a single observation.
# * long format: This format will be long in the vertical direction as opposed to wide in the horizontal direction.
# * wide format: Wider rather than longer.(For Example our count_grade DataFrame.)
# * unstacked format: The individual observations do not stacked on top of each other.
# <br>
# <p>Our Table is in wide format. We have to unpivot the data frame which will be helpful, because it can reshape the data the way we want. </p>
# <br>
# <p>To refer to more resources for <b>melt.</b> The links are given below.</p>
# <a href = "https://hackernoon.com/reshaping-data-in-python-fa27dda2ff77"> Reshaping Data in Python.</a>
# <br>
# <a href = "https://pandas.pydata.org/pandas-docs/stable/generated/pandas.melt.html"> pd.melt documentation.</a>
#
# ### What does melt() do?
# <b>melt() function takes 3 arguments:</b>
#
# 1. First, the DataFrame to melt.
# 2. Second, ID variables to keep (Pandas will melt all of the other ones).
# 3. Finally, a name for the new, melted variable.
dfm = pd.melt(count_grades,id_vars = ['CTITLE'] # columns to keep
, var_name = 'target', value_name = 'value')
dfm.columns = ['source','target','value']
print(dfm.size)
dfm[:5]
# #### Filtering out values, as we do not want subjects having grade values of 0.
filter_zero = dfm['value'] > 0
dfm = dfm[filter_zero]
dfm.size
dfm.reset_index(drop = True)[:5]
dfm.isnull().any()
dfm.duplicated().any()
# ## Filter out the subjects having less then 10 students to optimize our plotting
# Keep only the courses whose grade counts add up to at least 10 students.
course_list = []
for course in Ctitle:
    enrolled = dfm[dfm['source'] == course]['value'].values.sum()
    if enrolled >= 10:
        course_list.append(course)
print(len(course_list))
course_list[:3]
# # Sankey Creating.
#
# <p>This would be few step process.</p>
#
# * Now that we have courses with more than 9 students, we can create the labels required for the nodes of Sankey. Remember as I said before that, this list should contain source and target both labels.
lbls = course_list + grade
lbls[-4:]
# * Creating Mapping for source values, as we have 32 courses with more than 9 students. We can map those 32 courses with range of first 32 numbers starting from 0. This mapping can be done as follows.
# * Creating list of numbers to map.
# * Zipping the course_list and number list to create the dictionary. The reason behind to creating dictionary is that, we would replace courses with values associated with it. So, course name would be key and integers will be value for that course.
crs_zip = list(range(0,32))
src_mapping = dict(zip(course_list, crs_zip))
src_mapping
# * Similarly, creating target mapping. We have grades from A to F which is mapped to 10 distinguish numbers.
tr_zip = list(range(32,42))
trg_mapping = dict(zip(grade,tr_zip))
trg_mapping
# #### Creating color mapping for links
clr_zip = ['#31a354','#a1d99b','#FFFF32','#FFFF66','#CCCC00','#FFAE19','#FFC966','#CC8400','#e34a33','#f03b20']
link_clr_mapping = dict(zip(grade,clr_zip))
link_clr_mapping
# ## Time to apply filters
#
# 1. Firstly, I will create data frame copy.
# 2. Secondly, Filter out the courses less then 10 students using isin() method. It checks if the values in particular column or whole dataframe present in given list or not.
# 3. Applying Source and Target Mapping.
dfm_cpy = dfm.copy()
print(dfm_cpy.size)
dfm_cpy[:3]
dfm_cpy = dfm_cpy[dfm_cpy['source'].isin(course_list)]
print(dfm_cpy.size)
dfm_cpy[:5]
# ### To map link color for each grades we need to have grades values for each record. For that we need to preserve our target column values.
link_color = dfm_cpy['target'].copy()
link_color[:4]
dfm_cpy.replace(src_mapping, inplace = True)
dfm_cpy.replace(trg_mapping, inplace = True)
# ### You would notice that each course and grade is replaced by values assigned to them.
dfm_cpy[:5]
# ### Creating new column to map target colors.
dfm_cpy['link_color'] = link_color
dfm_cpy[:2]
# ### Mapping colors to target links
dfm_cpy.replace(link_clr_mapping, inplace = True)
dfm_cpy[:4]
# ## Using plotly offline to generate diagram on localhost.
# +
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
from plotly.graph_objs import *
init_notebook_mode(connected=True)
# -
# ## Here, notice that data_trace is dictionary, label = lbls is lables for nodes, source, target and value are assigned the data frame we created.
# +
data_trace = dict(
type='sankey',
domain = dict(
x = [0,1],
y = [0,1]
),
orientation = "h",
visible = True,
valueformat = ".0f",
valuesuffix = " Students",
node = dict(
pad = 15,
thickness = 30,
line = dict(
color = "black",
width = 0.5
),
label = lbls,
#color = dfm_cpy['node_color']
),
link = dict(
source = dfm_cpy['source'],
target = dfm_cpy['target'],
value = dfm_cpy['value'],
color = dfm_cpy['link_color'],# Here we can add colors for each link which connected source to target.
)
)
layout = dict(
title = "Course Sankey Diagram",
height = 1000,
width = 1300,
font = dict(
size = 12
),
)
# -
fig = Figure(data=[data_trace], layout=layout)
plotly.offline.plot(fig, validate=False)
# ### Ipysankey Widget has also capability to represent sankey diagram. However, I noticed that big visualization should be generated separately because, Jupyter Notebook could run into problem while generating inline plots.
#
# #### Below, you will see small example of sankey diagram for one course having 38 students.
from ipysankeywidget import SankeyWidget
from ipywidgets import Layout
layout = Layout(width="900", height="900")
filt_subject = dfm['source'] == 'Academic Research and Writing'
course_df = dfm[filt_subject]
filt_student = course_df['value'].values.sum()
print(filt_student)
link = course_df.to_dict(orient = 'records')
SankeyWidget(links = link, margins=dict(top=10, bottom=0, left=250, right=60))
| Course Grade Sankey.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3
# language: python
# name: conda_python3
# ---
from alpha_vantage.timeseries import TimeSeries
# BUG FIX: `import plotly.graph_objs as go` binds only the name `go`, not
# `plotly`, so `plotly.__version__` below raised NameError. Import the
# top-level package explicitly.
import plotly
import plotly.graph_objs as go
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
import pandas as pd
import ta
init_notebook_mode(connected=True)
plotly.__version__
iplot([{"x": [1, 2, 3], "y": [3, 1, 6]}])
ts = TimeSeries(key='<KEY>', output_format='pandas')
ts_data, ts_meta_data = ts.get_daily(symbol='QQQ', outputsize='compact')
ts_data['new_col'] = range(1, len(ts_data) + 1)
ts_data = ts_data.reset_index()
ts_ema5 = ta.trend.EMAIndicator(close=ts_data["1. open"], n=5)
ts_data['ema5'] = ts_ema5.ema_indicator()
# +
fig = go.Figure(data=[go.Candlestick(x=ts_data['date'],
open=ts_data['1. open'],
high=ts_data['2. high'],
low=ts_data['3. low'],
close=ts_data['4. close'])])
fig
# -
| SagemakerTrainingCleanData.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # The Capacity and Power of Quantum Machine Learning Models & the Future of Quantum Machine Learning
#
# <div class="youtube-wrapper">
# <iframe src="https://www.youtube.com/embed/1-IrbRR4rwM" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>
# </div>
#
#
# In this lecture, Amira presents different open questions regarding quantum machine learning and quantum computing in general. The aim is to explain the capacity and power of quantum machine learning for applications today and tomorrow. She begins by focusing on the definition of capacity in the field of classical machine learning. Indeed, it has many definitions: statistical complexity, expressivity, power. These can be summarized by asking how many functions the model can approximate. The more functions it can approximate, the more capacity it has. However, counterintuitively, higher capacity is not necessarily a good thing. Capacity is linked to generalization via the bias/variance tradeoff, where we want the model neither to underfit nor to overfit. Thus the optimal capacity is the one with the lowest generalization error in the bias/variance tradeoff. The question of how to measure the capacity of a machine learning model, in classical as well as quantum computing, is therefore very important and open.
#
# ### Suggested links
#
# - Download the lecturer's notes [here](/content/summer-school/2021/resources/lecture-notes/Lecture10.2.pdf)
#
# - Read Analytics India Magazine on [Big Data To Good Data](https://analyticsindiamag.com/big-data-to-good-data-andrew-ng-urges-ml-community-to-be-more-data-centric-and-less-model-centric/)
# - Watch IBM - Qiskit on [The Future of Quantum Machine Learning](https://www.youtube.com/watch?v=5UsJV2BNj2U&ab_channel=Qiskit)
#
# ### Other resources
#
# - Read Zhang et al. on [Understanding deep learning requires rethinking generalization](https://arxiv.org/abs/1611.03530)
# - Read Bartlett et al. on [Spectrally-normalized margin bounds for neural networks](https://arxiv.org/abs/1706.08498)
# - Read Neyshabur et al. on [A PAC-Bayesian Approach to Spectrally-Normalized Margin Bounds for Neural Networks](https://arxiv.org/abs/1707.09564)
# - Read Keskar et al. on [On Large-Batch Training for Deep Learning: Generalization Gap and Sharp Minima](https://arxiv.org/abs/1609.04836)
# - Read Sim et al. on [Expressibility and entangling capability of parameterized quantum circuits for hybrid quantum-classical algorithms](https://arxiv.org/abs/1905.10876)
# - Read McClean et al. on [Barren plateaus in quantum neural network training landscapes](https://arxiv.org/abs/1803.11173)
# - Read Schuld et al. on [The effect of data encoding on the expressive power of variational quantum machine learning models](https://arxiv.org/abs/2008.08605)
# - Read Pérez-Salinas et al. on [Data re-uploading for a universal quantum classifier](https://arxiv.org/abs/1907.02085)
# - Read Abbas et al. on [The power of quantum neural networks](https://arxiv.org/abs/2011.00027)
#
#
| notebooks/summer-school/2021/lec10.2.ipynb |
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .cs
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: .NET (C#)
// language: C#
// name: .net-csharp
// ---
// [](https://mybinder.org/v2/gh/oddrationale/AdventOfCode2020CSharp/main?urlpath=lab%2Ftree%2FDay12.ipynb)
// # --- Day 12: Rain Risk ---
using System.IO;
var navInstructions = File.ReadAllLines(@"input/12.txt").ToArray();
// Compass orientation. Declaration order (N, E, S, W) is clockwise, so a
// right/left quarter turn maps to +1/-1 on the underlying enum value mod 4.
enum Orientation
{
    N,
    E,
    S,
    W,
};
// Part 1 ship: N/E/S/W move the ship in that absolute direction, 'F' moves
// it in the direction it is currently facing, and L/R rotate the facing in
// 90-degree increments. Coordinates: x grows east, y grows south (N is -y).
class Ship1
{
    private int _x = 0;
    private int _y = 0;
    public int X { get => _x; }
    public int Y { get => _y; }
    // Manhattan distance from the starting position (the puzzle answer).
    public int Distance { get => Math.Abs(_x) + Math.Abs(_y); }
    // The ship starts facing east.
    public Orientation Facing { get; set; } = Orientation.E;
    // Translate the ship. 'F' uses the current facing; N/E/S/W move in that
    // absolute direction regardless of facing.
    private void Move(string instruction)
    {
        var direction = instruction[0] == 'F' ? Facing : Enum.Parse<Orientation>(instruction[0].ToString());
        var amount = Convert.ToInt32(instruction[1..]);
        switch (direction)
        {
            case Orientation.N:
                _y -= amount;
                break;
            case Orientation.E:
                _x += amount;
                break;
            case Orientation.S:
                _y += amount;
                break;
            case Orientation.W:
                _x -= amount;
                break;
        }
    }
    // Rotate the facing. Orientation values are laid out clockwise, so a
    // right turn adds quarter-turns and a left turn subtracts them.
    private void Turn(string instruction)
    {
        var degrees = Convert.ToInt32(instruction[1..]);
        // BUG FIX: normalize to 0..3 quarter-turns first. The original
        // computed ((int)Facing + 4 - degrees / 90) % 4, which goes negative
        // for left turns of 360 degrees or more (C# % keeps the sign) and
        // would cast to an invalid Orientation.
        var steps = (degrees / 90) % 4;
        switch (instruction[0])
        {
            case 'L':
                Facing = (Orientation)(((int)Facing + 4 - steps) % 4);
                break;
            case 'R':
                Facing = (Orientation)(((int)Facing + steps) % 4);
                break;
        }
    }
    // Dispatch one instruction line, e.g. "F10", "N3", "R90".
    public void ReadNavInstruction(string instruction)
    {
        switch (instruction[0])
        {
            case 'N':
            case 'E':
            case 'S':
            case 'W':
            case 'F':
                Move(instruction);
                break;
            case 'L':
            case 'R':
                Turn(instruction);
                break;
        }
    }
}
// Run every puzzle instruction through the part-1 ship and report the
// Manhattan distance from the start (the notebook cell's last expression).
var ship1 = new Ship1();
foreach (var i in navInstructions)
{
    ship1.ReadNavInstruction(i);
}
ship1.Distance
// # --- Part Two ---
// Part 2 ship: N/E/S/W/L/R manipulate a waypoint held *relative* to the
// ship; 'F' moves the ship toward the waypoint a given number of times.
// Coordinates: x grows east, y grows south (N is -y).
class Ship2
{
    private int _x = 0;
    private int _y = 0;
    // Waypoint offset from the ship; starts 10 east, 1 north.
    private int _dX = 10;
    private int _dY = -1;
    public int X { get => _x; }
    public int Y { get => _y; }
    // Absolute waypoint position (ship position plus relative offset).
    public int WaypointX { get => _x + _dX; }
    public int WaypointY { get => _y + _dY; }
    // Manhattan distance of the ship from its starting position.
    public int Distance { get => Math.Abs(_x) + Math.Abs(_y); }
    // 'F' jumps the ship onto the waypoint n times (the relative offset is
    // unchanged, so every jump advances by the same vector); N/E/S/W shift
    // the waypoint offset, not the ship itself.
    private void Move(string instruction)
    {
        if (instruction[0] == 'F')
        {
            for (int i = 0; i < Convert.ToInt32(instruction[1..]); i++)
            {
                _x = WaypointX;
                _y = WaypointY;
            }
            return;
        }
        switch (Enum.Parse<Orientation>(instruction[0].ToString()))
        {
            case Orientation.N:
                _dY -= Convert.ToInt32(instruction[1..]);
                break;
            case Orientation.E:
                _dX += Convert.ToInt32(instruction[1..]);
                break;
            case Orientation.S:
                _dY += Convert.ToInt32(instruction[1..]);
                break;
            case Orientation.W:
                _dX -= Convert.ToInt32(instruction[1..]);
                break;
        }
    }
    // Rotate the waypoint offset around the ship in quarter turns. With y
    // pointing south, a left (counter-clockwise) turn is (dX, dY) -> (dY, -dX)
    // and a right turn is (dX, dY) -> (-dY, dX).
    private void Turn(string instruction)
    {
        var degrees = Convert.ToInt32(instruction[1..]);
        switch (instruction[0])
        {
            case 'L':
                for (int i = 0; i < degrees / 90; i++)
                {
                    (_dX, _dY) = (_dY, -_dX);
                }
                break;
            case 'R':
                for (int i = 0; i < degrees / 90; i++)
                {
                    (_dX, _dY) = (-_dY, _dX);
                }
                break;
        }
    }
    // Dispatch one instruction line, e.g. "F10", "N3", "R90".
    public void ReadNavInstruction(string instruction)
    {
        switch (instruction[0])
        {
            case 'N':
            case 'E':
            case 'S':
            case 'W':
            case 'F':
                Move(instruction);
                break;
            case 'L':
            case 'R':
                Turn(instruction);
                break;
        }
    }
}
// Run every puzzle instruction through the part-2 ship and report the
// Manhattan distance from the start (the notebook cell's last expression).
var ship2 = new Ship2();
foreach (var i in navInstructions)
{
    ship2.ReadNavInstruction(i);
}
ship2.Distance
| Day12.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6
# language: python
# name: python3.6
# ---
# %run 'pg_config.py'
import json, time, traceback
import requests as rq
from sqlalchemy import create_engine, func
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, sessionmaker
from sqlalchemy.dialects.postgresql import insert
engine = create_engine('postgresql://' + PG_USER + ':' + PG_PASSWORD + '@localhost/' + PG_DATABASE)
session = sessionmaker(bind=engine)
import helpdesk_model as hm
def get_departments(api_key=API_KEY, endpoint=ENDPOINT):
    """Fetch all departments from the helpdesk API and insert them into the
    departments table, silently skipping rows whose id already exists.

    API_KEY and ENDPOINT come from the `%run 'pg_config.py'` cell above.
    """
    data = rq.get(endpoint + 'departments?apikey=' + api_key).json()
    with engine.begin() as con:
        # Map the API field names onto the ORM column names.
        vals = [
            {
                'id' : d['departmentid'],
                'name' : d['name'],
                'description' : d['description'],
                'preset_status' : d['presetstatus']
            } for d in data['response']['departments']]
        # on_conflict_do_nothing makes the import idempotent across re-runs.
        stm = insert(hm.Department).on_conflict_do_nothing()
        con.execute(stm, vals)
# Run the import once at notebook execution time.
get_departments()
def get_tags(api_key=API_KEY, endpoint=ENDPOINT):
    """Fetch all tags from the helpdesk API and insert them into the tags
    table, silently skipping rows whose id already exists.
    """
    payload = rq.get(endpoint + 'tags?apikey=' + api_key).json()
    rows = [{'id' : t['id'], 'name' : t['name']} for t in payload['response']['tags']]
    with engine.begin() as con:
        # on_conflict_do_nothing makes the import idempotent across re-runs.
        con.execute(insert(hm.Tag).on_conflict_do_nothing(), rows)
get_tags()
# +
def get_customers_fetcher(limit, offset, max=None, api_key=API_KEY, endpoint=ENDPOINT):
    """Yield pages of customers from the helpdesk API, `limit` records at a
    time starting at `offset`, stopping on an empty page (or once `offset`
    reaches `max`, when given).
    """
    def create_url(start):
        return (endpoint + 'customers'
                + '?apikey=' + api_key
                + '&limitcount=' + str(limit)
                + '&limitfrom=' + str(start))
    # Keep paging until the API returns an empty batch or we hit the cap.
    while max is None or offset < max:
        batch = rq.get(create_url(offset)).json()['response']['customers']
        if not batch:
            return
        yield batch
        offset += limit
def get_customers(offset, limit=1000, max=None):
    """Fetch customers from the API starting at row `offset` and insert them
    into the customers table, skipping rows that already exist.

    Returns True once the fetcher is exhausted.
    """
    stm = insert(hm.Customer).on_conflict_do_nothing()
    for i in get_customers_fetcher(limit, offset, max):
        # One transaction per fetched page.
        with engine.begin() as con:
            vals = [
                {
                    'contact_id' : c['contactid'],
                    'user_id' : c['userid'],
                    'date_created' : c['datecreated'],
                    'role' : c['role'],
                    'gender' : c['gender'],
                    'email' : c['email']
                } for c in i]
            # NOTE(review): rows are executed one at a time; a single
            # executemany over `vals` would likely be faster — confirm the
            # per-row loop is intentional.
            for j in vals:
                con.execute(stm,j)
        time.sleep(2)  # crude rate limiting between API pages
    return True
# Resume the import from the number of customers already stored (the row
# count is passed as the fetch offset), retrying forever on errors with a
# 20-second backoff.
while True:
    customers_count = session().query(func.count(hm.Customer.contact_id)).first()[0]
    try:
        if get_customers(customers_count):
            break
    except Exception:
        traceback.print_exc()
        time.sleep(20)
# +
def get_conversations_fetcher(limit, offset, max=None, api_key=API_KEY, endpoint=ENDPOINT):
    """Yield pages of conversations from the helpdesk API, `limit` records
    at a time starting at `offset`, stopping on an empty page (or once
    `offset` reaches `max`, when given).
    """
    def create_url(offset):
        url = endpoint + 'conversations' \
            + '?apikey=' + api_key \
            + '&limit=' + str(limit) \
            + '&offset=' + str(offset)
        return url
    while(True):
        if max is not None:
            if offset >= max:
                break
        data = rq.get(create_url(offset)).json()
        # An empty page means we have paged past the last conversation.
        if not data['response']['conversations']:
            break
        yield data['response']['conversations']
        offset += limit
def get_messages_fetcher(conv_vals, api_key=API_KEY, endpoint=ENDPOINT):
    """Yield (conversation_id, messages-response) pairs for each conversation
    dict in `conv_vals` (each dict must carry an 'id' key).
    """
    def create_url(conv_id):
        # BUG FIX: the original read `endpoint + \` continued by
        # `+ 'conversations/'`, i.e. `endpoint + (+'conversations/')` — a
        # unary plus on a string, which raises TypeError on the first call.
        return endpoint + 'conversations/' + conv_id + '/messages?apikey=' + api_key
    for conv in conv_vals:
        data = rq.get(create_url(conv['id'])).json()
        yield (conv['id'], data['response'])
def get_conversations(offset, limit=1000, max=None):
    """Page through conversations starting at row `offset` and persist
    conversations, their tag links, message groups and messages.

    Returns True once the API reports no more conversations.
    """
    def get_message_groups(conv_vals):
        # Flatten the per-conversation messages API response into two row
        # lists: one for message groups, one for individual messages.
        mgrp_vals = []
        msgs_vals = []
        for id, data_global in get_messages_fetcher(conv_vals):
            for data in data_global['groups']:
                mgrp_vals.append(
                    {
                        'id' : data['messagegroupid'],
                        'user_id' : data['userid'],
                        'rtype' : data['rtype'],
                        'rstatus' : data['rstatus'],
                        'date_created' : data['datecreated'],
                        'date_finished' : data['datefinished'],
                        'conversation_id' : id
                    })
                # A group may have no 'messages' key at all.
                data_msgs = []
                if 'messages' in data:
                    data_msgs = data['messages']
                for dt in data_msgs:
                    msgs_vals.append(
                        {
                            'id' : dt['messageid'],
                            'user_id' : dt['userid'],
                            'rtype' : dt['rtype'],
                            'date_created' : dt['datecreated'],
                            'message_text' : dt['message'].replace('\x00', ''), #psycopg2 disallows NUL
                            'message_group_id' : data['messagegroupid']
                        })
        return mgrp_vals, msgs_vals
    # One idempotent (skip-on-conflict) INSERT statement per target table.
    conv_stm = insert(hm.Conversation).on_conflict_do_nothing()
    tags_stm = insert(hm.tags_conversations).on_conflict_do_nothing()
    mgrp_stm = insert(hm.MessageGroup).on_conflict_do_nothing()
    msgs_stm = insert(hm.Message).on_conflict_do_nothing()
    for i in get_conversations_fetcher(limit, offset, max):
        time.sleep(2)  # crude rate limiting between API pages
        conv_vals = []
        tags_vals = []
        for c in i:
            conv_vals.append(
                {
                    'id' : c['conversationid'],
                    'department_id' : c['departmentid'],
                    'status' : c['status'],
                    'channel_type' : c['channel_type'],
                    'date_created' : c['datecreated'],
                    'date_changed' : c['datechanged'],
                    'date_due' : c['datedue'],
                    'owner_name' : c['ownername'],
                    'owner_email' : c['owneremail'],
                    'subject' : c['subject'],
                })
            # 'tags' arrives as a comma-separated string (or None).
            tags = c['tags'].split(',') if c['tags'] is not None else None
            if tags is not None:
                tags_vals += [{
                    'conversation_id' : c['conversationid'],
                    'tag_id' : t} for t in tags]
        mgrp_vals, msgs_vals = get_message_groups(conv_vals)
        #print(conv_vals)
        #print(tags_vals)
        #print(mgrp_vals)
        #print(msgs_vals)
        # Write the whole page in one transaction, skipping empty lists.
        with engine.begin() as con:
            if len(conv_vals) > 0:
                con.execute(conv_stm, conv_vals)
            if len(tags_vals) > 0:
                con.execute(tags_stm, tags_vals)
            if len(mgrp_vals) > 0:
                con.execute(mgrp_stm, mgrp_vals)
            if len(msgs_vals) > 0:
                con.execute(msgs_stm, msgs_vals)
    return True
# Resume-and-retry driver: the number of conversations already stored is
# used as the paging offset, so a rerun picks up where the last run stopped.
done = False
while not done:
    stored_count = session().query(func.count(hm.Conversation.id)).first()[0]
    try:
        done = get_conversations(stored_count, limit=500)
    except Exception:
        # Log the failure, back off, then recompute the offset and retry.
        traceback.print_exc()
        time.sleep(20)
# -
| fetcher.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Supervised Learning: Decision Trees
# One very direct way of performing supervised learning is expressing output as a combination of the predictors (features). An **adaptive basis-function model** (ABM) is one example of this.
#
# $$f(x) = w_0 + \sum_{j=1}^k w_j \phi_j(\mathbf{x})$$
#
# here, $\phi_j$ is a *basis function*, which is typically parametric:
#
# $$\phi_j(\mathbf{x}) = \phi_j(\mathbf{x}|\alpha_j)$$
#
# The parameter set for this model is thus $\theta = \{\mathbf{w} = w_0,\ldots,w_k; \mathbf{\alpha} = \alpha_1, \ldots, \alpha_k\}$. This model is *not* linear in the parameters.
#
#
# **Decision trees** use an ABM to *recursively partition* the space of predictor variables into a piecewise-constant response surface. We can consider each component $j=1,\ldots,k$ to be a region in the response surface, and $w_j$ the expected response in that region.
#
# $$f(x) = \sum_{j=1}^k w_j I(\mathbf{x} \in R_j)$$
#
# Each parameter $\alpha_j$ encodes both (1) a variable used for splitting and (2) the corresponding threshold value. Specifically, the basis functions define the regions, and the weights encode the response value in each region.
#
# This particular formulation implies a regression-type model, but we can generalize this to classification by storing the *distribution over classes* in each leaf, instead of the mean response.
# To get a sense of how decision trees work, consider a diabetes dataset from which we wish to predict disease progression from a range of predictors. In the plot below, the response variable (`target`, an index of disease progression) is color-coded as a function of two variables, metabolic rate (`bmi`) and a blood serum measurement (`ltg`).
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
sns.set()
from sklearn.datasets import load_diabetes
# Predictors: "age" "sex" "bmi" "map" "tc" "ldl" "hdl" "tch" "ltg" "glu"
diabetes = load_diabetes()
y = diabetes['target']
bmi, ltg = diabetes['data'][:,[2,8]].T
plt.scatter(ltg, bmi, c=y, cmap="Reds")
plt.colorbar()
plt.xlabel('ltg'); plt.ylabel('bmi');
# -
# One approach to building a predictive model is to subdivide the variable space into regions, by sequentially subdividing each variable. For example, if we split `ltg` at a threshold value of -0.01, it does a reasonable job of isolating the large values in one of the resulting subspaces.
ltg_split = -0.01
plt.scatter(ltg, bmi, c=y, cmap="Reds")
plt.vlines(ltg_split, *plt.gca().get_ylim(), linestyles='dashed')
plt.colorbar()
plt.xlabel('ltg'); plt.ylabel('bmi');
# However, that region still contains a fair number of low (light) values, so we can similarly bisect the region using a `bmi` value of -0.03 as a threshold value:
bmi_split = -0.03
plt.scatter(ltg, bmi, c=y, cmap="Reds")
plt.vlines(ltg_split, *plt.gca().get_ylim(), linestyles='dashed')
plt.hlines(bmi_split, ltg_split, plt.gca().get_xlim()[1], linestyles='dashed')
plt.colorbar()
plt.xlabel('ltg'); plt.ylabel('bmi');
# We can use this partition to create a piecewise-constant function, which returns the average value of the observations in each region defined by the threshold values. We could then use this rudimentary function as a predictive model.
np.mean(y[(bmi>bmi_split) & (ltg>ltg_split)])
np.mean(y[(bmi<=bmi_split) & (ltg>ltg_split)])
np.mean(y[ltg<ltg_split])
# The choices for splitting the variables here were relatively arbitrary. Better choices can be made using a cost function $C$, such as residual sums of squares (RSS).
#
# $$C = \sum_j \sum_{i \in R_j} (y_i - \hat{y}_{R_j})^2$$
#
# where $\hat{y}_{R_j}$ is the mean response for the training observations in the jth region.
#
# ### Exercise
#
# Use residual sums of squares to select competitive threshold values for the predictive model defined above
# +
# Write your answer here
# -
# The recursive partitioning demonstrated above results in a **decision tree**. The regions defined by the trees are called *terminal nodes*. Locations at which a predictor is split, such as `bmi`=-0.03, are called *internal nodes*. As with this simple example, splits are not generally symmetric, in the sense that splits do not occur similarly on all branches.
#
# Now consider a subset of three variables from the Titanic dataset, which we would like to use to predict survival from the disaster. The following describes one such decision tree:
#
# We first check if gender of the passenger is male. If "no", we follow the right branch and end up in a leaf where the probability of survival is $p(y=1,x_1=F)=0.73$, so we predict survival ($y=1$) at this node (36% of observations fall under this leaf). If the passenger is male, we then check the age of the passenger. If he is older than 9.5 years, then the probability of survival $p(y=1,x_1=M,x_2>9.5)=0.17$, so we predict death ($y=0$). If, on the other hand, the passenger is younger than 9.5 years, we then check if the number of siblings and spouses on board was higher than 2.5; if "yes", then the probability of survival $p(y=1, x_1=M, x_2>9.5, x_3>2.5)=0.05$, so we predict death, otherwise we predict survival with $p(y=1, x_1=M, x_2>9.5 , x_3 \lt 2.5)=0.89$. Hence, these probabilities are just the empirical fraction of positive examples that satisfy each conjunction of feature values, which defines a path from the root to a leaf.
#
# 
#
# There is no way to feasibly evaluate all possible partitions. Instead, the strategy is to use a top-down, **greedy** approach that is optimal (according to a particular cost function) for the current split only. By "greedy", we mean that at each step it chooses the most advantageous binary partition, not taking into account the impact of the choice on the quality of subsequent partitions.
#
# $$(j^*, t^*) = \text{argmin}_{j,t} C(\{\mathbf{x}_i,y_i: x_{ij} \le t\}) + C(\{\mathbf{x}_i,y_i: x_{ij} \gt t\})$$
#
# where $C$ is a cost function, $j$ and $t$ are a variable index and cutpoint, respectively. We will restrict consideration to binary partitions.
#
# ## Classification Trees
#
# In addition to regression trees, we can also use decision trees on categorical outcomes, and these are called classification trees. The primary difference in implementation is that residual sums of squares is no longer an appropriate splitting criterion.
# ### Entropy
#
# An alternative splitting criterion for decision tree learning algorithms is *information gain*. It measures how well a particular attribute distinguishes among different target classifications. Information gain is measured in terms of the expected reduction in the entropy or impurity of the data. The entropy of a set of probabilities is:
#
# $$H(p) = -\sum_i p_i log_2(p_i)$$
#
# If we have a set of binary responses from some variable, all of which are positive/true/1, then knowing the values of the variable does not hold any predictive value for us, since all the outcomes are positive. Hence, the entropy is zero:
# +
import numpy as np
entropy = lambda p: -np.sum(p * np.log2(p)) if not 0 in p else 0
# -
entropy([.4,.6])
# However, if the variable splits responses into equal numbers of positive and negative values, then entropy is maximized, and we wish to know about the feature:
entropy([0.5, 0.5])
pvals = np.linspace(0, 1)
plt.plot(pvals, [entropy([p,1-p]) for p in pvals])
# The entropy calculation tells us how much additional information we would obtain with knowledge of the variable.
#
# So, if we have a set of candidate covariates from which to choose as a node in a decision tree, we should choose the one that gives us the most information about the response variable (*i.e.* the one with the highest entropy).
#
# ### Misclassification Rate
#
# Alternatively, we can use the misclassification rate:
#
# $$C(j,t) = \frac{1}{n_{jt}} \sum_{y_i: x_{ij} \gt t} I(y_i \ne \hat{y})$$
#
# where $\hat{y}$ is the most probable class label and $n_{ij}$ is the number of observations in the data subset obtained from splitting via $j,t$.
#
# ### Gini index
#
# The Gini index is simply the expected error rate:
#
# $$C(j,t) = \sum_{k=1}^K \hat{\pi}_{jt}[k] (1 - \hat{\pi}_{jt}[k]) = 1 - \sum_{k=1}^K \hat{\pi}_{jt}[k]^2$$
#
# where $\hat{\pi}_{jt}[k]$ is the probability of an observation being correctly classified as class $k$ for the data subset obtained from splitting via $j,t$ (hence, $(1 - \hat{\pi}_{jt}[k])$ is the misclassification probability).
gini = lambda p: 1. - (np.array(p)**2).sum()
pvals = np.linspace(0, 1)
plt.plot(pvals, [entropy([p,1-p])/2. for p in pvals], label='Entropy')
plt.plot(pvals, [gini([p,1-p]) for p in pvals], label='Gini')
plt.legend()
# ## ID3
#
# A given cost function can be used to construct a decision tree via one of several algorithms. The Iterative Dichotomiser 3 (ID3) is one such algorithm, which uses entropy, and a related concept, *information gain*, to choose features and partitions at each classification step in the tree.
#
# Information gain is the difference between the current entropy of a system and the entropy measured after a feature is chosen. If $S$ is a set of examples and $X$ is a possible feature on which to partition the examples, then:
#
# $$G(S,X) = \text{Entropy}(S) - \sum_{x \in X} \frac{\#(S_x)}{\#(S)} \text{Entropy}(S_x)$$
#
# where $\#$ is the count function and $x$ is a particular value of $X$.
#
# Let's say $S$ is a set of survival events, $S = \{s_1=survived, s_2=died, s_3=died, s_4=died\}$ and a particular variable $X$ can have values $\{x_1, x_2, x_3\}$. To perform a sample calculation of information gain, we will say that:
#
# * $X(s_1) = x_2$
# * $X(s_2) = x_2$
# * $X(s_3) = x_3$
# * $X(s_4) = x_1$
#
# The current entropy of this state is:
#
# $$\begin{align}
# \text{Entropy}(S) &= -p^{(+)} \log_2(p^{(+)}) - p^{(-)} \log_2(p^{(-)}) \\
# &= -0.25 \log_2(0.25) - 0.75 \log_2(0.75) \\
# &= 0.5 + 0.311 = 0.811
# \end{align}$$
#
# Now, we need to compute the information after selecting variable $X$, which is the sum of three terms:
#
# $$\begin{align}
# \frac{\#(S_{x1})}{\#(S)} \text{Entropy}(S) &= 0.25 (-0 \log_2(0) - 1 \log_2(1)) = 0\\
# \frac{\#(S_{x2})}{\#(S)} \text{Entropy}(S) &= 0.5 (-0.5 \log_2(0.5) - 0.5 \log_2(0.5)) = 0.5\\
# \frac{\#(S_{x3})}{\#(S)} \text{Entropy}(S) &= 0.25 (-0 \log_2(0) - 1 \log_2 1) = 0\\
# \end{align}$$
#
# Therefore, the information gain is:
#
# $$G(S,X) = 0.811 - (0 + 0.5 + 0) = 0.311$$
# +
import numpy as np
def info_gain(X, y, feature):
    """Weighted entropy of *y* after partitioning rows by X[feature].

    For each distinct value v of the feature, computes the entropy of the
    class labels of the rows where X[feature] == v, then sums these
    entropies weighted by the fraction of rows in each partition.
    (Information gain proper is Entropy(S) minus this quantity, so a
    *lower* return value means a more informative feature.)

    Parameters
    ----------
    X : pandas.DataFrame
        Feature table containing the column *feature*.
    y : sequence
        Class labels, positionally aligned with the rows of X.
    feature : str
        Name of the column to split on.
    """
    from collections import Counter

    n = len(X)
    values = X[feature].values
    weighted_entropy = 0
    for value in set(values):
        # Class labels of the rows that fall into this partition.
        new_y = [y[i] for i, d in enumerate(values) if d == value]
        # Counter replaces the original hand-rolled double counting loop;
        # entropy() is symmetric in its arguments, so order is irrelevant.
        class_counts = Counter(new_y)
        nc = float(sum(class_counts.values()))
        partition_entropy = entropy([c / nc for c in class_counts.values()])
        weighted_entropy += float(len(new_y)) / n * partition_entropy
    return weighted_entropy
# -
# Consider a few variables from the titanic database:
titanic = pd.read_excel("../data/titanic.xls", "titanic")
titanic.head(1)
# Here, we have selected passenger class (`pclass`), sex, port of embarkation (`embarked`), and a derived variable called `adult`. We can calculate the information gain for each of these.
# +
y = titanic['survived']
# .copy() so that adding the derived column below does not trigger pandas'
# SettingWithCopyWarning (the original assigned into a slice of `titanic`).
X = titanic[['pclass','sex','embarked']].copy()
# NOTE(review): despite its name, this flag is True for *children*
# (age < 17), and rows with missing age come out False.  Inverting the
# comparison would move the NaN-age rows to the other group and change
# the info_gain values below, so the expression is kept as-is.
X['adult'] = titanic.age<17
info_gain(X, y, 'pclass')
# -
info_gain(X, y, 'sex')
info_gain(X, y, 'embarked')
info_gain(X, y, 'adult')
# Hence, the ID3 algorithm computes the information gain for each variable, selecting the one with the highest value (in this case, `adult`). In this way, it searches the "tree space" according to a greedy strategy.
#
# A tree can be constructed by recursively selecting the feature from the current dataset with the largest information gain, then removing it from the datset. Recursion stops when there are either no variables remaining, or there is only one class left in the subset (*e.g.* all `True` or all `False`).
#
# The ID3 algorithm is as follows:
#
# > * if all response data have the same class:
# >
# > - return leaf with data label
# >
# > * else if no features:
# >
# > - return leaf with most common label
# >
# > * else:
# >
# > - choose variable $X'$ that maximizes information gain to be a tree node
# > - add branch from node for each value of $X'$
# > - for each branch of node:
# >
# > * calculate $S_{x}$ by removing $X'$ from $S$
# > * set $S=S_{x}$ and call algorithm again
#
# The greedy approach of maximizing information gain at each step tends to bias solutions towards smaller trees.
# ## Decision Trees in `scikit-learn`
#
# Classification trees, either binary or multi-class, are implemented in `scikit-learn` in the `DecisionTreeClassifier` class. Where trees are binary, it expects the response variable to be coded as `[-1,1]` for negative and positive outcomes.
#
# Let's build a decision tree on a wine dataset.
# +
wine = pd.read_table("../data/wine.dat", sep='\s+')
attributes = ['Alcohol',
'Malic acid',
'Ash',
'Alcalinity of ash',
'Magnesium',
'Total phenols',
'Flavanoids',
'Nonflavanoid phenols',
'Proanthocyanins',
'Color intensity',
'Hue',
'OD280/OD315 of diluted wines',
'Proline']
grape = wine.pop('region')
y = grape
wine.columns = attributes
X = wine
# +
from sklearn import tree
from sklearn import model_selection

# Hold out 40% of the wine data for testing.
X_train, X_test, y_train, y_test = model_selection.train_test_split(
    X, y, test_size=0.4, random_state=0)

# max_features="sqrt" replaces the deprecated "auto" (removed in
# scikit-learn 1.3); for classifiers both mean sqrt(n_features).
clf = tree.DecisionTreeClassifier(criterion='entropy',
                                  max_features="sqrt",
                                  min_samples_leaf=10)
clf.fit(X_train, y_train)
# -
# If you have [GraphViz](http://www.graphviz.org) installed, you can draw the resulting tree:
with open("wine.dot", 'w') as f:
f = tree.export_graphviz(clf, out_file=f)
# ! dot -Tpng wine.dot -o wine.png
for i,x in enumerate(X.columns):
print(i,x)
from IPython.core.display import Image
Image("wine.png")
preds = clf.predict(X_test)
pd.crosstab(y_test, preds, rownames=['actual'],
colnames=['prediction'])
# ### Pruning
#
# Despite the *inductive bias* associated with trees that tend to make them small, the ID3 algorithm continues choosing nodes and branches until either it runs out of variables, or all outputs are of the same class. This can clearly lead to overfit trees.
#
# To prevent overfitting, we can stop growing the tree if the information gain (or reduction in error, etc.) is not sufficient to justify the extra complexity of adding another node. However, this simple rule is not optimal, because an uninformative subtree can lead to informative ones later on.
#
# The standard approach is therefore to grow a full tree, and then to *prune* it. The easiest approach is to remove branches that give the least increase in the error (information gain). To determine how far back to prune, we can evaluate the cross-validated error on each candidate pruning, and then pick the tree whose CV error is within 1 standard error of the minimum.
#
# Analogous to the lasso or ridge regression, we can penalize the number of terminal nodes in a tree:
#
# $$\sum_{m=1}^{|T|} \sum_{x_i \in R_m} (y_i - \hat{y}_{R_j})^2 + \alpha |T|$$
#
# where $|T|$ is the number of terminal nodes in tree T.
#
# ### Pruned Decision Tree Algorithm
#
# 1. Use recursive binary splitting to grow a large tree, such that each terminal node has fewer than some minimum number of observations.
# 2. Apply pruning to obtain a sequence of best subtrees, as a function of $\alpha$.
# 3. Use k-fold cross-validation to choose $\alpha$. Average results and pick $\alpha$ to minimize the average error.
# 4. Return subtree from (2) that corresponds to chosen $\alpha$.
#
# ## Random Forests
#
# Decision trees have several advantages:
#
# * ease of interpretation
# * handles continuous and discrete features
# * invariant to monotone transformation of features
# * variable selection automated
# * robust
# * scalable
#
# However, relative to other statistical learning methods, trees do not predict very accurately, due to the greedy nature of the tree construction algorithm. Also, trees tend to be **unstable**, as small changes to the inputs can have large effects on the structure of the tree; poor decisions near the root of the tree will propagate to the rest of the tree. Hence, trees are **high variance** (*i.e.* noisy) estimators.
#
# One way to reduce the variance of an estimate is to average together many estimates. In the case of decision trees, we can train $T$ different trees on random subsets of the data (with replacement) then average according to:
#
# $$\hat{f}(\mathbf{x}) = \frac{1}{T} \sum_{i=1}^T f_t(\mathbf{x})$$
#
# where $f_t$ is the $t^{th}$ tree. This approach is called "bootstrap aggregating", or **bagging**.
#
# Note that, since we are averaging over trees, there is *no need to prune*. With bagging, we reduce variance by averaging, rather than by pruning.
# +
from sklearn.ensemble import BaggingClassifier
bc = BaggingClassifier(n_jobs=4, oob_score=True)
bc
# +
bc.fit(X_train, y_train)
preds = bc.predict(X_test)
pd.crosstab(y_test, preds, rownames=['actual'],
colnames=['prediction'])
# -
# Test error of a bagged model is measured by estimating **out-of-bag error**.
#
# On average, each bagged tree uses about 2/3 of observations, leaving the remaining third as "out-of-bag". The response for the ith observation for each of the trees in which that observation was excluded (on average, B/3) is averaged. This is essentially the same as performing leave-one-out (LOO) cross-validation.
bc.oob_score_
# This approach is an **ensemble learning** method, because it takes a set of *weak* learners, and combines them to construct a *strong* learner that is more robust, with lower generalization error.
#
# An average of B trees, each with variance $\sigma^2$, has variance $\sigma^2/B$. If the variables are simply identically distributed, with positive pairwise correlation $\rho$, then the variance of the average of the B trees is:
#
# $$\rho \sigma^2 + \frac{1-\rho}{B}\sigma^2$$
# As the number of trees becomes large, the second term goes to zero. Further reductions in variance are limited by the size of the correlation among the trees $\rho$.
# **Random forests** improves upon bagging by creating a set of decision trees that are less correlated than bootstrapped trees. This is done by selecting from only a subset $m$ out of $M$ possible predictors at each split. Typically, we choose approximately the square root of the available number.
#
# This procedure is used to create a set of trees, most of which will be poor at predicting a given observation. However, classification is based on a *majority vote* of the constituent trees.
# +
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(n_jobs=4)
rf.fit(X_train, y_train)
preds = rf.predict(X_test)
pd.crosstab(y_test, preds, rownames=['actual'],
colnames=['prediction'])
# -
# With random forests, it is possible to quantify the relative importance of feature inputs for classification. In scikit-learn, the Gini index (recall, a measure of error reduction) is calculated for each internal node that splits on a particular feature of a given tree, which is multiplied by the number of samples that were routed to the node (this approximates the probability of reaching that node). For each variable, this quantity is averaged over the trees in the forest to yield a measure of importance.
# +
importances = rf.feature_importances_
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(X.shape[1]):
print("%d. %s (%f)" % (f + 1, X.columns[indices[f]], importances[indices[f]]))
# -
plt.figure()
plt.title("Feature importances")
plt.bar(range(X.shape[1]), importances[indices],
color="r", align="center")
plt.xticks(range(X.shape[1]), X.columns[indices], rotation=90)
plt.xlim([-1, X.shape[1]]);
# `RandomForestClassifier` uses the Gini impurity index by default; one may instead use the entropy information gain as a criterion.
# +
rf = RandomForestClassifier(n_jobs=4, criterion='entropy')
rf.fit(X_train, y_train)
importances = rf.feature_importances_
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(X.shape[1]):
print("%d. %s (%f)" % (f + 1, X.columns[indices[f]], importances[indices[f]]))
# -
# ## Decision Tree Regression
# While it may not be apparent how to use trees for regression analysis, it requires only a straightforward modification to the algorithm. A popular tree-based regression algorithm is the **classification and regression tree** (CART).
#
# The file `TNNASHVI.txt` in your data directory contains daily temperature readings for Nashville, courtesy of the [Average Daily Temperature Archive](http://academic.udayton.edu/kissock/http/Weather/). This data, as one would expect, oscillates annually. We can use a decision tree regression model to fit the data.
daily_temps = pd.read_table("../data/TNNASHVI.txt", sep='\s+',
names=['month','day','year','temp'], na_values=-99)
daily_temps.temp[daily_temps.year>2010].plot(style='b.', figsize=(10,6))
# In this context, none of the cost functions considered so far would be appropriate. Instead, it would be more suitable to use something like mean squared error (MSE) to guide the growth of the tree. With this, we can proceed to choose (1) a variable on which to split the dataset and (2) (in the case of continuous features) a value of the variable at which to place a node.
#
# Recall that the output of a tree is just a constant value for each leaf; here, we simply return the average of all the response values in the region. Thus, we choose a cut point that minimizes the MSE at each step.
# Transmogrify data
y = daily_temps.temp[daily_temps.year>2010]
X = np.atleast_2d(np.arange(len(y))).T
# +
from sklearn.tree import DecisionTreeRegressor
clf = DecisionTreeRegressor(max_depth=7, min_samples_leaf=2)
clf.fit(X, y)
X_fit = np.linspace(0, len(X), 1000).reshape((-1, 1))
y_fit_1 = clf.predict(X_fit)
plt.plot(X.ravel(), y, '.k', alpha=0.3)
plt.plot(X_fit.ravel(), y_fit_1, color='red')
# -
# A single decision tree allows us to estimate the signal in a non-parametric way,
# but clearly has some issues. In some regions, the model shows high bias and
# under-fits the data
# (seen in the long flat lines which don't follow the contours of the data),
# while in other regions the model shows high variance and over-fits the data
# (reflected in the narrow spikes which are influenced by noise in single points).
#
# One way to address this is to use an ensemble method, like random forests, so that the
# effects of their over-fitting go away on average.
#
# Here we will use a random forest of 200 trees to reduce the tendency of each
# tree to over-fitting the data.
# +
from sklearn.ensemble import RandomForestRegressor
clf = RandomForestRegressor(n_estimators=200, max_depth=9,
min_samples_leaf=10)
clf.fit(X, y)
y_fit_200 = clf.predict(X_fit)
plt.plot(X.ravel(), y, '.k', alpha=0.3)
plt.plot(X_fit.ravel(), y_fit_200, color='red')
# -
# ### Prediction intervals
#
# The predictions from random forests are not accompanied by estimates of uncertainty, as is the case with Bayesian regression models. However, it is possible to obtain probability intervals using a random forests approach. Since we are using an ensemble of trees, it is possible to track *all* predicted values for all leaf nodes in a random forest, rather than just the mean or modal value. This results in conditional distributions $P(y|X=x)$ for every x, from which percentiles can be calculated for desired endpoints in a prediction interval. This approach is called **quantile regression forests**.
#
# To implement quantile regression forests in scikit-learn, we need to allow each tree to grow so that each leaf node contains exactly one value. Then, each tree returns a single response variable, from which a conditional distribution can be approximated. Of course, fully expanding trees will result in overfitting, but these can also be cross-validated.
#
# scikit-learn does not automatically calculate prediction intervals, but the estimators from each constituent tree in the `RandomForestRegressor` are available, from which individual tree predictions can be made.
def prediction_intervals(mod, X, alpha=0.05):
    """Empirical (1 - alpha) prediction interval from a fitted ensemble.

    Collects the per-tree predictions for *X* from ``mod.estimators_`` and
    returns the alpha/2 and 1 - alpha/2 percentiles across trees, together
    with the raw (n_trees, n_samples) prediction matrix.
    """
    per_tree = []
    for estimator in mod.estimators_:
        per_tree.append(estimator.predict(X))
    lo_pct = 100.0 * alpha / 2.0
    hi_pct = 100.0 - lo_pct
    lower = np.percentile(per_tree, lo_pct, axis=0)
    upper = np.percentile(per_tree, hi_pct, axis=0)
    return lower, upper, np.array(per_tree)
X_train, X_test, y_train, y_test = model_selection.train_test_split(
X, y, test_size=0.4, random_state=0)
# +
clf = RandomForestRegressor(n_estimators=1000, min_samples_leaf=1)
clf.fit(X_train, y_train)
y_fit_200 = clf.predict(X_fit)
# -
lower, upper, preds = prediction_intervals(clf, X_test, alpha=0.1)
x_sorted = np.sort(X_test.ravel())
order = np.argsort(X_test.ravel())
plt.errorbar(x_sorted, y_test.values[order],
yerr=[(y_test.values-lower)[order], (upper-y_test.values)[order]])
plt.plot(X_test, y_test, '.r', alpha=0.3)
# ### Exercise
#
# Select the optimal random forest regression model for the Nashville daily temperature data via cross-validation in `scikit-learn`. Use the number of estimators and the maximum leaf nodes as tuning parameters.
# +
# Write your answer here
# -
# ## References
#
# <NAME>. [Quantile Regression Forests](http://www.jmlr.org/papers/volume7/meinshausen06a/meinshausen06a.pdf) Journal of Machine Learning Research 7 (2006) 983–999
#
# <NAME>, <NAME> and <NAME>. (2009) [Elements of Statistical Learning: Data Mining, Inference, and Prediction](http://statweb.stanford.edu/~tibs/ElemStatLearn/), second edition. Springer.
#
| notebooks/Decision Trees.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import cv2
batman = cv2.imread("batman-swarovski-5492687.jpg")
superman = cv2.imread("dc-comics-superman-swarovski-5556951.jpg")
def picshow(photo):
    """Display *photo* in a window and block until any key is pressed."""
    cv2.imshow("hi", photo)
    cv2.waitKey()
    # Close the window so repeated calls don't accumulate stale windows.
    cv2.destroyAllWindows()
picshow(batman)
picshow(superman)
# Crop each hero's chest logo; the row/column ranges are presumably
# hand-picked for these specific images — TODO confirm against the files.
batlogo = batman[216:270,510:594]
superlogo = superman[296:445,750:944]
# cv2.resize takes (width, height), i.e. (cols, rows) — hence shape[1], shape[0].
superlogo_resized = cv2.resize(superlogo, (batlogo.shape[1],batlogo.shape[0]))
batlogo_resized = cv2.resize(batlogo, (superlogo.shape[1],superlogo.shape[0]))
# Swap the logos in place (same regions that were cropped above).
batman[216:270,510:594] = superlogo_resized
superman[296:445,750:944] = batlogo_resized
picshow(batman)
picshow(superman)
# Persist the modified images.
cv2.imwrite("bat_superlogo.jpg", batman)
cv2.imwrite("super_batlogo.jpg", superman)
| task4.2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.1 64-bit ('Python39')
# name: python3
# ---
# # Module 2 : Wrap up Quizzzzzzz
# Import des données :
# +
import pandas as pd
blood_transfusion = pd.read_csv("../datasets/blood_transfusion.csv")
target_name = "Class"
data = blood_transfusion.drop(columns=target_name)
target = blood_transfusion[target_name]
# -
data
target
target.value_counts()
# #### Question 1 : Select the correct answers from the following proposals.
#
# a) The problem to be solved is a regression problem
# b) The problem to be solved is a binary classification problem (exactly 2 possible classes)
# c) The problem to be solved is a multiclass classification problem (more than 2 possible classes)
# d) The proportions of the class counts are imbalanced: some classes have more than twice as many rows than others)
#
#
# Je dirais : b) et d)
# #### Question 2 : Using a sklearn.dummy.DummyClassifier and the strategy "most_frequent", what is the average of the accuracy scores obtained by performing a 10-fold cross-validation?
#
# a) ~25%
# b) ~50%
# c) ~75%
#
# Je dirais environ 75% en regardant le nombre d'occurences de "not donated", on va le faire qd mm pour la pratique.
# +
from sklearn.dummy import DummyClassifier
from sklearn.model_selection import cross_validate
# Baseline that always predicts the majority class.
model = DummyClassifier(strategy="most_frequent")
# 10-fold CV with the default (plain accuracy) metric.
results = cross_validate(model, data, target, cv=10)
# Mean test accuracy — high only because the classes are imbalanced.
results["test_score"].mean()
# -
# #### Question 3 : Repeat the previous experiment but compute the balanced accuracy instead of the accuracy score. Pass scoring="balanced_accuracy" when calling cross_validate or cross_val_score functions?
#
# a) ~25%
# b) ~50%
# c) ~75%
# +
from sklearn.dummy import DummyClassifier
from sklearn.model_selection import cross_validate
model = DummyClassifier(strategy="most_frequent")
results = cross_validate(model, data, target, cv=10, scoring="balanced_accuracy")
results["test_score"].mean()
# -
# #### Question 4 : We will use a sklearn.neighbors.KNeighborsClassifier for the remainder of this quiz.
#
# Why is it relevant to add a preprocessing step to scale the data using a StandardScaler when working with a KNeighborsClassifier?
#
# a) faster to compute the list of neighbors on scaled data
# b) k-nearest neighbors is based on computing some distances. Features need to be normalized to contribute approximately equally to the distance computation.
# c) This is irrelevant. One could use k-nearest neighbors without normalizing the dataset and get a very similar cross-validation score.
#
# Je dirais b)
# #### Question 5 : Create a scikit-learn pipeline (using sklearn.pipeline.make_pipeline) where a StandardScaler will be used to scale the data followed by a KNeighborsClassifier. Use the default hyperparameters.
#
# Inspect the parameters of the created pipeline. What is the value of K, the number of neighbors considered when predicting with the k-nearest neighbors?
#
# a) 1
# b) 3
# c) 5
# d) 8
# e) 10
# +
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn.neighbors import KNeighborsClassifier
# Scale features first: k-NN is distance-based, so each feature must
# contribute comparably to the metric.
model = make_pipeline(
    StandardScaler(),
    KNeighborsClassifier()
)
# Inspect the pipeline's hyperparameters; n_neighbors defaults to 5
# in scikit-learn.
model.get_params().keys()
model.get_params()["kneighborsclassifier__n_neighbors"]
# -
# #### Question 6 : Evaluate the previous model with a 10-fold cross-validation. Use the balanced accuracy as a score.
# What can you say about this model? Compare the average of the train and test scores to argument your answer.
#
# a) The model clearly underfits
# b) The model generalizes
# c) The model clearly overfits
#
# D'après le calcul ci-dessous, je dirais que le modèle underfits.
# Réponse : et non, ça overfitte car : We see that the gap between train and test scores is large. In addition, the average score of on the training sets is good while the average scores on the testing sets is really bad. They are the signs of a overfitting model.
#
#
#
# + tags=[]
from sklearn.model_selection import cross_validate

# 10-fold CV on the pipeline; keep the train scores so the train/test gap can
# be compared. `model`, `data`, `target` and `pd` come from earlier cells.
cv_scores = cross_validate(
    model, data, target,
    scoring="balanced_accuracy", cv=10, return_train_score=True,
)
results = pd.DataFrame(cv_scores)
print(results.mean())
# -
# #### Question 7 : We will now study the effect of the parameter n_neighbors on the train and test score using a validation curve. You can use the following parameter range:
#
# param_range = [1, 2, 5, 10, 20, 50, 100, 200, 500]
#
# Also, use a 5-fold cross-validation and compute the balanced accuracy score instead of the default accuracy score (check the scoring parameter). Finally, plot the average train and test scores for the different value of the hyperparameter. We recall that the name of the parameter can be found using model.get_params().
#
# Select the true affirmations stated below:
#
# a) The model underfits for a range of n_neighbors values between 1 to 10
# b) The model underfits for a range of n_neighbors values between 10 to 100
# c) The model underfits for a range of n_neighbors values between 100 to 500
#
# D'après ci-dessous, je dirais la a) et b)
# Réponse : et ba non c'est la c) : Underfitting happens when the gap between train and test scores is low and that both scores are low. When n_neighbors is high, underfitting occurs. The model lacks expressivity because it always considers the majority class of a large number of data points. Its prediction tends to be always the same, irrespective of the test point of interest.
#
#
# +
import matplotlib.pyplot as plt
from sklearn.model_selection import validation_curve

param_range = [1, 2, 5, 10, 20, 50, 100, 200, 500]
# The pipeline exposes the hyperparameter as
# "kneighborsclassifier__n_neighbors" (see model.get_params()).
# Removed: a stray `cross_validate(...)` call whose result was discarded
# (expensive dead work) and a bare `model.get_params()` expression.
train_scores, test_scores = validation_curve(
    model, data, target,
    param_name="kneighborsclassifier__n_neighbors",
    param_range=param_range, cv=5, scoring="balanced_accuracy", n_jobs=2,
)
plt.plot(param_range, train_scores.mean(axis=1), label="Training score")
plt.plot(param_range, test_scores.mean(axis=1), label="Testing score")
plt.legend()
plt.xscale("log")
plt.xlabel("Nombre de voisins")
plt.ylabel("Score")
# Fixed typo in the title: "KNeighrest Neighboors" -> "K-Nearest Neighbors"
_ = plt.title("Validation curve for K-Nearest Neighbors")
# +
print("REPONSE :")
from sklearn.model_selection import validation_curve
import matplotlib.pyplot as plt

# Mean +/- one standard deviation of the balanced accuracy for each value of
# n_neighbors, on both the training and the testing folds.
param_range = [1, 2, 5, 10, 20, 50, 100, 200, 500]
param_name = "kneighborsclassifier__n_neighbors"
train_scores, test_scores = validation_curve(
    model, data, target, param_name=param_name, param_range=param_range, cv=5,
    n_jobs=2, scoring="balanced_accuracy")
_, ax = plt.subplots()
for name, scores in [("Training score", train_scores),
                     ("Testing score", test_scores)]:
    mean = scores.mean(axis=1)
    std = scores.std(axis=1)
    ax.plot(param_range, mean, linestyle="-.", label=name, alpha=0.8)
    ax.fill_between(param_range, mean - std, mean + std,
                    alpha=0.5, label=f"std. dev. {name.lower()}")
ax.set_xticks(param_range)
ax.set_xscale("log")
ax.set_xlabel("Value of hyperparameter n_neighbors")
ax.set_ylabel("Balanced accuracy score")
ax.set_title("Validation curve of K-nearest neighbors")
# -
# #### Question 8 :Select the true affirmations stated below:
#
# a) The model overfits for a range of n_neighbors values between 1 to 10
# b) The model overfits for a range of n_neighbors values between 10 to 100
# c) The model overfits for a range of n_neighbors values between 100 to 500
#
# Je dirais que l'overfit correspond de 1 à 100 car le score du training est bien supérieur au score du test
# Réponse : non c'est que a) car il faut que le test score soit faible
#
# #### Question 9 : Select the true affirmations stated below:
#
# a) The model best generalizes for a range of n_neighbors values between 1 to 10
# b) The model best generalizes for a range of n_neighbors values between 10 to 100
# c) The model best generalizes for a range of n_neighbors values between 100 to 500
#
# Je dirais b) :)
| notebooks/wrap_up_quiz--module_2.ipynb |
# # Exporting
# It's easy.
# ### save
# Setting `save` will save the animation to disk and show it inline.
import ahlive as ah
# Setting save=... writes the GIF to disk and still displays it inline.
ah.Array([0, 1, 2], [4, 5, 6], save='exporting_tutorial.gif').render()
# ### fmt
# `fmt` does not have to be explicitly set; if `save` is suffixed with a file format, it will use that, else if `save` is not suffixed with any file format, it will default to gif.
#
# ```python
# import ahlive as ah
# ah.Array([0, 1, 2], [4, 5, 6], save='exporting_tutorial').render()
# ```
#
# However, `fmt` can be explicitly set as well. For example, to save as a video, set `fmt` as `mp4`, but note `imageio-ffmpeg` is required, e.g. `pip install imageio-ffmpeg`.
#
# ```python
# import ahlive as ah
# ah.Array([0, 1, 2], [4, 5, 6], save='exporting_tutorial', fmt='mp4').render()
# ```
#
# ### show
# To disable showing inline, set `show` to `False`. If `save` is set, will still save to disk.
#
# ```python
# import ahlive as ah
# ah.Array([0, 1, 2], [4, 5, 6], show=False).render()
# ```
#
# ### pygifsicle
#
# Reduce the file size of an output GIF by setting `pygifsicle` to `True`. If `save` is not set, a temporary file will be written in the current working directory.
#
# For this to work, `pip install pygifsicle` and `conda install gifsicle`.
#
# ```python
# import ahlive as ah
# ah.Array([0, 1, 2], [4, 5, 6], pygifsicle=True).render()
# ```
#
# ### workers
# By default, ahlive uses a single thread to output each frame, but supports multiple processors through `workers`.
#
# ```python
# import ahlive as ah
# ah.Array([0, 1, 2], [4, 5, 6], workers=1).render()
# ```
#
# ### scheduler
# Sometimes, `scheduler='single-threaded'` is more efficient than `scheduler='processes'` if your dataset is large.
#
# ```python
# import ahlive as ah
# ah.Array([0, 1, 2], [4, 5, 6], scheduler='single-threaded').render()
# ```
| docs/source/essentials/exporting.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import ipywidgets as widgets
from traitlets import Unicode, List, Int
# # Example from http://bl.ocks.org/mbostock/4060366
# + language="javascript"
#
# var s = document.createElement("style");
# s.innerHTML = `
# path {
# stroke: #fff;
# }
#
# path:first-child {
# fill: yellow !important;
# }
#
# circle {
# fill: #000;
# pointer-events: none;
# }
#
# .q0-9 { fill: rgb(197,27,125); }
# .q1-9 { fill: rgb(222,119,174); }
# .q2-9 { fill: rgb(241,182,218); }
# .q3-9 { fill: rgb(253,224,239); }
# .q4-9 { fill: rgb(247,247,247); }
# .q5-9 { fill: rgb(230,245,208); }
# .q6-9 { fill: rgb(184,225,134); }
# .q7-9 { fill: rgb(127,188,65); }
# .q8-9 { fill: rgb(77,146,33); }`;
#
# document.getElementsByTagName("head")[0].appendChild(s);
# -
class MyD3(widgets.DOMWidget):
    """Jupyter widget bridging Python and the d3.js Voronoi view defined below."""

    # Name and AMD module of the JavaScript view class (registered with
    # require/define in the following cell).
    _view_name = Unicode('HelloView').tag(sync=True)
    _view_module = Unicode('myd3').tag(sync=True)
    # Traits tagged sync=True are mirrored between the kernel and the browser.
    width = Int().tag(sync=True)
    height = Int().tag(sync=True)
    vertices = List().tag(sync=True)
# + language="javascript"
# require.undef('myd3');
#
# define('myd3', ["@jupyter-widgets/base",
# "https://cdnjs.cloudflare.com/ajax/libs/d3/3.5.17/d3.js"], function(widgets, d3) {
#
# var HelloView = widgets.DOMWidgetView.extend({
#
# render: function() {
# var that = this;
# this.width = this.model.get('width');
# this.height = this.model.get('height');
# that.vertices = this.model.get('vertices');
#
# that.voronoi = d3.geom.voronoi()
# .clipExtent([[0, 0], [that.width, that.height]]);
#
# this.svg = d3.select(this.el).append("svg")
# .attr("width", that.width)
# .attr("height", that.height)
# .on("mousemove", function() {
# that.vertices[0] = d3.mouse(this);
# that.redraw();
# });
#
# var g1 = this.svg.append("g");
# this.path = g1.selectAll("path");
# var g2 = this.svg.append("g");
# this.circle = g2.selectAll("circle");
#
# this.model.on('change:vertices', this.update_vertices, this);
# this.redraw();
# },
#
# update_vertices: function() {
# this.redraw();
# },
#
# redraw: function () {
# this.vertices = this.model.get('vertices');
#
# this.path = this.path
# .data(this.voronoi(this.vertices), this.polygon);
# this.path.exit().remove();
# this.path.enter().append("path")
# .attr("class", function(d, i) { return "q" + (i % 9) + "-9"; })
# .attr("d", this.polygon);
# this.path.order();
#
# this.circle = this.circle
# .data([]);
# this.circle.exit().remove();
#
# this.circle = this.circle
# .data(this.vertices.slice(1));
#
# this.circle.enter().append("circle")
# .attr("transform", function(d) {
# return "translate(" + d + ")";
# })
# .attr("r", 1.5);
# },
#
# polygon: function (d) {
# return "M" + d.join("L") + "Z";
# }
# });
#
# return {
# HelloView : HelloView
# };
# });
# +
import numpy as np

sample_size = 100
width = 750
height = 300

# Scatter `sample_size` uniformly random points over a width x height canvas
# and hand them to the d3 widget.
initial_points = np.random.rand(sample_size, 2) * np.array([width, height])
m = MyD3(vertices=initial_points.tolist(), height=height, width=width)
m
# -
m.vertices = (np.random.rand(sample_size, 2) * np.array([width, height])).tolist()
| 2019-07-10-CICM/notebooks/d3-demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# -
train = pd.read_csv("../input/creditcardfraud/creditcard.csv")
train
# Keep only the features whose absolute correlation with the target class
# exceeds 0.1.
correlations = train.corrwith(train["Class"]).to_dict()
del correlations['Class']
features = []
for name, corr in correlations.items():
    if abs(corr) > 0.1:
        print(f"{name} : {corr:.2f}")
        features.append(name)
x = train[features]
y = train['Class']
y.value_counts()
from sklearn.preprocessing import StandardScaler

# Standardize the selected features (zero mean, unit variance).
scaler = StandardScaler()
x = scaler.fit_transform(x)
# splitting the dataset into train and test dataset with 4:1 ratio (80%-20%)
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(
    x, y, test_size=.2, random_state=26, stratify=y)
# ## Training on different algorithms
# ### Logistic Regression
# +
from sklearn.linear_model import LogisticRegression
# Create instance of model
lreg = LogisticRegression()
# Pass training data into model
lreg.fit(x_train, y_train)
# -
# Getting prediciton on x_test
y_pred_lreg = lreg.predict(x_test)
# +
# Scoring our model
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score,f1_score, precision_score, recall_score
# Confusion Matrix
print('Logistic Regression')
print('\n')
print('Confusion Matrix')
print(confusion_matrix(y_test, y_pred_lreg))
print('--'*50)
# Classification Report
print('Classification Report')
print(classification_report(y_test,y_pred_lreg))
# Accuracy of our model
print('--'*50)
logreg_accuracy = round(accuracy_score(y_test, y_pred_lreg) * 100,8)
print('Accuracy = ', logreg_accuracy,'%')
# -
# **We have a accuracy of 99.90%**
# ### LINEAR SUPPORT VECTOR CLASSIFIER
# %%time
from sklearn.svm import SVC
# Instantiate the model
# NOTE(review): SVC() defaults to an RBF kernel, not a linear one; the section
# title says "linear" — use SVC(kernel='linear') or LinearSVC to match it.
svc = SVC()
# Fit the model on training data
svc.fit(x_train, y_train)
# Getting the predictions for x_test
y_pred_svc = svc.predict(x_test)
# +
print('Support Vector Classifier')
print('\n')
# Confusion matrix
print('Confusion Matrix')
print(confusion_matrix(y_test, y_pred_svc))
print('--'*50)
# Classification report
print('Classification Report')
print(classification_report(y_test, y_pred_svc))
# Accuracy
print('--'*50)
svc_accuracy = round(accuracy_score(y_test, y_pred_svc)*100,8)
print('Accuracy = ', svc_accuracy,'%')
# -
# ### K-NEAREST NEIGHBORS
# +
# %%time
from sklearn.neighbors import KNeighborsClassifier

# Try k = 2..14 and record the misclassification rate of each fit; the best
# k is picked from this curve in the next cell.
error_rate = []
for k_candidate in range(2, 15):
    candidate = KNeighborsClassifier(n_neighbors=k_candidate)
    candidate.fit(x_train, y_train)
    error_rate.append(np.mean(candidate.predict(x_test) != y_test))

# Plot error rate
plt.figure(figsize=(10, 6))
plt.plot(range(2, 15), error_rate, color='blue', linestyle='--', marker='o',
         markerfacecolor='green', markersize=10)
plt.title('Error Rate vs K Value')
plt.xlabel('K')
plt.ylabel('Error Rate')
plt.show()
# +
# now using above data to train with n_neighbors having least error rate
# np.argmin returns the first index of the minimum, exactly matching the
# manual scan it replaces; +2 converts the index back to the k value tried.
n_value = int(np.argmin(error_rate)) + 2
knn = KNeighborsClassifier(n_neighbors=n_value)
# Fit new KNN on training data
knn.fit(x_train, y_train)
# -
# Predict KNN
y_pred_knn_op = knn.predict(x_test)
# +
print('K-Nearest Neighbors(KNN)')
print('k =', n_value)
# Confusion Matrix
print('\n')
print(confusion_matrix(y_test, y_pred_knn_op))
# Classification Report
print('--'*50)
# Fixed user-facing typo: "Classfication" -> "Classification"
print('Classification Report', classification_report(y_test, y_pred_knn_op))
# Accuracy
print('--'*50)
knn_op_accuracy = round(accuracy_score(y_test, y_pred_knn_op)*100, 8)
print('Accuracy = ', knn_op_accuracy, '%')
# -
# ### RANDOM FOREST
# +
from sklearn.ensemble import RandomForestClassifier

# Fit a 250-tree forest using every available CPU core.
rfc = RandomForestClassifier(n_estimators=250, n_jobs=-1)
rfc.fit(x_train, y_train)
y_pred_rfc = rfc.predict(x_test)
# +
print('Random Forest')
print('\n')
print('Confusion Matrix')
print(confusion_matrix(y_test, y_pred_rfc))
print('--'*50)
print('Classification Report')
print(classification_report(y_test, y_pred_rfc))
print('--'*50)
rf_accuracy = round(accuracy_score(y_test, y_pred_rfc)*100, 8)
print('Accuracy = ', rf_accuracy, '%')
# -
# ### XGBoost Classifier
# +
from xgboost import XGBClassifier
# Create model object (all CPU cores)
xgb = XGBClassifier(n_jobs=-1)
# Fit model to training data
xgb.fit(x_train, y_train)
y_pred_xgb = xgb.predict(x_test)
# +
# Fixed user-facing typo: "Classifer" -> "Classifier"
print('XGBoost Classifier')
# Confusion matrix
print('\n')
print('Confusion Matrix')
print(confusion_matrix(y_test, y_pred_xgb))
# Classification report
print('--'*50)
print('Classification Report')
print(classification_report(y_test, y_pred_xgb))
# Accuracy
print('--'*50)
xgb_accuracy = round(accuracy_score(y_test, y_pred_xgb)*100, 8)
print('Accuracy = ', xgb_accuracy, '%')
# -
# Summary table of every classifier, best score first.
model_scores = {
    'Logistic Regression': logreg_accuracy,
    'Linear SVC': svc_accuracy,
    'K-Nearest Neighbors': knn_op_accuracy,
    'Random Forest': rf_accuracy,
    'XGBoost Classifier': xgb_accuracy,
}
models = pd.DataFrame({
    'Model': list(model_scores),
    'Score': list(model_scores.values()),
})
models.sort_values(by='Score', ascending=False)
| Credit Card Fraud Detection/Model/baselinemodels-creditcard.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/cxbxmxcx/GenReality/blob/master/GEN_8_StyleGAN2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="rS1wUgN15uyB"
#@title IMPORTS
import os
import sys
import time
import datetime
# + id="Dpf_MyiJzc6l" colab={"base_uri": "https://localhost:8080/"} outputId="5e9ebf5a-a2c3-42ca-f61a-7c9cf996124d"
from google.colab import drive

# Mount Google Drive and make sure the working folder for checkpoints exists.
drive.mount('/content/gdrive')
os.makedirs('/content/gdrive/MyDrive/stylegan2', exist_ok=True)
# + id="RXeA0GTK0dhv" colab={"base_uri": "https://localhost:8080/"} outputId="aefe57f3-ebab-49cb-fab9-705c9855dd52"
# cd /content/gdrive/MyDrive/stylegan2
# + id="Z65V95AFk1Y_"
#@title INSTALLS
# !pip install stylegan2_pytorch --quiet
# + id="rVBTY_4wwfbq" colab={"base_uri": "https://localhost:8080/"} outputId="5da1513c-a9ff-493a-8085-946871c5af30"
#@title HYPERPARAMETERS
dataset_name = "foods" #@param ["img_align_celeba","foods","cars_all"]

# Download URL for each supported dataset.
datasets = {
    "img_align_celeba": {"url": 'https://www.dropbox.com/s/5hznryk2m0qxs2m/img_align_celeba.zip?dl=1'},
    "foods": {"url": "https://www.dropbox.com/s/scmccjzoa0i5arx/foods.zip?dl=1"},
    "cars_all": {"url": "https://www.dropbox.com/s/2zyep29yp4ke7fi/cars_all.zip?dl=1"},
}

# Images live under <root>/<dataset_name>; both folders are created up front.
img_root_folder = 'images3'
os.makedirs(img_root_folder, exist_ok=True)
image_folder = img_root_folder + "/%s" % dataset_name
print(f"Image data folders constructed {image_folder}")
os.makedirs(image_folder, exist_ok=True)
# + id="Jzze0ldCx1O_" colab={"base_uri": "https://localhost:8080/"} outputId="09eb9029-5443-4664-fe7d-3a7c37815231"
#@title DOWNLOAD IMAGES
# Fetch and unpack the archive only when the image folder is still empty.
if not os.listdir(image_folder):
    from io import BytesIO
    from urllib.request import urlopen
    from zipfile import ZipFile

    zipurl = datasets[dataset_name]['url']
    with urlopen(zipurl) as zipresp, ZipFile(BytesIO(zipresp.read())) as zfile:
        zfile.extractall(img_root_folder)
    print(f"Downloaded & Extracted {zipurl}")
# + colab={"base_uri": "https://localhost:8080/"} id="VSQA0h0L64fr" outputId="083c6e2e-7184-4d2d-f225-7a65b2858357"
# !stylegan2_pytorch --data $image_folder --name $dataset_name
| GEN_8_StyleGAN2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import gym
import matplotlib.pyplot as plt
import sys
sys.path.append('../..')
import wizluk.envs
from wizluk.policies import ContinuousZeroPolicy, ContinuousRandomPolicy
# -
# ## Initialisation
# Continuous-action Mountain Car environment registered by wizluk.envs.
env = gym.make('MountainCarContinuous-v1')
# ## Parameters for Baselines
H = 500 # Horizon: maximum number of steps per episode
N = 10 # Number of Rollouts averaged per baseline
# ### Zero Policy
# +
# Roll out the always-zero-action policy N times and average the returns.
env = gym.make('MountainCarContinuous-v1')
zero_baseline = np.zeros((N, 1))
env.seed(1337)
zero_pi = ContinuousZeroPolicy()
for episode in range(N):
    obs = env.reset()
    for _ in range(H):
        action = zero_pi.get_action(env, obs)
        obs, reward, done, info = env.step(action)
        zero_baseline[episode] += reward
        if done:
            break
zero_baseline_cost = np.mean(zero_baseline)
print("Zero baseline cost: {}".format(zero_baseline_cost))
# -
# #### Demonstration
# +
# Render one episode of the zero policy for visual inspection.
env = gym.make('MountainCarContinuous-v1')
env.seed(1337)
zero_pi = ContinuousZeroPolicy()
obs = env.reset()
for _ in range(H):
    env.render()
    obs, reward, done, info = env.step(zero_pi.get_action(env, obs))
    if done:
        break
env.close()
# -
# ### Random Policy
# +
# Roll out a uniformly random policy N times and average the returns.
# Seeding numpy before the env keeps the action sequence reproducible.
env = gym.make('MountainCarContinuous-v1')
np.random.seed(1337)
env.seed(1337)
random_baseline = np.zeros((N, 1))
random_pi = ContinuousRandomPolicy()
for episode in range(N):
    obs = env.reset()
    for _ in range(H):
        action = random_pi.get_action(env, obs)
        obs, reward, done, info = env.step(action)
        random_baseline[episode] += reward
        if done:
            break
random_baseline_cost = np.mean(random_baseline)
print("Random baseline cost: {}".format(random_baseline_cost))
# -
# #### Demonstration
# +
env = gym.make('MountainCarContinuous-v1')
env.seed(1337)
# BUG FIX: this cell demonstrates the *random* policy, but it previously
# instantiated ContinuousZeroPolicy (copy-paste from the zero-policy demo).
random_pi = ContinuousRandomPolicy()
x = env.reset()
for s in range(H):
    env.render()
    u = random_pi.get_action(env, x)
    x, reward, done, info = env.step(u)
    if done : break
env.close()
# -
| notebooks/envs/mountain_car_continuous_v1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # EZyRB Tutorial 1
# ## Build and query a simple reduced order model
#
# In this tutorial we will show the typical workflow for the construcion of the Reduced Order Model based only on the outputs of the higher-order model. In detail, we consider here a POD-RBF framework (Proper Orthogonal Decomposition for dimensionality reduction and Radial Basis Function for manifold approximation), but the tutorial can be easily extended to other methods thanks to the modularity nature of **EZyRB**.
#
# We consider a parametric steady heat conduction problem in a two-dimensional domain $\Omega$. While in this tutorial we are going to focus on the data-driven approach, the same problem can be tackled in an intrusive manner (with the Reduced Basis method) using the [RBniCS](https://gitlab.com/RBniCS/RBniCS), as demonstrated in this [RBniCS tutorial](https://gitlab.com/RBniCS/RBniCS/tree/master/tutorials/01_thermal_block).<br>
# This problem is exhaustively discussed in the book *Certified reduced basis methods for parametrized partial differential equations*, <NAME>, <NAME>, <NAME>, 2016, Springer. An additional description is available also at [https://rbnics.gitlab.io/RBniCS-jupyter/tutorial_thermal_block.html]().
#
# Since the good documentation already available for this problem and since the data-driven methodologies we will take into consideration, we just summarize the model to allow a better understanding.
#
# The domain is depicted below:
#
# <img src="pictures/tut1_sketch.png" alt="Drawing" style="width: 300px;"/>
#
# where:
# - the first parameter $\mu_o$ controls the conductivity in the circular subdomain $\Omega_0$;
# - the second parameter $\mu_1$ controls the flux over $\Gamma_\text{base}$.
#
#
# ### Initial setting
#
# First of all import the required packages: we need the standard Numpy and Matplotlib, and some classes from EZyRB. In the EZyRB framework, we need three main ingredients to construct a reduced order model:
# - an initial database where the snapshots are stored;
# - a reduction method to reduce the dimensionality of the system, in this tutorial we will use the proper orthogonal decomposition (POD) method;
# - an approximation method to extrapolate the parametric solution for new parameters, in this tutorial we will use a radial basis function (RBF) interpolation.
# +
import numpy as np
import matplotlib.tri as mtri
import matplotlib.pyplot as plt
from ezyrb import POD, RBF, Database
from ezyrb import ReducedOrderModel as ROM
# %matplotlib inline
# -
# ## Offline phase
#
# In the *offline* phase, we need some samples of the parametric high-fidelity model. In this case, we extract 8 snapshots from the numerical model implemented in **FEniCS**, and we import them and the related parameters.
# Precomputed FEniCS snapshots and the matching parameter samples.
snapshots = np.load('data/tut1_snapshots.npy')
param = np.load('data/tut1_mu.npy')
print(snapshots.shape, param.shape)
# Moreover, to visualize the solution (both the higher-order one and the reduced one), we import also the mesh information to be able to create the triangulation. We underline this additional step is related only to plotting purpose, and not mandatory for the reduced space generation.
# Mesh connectivity and node coordinates, used only for plotting.
tri = np.load('data/tut1_triangles.npy')
coord = np.load('data/tut1_coord.npy')
triang = mtri.Triangulation(coord[0],coord[1],tri)
# For the sake of clarity the snapshots are plotted.
# Plot the 8 training snapshots side by side, labelled by their parameters.
fig, ax = plt.subplots(nrows=2, ncols=4, figsize=(16, 6), sharey=True, sharex=True)
ax = ax.flatten()
for i in range(8):
    ax[i].triplot(triang, 'b-', lw=0.1)
    cm = ax[i].tripcolor(triang, snapshots[i])
    fig.colorbar(cm, ax=ax[i])
    # Raw string: '\m' is an invalid escape sequence in a normal string
    # literal (DeprecationWarning); the rendered TeX label is unchanged.
    ax[i].set_title(r'($\mu_0={:5.2f}, \mu_1={:5.2f})$'.format(*param[i]))
# First of all, we create a `Database` object from the parameters and the snapshots.
# Training database: parameter samples paired with their snapshots.
db = Database(param, snapshots)
# Then we need a reduction object. In this case we use the proper orthogonal decomposition so we create a `POD` object. We use here all the default parameters, but for the complete list of available arguments we refer to original documentation of [POD](https://mathlab.github.io/EZyRB/pod.html) class.
# Dimensionality reduction via POD computed with a singular value decomposition.
pod = POD('svd')
# Then we instantiate the `RBF` class for interpolating the solution manifold. Also in this case, [RBF](https://mathlab.github.io/EZyRB/rbf.html) documentation is the perfect starting point to explore such class.
# Radial basis function interpolator over the reduced coefficients.
rbf = RBF()
# Few lines of code and our reduced model is created!
# To complete everything, we create the `ReducedOrderModel` (aliased to `ROM` in this tutorial) object by passing the already created objects. For clarity, we puntualize that we need to pass the **instances** and not the classes. Simply changing such line (with different objects) allows to test different frameworks in a very modular way.
# The `fit()` function computes the reduced model, meaning that the original snapshots in the database are projected onto the POD space and the RBF interpolator is created.
rom = ROM(db, pod, rbf)
rom.fit();
# ## Online phase
# In the *online* phase we can query our model in order to predict the solution for a new parameter $\mu_\text{new}$ that is not in the training set. We just need to pass the new parameters as input of the `predict()` function.
# Query the ROM at an unseen parameter pair (mu0, mu1).
new_mu = [8, 1]
pred_sol = rom.predict(new_mu)
# We can so plot the predicted solution for a fixed parameter...
plt.figure(figsize=(7, 5))
plt.triplot(triang, 'b-', lw=0.1)
plt.tripcolor(triang, pred_sol)
plt.colorbar();
# ... or interactively touch the input parameters to visualize the corresponding (approximated) output. For a fancy result, we need a bit of IPython black magic ([https://ipywidgets.readthedocs.io/en/latest/]()).
# +
from ipywidgets import interact

def plot_solution(mu0, mu1):
    """Predict the field at (mu0, mu1) with the ROM and draw it on the mesh."""
    approximation = rom.predict([mu0, mu1])
    plt.figure(figsize=(8, 7))
    plt.triplot(triang, 'b-', lw=0.1)
    plt.tripcolor(triang, approximation)
    plt.colorbar()

interact(plot_solution, mu0=8, mu1=1);
# -
# ## Error Approximation & Improvement
#
# At the moment, our database is composed of 8 snapshots. We would like an idea of the approximation accuracy we are able to reach with these high-fidelity solutions. Using the *leave-one-out* strategy, an error is computed for each parametric point in our database and these values are returned as an array.
# Leave-one-out error estimate for every training parameter point.
for sample in zip(rom.database.parameters, rom.loo_error()):
    print(*sample)
# Moreover, we can use the information about the errors to locate the parametric points where we have to compute the new high-fidelity solutions and add these to the database in order to optimally improve the accuracy.
rom.optimal_mu()
# These function can be used to achieve the wanted (estimated) accuracy.
| tutorials/tutorial-1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <table> <tr>
# <td style="background-color:#ffffff;">
# <a href="http://qworld.lu.lv" target="_blank"><img src="../images/qworld.jpg" width="25%" align="left"> </a></td>
# <td style="background-color:#ffffff;vertical-align:bottom;text-align:right;">
# prepared by <a href="http://abu.lu.lv" target="_blank"><NAME></a> (<a href="http://qworld.lu.lv/index.php/qlatvia/" target="_blank">QLatvia</a>)
# </td>
# </tr></table>
# <table width="100%"><tr><td style="color:#bbbbbb;background-color:#ffffff;font-size:11px;font-style:italic;text-align:right;">This cell contains some macros. If there is a problem with displaying mathematical formulas, please run this cell to load these macros. </td></tr></table>
# $ \newcommand{\bra}[1]{\langle #1|} $
# $ \newcommand{\ket}[1]{|#1\rangle} $
# $ \newcommand{\braket}[2]{\langle #1|#2\rangle} $
# $ \newcommand{\dot}[2]{ #1 \cdot #2} $
# $ \newcommand{\biginner}[2]{\left\langle #1,#2\right\rangle} $
# $ \newcommand{\mymatrix}[2]{\left( \begin{array}{#1} #2\end{array} \right)} $
# $ \newcommand{\myvector}[1]{\mymatrix{c}{#1}} $
# $ \newcommand{\myrvector}[1]{\mymatrix{r}{#1}} $
# $ \newcommand{\mypar}[1]{\left( #1 \right)} $
# $ \newcommand{\mybigpar}[1]{ \Big( #1 \Big)} $
# $ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $
# $ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $
# $ \newcommand{\onehalf}{\frac{1}{2}} $
# $ \newcommand{\donehalf}{\dfrac{1}{2}} $
# $ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $
# $ \newcommand{\vzero}{\myvector{1\\0}} $
# $ \newcommand{\vone}{\myvector{0\\1}} $
# $ \newcommand{\stateplus}{\myvector{ \sqrttwo \\ \sqrttwo } } $
# $ \newcommand{\stateminus}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $
# $ \newcommand{\myarray}[2]{ \begin{array}{#1}#2\end{array}} $
# $ \newcommand{\X}{ \mymatrix{cc}{0 & 1 \\ 1 & 0} } $
# $ \newcommand{\I}{ \mymatrix{rr}{1 & 0 \\ 0 & 1} } $
# $ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $
# $ \newcommand{\Htwo}{ \mymatrix{rrrr}{ \frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} } } $
# $ \newcommand{\CNOT}{ \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $
# $ \newcommand{\norm}[1]{ \left\lVert #1 \right\rVert } $
# $ \newcommand{\pstate}[1]{ \lceil \mspace{-1mu} #1 \mspace{-1.5mu} \rfloor } $
# <h2> <font color="blue"> Solutions for </font>Probabilistic States</h2>
# <a id="task2"></a>
# <h3> Task 2 </h3>
#
# Suppose that Fyodor hiddenly rolls a loaded (tricky) dice with the bias
#
# $$ Pr(1):Pr(2):Pr(3):Pr(4):Pr(5):Pr(6) = 7:5:4:2:6:1 . $$
#
# Represent your information on the result as a column vector. Remark that the size of your column should be 6.
#
# You may use python for your calculations.
# <h3>Solution</h3>
# +
# all portions are stored in a list
all_portions = [7, 5, 4, 2, 6, 1]

# calculate the total portions (built-in sum replaces the manual loop)
total_portion = sum(all_portions)
print("total is", total_portion)

# find the weight of one portion
one_portion = 1 / total_portion
print("the weight of one portion is", one_portion)
print()  # print an empty line

# now we can calculate the probabilities of getting 1,2,3,4,5, or 6
for face, portion in enumerate(all_portions, start=1):
    print("the probability of getting", face, "is", one_portion * portion)
# -
# <a id="task3"></a>
# <h3> Task 3 </h3>
#
# For a system with 4 states, randomly create a probabilistic state, and print its entries, e.g., $ 0.16~~0.17~~0.02~~0.65 $.
#
# <i>Hint: You may pick your random numbers between 0 and 100 (or 1000), and then normalize each value by dividing the summation of all numbers.</i>
# <h3>Solution</h3>
# +
# we randomly create a probabilistic state
#
# we should be careful about two things:
#     1. a probability value must be between 0 and 1
#     2. the total probability must be 1
#
# we use a list of size 4, initialized to zeros
my_state = [0, 0, 0, 0]
normalization_factor = 0  # this will be the summation of the four raw values

# we pick four random values between 0 and 100
from random import randrange
while normalization_factor == 0:  # the normalization factor cannot be zero
    for i in range(4):
        my_state[i] = randrange(101)  # pick a random value between 0 and (101-1)
        normalization_factor += my_state[i]
print("the random values before the normalization", my_state)

# normalize each value so the entries add up to 1
for i in range(4):
    my_state[i] = my_state[i] / normalization_factor
print("the random values after the normalization", my_state)

# find their summation (renamed from `sum`, which shadowed the built-in)
total = sum(my_state)
print("the summation is", total)
# -
| bronze/B12_Probabilistic_States_Solutions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### 闭包(Closure)
#
# > 在计算机科学中,闭包(英语:Closure),又称词法闭包(Lexical Closure)或函数闭包(function closures),是引用了自由变量的函数。这个被引用的自由变量将和这个函数一同存在,即使已经离开了创造它的环境也不例外。
# [[维基百科::闭包(计算机科学)](https://zh.wikipedia.org/wiki/闭包_%28计算机科学%29)]
#
# [0x02 Python 中的函数式编程](https://github.com/rainyear/pytips/blob/master/Markdowns/2016-03-08-Functional-Programming-in-Python.md) 本来也应该包括闭包的概念,但是我觉得闭包更重要的是对**作用域(Scope)**的理解,因此把它单独列出来,同时可以理顺一下 Python 的作用域规则。
#
# 闭包的概念最早出现在函数式编程语言中,后来被一些命令式编程语言所借鉴。尤其是在一些函数作为一等公民的语言中,例如JavaScript就经常用到(在JavaScript中函数几乎可以当做“特等公民”看待),我之前也写过一篇关于JavaScript闭包的文章([图解Javascript上下文与作用域](http://blog.rainy.im/2015/07/04/scope-chain-and-prototype-chain-in-js/)),实际上闭包并不是太复杂的概念,但是可以借助闭包更好地理解不同语言的作用域规则。
#
# #### 命名空间与作用域
#
# [0x00 The Zen of Python](https://github.com/rainyear/pytips/blob/master/Markdowns/2016-03-06-The-Zen-of-Python.md)的最后一句重点强调命名空间的概念,我们可以把命名空间看做一个大型的字典类型(Dict),里面包含了所有变量的名字和值的映射关系。在 Python 中,作用域实际上可以看做是“**在当前上下文的位置,获取命名空间变量的规则**”。在 Python 代码执行的任意位置,都至少存在三层嵌套的作用域:
#
# 1. 最内层作用域,最早搜索,包含所有局部变量**(Python 默认所有变量声明均为局部变量)**
# 2. 所有包含当前上下文的外层函数的作用域,由内而外依次搜索,这里包含的是**非局部**也**非全局**的变量
# 3. 一直向上搜索,直到当前模块的全局变量
# 4. 最外层,最后搜索的,内置(built-in)变量
#
# 在任意执行位置,可以将作用域看成是对下面这样一个命名空间的搜索:
# The lookup order pictured as a nested namespace: search starts at the
# innermost "local" level and proceeds outwards to the interpreter built-ins.
scopes = {
    "local": {
        "locals": None,
        "non-local": {
            "locals": None,
            "global": {
                "locals": None,
                "built-in": ["built-ins"],
            },
        },
    },
}
# 除了默认的局部变量声明方式,Python 还有`global`和`nonlocal`两种类型的声明(**`nonlocal`是Python 3.x之后才有,2.7没有**),其中 `global` 指定的变量直接**指向**(3)当前模块的全局变量,而`nonlocal`则指向(2)最内层之外,`global`以内的变量。这里需要强调指向(references and assignments)的原因是,普通的局部变量对最内层局部作用域之外只有**只读(read-only)**的访问权限,比如下面的例子:
# NOTE: this cell deliberately raises UnboundLocalError — the assignment in
# `x += 1` makes `x` local to main(), so it is read before being bound.
x = 100
def main():
    x += 1
    print(x)
main()
# 这里抛出`UnboundLocalError`,是因为`main()`函数内部的作用域对于全局变量`x`仅有只读权限,想要在`main()`中对`x`进行改变,不会影响全局变量,而是会创建一个新的局部变量,显然无法对还未创建的局部变量直接使用`x += 1`。如果想要获得全局变量的完全引用,则需要`global`声明:
# +
x = 100
def main():
    global x  # bind `x` to the module-level variable, granting write access
    x += 1
    print(x)
main()
print(x)  # the global variable has been changed (prints 101)
# #### Python 闭包
#
# 到这里基本上已经了解了 Python 作用域的规则,那么我们来仿照 JavaScript 写一个计数器的闭包:
# +
"""
/* JavaScript Closure example */
var inc = function(){
var x = 0;
return function(){
console.log(x++);
};
};
var inc1 = inc()
var inc2 = inc()
"""
# Python 3.5
def inc():
    # Each call to inc() creates a fresh enclosed `x`, so every returned
    # counter advances independently of the others.
    x = 0
    def inner():
        nonlocal x  # grants write access to the enclosing (non-global) `x`
        x += 1
        print(x)
    return inner
inc1 = inc()
inc2 = inc()
inc1()  # 1
inc1()  # 2
inc1()  # 3
inc2()  # 1 -- inc2 holds its own independent closure state
# -
# 对于还没有`nonlocal`关键字的 Python 2.7,可以通过一点小技巧来规避局部作用域只读的限制:
# +
# Python 2.7
def inc():
    # Python 2 has no `nonlocal`, so wrap the counter in a one-element list:
    # the name `x` is only *read* (which is allowed from the inner scope),
    # while the list's contents are mutated in place.
    x = [0]
    def inner():
        x[0] += 1
        print(x[0])
    return inner
inc1 = inc()
inc2 = inc()
inc1()
inc1()
inc1()
inc2()
# -
# 上面的例子中,`inc1()`是在全局环境下执行的,虽然全局环境是不能向下获取到`inc()`中的局部变量`x`的,但是我们返回了一个`inc()`内部的函数`inner()`,而`inner()`对`inc()`中的局部变量是有访问权限的。也就是说`inner()`将`inc()`内的局部作用域打包送给了`inc1`和`inc2`,从而使它们各自独立拥有了一块封闭起来的作用域,不受全局变量或者任何其它运行环境的影响,因此称为闭包。
#
# 闭包函数都有一个`__closure__`属性,其中包含了它所引用的上层作用域中的变量:
# Each closure exposes its captured variables via __closure__; cell_contents
# is the enclosed list from the matching inc() call above.
print(inc1.__closure__[0].cell_contents)
print(inc2.__closure__[0].cell_contents)
# #### 参考
#
# 1. [9.2. Python Scopes and Namespaces](https://docs.python.org/3/tutorial/classes.html#python-scopes-and-namespaces)
# 2. [Visualize Python Execution](http://www.pythontutor.com/visualize.html#mode=edit)
# 3. [Wikipedia::Closure](https://en.wikipedia.org/wiki/Closure_%28computer_programming%29)
| Tips/2016-03-10-Scope-and-Closure.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Forecasting I: univariate, heavy tailed
#
# This tutorial introduces the [pyro.contrib.forecast](http://docs.pyro.ai/en/latest/contrib.forecast.html) module, a framework for forecasting with Pyro models. This tutorial covers only univariate models and simple likelihoods. This tutorial assumes the reader is already familiar with [SVI](http://pyro.ai/examples/svi_part_ii.html) and [tensor shapes](http://pyro.ai/examples/tensor_shapes.html).
#
# See also:
#
# - [Forecasting II: state space models](http://pyro.ai/examples/forecasting_ii.html)
# - [Forecasting III: hierarchical models](http://pyro.ai/examples/forecasting_iii.html)
#
# #### Summary
#
# - To create a forecasting model:
# 1. Create a subclass of the [ForecastingModel](http://docs.pyro.ai/en/latest/contrib.forecast.html#pyro.contrib.forecast.forecaster.ForecastingModel) class.
# 2. Implement the [.model(zero_data, covariates)](http://docs.pyro.ai/en/latest/contrib.forecast.html#pyro.contrib.forecast.forecaster.ForecastingModel.model) method using standard Pyro syntax.
# 3. Sample all time-local variables inside the [self.time_plate](http://docs.pyro.ai/en/latest/contrib.forecast.html#pyro.contrib.forecast.forecaster.ForecastingModel.time_plate) context.
# 4. Finally call the [.predict(noise_dist, prediction)](http://docs.pyro.ai/en/latest/contrib.forecast.html#pyro.contrib.forecast.forecaster.ForecastingModel.predict) method.
# - To train a forecasting model, create a [Forecaster](http://docs.pyro.ai/en/latest/contrib.forecast.html#pyro.contrib.forecast.forecaster.Forecaster) object.
# - Training can be flaky, you'll need to tune hyperparameters and randomly restart.
# - Reparameterization can help learning, e.g. [LocScaleReparam](http://docs.pyro.ai/en/latest/infer.reparam.html#pyro.infer.reparam.loc_scale.LocScaleReparam).
# - To forecast the future, draw samples from a `Forecaster` object conditioned on data and covariates.
# - To model seasonality, use helpers [periodic_features()](http://docs.pyro.ai/en/latest/ops.html#pyro.ops.tensor_utils.periodic_features), [periodic_repeat()](http://docs.pyro.ai/en/latest/ops.html#pyro.ops.tensor_utils.periodic_repeat), and [periodic_cumsum()](http://docs.pyro.ai/en/latest/ops.html#pyro.ops.tensor_utils.periodic_cumsum).
# - To model heavy-tailed data, use [Stable](http://docs.pyro.ai/en/latest/distributions.html#stable) distributions and [StableReparam](http://docs.pyro.ai/en/latest/infer.reparam.html#pyro.infer.reparam.stable.StableReparam).
# - To evaluate results, use the [backtest()](http://docs.pyro.ai/en/latest/contrib.forecast.html#pyro.contrib.forecast.evaluate.eval_crps) helper or low-level loss functions.
# +
import torch
import pyro
import pyro.distributions as dist
import pyro.poutine as poutine
from pyro.contrib.examples.bart import load_bart_od
from pyro.contrib.forecast import ForecastingModel, Forecaster, backtest, eval_crps
from pyro.infer.reparam import LocScaleReparam, StableReparam
from pyro.ops.tensor_utils import periodic_cumsum, periodic_repeat, periodic_features
from pyro.ops.stats import quantile
import matplotlib.pyplot as plt
# %matplotlib inline
assert pyro.__version__.startswith('1.3.0')
pyro.enable_validation(True)
pyro.set_rng_seed(20200221)
# -
# Load hourly BART origin-destination ride counts and inspect the structure.
dataset = load_bart_od()
print(dataset.keys())
print(dataset["counts"].shape)
print(" ".join(dataset["stations"]))
# ## Intro to Pyro's forecasting framework
#
# Pyro's forecasting framework consists of:
# - a [ForecastingModel](http://docs.pyro.ai/en/latest/contrib.forecast.html#pyro.contrib.forecast.forecaster.ForecastingModel) base class, whose ``.model()`` method can be implemented for custom forecasting models,
# - a [Forecaster](http://docs.pyro.ai/en/latest/contrib.forecast.html#pyro.contrib.forecast.forecaster.Forecaster) class that trains and forecasts using ``ForecastingModel``s, and
# - a [backtest()](http://docs.pyro.ai/en/latest/contrib.forecast.html#pyro.contrib.forecast.evaluate.backtest) helper to evaluate models on a number of metrics.
#
# Consider a simple univariate dataset, say weekly [BART train](https://www.bart.gov/about/reports/ridership) ridership aggregated over all stations in the network. This data is roughly logarithmic, so we log-transform for modeling.
# Truncate to whole weeks, sum all origin-destination counts per week,
# and work on the log scale.
T, O, D = dataset["counts"].shape
data = dataset["counts"][:T // (24 * 7) * 24 * 7].reshape(T // (24 * 7), -1).sum(-1).log()
data = data.unsqueeze(-1)  # trailing obs_dim of 1: the forecasting framework is multivariate
plt.figure(figsize=(9, 3))
plt.plot(data)
plt.title("Total weekly ridership")
plt.ylabel("log(# rides)")
plt.xlabel("Week after 2011-01-01")
plt.xlim(0, len(data));
# Let's start with a simple log-linear regression model, with no trend or seasonality. Note that while this example is univariate, Pyro's forecasting framework is multivariate, so we'll often need to reshape using `.unsqueeze(-1)`, `.expand([1])`, and `.to_event(1)`.
# First we need some boilerplate to create a class and define a .model() method.
class Model1(ForecastingModel):
    """Log-linear regression forecasting model (no trend or seasonality terms)."""
    # We then implement the .model() method. Since this is a generative model, it shouldn't
    # look at data; however it is convenient to see the shape of data we're supposed to
    # generate, so this inputs a zeros_like(data) tensor instead of the actual data.
    def model(self, zero_data, covariates):
        data_dim = zero_data.size(-1)  # Should be 1 in this univariate tutorial.
        feature_dim = covariates.size(-1)
        # The first part of the model is a probabilistic program to create a prediction.
        # We use the zero_data as a template for the shape of the prediction.
        bias = pyro.sample("bias", dist.Normal(0, 10).expand([data_dim]).to_event(1))
        weight = pyro.sample("weight", dist.Normal(0, 0.1).expand([feature_dim]).to_event(1))
        prediction = bias + (weight * covariates).sum(-1, keepdim=True)
        # The prediction should have the same shape as zero_data (duration, obs_dim),
        # but may have additional sample dimensions on the left.
        assert prediction.shape[-2:] == zero_data.shape
        # The next part of the model creates a likelihood or noise distribution.
        # Again we'll be Bayesian and write this as a probabilistic program with
        # priors over parameters.
        noise_scale = pyro.sample("noise_scale", dist.LogNormal(-5, 5).expand([1]).to_event(1))
        noise_dist = dist.Normal(0, noise_scale)
        # The final step is to call the .predict() method.
        self.predict(noise_dist, prediction)
# We can now train this model by creating a [Forecaster](http://docs.pyro.ai/en/latest/contrib.forecast.html#pyro.contrib.forecast.forecaster.Forecaster) object. We'll split the data into `[T0,T1)` for training and `[T1,T2)` for testing.
T0 = 0  # beginning
T2 = data.size(-2)  # end
T1 = T2 - 52  # train/test split: hold out the last 52 weeks
# %%time
pyro.set_rng_seed(1)
pyro.clear_param_store()
time = torch.arange(float(T2)) / 365
covariates = torch.stack([time], dim=-1)  # single linear-time feature
forecaster = Forecaster(Model1(), data[:T1], covariates[:T1], learning_rate=0.1)
# Next we can evaluate by drawing posterior samples from the forecaster, passing in full covariates but only partial data. We'll use Pyro's [quantile()](http://docs.pyro.ai/en/latest/ops.html#pyro.ops.stats.quantile) function to plot median and an 80% confidence interval. To evaluate fit we'll use [eval_crps()](http://docs.pyro.ai/en/latest/contrib.forecast.html#pyro.contrib.forecast.evaluate.eval_crps) to compute [Continuous Ranked Probability Score](https://www.stat.washington.edu/raftery/Research/PDF/Gneiting2007jasa.pdf); this is a good metric to assess distributional fit of a heavy-tailed distribution.
# +
# Draw posterior forecast samples; plot the median with an 80% interval.
samples = forecaster(data[:T1], covariates, num_samples=1000)
p10, p50, p90 = quantile(samples, (0.1, 0.5, 0.9)).squeeze(-1)
crps = eval_crps(samples, data[T1:])  # lower CRPS = better distributional fit
print(samples.shape, p10.shape)
plt.figure(figsize=(9, 3))
plt.fill_between(torch.arange(T1, T2), p10, p90, color="red", alpha=0.3)
plt.plot(torch.arange(T1, T2), p50, 'r-', label='forecast')
plt.plot(data, 'k-', label='truth')
plt.title("Total weekly ridership (CRPS = {:0.3g})".format(crps))
plt.ylabel("log(# rides)")
plt.xlabel("Week after 2011-01-01")
plt.xlim(0, None)
plt.legend(loc="best");
# -
# Zooming in to just the forecasted region, we see this model ignores seasonal behavior.
plt.figure(figsize=(9, 3))
plt.fill_between(torch.arange(T1, T2), p10, p90, color="red", alpha=0.3)
plt.plot(torch.arange(T1, T2), p50, 'r-', label='forecast')
plt.plot(torch.arange(T1, T2), data[T1:], 'k-', label='truth')
plt.title("Total weekly ridership (CRPS = {:0.3g})".format(crps))
plt.ylabel("log(# rides)")
plt.xlabel("Week after 2011-01-01")
plt.xlim(T1, None)
plt.legend(loc="best");
# We could add a yearly seasonal component simply by adding new covariates (note we've already taken care in the model to handle `feature_dim > 1`).
# %%time
pyro.set_rng_seed(1)
pyro.clear_param_store()
time = torch.arange(float(T2)) / 365
covariates = torch.cat([time.unsqueeze(-1),
                        periodic_features(T2, 365.25 / 7)], dim=-1)  # linear trend + yearly seasonality
forecaster = Forecaster(Model1(), data[:T1], covariates[:T1], learning_rate=0.1)
# +
samples = forecaster(data[:T1], covariates, num_samples=1000)
p10, p50, p90 = quantile(samples, (0.1, 0.5, 0.9)).squeeze(-1)
crps = eval_crps(samples, data[T1:])
plt.figure(figsize=(9, 3))
plt.fill_between(torch.arange(T1, T2), p10, p90, color="red", alpha=0.3)
plt.plot(torch.arange(T1, T2), p50, 'r-', label='forecast')
plt.plot(data, 'k-', label='truth')
plt.title("Total weekly ridership (CRPS = {:0.3g})".format(crps))
plt.ylabel("log(# rides)")
plt.xlabel("Week after 2011-01-01")
plt.xlim(0, None)
plt.legend(loc="best");
# -
# Zoom in on the forecast window.
plt.figure(figsize=(9, 3))
plt.fill_between(torch.arange(T1, T2), p10, p90, color="red", alpha=0.3)
plt.plot(torch.arange(T1, T2), p50, 'r-', label='forecast')
plt.plot(torch.arange(T1, T2), data[T1:], 'k-', label='truth')
plt.title("Total weekly ridership (CRPS = {:0.3g})".format(crps))
plt.ylabel("log(# rides)")
plt.xlabel("Week after 2011-01-01")
plt.xlim(T1, None)
plt.legend(loc="best");
# ## Time-local random variables: `self.time_plate`
#
# So far we've seen the ``ForecastingModel.model()`` method and ``self.predict()``. The last piece of forecasting-specific syntax is the ``self.time_plate`` context for time-local variables. To see how this works, consider changing our global linear trend model above to a local level model. Note the [poutine.reparam()](http://docs.pyro.ai/en/latest/poutine.html#pyro.poutine.handlers.reparam) handler is a general Pyro inference trick, not specific to forecasting.
class Model2(ForecastingModel):
    """Local-level model: Brownian-motion drift plus regression on covariates."""
    def model(self, zero_data, covariates):
        data_dim = zero_data.size(-1)
        feature_dim = covariates.size(-1)
        bias = pyro.sample("bias", dist.Normal(0, 10).expand([data_dim]).to_event(1))
        weight = pyro.sample("weight", dist.Normal(0, 0.1).expand([feature_dim]).to_event(1))
        # We'll sample a time-global scale parameter outside the time plate,
        # then time-local iid noise inside the time plate.
        drift_scale = pyro.sample("drift_scale",
                                  dist.LogNormal(-20, 5).expand([1]).to_event(1))
        with self.time_plate:
            # We'll use a reparameterizer to improve variational fit. The model would still be
            # correct if you removed this context manager, but the fit appears to be worse.
            with poutine.reparam(config={"drift": LocScaleReparam()}):
                drift = pyro.sample("drift", dist.Normal(zero_data, drift_scale).to_event(1))
        # After we sample the iid "drift" noise we can combine it in any time-dependent way.
        # It is important to keep everything inside the plate independent and apply dependent
        # transforms outside the plate.
        motion = drift.cumsum(-2)  # A Brownian motion.
        # The prediction now includes three terms.
        prediction = motion + bias + (weight * covariates).sum(-1, keepdim=True)
        assert prediction.shape[-2:] == zero_data.shape
        # Construct the noise distribution and predict.
        noise_scale = pyro.sample("noise_scale", dist.LogNormal(-5, 5).expand([1]).to_event(1))
        noise_dist = dist.Normal(0, noise_scale)
        self.predict(noise_dist, prediction)
# %%time
pyro.set_rng_seed(1)
pyro.clear_param_store()
time = torch.arange(float(T2)) / 365
covariates = periodic_features(T2, 365.25 / 7)  # seasonality features only; trend comes from drift
forecaster = Forecaster(Model2(), data[:T1], covariates[:T1], learning_rate=0.1)
# +
samples = forecaster(data[:T1], covariates, num_samples=1000)
p10, p50, p90 = quantile(samples, (0.1, 0.5, 0.9)).squeeze(-1)
crps = eval_crps(samples, data[T1:])
plt.figure(figsize=(9, 3))
plt.fill_between(torch.arange(T1, T2), p10, p90, color="red", alpha=0.3)
plt.plot(torch.arange(T1, T2), p50, 'r-', label='forecast')
plt.plot(data, 'k-', label='truth')
plt.title("Total weekly ridership (CRPS = {:0.3g})".format(crps))
plt.ylabel("log(# rides)")
plt.xlabel("Week after 2011-01-01")
plt.xlim(0, None)
plt.legend(loc="best");
# -
# Zoom in on the forecast window.
plt.figure(figsize=(9, 3))
plt.fill_between(torch.arange(T1, T2), p10, p90, color="red", alpha=0.3)
plt.plot(torch.arange(T1, T2), p50, 'r-', label='forecast')
plt.plot(torch.arange(T1, T2), data[T1:], 'k-', label='truth')
plt.title("Total weekly ridership (CRPS = {:0.3g})".format(crps))
plt.ylabel("log(# rides)")
plt.xlabel("Week after 2011-01-01")
plt.xlim(T1, None)
plt.legend(loc="best");
# ## Heavy-tailed noise
#
# Our final univariate model will generalize from Gaussian noise to heavy-tailed [Stable](http://docs.pyro.ai/en/latest/distributions.html#stable) noise. The only difference is the `noise_dist` which now takes two new parameters: `stability` determines tail weight and `skew` determines the relative size of positive versus negative spikes.
#
# The [Stable distribution](https://en.wikipedia.org/wiki/Stable_distribution) is a natural heavy-tailed generalization of the Normal distribution, but it is difficult to work with due to its intractable density function. Pyro implements auxiliary variable methods for working with Stable distributions. To inform Pyro to use those auxiliary variable methods, we wrap the final line in [poutine.reparam()](http://docs.pyro.ai/en/latest/poutine.html#pyro.poutine.handlers.reparam) effect handler that applies the [StableReparam](http://docs.pyro.ai/en/latest/infer.reparam.html#pyro.infer.reparam.stable.StableReparam) transform to the implicit observe site named "residual". You can use Stable distributions for other sites by specifying `config={"my_site_name": StableReparam()}`.
class Model3(ForecastingModel):
    """Local-level model with heavy-tailed Stable observation noise."""
    def model(self, zero_data, covariates):
        data_dim = zero_data.size(-1)
        feature_dim = covariates.size(-1)
        bias = pyro.sample("bias", dist.Normal(0, 10).expand([data_dim]).to_event(1))
        weight = pyro.sample("weight", dist.Normal(0, 0.1).expand([feature_dim]).to_event(1))
        drift_scale = pyro.sample("drift_scale", dist.LogNormal(-20, 5).expand([1]).to_event(1))
        with self.time_plate:
            with poutine.reparam(config={"drift": LocScaleReparam()}):
                drift = pyro.sample("drift", dist.Normal(zero_data, drift_scale).to_event(1))
        motion = drift.cumsum(-2)  # A Brownian motion.
        prediction = motion + bias + (weight * covariates).sum(-1, keepdim=True)
        assert prediction.shape[-2:] == zero_data.shape
        # The next part of the model creates a likelihood or noise distribution.
        # Again we'll be Bayesian and write this as a probabilistic program with
        # priors over parameters.
        stability = pyro.sample("noise_stability", dist.Uniform(1, 2).expand([1]).to_event(1))
        skew = pyro.sample("noise_skew", dist.Uniform(-1, 1).expand([1]).to_event(1))
        scale = pyro.sample("noise_scale", dist.LogNormal(-5, 5).expand([1]).to_event(1))
        noise_dist = dist.Stable(stability, skew, scale)
        # We need to use a reparameterizer to handle the Stable distribution.
        # Note "residual" is the name of Pyro's internal sample site in self.predict().
        with poutine.reparam(config={"residual": StableReparam()}):
            self.predict(noise_dist, prediction)
# %%time
pyro.set_rng_seed(2)
pyro.clear_param_store()
time = torch.arange(float(T2)) / 365
covariates = periodic_features(T2, 365.25 / 7)
forecaster = Forecaster(Model3(), data[:T1], covariates[:T1], learning_rate=0.1)
# Print the scalar posterior medians learned by the variational guide.
for name, value in forecaster.guide.median().items():
    if value.numel() == 1:
        print("{} = {:0.4g}".format(name, value.item()))
# +
samples = forecaster(data[:T1], covariates, num_samples=1000)
p10, p50, p90 = quantile(samples, (0.1, 0.5, 0.9)).squeeze(-1)
crps = eval_crps(samples, data[T1:])
plt.figure(figsize=(9, 3))
plt.fill_between(torch.arange(T1, T2), p10, p90, color="red", alpha=0.3)
plt.plot(torch.arange(T1, T2), p50, 'r-', label='forecast')
plt.plot(data, 'k-', label='truth')
plt.title("Total weekly ridership (CRPS = {:0.3g})".format(crps))
plt.ylabel("log(# rides)")
plt.xlabel("Week after 2011-01-01")
plt.xlim(0, None)
plt.legend(loc="best");
# -
# Zoom in on the forecast window.
plt.figure(figsize=(9, 3))
plt.fill_between(torch.arange(T1, T2), p10, p90, color="red", alpha=0.3)
plt.plot(torch.arange(T1, T2), p50, 'r-', label='forecast')
plt.plot(torch.arange(T1, T2), data[T1:], 'k-', label='truth')
plt.title("Total weekly ridership (CRPS = {:0.3g})".format(crps))
plt.ylabel("log(# rides)")
plt.xlabel("Week after 2011-01-01")
plt.xlim(T1, None)
plt.legend(loc="best");
# ## Backtesting
#
# To compare our Gaussian `Model2` and Stable `Model3` we'll use a simple [backtest()](http://docs.pyro.ai/en/latest/contrib.forecast.html#pyro.contrib.forecast.evaluate.backtest) helper. This helper by default evaluates three metrics: [CRPS](http://docs.pyro.ai/en/latest/contrib.forecast.html#pyro.contrib.forecast.evaluate.eval_crps) assesses distributional accuracy of heavy-tailed data, [MAE](http://docs.pyro.ai/en/latest/contrib.forecast.html#pyro.contrib.forecast.evaluate.eval_mae) assesses point accuracy of heavy-tailed data, and [RMSE](http://docs.pyro.ai/en/latest/contrib.forecast.html#pyro.contrib.forecast.evaluate.eval_rmse) assesses accuracy of Normal-tailed data. The one nuance here is to set `warm_start=True` to reduce the need for random restarts.
# %%time
pyro.set_rng_seed(1)
pyro.clear_param_store()
# Rolling-origin evaluation of the Gaussian model over multiple train/test windows.
windows2 = backtest(data, covariates, Model2,
                    min_train_window=104, test_window=52, stride=26,
                    forecaster_options={"learning_rate": 0.1, "log_every": 1000,
                                        "warm_start": True})
# %%time
pyro.set_rng_seed(1)
pyro.clear_param_store()
# Same evaluation for the heavy-tailed Stable model.
windows3 = backtest(data, covariates, Model3,
                    min_train_window=104, test_window=52, stride=26,
                    forecaster_options={"learning_rate": 0.1, "log_every": 1000,
                                        "warm_start": True})
# Compare per-window CRPS / MAE / RMSE for the two models.
fig, axes = plt.subplots(3, figsize=(8, 6), sharex=True)
axes[0].set_title("Gaussian versus Stable accuracy over {} windows".format(len(windows2)))
axes[0].plot([w["crps"] for w in windows2], "b<", label="Gaussian")
axes[0].plot([w["crps"] for w in windows3], "r>", label="Stable")
axes[0].set_ylabel("CRPS")
axes[1].plot([w["mae"] for w in windows2], "b<", label="Gaussian")
axes[1].plot([w["mae"] for w in windows3], "r>", label="Stable")
axes[1].set_ylabel("MAE")
axes[2].plot([w["rmse"] for w in windows2], "b<", label="Gaussian")
axes[2].plot([w["rmse"] for w in windows3], "r>", label="Stable")
axes[2].set_ylabel("RMSE")
axes[0].legend(loc="best")
plt.tight_layout()
# Note that RMSE is a poor metric for evaluating heavy-tailed data. Our stable model has such heavy tails that its variance is infinite, so we cannot expect RMSE to converge, hence occasional outlying points.
| tutorial/source/forecasting_i.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="Rer_47I95Kns" colab_type="text"
# # GPT2-Telegram-Chatbot
# A GPT-2 Telegram chatbot that's been relatively tuned for chatting. Feel free to make me PRs and I'll check out your code! The bot isn't 100% accurate all the time (why I coded in a /retry function.)
#
# Since the bot consumes so much memory, I have it programmed in a round-robin sort of mode. Each input will reset a timer on your account ID, once the timer runs down the bot is free for other users to use. You will be notified when the timer runs down, and other users can see how much time is left and if the bot is in use.
# + id="4pF534aJ5IP9" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="ed106c98-3852-4c7d-d4ba-59a0991c45b4"
# !pip3 install tensorflow-gpu===1.15.0
# !pip3 install tqdm
# !pip3 install regex
# !pip3 install fire
# !pip3 install python-telegram-bot
# + [markdown] id="PrJpk10z5VdP" colab_type="text"
# ## Clone the repository
# + id="56Tg6ouJ5yEM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 306} outputId="8a1f5e07-a7f4-49ab-e1d6-8e474b84cfbf"
# !git clone --depth=1 https://github.com/paper2code/GPT2-Telegram-Chatbot /content/gpt2telegram
# !ls -l /content/
# %cd /content/gpt2telegram
# !ls -l
# + [markdown] id="64Jbex7t6I40" colab_type="text"
# ## Download model
# + id="6FrBB_px6D_P" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 136} outputId="ec8710e5-fcbe-441d-a903-88259d6fda28"
# !python3 download_model.py 1558M
# + [markdown] id="OsZKFiVs6Sr3" colab_type="text"
# ## Setup the Telegram Bot
# + id="PrUQtJJv6WJE" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="a9cb1c65-bfa7-49aa-a044-7a608b45280e"
# !sed -i -e 's/BOTKEYBOTKEYBOTKEYBOTKEYBOTKEY/YOUR_TOKEN_HERE/' src/GPT2-Learning.py
# !cat src/GPT2-Learning.py
# + id="9dcAf0OJ7BXy" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="9610082b-1627-499d-f04a-3f3aea89905e"
# !./start
| notebooks/GPT2_Telegram_Chatbot.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Identity Scales
#
# Use data values **as** visual values.
# +
import pandas as pd
from lets_plot import *
LetsPlot.setup_html()
# -
df = pd.read_csv('https://raw.githubusercontent.com/JetBrains/lets-plot-docs/master/data/mpg.csv')
# +
# Same bar chart twice: by default 'cyl' is mapped through a size scale;
# with scale_size_identity() the raw cyl values are used as sizes directly.
p = ggplot(df, aes(x='cyl')) + \
    geom_bar(aes(size='cyl'), color='#54278f', fill='#f2f0f7', show_legend=False)
p1 = p + ggtitle('Default')
p2 = p + scale_size_identity() + ggtitle('With Scale')
w, h = 400, 300
bunch = GGBunch()
bunch.add_plot(p1, 0, 0, w, h)
bunch.add_plot(p2, w, 0, w, h)
bunch.show()
| source/examples/basics/gog/identity_scales.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: erlotinib-venv
# language: python
# name: erlotinib-venv
# ---
# # Combine EC all drugs data
#
# Source of data:
#
# 1. Active: https://ec.europa.eu/health/documents/community-register/html/reg_od_act.htm?sort=n
# 2. Withdrawn, suspended, expired, not renewed: https://ec.europa.eu/health/documents/community-register/html/reg_hum_nact.htm?sort=a
# 3. Rejected: https://ec.europa.eu/health/documents/community-register/html/reg_hum_refus.htm
# +
import os
import numpy as np
import pandas as pd
path = os.path.dirname(os.path.dirname(os.getcwd()))  # repository root (two levels up)
data_active = pd.read_excel(path + '/data/ec_all_drugs_active.xlsx', header=2)
data_active
# -
path = os.path.dirname(os.path.dirname(os.getcwd()))
data_not_active = pd.read_excel(path + '/data/ec_all_drugs_withdrawn_suspended_expired_not_renewed.xlsx', header=2)
data_not_active
path = os.path.dirname(os.path.dirname(os.getcwd()))
data_rejected = pd.read_excel(path + '/data/ec_all_drugs_refused.xlsx', header=2)
data_rejected
# ## Label datasets and join them together
# +
# Tag each source table with its regulatory status before concatenating.
data_active['Status'] = 'Active'
data_not_active['Status'] = 'Not active'
data_rejected['Status'] = 'Rejected'
data = pd.concat([data_active, data_not_active, data_rejected])
data
# -
# ## Export data
path = os.path.dirname(os.path.dirname(os.getcwd()))
name = '/data/ec_all_drugs.csv'
data.to_csv(path + name, index=False)  # index=False: drop the duplicated row index from concat
| analysis/prepare_ec_data/prepare_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Learning Objectives
#
# At the end of this class, you will be able to...
#
# - Write down functions to compute mean, median and mode
#
# - Describe variance and what it reflects
#
# - Express the meaning of percentile
# # Warmup
#
# - What percent of passengers on the Titanic embarked at C?
#
# - What percent of female passengers in Titanic embarked at C?
#
# Work on your own, then compare solutions with a neighbor and discuss
# # An Introduction to Descriptive Statistics
#
# - At this point in our course, we've had plenty of time, experience, and practice in manipulating our data.
#
# - However, to really _understand_ our data and underlying patterns across it, we need to dive a layer deeper into the mathematics behind data mechanics and dynamics.
#
# - In order to be able to draw conclusions from our data, we need to be able to **describe and interpret our data**.
#
# - This will become essential for more advanced data science concepts in data visualization, machine learning, and deep learning.
# ## Measures of Central Tendency
#
# A [**Central Tendency**](https://en.wikipedia.org/wiki/Central_tendency) is a central (typical) value for a probability distribution
#
# - In statistics, we often find that describing data by _averages_ allows us to more often make more powerful assertions regarding the data as a whole.
#
# - We often use **three key measures of central tendency** to help describe the **centroid** (arithmetic mean trend across a distribution) of our data:
# - **Mean**
# - **Median**
# - **Mode**
# ### The mean is the raw average value across our data.
#
# Calculating the mean is simple: _compute the sum of all values across our data and divide by the total number of values in our dataset_.
#
# We've been using the mean for years and years, but such a surprisingly simple arithmetic calculation turns out to have massive implications across being able to critically understand and break down complex datasets!
#
# ### Activity: _Write a function to compute the mean from an arbitrary dataset._
# +
import numpy as np
data = np.array([1, 3, 5, 2, 3, 7, 8, 4, 10, 0, 6, 7, 3, 0, 3, 0, 5, 7, 10, 1, 4, 9, 3])

def compute_mean(dataset):
    """Return the arithmetic mean of *dataset*.

    The mean is the sum of all values divided by the number of values.
    Works for any non-empty sequence of numbers (list, tuple, np.array).
    The original TODO stub implicitly returned None.
    """
    return sum(dataset) / len(dataset)

compute_mean(data)
# -
# ## Activity (Titanic):
#
# - What is the average age of male passengers that survived?
#
# Once you've found the average, talk with a neighbor who has also found the average, and compare how you each came about to your solution
# ### The median is the "middle value" or midpoint across our data.
#
# Determining the median is as simple as it sounds: _find the data value lying in the exact middle of our dataset_.
#
# One critical exception occurs when our data has an even number of values and thus has **two values** at its center: _in these cases, find the **mean** value of the two medians to obtain the true median across our data_.
#
# **Remember:** the median can only be calculated across _sorted data_!
#
# If data is distributed in a non-normal manner, then we can learn a great deal from interpreting what the exact median value of our dataset is.
# ### Activity: _Write a function to compute the median from an arbitrary dataset._
#
# Once you write the function, compare with a neighbor's implementation
# +
data = np.array([1, 3, 5, 2, 3, 7, 8, 4, 10, 0, 6, 7, 3, 0, 3, 0, 5, 7, 10, 1, 4, 9, 3])

def compute_median(dataset):
    """Return the median (middle value) of *dataset*.

    The data is sorted first; for an even number of values the median is
    the mean of the two central values. Returns None for an empty dataset.
    The original TODO stub returned None in every branch.
    """
    count = len(dataset)
    if count < 1:
        # No values -> no median.
        return None
    ordered = sorted(dataset)
    middle = count // 2
    if count % 2 == 1:
        # Odd count: the single central value is the median.
        return ordered[middle]
    else:
        # Even count: average the two central values.
        return (ordered[middle - 1] + ordered[middle]) / 2

compute_median(data)
# -
# ### The mode is the most commonly occurring value or feature across our data.
#
# Determining the mode is relatively simple: _find the value that occurs most frequently across our data_.
#
# Remember that if all values across our data are unique and only occur once, then our data **has no mode**!
#
# The mode is also an interesting measure of _central tendency_ in that it can be applied towards categorical (non-numerical) data; one can find frequently occurring categories without running any calculations.
# ### Activity: _Write a function to compute the mode from an arbitrary dataset._
#
# Once you write the function, compare with a neighbor's implementation
# +
# NOTE: Trickier than it looks!
data = np.array([1, 3, 5, 2, 3, 7, 8, 4, 10, 0, 6, 7, 3, 0, 3, 0, 5, 7, 10, 1, 4, 9, 3])

def compute_mode(dataset):
    """Return the most frequently occurring value in *dataset*.

    Returns None when the dataset is empty or every value occurs exactly
    once (i.e. the data has no mode). Ties are broken by first occurrence,
    per Counter.most_common ordering. The original TODO stub returned None.
    """
    from collections import Counter  # local import: histogram of value -> frequency
    counts = Counter(dataset)
    if not counts:
        return None
    value, frequency = counts.most_common(1)[0]
    # All-unique data has no mode.
    return value if frequency > 1 else None

compute_mode(data)
# -
# There we have it!
#
# Three measures of central tendency that are critically important to understanding the distribution of our data.
#
# In future classes on distributions and introductory inferential statistics, we'll talk more about exactly **why** these measures are so important.
# ## Measures of Spread and Variance
#
# Like our friends in the central tendency community, measures of spread and variance do their best to describe patterns across our data as a whole.
#
# However, unlike measures of central tendency, which focus on the distribution of our data towards an arithmetic centroid, measures of spread and variance talk about the shape and layout of our data all across the board!
#
# In this course, there are **two key measures of spread and variance** to help describe the shape of our data:
# - **Range**
# - **Variance**, **Standard Deviation**
#
# ## What is Range?
#
# **Range** is the difference between the highest and lowest values in a data set. It is one of the simplest measures of **spread** (the extent to which a distribution is stretched or squeezed). We'll use variance/standard deviation to help give more information around all of this!
# ## What is standard deviation or variance?
#
# Let's learn it by example:
#
# - We measured the number of rainy days during Fall in three different cities in the last 5 years:
# +
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
df = pd.DataFrame({'Rainy':[29,28,32,35,36,12,18,30,45,55, 32,32,32,32,32], 'City':['City_A']*5 + ['City_B']*5 + ['City_C']*5})
df
# -
# ## What is the mean of rainy days for City A, B and C?
# +
print(np.mean(df[df['City'] == 'City_A']['Rainy']))
print(np.mean(df[df['City'] == 'City_B']['Rainy']))
print(np.mean(df[df['City'] == 'City_C']['Rainy']))
# -
# ## Which city has more variation?
#
# **Standard deviation (std), which is the square root of variance,** can capture the variations in different arrays
#
# [Docs on the std function](https://docs.scipy.org/doc/numpy/reference/generated/numpy.std.html) from Numpy
# +
print(np.std(df[df['City'] == 'City_A']['Rainy']))
print(np.std(df[df['City'] == 'City_B']['Rainy']))
print(np.std(df[df['City'] == 'City_C']['Rainy']))
# -
# ## Let's plot the variations in the data
sns.lineplot(y='Rainy', x='City', data=df)
plt.show()
# ## Most (or majority) of the values (rainy days) in Cities A, B and C are between mean - std and mean + std
# +
print([32 - np.std(df[df['City'] == 'City_A']['Rainy']), 32 + np.std(df[df['City'] == 'City_A']['Rainy'])])
print([32 - np.std(df[df['City'] == 'City_B']['Rainy']), 32 + np.std(df[df['City'] == 'City_B']['Rainy'])])
print([32 - np.std(df[df['City'] == 'City_C']['Rainy']), 32 + np.std(df[df['City'] == 'City_C']['Rainy'])])
# -
# # Percentile
#
# The value below which a percentage of data falls.
#
# ## Activity - Compute 75% Percentile for Fare in Titanic
# We want to compute:
#
# - What fare value did 75% of all the fare values fall below
#
# How would we do this?
#
# **Hint:** Use Numpy's [percentile](https://docs.scipy.org/doc/numpy/reference/generated/numpy.percentile.html) function
#
# When you're done, compare implementations with a neighbor
df = pd.read_csv("Datasets/titanic.csv")
df
np.percentile(df['Fare'], 75)
# ## Verify that 75 percent of passengers paid less than 31.0 Dollar
# +
# Slice the data based on Fare for those paid less than 31 Dollar
# BUG FIX: the original referenced an undefined name `ls_fare`; the fares
# live in the 'Fare' column of the dataframe loaded above.
fare_75th = np.percentile(df['Fare'], 75)
numbers_below_percentile = df[df['Fare'] <= fare_75th]
# Compute the size of the sliced dataframe and divide over all number of passengers
pr_below_percentile = len(numbers_below_percentile) / len(df)
pr_below_percentile
| Notebooks/Applied_Descriptive_Statistics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
import numpy as np
import pandas as pd
import os
import sys
import matplotlib
# %matplotlib inline
from pandas import Series, DataFrame
# +
# !cat ../data/ch3/ex1.csv
# # !type ../data/ch3/ex1.csv
# -
# ## Check version
pd.__version__
# ## Simplest Way
data = pd.read_csv('../data/ch3/ex1.csv')
data
# ## If no header
# +
# !cat ../data/ch3/ex2.csv
# # !type ../data/ch3/ex2.csv
# -
data3 = pd.read_csv('../data/ch3/ex2.csv', header = None)
# data2 = pd.read_csv('../data/ch3/ex2.csv', names=['a', 'b', 'c', 'd', 'message'])
data3
# ## Only read specific columns
datan = pd.read_csv('../data/ch3/ex1.csv', usecols = ['a', 'b', 'c'])
datan
# ## Specify Index
names = ['a', 'b', 'c', 'd', 'message']
data4 = pd.read_csv('../data/ch3/ex2.csv', names=names, index_col='message')
data4
# ## Only handle specified columns
data.a
data['a']
data[data.columns[[1, 2]]]
# ## Missing Value Handling
# +
# !cat ../data/ch3/ex5.csv
# #!type ../data/ch3/ex5.csv
# -
data5 = pd.read_csv('../data/ch3/ex5.csv')
# print(data5.isnull())
# print(data5.isnull().values)
# Parenthesized print works on both Python 2 and Python 3; the Python-2-only
# `print expr` statement form is a SyntaxError on Python 3.
print(data5[data5.isnull().values == True])
data6 = pd.read_csv('../data/ch3/ex5.csv', na_values = ['world'])
data6
sentinels = {'message':['foo', 'NA'], 'something':['two']}
data7 = pd.read_csv('../data/ch3/ex5.csv', na_values = sentinels)
data7
# ## Read Big File
data8 = pd.read_csv('../data/ch3/ex6.csv', nrows = 5)
data8
# chunksize specifies the number of rows in each chunk
chunker = pd.read_csv('../data/ch3/ex6.csv', chunksize = 1000)
tot = Series([])
for piece in chunker:
tot = tot.add(piece['key'].value_counts(), fill_value = 0)
tot = tot.sort_values(ascending = False)
tot
# ## Write to CSV
data.to_csv('ch3-out1.csv')
# +
data.to_csv('ch3-out2.csv', index = False)
data.to_csv('ch3-out3.csv', index = False, header = False, columns = ['a', 'b', 'c'])
data5.to_csv('../data/ch3/ex1.tsv', index = False, header = False, sep = '\t')
# !cat '../data/ch3/ex1.tsv'
# -
# ## Read TSV
data = pd.read_csv('../data/ch3/ex1.tsv', sep = '\t')
data
data = pd.read_table('../data/ch3/ex1.tsv')
data
data5.to_csv('ch3-out4.tsv', sep = '\t', na_rep = 'NULL', index = False, header = False)
# !cat 'ch3-out4.tsv'
| ml-workshop/src/Access-CSV&TSV-File.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.5 64-bit
# name: python3
# ---
# importing libraries to use
import pandas as pd
# Create the path to IMDB Names
resources_folder = r'../resources/'
IMDb_file = resources_folder + 'clean_Awards.csv'
# Reading the csv as a Panda dataframe
df = pd.read_csv(IMDb_file, low_memory=False)
df.head()
# filter the df for conts containing title
df_titles = df[df['const'].str.contains('tt')]
df_titles.head()
# filter the df for conts containing names
df_names = df[df['const'].str.contains('nm')]
df_names.head()
# how many records do we have
df_titles.count()
# how many records do we have
df_names.count()
# save final dataframe to a csv file
df_titles.to_csv(resources_folder + 'movie_Awards.csv', index=False)
df_names.to_csv(resources_folder + 'actor_Awards.csv', index=False)
# +
#separated and tested
| Jupyter_Notebook_Files/separateIMDb_Awards in 2 diff tables.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# https://machinelearningmastery.com/regression-tutorial-keras-deep-learning-library-python/
import numpy
import pandas
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasRegressor
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
print(tf.__version__)
# load dataset
dataframe = pandas.read_csv("housing.csv", delim_whitespace=True, header=None)
dataset = dataframe.values
# split into input (X) and output (Y) variables
X = dataset[:,0:13]
Y = dataset[:,13]
# define base model
def baseline_model():
    """Build and compile the baseline regression network.

    Architecture: 13 inputs -> Dense(13, relu) -> Dense(1, linear output).
    Returns the compiled Keras model (MSE loss, Adam optimizer), as required
    by the KerasRegressor `build_fn` contract.
    """
    # create model
    model = Sequential()
    model.add(Dense(13, input_dim=13, kernel_initializer='normal', activation='relu'))
    model.add(Dense(1, kernel_initializer='normal'))
    # Compile model; mean squared error suits the continuous housing-price target
    model.compile(loss='mean_squared_error', optimizer='adam')
    return model
# fix random seed for reproducibility
seed = 7
numpy.random.seed(seed)
# evaluate model with standardized dataset
# FIXES: `nb_epoch` was renamed to `epochs` (modern Keras ignores the old
# name, so training would silently run only 1 epoch); scikit-learn >= 0.24
# requires shuffle=True whenever random_state is passed to KFold.
estimator = KerasRegressor(build_fn=baseline_model, epochs=100, batch_size=5, verbose=0)
kfold = KFold(n_splits=10, shuffle=True, random_state=seed)
results = cross_val_score(estimator, X, Y, cv=kfold)
print("Baseline: %.2f (%.2f) MSE" % (results.mean(), results.std()))
# evaluate model with standardized dataset
numpy.random.seed(seed)
estimators = []
estimators.append(('standardize', StandardScaler()))
estimators.append(('mlp', KerasRegressor(build_fn=baseline_model, epochs=50, batch_size=5, verbose=0)))
pipeline = Pipeline(estimators)
# FIX: scikit-learn >= 0.24 requires shuffle=True whenever random_state is set
kfold = KFold(n_splits=10, shuffle=True, random_state=seed)
results = cross_val_score(pipeline, X, Y, cv=kfold)
print("Standardized: %.2f (%.2f) MSE" % (results.mean(), results.std()))
# define the model
def larger_model():
    """Build and compile a deeper regression network.

    Architecture: 13 inputs -> Dense(13, relu) -> Dense(6, relu) -> Dense(1).
    Returns the compiled Keras model (MSE loss, Adam optimizer).
    """
    # create model
    model = Sequential()
    model.add(Dense(13, input_dim=13, kernel_initializer='normal', activation='relu'))
    model.add(Dense(6, kernel_initializer='normal', activation='relu'))
    model.add(Dense(1, kernel_initializer='normal'))
    # Compile model
    model.compile(loss='mean_squared_error', optimizer='adam')
    return model
numpy.random.seed(seed)
estimators = []
estimators.append(('standardize', StandardScaler()))
estimators.append(('mlp', KerasRegressor(build_fn=larger_model, epochs=50, batch_size=5, verbose=0)))
pipeline = Pipeline(estimators)
# FIX: scikit-learn >= 0.24 requires shuffle=True whenever random_state is set
kfold = KFold(n_splits=10, shuffle=True, random_state=seed)
results = cross_val_score(pipeline, X, Y, cv=kfold)
print("Larger: %.2f (%.2f) MSE" % (results.mean(), results.std()))
# define wider model
def wider_model():
    """Build and compile a wider regression network.

    Architecture: 13 inputs -> Dense(20, relu) -> Dense(1, linear output).
    Returns the compiled Keras model (MSE loss, Adam optimizer).
    """
    # create model
    model = Sequential()
    model.add(Dense(20, input_dim=13, kernel_initializer='normal', activation='relu'))
    model.add(Dense(1, kernel_initializer='normal'))
    # Compile model
    model.compile(loss='mean_squared_error', optimizer='adam')
    return model
numpy.random.seed(seed)
estimators = []
estimators.append(('standardize', StandardScaler()))
estimators.append(('mlp', KerasRegressor(build_fn=wider_model, epochs=100, batch_size=5, verbose=0)))
pipeline = Pipeline(estimators)
# FIX: scikit-learn >= 0.24 requires shuffle=True whenever random_state is set
kfold = KFold(n_splits=10, shuffle=True, random_state=seed)
results = cross_val_score(pipeline, X, Y, cv=kfold)
print("Wider: %.2f (%.2f) MSE" % (results.mean(), results.std()))
| BostonHousingKeras.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: tf2.0
# language: python
# name: tf2.0
# ---
# +
from __future__ import print_function
from deepview import DeepView
import demo_utils as demo
from demo_utils import Net
import torch
import torch.optim as optim
import numpy as np
import time
import matplotlib.pyplot as plt
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# -
# %load_ext autoreload
# %autoreload 2
# %matplotlib qt
# ## Load the Fashion MNIST data set and train a simple ConvNet
# +
# device will be detected automatically
# Set to 'cpu' or 'cuda:0' to set the device manually
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
trainloader, testset, testloader = demo.make_FashionMNIST_dataset()
classes = ('T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot')
dim_img = 28
dim_sq = dim_img*dim_img
col_ch = 1
n_classes = len(classes)
# init the model
torch.manual_seed(42)
torch_model = Net().to(device)
optimizer = optim.Adam(torch_model.parameters(), lr=0.001)
# +
# train the model on data including backdoors
# testing on clean test set
n_backd = 600 * 8
# backdoor 'bag' as 'trousers'
backd_a = 8 # attacked class
backd_t = 1 # target class
for epoch in range(1, 10 + 1):
demo.train_backdoor(torch_model, device, trainloader, optimizer, epoch, backd_a=backd_a, backd_t=backd_t, n_backd=n_backd)
#train(model, device, trainloader, optimizer, epoch)
demo.test(torch_model, device, testloader)
# -
# ## select a data subset and add backdoors
# +
# prepare the test data
show_backdoors = 1
n_data = 600 # dataset.__len__()
# pick random instances
np.random.seed(42)
rand_posis = np.random.permutation(len(testset))[0:n_data]
# check how many instances are from our attacked class
n_attackable = 0
for i in range(0, n_data):
# load the data
data = testset.__getitem__(rand_posis[i])
if data[1] == backd_a:
n_attackable +=1
n_att = 20
print('#points from target class:', n_attackable, ', #attacking points', n_att)
att_idx = np.zeros(n_att, dtype=int)
# load the data and add backdoors
#X = torch.zeros([n_data+add_points, col_ch*dim_sq]).to(device)
X = np.empty([n_data, col_ch, dim_img, dim_img])
labs = np.empty([n_data], dtype=int)
pred_labs = np.empty([n_data], dtype=int)
if show_backdoors:
print("Displaying backdoored points with backdoor label and predicted label")
fig, axes = plt.subplots(4, round(n_att/4), figsize=(12, 8))
attacked = 0
for i in range(0, n_data):
# load the data
data = testset.__getitem__(rand_posis[i])
data_item = torch.zeros([1, col_ch, dim_img, dim_img]).to(device)
data_item[:,:,:,:] = data[0]
labs[i] = data[1]
# attack the first n_att images from attacked class
if (attacked < n_att) & (labs[i].item() == backd_a):
labs[i] = backd_t
demo.add_backdoor(data_item[0])
att_idx[attacked] = i
attacked += 1
output = torch_model(data_item)
pred_labs[i] = output.detach()[0].argmax().item()
if (data[1] == backd_a) & (labs[i].item() == backd_t) & show_backdoors:
if attacked-1 < round(n_att/4):
curr_col = attacked-1
cur_row = 0
elif attacked-1 < 2*round(n_att/4):
curr_col = attacked-1 - round(n_att/4)
cur_row = 1
elif attacked-1 < 3*round(n_att/4):
curr_col = attacked-1 - 2*round(n_att/4)
cur_row = 2
elif attacked-1 < 4*round(n_att/4):
curr_col = attacked-1 - 3*round(n_att/4)
cur_row = 3
axes[cur_row, curr_col].imshow(data_item[0,0].detach().cpu().numpy(), cmap='gray')
axes[cur_row, curr_col].axis('off')
axes[cur_row, curr_col].set_title(classes[labs[i].item()] + ", " + classes[output.detach()[0].argmax().item()])
X[i,:,:,:] = data_item[0,:,:,:].detach().cpu().numpy()
# first, load the data and add their index in the last dim
#X[i,0:-1] = torch.reshape(data_item.detach(), [col_ch*dim_sq])
#X[i,-1] = i
# -
# ## initialize and apply DeepView
# +
def visualization(image, point, prediction, label):
    """Display a single sample image titled with its predicted class.

    When *label* is not None the true class name is shown alongside the
    prediction. Class indices are resolved through the module-level
    `classes` tuple.
    """
    fig, ax = plt.subplots()
    ax.imshow(image.squeeze())
    pred = classes[prediction]
    if label is None:
        title = 'pred: %s' % pred
    else:
        title = 'pred: %s - label: %s' % (pred, classes[label])
    ax.set_title(title)
    fig.show()
# --- Deep View Parameters ----
batch_size = 1024
max_samples = 500
data_shape = (col_ch, dim_img, dim_img)
n = 5
lam = .65
resolution = 100
cmap = 'tab10'
title = 'ConvNet - FashionMnist with backdoors'
deepview = DeepView(torch_model.predict_numpy, classes, max_samples, batch_size, data_shape,
n, lam, resolution, cmap, title=title, data_viz=visualization)
# +
from deepview.evaluate import evaluate
# run deepview
umapParms = {
"random_state": 42*42,
"n_neighbors": 30,
"min_dist" : 1,
"verbose" : True
}
deepview._init_mappers(None, None, umapParms)
#deepview.resolution = 200 # uncomment to increase resolution
# TODO: a = 400
t0 = time.time()
# create a visualization
deepview.add_samples(X, labs)
#deepview.update_mappings()
deepview.show()
print('Time to calculate visualization for %d samples: %.2f sec' % (n_data, time.time() - t0))
# calculate the quality of the projection (pi)
print('Evaluation of DeepView: %s\n' % deepview.title)
evaluate(deepview, deepview.samples, deepview.y_true)
# -
| deepview/DeepView Demo_FashionMnist_BackdoorAttack.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # {glue:text}`{{ github_org }}_github_org`
#
# **Activity from {glue:}`{{ github_org }}_start` to {glue:}`{{ github_org }}_stop`**
# + tags=["remove_cell"]
from datetime import date
from dateutil.relativedelta import relativedelta
from myst_nb import glue
import seaborn as sns
import pandas as pd
import numpy as np
import altair as alt
from markdown import markdown
from IPython.display import Markdown
from ipywidgets.widgets import HTML, Tab
from ipywidgets import widgets
from datetime import timedelta
from matplotlib import pyplot as plt
import os.path as op
from warnings import simplefilter
simplefilter('ignore')
# + tags=["remove_cell"]
# Altair config
def author_url(author):
    """Return the GitHub profile URL for the given username."""
    return "https://github.com/" + author
def alt_theme():
    """Altair theme config: enlarge axis label fonts on both axes."""
    axis_labels = {'labelFontSize': 15}
    return {
        'config': {
            'axisLeft': dict(axis_labels),
            'axisBottom': dict(axis_labels),
        }
    }
alt.themes.register('my_theme', alt_theme)
alt.themes.enable("my_theme")
# Define colors we'll use for GitHub membership
author_types = ['MEMBER', 'CONTRIBUTOR', 'COLLABORATOR', "NONE"]
author_palette = np.array(sns.palettes.blend_palette(["lightgrey", "lightgreen", "darkgreen"], 4)) * 256
author_colors = ["rgb({}, {}, {})".format(*color) for color in author_palette]
author_color_dict = {key: val for key, val in zip(author_types, author_palette)}
# + tags=["parameters", "hide_input", "remove_cell"]
github_org = "jupyterhub"
top_n_repos = 15
n_days = 10
# + tags=["remove_cell"]
############################################################
# Variables
stop = date.today()
start = date.today() - relativedelta(days=n_days)
# Strings for use in queries
start_date = f"{start:%Y-%m-%d}"
stop_date = f"{stop:%Y-%m-%d}"
# Glue variables for use in markdown
glue(f"{github_org}_github_org", github_org, display=False)
glue(f"{github_org}_start", start_date, display=False)
glue(f"{github_org}_stop", stop_date, display=False)
# -
# ## Load data
#
# Load and clean up the data
# +
from pathlib import Path
path_data = Path("../data")
comments = pd.read_csv(path_data.joinpath('comments.csv'), index_col=None).drop_duplicates()
issues = pd.read_csv(path_data.joinpath('issues.csv'), index_col=None).drop_duplicates()
prs = pd.read_csv(path_data.joinpath('prs.csv'), index_col=None).drop_duplicates()
for idata in [comments, issues, prs]:
idata.query("org == @github_org", inplace=True)
# -
# What are the top N repos, we will only plot these in the full data plots
top_commented_repos = comments.groupby("repo").count().sort_values("createdAt", ascending=False)['createdAt']
use_repos = top_commented_repos.head(top_n_repos).index.tolist()
# + [markdown] tags=[] toc-hr-collapsed=false
# ## Merged Pull requests
#
# Here's an analysis of **merged pull requests** across each of the repositories in the Jupyter
# ecosystem.
# + tags=["remove_cell"]
merged = prs.query('state == "MERGED" and closedAt > @start_date and closedAt < @stop_date')
# + tags=["hide_input"]
prs_by_repo = merged.groupby(['org', 'repo']).count()['author'].reset_index().sort_values(['org', 'author'], ascending=False)
alt.Chart(data=prs_by_repo, title=f"Merged PRs in the last {n_days} days").mark_bar().encode(
x=alt.X('repo', sort=prs_by_repo['repo'].values.tolist()),
y='author',
color='org'
)
# -
# ### Authoring and merging stats by repository
#
# Let's see who has been doing most of the PR authoring and merging. The PR author is generally the
# person that implemented a change in the repository (code, documentation, etc). The PR merger is
# the person that "pressed the green button" and got the change into the main codebase.
# + tags=["remove_cell"]
# Prep our merging DF
merged_by_repo = merged.groupby(['repo', 'author'], as_index=False).agg({'id': 'count', 'authorAssociation': 'first'}).rename(columns={'id': "authored", 'author': 'username'})
closed_by_repo = merged.groupby(['repo', 'mergedBy']).count()['id'].reset_index().rename(columns={'id': "closed", "mergedBy": "username"})
# + tags=["hide_input"]
charts = []
title = f"PR authors for {github_org} in the last {n_days} days"
this_data = merged_by_repo.replace(np.nan, 0).groupby('username', as_index=False).agg({'authored': 'sum', 'authorAssociation': 'first'})
this_data = this_data.sort_values('authored', ascending=False)
ch = alt.Chart(data=this_data, title=title).mark_bar().encode(
x='username',
y='authored',
color=alt.Color('authorAssociation', scale=alt.Scale(domain=author_types, range=author_colors))
)
ch
# + tags=["hide_input"]
charts = []
title = f"Merges for {github_org} in the last {n_days} days"
ch = alt.Chart(data=closed_by_repo.replace(np.nan, 0), title=title).mark_bar().encode(
x='username',
y='closed',
)
ch
# -
# ## Issues
#
# Issues are **conversations** that happen on our GitHub repositories. Here's an
# analysis of issues across the Jupyter organizations.
# + tags=["remove_cell"]
created = issues.query('state == "OPEN" and createdAt > @start_date and createdAt < @stop_date')
closed = issues.query('state == "CLOSED" and closedAt > @start_date and closedAt < @stop_date')
# + tags=["hide_input"]
created_counts = created.groupby(['org', 'repo']).count()['number'].reset_index()
created_counts['org/repo'] = created_counts.apply(lambda a: a['org'] + '/' + a['repo'], axis=1)
sorted_vals = created_counts.sort_values(['org', 'number'], ascending=False)['repo'].values
alt.Chart(data=created_counts, title=f"Issues created in the last {n_days} days").mark_bar().encode(
x=alt.X('repo', sort=alt.Sort(sorted_vals.tolist())),
y='number',
)
# + tags=["hide_input"]
closed_counts = closed.groupby(['org', 'repo']).count()['number'].reset_index()
closed_counts['org/repo'] = closed_counts.apply(lambda a: a['org'] + '/' + a['repo'], axis=1)
sorted_vals = closed_counts.sort_values(['number'], ascending=False)['repo'].values
alt.Chart(data=closed_counts, title=f"Issues closed in the last {n_days} days").mark_bar().encode(
x=alt.X('repo', sort=alt.Sort(sorted_vals.tolist())),
y='number',
)
# + tags=["remove_cell"]
created_closed = pd.merge(created_counts.rename(columns={'number': 'created'}).drop(columns='org/repo'),
closed_counts.rename(columns={'number': 'closed'}).drop(columns='org/repo'),
on=['org', 'repo'], how='outer')
created_closed = pd.melt(created_closed, id_vars=['org', 'repo'], var_name="kind", value_name="count").replace(np.nan, 0)
# + tags=["hide_input"]
charts = []
# Pick the top 10 repositories
top_repos = created_closed.groupby(['repo']).sum().sort_values(by='count', ascending=False).head(10).index
ch = alt.Chart(created_closed.query('repo in @top_repos'), width=120).mark_bar().encode(
x=alt.X("kind", axis=alt.Axis(labelFontSize=15, title="")),
y=alt.Y('count', axis=alt.Axis(titleFontSize=15, labelFontSize=12)),
color='kind',
column=alt.Column("repo", header=alt.Header(title=f"Issue activity, last {n_days} days for {github_org}", titleFontSize=15, labelFontSize=12))
)
ch
# + tags=["remove_cell"]
# Set to datetime
for kind in ['createdAt', 'closedAt']:
closed.loc[:, kind] = pd.to_datetime(closed[kind])
closed.loc[:, 'time_open'] = closed['closedAt'] - closed['createdAt']
closed.loc[:, 'time_open'] = closed['time_open'].dt.total_seconds()
# + tags=["hide_input"]
time_open = closed.groupby(['org', 'repo']).agg({'time_open': 'median'}).reset_index()
time_open['time_open'] = time_open['time_open'] / (60 * 60 * 24)
time_open['org/repo'] = time_open.apply(lambda a: a['org'] + '/' + a['repo'], axis=1)
sorted_vals = time_open.sort_values(['org', 'time_open'], ascending=False)['repo'].values
alt.Chart(data=time_open, title=f"Time to close for issues closed in the last {n_days} days").mark_bar().encode(
x=alt.X('repo', sort=alt.Sort(sorted_vals.tolist())),
y=alt.Y('time_open', title="Median Days Open"),
)
# -
# ## Most-upvoted issues
# + tags=["hide_input"]
thumbsup = issues.sort_values("thumbsup", ascending=False).head(25)
thumbsup = thumbsup[["title", "url", "number", "thumbsup", "repo"]]
text = []
for ii, irow in thumbsup.iterrows():
itext = f"- ({irow['thumbsup']}) {irow['title']} - {irow['repo']} - [#{irow['number']}]({irow['url']})"
text.append(itext)
text = '\n'.join(text)
HTML(markdown(text))
# -
# ## Commenters across repositories
#
# These are commenters across all issues and pull requests in the last several days.
# These are colored by the commenter's association with the organization. For information
# about what these associations mean, [see this StackOverflow post](https://stackoverflow.com/a/28866914/1927102).
# + tags=["remove_cell"]
commentors = (
comments
.query("createdAt > @start_date and createdAt < @stop_date")
.groupby(['org', 'repo', 'author', 'authorAssociation'])
.count().rename(columns={'id': 'count'})['count']
.reset_index()
.sort_values(['org', 'count'], ascending=False)
)
# + tags=["hide_input"]
n_plot = 50
charts = []
for ii, (iorg, idata) in enumerate(commentors.groupby(['org'])):
title = f"Top {n_plot} commentors for {iorg} in the last {n_days} days"
idata = idata.groupby('author', as_index=False).agg({'count': 'sum', 'authorAssociation': 'first'})
idata = idata.sort_values('count', ascending=False).head(n_plot)
ch = alt.Chart(data=idata.head(n_plot), title=title).mark_bar().encode(
x='author',
y='count',
color=alt.Color('authorAssociation', scale=alt.Scale(domain=author_types, range=author_colors))
)
charts.append(ch)
alt.hconcat(*charts)
# -
# ## First responders
#
# First responders are the first people to respond to a new issue in one of the repositories.
# The following plots show first responders for recently-created issues.
# + tags=["remove_cell"]
first_comments = []
for (org, repo, issue_id), i_comments in comments.groupby(['org', 'repo', 'id']):
ix_min = pd.to_datetime(i_comments['createdAt']).idxmin()
first_comment = i_comments.loc[ix_min]
if isinstance(first_comment, pd.DataFrame):
first_comment = first_comment.iloc[0]
first_comments.append(first_comment)
first_comments = pd.concat(first_comments, axis=1).T
# Make up counts for viz
first_responder_counts = first_comments.groupby(['org', 'author', 'authorAssociation'], as_index=False).\
count().rename(columns={'id': 'n_first_responses'}).sort_values(['org', 'n_first_responses'], ascending=False)
# + tags=["hide_input"]
n_plot = 50
title = f"Top {n_plot} first responders for {github_org} in the last {n_days} days"
idata = first_responder_counts.groupby('author', as_index=False).agg({'n_first_responses': 'sum', 'authorAssociation': 'first'})
idata = idata.sort_values('n_first_responses', ascending=False).head(n_plot)
ch = alt.Chart(data=idata.head(n_plot), title=title).mark_bar().encode(
x='author',
y='n_first_responses',
color=alt.Color('authorAssociation', scale=alt.Scale(domain=author_types, range=author_colors))
)
ch
# -
# ## Recent activity
#
# ### A list of merged PRs by project
#
# Below is a tabbed readout of recently-merged PRs. Check out the title to get an idea for what they
# implemented, and be sure to thank the PR author for their hard work!
# + tags=["hide_input"]
tabs = widgets.Tab(children=[])
for ii, ((org, repo), imerged) in enumerate(merged.query("repo in @use_repos").groupby(['org', 'repo'])):
merged_by = {}
pr_by = {}
issue_md = []
issue_md.append(f"#### Closed PRs for repo: [{org}/{repo}](https://github.com/{github_org}/{repo})")
issue_md.append("")
issue_md.append(f"##### ")
for _, ipr in imerged.iterrows():
user_name = ipr['author']
user_url = author_url(user_name)
pr_number = ipr['number']
pr_html = ipr['url']
pr_title = ipr['title']
pr_closedby = ipr['mergedBy']
pr_closedby_url = f"https://github.com/{pr_closedby}"
if user_name not in pr_by:
pr_by[user_name] = 1
else:
pr_by[user_name] += 1
if pr_closedby not in merged_by:
merged_by[pr_closedby] = 1
else:
merged_by[pr_closedby] += 1
text = f"* [(#{pr_number})]({pr_html}): _{pr_title}_ by **[@{user_name}]({user_url})** merged by **[@{pr_closedby}]({pr_closedby_url})**"
issue_md.append(text)
issue_md.append('')
markdown_html = markdown('\n'.join(issue_md))
children = list(tabs.children)
children.append(HTML(markdown_html))
tabs.children = tuple(children)
tabs.set_title(ii, repo)
tabs
# -
# ### A list of recent issues
#
# Below is a list of issues with recent activity in each repository. If they seem of interest
# to you, click on their links and jump in to participate!
# + tags=["remove_cell"]
# Add comment count data to issues and PRs
comment_counts = (
comments
.query("createdAt > @start_date and createdAt < @stop_date")
.groupby(['org', 'repo', 'id'])
.count().iloc[:, 0].to_frame()
)
comment_counts.columns = ['n_comments']
comment_counts = comment_counts.reset_index()
# + tags=["hide_input"] toc-hr-collapsed=false
n_plot = 5
tabs = widgets.Tab(children=[])
for ii, (repo, i_issues) in enumerate(comment_counts.query("repo in @use_repos").groupby('repo')):
issue_md = []
issue_md.append("")
issue_md.append(f"##### [{github_org}/{repo}](https://github.com/{github_org}/{repo})")
top_issues = i_issues.sort_values('n_comments', ascending=False).head(n_plot)
top_issue_list = pd.merge(issues, top_issues, left_on=['org', 'repo', 'id'], right_on=['org', 'repo', 'id'])
for _, issue in top_issue_list.sort_values('n_comments', ascending=False).head(n_plot).iterrows():
user_name = issue['author']
user_url = author_url(user_name)
issue_number = issue['number']
issue_html = issue['url']
issue_title = issue['title']
text = f"* [(#{issue_number})]({issue_html}): _{issue_title}_ by **[@{user_name}]({user_url})**"
issue_md.append(text)
issue_md.append('')
md_html = HTML(markdown('\n'.join(issue_md)))
children = list(tabs.children)
children.append(HTML(markdown('\n'.join(issue_md))))
tabs.children = tuple(children)
tabs.set_title(ii, repo)
display(Markdown(f"Here are the top {n_plot} active issues in each repository in the last {n_days} days"))
display(tabs)
| monthly_update/templates/org_report.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Serial Test over USB
# Connecting to a USB serial port to get data from an Arduino. To install the software run:
#
# `apt-get install python-serial`
# %ls /dev/ttyACM*
# ## Setup the serial configuration
# Check that the serial device printed by the command above matches the `portPath` below:
# +
# %matplotlib notebook
import serial
import time
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib.animation import FuncAnimation
import pandas as pd
import numpy as np
from datetime import datetime, date
portPath = "/dev/ttyACM0" # Must match value shown on Arduino IDE
baud = 115200 # Must match Arduino baud rate
timeout = 5 # Seconds
filename = str(date.today()) + " data.csv"
max_num_readings = 8
num_signals = 1
def create_serial_obj(portPath, baud_rate, tout):
    """
    Given the port path, baud rate, and timeout value, creates and returns a pyserial object.
    """
    # Thin wrapper around the pyserial constructor.
    connection = serial.Serial(portPath, baud_rate, timeout=tout)
    return connection
# -
def read_serial_data(serial):
    """
    Given a pyserial object (serial). Outputs a list of lines read in from the serial port.

    Reading stops once `max_num_readings` lines (module-level constant) have
    arrived, or when readline() times out and returns an empty bytestring.
    """
    serial.reset_input_buffer()
    # Kick off the transfer, then give the device a moment to respond.
    serial.write(1)
    time.sleep(2)
    lines = []
    while True:
        raw = serial.readline()
        if raw == b'':
            # readline() hit its timeout -> no more data is coming
            break
        lines.append(raw)
        if len(lines) == max_num_readings:
            break
    return lines
# +
headers = ["PM 0.3","PM 0.5","PM 1.0","PM 2.5","PM 5.0","PM 10.0","Temp","Pressure","Humidity","CO2",
           "BME680 VOC","QM9 VOC","MiCS5524 VOC","CCS811 VOC","Date"]
try:
    # FIX: `pd.datetime` was removed in pandas 2.0 -- parse with the stdlib
    # datetime class (imported at the top of this notebook) instead.
    dateparse = lambda x: datetime.strptime(x, '%Y-%m-%d %H:%M:%S.%f')
    df = pd.read_csv(filename, parse_dates=['Date'], date_parser=dateparse, index_col=0)
    df
except Exception:
    # First run of the day: no CSV yet, start with an empty frame to append to later
    df = pd.DataFrame()
print ("Creating serial object...")
serial_obj = create_serial_obj(portPath, baud, timeout)
df
# + active=""
# print ("Creating serial object...")
# serial_obj = create_serial_obj(portPath, baud, timeout)
#
# print ("Reading serial data...")
# serial_data = read_serial_data(serial_obj)
# #print serial_data.len()
# +
def is_number(string):
    """
    Given a string returns True if the string represents a number.
    Returns False otherwise.
    """
    # float() accepts ints, decimals, scientific notation, inf/nan, etc.
    try:
        float(string)
    except ValueError:
        return False
    return True
def clean_serial_data(data):
    """
    Given a list of raw serial lines (bytes), decode each one as UTF-8
    (silently dropping undecodable bytes) and strip surrounding whitespace
    and CR/LF terminators.
    Given something like: [b'0.5000,33\r\n', b'1.0000,283\r\n']
    Returns: ['0.5000,33', '1.0000,283']
    """
    return [raw.decode("utf-8", "ignore").strip() for raw in data]
# + active=""
# print("Cleaning data...")
# clean_data = clean_serial_data(serial_data)
# clean_data_table = [clean_data]
# clean_data_table
# + active=""
# clean_data_table = pd.DataFrame([sub.split(",") for sub in clean_data])
# clean_data_table['Date'] = [datetime.now()]
# clean_data_table.columns = headers
# clean_data_table
# + active=""
# df = df.append(clean_data_table)
# df
# + active=""
# df.to_csv(filename,names=headers)
# -
# Re-load the CSV with an explicit 'Date' parser.
# FIX: pd.datetime was deprecated and removed in pandas 1.0; use the datetime
# class directly (it is already imported in this notebook, cf. datetime.now()).
dateparse = lambda x: datetime.strptime(x, '%Y-%m-%d %H:%M:%S.%f')
df = pd.read_csv(filename, parse_dates=['Date'], date_parser=dateparse, index_col=0)
df
# + active=""
# x = df['Date']
# y1 = df['PM 0.3']
# y2 = df['PM 0.5']
# y3 = df['PM 1.0']
# y4 = df['PM 2.5']
# y5 = df['PM 5.0']
# y6 = df['PM 10.0']
#
# # plot
# plt.cla()
# plt.plot(x,y1, label='PM 0.3')
# plt.plot(x,y2, label='PM 0.5')
# plt.plot(x,y3, label='PM 1.0')
# plt.plot(x,y4, label='PM 2.5')
# plt.plot(x,y5, label='PM 5.0')
# plt.plot(x,y6, label='PM 10.0')
#
# plt.legend()
# # beautify the x-labels
# plt.gcf().autofmt_xdate()
# plt.show()
# #plt.ioff()
# -
def animate(i):
    """FuncAnimation callback: poll the serial port, append the new readings
    to the global DataFrame and redraw the particulate-matter plot.

    i is the frame counter supplied by FuncAnimation (unused here).
    """
    # Mutates module-level state: the accumulated DataFrame, the pyplot
    # module and the shared serial port object.
    global df
    global plt
    global serial_obj
    serial_data = read_serial_data(serial_obj)
    clean_data = clean_serial_data(serial_data)
    clean_data_table = [clean_data]
    # Each cleaned line is one comma-separated row of sensor readings.
    clean_data_table = pd.DataFrame([sub.split(",") for sub in clean_data])
    # Timestamp the batch with the receive time (single row assumed here).
    clean_data_table['Date'] = [datetime.now()]
    clean_data_table.columns = headers
    # NOTE(review): DataFrame.append is deprecated (removed in pandas 2.0);
    # pd.concat([df, clean_data_table]) is the modern equivalent.
    df = df.append(clean_data_table)
    x = df['Date']
    y1 = df['PM 0.3']
    y2 = df['PM 0.5']
    y3 = df['PM 1.0']
    y4 = df['PM 2.5']
    y5 = df['PM 5.0']
    y6 = df['PM 10.0']
    # plot: clear the axes and redraw the full history each frame
    plt.cla()
    plt.plot(x,y1, label='PM 0.3')
    plt.plot(x,y2, label='PM 0.5')
    plt.plot(x,y3, label='PM 1.0')
    plt.plot(x,y4, label='PM 2.5')
    plt.plot(x,y5, label='PM 5.0')
    plt.plot(x,y6, label='PM 10.0')
    plt.legend()
    # beautify the x-labels
    plt.gcf().autofmt_xdate()
    plt.xlabel('Time')
    plt.ylabel('Particulate matter (µm)')
    plt.title("Indoor Air Quality")
    #plt.ylim(ymin=0,ymax=85)
# +
ani = FuncAnimation(plt.gcf(), animate,interval=1000)
plt.show()
# -
serial_data = read_serial_data(serial_obj)
clean_data = clean_serial_data(serial_data)
clean_data_table = [clean_data]
clean_data_table = pd.DataFrame([sub.split(",") for sub in clean_data])
clean_data_table['Date'] = [datetime.now()]
clean_data_table.columns = headers
df = df.append(clean_data_table)
animate(1)
# +
try:
df[headers] #Make sure the DataFrame is in the correct order
df.to_csv(filename,names=headers)
except:
df = pd.DataFrame() #Create an empty data frame to append to later
df[headers] #Make sure the DataFrame is in the correct order
df.to_csv(filename,names=headers)
df
# + active=""
# #This will wipe the DataFrame
# df = pd.DataFrame()
# df
# -
df
| jupyter-notebooks/Serial Test over USB.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## In this notebook we will implement Policy Iteration (Policy evaluation <-> Policy improvement) and Value Iteration
# - Frozen Lake 8x8 env will be used
# - We will need the entire MDP of the env
# +
import gym
import numpy as np
from matplotlib import pyplot as plt
# %matplotlib inline
np.random.seed(0)
# -
env = gym.make("FrozenLake8x8-v0")
env.action_space, env.env.nA
env.observation_space, env.env.nS
# LEFT = 0
# DOWN = 1
# RIGHT = 2
# UP = 3
# "8x8": [
# "SFFFFFFF",
# "FFFFFFFF",
# "FFFHFFFF",
# "FFFFFHFF",
# "FFFHFFFF",
# "FHHFFFHF",
# "FHFFHFHF",
# "FFFHFFFG"
# ]
# - accessing the transition prob, Prob[s/a] = env.P[s][s'] -> list to tuples where tuple (P[s'/s], s', r, done)
env.P[0][0]
# ### Assume that we will start with a random deterministic policy
# create initial policy
nS, nA = env.env.nS, env.env.nA
pi = {s: np.random.choice(env.action_space.n) for s in range(env.env.nS)}
# _Iterative evaluation of state value under a policy_
# - For a stochastic policy under evaluation
# $$ V_{k+1}(s) = \sum_{a} \pi(a/s) \sum_{s',r}p(s',r/s,a) [r + \gamma V_{k}(s')]$$
# - For a deterministic policy under evaluation
# $$ V_{k+1}(s) = \sum_{s',r}p(s',r/s,a) [r + \gamma V_{k}(s')]$$
#
# - Here we have a deterministic policy to begin with
def policy_evaluation(pi, max_iters, tol, nA, mdp, gamma=0.9):
    '''
    Iterative policy evaluation for a deterministic policy.

    [1] pi: policy dict mapping state to action (state is the key and action is the value)
    [2] max_iters: max num of iterations to perform for policy eval
    [3] tol: to check if state value function changes by less than tol, then exit policy evaluation
    [4] nA: number actions in every state (kept for interface compatibility; unused)
    [5] mdp: MDP, mdp[s][a] -> list of (prob, next_state, reward, done) tuples
    [6] gamma: Discount factor

    Returns the state-value function as a 1-D numpy array of length len(pi).
    '''
    num_states = len(pi)
    v_old = np.zeros(num_states)
    for i in range(max_iters):
        # BUG FIX: size the fresh value array by num_states rather than the
        # module-level global nS, so the function works for any MDP size.
        v = np.zeros(num_states)
        for s in range(num_states): # loop through all states
            for prob, next_state, r, done in mdp[s][pi[s]]:
                bootstrap_val = v_old[next_state]
                # Synchronous (Jacobi-style) backup; terminal transitions
                # contribute only their immediate reward (no bootstrapping).
                v[s] += prob*(r + gamma * bootstrap_val * (not done)) # NOTE: we are doing synchronous updates
        # Exit criteria: check for change in value function
        if np.amax(np.abs(v - v_old)) <= tol:
            print(f"tolerance achieve.. exiting early")
            break
        print(f"Iteration.. :{i}/{max_iters}, {np.amax(np.abs(v - v_old))}")
        v_old = v.copy()
    return v
# check the policy_evaluation function
pi = {s: np.random.choice(env.action_space.n) for s in range(env.env.nS)} # random policy
mdp = env.env.P
v = policy_evaluation(pi, max_iters=1000, tol=1e-9, nA=nA, mdp=mdp, gamma=1.0)
v
# ### Policy Improvement
# - Use state value function and one step look ahead using MDP
# $$ \pi(s) = argmax_{a}\sum_{s',r}p(s',r/s,a) [r + \gamma V_{k}(s')]$$
def policy_improvement(v, mdp, gamma=1.0):
    '''
    Greedy policy improvement via a one-step lookahead on the MDP.

    [1] v: state value function (one entry per state)
    [2] mdp: mdp[s] -> {action: [(prob, next_state, reward, done), ...]}
    [3] gamma: discount factor

    Returns (pi, qval): the greedy deterministic policy dict and the Q table.
    '''
    num_states = len(v)
    # FIX: derive the action count from the MDP itself instead of relying on
    # the module-level global nA, so the function works for any environment.
    num_actions = max(len(mdp[s]) for s in range(num_states))
    qval = np.zeros((num_states, num_actions))
    for s in range(num_states): # for every state
        for action, values in mdp[s].items():
            for prob, next_state, reward, done in values:
                qval[s, action] += prob * (reward + gamma * v[next_state] * (not done))
    # update policy: greedy w.r.t. Q; np.argmax breaks ties toward the
    # lower-numbered action.
    pi = {s: a for s, a in enumerate(np.argmax(qval, axis=1))}
    return pi, qval
pi_new, qval = policy_improvement(v, mdp, gamma=0.9)
def print_policy(pi, P, action_symbols=('<', 'v', '>', '^'), n_cols=8, title='Policy:'):
print(title)
arrs = {k:v for k,v in enumerate(action_symbols)}
for s in range(len(P)):
a = pi[s]
print("| ", end="")
if np.all([done for action in P[s].values() for _, _, _, done in action]):
print("".rjust(9), end=" ")
else:
print(str(s).zfill(2), arrs[a].rjust(6), end=" ")
if (s + 1) % n_cols == 0: print("|")
print_policy(pi, mdp, title='Policy:')
print_policy(pi_new, mdp, title='Policy:')
pi == pi_new
# ### Policy Iteration
# - This is just policy evaluation and policy improvement in loop until convergence
def policy_iteration(pi, mdp, gamma=1.0, max_iters=100, tol=1e-9, maxcount=100):
    '''
    Policy iteration: alternate policy evaluation and greedy improvement
    until the policy stops changing (or maxcount sweeps elapse).

    [1] pi: initial policy (dict state -> action)
    [2] mdp: MDP
    [3] gamma: discount factor
    [4] max_iters: maximum iterations for policy evaluation
    [5] tol: min tolerance for state value function change for policy eval exit criterion
    [6] maxcount: counter for policy iteration to exit

    Returns (pi, v): the final policy and its state-value function.
    '''
    counter = 0
    while True:
        counter +=1
        pi_old = pi
        # run policy evaluation (this is called prediction problem)
        # BUG FIX: honour the caller's gamma instead of hard-coding 1.0 here
        # and in the improvement step below.
        v = policy_evaluation(pi, max_iters, tol, nA, mdp, gamma=gamma)
        # run policy improvement (this is called control problem)
        pi_new, _ = policy_improvement(v, mdp, gamma=gamma)
        if pi_new == pi_old:
            print(f"No change in policy after policy improvement.. exiting in {counter} steps")
            break
        elif counter >= maxcount:
            print(f"Reached maximum count limit of {counter} steps for policy iteration, exiting...")
            break
        pi = pi_new
    return pi_new, v
pi = {s: np.random.choice(env.action_space.n) for s in range(env.env.nS)} # random policy
pi_new, v_final = policy_iteration(pi, mdp, gamma=1.0, max_iters=1000, tol=1e-9, maxcount=100)
print_policy(pi_new, mdp, title='Converged Policy:')
print_policy(pi, mdp, title='Initial Policy:')
### calculate average rewards over some episode based on some policy
def mean_rewards(pi, num_episodes, env):
    """Roll out *num_episodes* episodes under policy *pi* in *env* and return
    the average total episode reward (a float)."""
    episode_returns = []
    for ep in range(num_episodes):
        state = env.reset()
        episode_return = 0
        done = False
        while not done:
            state, reward, done, info = env.step(pi[state])
            episode_return += reward
        # Episode finished; `reward` holds the terminal-step reward.
        print(f"ep:{ep}, reward:{reward}")
        episode_returns.append(episode_return)
    return np.array(episode_returns).mean()
mean_rew_init_policy = mean_rewards(pi, num_episodes=1000, env=env)
mean_rew_conv_policy = mean_rewards(pi_new, num_episodes=1000, env=env)
print(f"avg reward over {str(1000)} episodes for initial policy : {mean_rew_init_policy}")
print(f"avg reward over {str(1000)} episodes for converged policy : {mean_rew_conv_policy}")
# `NOTE`: Mean reward here also means % of trials agent reached its end goal since there is reward of +1 in the entire game for the goal state only and rewards at all other states are zero
# **Conclusion**
# - Converged policy is able to reach its goal ~88% of the time
# - Initial random policy success rate is ~0.1%
# plot final state value function
v_final_rs = v_final.reshape(8,8)
plt.figure()
plt.imshow(v_final_rs)
plt.colorbar()
# ### Value Iteration
# *Merge truncated policy evaluation and policy improvement in a single step*
#
# $$ V_{k+1}(s) = max_{a}\sum_{s',r}p(s',r/s,a) [r + \gamma V_{k}(s')]$$
#
# Value iteration improves its policy by bootstrapping on the value function, hence the name `value iteration`
def value_iteration(mdp, gamma=1.0, maxcount=100, tol=1e-9):
    '''
    Value iteration: a Bellman-optimality backup per sweep, merging truncated
    policy evaluation and greedy improvement.

    [1] mdp: mdp[s] -> {action: [(prob, next_state, reward, done), ...]}
    [2] gamma: discount factor
    [3] maxcount: maximum number of sweeps before giving up
    [4] tol: exit once the value function changes by no more than tol

    Returns (pi, v): the greedy policy dict and the converged state values.
    '''
    num_states = len(mdp.keys())
    # FIX: derive the action count from the MDP instead of depending on the
    # module-level global nA.
    num_actions = max(len(mdp[s]) for s in range(num_states))
    v = np.zeros(num_states)
    counter = 0
    while True:
        counter += 1
        qval = np.zeros((num_states, num_actions))
        v_old = v
        for s in range(num_states): # for every state
            for action, values in mdp[s].items():
                for prob, next_state, reward, done in values:
                    qval[s, action] += prob * (reward + gamma * v[next_state] * (not done))
        # update state values: greedy (max over actions) backup
        v = np.max(qval, axis=1)
        # check for exit criteria
        if np.amax(np.abs(v-v_old)) <= tol:
            print(f"tol achieved in {counter} steps, exiting...")
            break
        elif counter >= maxcount:
            print(f"Maxcount reached, exiting...")
            break
    # update policy: greedy w.r.t. the final Q-values
    pi = {s: a for s, a in enumerate(np.argmax(qval, axis=1))}
    return pi,v
pi_vi_new, v_final_vi = value_iteration(mdp, gamma=1.0, maxcount=100, tol=1e-9)
print_policy(pi, mdp, title='Initial Policy:')
print_policy(pi_vi_new, mdp, title='Initial Policy:')
mean_rew_init_policy = mean_rewards(pi, num_episodes=1000, env=env)
mean_rew_conv_policy = mean_rewards(pi_vi_new, num_episodes=1000, env=env)
mean_rew_init_policy, mean_rew_conv_policy
# plot the converged value function from value iteration
plt.figure()
plt.imshow(v_final_vi.reshape(8,8))
plt.colorbar()
# **__Conclusions__**
# - We have implemented PI and VI on 8x8 Frozen Lake where env is stochastic
# - For both cases, we finally converged to optimal policy
# - Optimal policy gives around 90% of success rate in this environment
| PI_VI_FrozenLake/.ipynb_checkpoints/PI_VI_Frozenlake-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import requests
# pprint is used to format the JSON response
from pprint import pprint
import os
import pandas as pd
import numpy as np
##Takes in cpf_tickets.csv and generates cpf_processed.csv
# SECURITY NOTE(review): this subscription key is hard-coded and committed to
# source control — it should be rotated and loaded from an environment
# variable or a secret store instead.
subscription_key = "cd0cf9855b244aa28c017742ed7a904c"
endpoint = "https://cpftext.cognitiveservices.azure.com/"
# Azure Text Analytics v2.1 REST endpoints used below.
sentiment_url = endpoint + "/text/analytics/v2.1/sentiment"
language_api_url = endpoint + "/text/analytics/v2.1/languages"
keyphrase_url = endpoint + "/text/analytics/v2.1/keyphrases"
# Request payload: one {"id", "language", "text"} entry per ticket row.
documents = {"documents": []}
training_data = pd.read_csv("cpf_tickets.csv")
#Request for keywords from azure and append
for index, row in training_data.iterrows():
newobj = {"id": index, "language": "en",
"text": row['body']}
documents["documents"].append(newobj)
headers = {"Ocp-Apim-Subscription-Key": subscription_key}
response = requests.post(keyphrase_url, headers=headers, json=documents)
key_phrases = response.json()
keywords = []
for i in range(len(response.json()['documents'])):
s = ""
for j in range(len(response.json()['documents'][i]['keyPhrases'])):
s = s + " " + response.json()['documents'][i]['keyPhrases'][j]
if s == "":
s = "NoKeywordsFound"
keywords.append(s)
training_data['keywords'] = keywords
#Request for sentiment from azure and append
response = requests.post(sentiment_url, headers=headers, json=documents)
sentiments_response = response.json()
sentiments = []
for i in range(len(response.json()['documents'])):
sentiments.append(response.json()['documents'][i]['score'])
training_data['sentiments'] = sentiments
training_data.to_csv("cpf_processed.csv")
# -
| Ticket_Keywords_and_sentiments_extractor.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction to BioPython
#
# Load Biopython library & Functions
import Bio
from Bio import SeqIO
from Bio.Seq import Seq, MutableSeq
from Bio.Seq import transcribe, back_transcribe, translate, complement, reverse_complement
# Check Biopython version
Bio.__version__
# ## Sequence Operations
# Sequence
seq = Seq("GGACCTGGAACAGGCTGAACCCTTTATCCACCTCTCTCCAATTATACCTATCATCCTAACTTCTCAGTGGACCTAACAATCTTCTCCCTTCATCTAGCAGGAGTC")
# Alphabet
seq.alphabet
# Check type
type(seq.alphabet)
# Find sub-sequence: if TRUE <- SubSeq Position, else <- return -1
seq.find("ATC")
seq.find("ATGC")
# Number of `A`
seq.count("A")
# Number of `C`
seq.count("C")
# Number of `T`
seq.count("T")
# Number of `G`
seq.count("G")
# K-mer analysis, K = 2(AA)<--dimer
seq.count("AA")
# K-mer analysis, K = 3(AAA)<--trimer
seq.count("AAA")
# ## Frequency
# Count frequency of nucleotides
from collections import Counter
freq = Counter(seq)
print(freq)
# ## Reverse
# Reverse
print(f'RefSeq: {seq}')
rev = str(seq[::-1])
print(f'RevSeq: {rev}')
# ## Complement
# Complement
print(f'RefSeq: {seq}')
com = seq.complement()
print(f'ComSeq: {com}')
# ## Reverse Complement
# Reverse complement
print(f'RefSeq: {seq}')
rev_com = seq.reverse_complement()
print(f'RevCom: {rev_com}')
# ## Transcription
# Transcription(DNA ==> RNA)
print(f'DNA: {seq}')
rna = seq.transcribe()
print(f'RNA: {rna}')
# ## Transcribe
# Back Transcription(RNA ==> DNA)
print(f'RNA: {rna}')
dna = rna.back_transcribe()
print(f'DNA: {dna}')
# ## Translation
# Translation(DNA ==> Protein)
print(f'DNA: {seq}')
prt = seq.translate()
print(f'Protein: {prt}')
# Let's verify the protein with the length property
len(seq)
# Make codons
len(seq) % 3
# Number of codons
len(seq) / 3
# Now verify the protein length
len(prt)
# Translation(DNA ==> Protein) Stop translation when found stop codon
print(f'DNA: {seq}')
prt = seq.translate(to_stop=True)
print(f'Protein: {prt}')
# Translation(DNA ==> Protein) for Mitochondrial DNA
print(f'DNA: {seq}')
prt = seq.translate(to_stop=True, table=2)
print(f'Protein: {prt}')
# ## Handling Files
for seq_record in SeqIO.parse("../data/den1.fasta", "fasta"):
ID = seq_record.id
seqs = seq_record.seq[:100]
rep = repr(seq_record)
length = len(seq_record)
# ID
print(ID)
# Sequence
print(seqs)
# Representation
print(rep)
# Length
print(length)
# Print the first nucleotide of each codon
seqs[0::3]
# Print the first codon position
seqs[1::3]
# Print the second codon position
seqs[2::3]
# Sequence Length Comparison
seq1 = Seq("TTGTGGCCGCTCAGATCAGGCAGTTTAGGCTTA")
seq2 = Seq("ATTTATAGAAATGTGGTTATTTCTTAAGCATGGC")
seq1 == seq2
# Mutable sequence
mut_seq = MutableSeq("TTGTGGCCGCTCAGATCAGGCAGTTTAGGCTTA")
print(f'MutSeq: {mut_seq}')
# BUG FIX: the original used '==' (a no-op comparison whose result was
# discarded); assignment is needed to actually mutate position 5.
mut_seq[5] = "C"
print(mut_seq)
mut_seq.remove("T")
print(mut_seq)
mut_seq.reverse()
print(mut_seq)
# !wget http://d28rh4a8wq0iu5.cloudfront.net/ads1/data/SRR835775_1.first1000.fastq
# +
# Working with Fastq files
for record in SeqIO.parse("SRR835775_1.first1000.fastq", "fastq"):
print(record)
print(record.seq)
print(record.letter_annotations['phred_quality'])
# -
quals = [record.letter_annotations['phred_quality'] for record in SeqIO.parse("SRR835775_1.first1000.fastq", "fastq")]
import matplotlib.pyplot as plt
plt.hist(quals, bins=10)
plt.title("Distribution of Phred Quality Score")
plt.xlabel("Base Position")
plt.ylabel("Phred Score")
plt.show()
sequences = [record.seq for record in SeqIO.parse("SRR835775_1.first1000.fastq", "fastq")]
sequences[:100]
| book/_build/jupyter_execute/notebooks/BioPython Introduction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import yaml

# FIX: yaml.load() without an explicit Loader is unsafe on untrusted input
# and raises a TypeError on PyYAML >= 6.0; safe_load() parses plain data
# like these examples identically.
yaml.safe_load('a: 123')
# -
yaml.safe_load('Quick brown fox jumped over the lazy dog.')
yaml.safe_load('3.14151926536')
yaml.safe_load('''
- eggs
- ham
- spam
- French basil salmon terrine
''')
| Introduction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Sense of humour replication
#
# Today's state of the art in sense of humour is held by carbon-based lifeforms and biological neural networks. Machine learning has proven an invaluable tool in a lot of other domains so in all likelihood lack of progress in artificial sense of humour is a consequence of acute lack of imagination as opposed to technological limitations.
# ## Library
#
# Import everything we need for experiments
import pandas as pd
import numpy as np
import itertools
# +
import torch
from torch import nn
import torch.nn.functional as F
normal_dist = torch.distributions.Normal(0, 1)
# -
# ## Research log
#
# Experiments
tweets = pd.read_json('tweets.json')
tweets
y = np.array(tweets['favoriteCount'])
# ### Bag of words models
from sklearn.feature_extraction.text import TfidfVectorizer
vectorizer = TfidfVectorizer()
# +
import numpy as np
X = np.array(tweets['text'])
X = vectorizer.fit_transform(X)
# +
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y)
# -
from sklearn.linear_model import LinearRegression
linear = LinearRegression()
from sklearn.ensemble import RandomForestRegressor
rf = RandomForestRegressor()
linear.fit(X_train, y_train)
linear.score(X_test, y_test)
rf.fit(X_train, y_train)
rf.score(X_test, y_test)
# ### Byte pair embedding models
# Summing up all [byte-pair embeddings](https://github.com/bheinzerling/bpemb) to get a sentence embedding and use it as the input vector
import sentencepiece as spm
from gensim.models import KeyedVectors
# +
bpe_model_location = 'ru.wiki.bpe.op100000.model'
bpe_vec_location = 'ru.wiki.bpe.op100000.d300.w2v.bin'
sp = spm.SentencePieceProcessor()
sp.Load(bpe_model_location)
model = KeyedVectors.load_word2vec_format(bpe_vec_location, binary=True)
def bpe_embed(text):
    """Mean-pool the byte-pair-embedding vectors of *text* into a single
    sentence vector.

    Pieces missing from the embedding model are skipped; if no piece is
    found, the zero vector is returned.
    """
    embedding = np.zeros(model.vector_size)
    matched = 0
    for raw_piece in sp.encode_as_pieces(text):
        piece = raw_piece.decode('utf-8')
        try:
            vector = model[piece]
        except KeyError:
            continue
        embedding += vector
        matched += 1
    if matched:
        embedding /= matched
    return embedding
# -
X_bpe = np.array(list(map(embed, tweets['text'])))
# +
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X_bpe, y)
# +
from sklearn.linear_model import LinearRegression
linear = LinearRegression()
from sklearn.ensemble import RandomForestRegressor
randomforest = RandomForestRegressor()
# -
linear.fit(X_train, y_train)
randomforest.fit(X_train, y_train)
linear.score(X_test, y_test)
randomforest.score(X_test, y_test)
# ### Sequence models
# Treating tweets as sequences of chunk embeddings
def sequence_bpe_embed(text):
    """Return the sequence of byte-pair-embedding vectors for *text* as a
    2-D numpy array (one row per known piece).

    Pieces unknown to the embedding model are silently dropped.
    """
    vectors = []
    for raw_piece in sp.encode_as_pieces(text):
        piece = raw_piece.decode('utf-8')
        try:
            vectors.append(model[piece])
        except KeyError:
            continue
    return np.array(vectors)
X_seq = [sequence_bpe_embed(t) for t in tweets['text']]
X_train, X_test, y_train, y_test = train_test_split(X_seq, y)
import torch
from torch import nn
from torch.optim import Adam
import torch.distributions
import torch.nn.functional as F
normal_dist = torch.distributions.Normal(0, 1)
# Build the sequence model.
# BUG FIX: the original inspected lstm.parameters() on the line *before*
# lstm was defined (NameError); define the LSTM first.
lstm = nn.LSTM(300, 32, batch_first=True)
[p.shape for p in lstm.parameters()]
regressor = nn.Sequential(nn.Linear(64, 64), nn.LeakyReLU(), nn.Linear(64, 64), nn.LeakyReLU(), nn.Linear(64, 1))
# BUG FIX: optimise the regressor's parameters -- 'classifier' was undefined.
opt = Adam(itertools.chain(lstm.parameters(), regressor.parameters()))
def predict_likes(sentences):
    """Predict like-counts for a batch of piece-embedding sequences.

    sentences: list of 2-D arrays shaped (seq_len, 300) -- assumes a batch
    of one given the (1, 1, 32) initial states; TODO confirm for larger
    batches.  Returns a (batch, 1) tensor of predicted like counts.
    """
    # Random (not zero) initial hidden/cell states, sampled fresh per call,
    # so repeated calls on the same input are not deterministic.
    c0, h0 = normal_dist.sample((1, 1, 32)), normal_dist.sample((1,1,32))
    _, (cn, hn) = lstm(torch.Tensor(sentences), (c0, h0))
    # Concatenate final cell and hidden states (32+32=64) as regressor input.
    return regressor(torch.cat((cn[0], hn[0]), dim=1))
def fit_step():
    """One pass of per-sample SGD over the module-level X_train/y_train,
    minimising MSE between predicted and actual like counts."""
    for sentence, likes in zip(X_train, y_train):
        likes_pred = predict_likes([sentence])
        loss = F.mse_loss(likes_pred, torch.Tensor([[likes]]))
        # Batch size 1: backprop and step once per tweet.
        loss.backward()
        opt.step()
        opt.zero_grad()
# +
from sklearn.metrics import r2_score
def test():
print(f'train r2 {r2_score(y_train, [predict_likes([sentence]) for sentence in X_train])}')
print(f'test r2 {r2_score(y_test, [predict_likes([sentence]) for sentence in X_test])}')
# -
# I ran the cell below, like, 20 times
fit_step()
test()
# ### Length
# Cause why not?
lengths = [len(text) for text in tweets['text']]
# +
import matplotlib.pyplot as plt
# %matplotlib inline
# -
plt.xlabel('length')
plt.ylabel('likes')
plt.scatter(lengths, y)
# ### Neural machine
#
# What if we try learning on a bigger corpus of [tweets](https://twitter.com/neural_machine)?
from sklearn.cross_validation import train_test_split
neural_machine = pd.read_json('neural_machine.json')
neural_machine = neural_machine[neural_machine['text'] != '']
neural_machine.head()
# #### LSTM
X = [sequence_bpe_embed(t) for t in neural_machine['text']]
y = np.array(neural_machine['favoriteCount'])
X_train, X_test, y_train, y_test = train_test_split(X, y)
lstm = nn.LSTM(300, 32, batch_first=True)
regressor = nn.Sequential(nn.Linear(64, 64), nn.LeakyReLU(), nn.Linear(64, 16), nn.LeakyReLU(), nn.Linear(16, 1))
opt = torch.optim.Adam(itertools.chain(lstm.parameters(), regressor.parameters()))
def predict_likes(sentences):
    """Predict like-counts for a batch of piece-embedding sequences.

    Unlike the earlier variant, the regressor output is exponentiated so the
    prediction is always positive (like counts are non-negative).
    """
    # Random initial hidden/cell states, sampled fresh per call.
    c0, h0 = normal_dist.sample((1, 1, 32)), normal_dist.sample((1,1,32))
    _, (cn, hn) = lstm(torch.Tensor(sentences), (c0, h0))
    return torch.exp(regressor(torch.cat((cn[0], hn[0]), dim=1)))
def fit_step():
for sentence, likes in zip(X_train, y_train):
likes_pred = predict_likes([sentence])
loss = F.mse_loss(likes_pred, torch.Tensor([[likes]]))
loss.backward()
opt.step()
opt.zero_grad()
# +
from sklearn.metrics import r2_score
def test():
print(f'train r2 {r2_score(y_train, [predict_likes([sentence]) for sentence in X_train])}')
print(f'test r2 {r2_score(y_test, [predict_likes([sentence]) for sentence in X_test])}')
# -
fit_step()
test()
# Best r2 on test set: 0.025
# #### Different embeddings?
fasttext =
# #### Bag of words
X = [bpe_embed(t) for t in neural_machine['text']]
y = np.array(neural_machine['favoriteCount'])
X_train, X_test, y_train, y_test = train_test_split(X, y)
# +
from sklearn.linear_model import LinearRegression
linear = LinearRegression()
from sklearn.ensemble import RandomForestRegressor
randomforest = RandomForestRegressor()
# -
linear.fit(X_train, y_train)
randomforest.fit(X_train, y_train)
linear.score(X_test, y_test)
randomforest.score(X_test, y_test)
| HumourRegression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Hey everyone, this is my first go at Kaggle competitions and Kernels.
#
# In this Kernel, I implemented kNN classifier from scratch.
# And the results got 97.1% accuracy on public leaderboard.
# +
import numpy as np
import matplotlib.pyplot as plt
from collections import Counter
import time
# %matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# load csv files to numpy arrays
def load_data(data_dir):
    """Load the Kaggle digit-recognizer CSVs found under *data_dir*.

    train.csv: first column is the label, the rest are pixel values.
    test.csv: pixel values only.
    The header row and the empty fragment after the trailing newline are
    discarded, matching the original parsing behaviour.

    Returns (X_train, y_train, X_test) as integer numpy arrays.
    """
    def _parse(name):
        # Split into lines, dropping the header ([1:]) and the empty string
        # produced by the file's trailing newline ([:-1]).
        with open(data_dir + name) as f:
            raw = f.read()
        return [line.split(",") for line in raw.split("\n")[1:-1]]

    train_rows = _parse("train.csv")
    y_train = np.array([int(row[0]) for row in train_rows])
    X_train = np.array([[int(value) for value in row[1:]] for row in train_rows])

    test_rows = _parse("test.csv")
    X_test = np.array([[int(value) for value in row] for row in test_rows])
    return X_train, y_train, X_test
class simple_knn():
    """A simple k-nearest-neighbours classifier using L2 distance.

    The training set is stored verbatim; prediction materialises the full
    test-vs-train distance matrix, so memory is O(num_test * num_train).
    """
    def __init__(self):
        pass

    def train(self, X, y):
        """Memorize the training data (kNN has no fitting step)."""
        self.X_train = X
        self.y_train = y

    def predict(self, X, k=1):
        """Return predicted labels for each row of X by majority vote over
        the k nearest training points.

        Ties in the vote are broken toward the label encountered first among
        the k nearest (Counter.most_common preserves first-seen order), so
        for 5-NN labels [1, 2, 1, 2, 3] the prediction is 1.
        """
        dists = self.compute_distances(X)
        num_test = dists.shape[0]
        y_pred = np.zeros(num_test)
        for i in range(num_test):
            # training labels ordered by distance to test point i
            labels = self.y_train[np.argsort(dists[i, :])].flatten()
            k_closest_y = labels[:k]
            vote = Counter(k_closest_y)
            y_pred[i] = vote.most_common(1)[0][0]
        return(y_pred)

    def compute_distances(self, X):
        """Vectorized pairwise L2 distances via the expansion
        ||a-b||^2 = a.a - 2 a.b + b.b; returns a (num_test, num_train) array.
        """
        dot_pro = np.dot(X, self.X_train.T)
        sum_square_test = np.square(X).sum(axis = 1)
        sum_square_train = np.square(self.X_train).sum(axis = 1)
        # FIX: use broadcasting instead of the deprecated np.matrix, and clip
        # tiny negative values from floating-point cancellation before sqrt
        # so no NaNs appear for near-duplicate points.
        sq = -2 * dot_pro + sum_square_train + sum_square_test[:, np.newaxis]
        return(np.sqrt(np.maximum(sq, 0)))
# -
# Let's read `../input/train.csv` and `../input/test.csv` files to numpy arrays.
#
# Print shapes of those arrays as a sanity check.
# runs for 35 seconds
data_dir = "../input/"
X_train, y_train, X_test = load_data(data_dir)
print(X_train.shape, y_train.shape, X_test.shape)
# Visualize random samples from training data.
# +
# runs for 10 seconds
classes = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
num_classes = len(classes)
samples = 8
for y, cls in enumerate(classes):
idxs = np.nonzero([i == y for i in y_train])
idxs = np.random.choice(idxs[0], samples, replace=False)
for i , idx in enumerate(idxs):
plt_idx = i * num_classes + y + 1
plt.subplot(samples, num_classes, plt_idx)
plt.imshow(X_train[idx].reshape((28, 28)))
plt.axis("off")
if i == 0:
plt.title(cls)
plt.show()
# -
# just to visualize ith test image
plt.imshow(X_test[2311].reshape((28, 28)))
# Split testing data into batches as distances of 10,000 test images and
# 60,000 train images won't fit in memory.
# predict labels for batch_size number of test images at a time.
batch_size = 2000
# k = 3
k = 1
classifier = simple_knn()
classifier.train(X_train, y_train)
# As Kaggle kernels have 1200 seconds limit, I have divided the prediction step
# into two cells each cell running for 13 minutes and saving prediction to `predictions`.
# +
# runs for 13 minutes
predictions = []
for i in range(int(len(X_test)/(2*batch_size))):
# predicts from i * batch_size to (i+1) * batch_size
print("Computing batch " + str(i+1) + "/" + str(int(len(X_test)/batch_size)) + "...")
tic = time.time()
predts = classifier.predict(X_test[i * batch_size:(i+1) * batch_size], k)
toc = time.time()
predictions = predictions + list(predts)
# print("Len of predictions: " + str(len(predictions)))
print("Completed this batch in " + str(toc-tic) + " Secs.")
print("Completed predicting the test data.")
# +
# runs for 13 minutes
# uncomment predict lines to predict second half of test data
for i in range(int(len(X_test)/(2*batch_size)), int(len(X_test)/batch_size)):
# predicts from i * batch_size to (i+1) * batch_size
print("Computing batch " + str(i+1) + "/" + str(int(len(X_test)/batch_size)) + "...")
tic = time.time()
#predts = classifier.predict(X_test[i * batch_size:(i+1) * batch_size], k)
toc = time.time()
#predictions = predictions + list(predts)
# print("Len of predictions: " + str(len(predictions)))
print("Completed this batch in " + str(toc-tic) + " Secs.")
print("Completed predicting the test data.")
# -
# After predicting and saving results in Python array, we dump our predictions to a csv file
# named `predictions.csv` which gets an accuracy of 97.114% on public leaderboard.
# Write the Kaggle submission file. FIX: use a context manager so the file
# is closed (and flushed to disk) even if an exception occurs mid-write.
with open("predictions.csv", "w") as out_file:
    out_file.write("ImageId,Label\n")
    # Kaggle expects 1-based image ids and integer labels.
    for i in range(len(predictions)):
        out_file.write(str(i+1) + "," + str(int(predictions[i])) + "\n")
| 2 digit recognizer/knn-from-scratch-in-python-at-97-1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# Replace this text with the description of your task. Obviously the following text is just a silly example. You may use any number of markdown or code cells, but the code cells will not be run.
#
# Sometimes the user wants to see a caterpillar in their code's output. What kind of code could be used to produce a caterpillar? We assume the caterpillar should face to the left, and that the user can specify the caterpillar's length.
#
| examples/Python/How to print a caterpillar/description.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Publishing packages as web layers
#
# Packages in ArcGIS bundle maps, data, tools and cartographic information. ArcGIS lets you [create a variety of packages](http://pro.arcgis.com/en/pro-app/help/sharing/overview/introduction-to-sharing-packages.htm) such as map (.mpkx), layer (.lpkx), map tile (.tpk), vector tile (.vtpk), scene layer (.slpk), geoprocessing (.gpkx) packages etc. to name a few. You can share any of these packages with other users either as files on a network share or as items in your portal. In addition, some of these packages can be shared as web layers.
#
# In this sample, we will observe how to publish web layers from tile, vector tile and scene layer packages. Data for this sample is available in the accompanying `data` folder.
#
# ## Publishing tile layers from a tile package
#
# A [Tile package](http://pro.arcgis.com/en/pro-app/help/sharing/overview/tile-package.htm) contains a set of tiles (images) from a map or raster dataset. These tiles (also called as tile cache) can be used as basemaps and are useful for visualizing imagery or relatively static data.
# connect to the GIS
from arcgis.gis import GIS
# NOTE(review): credentials are hard-coded for the public ArcGIS playground portal;
# replace with your own portal URL / profile before reusing this notebook.
gis = GIS("https://pythonapi.playground.esri.com/portal", "arcgis_python", "amazing_arcgis_123")
# Upload the tile package (USA_counties_divorce_rate.tpk) as an item. To keep our 'my contents' tidy, let us create a new folder called 'packages' and add to it.
gis.content.create_folder('packages')
tpk_item = gis.content.add({}, data='data/USA_counties_divorce_rate.tpk', folder='packages')
tpk_item
# Now, let us go ahead and publish this item as a tile layer
tile_layer = tpk_item.publish()
tile_layer
# ## Publishing vector tile layers from a vector tile package
#
# A [vector tile package](http://pro.arcgis.com/en/pro-app/help/sharing/overview/vector-tile-package.htm) is a collection of vector tiles and style resources. Vector tiles contain vector representations of data across a range of scales. Unlike raster tiles, they can adapt to the resolution of the display device and even be customized for multiple uses.
#
# Let us upload a World_earthquakes_2010.vtpk vector tile package like earlier and publish that as a vector tile service
# upload vector tile package to the portal
vtpk_item = gis.content.add({}, data='data/World_earthquakes_2010.vtpk', folder='packages')
vtpk_item
# publish that item as a vector tile layer
vtpk_layer = vtpk_item.publish()
vtpk_layer
# ## Publishing scene layers from a scene layer package
#
# A [scene layer package](http://pro.arcgis.com/en/pro-app/help/sharing/overview/scene-layer-package.htm) contains a cache of a multipatch, point, or point cloud dataset and is used to visualize 3D data. You can publish this package and create a web scene layer which can be visualized on a web scene.
#
# Let us publish a 'World_earthquakes_2000_2010.slpk' scene layer package that visualizes global earthquakes between the years 2000 and 2010 in 3 dimension
# Upload the scene layer package, then publish it as a hosted scene layer.
slpk_item = gis.content.add({}, data='data/World_earthquakes_2000_2010.slpk', folder='packages')
slpk_item
slpk_layer = slpk_item.publish()
slpk_layer
| samples/05_content_publishers/publishing_packages_as_web_layers.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from astropy.io import fits
from IPython.display import display, Math
from scipy.optimize import minimize
import astropy.units as u
from astropy.modeling.models import custom_model
from scipy import interpolate
import keras
from scipy.optimize import Bounds
import sys
sys.path.insert(0, '/home/carterrhea/Documents/LUCI/') # Location of Luci
import LUCI.LuciFit as lfit
import emcee
import corner
# +
# Read in Data
# Load the reference spectrum FITS file: HDU 0 carries the header (true
# velocity/broadening used to synthesise the spectrum), HDU 1 the data table.
sigma_info = []
velocity_info = []
res_info = []
spectrum = fits.open('../Data/Reference-Spectrum-R5000.fits')
header = spectrum[0].header
spec = spectrum[1].data
# Unpack the (channel, count) pairs of the table into two parallel lists.
channel = []
counts = []
for chan in spec:
    channel.append(chan[0])
    counts.append(chan[1])
axis = channel
sky = counts
velocity = header['VELOCITY']
sigma = header['BROADEN']
print(velocity, sigma)
# -
# Machine Learning Reference Spectrum
# The ML model was trained on a fixed wavenumber grid; read that grid so the
# observed spectrum can be resampled onto it.
ref_spec = fits.open('../ML/Reference-Spectrum-R5000-SN3.fits')[1].data
channel = []
counts = []
for chan in ref_spec:  # Only want SN3 region
    channel.append(chan[0])
    counts.append(np.real(chan[1]))
# Restrict to the SN3 filter band (~14700-15600 cm^-1).
min_ = np.argmin(np.abs(np.array(channel)-14700))
max_ = np.argmin(np.abs(np.array(channel)-15600))
wavenumbers_syn = channel[min_:max_]
# Interpolate the observed spectrum onto the ML reference grid and normalise
# to a peak of 1 (scale kept in sky_corr_scale).
f = interpolate.interp1d(axis, sky, kind='slinear')
sky_corr = (f(wavenumbers_syn))
sky_corr_scale = np.max(sky_corr)
sky_corr = sky_corr/sky_corr_scale
print(sky_corr.shape)
plt.plot(wavenumbers_syn, sky_corr)
# Fit five emission lines with a sincgauss profile; the Keras model provides
# the initial parameter guesses and emcee refines them (Bayesian sampling).
fit = lfit.Fit(sky, axis, wavenumbers_syn, 'sincgauss', ['Halpha', 'NII6583', 'NII6548','SII6716', 'SII6731'], [1,1,1,1,1], [1,1,1,1,1],
               keras.models.load_model('../ML/R5000-PREDICTOR-I-MDN-SN3'),
               bayes_bool=True, bayes_method='emcee', mdn=True)
fit_dict = fit.fit()
print(fit_dict['sigmas'])
print(fit_dict['fit_sol'])
# Overlay the fitted model on the data in the SN3 band.
plt.plot(axis, sky, label='spectrum')
plt.plot(axis, fit_dict['fit_vector'], label='fit vector')
plt.xlim(14800, 15300)
plt.legend()
fit_dict['amplitudes']
| Examples/Fit-Reference.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import queue
class BinaryTreeNode:
    """A single binary-tree node: a payload value plus left/right child links."""

    def __init__(self, data):
        self.data = data
        # Children start detached; the tree builder wires them up later.
        self.left = self.right = None
def searchInBST(root, k):
    """Search the BST rooted at *root* for key *k*.

    Returns the node holding k, or None when k is absent (or the tree is
    empty). Iterative walk instead of recursion: smaller keys descend left,
    larger keys descend right.
    """
    node = root
    while node is not None:
        if k == node.data:
            return node
        node = node.left if k < node.data else node.right
    return None
def buildLevelTree(levelorder):
    """Rebuild a binary tree from a level-order listing.

    The listing supplies, for every materialised node, its left child value
    followed by its right child value; -1 marks a missing child. Returns the
    root node, or None for an empty/absent tree.
    """
    if len(levelorder) <= 0 or levelorder[0] == -1:
        return None
    root = BinaryTreeNode(levelorder[0])
    cursor = 1  # next unread position in the listing
    pending = queue.Queue()
    pending.put(root)
    while not pending.empty():
        node = pending.get()
        # Each dequeued node consumes exactly two entries: left, then right.
        for side in ('left', 'right'):
            value = levelorder[cursor]
            cursor += 1
            if value != -1:
                child = BinaryTreeNode(value)
                setattr(node, side, child)
                pending.put(child)
    return root
# Main
# Read a whitespace-separated level-order listing (with -1 for absent
# children) from stdin, build the BST, then search it for the key given on
# the next input line; print the key only when it is found.
levelOrder = [int(i) for i in input().strip().split()]
root = buildLevelTree(levelOrder)
k=int(input())
node=searchInBST(root, k)
if node:
    print(node.data)
| 15 BST-1/15.2 Search in BST.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import torch
import pandas as pd
import numpy as np
from tqdm.auto import tqdm
import joblib
import matplotlib.pyplot as plt
# -
# +
def far_func(sorted_dist: torch.Tensor, indices: torch.Tensor):
    """Select the 100 farthest neighbours per row.

    Given per-row distances sorted ascending along dim 1 and their matching
    column indices, return the last 100 columns of each.
    (Annotation fixed: ``torch.Tensor`` is the type; ``torch.tensor`` is the
    factory function and is not a valid type annotation.)
    """
    return sorted_dist[:, -100:], indices[:, -100:]
def close_func(sorted_dist: torch.Tensor, indices: torch.Tensor):
    """Select the 100 nearest neighbours per row.

    Given per-row distances sorted ascending along dim 1 and their matching
    column indices, return the first 100 columns of each (column 0 is the
    point itself, distance 0).
    (Annotation fixed: ``torch.Tensor`` is the type; ``torch.tensor`` is the
    factory function and is not a valid type annotation.)
    """
    return sorted_dist[:, :100], indices[:, :100]
def calculate_distance(x, close_fn, far_fn):
    """Batched all-pairs L2 distances with per-row nearest/farthest selection.

    Each batch of rows of ``x`` is compared against every row of ``x`` with
    ``torch.cdist``; each distance row is sorted ascending and ``close_fn`` /
    ``far_fn`` pick which (distance, column-index) slices to keep (e.g.
    ``close_func`` / ``far_func`` above).

    Parameters
    ----------
    x : 2-D float tensor, one sample per row.
    close_fn, far_fn : callables ``(sorted_dist, indices) -> (dist, idx)``.

    Returns
    -------
    Tuple of CPU tensors:
    ``(anchor_idx, close_idx, far_idx, close_distance, far_distance)``.

    Fixes vs. the original: removed the no-op line
    ``sorted_dist, indices = sorted_dist, indices`` and replaced
    ``n // batch_size + 1`` with guarded ceil-division, which produced a
    useless trailing empty batch whenever n was an exact multiple of the
    batch size (the max(..., 1) keeps the original empty-input behaviour).
    """
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    batch_size = 2048
    x_device = x.to(device)
    num_iter = max((x.shape[0] + batch_size - 1) // batch_size, 1)
    anchor_idx_list, close_idx_list, far_idx_list = list(), list(), list()
    close_distance_list, far_distance_list = list(), list()
    for i in tqdm(torch.arange(num_iter), desc='create triplets'):
        batch_x = x[i * batch_size: (i + 1) * batch_size, :].to(device)
        dist = torch.cdist(x1=batch_x, x2=x_device, p=2)  # (batch, n)
        sorted_dist, indices = torch.sort(dist, dim=1, descending=False)
        # Global row numbers of this batch; column 0 of `indices` is the
        # point itself (distance 0), i.e. it should equal anchor_idx.
        anchor_idx = torch.arange(i * batch_size, i * batch_size + batch_x.shape[0])  # (batch,)
        close_distance, close_idx = close_fn(sorted_dist, indices)
        far_distance, far_idx = far_fn(sorted_dist, indices)
        # Move results to CPU immediately so GPU memory is not held across batches.
        anchor_idx_list.append(anchor_idx.cpu())
        close_idx_list.append(close_idx.cpu())
        far_idx_list.append(far_idx.cpu())
        close_distance_list.append(close_distance.cpu())
        far_distance_list.append(far_distance.cpu())
    anchor_idx_list = torch.cat(anchor_idx_list, dim=0)
    close_idx_list = torch.cat(close_idx_list, dim=0)
    far_idx_list = torch.cat(far_idx_list, dim=0)
    close_distance_list = torch.cat(close_distance_list, dim=0)
    far_distance_list = torch.cat(far_distance_list, dim=0)
    return anchor_idx_list, close_idx_list, far_idx_list, close_distance_list, far_distance_list
# +
# Pick which split to precompute ('train' here); the CSV has no header row.
mode = 'train'
data = torch.from_numpy(pd.read_csv(f'../data/{mode}.csv', header=None).to_numpy())
anchor_idx, close_idx, far_idx, close_distance, far_distance = calculate_distance(data, close_func, far_func)
# -
# Persist the raw data together with the precomputed neighbour tables so the
# dataset class can load them without redoing the O(n^2) distance pass.
torch.save({
    'data': data,
    'anchor_idx': anchor_idx,
    'close_idx': close_idx,
    'far_idx': far_idx,
    'close_distance': close_distance,
    'far_distance': far_distance},
    f'../data/{mode}.pt'
)
# Sanity plots: mean/std of the k-th closest and k-th farthest distances.
close_distance.mean(dim=0)
plt.plot(close_distance.mean(dim=0))
far_distance.mean(dim=0)
plt.plot(far_distance.mean(dim=0))
close_distance.std(dim=0)
far_distance.std(dim=0)
# check dataset
from src.models.siamese_triplet.datasets import SiameseSynthesis
from pathlib import Path
import torch
# +
siamese_test_dataset = SiameseSynthesis(Path('../data/dev.pt'))
# -
# Spot-check a few samples from the saved dev split.
siamese_test_dataset[0]
siamese_test_dataset[1]
siamese_test_dataset[201]
d = torch.load('../data/dev.pt')
d['close_idx']
| notebooks/pre-compute-nn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/", "height": 187} colab_type="code" id="GVPFeNy5jYgP" outputId="87ac1c46-ad58-4758-be1c-2a8014b179c0"
# !python -m pip install control
# + colab={} colab_type="code" id="hewFVkaTi8ma"
# %matplotlib notebook
import matplotlib.pyplot as plt
import numpy as np
import control
# + [markdown] colab={} colab_type="code" id="WfPDgV4Xi8mk"
# $$ '''f = np.logspace(-2,3,1000) #Hz 10 rest to -2 is min & 10 reat to 3 is max
# w = 2*np.pi*f
# s = 1.0j*w''' $$
# + [markdown] colab={} colab_type="code" id="m3V7vqb3i8mp"
# ## p1 = 10*2*np.pi
# ## p2 = 40*2*np.pi
# ## p3 = 10
# -
# NOTE(review): eval() on raw user input executes arbitrary code — acceptable
# only in an interactive notebook; ast.literal_eval would parse the same
# numeric/list literals safely.
p1 = eval(input("Enter the Poles: "))
z1 = eval(input("Enter the Zeros: "))
# + colab={} colab_type="code" id="xIjDJ0ohi8mu"
# Build G(s) as (zero polynomial) / (pole polynomial) via two transfer functions.
z = control.TransferFunction(z1,1)
p = control.TransferFunction(1,p1)
G = z*p
# + colab={"base_uri": "https://localhost:8080/", "height": 85} colab_type="code" id="z2CAyeFLi8mz" outputId="6218ee93-a03b-4aca-9d31-840aa6c09120"
G
# + colab={"base_uri": "https://localhost:8080/", "height": 365} colab_type="code" id="qJ3qTBzTi8nF" outputId="7a6b69d5-2afc-461f-ff49-cc0d28223764"
# Bode plot of G: magnitude in dB, frequency axis in Hz, phase in degrees.
out = control.bode_plot(G,dB=1,Hz=1,deg=1)
# + colab={} colab_type="code" id="KNQCoNeci8nF"
out
# -
| test2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
# Load sheet 1 of the workbook; row 1 holds the column names (header=1).
raw_data = pd.read_excel('Test_dataset.xlsx',sheet_name = 1,header = 1)
raw_data
# Drop rows with any missing value before modelling.
raw_data.dropna(axis = 0, inplace=True)
raw_data
from fbprophet import Prophet
def run_prophet(timeserie):
    """Fit a Prophet model on *timeserie* and forecast one step past its end.

    Yearly and daily seasonalities are disabled; only the forecast row is
    returned (history is excluded from the future frame).
    """
    model = Prophet(yearly_seasonality=False, daily_seasonality=False)
    model.fit(timeserie)
    future = model.make_future_dataframe(periods=1, include_history=False)
    return model.predict(future)
# Take the first row's observations (columns 1..5) as the series to model.
series = raw_data.iloc[0,1:6]
series
| Part2/Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# # scikit-learn
# ### Generate a CSV file to work with
# %%writefile test.csv
number,group,text,positive
110,C,people corporation problem problem think think transport science go transport place,True
114,A,you problem government you,True
130,B,you people place corporation problem science,True
185,C,have think corporation fact corporation transport transport,True
149,A,people have science science go go corporation you you fact,True
115,C,science people go transport corporation,True
156,A,transport corporation,True
190,C,transport you,True
160,A,the problem think transport place government problem go place the have place place,True
142,B,fact science government go,True
196,A,the fact,False
166,A,problem the corporation you you the transport have work go work problem you,True
168,B,have science science place the science problem,False
167,A,government place think you corporation corporation government the go,True
143,C,place science have fact science corporation science,True
185,C,place the fact transport the transport work science fact people the think problem,True
196,C,place government work science the have go work fact place the fact,True
102,A,go government think people the have the,True
104,C,work transport problem fact people,True
177,C,work problem you think think you transport go think fact go go,True
197,B,government,True
104,A,corporation corporation fact corporation work work,True
151,C,corporation science you people corporation government people go,True
105,A,go fact fact,False
165,B,think fact government the work problem work corporation transport,True
107,B,work transport transport fact people science corporation government,True
177,B,science people you corporation transport people corporation corporation science think have problem government,True
186,B,you government problem you government science fact,True
162,B,go have go go corporation fact government,True
107,A,think have the the science problem fact think go you,False
166,B,transport place place you go government have fact fact you the people think,True
163,C,work problem place have the you you problem people fact science place go think,False
121,B,place government problem,True
118,B,work corporation people the transport corporation corporation government problem the transport have fact people,True
106,A,the problem place work place corporation go work transport,True
138,C,transport science,True
111,B,fact transport corporation place corporation you place corporation you government place people transport government,True
123,A,have work place fact science think work think go place place have problem,False
181,C,corporation transport think,True
156,B,have problem have transport have go the,True
109,B,people you have have go place transport you science corporation people,True
157,C,transport the fact have,True
192,C,have,False
183,C,the you problem think place think you transport people go problem fact go,True
134,A,people place you the corporation science think government transport,True
115,B,transport problem place problem think transport go corporation corporation you place,True
154,A,government government think corporation have science corporation transport,True
106,A,you go science transport problem problem the corporation problem transport problem think,True
138,C,place transport you people work go science government transport people,True
159,B,think have the fact think you,False
194,B,have government the place science you,True
127,C,work think you science science,False
102,C,go corporation you corporation transport place place think people go fact fact,True
103,C,have think corporation problem work go science place fact problem problem problem people,True
127,B,corporation people think go transport fact you go transport science,True
101,C,people science people have place corporation think,True
149,B,the the you fact place people science problem think go think place,False
136,A,place,False
109,A,science problem transport,True
156,C,work place think have work go transport you problem go,True
168,B,fact science problem have think,False
189,C,fact problem problem go government corporation,True
102,B,fact the corporation have science place the have,True
161,A,corporation you fact,True
161,B,work corporation corporation have place work you transport science problem government,True
177,B,fact fact,False
123,B,government,True
146,C,the fact you work you corporation place corporation fact,True
136,A,work think you,False
184,A,corporation transport go place have government go you you corporation,True
185,A,transport go fact corporation have have government think people,True
125,A,place place people work science think fact fact,False
191,A,science work government fact the problem you,True
170,B,you transport work corporation work place government people corporation you the,True
186,A,work science think corporation think fact,True
142,A,work people go think go fact transport go you,True
187,A,you problem problem work,False
183,C,corporation corporation work place fact government think work work have you have the,True
128,A,place problem transport the,True
118,C,transport think work you transport you work corporation have have have place,True
151,C,work problem you go,False
136,A,you the you think,False
178,C,corporation you the people science people think,True
171,C,transport you government you corporation the problem you have place place have think,True
104,C,transport work go corporation think,True
157,A,go place people place the,False
114,C,science go corporation corporation government place problem the,True
141,C,think government corporation go the government science go corporation problem place have people problem,True
113,B,transport science fact think fact corporation corporation work,True
170,B,transport government think you go go,True
137,B,science people go government have,True
170,C,science people place,False
154,B,place go transport the government corporation fact transport fact go you corporation,True
180,B,go transport fact government government work you science,True
166,A,work go work people science the place people problem work go work,False
157,C,problem fact fact the have fact go you government work people,True
198,C,place think transport government,True
153,A,government place problem work go the work have fact have people have work fact,True
138,A,people,False
111,B,transport science people think think government people fact,True
import pandas as pd
# Load the toy dataset written by the %%writefile cell above.
df = pd.read_csv('test.csv')
df.head()
# Class balance of the boolean target.
df.positive.value_counts(normalize=True)
# ### Split the data into train and test
from sklearn.model_selection import train_test_split
# Features = every column except the target; one third held out for testing.
x_train, x_test, y_train, y_test = train_test_split(df[df.columns.difference(['positive'])],
                                                    df['positive'], test_size=1/3)
x_train.shape
y_train.shape
x_test.shape
# ### Logistic Regression
from sklearn.linear_model import LogisticRegression
# Baseline: fit on the single numeric column only.
clf = LogisticRegression()
clf.fit(x_train[['number']], y_train)
# Score is accuracy
clf.score(x_test[['number']], y_test)
from sklearn.metrics import roc_auc_score
clf.classes_
# Use `predict_proba` to get probabilities as opposed to classes
# Get the probability for the True class
x_test_probabilities = clf.predict_proba(x_test[['number']])[:, 1]
roc_auc_score(y_test, x_test_probabilities)
from sklearn.metrics import precision_score, recall_score
precision_score(y_test, clf.predict(x_test[['number']]))
recall_score(y_test, clf.predict(x_test[['number']]))
# ### Categorical One-Hot-Encoding (OHE)
x_train_with_categorical = pd.get_dummies(x_train, columns=['group'])
x_train_with_categorical.head()
# NOTE(review): get_dummies on train and test independently only works when
# both splits contain the same group levels — verify, or dummy-encode before
# splitting.
x_test_with_categorical = pd.get_dummies(x_test, columns=['group'])
clf = LogisticRegression()
clf.fit(x_train_with_categorical[x_train_with_categorical.columns.difference(['text'])], y_train)
clf.score(x_test_with_categorical[x_test_with_categorical.columns.difference(['text'])], y_test)
# ### Vectorizer
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, HashingVectorizer
# Bag-of-words on the text column; the vectorizer is fitted on train only and
# reused (transform, not fit) for the test split — correct leakage-free usage.
x_train_vectorizer = CountVectorizer()
x_train_vectorizer.fit(x_train['text'])
x_train_vectorized = x_train_vectorizer.transform(x_train['text'])
x_train_vectorized
clf = LogisticRegression()
clf.fit(x_train_vectorized, y_train)
clf.score(x_train_vectorizer.transform(x_test['text']), y_test)
# +
# Same process can be repeated with TF-IDF and Hashing vectorizers
# -
# ### Combining features
import numpy as np
x_train_vectorized
# Stack the dense bag-of-words matrix with the numeric/one-hot columns.
# NOTE(review): the train matrix is indexed with x_test_with_categorical's
# columns — works only while both splits share identical columns; confirm.
features = np.concatenate([x_train_vectorized.todense(),
                           x_train_with_categorical[x_test_with_categorical.columns.difference(['text'])].values
                           ],
                          axis=1)
features
clf = LogisticRegression()
clf.fit(features, y_train)
features_test = np.concatenate([x_train_vectorizer.transform(x_test['text']).todense(),
                                x_test_with_categorical[x_test_with_categorical.columns.difference(['text'])].values
                                ], axis=1)
clf.score(features_test, y_test)
# ### Other learners
# Just swap the classifier, interface is the same
#
# List of scikit-learn classifiers: http://scikit-learn.org/stable/supervised_learning.html
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier()
clf.fit(features, y_train)
clf.score(features_test, y_test)
| machine-learning/scikit-learn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="Nsy7iQ-ZXA5G"
# ## Scrape BNC
# + id="FHmydSjcLBnH"
from nltk.corpus.reader.bnc import BNCCorpusReader
import os
import pandas as pd
import pickle
import nltk
import statistics
import re
from nltk.tokenize import RegexpTokenizer
import string
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 18, "status": "ok", "timestamp": 1623062211358, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "05221982976134027612"}, "user_tz": -120} id="bpYNHXeiLBrv" outputId="c5516626-a2d5-4487-a45e-6de91de74713"
# Fetch the Punkt models required by nltk.word_tokenize below.
nltk.download('punkt')
# + id="SB_Ot2vOLBuQ"
# these are the 4 sub-directories of BNC
subcorpora_dir = ["aca", "dem", "fic", "news"]
bnc_sentences = []  # one list of sentence strings per subcorpus
for subcorpora in subcorpora_dir:
    # NOTE(review): ".data/..." (no slash after the dot) — looks like a typo
    # for "./data/..."; the pickle at the end writes under "./data". Confirm.
    DIR = ".data/misc/BNC_texts/Texts/{}".format(subcorpora)
    # this is a nltk built-in class to handle BNC
    bnc_reader = BNCCorpusReader(root=DIR, fileids=r'[A-K]/\w*/\w*\.xml')
    # get all the file names in the subdirectory
    list_of_file_ids = []
    for root, dirs, files in os.walk(DIR):
        for filename in files:
            list_of_file_ids.append(filename)
    # in this, the normal tokens along with the extra-informed tags are given
    pos_tagged_sents = bnc_reader.tagged_sents(fileids=list_of_file_ids)
    # retrieve the tokens (drop the POS tag of each (token, tag) pair)
    tokens = []
    extra_tags = []
    for elem in pos_tagged_sents:
        token = [e[0] for e in elem]
        tokens.append(token)
    # retrieve the sentences (re-join each token list with spaces)
    sentences = []
    for elem in tokens:
        sentences.append(' '.join(elem))
    bnc_sentences.append(sentences)
# + id="f_KeMCJOLBwg"
# Flatten the per-subcorpus lists into one list of sentences.
flat_bnc_sentences = [item for sublist in bnc_sentences for item in sublist]
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 38953, "status": "ok", "timestamp": 1623062317538, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "05221982976134027612"}, "user_tz": -120} id="51h7PjoSLByv" outputId="7c03dab8-d23e-4fc9-ac0d-b52b3d63f78a"
# calculate the average length, for better insight on the sentences
lengths = []
for sentence in flat_bnc_sentences:
    token_sentence = nltk.word_tokenize(sentence)
    lengths.append(len(token_sentence))
# Mean token count per sentence across the whole corpus.
sentence_average_len = statistics.mean(lengths)
print(sentence_average_len)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 25, "status": "ok", "timestamp": 1623062317539, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "05221982976134027612"}, "user_tz": -120} id="FcaThyEOmOLn" outputId="420342fa-f83e-4872-8b3e-3139307e548f"
# Notebook display: peek at the first 500 raw sentences.
flat_bnc_sentences[:500]
# + [markdown] id="VuE-I-mHXGBd"
# ## Preprocess BNC
# + id="YehUNHg5omIi"
# since the BNC corpus contained many mathematics-specific sentences, we chose to control that by eliminating sentences containing numbers
def delete_num_sentences(text):
    """Return *text* unchanged when it contains no numeric tokens, else False.

    Tokens such as '3.14' or '1,000' count as numbers: '.' and ',' are
    stripped before the digit test. (The BNC corpus contained many
    mathematics-specific sentences, filtered out via this check.)
    """
    for token in text.split():
        # some numbers were in the form x.x or x,x
        if '.' in token or ',' in token:
            token = token.replace('.', '').replace(',', '')
        if token.isdigit():
            # A number was found: signal the caller to drop this sentence.
            return False
    return text
# + id="Y0wVRCCPq2ul"
# examine if words with non english chatacters exist
def check_all_english(text):
    """Return *text* when every non-numeric word is pure ASCII letters, else False.

    Words are extracted with a \\w+ tokenizer, so punctuation is ignored.
    ``word.encode().isalpha()`` operates on the UTF-8 bytes, which rejects any
    word containing non-ASCII characters (accented letters encode to
    multi-byte sequences that are not alphabetic bytes).

    Fix vs. the original: removed the unused local ``english_texts`` (dead
    code — it was created and never read).
    """
    tokenizer = RegexpTokenizer(r'\w+')
    tokenized_text = tokenizer.tokenize(text)
    # all() over an empty token sequence is True, so empty text passes through.
    if all(word.encode().isalpha() for word in tokenized_text if not word.isdigit()):
        return text
    return False
# + id="YVHckXZDLB1L"
# Corpus preprocessing
text_corpus = []
for sen in flat_bnc_sentences:
    # skip sentences containing digits
    sen = delete_num_sentences(sen)
    if (sen):
        # skip sentences containing non-english words
        sen = check_all_english(sen)
        if (sen):
            # NOTE(review): `sen not in string.punctuation` is a substring
            # test on the punctuation string — it filters out single
            # punctuation characters; confirm that is the intent.
            if sen not in string.punctuation:
                # clean surrounding whitespace
                sen = sen.strip()
                # remove urls
                # NOTE(review): the result is assigned to `stripped_article`
                # and never used — `sen` keeps any URLs. Dead code; confirm
                # whether `sen` should be reassigned here.
                stripped_article = re.sub(r'^https?:\/\/.*[\r\n]*', '', sen, flags=re.MULTILINE)
                # further preprocessing: re-attach parentheses to their content
                sen = sen.replace("( ","(")
                sen = sen.replace(" )",")")
                # # remove whitespace before punctuation
                sen = sen.replace(" .",".").replace(" ,",",").replace(" !","!").replace(" ?","?")
                # further preprocessing: collapse doubled punctuation
                sen = sen.replace(", ,",",").replace(",',",",'").replace(",,",",").replace("..",".").replace("!!","!").replace("??","?")
                # remove content inside parentheses (usually unecessary information for our cause)
                sen = re.sub(r'\([^)]*\)', '', sen)
                # remove big spaces
                sen = re.sub('\s{2,}', " ", sen)
                text_corpus.append(sen)
# # remove empty elements of a list
text_corpus = list(filter(None, text_corpus))
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 17, "status": "ok", "timestamp": 1623062324560, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "05221982976134027612"}, "user_tz": -120} id="r5QSdQ2gy1J0" outputId="0c116256-168b-4407-cc12-5e854e097a8c"
text_corpus[:500]
# + id="e1k9HGkLoERH"
# expand abbreviations based on a predefined dictionary
# (each contraction appears twice: once normal, once with a stray space
# before the apostrophe, as produced by the BNC tokenisation)
abbr_dict={"what's":"what is", "what're":"what are", "who's":"who is", "who're":"who are", "where's":"where is", "where're":"where are", "when's":"when is",
           "when're":"when are", "how's":"how is", "how're":"how are", "i'm":"i am", "we're":"we are", "you're":"you are", "they're":"they are", "it's":"it is",
           "he's":"he is", "she's":"she is", "that's":"that is", "there's":"there is", "there're":"there are", "i've":"i have", "we've":"we have", "you've":"you have",
           "they've":"they have", "who've":"who have", "would've":"would have", "not've":"not have", "i'll":"i will", "we'll":"we will", "you'll":"you will", "he'll":"he will",
           "she'll":"she will", "it'll":"it will", "they'll":"they will", "isn't":"is not", "wasn't":"was not", "aren't":"are not", "weren't":"were not", "can't":"can not",
           "couldn't":"could not", "don't":"do not", "didn't":"did not", "shouldn't":"should not", "wouldn't":"would not", "doesn't":"does not", "haven't":"have not",
           "hasn't":"has not", "hadn't":"had not", "won't":"will not", "what' s":"what is", "what' re":"what are", "who' s":"who is", "who' re":"who are", "where' s":"where is",
           "where' re":"where are", "when' s":"when is", "when' re":"when are", "how' s":"how is", "how' re":"how are", "i' m":"i am", "we' re":"we are", "you' re":"you are",
           "they' re":"they are", "it' s":"it is", "he' s":"he is", "she' s":"she is", "that' s":"that is", "there' s":"there is", "there' re":"there are", "i' ve":"i have",
           "we' ve":"we have", "you' ve":"you have", "they' ve":"they have", "who' ve":"who have", "would' ve":"would have", "not' ve":"not have", "i' ll":"i will", "we' ll":"we will",
           "you' ll":"you will", "he' ll":"he will", "she' ll":"she will", "it' ll":"it will", "they' ll":"they will", "isn' t":"is not", "wasn' t":"was not", "aren' t":"are not",
           "weren' t":"were not", "can' t":"can not", "couldn' t":"could not", "don' t":"do not", "didn' t":"did not", "shouldn' t":"should not", "wouldn' t":"would not",
           "doesn' t":"does not", "haven' t":"have not", "hasn' t":"has not", "hadn' t":"had not", "won' t":"will not"}
# this is just to be sure that the quotes will be those we have used in our abbreviation lexicon
quote_list = "‘’‛’❜'’`‘’"
abbr_corpus = []
for elem in text_corpus:
    # make all the single quotes, the one we have used in the abbreviation lexicon
    # (character-by-character pass over the sentence string)
    expand_abbr = ["'" if e in quote_list else e for e in elem]
    expand_abbr_string = ''.join(expand_abbr)
    abbr_corpus.append(expand_abbr_string)
# expand abbreviations via vectorised regex replacement over a DataFrame column
final_corpus_df = pd.DataFrame(abbr_corpus, columns=['Sentences'])
final_corpus_df.replace(abbr_dict,regex=True,inplace=True)
final_corpus = final_corpus_df.Sentences.tolist()
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 23, "status": "ok", "timestamp": 1623062367130, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "05221982976134027612"}, "user_tz": -120} id="vVbd-WiY_Vmh" outputId="9147b774-61b0-4873-9195-f8fbc2db824a"
# Notebook display: peek at the first 500 cleaned sentences.
final_corpus[:500]
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 455, "status": "ok", "timestamp": 1623062474900, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "05221982976134027612"}, "user_tz": -120} id="tvFhOTf5DXHZ" outputId="8a717515-62a8-4acc-f837-ff3c1347e163"
# remove possible duplicates
# (dict.fromkeys keeps first occurrence and preserves insertion order)
final_corpus_clean = list(dict.fromkeys(final_corpus))
print(len(final_corpus))
print(len(final_corpus_clean))
# + id="WjxDMy0RLB3g"
# Persist the deduplicated sentence list for downstream parsing.
# NOTE(review): writes under "./data" while the reader above used ".data" —
# one of the two paths is presumably a typo; confirm.
with open('./data/misc/bnc_sentences_unparsed.pkl', 'wb') as f:
    pickle.dump(final_corpus_clean, f)
| notebooks/.ipynb_checkpoints/scrape_bnc-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import matplotlib.pyplot as plt
import numpy as np
import random
import timeit
## BUBBLESORT(A)
def bubbleSort(A):
    """Sort list *A* in place, ascending (CLRS BUBBLESORT).

    Pass i bubbles the smallest element of the unsorted suffix down into
    position i by swapping adjacent out-of-order pairs from right to left.
    """
    n = len(A)
    for i in range(n - 1):
        for j in range(n - 1, i, -1):
            if A[j] < A[j - 1]:
                A[j], A[j - 1] = A[j - 1], A[j]
## INSERTION-SORT(A)
def insertSort(A):
    """Sort list *A* in place, ascending (CLRS INSERTION-SORT, 0-based).

    Bug fixes vs. the original transcription:
    - the shift loop must run while ``i >= 0``: CLRS pseudocode is 1-indexed,
      so its ``i > 0`` becomes ``i >= 0`` here. With ``i > 0`` nothing could
      ever be inserted at position 0, leaving the list unsorted
      (e.g. [3, 1, 2] came back unchanged).
    - ``and`` replaces the bitwise ``&`` so the bounds check short-circuits
      before ``A[i]`` is evaluated; with ``&`` and ``i == -1`` the wrap-around
      element ``A[-1]`` was read needlessly.
    """
    # j = 0 is a no-op (single element is sorted), so start at 1.
    for j in range(1, len(A)):
        key = A[j]
        # Shift the sorted prefix A[0..j-1] right until key's slot is found.
        i = j - 1
        while i >= 0 and A[i] > key:
            A[i + 1] = A[i]
            i = i - 1
        A[i + 1] = key
# This is just a wrapper function for the timings
# This is just a wrapper function for the timings
def wrapper(func, *args, **kwargs):
    """Freeze *func*'s arguments and return a zero-argument callable,
    suitable for passing to timeit.timeit."""
    return lambda: func(*args, **kwargs)
# +
#### For plot 1
## Create 3 lists of 10 sublists of n random numbers to avoid skewing timing:
lists5001 = [[] for _ in range(10)]
lists5k1 = [[] for _ in range(10)]
lists50k1 = [[] for _ in range(10)]
for i in range(10):
    lists5001[i] = random.sample(range(1, 50001), 500)
    lists5k1[i] = random.sample(range(1, 50001), 5000)
    lists50k1[i] = random.sample(range(1, 50001), 50000)
# Create empty lists to store timings
# bubbleSort timings
bubble5001 = []
bubble5k1 = []
bubble50k1 = []
# insertSort timings
insert5001 = []
insert5k1 = []
insert50k1 = []
# Loop through and time the 10 random lists for each of the three lengths.
# BUG FIX: the original passed range(10) to the sorts instead of the random
# lists built above, so every timing measured a trivial 10-element input.
# A fresh copy is passed to each sort so one algorithm's in-place sort does
# not hand the other an already-sorted list. Also fixed: the 5k loop bound
# iterated lists50k1 instead of lists5k1 (same length, but misleading).
# NOTE(review): with number=10 the first timeit repetition sorts the copy
# and the remaining nine re-sort sorted data — consider number=1 if that
# skews the averages.
for i in range(len(lists5001)):
    bubble5001.append(timeit.timeit(wrapper(bubbleSort, list(lists5001[i])), number=10))
    insert5001.append(timeit.timeit(wrapper(insertSort, list(lists5001[i])), number=10))
for i in range(len(lists5k1)):
    bubble5k1.append(timeit.timeit(wrapper(bubbleSort, list(lists5k1[i])), number=10))
    insert5k1.append(timeit.timeit(wrapper(insertSort, list(lists5k1[i])), number=10))
for i in range(len(lists50k1)):
    bubble50k1.append(timeit.timeit(wrapper(bubbleSort, list(lists50k1[i])), number=10))
    insert50k1.append(timeit.timeit(wrapper(insertSort, list(lists50k1[i])), number=10))
# +
# Average the 10 timing runs for each input size (n = 500, 5000, 50000)
bubble1 = [np.mean(bubble5001), np.mean(bubble5k1), np.mean(bubble50k1)]
insert1 = [np.mean(insert5001), np.mean(insert5k1), np.mean(insert50k1)]
# x positions 0, 1, 2 stand in for the three input sizes
tests = np.arange(0., 3., 1)
# red dashed line = bubbleSort timings; blue dashed line = insertSort timings
plt.plot(tests, bubble1, 'r--')
plt.show()
plt.plot(tests, insert1, 'b--')
plt.show()
# +
#### Plot 2
## Create 3 lists of 10 sublists of n random numbers to avoid skewing timing:
lists5002 = [[] for _ in range(10)]
lists5k2 = [[] for _ in range(10)]
lists50k2 = [[] for _ in range(10)]
# Create data, store in list, and sort in increasing order (best case for
# insertion sort: each element is already in place)
for i in range(10):
    lists5002[i] = np.sort(random.sample(range(1, 50001), 500), axis=None)
    lists5k2[i] = np.sort(random.sample(range(1, 50001), 5000), axis=None)
    lists50k2[i] = np.sort(random.sample(range(1, 50001), 50000), axis=None)
# Create empty lists to store timings
# bubbleSort timings
bubble5002 = []
bubble5k2 = []
bubble50k2 = []
# insertSort timings
insert5002 = []
insert5k2 = []
insert50k2 = []
# Loop through and time the 10 pre-sorted lists for each of the three lengths.
# BUG FIX: the original passed range(10) to the sorts instead of the sorted
# lists built above, so the "pre-sorted input" experiment never ran on its
# own data. Each call gets a fresh list copy of the sorted array. Also fixed:
# the 5k loop bound iterated lists50k2 instead of lists5k2.
for i in range(len(lists5002)):
    bubble5002.append(timeit.timeit(wrapper(bubbleSort, list(lists5002[i])), number=10))
    insert5002.append(timeit.timeit(wrapper(insertSort, list(lists5002[i])), number=10))
for i in range(len(lists5k2)):
    bubble5k2.append(timeit.timeit(wrapper(bubbleSort, list(lists5k2[i])), number=10))
    insert5k2.append(timeit.timeit(wrapper(insertSort, list(lists5k2[i])), number=10))
for i in range(len(lists50k2)):
    bubble50k2.append(timeit.timeit(wrapper(bubbleSort, list(lists50k2[i])), number=10))
    insert50k2.append(timeit.timeit(wrapper(insertSort, list(lists50k2[i])), number=10))
# +
# Average the 10 timing runs for each input size (n = 500, 5000, 50000)
bubble2 = [np.mean(bubble5002), np.mean(bubble5k2), np.mean(bubble50k2)]
insert2 = [np.mean(insert5002), np.mean(insert5k2), np.mean(insert50k2)]
# x positions 0, 1, 2 stand in for the three input sizes
tests = np.arange(0., 3., 1)
# red dashed line = bubbleSort timings; blue dashed line = insertSort timings
plt.plot(tests, bubble2, 'r--')
plt.show()
plt.plot(tests, insert2, 'b--')
plt.show()
# +
#### Plot 3
## Create 3 lists of 10 sublists of n random numbers to avoid skewing timing:
lists5003 = [[] for _ in range(10)]
lists5k3 = [[] for _ in range(10)]
lists50k3 = [[] for _ in range(10)]
for i in range(10):
    lists5003[i] = random.sample(range(1, 50001), 500)
    lists5k3[i] = random.sample(range(1, 50001), 5000)
    lists50k3[i] = random.sample(range(1, 50001), 50000)
# Sort each sublist in DECREASING order (worst case for both algorithms).
# BUG FIX: `lists5003[i][::-1].sort()` sorted a reversed *copy* of the list
# and threw it away, leaving the originals in random order. sort(reverse=True)
# sorts the actual sublists descending as intended.
for i in range(10):
    lists5003[i].sort(reverse=True)
    lists5k3[i].sort(reverse=True)
    lists50k3[i].sort(reverse=True)
## Create empty lists to store timings
# bubbleSort timings
bubble5003 = []
bubble5k3 = []
bubble50k3 = []
# insertSort timings
insert5003 = []
insert5k3 = []
insert50k3 = []
## Loop through and time the 10 reverse-sorted lists for each length.
# BUG FIX: the original passed range(10) to the sorts instead of the lists
# built above, so nothing of the worst-case experiment was actually timed.
# Each call gets a fresh copy so the first sort does not hand the second an
# already-sorted list. Also fixed: the 5k loop bound iterated lists50k3.
for i in range(len(lists5003)):
    bubble5003.append(timeit.timeit(wrapper(bubbleSort, list(lists5003[i])), number=10))
    insert5003.append(timeit.timeit(wrapper(insertSort, list(lists5003[i])), number=10))
for i in range(len(lists5k3)):
    bubble5k3.append(timeit.timeit(wrapper(bubbleSort, list(lists5k3[i])), number=10))
    insert5k3.append(timeit.timeit(wrapper(insertSort, list(lists5k3[i])), number=10))
for i in range(len(lists50k3)):
    bubble50k3.append(timeit.timeit(wrapper(bubbleSort, list(lists50k3[i])), number=10))
    insert50k3.append(timeit.timeit(wrapper(insertSort, list(lists50k3[i])), number=10))
# +
# Average the 10 timing runs for each input size (n = 500, 5000, 50000)
bubble3 = [np.mean(bubble5003), np.mean(bubble5k3), np.mean(bubble50k3)]
insert3 = [np.mean(insert5003), np.mean(insert5k3), np.mean(insert50k3)]
# x positions 0, 1, 2 stand in for the three input sizes
tests = np.arange(0., 3., 1)
# red dashed line = bubbleSort timings; blue dashed line = insertSort timings
plt.plot(tests, bubble3, 'r--')
plt.show()
plt.plot(tests, insert3, 'b--')
plt.show()
# -
# ## Explain Your Choices:
# + Explain any platform/language choices that you made for your code/plots.
# - I chose python since I am most comfortable in python. For the code, I adapted the pseudocode from the book and used that pseudocode as comments to highlight how I adapted these steps.
# + How did you create/store your data that you used to make the plots?
# - I used a for loop and the standard library's random.sample function to create a list with 10 randomly sampled sublists of size n = 500, 5000, and 50000 for plot 1. For plots 2 and 3 I used a similar approach but sorted in ascending and descending order, respectively, to meet the constraints instructed. I took the average of the runs on the 10 sets of data and plotted in matplotlib. Red represents the bubbleSort algorithm timing, whereas blue represents the insertSort algorithm.
# + If you ran into any special difficulties or made any interesting observations, feel free to mention them here.
# - I originally ran into some difficulty with the nested for loop with bubble sort (step 2) and getting it to iterate through the entire list but eventually got it to work.
# ## Conclusions:
# + These two algorithms are supposed to have quadratic running time.
# + Does the first plot reflect this?
# + How do the two algorithms compare in terms of running time?
# + How about the second plot?
# + Do you think this one is quadratic? Why do you think it looks the way it does?
# + How does the third compare to the first and the second?
# + What kind of functions do you think you’re observing in the three plots (linear, logarithmic, quadratic, exponential, etc etc)?
# + Would you use these algorithms for real life data, and why?
| p1/p1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report, mean_absolute_error
from sklearn import model_selection, preprocessing
from sklearn.ensemble import ExtraTreesRegressor
# NOTE(review): duplicate import — classification_report is already imported above
from sklearn.metrics import classification_report
# Each input line is a comma-separated record; the last field is the target
# (traffic volume) and the preceding fields are features — TODO confirm
# against traffic_data.txt.
input_file = 'traffic_data.txt'
data = []
with open(input_file, 'r') as f:
    for line in f.readlines():
        # line[:-1] drops the trailing newline before splitting on commas
        items = line[:-1].split(',')
        data.append(items)
data = np.array(data)
# Encode string-valued columns, one LabelEncoder per categorical column.
# Whether a column is numeric is decided from the FIRST row only — assumes
# every column is consistently numeric or consistently categorical.
label_encoder = []
X_encoded = np.empty(data.shape)
for i, item in enumerate(data[0]):
    if item.isdigit():
        # numeric column: copy values through unchanged
        X_encoded[:, i] = data[:, i]
    else:
        # categorical column: fit a fresh encoder and keep it (in column
        # order) for encoding new datapoints later
        label_encoder.append(preprocessing.LabelEncoder())
        X_encoded[:, i] = label_encoder[-1].fit_transform(data[:, i])
# Features are all columns but the last; the last column is the target.
X = X_encoded[:, :-1].astype(int)
y = X_encoded[:, -1].astype(int)
# Hold out 25% of the rows for evaluation; fixed seed for reproducibility.
X_train, X_test, y_train, y_test = model_selection.train_test_split(
    X, y, test_size=0.25, random_state=5)
# Train an extremely-randomized-trees regressor and report test-set MAE.
params = {'n_estimators': 100, 'max_depth': 4, 'random_state': 0}
regressor = ExtraTreesRegressor(**params)
regressor.fit(X_train, y_train)
y_pred = regressor.predict(X_test)
print("Mean absolute error:", round(mean_absolute_error(y_test, y_pred), 2))
# +
# Encode one new datapoint with the SAME encoders fitted above, then predict
# its traffic volume.
# NOTE(review): `count` walks the fitted encoders positionally, so the
# categorical fields here must appear in the same order as the categorical
# columns of the training file — verify against traffic_data.txt.
test_datapoint = ['Saturday', '10:20', 'Atlanta', 'no']
test_datapoint_encoded = [0] * len(test_datapoint)
count = 0
for i, item in enumerate(test_datapoint):
    if item.isdigit():
        # numeric field: use the value directly
        test_datapoint_encoded[i] = int(test_datapoint[i])
    else:
        # categorical field: transform with the next fitted encoder
        test_datapoint_encoded[i] = int(label_encoder[count].transform(
            [test_datapoint[i]])[0])
        count = count + 1
test_datapoint_encoded = np.array(test_datapoint_encoded)
# predict() expects a 2-D array, hence the wrapping list
print("Predicted traffic:", int(regressor.predict([test_datapoint_encoded])[0]))
# -
| artificial-intelligence-with-python-ja-master/Chapter 3/traffic_prediction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: seaborn-py38-latest
# language: python
# name: seaborn-py38-latest
# ---
# + active=""
# .. _function_tutorial:
#
# .. currentmodule:: seaborn
# + active=""
# Overview of seaborn plotting functions
# ======================================
#
# .. raw:: html
#
# <div class=col-md-9>
#
# Most of your interactions with seaborn will happen through a set of plotting functions. Later chapters in the tutorial will explore the specific features offered by each function. This chapter will introduce, at a high-level, the different kinds of functions that you will encounter.
# + tags=["hide"]
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from IPython.display import HTML
sns.set()
# + active=""
# Similar functions for similar tasks
# -----------------------------------
#
# The seaborn namespace is flat; all of the functionality is accessible at the top level. But the code itself is hierarchically structured, with modules of functions that achieve similar visualization goals through different means. Most of the docs are structured around these modules: you'll encounter names like "relational", "distributional", and "categorical".
#
# For example, the :ref:`distributions module <distribution_api>` defines functions that specialize in representing the distribution of datapoints. This includes familiar methods like the histogram:
# -
# Load the example penguins dataset bundled with seaborn.
penguins = sns.load_dataset("penguins")
# Axes-level histogram: per-species distributions stacked in one Axes.
sns.histplot(data=penguins, x="flipper_length_mm", hue="species", multiple="stack")
# + active=""
# Along with similar, but perhaps less familiar, options such as kernel density estimation:
# -
# Same data as the histogram above, drawn as stacked kernel density estimates.
sns.kdeplot(data=penguins, x="flipper_length_mm", hue="species", multiple="stack")
# + active=""
# Functions within a module share a lot of underlying code and offer similar features that may not be present in other components of the library (such as ``multiple="stack"`` in the examples above). They are designed to facilitate switching between different visual representations as you explore a dataset, because different representations often have complementary strengths and weaknesses.
# + active=""
# Figure-level vs. axes-level functions
# -------------------------------------
#
# In addition to the different modules, there is a cross-cutting classification of seaborn functions as "axes-level" or "figure-level". The examples above are axes-level functions. They plot data onto a single :class:`matplotlib.pyplot.Axes` object, which is the return value of the function.
#
# In contrast, figure-level functions interface with matplotlib through a seaborn object, usually a :class:`FacetGrid`, that manages the figure. Each module has a single figure-level function, which offers a unitary interface to its various axes-level functions. The organization looks a bit like this:
# + tags=["hide-input"]
# Diagram of the seaborn function hierarchy: one large rounded box per module
# (labelled with its figure-level function), with that module's axes-level
# functions drawn as smaller connected boxes beneath it.
from matplotlib.patches import FancyBboxPatch
f, ax = plt.subplots(figsize=(7, 5))
f.subplots_adjust(0, 0, 1, 1)
ax.set_axis_off()
ax.set(xlim=(0, 1), ylim=(0, 1))
modules = "relational", "distributions", "categorical"
# One hue per module: pale fill from the "deep" palette, darker edge/text
# color from the "dark" palette.
pal = sns.color_palette("deep")
colors = dict(relational=pal[0], distributions=pal[1], categorical=pal[2])
pal = sns.color_palette("dark")
text_colors = dict(relational=pal[0], distributions=pal[1], categorical=pal[2])
# Axes-level functions belonging to each module.
functions = dict(
    relational=["scatterplot", "lineplot"],
    distributions=["histplot", "kdeplot", "ecdfplot", "rugplot"],
    categorical=["stripplot", "swarmplot", "boxplot", "violinplot", "pointplot", "barplot"],
)
# Box geometry in axes coordinates: padding, width, height.
pad = .06
w = .2
h = .15
# Three columns of boxes; the row of module boxes sits at y = .7.
xs = np.arange(0, 1, 1 / 3) + pad * 1.01
y = .7
for x, mod in zip(xs, modules):
    # Translucent fill (alpha .2) drawn over a white underlay box.
    color = colors[mod] + (.2,)
    text_color = text_colors[mod]
    box = FancyBboxPatch((x, y), w, h, f"round,pad={pad}", color="white")
    ax.add_artist(box)
    box = FancyBboxPatch((x, y), w, h, f"round,pad={pad}", linewidth=1, edgecolor=text_color, facecolor=color)
    ax.add_artist(box)
    # mod[:3] + "plot" yields the figure-level function name (relplot, ...).
    ax.text(x + w / 2, y + h / 2, f"{mod[:3]}plot\n({mod})", ha="center", va="center", size=22, color=text_color)
    # Stack the module's axes-level function boxes below, one per .1 step.
    for i, func in enumerate(functions[mod]):
        x_i = x + w / 2
        y_i = y - i * .1 - h / 2 - pad
        box = FancyBboxPatch((x_i - w / 2, y_i - pad / 3), w, h / 4, f"round,pad={pad / 3}",
                             color="white")
        ax.add_artist(box)
        box = FancyBboxPatch((x_i - w / 2, y_i - pad / 3), w, h / 4, f"round,pad={pad / 3}",
                             linewidth=1, edgecolor=text_color, facecolor=color)
        ax.add_artist(box)
        ax.text(x_i, y_i, func, ha="center", va="center", size=18, color=text_color)
        # Connector line from the module box down to this function box.
        ax.plot([x_i, x_i], [y, y_i], zorder=-100, color=text_color, lw=1)
# + active=""
# For example, :func:`displot` is the figure-level function for the distributions module. Its default behavior is to draw a histogram, using the same code as :func:`histplot` behind the scenes:
# -
# Figure-level counterpart: displot defaults to the same histogram as histplot.
sns.displot(data=penguins, x="flipper_length_mm", hue="species", multiple="stack")
# + active=""
# To draw a kernel density plot instead, using the same code as :func:`kdeplot`, select it using the ``kind`` parameter:
# -
# kind="kde" switches the underlying axes-level function to kdeplot.
sns.displot(data=penguins, x="flipper_length_mm", hue="species", multiple="stack", kind="kde")
# + active=""
# You'll notice that the figure-level plots look mostly like their axes-level counterparts, but there are a few differences. Notably, the legend is placed outside the plot. They also have a slightly different shape (more on that shortly).
#
# The most useful feature offered by the figure-level functions is that they can easily create figures with multiple subplots. For example, instead of stacking the three distributions for each species of penguins in the same axes, we can "facet" them by plotting each distribution across the columns of the figure:
# -
# col="species" facets the distributions into one subplot per species.
sns.displot(data=penguins, x="flipper_length_mm", hue="species", col="species")
# + active=""
# The figure-level functions wrap their axes-level counterparts and pass the kind-specific keyword arguments (such as the bin size for a histogram) down to the underlying function. That means they are no less flexible, but there is a downside: the kind-specific parameters don't appear in the function signature or docstrings. Some of their features might be less discoverable, and you may need to look at two different pages of the documentation before understanding how to achieve a specific goal.
# + active=""
# Axes-level functions make self-contained plots
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# The axes-level functions are written to act like drop-in replacements for matplotlib functions. While they add axis labels and legends automatically, they don't modify anything beyond the axes that they are drawn into. That means they can be composed into arbitrarily-complex matplotlib figures with predictable results.
#
# The axes-level functions call :func:`matplotlib.pyplot.gca` internally, which hooks into the matplotlib state-machine interface so that they draw their plots on the "currently-active" axes. But they additionally accept an ``ax=`` argument, which integrates with the object-oriented interface and lets you specify exactly where each plot should go:
# -
# Compose two axes-level seaborn plots into one custom matplotlib figure,
# directing each into its own Axes via the ax= parameter.
f, axs = plt.subplots(1, 2, figsize=(8, 4), gridspec_kw=dict(width_ratios=[4, 3]))
sns.scatterplot(data=penguins, x="flipper_length_mm", y="bill_length_mm", hue="species", ax=axs[0])
sns.histplot(data=penguins, x="species", hue="species", shrink=.8, alpha=.8, legend=False, ax=axs[1])
f.tight_layout()
# + active=""
# Figure-level functions own their figure
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# In contrast, figure-level functions cannot (easily) be composed with other plots. By design, they "own" their own figure, including its initialization, so there's no notion of using a figure-level function to draw a plot onto an existing axes. This constraint allows the figure-level functions to implement features such as putting the legend outside of the plot.
#
# Nevertheless, it is possible to go beyond what the figure-level functions offer by accessing the matplotlib axes on the object that they return and adding other elements to the plot that way:
# -
tips = sns.load_dataset("tips")
g = sns.relplot(data=tips, x="total_bill", y="tip")
# Reach into the FacetGrid's single Axes to overlay a plain matplotlib artist.
g.ax.axline(xy1=(10, 2), slope=.2, color="b", dashes=(5, 2))
# + active=""
# Customizing plots from a figure-level function
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# The figure-level functions return a :class:`FacetGrid` instance, which has a few methods for customizing attributes of the plot in a way that is "smart" about the subplot organization. For example, you can change the labels on the external axes using a single line of code:
# -
g = sns.relplot(data=penguins, x="flipper_length_mm", y="bill_length_mm", col="sex")
# FacetGrid method: relabel only the external axes across all facets at once.
g.set_axis_labels("Flipper length (mm)", "Bill length (mm)")
# + active=""
# While convenient, this does add a bit of extra complexity, as you need to remember that this method is not part of the matplotlib API and exists only when using a figure-level function.
# + active=""
# Specifying figure sizes
# ^^^^^^^^^^^^^^^^^^^^^^^
#
# To increase or decrease the size of a matplotlib plot, you set the width and height of the entire figure, either in the `global rcParams <https://matplotlib.org/tutorials/introductory/customizing.html>`_, while setting up the plot (e.g. with the ``figsize`` parameter of :func:`matplotlib.pyplot.subplots`), or by calling a method on the figure object (e.g. :meth:`matplotlib.Figure.set_size_inches`). When using an axes-level function in seaborn, the same rules apply: the size of the plot is determined by the size of the figure it is part of and the axes layout in that figure.
#
# When using a figure-level function, there are several key differences. First, the functions themselves have parameters to control the figure size (although these are actually parameters of the underlying :class:`FacetGrid` that manages the figure). Second, these parameters, ``height`` and ``aspect``, parameterize the size slightly differently than the ``width``, ``height`` parameterization in matplotlib (using the seaborn parameters, ``width = height * aspect``). Most importantly, the parameters correspond to the size of each *subplot*, rather than the size of the overall figure.
#
# To illustrate the difference between these approaches, here is the default output of :func:`matplotlib.pyplot.subplots` with one subplot:
# -
# Default matplotlib figure: one subplot filling the whole figure area.
f, ax = plt.subplots()
# + active=""
# A figure with multiple columns will have the same overall size, but the axes will be squeezed horizontally to fit in the space:
# -
# Same overall figure size, so each of the two Axes is squeezed horizontally.
f, ax = plt.subplots(1, 2, sharey=True)
# + active=""
# In contrast, a plot created by a figure-level function will be square. To demonstrate that, let's set up an empty plot by using :class:`FacetGrid` directly. This happens behind the scenes in functions like :func:`relplot`, :func:`displot`, or :func:`catplot`:
# -
# Empty FacetGrid, as set up internally by relplot/displot/catplot.
g = sns.FacetGrid(penguins)
# + active=""
# When additional columns are added, the figure itself will become wider, so that its subplots have the same size and shape:
# -
# Adding columns widens the whole figure so each subplot keeps its size/shape.
g = sns.FacetGrid(penguins, col="sex")
# + active=""
# And you can adjust the size and shape of each subplot without accounting for the total number of rows and columns in the figure:
# -
# height/aspect size each individual subplot, not the overall figure.
g = sns.FacetGrid(penguins, col="sex", height=3.5, aspect=.75)
# + active=""
# The upshot is that you can assign faceting variables without stopping to think about how you'll need to adjust the total figure size. A downside is that, when you do want to change the figure size, you'll need to remember that things work a bit differently than they do in matplotlib.
# + active=""
# Relative merits of figure-level functions
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Here is a summary of the pros and cons that we have discussed above:
# + tags=["hide-input"]
# Render the advantages/drawbacks summary as a raw HTML table.
# NOTE(review): the last row lists "Different figure size parameterization"
# under both columns — presumably intentional (it is discussed above as both
# a convenience and a source of confusion), but worth confirming.
HTML("""
<table>
<tr>
<th>Advantages</th>
<th>Drawbacks</th>
</tr>
<tr>
<td>Easy faceting by data variables</td>
<td>Many parameters not in function signature</td>
</tr>
<tr>
<td>Legend outside of plot by default</td>
<td>Cannot be part of a larger matplotlib figure</td>
</tr>
<tr>
<td>Easy figure-level customization</td>
<td>Different API from matplotlib</td>
</tr>
<tr>
<td>Different figure size parameterization</td>
<td>Different figure size parameterization</td>
</tr>
</table>
""")
# + active=""
# On balance, the figure-level functions add some additional complexity that can make things more confusing for beginners, but their distinct features give them additional power. The tutorial documentation mostly uses the figure-level functions, because they produce slightly cleaner plots, and we generally recommend their use for most applications. The one situation where they are not a good choice is when you need to make a complex, standalone figure that composes multiple different plot kinds. At this point, it's recommended to set up the figure using matplotlib directly and to fill in the individual components using axes-level functions.
# + active=""
# Combining multiple views on the data
# ------------------------------------
#
# Two important plotting functions in seaborn don't fit cleanly into the classification scheme discussed above. These functions, :func:`jointplot` and :func:`pairplot`, employ multiple kinds of plots from different modules to represent multiple aspects of a dataset in a single figure. Both plots are figure-level functions and create figures with multiple subplots by default. But they use different objects to manage the figure: :class:`JointGrid` and :class:`PairGrid`, respectively.
#
# :func:`jointplot` plots the relationship or joint distribution of two variables while adding marginal axes that show the univariate distribution of each one separately:
# -
# Joint scatter plot with marginal univariate distributions on each axis.
sns.jointplot(data=penguins, x="flipper_length_mm", y="bill_length_mm", hue="species")
# + active=""
# :func:`pairplot` is similar — it combines joint and marginal views — but rather than focusing on a single relationship, it visualizes every pairwise combination of variables simultaneously:
# -
# Pairwise joint/marginal views for every combination of numeric variables.
sns.pairplot(data=penguins, hue="species")
# + active=""
# Behind the scenes, these functions are using axes-level functions that you have already met (:func:`scatterplot` and :func:`kdeplot`), and they also have a ``kind`` parameter that lets you quickly swap in a different representation:
# -
# kind="hist" swaps the underlying representation, as with displot.
sns.jointplot(data=penguins, x="flipper_length_mm", y="bill_length_mm", hue="species", kind="hist")
| doc/tutorial/function_overview.ipynb |