code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# # Concise Implementation of Multilayer Perceptron
#
# :label:`chapter_mlp_gluon`
#
#
# Now that we have learned how multilayer perceptrons (MLPs) work in theory, let's implement them. We begin, as always, by importing modules.
import d2l
from mxnet import gluon, npx, init
from mxnet.gluon import nn
npx.set_np()
# ## The Model
#
# The only difference from our softmax regression implementation
# is that we add two `Dense` (fully-connected) layers instead of one.
# The first is our hidden layer, which has *256* hidden units
# and uses the ReLU activation function.
# + attributes={"classes": [], "id": "", "n": "5"}
# A hidden layer of 256 ReLU units followed by a 10-way linear output layer.
net = nn.Sequential()
net.add(nn.Dense(256, activation='relu'))
net.add(nn.Dense(10))
# Weights ~ N(0, 0.01^2); input dimensions are inferred lazily on first use.
net.initialize(init.Normal(sigma=0.01))
# -
# Again, note that as always, Gluon automatically
# infers the missing input dimensions to each layer.
#
# Training the model follows the exact same steps as in our softmax regression implementation.
# + attributes={"classes": [], "id": "", "n": "6"}
# Training hyperparameters
batch_size = 256
num_epochs = 10
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
# Softmax cross-entropy loss and plain SGD, as in the softmax-regression example.
loss = gluon.loss.SoftmaxCrossEntropyLoss()
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.5})
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)
# -
# ## Exercises
#
# 1. Try adding a few more hidden layers to see how the result changes.
# 1. Try out different activation functions. Which ones work best?
# 1. Try out different initializations of the weights.
#
# ## Scan the QR Code to [Discuss](https://discuss.mxnet.io/t/2340)
#
# 
| 4 multilayer-perceptrons/mlp-gluon.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: clouds113_kernel
# language: python
# name: clouds113_kernel
# ---
# **NARVAL column-based model on QUBICC data**
#
# <a id='import_cell_1'></a>
# +
# Ran with Max Mem = 200 GB on a GPU node
import sys
import os
import numpy as np
import pandas as pd
import xarray as xr
import importlib
import resource
import gc
# importlib.reload(my_classes)
from tensorflow.keras.models import load_model
root_path = '/pf/b/b309170'
# Add path with my_classes to sys.path
sys.path.insert(0, root_path + '/workspace_icon-ml/cloud_cover_parameterization/')
from my_classes import read_mean_and_std
from my_classes import load_data
import matplotlib.pyplot as plt
# -
# <a id='import_cell_2'></a>
# <a id='import_cell_2'></a>
# Directory with the two trained column-based models and their normalization
# statistics (the .txt files read further below).
model_path = os.path.join(root_path,
                          'workspace_icon-ml/cloud_cover_parameterization/grid_column_based/saved_models')
# *Load models*
# <a id='load_models'></a>
# Two independently trained Keras models; their mean prediction is used as a
# two-member ensemble later in this notebook.
model_1 = load_model(os.path.join(model_path, 'model_grid_column_based_final_1.h5'))
model_2 = load_model(os.path.join(model_path, 'model_grid_column_based_final_2.h5'))
# **First visualization method** (Third in the other notebook) <br>
# Hovmöller diagram (time on x-axis with vertical layers on y-axis and NN ensemble mean clc in color) <br>
# Note that I start with the third visualization method here, since the original data is loaded in the third visualization method in the NARVAL-interpretation notebook.
# + jupyter={"outputs_hidden": true}
# Example file: int_var_hc2_02_p1m_ta_ml_20041107T100000Z.nc
# List the vertically interpolated QUBICC variables available on disk.
os.listdir(os.path.join(root_path, 'my_work/QUBICC/data_var_vertinterp/'))
# -
# Get not_nan quickly
# Use a single cloud-cover file to find the horizontal cells whose lowermost
# level (index 30) is valid, i.e. not nan'ed out during vertical interpolation.
path = os.path.join(root_path, 'my_work/QUBICC/data_var_vertinterp/')
DS = xr.open_mfdataset(path+'cl/int_var_hc2_02_p1m_cl_ml_20041107T100000Z.nc', combine='by_coords')
da = DS.cl.values
not_nan = ~np.isnan(da[0,30,:])
# *Loading Nov, 2nd: 2004110200*
#
# Careful with the order of the variables!
#
# <a id='order_of_vars'></a>
# Input features of the column-based models; 'cl' (cloud cover, the target)
# is appended last so it ends up in the trailing columns of the DataFrame.
all_features = ['hus', 'clw', 'cli', 'ta', 'pfull', 'rho', 'zg', 'fr_lake']
order_of_vars = all_features + ['cl']
# <a id='load_data_one_day'></a>
# Load the QUBICC data for November 2nd into the data_dict dictionary
# (the original comment said NARVAL, but source='qubicc' is requested here).
data_dict = load_data(source='qubicc', days='nov_2nd', order_of_vars=order_of_vars)
# +
# Dataset dimensions; the number of vertical layers (31) is hard-coded.
TIME_STEPS = data_dict['cl'].shape[0]
HORIZ_FIELDS = data_dict['cl'].shape[2]
VERT_LAYERS = 31
# Reshaping into nd-arrays of equal shapes: zg and fr_lake carry no time
# dimension, so replicate them along a new leading time axis.
data_dict['zg'] = np.repeat(np.expand_dims(data_dict['zg'], 0), TIME_STEPS, axis=0)
data_dict['fr_lake'] = np.repeat(np.expand_dims(data_dict['fr_lake'], 0), TIME_STEPS, axis=0)
# Convert cl-information from [0, 1] to [0, 100]
data_dict['cl'] = data_dict['cl']*100
# + jupyter={"outputs_hidden": true}
# Print every variable's shape. (A plain for-loop replaces the original list
# comprehension, which was used only for its print side effect.)
for key in data_dict.keys():
    print(key + ': ' + str(data_dict[key].shape))
# +
# One sample should contain a column of information:
# every 3D variable becomes 27 flat columns, one per vertical layer 4..30;
# 2D variables become a single flat column.
data_dict_reshaped = {}
for key in data_dict.keys():
    if data_dict[key].shape[1] == VERT_LAYERS:
        for i in range(4, VERT_LAYERS):
            new_key = '{}{}{:d}'.format(key,'_',i)
            data_dict_reshaped[new_key] = np.reshape(data_dict[key][:,i,:], -1)
    else:
        data_dict_reshaped[key] = np.reshape(data_dict[key], -1)
# Remove constant columns
del data_dict_reshaped['zg_4']
del data_dict_reshaped['zg_5']
del data_dict_reshaped['zg_6']
# BUG FIX: the keys are derived from order_of_vars, which names the liquid-water
# variable 'clw', so the key is 'clw_4' (as the fifth-visualization cell below
# correctly uses). 'qclw_phy_4' never exists here and the original del raised a
# KeyError.
del data_dict_reshaped['clw_4']
# + jupyter={"outputs_hidden": true}
#Converting dict into a DataFrame-object
# Column order follows dict insertion order, so the 27 'cl_*' target columns
# come last.
df = pd.DataFrame.from_dict(data_dict_reshaped)
df.head()
# -
# *Scaling the data*
# <a id='means_stds'></a>
# +
# Per-feature means/stds saved at training time, one file per model.
mean_1, std_1 = read_mean_and_std(os.path.join(model_path, 'model_grid_column_based_final_1.txt'))
mean_2, std_2 = read_mean_and_std(os.path.join(model_path, 'model_grid_column_based_final_2.txt'))
# Pad with 27 zeros/ones so the trailing 27 'cl' target columns pass through
# the standardization below unchanged: (x - 0) / 1.
mean_1 = np.concatenate((mean_1, np.zeros(27)))
mean_2 = np.concatenate((mean_2, np.zeros(27)))
std_1 = np.concatenate((std_1, np.ones(27)))
std_2 = np.concatenate((std_2, np.ones(27)))
# -
# Note that after standardization the mean and variance are not 0 and 1 respectively
# as the data values above 21km are still in the data
df_1 = ((df - mean_1)/std_1).to_numpy()
df_2 = ((df - mean_2)/std_2).to_numpy()
# *Drawing the diagram*
df_1.shape
# Draw the diagram
# All vertical layers from first time step
# NOTE(review): the slices below assume exactly 1024 samples (grid columns) per
# time step; the author's inline comment flags the same doubt. If
# HORIZ_FIELDS != 1024, consecutive time steps get mixed — confirm against the data.
pred_clc_mat = np.zeros((TIME_STEPS, 27))
for i in range(TIME_STEPS):
    # Mean prediction for a specific time step
    pred_1 = model_1.predict(df_1[1024*i: 1024*(i+1), :-27]) # Maybe I have the indexing wrong after all
    # Clip to the physical range [0, 100]% before averaging over the columns.
    pred_adj_1 = np.mean(np.minimum(np.maximum(pred_1, 0), 100), axis=0)
    pred_2 = model_2.predict(df_2[1024*i: 1024*(i+1), :-27])
    pred_adj_2 = np.mean(np.minimum(np.maximum(pred_2, 0), 100), axis=0)
    # Two-member ensemble mean
    pred_clc_mat[i,:] = 1/2*(pred_adj_1+pred_adj_2)
data_clc_mat = np.zeros((TIME_STEPS, 27))
for i in range(TIME_STEPS):
    # Mean cloud cover for a specific time step. The 'cl' columns were not
    # standardized (padded mean 0 / std 1), so both halves should be identical.
    data_clc_1 = np.mean(df_1[1024*i: 1024*(i+1), -27:], axis=0)
    data_clc_2 = np.mean(df_2[1024*i: 1024*(i+1), -27:], axis=0)
    data_clc_mat[i,:] = 1/2*(data_clc_1+data_clc_2)
# +
import matplotlib.cm as cm
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from mpl_toolkits.axes_grid1 import make_axes_locatable
# %matplotlib inline
# Columns 0-4 stay zero; the 27 predicted/true layers fill model levels 5-31.
matrix = np.zeros((TIME_STEPS, 32))
matrix_data = np.zeros((TIME_STEPS, 32))
matrix[:, 5:] = pred_clc_mat
matrix_data[:, 5:] = data_clc_mat
# Crucial so that the colorbar will actually match both plots
vmax = np.maximum(np.max(pred_clc_mat), np.max(data_clc_mat))
vmin = np.minimum(np.min(pred_clc_mat), np.min(data_clc_mat))
# t = np.linspace(1, 25, 25)
# v = np.linspace(9, 75)
# T, V = np.meshgrid(t, v)
fig = plt.figure(figsize=(10, 10))
# Right panel: the data ("truth"); ylim=[31, 5] puts layer 5 at the top.
ax_2 = fig.add_subplot(122, xlabel='Hours', ylim=[31, 5], title='True mean cloud cover (One day)')
ims_2 = ax_2.imshow(np.transpose(matrix_data),vmin=vmin, vmax=vmax)
# Colorbar axes
divider = make_axes_locatable(ax_2)
cax_2 = divider.append_axes("right", size="5%", pad=0.05)
cb_2 = fig.colorbar(ims_2, cax=cax_2)
cb_2.ax.set_title('[%]')
#fig.colorbar(ims_2)
# Left panel: the NN ensemble mean.
ax_1 = fig.add_subplot(121, xlabel='Hours', ylabel='Vertical layers', ylim=[31, 5],
                       title='NNs\' mean cloud cover (One day)')
ims_1 = ax_1.imshow(np.transpose(matrix),vmin=vmin, vmax=vmax)
# Colorbar axes
divider = make_axes_locatable(ax_1)
cax_1 = divider.append_axes("right", size="5%", pad=0.05)
cb_1 = fig.colorbar(ims_1, cax=cax_1)
#fig.colorbar(ims_1)
cb_1.ax.set_title('[%]')
# fig.savefig('../figures/qubicc_offline_testing_hovmoeller.pdf')
# -
# -
# **Plot the timeseries for one specific grid column** <br>
# I can do it for August, 2016 (December, 2013 has some tricky days and I need a continuous stretch of days)
# + jupyter={"outputs_hidden": true}
# Show memory usage: 1295604
# ru_maxrss is the peak resident set size of this process so far.
resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
# -
# <a id='load_data_nov_20s'></a>
# +
# Load all days
# Uses 25 GB of memory
# Load the QUBICC data (source='qubicc') for the 'nov_20s' period into data_dict
data_dict = load_data(source='qubicc', days='nov_20s', order_of_vars=order_of_vars)
# + jupyter={"outputs_hidden": true}
# When deleting variables and running the garbage-collector, it doesn't show that
# resources were freed up, still they are freed up for other objects that need to be stored
del da
del DS
gc.collect()
# Show memory usage: 26271440
resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
# -
# <a id='reshape_1'></a>
# +
# Basic dimensions of the freshly loaded data set; 31 vertical layers by construction.
TIME_STEPS = data_dict['cl'].shape[0]
HORIZ_FIELDS = data_dict['cl'].shape[2]
VERT_LAYERS = 31
# zg and fr_lake carry no time dimension: replicate them along a new leading
# time axis so that every array shares the same layout.
data_dict['zg'] = np.broadcast_to(data_dict['zg'], (TIME_STEPS,) + data_dict['zg'].shape).copy()
data_dict['fr_lake'] = np.broadcast_to(data_dict['fr_lake'], (TIME_STEPS,) + data_dict['fr_lake'].shape).copy()
# Express cloud cover as a percentage instead of a fraction.
data_dict['cl'] = 100*data_dict['cl']
# -
# <a id='reshape_2'></a>
# +
# One sample should contain a column of information:
# 3D variables become 27 flat columns (layers 4..30), 2D variables one column.
data_dict_reshaped = {}
for key in data_dict.keys():
    if data_dict[key].shape[1] == VERT_LAYERS:
        for i in range(4, VERT_LAYERS):
            new_key = '{}{}{:d}'.format(key,'_',i)
            data_dict_reshaped[new_key] = np.reshape(data_dict[key][:,i,:], -1)
    else:
        data_dict_reshaped[key] = np.reshape(data_dict[key], -1)
# Remove constant fields
del data_dict_reshaped['zg_4']
del data_dict_reshaped['zg_5']
del data_dict_reshaped['zg_6']
# BUG FIX: the liquid-water variable is named 'clw' in order_of_vars, so the key
# is 'clw_4' (as used correctly in the fifth-visualization cell); 'qclw_phy_4'
# does not exist here and the original del raised a KeyError.
del data_dict_reshaped['clw_4']
# -
# <a id='df'></a>
#Converting dict into a DataFrame-object
df = pd.DataFrame.from_dict(data_dict_reshaped)
df.head()
# <a id='df_scaled'></a>
# Scale the data
# Standardize with the training-time statistics; the trailing 27 'cl' target
# columns pass through unchanged (their padded mean/std are 0/1).
df_scaled_1 = ((df - mean_1)/std_1).to_numpy()
df_scaled_2 = ((df - mean_2)/std_2).to_numpy()
# +
# Take a random horizontal field
m = 900
# Flattened sample index of field m at every time step (idiomatic comprehension
# instead of the original manual append loop).
indices = [m + i*HORIZ_FIELDS for i in range(TIME_STEPS)]
timesteps = np.arange(TIME_STEPS)
true_clc = data_dict['cl'][:,:,m] #time x vert x hor
# Predictions for the m-th column only, clipped to the physical range [0, 100]%.
pred_clc = np.maximum(np.minimum(model_1.predict(df_scaled_1[indices, :-27]),100),0) #Have to reconstruct the mth field
# + jupyter={"outputs_hidden": true}
true_clc.shape
# + jupyter={"outputs_hidden": true}
matrix_data.shape
# + jupyter={"outputs_hidden": true}
import matplotlib.cm as cm
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from mpl_toolkits.axes_grid1 import make_axes_locatable
# Only the first 96 time steps are shown (the first four days, per the titles).
# Columns 0-4 stay zero; data fills model levels 5-31.
matrix = np.zeros((96, 32)) #np.zeros((745, 31))
matrix_data = np.zeros((96, 32))
matrix[:, 5:] = pred_clc[:96]
matrix_data[:, 5:] = true_clc[:96, 4:]
# Crucial so that the colorbar will actually match both plots
vmax = np.maximum(np.max(matrix), np.max(matrix_data))
vmin = np.minimum(np.min(matrix), np.min(matrix_data))
# t = np.linspace(1, 25, 25)
# v = np.linspace(9, 75)
# T, V = np.meshgrid(t, v)
fig = plt.figure(figsize=(20,10))
ax_2 = fig.add_subplot(212, xlabel='Hours', ylabel='Vertical layers', ylim=[31, 5],
                       title='True cloud cover (First four days in November, 2004)')
im = ax_2.imshow(np.transpose(matrix_data),vmin=vmin, vmax=vmax)
# # Colorbar axes
# divider = make_axes_locatable(ax_2)
# cax_2 = divider.append_axes("right", size="5%", pad=0.05)
# fig.colorbar(ims_2, cax=cax_2)
#fig.colorbar(ims_2)
ax_1 = fig.add_subplot(211, xlabel='Hours', ylabel='Vertical layers', ylim=[31, 5],
                       title='NN cloud cover (First four days in November, 2004)')
im = ax_1.imshow(np.transpose(matrix),vmin=vmin, vmax=vmax)
# # Colorbar axes
# One colorbar shared by both panels.
cbar_ax = fig.add_axes([0.85, 0.15, 0.02, 0.7])
#([0.85, 0.15, 0.05, 0.7])
# divider = make_axes_locatable(cbar_ax)
# cax_1 = divider.append_axes("right", size="5%", pad=0.05)
cb = fig.colorbar(im, cax=cbar_ax)
cb.ax.set_title('[%]')
# fig.colorbar(ims_1)
# fig.savefig('../figures/qubicc_offline_testing_column_in_beginning_of_november.pdf')
# -
# -
# **Second visualization method** (First in the other notebook) <br>
# Samples on x-axis and cloud cover on y-axis
# <a id='input_test'></a>
# +
# Maintaining the test(set)-nomenclature
# Inputs are all but the last 27 columns; the last 27 columns are the 'cl' targets.
input_test_1 = df_scaled_1[:, :-27]
input_test_2 = df_scaled_2[:, :-27]
output_test = df_scaled_1[:, -27:]
# + jupyter={"outputs_hidden": true}
# Note that the differences in predictions really stem from the differences in the models
# (and not from the different standardization methods)
no_samples = 100
layer = 20
# Predictions for the chosen layer, clipped to the physical range [0, 100]%.
clc_pred_1 = np.maximum(np.minimum(model_1.predict(input_test_1[-no_samples:])[:,layer],100),0)
clc_pred_2 = np.maximum(np.minimum(model_2.predict(input_test_2[-no_samples:])[:,layer],100),0)
clc_true = output_test[-no_samples:, layer]
a = np.linspace(1, no_samples, no_samples)
fig = plt.figure(figsize=(15,4))
# Careful with saying that it's the 20th layer. It's from the perspective of an ICON-A grid with model height at 21km.
ax = fig.add_subplot(111, title='NNs predictions of %d-th layer cloud cover on random samples from the test set'%layer,
                     xlabel='Sample', ylabel='Cloud Cover')
ax.plot(a, clc_pred_1, 'ro')
ax.plot(a, clc_pred_2, 'co')
ax.plot(a, clc_true, 'bo')
ax.legend(['Predictions', 'Predictions_2', 'Truth'])
# fig.savefig('../figures/qubicc_offline_testing_predictions_random_samples.pdf')
# -
# -
# **Third visualization method** (Second in the other notebook) <br>
# Mean profile of Cloud Cover on x-axis with Vertical Layer on y-axis. This time I can use the test set.
# <a id='predicting'></a>
# Vertical layers
a = np.linspace(5, 31, 27)
# Mean observed cloud cover per vertical layer (idiomatic comprehension instead
# of the original append loop).
clc_data_mean = [np.mean(output_test[:, i], dtype=np.float64) for i in range(27)]
# NOTE(review): the 'truth' profile depends only on output_test, so the copy
# kept for the second model is identical by construction (as in the original).
clc_data_mean_2 = list(clc_data_mean)
# Predicted cloud cover means, clipped to the physical range [0, 100]%.
pred_adj = np.minimum(np.maximum(model_1.predict(input_test_1), 0), 100)
clc_mean = np.mean(pred_adj, axis=0, dtype=np.float64)
pred_adj_2 = np.minimum(np.maximum(model_2.predict(input_test_2), 0), 100)
clc_mean_2 = np.mean(pred_adj_2, axis=0, dtype=np.float64)
# + jupyter={"outputs_hidden": true}
fig = plt.figure(figsize=(15,4))
# For model
ax_1 = fig.add_subplot(121, xlabel='Mean Cloud Cover', ylabel='Vertical layer',
                       title='NN Cloud Cover parameterization (First trained model)')
ax_1.plot(clc_mean, a)
ax_1.plot(clc_data_mean, a)
# Flip the y-axis so that layer 5 (highest altitude) sits at the top.
plt.gca().invert_yaxis()
ax_1.legend(['Prediction', 'Truth'])
# For model_2
ax_2 = fig.add_subplot(122, xlabel='Mean Cloud Cover', ylabel='Vertical layer',
                       title='NN Cloud Cover parameterization (Second trained model)')
ax_2.plot(clc_mean_2, a)
ax_2.plot(clc_data_mean_2, a)
plt.gca().invert_yaxis()
ax_2.legend(['Prediction', 'Truth'])
# fig.savefig('../figures/qubicc_offline_testing_mean_cloud_cover_NNs.pdf')
# +
# With height vertical axis
# Mean geometric height [m] of vertical layers 5-31 (27 values, top to bottom),
# taken from the grid cell based model.
zg_mean = [
    20784.62706138, 19153.33733398, 17603.95786796, 16133.9890368,
    14740.72817851, 13421.56030093, 12174.49640667, 10997.54366598,
    9888.86828393, 8846.77905352, 7869.70937839, 6956.19812463,
    6104.87029163, 5314.41907809, 4583.59136617, 3911.17886673,
    3296.01709893, 2736.9940178, 2233.06954874, 1783.30740375,
    1386.91651733, 1043.31024674, 752.18615016, 513.63653294,
    328.34456911, 198.27747065, 127.38265241,
]
fig = plt.figure(figsize=(7,4))
# For model
# NOTE(review): y-values are heights in metres (zg_mean), but the ylabel still
# says 'Vertical layer' — consider relabeling.
ax_1 = fig.add_subplot(111, xlabel='Mean Cloud Cover', ylabel='Vertical layer',
                       title='NN Column Based Cloud Cover param. (First trained model)', xlim=[-1, 20])
ax_1.plot(clc_mean, zg_mean)
ax_1.plot(clc_data_mean, zg_mean)
# plt.gca().invert_yaxis()
ax_1.legend(['Prediction', 'Truth'])
# # For model_2
# ax_2 = fig.add_subplot(122, xlabel='Mean Cloud Cover', ylabel='Vertical layer',
# title='NN Cloud Cover parameterization (Second trained model)', xlim=[-1, 20])
# ax_2.plot(clc_mean_2, zg_mean)
# ax_2.plot(clc_data_mean_2, zg_mean)
# # plt.gca().invert_yaxis()
# ax_2.legend(['Prediction', 'Truth'])
fig.savefig('../figures/qubicc_offline_testing_mean_cloud_cover_NNs_height_vert_axis_1st_model.pdf')
# +
# With height vertical axis
# Mean geometric height [m] per vertical layer (same constants as above),
# taken from the grid cell based model.
zg_mean = [
    20784.62706138, 19153.33733398, 17603.95786796, 16133.9890368,
    14740.72817851, 13421.56030093, 12174.49640667, 10997.54366598,
    9888.86828393, 8846.77905352, 7869.70937839, 6956.19812463,
    6104.87029163, 5314.41907809, 4583.59136617, 3911.17886673,
    3296.01709893, 2736.9940178, 2233.06954874, 1783.30740375,
    1386.91651733, 1043.31024674, 752.18615016, 513.63653294,
    328.34456911, 198.27747065, 127.38265241,
]
fig = plt.figure(figsize=(7,4))
# # For model
# ax_1 = fig.add_subplot(121, xlabel='Mean Cloud Cover', ylabel='Vertical layer',
# title='NN Cloud Cover parameterization (First trained model)', xlim=[-1, 20])
# ax_1.plot(clc_mean, zg_mean)
# ax_1.plot(clc_data_mean, zg_mean)
# # plt.gca().invert_yaxis()
# ax_1.legend(['Prediction', 'Truth'])
# For model_2
# NOTE(review): y-values are heights in metres (zg_mean), but the ylabel still
# says 'Vertical layer' — consider relabeling.
ax_2 = fig.add_subplot(111, xlabel='Mean Cloud Cover', ylabel='Vertical layer',
                       title='NN Column Based Cloud Cover param. (Second trained model)', xlim=[-1, 20])
ax_2.plot(clc_mean_2, zg_mean)
ax_2.plot(clc_data_mean_2, zg_mean)
# plt.gca().invert_yaxis()
ax_2.legend(['Prediction', 'Truth'])
fig.savefig('../figures/qubicc_offline_testing_mean_cloud_cover_NNs_height_vert_axis_2nd_model.pdf')
# + jupyter={"outputs_hidden": true}
# Nando's suggestion: Panel showing the differences (absolute and/or normalised with the truth cloud cover)
fig = plt.figure(figsize=(16,6))
# For model
ax_1 = fig.add_subplot(121, xlabel='Mean Cloud Cover difference', ylabel='Vertical layer',
                       title='NN Cloud Cover parameterization absolute difference (First trained model)')
ax_1.plot(clc_mean - clc_data_mean, a, 'bo')
# Zero-difference reference line
ax_1.plot(0*a, a, 'g--')
plt.gca().invert_yaxis()
ax_1.legend(['Prediction - Truth'])
# NOTE(review): with xscale='log', negative normalized differences cannot be
# drawn — confirm the relative errors are expected to be positive here.
ax_2 = fig.add_subplot(122, xscale='log', xlabel='Mean Cloud Cover difference (log scale)', ylabel='Vertical layer',
                       title='NN Cloud Cover parameterization normalized difference (First trained model)')
ax_2.plot((clc_mean - clc_data_mean)/clc_data_mean, a, 'bo')
# ax_2.plot(0*a, a, 'g--')
plt.gca().invert_yaxis()
ax_2.legend(['(Prediction - Truth)/Truth'])
# -
# -
# *Computing the standard error of regression per vertical layer* <br>
# The standard error of regression (ser) is defined as the square root of the variance of the residuals. Actually it's the same as the RMSE.
# +
# Nando's suggestion: use a metric to test significance. SER is what comes closest, but significance is rather for classification problems
# Per-layer RMSE of each model on the test set (idiomatic comprehensions
# instead of the original append loops).
ser = [np.sqrt(np.mean((pred_adj[:, i] - output_test[:, i])**2)) for i in range(27)]
ser_2 = [np.sqrt(np.mean((pred_adj_2[:, i] - output_test[:, i])**2)) for i in range(27)]
# NOTE(review): the totals use np.var, i.e. the mean residual is subtracted
# first, whereas the per-layer values above are plain RMSEs. The two only agree
# for unbiased residuals — confirm this asymmetry is intended.
ser_total = np.sqrt(np.var(pred_adj - output_test))
ser_total_2 = np.sqrt(np.var(pred_adj_2 - output_test))
# -
# + jupyter={"outputs_hidden": true}
fig = plt.figure(figsize=(16,6))
# For model
ax_1 = fig.add_subplot(121, xlabel='Test Set Standard Error of Regression/RMSE', ylabel='Vertical layer',
                       title='Column-based NN test set SER/RMSE (First trained model)')
ax_1.plot(ser, a)
# ax_1.plot(np.mean(ser)*np.ones(len(a)), a, 'g--')
plt.gca().invert_yaxis()
# ax_1.legend(['Prediction - Truth'])
ax_2 = fig.add_subplot(122, xlabel='Test Set Standard Error of Regression/RMSE', ylabel='Vertical layer',
                       title='NN Cloud Cover parameterization (Second trained model)')
ax_2.plot(ser_2, a)
plt.gca().invert_yaxis()
# plt.savefig('../figures/qubicc_test_set_rmse_ser.pdf')
# NOTE(review): only one curve is plotted on ax_2, so only the first label
# ('Prediction') is used — the legend looks copied from another cell.
ax_2.legend(['Prediction', 'Truth'])
# + jupyter={"outputs_hidden": true}
print('{}{:.3f}'.format('The total SER of the first model is ', ser_total))
print('{}{:.3f}'.format('The total SER of the second model is ', ser_total_2))
# -
# #### **Fourth visualization method** <br>
# Interpret NN parameterizations on QUBICC data in NARVAL region
# We need to run the following cells first: <br>
# [import_cell_1](#import_cell_1),
# [import_cell_2](#import_cell_2),
# [load_models](#load_models),
# [order_of_vars](#order_of_vars),
# [load_data_nov_20s](#load_data_nov_20s),
# [means_stds](#means_stds),
# [reshape_1](#reshape_1),
#
# Here might be a better way to execute jupyter cells in a prescribed order: https://stackoverflow.com/questions/47567834/execute-a-jupyter-notebook-cell-programmatically (I couldn't get it running)
# Inspect the dimensions of every loaded variable.
for field in data_dict.values():
    print(field.shape)
# We need to:
# 1) Reinsert nans that arose through overlapping topography during vertical interpolation into the QUBICC data
# 2) Set fields from outside the NARVAL region to nan
# 3) We select the fields where the lowermost levels are not nan
#
# Step 1): So that we have 20480 horizontal fields and can compare that directly to the R02B04-grid. <br>
# Note that data_dict.shape[2] != data_dict_new.shape[2]
#
# Step 3) should leave us with only those grid columns that are in the NARVAL region and that have been coarse-grained without producing nans in QUBICC
# *1. Overlapping nans*
# A single R02B04-coarse-grained file suffices to find which grid cells were
# nan'ed out by overlapping topography (checked at the lowermost layer, 30).
path = '/pf/b/b309170/my_work/QUBICC/data_var_vertinterp/cli'
file_name = 'int_var_hc2_02_p1m_cli_ml_20041112T200000Z_R02B04.nc'
DS = xr.open_dataset(os.path.join(path, file_name))
da_cli = DS.cli.values
overlap_nans = np.isnan(da_cli[0,30,:])
overlap_nans.shape
# Create new data_dict with correct dimensionality:
# full-grid (len(overlap_nans) cells) zero arrays for every variable;
# fr_lake is the only variable without a vertical dimension.
data_dict_new = {
    key: (np.zeros((field.shape[0], field.shape[1], len(overlap_nans)))
          if key != 'fr_lake'
          else np.zeros((field.shape[0], len(overlap_nans))))
    for key, field in data_dict.items()
}
# +
# ov_f is a temporary array that is adjusted depending on the key, time step and vertical layer
ov_f = np.array(overlap_nans, dtype=float)
ov_f[np.where(ov_f == 1)] = np.nan # Every True in overlap_nans will be a nan
ov_f_copy = ov_f.copy() # Need this otherwise ov_f == 0 will never be true anymore when beginning the loop
# Scatter each (time, layer) slice of the reduced data into the full-grid array:
# nan-free positions receive data, the overlapping-topography cells stay nan.
# Assumes the count of False entries in overlap_nans equals the horizontal size
# of data_dict[key] — TODO confirm against load_data.
for key in data_dict.keys():
    for i in range(TIME_STEPS):
        if key != 'fr_lake':
            for j in range(VERT_LAYERS):
                ov_f_copy[np.where(ov_f == 0)] = data_dict[key][i,j,:]
                data_dict_new[key][i,j,:] = ov_f_copy
        else:
            ov_f_copy[np.where(ov_f == 0)] = data_dict[key][i,:]
            data_dict_new[key][i,:] = ov_f_copy
# -
for keys in data_dict_new.keys():
    print(data_dict_new[keys].shape)
# *2. Which of these entries are from the NARVAL region?*
# Narval region: 68W-15E, 10S-20N
# The grid stores coordinates in radians, so convert the degree bounds.
lat_lower_bound = -10/90*np.pi/2
lat_upper_bound = 20/90*np.pi/2
lon_lower_bound = -68/180*np.pi
lon_upper_bound = 15/180*np.pi
# +
#Load native grid
# Cell-centre coordinates of the ICON R02B04 grid — presumably in radians,
# since the bounds above are converted from degrees (confirm against the grid file).
qubicc_r02b04_path = '/pf/b/b309170/my_work/QUBICC/grids/icon_grid_0013_R02B04_G.nc'
qubicc_r02b04 = xr.open_dataset(qubicc_r02b04_path)
qubicc_lat = qubicc_r02b04.lat_cell_centre.values
qubicc_lon = qubicc_r02b04.lon_cell_centre.values
# -
# Keep only the narval region
# Vectorized replacement for the original triple Python loop (key × time × cell),
# which iterated over 20480 cells per time step: every grid cell outside the
# NARVAL box is set to nan for all time steps and, where applicable, all
# vertical layers. Assumes the arrays' first two axes are exactly TIME_STEPS and
# VERT_LAYERS, which matches the loop bounds used throughout this notebook.
in_narval = ((lat_lower_bound <= qubicc_lat[:20480]) & (qubicc_lat[:20480] <= lat_upper_bound)
             & (lon_lower_bound <= qubicc_lon[:20480]) & (qubicc_lon[:20480] <= lon_upper_bound))
outside = np.where(~in_narval)[0]
for key in data_dict.keys():
    if key != 'fr_lake':
        data_dict_new[key][:, :, outside] = np.nan
    else:
        data_dict_new[key][:, outside] = np.nan
# *3. We select the fields where the lowermost levels are not nan*
not_nan = ~np.isnan(data_dict_new['hus'][0,30,:])
# Create new data_dict with correct dimensionality: keep only the columns whose
# lowermost level (layer 30) holds valid data.
data_dict_selected_fields = {
    key: (field[:, :, not_nan] if key != 'fr_lake' else field[:, not_nan])
    for key, field in data_dict_new.items()
}
# Now we can use [reshape_2](#reshape_2) as above:
# +
# One sample should contain a column of information
data_dict_reshaped = {}
for key in data_dict_selected_fields.keys():
    if data_dict_selected_fields[key].shape[1] == VERT_LAYERS:
        for i in range(4, VERT_LAYERS):
            new_key = '{}{}{:d}'.format(key,'_',i)
            data_dict_reshaped[new_key] = np.reshape(data_dict_selected_fields[key][:,i,:], -1)
    else:
        data_dict_reshaped[key] = np.reshape(data_dict_selected_fields[key], -1)
# Remove constant fields
del data_dict_reshaped['zg_4']
del data_dict_reshaped['zg_5']
del data_dict_reshaped['zg_6']
# BUG FIX: the liquid-water variable is named 'clw' in order_of_vars, so the key
# is 'clw_4' (as the fifth-visualization cell uses); 'qclw_phy_4' does not exist
# here and the original del raised a KeyError.
del data_dict_reshaped['clw_4']
# -
# Run [df](#df), [df_scaled](#df_scaled), [input_test](#input_test) and [predicting](#predicting)!
#
# Finally we can plot the results:
# Vertical-layer axis defined in the 'predicting' cell — displayed for inspection.
a
# Mean geometric height [m] of vertical layers 5-31, taken from the grid cell
# based model.
zg_mean = [
    20784.62706138, 19153.33733398, 17603.95786796, 16133.9890368,
    14740.72817851, 13421.56030093, 12174.49640667, 10997.54366598,
    9888.86828393, 8846.77905352, 7869.70937839, 6956.19812463,
    6104.87029163, 5314.41907809, 4583.59136617, 3911.17886673,
    3296.01709893, 2736.9940178, 2233.06954874, 1783.30740375,
    1386.91651733, 1043.31024674, 752.18615016, 513.63653294,
    328.34456911, 198.27747065, 127.38265241,
]
# +
fig = plt.figure(figsize=(15,4))
# For model: mean profiles against mean layer height.
ax_1 = fig.add_subplot(121, title='Column based NN cloud cover (NARVAL region) (First model)', xlim=[0, 35])
ax_1.set_xlabel('Mean Cloud Cover [%]', fontsize = 12)
ax_1.set_ylabel('Mean height of a vertical layer [m]', fontsize = 11)
ax_1.plot(clc_mean, zg_mean)
ax_1.plot(clc_data_mean, zg_mean)
# plt.gca().invert_yaxis()
ax_1.legend(['Prediction', 'Truth'])
# For model_2
ax_2 = fig.add_subplot(122, title='Column based NN cloud cover (NARVAL region) (Second model)', xlim=[0, 35])
ax_2.set_xlabel('Mean Cloud Cover [%]', fontsize = 12)
ax_2.set_ylabel('Mean height of a vertical layer [m]', fontsize = 11)
ax_2.plot(clc_mean_2, zg_mean)
ax_2.plot(clc_data_mean_2, zg_mean)
# plt.gca().invert_yaxis()
ax_2.legend(['Prediction', 'Truth'])
fig.savefig('../figures/qubicc_offline_testing_mean_cloud_cover_NARVAL_region_nov_20s.pdf')
# -
# Same profiles, plotted against the layer index instead of the mean layer height.
fig = plt.figure(figsize=(15,4))
# For model
ax_1 = fig.add_subplot(121, xlabel='Mean Cloud Cover', ylabel='Vertical layer',
                       title='NN Cloud Cover parameterization (First trained model)')
ax_1.plot(clc_mean, a)
ax_1.plot(clc_data_mean, a)
plt.gca().invert_yaxis()
ax_1.legend(['Prediction', 'Truth'])
# For model_2
ax_2 = fig.add_subplot(122, xlabel='Mean Cloud Cover', ylabel='Vertical layer',
                       title='NN Cloud Cover parameterization (Second trained model)')
ax_2.plot(clc_mean_2, a)
ax_2.plot(clc_data_mean_2, a)
plt.gca().invert_yaxis()
ax_2.legend(['Prediction', 'Truth'])
fig.savefig('../figures/qubicc_offline_testing_mean_cloud_cover_NNs_NARVAL_region_nov_20s.pdf')
# The plot **qubicc_offline_testing_mean_cloud_cover_NNs_NARVAL_region_alleged.pdf** is actually evaluated on the region of the globe shown in the pdf alleged_narval_region_on_qubicc_r2b4_grid.pdf which can be found in additional_content/comparing_icon_grids. The issue arose due to differences between the R2B4 grid I coarse-grained Qubicc to (0013) and the R2B4 grid I coarse-grained Narval to (0005).
# #### **Fifth visualization** <br>
# Interpret NN parameterizations on QUBICC data in i) the tropical zone and ii) over the Southern Ocean
# We need to run the following cells first: <br>
# [import_cell_1](#import_cell_1),
# [import_cell_2](#import_cell_2),
# [load_models](#load_models),
# [order_of_vars](#order_of_vars),
# [load_data_nov_20s](#load_data_nov_20s),
# [means_stds](#means_stds),
# [reshape_1](#reshape_1),
#
# Here might be a better way to execute jupyter cells in a prescribed order: https://stackoverflow.com/questions/47567834/execute-a-jupyter-notebook-cell-programmatically (I couldn't get it running)
# Inspect the dimensions of every loaded variable.
for field in data_dict.values():
    print(field.shape)
# We need to:
# 1) Reinsert nans that arose through overlapping topography during vertical interpolation into the QUBICC data
# 2) Set fields from outside the region of interest (here: the tropical zone / Southern Ocean, not NARVAL) to nan
# 3) We select the columns where the lowermost levels are not nan
#
# Step 1): So that we have 20480 horizontal fields and can compare that directly to the R02B04-grid. <br>
# Note that data_dict.shape[2] != data_dict_new.shape[2]
#
# Step 3) should leave us with only those grid columns that are in the selected region (tropical zone or Southern Ocean) and that have been coarse-grained without producing nans in QUBICC
# *1. Overlapping nans*
# As in the fourth visualization: one coarse-grained file tells us which grid
# cells were nan'ed out by overlapping topography (lowermost layer, index 30).
path = '/pf/b/b309170/my_work/QUBICC/data_var_vertinterp/cli'
file_name = 'int_var_hc2_02_p1m_cli_ml_20041112T200000Z_R02B04.nc'
DS = xr.open_dataset(os.path.join(path, file_name))
da_cli = DS.cli.values
overlap_nans = np.isnan(da_cli[0,30,:])
overlap_nans.shape
# Create new data_dict with correct dimensionality:
# full-grid (len(overlap_nans) cells) zero arrays for every variable;
# fr_lake is the only variable without a vertical dimension.
data_dict_new = {
    key: (np.zeros((field.shape[0], field.shape[1], len(overlap_nans)))
          if key != 'fr_lake'
          else np.zeros((field.shape[0], len(overlap_nans))))
    for key, field in data_dict.items()
}
# +
# ov_f is a temporary array that is adjusted depending on the key, time step and vertical layer
ov_f = np.array(overlap_nans, dtype=float)
ov_f[np.where(ov_f == 1)] = np.nan # Every True in overlap_nans will be a nan
ov_f_copy = ov_f.copy() # Need this otherwise ov_f == 0 will never be true anymore when beginning the loop
# Scatter each (time, layer) slice of the reduced data into the full-grid array:
# nan-free positions receive data, the overlapping-topography cells stay nan.
# Assumes the count of False entries in overlap_nans equals the horizontal size
# of data_dict[key] — TODO confirm against load_data.
for key in data_dict.keys():
    for i in range(TIME_STEPS):
        if key != 'fr_lake':
            for j in range(VERT_LAYERS):
                ov_f_copy[np.where(ov_f == 0)] = data_dict[key][i,j,:]
                data_dict_new[key][i,j,:] = ov_f_copy
        else:
            ov_f_copy[np.where(ov_f == 0)] = data_dict[key][i,:]
            data_dict_new[key][i,:] = ov_f_copy
# -
for keys in data_dict_new.keys():
    print(data_dict_new[keys].shape)
# *2. Which of these entries are from the tropical zone?*
# Tropical zone: 23.4365S-23.4365N (Narval region: 68W-15E, 10S-20N)
# The grid stores coordinates in radians, so convert the degree bounds.
lat_upper_bound = 23.4365/90*np.pi/2
lat_lower_bound = -23.4365/90*np.pi/2
# +
#Load native grid
# Cell-centre coordinates of the ICON R02B04 grid — presumably in radians,
# since the bounds above are converted from degrees (confirm against the grid file).
qubicc_r02b04_path = '/pf/b/b309170/my_work/QUBICC/grids/icon_grid_0013_R02B04_G.nc'
qubicc_r02b04 = xr.open_dataset(qubicc_r02b04_path)
qubicc_lat = qubicc_r02b04.lat_cell_centre.values
qubicc_lon = qubicc_r02b04.lon_cell_centre.values
# -
# Keep only the tropical zone
# Vectorized replacement for the original triple Python loop (key × time × cell),
# which iterated over 20480 cells per time step: every grid cell outside the
# latitude band is set to nan for all time steps and, where applicable, all
# vertical layers. Assumes the arrays' first two axes are exactly TIME_STEPS and
# VERT_LAYERS, which matches the loop bounds used throughout this notebook.
in_tropics = (lat_lower_bound <= qubicc_lat[:20480]) & (qubicc_lat[:20480] <= lat_upper_bound)
outside = np.where(~in_tropics)[0]
for key in data_dict.keys():
    if key != 'fr_lake':
        data_dict_new[key][:, :, outside] = np.nan
    else:
        data_dict_new[key][:, outside] = np.nan
# *3. We select the fields where the lowermost levels are not nan*
not_nan = ~np.isnan(data_dict_new['hus'][0,30,:])
# Create new data_dict with correct dimensionality: keep only the columns whose
# lowermost level (layer 30) holds valid data.
data_dict_selected_fields = {
    key: (field[:, :, not_nan] if key != 'fr_lake' else field[:, not_nan])
    for key, field in data_dict_new.items()
}
# Now we can use [reshape_2](#reshape_2) as above:
# +
# One sample should contain a column of information:
# 3D variables become 27 flat columns (layers 4..30), 2D variables one column.
data_dict_reshaped = {}
for var, field in data_dict_selected_fields.items():
    if field.shape[1] == VERT_LAYERS:
        for layer in range(4, VERT_LAYERS):
            data_dict_reshaped['{}_{:d}'.format(var, layer)] = field[:, layer, :].reshape(-1)
    else:
        data_dict_reshaped[var] = field.reshape(-1)
# Remove constant fields
for constant_key in ('zg_4', 'zg_5', 'zg_6', 'clw_4'):
    del data_dict_reshaped[constant_key]
# -
# Run [df](#df), [df_scaled](#df_scaled), [input_test](#input_test) and [predicting](#predicting)!
#
# Finally we can plot the results:
#Taken from the grid cell based model
zg_mean = [20784.62706138, 19153.33733398, 17603.95786796, 16133.9890368 ,
14740.72817851, 13421.56030093, 12174.49640667, 10997.54366598,
9888.86828393, 8846.77905352, 7869.70937839, 6956.19812463,
6104.87029163, 5314.41907809, 4583.59136617, 3911.17886673,
3296.01709893, 2736.9940178 , 2233.06954874, 1783.30740375,
1386.91651733, 1043.31024674, 752.18615016, 513.63653294,
328.34456911, 198.27747065, 127.38265241]
# +
fig = plt.figure(figsize=(15,4))
# For model
ax_1 = fig.add_subplot(121, title='Column based NN cloud cover (Tropical Zone) (First model)', xlim=[0, 35])
ax_1.set_xlabel('Mean Cloud Cover [%]', fontsize = 12)
ax_1.set_ylabel('Mean height of a vertical layer [m]', fontsize = 11)
ax_1.plot(clc_mean, zg_mean)
ax_1.plot(clc_data_mean, zg_mean)
# plt.gca().invert_yaxis()
ax_1.legend(['Prediction', 'Truth'])
# For model_2
ax_2 = fig.add_subplot(122, title='Column based NN cloud cover (Tropical Zone) (Second model)', xlim=[0, 35])
ax_2.set_xlabel('Mean Cloud Cover [%]', fontsize = 12)
ax_2.set_ylabel('Mean height of a vertical layer [m]', fontsize = 11)
ax_2.plot(clc_mean_2, zg_mean)
ax_2.plot(clc_data_mean_2, zg_mean)
# plt.gca().invert_yaxis()
ax_2.legend(['Prediction', 'Truth'])
fig.savefig('../figures/qubicc_offline_testing_mean_cloud_cover_tropical_zone_nov_20s.pdf')
# -
fig = plt.figure(figsize=(15,4))
# For model
ax_1 = fig.add_subplot(121, xlabel='Mean Cloud Cover', ylabel='Vertical layer',
title='NN Cloud Cover parameterization (First trained model)')
ax_1.plot(clc_mean, a)
ax_1.plot(clc_data_mean, a)
plt.gca().invert_yaxis()
ax_1.legend(['Prediction', 'Truth'])
# For model_2
ax_2 = fig.add_subplot(122, xlabel='Mean Cloud Cover', ylabel='Vertical layer',
title='NN Cloud Cover parameterization (Second trained model)')
ax_2.plot(clc_mean_2, a)
ax_2.plot(clc_data_mean_2, a)
plt.gca().invert_yaxis()
ax_2.legend(['Prediction', 'Truth'])
fig.savefig('../figures/qubicc_offline_testing_mean_cloud_cover_NNs_tropical_zone_nov_20s.pdf')
print(list(clc_mean))
print(list(clc_mean_2))
print(clc_data_mean)
# **Southern Ocean** <br>
# Initialize data_dict like in **Fifth visualization** above
# Create new data_dict with correct dimensionality
data_dict_new = {}
for key in data_dict.keys():
if key != 'fr_lake':
data_dict_new[key] = np.zeros((data_dict[key].shape[0], data_dict[key].shape[1],
len(overlap_nans)))
else:
data_dict_new[key] = np.zeros((data_dict[key].shape[0], len(overlap_nans)))
# +
# ov_f is a temporary array that is adjusted depending on the key, time step and vertical layer.
# Pattern: positions where overlap_nans is True stay NaN; positions where it is
# False (== 0) are overwritten with the compact field values for the current
# slice, so each field is expanded back onto the full horizontal grid.
ov_f = np.array(overlap_nans, dtype=float)
ov_f[np.where(ov_f == 1)] = np.nan # Every True in overlap_nans will be a nan
ov_f_copy = ov_f.copy() # Need this otherwise ov_f == 0 will never be true anymore when beginning the loop
for key in data_dict.keys():
    for i in range(TIME_STEPS):
        if key != 'fr_lake':
            # 3D fields: fill one (time, level) slice of the full grid at a time.
            for j in range(VERT_LAYERS):
                ov_f_copy[np.where(ov_f == 0)] = data_dict[key][i,j,:]
                data_dict_new[key][i,j,:] = ov_f_copy
        else:
            # 'fr_lake' is a 2D surface field: (time, cell).
            ov_f_copy[np.where(ov_f == 0)] = data_dict[key][i,:]
            data_dict_new[key][i,:] = ov_f_copy
# -
for keys in data_dict_new.keys():
print(data_dict_new[keys].shape)
# *2. Which of these entries are from the Southern Ocean?*
# Southern Ocean: South of 60S (Narval region: 68W-15E, 10S-20N)
lat_upper_bound = -60/90*np.pi/2
# +
#Load native grid
qubicc_r02b04_path = '/pf/b/b309170/my_work/QUBICC/grids/icon_grid_0013_R02B04_G.nc'
qubicc_r02b04 = xr.open_dataset(qubicc_r02b04_path)
qubicc_lat = qubicc_r02b04.lat_cell_centre.values
qubicc_lon = qubicc_r02b04.lon_cell_centre.values
# -
# Keep only the Southern Ocean: set every horizontal cell north of
# lat_upper_bound (-60 degrees, in radians) to NaN. Vectorized with a single
# boolean mask -- replaces the original triple Python loop over
# TIME_STEPS x 20480 cells x VERT_LAYERS elements.
# NOTE(review): assumes the time axis holds exactly TIME_STEPS entries and the
# horizontal axis exactly len(qubicc_lat) == 20480 cells, as the original loop did.
north_of_southern_ocean = qubicc_lat > lat_upper_bound
for key in data_dict.keys():
    if key != 'fr_lake':
        # 3D fields: (time, level, cell)
        data_dict_new[key][:, :, north_of_southern_ocean] = np.nan
    else:
        # 'fr_lake' is a 2D surface field: (time, cell)
        data_dict_new[key][:, north_of_southern_ocean] = np.nan
# *3. We select the fields where the lowermost levels are not nan*
not_nan = ~np.isnan(data_dict_new['hus'][0,30,:])
# Create new data_dict with correct dimensionality
data_dict_selected_fields = {}
for key in data_dict.keys():
if key != 'fr_lake':
data_dict_selected_fields[key] = data_dict_new[key][:,:,not_nan]
else:
data_dict_selected_fields[key] = data_dict_new[key][:,not_nan]
# Now we can use [reshape_2](#reshape_2) as above:
# +
# One sample should contain a column of information
data_dict_reshaped = {}
for key in data_dict_selected_fields.keys():
if data_dict_selected_fields[key].shape[1] == VERT_LAYERS:
for i in range(4, VERT_LAYERS):
new_key = '{}{}{:d}'.format(key,'_',i)
data_dict_reshaped[new_key] = np.reshape(data_dict_selected_fields[key][:,i,:], -1)
else:
data_dict_reshaped[key] = np.reshape(data_dict_selected_fields[key], -1)
# Remove constant fields
del data_dict_reshaped['zg_4']
del data_dict_reshaped['zg_5']
del data_dict_reshaped['zg_6']
del data_dict_reshaped['clw_4']
# -
# Run [df](#df), [df_scaled](#df_scaled), [input_test](#input_test) and [predicting](#predicting)!
#
# Finally we can plot the results:
a
#Taken from the grid cell based model
zg_mean = [20784.62706138, 19153.33733398, 17603.95786796, 16133.9890368 ,
14740.72817851, 13421.56030093, 12174.49640667, 10997.54366598,
9888.86828393, 8846.77905352, 7869.70937839, 6956.19812463,
6104.87029163, 5314.41907809, 4583.59136617, 3911.17886673,
3296.01709893, 2736.9940178 , 2233.06954874, 1783.30740375,
1386.91651733, 1043.31024674, 752.18615016, 513.63653294,
328.34456911, 198.27747065, 127.38265241]
# +
fig = plt.figure(figsize=(15,4))
# For model
ax_1 = fig.add_subplot(121, title='Column based NN cloud cover (Southern Ocean) (First model)', xlim=[0, 35])
ax_1.set_xlabel('Mean Cloud Cover [%]', fontsize = 12)
ax_1.set_ylabel('Mean height of a vertical layer [m]', fontsize = 11)
ax_1.plot(clc_mean, zg_mean)
ax_1.plot(clc_data_mean, zg_mean)
# plt.gca().invert_yaxis()
ax_1.legend(['Prediction', 'Truth'])
# For model_2
ax_2 = fig.add_subplot(122, title='Column based NN cloud cover (Southern Ocean) (Second model)', xlim=[0, 35])
ax_2.set_xlabel('Mean Cloud Cover [%]', fontsize = 12)
ax_2.set_ylabel('Mean height of a vertical layer [m]', fontsize = 11)
ax_2.plot(clc_mean_2, zg_mean)
ax_2.plot(clc_data_mean_2, zg_mean)
# plt.gca().invert_yaxis()
ax_2.legend(['Prediction', 'Truth'])
fig.savefig('../figures/qubicc_offline_testing_mean_cloud_cover_southern_ocean_nov_20s.pdf')
# -
fig = plt.figure(figsize=(15,4))
# For model
ax_1 = fig.add_subplot(121, xlabel='Mean Cloud Cover', ylabel='Vertical layer',
title='NN Cloud Cover parameterization (First trained model)')
ax_1.plot(clc_mean, a)
ax_1.plot(clc_data_mean, a)
plt.gca().invert_yaxis()
ax_1.legend(['Prediction', 'Truth'])
# For model_2
ax_2 = fig.add_subplot(122, xlabel='Mean Cloud Cover', ylabel='Vertical layer',
title='NN Cloud Cover parameterization (Second trained model)')
ax_2.plot(clc_mean_2, a)
ax_2.plot(clc_data_mean_2, a)
plt.gca().invert_yaxis()
ax_2.legend(['Prediction', 'Truth'])
fig.savefig('../figures/qubicc_offline_testing_mean_cloud_cover_NNs_southern_ocean_nov_20s.pdf')
print(list(clc_mean))
print(list(clc_mean_2))
print(clc_data_mean)
| n2_column_based_narval_r2b4/source_code/test_column_NN_on_QUBICC_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] azdata_cell_guid="2e1cde7c-c21c-4eed-8fed-6d1fc3e6625d"
# # Running Code
# + [markdown] azdata_cell_guid="a46c018f-4adc-4e63-a13c-688394fcc27f"
# *First* and foremost, the Jupyter Notebook is an interactive environment for writing and running code. The notebook is capable of running code in a wide range of languages. However, each notebook is associated with a single kernel. This notebook is associated with the IPython kernel, therefore runs Python code.
#
# + [markdown] azdata_cell_guid="40787b35-53fe-40e4-ac10-60c008cf61f0"
# ## Code cells allow you to enter and run code
# +
from IPython.display import IFrame
IFrame(src='https://s3.amazonaws.com/duhaime/blog/visualizations/isolation-forests.html', width=700, height=600)
# +
from IPython.display import HTML
input_form = """
<div style="background-color:gainsboro; border:solid black; width:300px; padding:20px;">
Variable Name: <input type="text" id="var_name" value="foo"><br>
Variable Value: <input type="text" id="var_value" value="bar"><br>
<button onclick="set_value()">Set Value</button>
</div>
"""
javascript = """
<script type="text/Javascript">
window.set_value = function (){
document.getElementById('var_name').value = 'rebornix';
}
</script>
"""
HTML(input_form + javascript)
# + azdata_cell_guid="1dd74f3c-996e-4e7c-abcc-6d57815807a5" tags=[]
# Fix: the original line was missing the closing parenthesis, which is a
# SyntaxError. (If this cell intentionally tests error rendering, revert.)
b = input('type:')
print(b)
# + [markdown] azdata_cell_guid="cfd851ac-e42e-4f5c-896b-98250ae87f53"
# There are two other keyboard shortcuts for running code:
#
# * `Alt-Enter` runs the current cell and inserts a new one below.
# * `Ctrl-Enter` run the current cell and enters command mode.
# + [markdown] azdata_cell_guid="9993905c-84dc-44c8-9949-cc33211a8b0e"
# ## ipywidgets support
# Widgets linking
# +
from ipywidgets import VBox, jsdlink, IntSlider, Button
s1, s2 = IntSlider(max=200, value=100), IntSlider(value=40)
jsdlink((s1, 'value'), (s2, 'max'))
VBox([s1, s2])
# + [markdown] azdata_cell_guid="2e1cde7c-c21c-4eed-8fed-6d1fc3e6625d"
# Widgets sharing the same models
# -
VBox([s1, s2])
# + [markdown] azdata_cell_guid="2e1cde7c-c21c-4eed-8fed-6d1fc3e6625d"
# # Running Code
# + [markdown] azdata_cell_guid="a46c018f-4adc-4e63-a13c-688394fcc27f"
# *First* and foremost, the Jupyter Notebook is an interactive environment for writing and running code. The notebook is capable of running code in a wide range of languages. However, each notebook is associated with a single kernel. This notebook is associated with the IPython kernel, therefore runs Python code.
#
# + [markdown] azdata_cell_guid="40787b35-53fe-40e4-ac10-60c008cf61f0"
# ## Code cells allow you to enter and run code
# +
from IPython.display import IFrame
IFrame(src='https://s3.amazonaws.com/duhaime/blog/visualizations/isolation-forests.html', width=700, height=600)
# +
from IPython.display import HTML
input_form = """
<div style="background-color:gainsboro; border:solid black; width:300px; padding:20px;">
Variable Name: <input type="text" id="var_name" value="foo"><br>
Variable Value: <input type="text" id="var_value" value="bar"><br>
<button onclick="set_value()">Set Value</button>
</div>
"""
javascript = """
<script type="text/Javascript">
window.set_value = function (){
document.getElementById('var_name').value = 'rebornix';
}
</script>
"""
HTML(input_form + javascript)
# + azdata_cell_guid="1dd74f3c-996e-4e7c-abcc-6d57815807a5" tags=[]
# Fix: the original line was missing the closing parenthesis, which is a
# SyntaxError. (If this cell intentionally tests error rendering, revert.)
b = input('type:')
print(b)
# + [markdown] azdata_cell_guid="cfd851ac-e42e-4f5c-896b-98250ae87f53"
# There are two other keyboard shortcuts for running code:
#
# * `Alt-Enter` runs the current cell and inserts a new one below.
# * `Ctrl-Enter` run the current cell and enters command mode.
# + [markdown] azdata_cell_guid="9993905c-84dc-44c8-9949-cc33211a8b0e"
# ## Managing the Kernel
# + [markdown] azdata_cell_guid="9b1217ea-3a8b-4900-a234-c923cd4a8b05"
# Code is run in a separate process called the Kernel. The Kernel can be interrupted or restarted. Try running the following cell and then hit the <button class='btn btn-default btn-xs'><i class='icon-stop fa fa-stop'></i></button> button in the toolbar above.
| test/ipywidgets-scroll.test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/julianovale/PythonFundamentosDSA/blob/main/DSA_Python_Cap04_05_Map.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="tWckDgSDJ9i0"
# # <font color='blue'>Data Science Academy - Python Fundamentos - Capítulo 4</font>
#
# ## Download: http://github.com/dsacademybr
# + colab={"base_uri": "https://localhost:8080/"} id="bJW0deP6J9jC" outputId="350bbf70-1132-43c7-9606-ad1eab9d765c"
# Versão da Linguagem Python
from platform import python_version
print('Versão da Linguagem Python Usada Neste Jupyter Notebook:', python_version())
# + [markdown] id="83DFEP_-J9jF"
# ## Map
# + id="6e2FnIOjJ9jG"
# Criando duas funções
# Função 1 - Recebe uma temperatura como parâmetro e retorna a temperatura em Fahrenheit
def fahrenheit(T):
    """Convert a temperature T from Celsius to Fahrenheit."""
    return T * (float(9) / 5) + 32
# Função 2 - Recebe uma temperatura como parâmetro e retorna a temperatura em Celsius
def celsius(T):
    """Convert a temperature T from Fahrenheit to Celsius."""
    delta = T - 32
    return delta * (float(5) / 9)
# + id="pFZ6W-hcJ9jG"
# Criando uma lista
temperaturas = [0, 22.5, 40, 100]
# + colab={"base_uri": "https://localhost:8080/"} id="rPwVIsOhJ9jH" outputId="59a8d05f-4ea4-449b-8c82-e44509f69820"
# Aplicando a função a cada elemento da lista de temperaturas.
# Em Python 3, a função map() retorna um iterator
map(fahrenheit, temperaturas)
# + colab={"base_uri": "https://localhost:8080/"} id="YchlHVhjJ9jI" outputId="40ea265b-a048-4565-9fe3-dade71f29b5c"
# Função map() retornando a lista de temperaturas convertidas em Fahrenheit
list(map(fahrenheit, temperaturas))
# + colab={"base_uri": "https://localhost:8080/"} id="IEXEHAsXKZGO" outputId="a96aa44c-adc0-4f63-d894-548d50de9221"
teste = list(map(fahrenheit, temperaturas))
teste[1]
# + colab={"base_uri": "https://localhost:8080/"} id="liOiyN1mJ9jI" outputId="bc67f657-0bc4-4a8d-e700-3df2f66ea907"
# Usando um loop for para imprimir o resultado da função map()
for temp in map(fahrenheit, temperaturas):
print(temp)
# + colab={"base_uri": "https://localhost:8080/"} id="TOxrmOAtJ9jJ" outputId="b2a368c8-5789-4201-f6fa-b31af622c915"
# Convertendo para Celsius
map(celsius, temperaturas)
# + colab={"base_uri": "https://localhost:8080/"} id="LFFgRts5J9jJ" outputId="104c7bc6-5a21-4142-ea2a-3db558deaf6c"
list(map(celsius, temperaturas))
# + id="kQBrcZ5-J9jK" outputId="f6ffdfcc-ea32-435b-e89b-7728f5f0c7a6"
# Usando lambda
map(lambda x: (5.0/9)*(x - 32), temperaturas)
# + id="zn6xgNweJ9jL" outputId="051f8e50-8dfe-43a1-c7b6-dd2bbd5b2aba"
list(map(lambda x: (5.0/9)*(x - 32), temperaturas))
# + id="1GRT6iO9J9jO"
# Somando os elementos de 2 listas
a = [1,2,3,4]
b = [5,6,7,8]
# + colab={"base_uri": "https://localhost:8080/"} id="okPgfa8EJ9jP" outputId="26f35b7b-4498-4ba1-866d-b11dfcf04a58"
list(map(lambda x,y:x+y, a, b))
# + id="rGvzoNCGJ9jP"
# Somando os elementos de 3 listas
a = [1,2,3,4]
b = [5,6,7,8]
c = [9,10,11,12]
# + colab={"base_uri": "https://localhost:8080/"} id="kQhcaUE0J9jQ" outputId="dbeed5c1-08e2-42ea-da1f-97a83d85149d"
list(map(lambda x,y,z:x+y+z, a, b, c))
# + [markdown] id="mmtK_cxrJ9jQ"
# # Fim
# + [markdown] id="HhBKzocQJ9jR"
# ### Obrigado - Data Science Academy - <a href="http://facebook.com/dsacademybr">facebook.com/dsacademybr</a>
| DSA_Python_Cap04_05_Map.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### 1. Load Libs
import math
import numpy as np
import tensorflow as tf
import seaborn as sb
import matplotlib.pyplot as plt
sb.set()
# ### 2. Generate Dataset
## quadratic function: y = 10 + x**2
def get_y(x):
    """Evaluate the target quadratic y = x**2 + 10 (elementwise on numpy arrays)."""
    squared = x ** 2
    return squared + 10
## sample x
def sample_data(n=10000, scale=100):
    """Draw n training samples from the target curve.

    x is uniform on [-scale/2, scale/2); y = get_y(x). Returns an (n, 2)
    float array of [x, y] rows. Vectorized: the original per-sample Python
    loop and list-append are replaced by one numpy expression (get_y is
    elementwise on arrays).
    """
    x = scale * (np.random.random_sample((n,)) - 0.5)
    y = get_y(x)
    return np.column_stack((x, y))
# ### 3. Config networks
# +
## generator input
def sample_Z(m, n):
    """Draw an (m, n) noise matrix, uniform on [-1, 1), as generator input."""
    return np.random.uniform(low=-1.0, high=1.0, size=(m, n))
## generator network
def generator(Z, hsize=[16, 16], reuse=False):
    """Map noise Z through leaky-ReLU dense layers (widths given by hsize)
    to a linear 2-unit output, i.e. one generated (x, y) point per row."""
    with tf.variable_scope("GAN/Generator", reuse=reuse):
        hidden = Z
        for layer_width in hsize:
            hidden = tf.layers.dense(hidden, layer_width, activation=tf.nn.leaky_relu)
        # Final linear projection to 2-D sample space.
        return tf.layers.dense(hidden, 2)
## discriminator network
def discriminator(X, hsize=[16, 16], reuse=False):
    """Score samples X as real vs. generated.

    Returns (out, X): `out` is the 1-unit logit (pre-sigmoid) and `X` is the
    final 2-unit hidden representation, which the training loop plots as the
    "transformed features".
    """
    with tf.variable_scope("GAN/Discriminator",reuse=reuse):
        for hiddle_size in hsize:
            X = tf.layers.dense(X, hiddle_size, activation = tf.nn.leaky_relu)
        # Extra 2-unit layer kept and returned for visualization.
        X = tf.layers.dense(X,2)
        out = tf.layers.dense(X,1) ## logits layer, before softmax
        return out, X
# -
# ### 4. Optimization Pipeline
# +
## Input placeholder
X = tf.placeholder(tf.float32, [None,2])
Z = tf.placeholder(tf.float32, [None,2])
## Forward Pass
G_sample = generator(Z, hsize=[16,16])
r_logits, r_rep = discriminator(X, hsize=[16,16])
f_logits, g_rep = discriminator(G_sample, hsize=[16,16],reuse=True)
## Compute Loss
dis_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=r_logits, labels=tf.ones_like(r_logits)) + \
tf.nn.sigmoid_cross_entropy_with_logits(logits=f_logits, labels=tf.zeros_like(f_logits)))
gen_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=f_logits, labels=tf.ones_like(f_logits)))
## Trainables
gen_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="GAN/Generator")
dis_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="GAN/Discriminator")
## Optimizer
lr = 1e-3
gen_step = tf.train.RMSPropOptimizer(learning_rate=lr).minimize(gen_loss, var_list = gen_vars) # G Train step
dis_step = tf.train.RMSPropOptimizer(learning_rate=lr).minimize(dis_loss, var_list = dis_vars) # D Train step
# +
# sess = tf.Session(config=config)
sess = tf.Session()
tf.global_variables_initializer().run(session=sess)
batch_size = 256
nd_steps = 10
ng_steps = 10
x_plot = sample_data(n=batch_size)
f = open('loss_logs.csv','w')
f.write('Iteration,Discriminator Loss,Generator Loss\n')
for i in range(10001):
X_batch = sample_data(n = batch_size)
Z_batch = sample_Z(batch_size, 2)
## run discriminator
for _ in range(nd_steps):
_, dloss = sess.run([dis_step, dis_loss], feed_dict={X: X_batch, Z: Z_batch})
## get representations
rrep_dstep, grep_dstep = sess.run([r_rep, g_rep], feed_dict={X: X_batch, Z: Z_batch})
## run generator
for _ in range(ng_steps):
_, gloss = sess.run([gen_step, gen_loss], feed_dict={Z: Z_batch})
## get representations
rrep_gstep, grep_gstep = sess.run([r_rep, g_rep], feed_dict={X: X_batch, Z: Z_batch})
print ("Iterations: %d\t Discriminator loss: %.4f\t Generator loss: %.4f"%(i,dloss,gloss), end = '\r')
if i%10 == 0:
f.write("%d,%f,%f\n"%(i,dloss,gloss))
if i%1000 == 0:
## plot Samples
plt.figure()
g_plot = sess.run(G_sample, feed_dict={Z: Z_batch})
xax = plt.scatter(x_plot[:,0], x_plot[:,1])
gax = plt.scatter(g_plot[:,0], g_plot[:,1])
plt.legend((xax, gax), ("Real Data","Generated Data"))
plt.title('Samples at Iteration %d'%i)
plt.tight_layout()
plt.savefig('../plots/iterations/iteration_%d.png'%i)
plt.close()
## plot Features
plt.figure()
rrd = plt.scatter(rrep_dstep[:,0], rrep_dstep[:,1], alpha=0.5)
rrg = plt.scatter(rrep_gstep[:,0], rrep_gstep[:,1], alpha=0.5)
grd = plt.scatter(grep_dstep[:,0], grep_dstep[:,1], alpha=0.5)
grg = plt.scatter(grep_gstep[:,0], grep_gstep[:,1], alpha=0.5)
plt.legend((rrd, rrg, grd, grg),
("Real Data Before G step","Real Data After G step",
"Generated Data Before G step","Generated Data After G step"))
plt.title('Transformed Features at Iteration %d'%i)
plt.tight_layout()
plt.savefig('../plots/features/feature_transform_%d.png'%i)
plt.close()
## plot Generated data
plt.figure()
rrdc = plt.scatter(np.mean(rrep_dstep[:,0]), np.mean(rrep_dstep[:,1]),s=100, alpha=0.5)
rrgc = plt.scatter(np.mean(rrep_gstep[:,0]), np.mean(rrep_gstep[:,1]),s=100, alpha=0.5)
grdc = plt.scatter(np.mean(grep_dstep[:,0]), np.mean(grep_dstep[:,1]),s=100, alpha=0.5)
grgc = plt.scatter(np.mean(grep_gstep[:,0]), np.mean(grep_gstep[:,1]),s=100, alpha=0.5)
plt.legend((rrdc, rrgc, grdc, grgc), ("Real Data Before G step","Real Data After G step",
"Generated Data Before G step","Generated Data After G step"))
plt.title('Centroid of Transformed Features at Iteration %d'%i)
plt.tight_layout()
plt.savefig('../plots/features/feature_transform_centroid_%d.png'%i)
plt.close()
f.close()
# -
| Exercise 3 - GAN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Adding custom python libraries
# It is possible to add additional python libraries to your notebook environment, but you first need to follow a few steps:
# ### 1. Create new virtual environment
# First, in a terminal, create a virtual environment
# ```sh
# python3 -m venv ~/venvs/myenv
# source ~/venvs/myenv/bin/activate
# ```
#
# To open a terminal, click on File>New>Terminal
# ### 2. Load default libraries
# In order to load all of the default python libraries that are installed in the default environment (e.g. including gdal, dask, boto, plotly, holoviews and many others), run the following command:
#
# ```sh
# realpath /env/lib/python3.6/site-packages > ~/venvs/myenv/lib/python3.6/site-packages/base_venv.pth
# ```
# ### 3. Install new libraries
# Then switch into the new venv
# ```sh
# source ~/venvs/myenv/bin/activate
# ```
# You will now be able to install new libraries into your new environment, e.g. below we install [geopy](https://geopy.readthedocs.io/en/stable/)
#
# ```sh
# pip install --upgrade pip geopy
# ```
# Inside a python prompt, you should now be able to import geopy:
# ```sh
# python3
# ```
# ```python
# >>>import geopy
# ```
# >***BUT*** at this point, it will only work inside the Terminal. Follow **step 4** below to make the environment available inside Jupyter.
# If you want to deactivate your new environment and return to the default, use the following command:
# ```sh
# deactivate
# ```
# ### 4. Virtual Env in EASI Hub Jupyter (optional)
#
# You can then add the new environment as a kernel inside Jupyter by running the following line. Note that you should have your new environment activitated (as shown above).
#
# ```sh
# python -m ipykernel install --user --name=myenv
# ```
# After running this example above, try refreshing the browser and clicking on the words "Python 3" at the top right. This allows you to select a different Kernel, including your new "myenv" environment.
| Adding python libraries.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Time series forecasting using recurrent neural networks
# ### Import necessary libraries
# +
# %matplotlib notebook
import numpy
import pandas
import math
import time
import sys
import datetime
import matplotlib.pyplot as ma
import keras.models as km
import keras.layers as kl
import sklearn.preprocessing as sp
# -
# ### Initialize random seed for constant neural network initialization
numpy.random.seed(42)
# ### Load necessary CSV file
# +
try:
    ts = pandas.read_csv('../../datasets/srv-1-usr-1h.csv')
except Exception as err:
    # Bug fix: the original passed read_csv keyword arguments (sep=',',
    # header=1) to print(), so the error handler itself raised TypeError.
    print("I am unable to connect to read .csv file:", err)
ts.index = pandas.to_datetime(ts['ts'])
# delete unnecessary columns
del ts['id']
del ts['ts']
# print table info
ts.info()
# -
# ### Get values from specified range
ts = ts['2018-06-16':'2018-07-15']
# ### Remove possible zero and NA values (by interpolation)
# We are using the MAPE formula for the final score, so no zero values may occur in the time series. Replace them with NA values. NA values are later explicitly removed by linear interpolation.
# +
def print_values_stats():
    # Report data-quality stats for the module-level `ts` frame: the number of
    # zero entries, missing (NA) entries and filled-in entries per column.
    # NOTE(review): iterates ts.values row-wise; with a single-column frame each
    # `x` is a length-1 array, so `x == 0` relies on implicit truth-testing of a
    # one-element array -- confirm ts always has exactly one column.
    print("Zero Values:\n",sum([(1 if x == 0 else 0) for x in ts.values]),"\n\nMissing Values:\n",ts.isnull().sum(),"\n\nFilled in Values:\n",ts.notnull().sum(), "\n")
idx = pandas.date_range(ts.index.min(), ts.index.max(), freq="1h")
ts = ts.reindex(idx, fill_value=None)
print("Before interpolation:\n")
print_values_stats()
ts = ts.replace(0, numpy.nan)
ts = ts.interpolate(limit_direction="both")
print("After interpolation:\n")
print_values_stats()
# -
# ### Plot values
# +
# Idea: Plot figure now and do not wait on ma.show() at the end of the notebook
def plot_without_waiting(ts_to_plot):
    """Plot *ts_to_plot* immediately in its own figure without blocking.

    A function attribute `figure_counter` gives each call a distinct figure
    number.
    """
    ma.ion()   # interactive mode so show()/draw() do not block
    ma.show()
    fig = ma.figure(plot_without_waiting.figure_counter)
    plot_without_waiting.figure_counter += 1
    ma.plot(ts_to_plot, color="blue")
    ma.draw()
    try:
        ma.pause(0.001) # throws NotImplementedError, ignore it
    except:
        pass
# Initialize the per-call figure number used above.
plot_without_waiting.figure_counter = 1
plot_without_waiting(ts)
# -
# ### Normalize time series for neural network
# LSTM cells are very sensitive to large scaled values. It's generally better to normalize them into <0,1> interval.
dates = ts.index # save dates for further use
scaler = sp.MinMaxScaler(feature_range=(0,1))
ts = scaler.fit_transform(ts)
# ### Split time series into train and test series
# We have decided to split train and test time series by two weeks.
train_data_length = 24*7
ts_train = ts[:train_data_length]
ts_test = ts[train_data_length+1:]
# ### Create train and test dataset for neural networks
# The neural network takes input from TS at time t and returns predicted output at time *t+1*. Generally, we could create neural network that would return predicted output at time *t+n*, just by adjusting *loop_samples* parameter.
# +
def dataset_create(ts, loop_samples):
    """Build supervised (window, next-value) pairs from a scaled series.

    x[i] = ts[i : i+loop_samples, 0] is the input window and
    y[i] = ts[i+loop_samples, 0] is the value to predict one step ahead.
    Fixes an off-by-one in the original range (len(ts)-loop_samples-1),
    which silently dropped the last usable sample.
    """
    x = []
    y = []
    for i in range(len(ts) - loop_samples):
        x.append(ts[i:(i + loop_samples), 0])
        y.append(ts[i + loop_samples, 0])
    return numpy.array(x), numpy.array(y)
train_dataset_x, train_dataset_y = dataset_create(ts_train, 1)
test_dataset_x, test_dataset_y = dataset_create(ts_test, 1)
# -
# ### Reshape datasets for NN into [batch size; timesteps; input dimensionality] format
# Keras library have specific needs in case of provided input's format. See https://keras.io/layers/recurrent/ for more details.
# +
def dataset_reshape_for_nn(dataset):
    """Insert a singleton timestep axis: (samples, features) -> (samples, 1, features),
    the [batch, timesteps, input_dim] layout Keras recurrent layers expect."""
    n_samples, n_features = dataset.shape[0], dataset.shape[1]
    return dataset.reshape(n_samples, 1, n_features)
train_dataset_x = dataset_reshape_for_nn(train_dataset_x)
test_dataset_x = dataset_reshape_for_nn(test_dataset_x)
# -
# ### Create recurrent neural network
# This recurrent neural network (RNN) consists of three layers (*input, hidden* and *output*). The input layer is implicitly specified by the hidden layer (*input_shape* parameter). Logically, we need to have exactly one input and one output node for one-step prediction. Number of hidden neurons is specified by *number_lstm_cells* variable.
#
# In this RNN we use LSTM cells with sigmoid (http://mathworld.wolfram.com/SigmoidFunction.html) activation function. The network is configured to use *mean square error* (MSE) as the optimization objective minimized during backpropagation, and the *stochastic gradient descent* (SGD) optimizer with default parameters (https://keras.io/optimizers/).
number_lstm_cells = 2
# Layer based network
network = km.Sequential()
# Hidden layer is made from LSTM nodes
network.add(kl.LSTM(number_lstm_cells, activation="sigmoid", input_shape=(1,1)))
# Output layer with one output (one step prediction)
network.add(kl.Dense(1))
network.compile(loss="mse", optimizer="sgd", metrics=['mean_squared_error'])
# ### Train neural network
#
# Train neural network on train data and plot MSE metrics for each iteration. Results and time of training process depends on *train_iterations* value.
train_iterations = 100
start_time = time.time()
print("Network fit started...\n")
network_history = network.fit(train_dataset_x, train_dataset_y, epochs=train_iterations, batch_size=1, verbose=0)
print("Network fit finished. Time elapsed: ", time.time() - start_time, "\n")
plot_without_waiting(network_history.history['mean_squared_error'])
# ### Predict new values
# The array *test_dataset_x* is used as an input for the network.
predicted_values_unscaled = network.predict(test_dataset_x)
# Scale the predicted values back using MinMaxScaler
predicted_values_scaled = scaler.inverse_transform(predicted_values_unscaled)
# Scale test values back so we can compare the result
test_values_scaled = scaler.inverse_transform(ts_test)
# ### Count mean absolute percentage error
# We use MAPE (https://www.forecastpro.com/Trends/forecasting101August2011.html) instead of MSE because the result of MAPE does not depend on size of values.
# +
values_sum = 0
for value in zip(test_values_scaled, predicted_values_scaled):
actual = value[0][0]
predicted = value[1][0]
values_sum += abs((actual - predicted) / actual)
values_sum *= 100/len(test_values_scaled)
print("MAPE: ", values_sum, "%\n")
# -
# ### Plot predicted values
fig = ma.figure(plot_without_waiting.figure_counter)
ma.plot(test_values_scaled, color="blue")
ma.plot(predicted_values_scaled, color="red")
ts_len = len(ts)
date_offset_indices = ts_len // 5
ma.xticks(range(0, ts_len-train_data_length, date_offset_indices), [x.date().strftime('%Y-%m-%d') for x in dates[train_data_length::date_offset_indices]])
fig.show()
| analyses/SERV-1/Usr-1h-RNN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import os
os.listdir('../data')
assert 'out_breed.csv' in os.listdir('../data') # this assert breaks if the data is configured uncorrectly
breeds = pd.read_csv('../data/out_breed.csv')
colors = pd.read_csv('../data/out_color.csv')
states = pd.read_csv('../data/out_state.csv')
train = pd.read_csv('../data/out_train.csv')
test = pd.read_csv('../data/out_test.csv')
sub = pd.read_csv('../data/out_submission.csv')
average_distribution = (train['AdoptionSpeed'].value_counts() / len(train)).sort_index()
average_distribution
plt.bar([0,1,2,3,4], average_distribution)
plt.title("this is the distribution of the target classes over the whole dataset")
# <h1>FEAT #05: is cat and is cat's age less then 5 months
cats = train[train['Type'] == 2]
FEAT = cats['Age'] < 5
len(cats[~FEAT]), len(cats[FEAT]), len(cats[~FEAT]) / len(cats[FEAT])
feat_distribution = (cats[FEAT]['AdoptionSpeed'].value_counts() / len(cats[FEAT])).sort_index()
feat_distribution
nofeat_distribution = (cats[~FEAT]['AdoptionSpeed'].value_counts() / len(cats[~FEAT])).sort_index()
nofeat_distribution
size = 0.3
plt.bar([i - size for i in range(len(feat_distribution))], feat_distribution, width=size, label='feature distribution')
plt.bar(range(len(nofeat_distribution)), nofeat_distribution, width=size, label='non feature distribution')
plt.legend()
# <p>this look amazingly good to find class 4 for cats, should look more into how to build mean encoding for multi-class classification
| features-engineering/AGE_features.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import glob
import pickle
import operator
import matplotlib
import scipy.stats as stats
import statsmodels.stats.multitest as multi
from itertools import chain
from sklearn.preprocessing import minmax_scale
from scipy.stats import linregress, spearmanr, pearsonr
plt.rcParams['figure.figsize'] = [15, 10]
import warnings; warnings.simplefilter('ignore')
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
# raw data files needed to run are available at SCP: https://singlecell.broadinstitute.org/single_cell/study/SCP979/
# please download: mob_aba_energy.csv,lambdas_pmeans_output_mob.tsv, 10015CN83*annotations.txt, 10015CN84*annotations.txt,
# 10015CN85*annotations.txt,10015CN60*annotations.txt,lambdas_pmeans_output_STref.tsv, SRR*annotations.txt
# Load definitions
# +
def Extract(lst):
    """Return the first element of every sequence in *lst*.

    Args:
        lst: list of tuples/sequences (e.g. rows), each with at least one item.

    Returns:
        list of the first items, in order; [] when *lst* is empty
        (the previous version raised StopIteration on empty input).
    """
    if not lst:
        return []
    # zip(*lst) transposes the rows; next(...) grabs the first "column".
    return list(next(zip(*lst)))
def ftest(st_spec_cutoff, aba_spec_cutoff):
    """Run pairwise Fisher's exact tests between SM-Omics and ABA region gene sets.

    For every condition in `st_spec_cutoff` and every ordered pair of regions
    (i from SM-Omics annotations, j from ABA annotations), builds a 2x2
    contingency table of gene-set overlaps and tests for enrichment.

    Args:
        st_spec_cutoff: DataFrame with columns 'condition_1', 'AAR1', 'gene_new'.
        aba_spec_cutoff: DataFrame with columns 'ABA_region', 'gene-symbol'.

    Returns:
        (pval_list, df_ff): the raw p-values (for multiple-testing correction
        downstream) and a DataFrame with one column per test, indexed by
        ['condition', 'AAR_ST', 'AAR_ABA', 'Odds ratio', 'p value',
        'Num shared genes'].
    """
    bb_count = 0          # running column counter for df_ff
    fisher_dict = {}      # only used to seed the very first column of df_ff
    pval_list = []
    for condition, df in st_spec_cutoff.groupby('condition_1'):
        # NOTE(review): region list is derived from the WHOLE table, not the
        # current condition's subset -- presumably intentional so every
        # condition is tested over the same regions; confirm.
        regions_tmp = list(set(st_spec_cutoff['AAR1'].tolist()))
        regions = [x for x in regions_tmp if str(x) != 'nan']
        for i in regions:
            for j in regions:
                #SM-Omics genes
                st_genes = df[df['AAR1'] == i]['gene_new'].tolist()
                # ABA-genes
                aba_genes = aba_spec_cutoff[aba_spec_cutoff['ABA_region'] == j]['gene-symbol'].tolist()
                # SM-Omics genes in all other regions
                st_rest = df[df['AAR1'] != i]['gene_new'].tolist()
                # ABA genes in all other regions
                aba_rest = aba_spec_cutoff[aba_spec_cutoff['ABA_region'] != j]['gene-symbol'].tolist()
                # g1 = genes in both ST and ABA
                # g2 = genes unique to ST
                # g3 = genes unique to ABA
                # g4 = genes neither in st or aba region but in the other regions
                g1 = len(list(set(st_genes).intersection(aba_genes)))
                g2 = len(list(set(aba_genes).difference(set(st_genes))))
                g3 = len(list(set(st_genes).difference(set(aba_genes))))
                g4 = len(list(set(st_rest).intersection(aba_rest)))
                # Fisher's test (one-sided: enrichment of the overlap)
                oddsratio, pvalue = stats.fisher_exact([[g4, g2], [g3, g1]], alternative='greater')
                # Store pvalues in list to use for multiple corrections testing
                pval_list.append(pvalue)
                # Store fisher's test results in DF
                ff = [condition, i, j, oddsratio, pvalue, g1]
                # print(i, j, g1, g2, g3, g4, pvalue)
                if bb_count == 0:
                    # First test: create df_ff with a labelled index;
                    # subsequent tests are appended as new columns below.
                    fisher_dict[bb_count] = ff
                    df_ff = pd.DataFrame.from_dict(fisher_dict)
                    df_ff['idx'] = ['condition', 'AAR_ST', 'AAR_ABA','Odds ratio', 'p value', 'Num shared genes']
                    df_ff.set_index('idx', inplace = True)
                    bb_count += 1
                else:
                    df_ff[bb_count] = ff
                    bb_count += 1
    return pval_list, df_ff
def read_array_metadata(metadata, filename, n_levels, extraplot=None):
    """Look up the annotation levels for one count file in a metadata table.

    Args:
        metadata: DataFrame with a 'Count file' column plus 'Level 1'..'Level n'
            columns and optionally an 'Extra_plot_level' column.
        filename: value to match against the 'Count file' column.
        n_levels: number of 'Level <i>' columns to extract.
        extraplot: if not None, return the 'Extra_plot_level' values instead
            of the numbered levels.

    Returns:
        list of level labels for the matched row.
    """
    array_metadata = metadata[metadata['Count file'] == filename]
    array_levels = [array_metadata['Level %d' % (idx + 1)].values[0]
                    for idx in range(n_levels)]
    # PEP 8: identity check against None, not '!= None'.
    if extraplot is not None:
        array_levels = array_metadata['Extra_plot_level'].tolist()
    return array_levels
# NOTE(review): this redefines the Extract() declared earlier in the file;
# one of the two copies should eventually be deleted. Kept consistent with
# the first definition (empty-safe) so redefinition does not change behavior.
def Extract(lst):
    """Return the first element of every sequence in *lst*.

    Args:
        lst: list of tuples/sequences, each with at least one item.

    Returns:
        list of the first items, in order; [] when *lst* is empty
        (the previous version raised StopIteration on empty input).
    """
    if not lst:
        return []
    # zip(*lst) transposes the rows; next(...) grabs the first "column".
    return list(next(zip(*lst)))
def rename_lambdas_index(lambdas_file):
    """Strip everything after the first underscore from each index label.

    Args:
        lambdas_file: DataFrame (or anything with an ``.index`` iterable).

    Returns:
        list of labels truncated at the first '_' (labels without an
        underscore pass through unchanged).
    """
    return [str(label).split("_")[0] for label in lambdas_file.index]
# -
# Load ABA ref files
path = '../../smomics_data/'
aba_spec = pd.read_csv(os.path.join(path, 'mob_aba_energy.csv'), index_col=0)
aba_spec
# +
# Pick genes to correlate to SM-Omics signals per region
genes = ['Ctgf', 'Camk4', 'Lancl3', 'Cbln4', 'Nr2f2',]
# +
# Load SM-Omics data for MOB
# +
# Load Lambda pmean df
path = '../../smomics_data'
# Read file
filename = os.path.join(path, 'lambdas_pmeans_output_mob.tsv')
lambda_posterior_means = pd.read_csv(filename, index_col=0, sep='\t', header=[0,1],low_memory=False)
lambda_posterior_means.index = rename_lambdas_index(lambda_posterior_means)
# -
# Take exp()
lambda_posterior_means = lambda_posterior_means.astype(float)
lambda_posterior_means = np.exp(lambda_posterior_means-1)
# +
annot_path = '../../smomics_data'
annot_list = []
for stdata in list(np.unique([i[0].split("/")[-1].split("_stdata_adjusted.tsv")[0] for i in lambda_posterior_means.columns])):
well = stdata.split('_stdata')[0]
filename = os.path.join(annot_path, well + '_annotations.txt')
annot_file = pd.read_csv(filename, sep = '\t')
# Remove 'Unknown'
#file = file[file['value'] != 'Unknown']
# Change to int if spot coord are in decimals
if len(annot_file['x_y'][0].split('.')) == 3: # Spot coord in decimals
# Detect which delimiter separates the spot coord
if len(annot_file['x_y'][0].split('_')) == 2:
delimi = '_'
elif len(annot_file['x_y'][0].split('x')) == 2:
delimi = 'x'
else:
print('Unknown spot coordinate delimiter.')
# Round up
annot_file['x'] = annot_file['x'].round(0)
annot_file['y'] = annot_file['y'].round(0)
# Make columns to int type
annot_file['x'] = annot_file['x'].astype(int)
annot_file['y'] = annot_file['y'].astype(int)
# Make columns to string type
annot_file['x'] = annot_file['x'].astype(str)
annot_file['y'] = annot_file['y'].astype(str)
# Make a new columnwith the rounded values
annot_file['spot_coord'] = annot_file[['x', 'y']].apply(lambda z: '_'.join(z), axis=1)
annot_file.drop(['x_y'], axis=1, inplace=True)
annot_file.rename(columns = {'spot_coord':'x_y'}, inplace=True)
# Keep certain columns:
annot_file = annot_file.loc[:, ['image', 'x_y', 'value']]
annot_list.append(annot_file)
# Concat
annotdf = pd.concat(annot_list)
annotdf = annotdf[annotdf['value'] != 'Unknown']
annotdf = annotdf[annotdf['value'].isin([i for i in annotdf['value'] if ";" not in i])]
annotdf['region'] = [i.split("(")[1].split(")")[0] for i in annotdf.value]
annotdf['region'] = annotdf['region'].replace({'GCL-D' : 'GR',
'GCL-E' : 'GR',
'M/T' : 'MI',
'EPL' : 'OPL'})
annotdf = annotdf[annotdf['region'] != 'ONL']
# -
list(np.unique([i[0].split("/")[-1].split("_stdata_adjusted.tsv")[0] for i in lambda_posterior_means.columns]))
# Per-gene, per-region scaled mean expression for the SM-Omics MOB samples.
gene_scaled = []
gene_names = []
for gene in genes:
    # reset index
    if gene in lambda_posterior_means.index:
        lambda_gene = lambda_posterior_means.loc[gene, :].to_frame().reset_index()
        #print(gene)
        # Get name of file, for plotting
        lambda_gene['name'] = lambda_gene['file'].str.split('/').str[-1].str.split('_stdata').str[0]
        # Scales within each sample
        gene_scale = lambda_gene.copy()
        gene_scale[gene+'.scaled'] = gene_scale.groupby('name')[gene].transform(lambda x: minmax_scale(x))
        # connects to annotation areas
        lambdadf = pd.merge(gene_scale, annotdf, how='left', left_on=['name','coordinate'], right_on=['image', 'x_y'])
        # Drop spots without an annotation (left-merge misses become NaN rows).
        lambdadf = lambdadf.dropna(axis = 0)
        # Region-mean expression, min-max scaled across regions for this gene.
        gene_scaled.append(list(pd.DataFrame(lambdadf.groupby('region')[gene].mean()).transform(lambda x: minmax_scale(x))[gene]))
        #gene_scaled.append(np.mean(lambdadf[gene]))
        gene_names.append(gene)
# Assemble a regions x genes DataFrame; the index comes from the LAST gene's
# groupby -- assumes every gene covers the same region set (TODO confirm).
sm_df = pd.DataFrame(gene_scaled).T
sm_df.columns = gene_names
sm_df.index = lambdadf.groupby('region')[gene].mean().index
sm_df['Condition'] = 'sm-omics'
# +
aba_scaled = []
gene_names = []
for gene in genes:
if gene in aba_spec.index:
abs_gene = aba_spec.loc[gene]
#print(np.log(pd.DataFrame(abs_gene.groupby('ABA_region')['Expression target'].mean()).transform(lambda x: minmax_scale(x))))
aba_scaled.append(list(pd.DataFrame((abs_gene.groupby('region')['aba-mean'].mean())).transform(lambda x: minmax_scale(x))['aba-mean']))
gene_names.append(gene)
aba_df = pd.DataFrame(aba_scaled).T
aba_df.columns = gene_names
aba_df.index = abs_gene.groupby('region')['aba-mean'].mean().index
aba_df['Condition'] = 'aba'
# -
abs_gene
"Make list for corplots"
corr_df = pd.DataFrame([[item for sublist in aba_scaled for item in sublist],[item for sublist in gene_scaled for item in sublist]]).T
corr_df.columns = ['aba', 'sm-omics']
corr_df['region'] = list(np.unique(abs_gene.groupby('region')['aba-mean'].mean().index))*len(gene_names)
corr_df['gene'] = [j for j in gene_names for i in np.unique(abs_gene.groupby('region')['aba-mean'].mean().index)]
corr_df = corr_df.fillna(0)
corr_df
for gene in genes:
# initialize a figure
fig, ax = plt.subplots(figsize=(4, 2))
corr_df_sub = corr_df[corr_df['gene'] == gene]
corr_df_sub['jitter_y'] = corr_df_sub['sm-omics'] + 0.05 * np.random.rand(len(corr_df_sub['sm-omics'].tolist())) +0.01
corr_df_sub['jitter_x'] = corr_df_sub['aba'] + 0.05 * np.random.rand(len(corr_df_sub['aba'].tolist())) +0.01
x = 'jitter_x'
y = 'jitter_y'
hue = 'region'
sns.scatterplot(x=x, y=y, hue=hue, data=corr_df_sub, ax=ax, s=50, linewidth=0)
# Calculate the linear regression
slope, intercept, r_value, p_value, std_err = linregress(corr_df_sub[x], corr_df_sub[y])
coef = np.polyfit(corr_df_sub[x],corr_df_sub[y],1)
poly1d_fn = np.poly1d(coef)
ax.plot(corr_df_sub[x], poly1d_fn(corr_df_sub[x]), c="k")
ax.plot(corr_df_sub[x], poly1d_fn(corr_df_sub[x])+std_err, c="grey")
ax.plot(corr_df_sub[x], poly1d_fn(corr_df_sub[x])-std_err, c="grey")
ax.set_ylim([-0.05,1.05])
ax.set_xlim([-0.05,1.05])
#ax.errorbar(trans[x], poly1d_fn(trans[x]), yerr=poly1d_fn(trans[x]) - trans[y], fmt='.k')
# Spearman's & Pearson's r
x = 'sm-omics'
y = 'aba'
spear, pspear = spearmanr(corr_df_sub[x], corr_df_sub[y])
pear, p = pearsonr(corr_df_sub[x], corr_df_sub[y])
print(gene)
print("Spearman's r: " + str(round(spear, 3)))
print("Spearman's p value: " + str(pspear))
ax.text(0.05, 0.9, "Spearman's r: " + str(round(spear, 3)), transform=ax.transAxes, fontsize=10)
ax.set_title(gene, fontsize=10)
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.tight_layout()
#plt.savefig("IFvsGENE_Neun.pdf", transparent=True, bbox_inches = 'tight',
# pad_inches = 0, dpi=1200, rasterized=True)
# +
# initialize a figure
fig, ax = plt.subplots(figsize=(4, 2))
corr_df_sub = corr_df
corr_df_sub['jitter_y'] = corr_df_sub['sm-omics'] + 0.05 * np.random.rand(len(corr_df_sub['sm-omics'].tolist())) +0.01
corr_df_sub['jitter_x'] = corr_df_sub['aba'] + 0.05 * np.random.rand(len(corr_df_sub['aba'].tolist())) +0.01
x = 'jitter_x'
y = 'jitter_y'
hue = 'region'
hue = 'region'
style = 'gene'
corr_df_sub = corr_df
sns.scatterplot(x=x, y=y, hue=hue, style=style, data=corr_df_sub, ax=ax, s=50, linewidth=0)
# Calculate the linear regression
slope, intercept, r_value, p_value, std_err = linregress(corr_df_sub[x], corr_df_sub[y])
coef = np.polyfit(corr_df_sub[x],corr_df_sub[y],1)
poly1d_fn = np.poly1d(coef)
ax.plot(corr_df_sub[x], poly1d_fn(corr_df_sub[x]), c="k")
ax.plot(corr_df_sub[x], poly1d_fn(corr_df_sub[x])+std_err, c="grey")
ax.plot(corr_df_sub[x], poly1d_fn(corr_df_sub[x])-std_err, c="grey")
ax.set_ylim([-0.05,1.1])
ax.set_xlim([-0.05,1.1])
#ax.errorbar(trans[x], poly1d_fn(trans[x]), yerr=poly1d_fn(trans[x]) - trans[y], fmt='.k')
# Spearman's & Pearson's r
x = 'sm-omics'
y = 'aba'
spear, pspear = spearmanr(corr_df_sub[x], corr_df_sub[y])
pear, p = pearsonr(corr_df_sub[x], corr_df_sub[y])
print("Spearman's r: " + str(round(spear, 3)))
print("Spearman's p value: " + str(pspear))
ax.text(0.05, 0.9, "Spearman's r: " + str(round(spear, 2)), transform=ax.transAxes, fontsize=10)
ax.text(0.05, 0.8, "p-value: " + str(round(pspear, 6)), transform=ax.transAxes, fontsize=10)
ax.set_title('aba sm-omics correlation', fontsize=10)
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.tight_layout()
plt.savefig("../../figures/aba_sm_all_genes_correlation.pdf", transparent=True, bbox_inches = 'tight',
pad_inches = 0, dpi=1200, rasterized=True)
# +
# Repeat correlations for SM-Omics and ST vs aba on different genes found in both
# -
# Pick genes to correlate to SM-Omics signals per region
genes = ['Nrsn1', 'Nos1ap', 'Cdh23', 'Prss12',]
gene_scaled = []
gene_names = []
for gene in genes:
# reset index
if gene in lambda_posterior_means.index:
lambda_gene = lambda_posterior_means.loc[gene, :].to_frame().reset_index()
#print(gene)
# Get name of file, for plotting
lambda_gene['name'] = lambda_gene['file'].str.split('/').str[-1].str.split('_stdata').str[0]
# Scales within each sample
gene_scale = lambda_gene.copy()
gene_scale[gene+'.scaled'] = gene_scale.groupby('name')[gene].transform(lambda x: minmax_scale(x))
# connects to annotation areas
lambdadf = pd.merge(gene_scale, annotdf, how='left', left_on=['name','coordinate'], right_on=['image', 'x_y'])
lambdadf = lambdadf.dropna(axis = 0)
gene_scaled.append(list(pd.DataFrame(lambdadf.groupby('region')[gene].mean()).transform(lambda x: minmax_scale(x))[gene]))
#gene_scaled.append(np.mean(lambdadf[gene]))
gene_names.append(gene)
sm_df = pd.DataFrame(gene_scaled).T
sm_df.columns = gene_names
sm_df.index = lambdadf.groupby('region')[gene].mean().index
sm_df['Condition'] = 'sm-omics'
# +
aba_scaled = []
gene_names = []
for gene in genes:
if gene in aba_spec.index:
abs_gene = aba_spec.loc[gene]
#print(np.log(pd.DataFrame(abs_gene.groupby('ABA_region')['Expression target'].mean()).transform(lambda x: minmax_scale(x))))
aba_scaled.append(list(pd.DataFrame((abs_gene.groupby('region')['aba-mean'].mean())).transform(lambda x: minmax_scale(x))['aba-mean']))
gene_names.append(gene)
aba_df = pd.DataFrame(aba_scaled).T
aba_df.columns = gene_names
aba_df.index = abs_gene.groupby('region')['aba-mean'].mean().index
aba_df['Condition'] = 'aba'
# -
abs_gene
"Make list for corplots"
corr_df = pd.DataFrame([[item for sublist in aba_scaled for item in sublist],[item for sublist in gene_scaled for item in sublist]]).T
corr_df.columns = ['aba', 'sm-omics']
corr_df['region'] = list(np.unique(abs_gene.groupby('region')['aba-mean'].mean().index))*len(gene_names)
corr_df['gene'] = [j for j in gene_names for i in np.unique(abs_gene.groupby('region')['aba-mean'].mean().index)]
corr_df = corr_df.fillna(0)
for gene in genes:
# initialize a figure
fig, ax = plt.subplots(figsize=(4, 2))
corr_df_sub = corr_df[corr_df['gene'] == gene]
corr_df_sub['jitter_y'] = corr_df_sub['sm-omics'] + 0.05 * np.random.rand(len(corr_df_sub['sm-omics'].tolist())) +0.01
corr_df_sub['jitter_x'] = corr_df_sub['aba'] + 0.05 * np.random.rand(len(corr_df_sub['aba'].tolist())) +0.01
x = 'jitter_x'
y = 'jitter_y'
hue = 'region'
sns.scatterplot(x=x, y=y, hue=hue, data=corr_df_sub, ax=ax, s=50, linewidth=0)
# Calculate the linear regression
slope, intercept, r_value, p_value, std_err = linregress(corr_df_sub[x], corr_df_sub[y])
coef = np.polyfit(corr_df_sub[x],corr_df_sub[y],1)
poly1d_fn = np.poly1d(coef)
ax.plot(corr_df_sub[x], poly1d_fn(corr_df_sub[x]), c="k")
ax.plot(corr_df_sub[x], poly1d_fn(corr_df_sub[x])+std_err, c="grey")
ax.plot(corr_df_sub[x], poly1d_fn(corr_df_sub[x])-std_err, c="grey")
ax.set_ylim([-0.05,1.05])
ax.set_xlim([-0.05,1.05])
#ax.errorbar(trans[x], poly1d_fn(trans[x]), yerr=poly1d_fn(trans[x]) - trans[y], fmt='.k')
# Spearman's & Pearson's r
x = 'sm-omics'
y = 'aba'
spear, pspear = spearmanr(corr_df_sub[x], corr_df_sub[y])
pear, p = pearsonr(corr_df_sub[x], corr_df_sub[y])
print(gene)
print("Spearman's r: " + str(round(spear, 3)))
print("Spearman's p value: " + str(pspear))
ax.text(0.05, 0.9, "Spearman's r: " + str(round(spear, 3)), transform=ax.transAxes, fontsize=10)
ax.set_title(gene, fontsize=10)
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.tight_layout()
#plt.savefig("IFvsGENE_Neun.pdf", transparent=True, bbox_inches = 'tight',
# pad_inches = 0, dpi=1200, rasterized=True)
# +
# initialize a figure
fig, ax = plt.subplots(figsize=(4, 2))
corr_df_sub = corr_df
corr_df_sub['jitter_y'] = corr_df_sub['sm-omics'] + 0.05 * np.random.rand(len(corr_df_sub['sm-omics'].tolist())) +0.01
corr_df_sub['jitter_x'] = corr_df_sub['aba'] + 0.05 * np.random.rand(len(corr_df_sub['aba'].tolist())) +0.01
x = 'jitter_x'
y = 'jitter_y'
hue = 'region'
hue = 'region'
style = 'gene'
corr_df_sub = corr_df
sns.scatterplot(x=x, y=y, hue=hue, style=style, data=corr_df_sub, ax=ax, s=50, linewidth=0)
# Calculate the linear regression
slope, intercept, r_value, p_value, std_err = linregress(corr_df_sub[x], corr_df_sub[y])
coef = np.polyfit(corr_df_sub[x],corr_df_sub[y],1)
poly1d_fn = np.poly1d(coef)
ax.plot(corr_df_sub[x], poly1d_fn(corr_df_sub[x]), c="k")
ax.plot(corr_df_sub[x], poly1d_fn(corr_df_sub[x])+std_err, c="grey")
ax.plot(corr_df_sub[x], poly1d_fn(corr_df_sub[x])-std_err, c="grey")
ax.set_ylim([-0.05,1.05])
ax.set_xlim([-0.05,1.05])
#ax.errorbar(trans[x], poly1d_fn(trans[x]), yerr=poly1d_fn(trans[x]) - trans[y], fmt='.k')
# Spearman's & Pearson's r
x = 'sm-omics'
y = 'aba'
spear, pspear = spearmanr(corr_df_sub[x], corr_df_sub[y])
pear, p = pearsonr(corr_df_sub[x], corr_df_sub[y])
print("Spearman's r: " + str(round(spear, 3)))
print("Spearman's p value: " + str(pspear))
ax.text(0.05, 0.9, "Spearman's r: " + str(round(spear, 2)), transform=ax.transAxes, fontsize=10)
ax.text(0.05, 0.8, "p-value: " + str(round(pspear, 6)), transform=ax.transAxes, fontsize=10)
ax.set_title('aba sm-omics correlation', fontsize=10)
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.tight_layout()
plt.savefig("../../figures/aba_sm_st_genes_correlation.pdf", transparent=True, bbox_inches = 'tight',
pad_inches = 0, dpi=1200, rasterized=True)
# +
# Load ST data for MOB
# +
# Load Lambda pmean df
path = '../../smomics_data'
# Read file
filename = os.path.join(path, 'lambdas_pmeans_output_STref.tsv')
lambda_posterior_means = pd.read_csv(filename, index_col=0, sep='\t', header=[0,1],low_memory=False)
lambda_posterior_means.index = rename_lambdas_index(lambda_posterior_means)
# -
# Take exp()
lambda_posterior_means = lambda_posterior_means.astype(float)
lambda_posterior_means = np.exp(lambda_posterior_means-1)
# +
annot_path = '../../smomics_data'
annot_list = []
for stdata in list(np.unique([i[0].split("/")[-1].split("_stdata_adjusted.tsv")[0] for i in lambda_posterior_means.columns])):
well = stdata.split('_stdata')[0]
filename = os.path.join(annot_path, well + '_annotations.txt')
annot_file = pd.read_csv(filename, sep = '\t')
# Remove 'Unknown'
#file = file[file['value'] != 'Unknown']
# Change to int if spot coord are in decimals
if len(annot_file['x_y'][0].split('.')) == 3: # Spot coord in decimals
# Detect which delimiter separates the spot coord
if len(annot_file['x_y'][0].split('_')) == 2:
delimi = '_'
elif len(annot_file['x_y'][0].split('x')) == 2:
delimi = 'x'
else:
print('Unknown spot coordinate delimiter.')
# Round up
annot_file['x'] = annot_file['x'].round(0)
annot_file['y'] = annot_file['y'].round(0)
# Make columns to int type
annot_file['x'] = annot_file['x'].astype(int)
annot_file['y'] = annot_file['y'].astype(int)
# Make columns to string type
annot_file['x'] = annot_file['x'].astype(str)
annot_file['y'] = annot_file['y'].astype(str)
# Make a new columnwith the rounded values
annot_file['spot_coord'] = annot_file[['x', 'y']].apply(lambda z: '_'.join(z), axis=1)
annot_file.drop(['x_y'], axis=1, inplace=True)
annot_file.rename(columns = {'spot_coord':'x_y'}, inplace=True)
# Keep certain columns:
annot_file = annot_file.loc[:, ['image', 'x_y', 'value']]
annot_list.append(annot_file)
# Concat
annotdf = pd.concat(annot_list)
annotdf = annotdf[annotdf['value'] != 'Unknown']
annotdf = annotdf[annotdf['value'].isin([i for i in annotdf['value'] if ";" not in i])]
annotdf['region'] = [i.split("(")[1].split(")")[0] for i in annotdf.value]
annotdf['region'] = annotdf['region'].replace({'GCL-D' : 'GR',
'GCL-E' : 'GR',
'M/T' : 'MI',
'EPL' : 'OPL',
'E':'GR',
'RMS':'GR',
'GCL-I':'GR'})
annotdf = annotdf[annotdf['region'] != 'ONL']
# -
annotdf['image'] = annotdf['image'].replace({'Rep1' : 'SRR3382371',
'Rep2' : 'SRR3382372',
'Rep3' : 'SRR3382383',
'Rep4' : 'SRR3382384',
'Rep5' : 'SRR3382385',
'Rep6' : 'SRR3382386',
'Rep7' : 'SRR3382387',
'Rep8' : 'SRR3382388',
'Rep9' : 'SRR3382389',
'Rep10' : 'SRR3382390',
'Rep11' : 'SRR3382373',
'Rep12' : 'SRR3382374', })
gene_scaled = []
gene_names = []
for gene in genes:
# reset index
if gene in lambda_posterior_means.index:
lambda_gene = lambda_posterior_means.loc[gene, :].to_frame().reset_index()
#print(gene)
# Get name of file, for plotting
lambda_gene['name'] = lambda_gene['file'].str.split('/').str[-1].str.split('_stdata').str[0]
# Scales within each sample
gene_scale = lambda_gene.copy()
gene_scale[gene+'.scaled'] = gene_scale.groupby('name')[gene].transform(lambda x: minmax_scale(x))
# connects to annotation areas
lambdadf = pd.merge(gene_scale, annotdf, how='left', left_on=['name','coordinate'], right_on=['image', 'x_y'])
lambdadf = lambdadf.dropna(axis = 0)
gene_scaled.append(list(pd.DataFrame(lambdadf.groupby('region')[gene].mean()).transform(lambda x: minmax_scale(x))[gene]))
#gene_scaled.append(np.mean(lambdadf[gene]))
gene_names.append(gene)
sm_df = pd.DataFrame(gene_scaled).T
sm_df.columns = gene_names
sm_df.index = lambdadf.groupby('region')[gene].mean().index
sm_df['Condition'] = 'st'
sm_df
# +
aba_scaled = []
gene_names = []
for gene in genes:
if gene in aba_spec.index:
abs_gene = aba_spec.loc[gene]
#print(np.log(pd.DataFrame(abs_gene.groupby('ABA_region')['Expression target'].mean()).transform(lambda x: minmax_scale(x))))
aba_scaled.append(list(pd.DataFrame((abs_gene.groupby('region')['aba-mean'].mean())).transform(lambda x: minmax_scale(x))['aba-mean']))
gene_names.append(gene)
aba_df = pd.DataFrame(aba_scaled).T
aba_df.columns = gene_names
aba_df.index = abs_gene.groupby('region')['aba-mean'].mean().index
aba_df['Condition'] = 'aba'
# -
aba_df
"Make list for corplots"
corr_df = pd.DataFrame([[item for sublist in aba_scaled for item in sublist],[item for sublist in gene_scaled for item in sublist]]).T
corr_df.columns = ['aba', 'st']
corr_df['region'] = list(np.unique(abs_gene.groupby('region')['aba-mean'].mean().index))*len(gene_names)
corr_df['gene'] = [j for j in gene_names for i in np.unique(abs_gene.groupby('region')['aba-mean'].mean().index)]
corr_df = corr_df.fillna(0)
# +
for gene in genes:
# initialize a figure
fig, ax = plt.subplots(figsize=(4, 2))
corr_df_sub = corr_df[corr_df['gene'] == gene]
corr_df_sub['jitter_y'] = corr_df_sub['st'] + 0.05 * np.random.rand(len(corr_df_sub['st'].tolist())) +0.01
corr_df_sub['jitter_x'] = corr_df_sub['aba'] + 0.05 * np.random.rand(len(corr_df_sub['aba'].tolist())) +0.01
x = 'jitter_x'
y = 'jitter_y'
hue = 'region'
style = 'gene'
sns.scatterplot(x=x, y=y, hue=hue, data=corr_df_sub, ax=ax, s=50, linewidth=0)
# Calculate the linear regression
slope, intercept, r_value, p_value, std_err = linregress(corr_df_sub[x], corr_df_sub[y])
coef = np.polyfit(corr_df_sub[x],corr_df_sub[y],1)
poly1d_fn = np.poly1d(coef)
ax.plot(corr_df_sub[x], poly1d_fn(corr_df_sub[x]), c="k")
ax.plot(corr_df_sub[x], poly1d_fn(corr_df_sub[x])+std_err, c="grey")
ax.plot(corr_df_sub[x], poly1d_fn(corr_df_sub[x])-std_err, c="grey")
ax.set_ylim([-0.05,1.05])
ax.set_xlim([-0.05,1.05])
#ax.errorbar(trans[x], poly1d_fn(trans[x]), yerr=poly1d_fn(trans[x]) - trans[y], fmt='.k')
# Spearman's & Pearson's r
x = 'st'
y = 'aba'
spear, pspear = spearmanr(corr_df_sub[x], corr_df_sub[y])
pear, p = pearsonr(corr_df_sub[x], corr_df_sub[y])
print(gene)
print("Spearman's r: " + str(round(spear, 3)))
print("Spearman's p value: " + str(pspear))
ax.text(0.05, 0.9, "Spearman's r: " + str(round(spear, 3)), transform=ax.transAxes, fontsize=10)
ax.set_title(gene, fontsize=10)
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.tight_layout()
# +
# initialize a figure
fig, ax = plt.subplots(figsize=(4, 2))
corr_df_sub = corr_df
corr_df_sub['jitter_y'] = corr_df_sub['st'] + 0.05 * np.random.rand(len(corr_df_sub['st'].tolist())) +0.01
corr_df_sub['jitter_x'] = corr_df_sub['aba'] + 0.05 * np.random.rand(len(corr_df_sub['aba'].tolist())) +0.01
x = 'jitter_x'
y = 'jitter_y'
hue = 'region'
style = 'gene'
sns.scatterplot(x=x, y=y, hue=hue, style=style, data=corr_df_sub, ax=ax, s=50, linewidth=0)
# Calculate the linear regression
slope, intercept, r_value, p_value, std_err = linregress(corr_df_sub[x], corr_df_sub[y])
coef = np.polyfit(corr_df_sub[x],corr_df_sub[y],1)
poly1d_fn = np.poly1d(coef)
ax.plot(corr_df_sub[x], poly1d_fn(corr_df_sub[x]), c="k")
ax.plot(corr_df_sub[x], poly1d_fn(corr_df_sub[x])+std_err, c="grey")
ax.plot(corr_df_sub[x], poly1d_fn(corr_df_sub[x])-std_err, c="grey")
ax.set_ylim([-0.05,1.05])
ax.set_xlim([-0.05,1.05])
#ax.errorbar(trans[x], poly1d_fn(trans[x]), yerr=poly1d_fn(trans[x]) - trans[y], fmt='.k')
# Spearman's & Pearson's r
x = 'st'
y = 'aba'
spear, pspear = spearmanr(corr_df_sub[x], corr_df_sub[y])
pear, p = pearsonr(corr_df_sub[x], corr_df_sub[y])
print("Spearman's r: " + str(round(spear, 3)))
print("Spearman's p value: " + str(pspear))
ax.text(0.05, 0.9, "Spearman's r: " + str(round(spear, 2)), transform=ax.transAxes, fontsize=10)
ax.text(0.05, 0.8, "p-value: " + str(round(pspear, 6)), transform=ax.transAxes, fontsize=10)
ax.set_title('aba st correlation', fontsize=10)
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.tight_layout()
plt.savefig("../../figures/aba_st_genes_correlation.pdf", transparent=True, bbox_inches = 'tight',
pad_inches = 0, dpi=1200, rasterized=True)
# -
| enrichments/ABA_correlations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# #Sunburst Zero Emissions IPython notebook by <NAME>.
#
# # %Based on paper: Davis et al., Science 360, 1419 (2018) 29 June 2018
#
# # %This paper is titled "Net-zero emissions energy systems"
#
# % Main Point of Paper: In order to eliminate CO2 emissions, there are certain difficult-to-eliminate emissions industries like Load-Following Electricity, Iron & Steel, Cement, Shipping, Aviation, and Long-Distance Road Transport. The paper analyzes each of these industries and offers valuable methods to mitigate the CO2-intensive processes.
#
# % Data is cited from <NAME>-Maenhout et al., EDGAR v4.3.2 Global Atlas of the three major greenhouse gas emissions for the period 1970-2012. Earth System Science Data, (2017
#
# % Data URL: https://www.earth-syst-sci-data-discuss.net/essd-2017-79/essd-2017-79.pdf
#
# % Data for Load-Following Electricity: https://reader.elsevier.com/reader/sd/CEEA1CA219B4280EC5C2FFC0A349182E96E6F328E9865A2C7216C1E6C32867F507520BB8864ADF9893AB6811ABB2E0CF
# +
import numpy as np
import matplotlib.pyplot as plt
def sunburst(nodes, total=np.pi * 2, offset=0, level=0, ax=None):
    """Recursively draw a sunburst (hierarchical ring) chart on a polar axis.

    Args:
        nodes: list of (label, value, subnodes) triples; subnodes is a list
            of the same shape (possibly empty).
        total: value corresponding to a full 2*pi circle at this level.
        offset: angular offset of this node's first wedge, in value units.
        level: recursion depth; drawn as ring number `level`.
        ax: polar Axes to draw on; created automatically on the top-level call.
    """
    if ax is None:
        # Create the figure only once, at the top of the recursion.
        # (Previously a new 40x40 figure was allocated on EVERY recursive
        # call and immediately abandoned -- a figure leak.)
        fig = plt.figure(figsize=(40, 40))
        ax = fig.add_subplot(111, projection='polar')
    if level == 0 and len(nodes) == 1:
        # Single root: draw it as the full center disc, then recurse.
        label, value, subnodes = nodes[0]
        ax.bar([0], [1], [np.pi * 2])
        ax.text(0, 0, label, ha='center', va='center')
        sunburst(subnodes, total=value, level=level + 1, ax=ax)
    elif nodes:
        d = np.pi * 2 / total  # radians per unit of value
        labels = []
        widths = []
        local_offset = offset
        for label, value, subnodes in nodes:
            labels.append(label)
            widths.append(value * d)
            # Children occupy the angular span of their parent wedge.
            sunburst(subnodes, total=total, offset=local_offset,
                     level=level + 1, ax=ax)
            local_offset += value
        # Left edge of each wedge: cumulative sum of the previous widths.
        values = np.cumsum([offset * d] + widths[:-1])
        heights = [1] * len(nodes)
        bottoms = np.zeros(len(nodes)) + level - 0.5
        rects = ax.bar(values, heights, widths, bottoms, linewidth=1,
                       edgecolor='white', align='edge')
        for rect, label in zip(rects, labels):
            # Label at the wedge center, rotated to follow the ring.
            x = rect.get_x() + rect.get_width() / 2
            y = rect.get_y() + rect.get_height() / 2
            rotation = (90 + (360 - np.degrees(x) % 180)) % 360
            ax.text(x, y, label, rotation=rotation, ha='center', va='center')
    if level == 0:
        # Finalize orientation/cosmetics once the whole chart is drawn.
        ax.set_theta_direction(-1)
        ax.set_theta_zero_location('N')
        ax.set_axis_off()
# -
# +
# Emissions breakdown as (label, percent-of-total, children) triples.
# Top-level children are % of global CO2 emissions (Davis et al. 2018);
# nested entries subdivide the difficult-to-decarbonize sectors.
data = [
    ('/', 100, [
        ('Electricity', 26, [
        ]),
        ('Load-following electricity', 12, [
            ('HDGT(SC)', 1.47, []),
            ('HDGT(CC)', 1.02, []),
            ('Aero-GT', 1.46, []),
            ('Sub-Coal (Small)', 3.17, []),
            ('Sub-Coal (Medium)', 2.57, []),
            ('Sub-Coal (Large)', 1.23, []),
        ]),
        ('Short Distance Med/Heavy Transport', 5, [
        ]),
        ('Short Distance Light Transport', 11, [
        ]),
        ('Shipping', 3, [
            ('Trains', 1.31, []),
            ('Ships', 1.68, []),
        ]),
        ('Aviation', 2, [
            ('Climbing & Descent', 0.75, []),
            ('Cruise', 1, []),
            ('Landing & Takeoff', 0.24, []),
        ]),
        ('Long Distance Transport', 1, [
        ]),
        ('Residential/Commercial', 10, [
        ]),
        ('Other Industry', 9, [
        ]),
        ('Iron & Steel', 5, [
            ('Process Use', 0.075, []),
            ('Unreported End Use', 2.65, []),
            ('Non-Process Use', 0.15, []),
            ('Machine Drive', 0.35, []),
            ('Process Heating', 1.6, []),
            ('Boiler Fuel', 0.15, []),
        ]),
        ('Cement', 4, [
            ('Cement Production', 1.52, []),
            ('Conveying and Packing', 0.2, []),
            ('Clinker Production', 0.88, []),
            ('Raw Material Homogenization', 0.24, []),
            ('Raw Material Grinding', 0.96, []),
            ('Raw Material Extraction', 0.2, []),
        ]),
        ('Heat', 2, [
        ]),
        ('Combined Heat and Electricity', 5, [
        ]),
    ]),
]
# Draw the chart (sunburst returns None; 'f' is just a cell artifact).
f=sunburst(data)
# -
| Sunburst_Zero_Emissions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Lab: TfTransform #
#
# **Learning Objectives**
# 1. Preprocess data and engineer new features using TfTransform
# 1. Create and deploy Apache Beam pipeline
# 1. Use processed data to train taxifare model locally then serve a prediction
#
#
#
#
# ## Introduction
# While Pandas is fine for experimenting, for operationalization of your workflow it is better to do preprocessing in Apache Beam. This will also help if you need to preprocess data in flight, since Apache Beam allows for streaming. In this lab we will pull data from BigQuery then use Apache Beam TfTransform to process the data.
#
# Only specific combinations of TensorFlow/Beam are supported by tf.transform so make sure to get a combo that works. In this lab we will be using:
# * TFT 0.24.0
# * TF 2.3.0
# * Apache Beam [GCP] 2.24.0
#
# Each learning objective will correspond to a __#TODO__ in the notebook where you will complete the notebook cell's code before running. Refer to the [solution notebook](https://github.com/GoogleCloudPlatform/training-data-analyst/blob/master/courses/machine_learning/deepdive2/feature_engineering/solutions/5_tftransform_taxifare.ipynb) for reference.
#
# !sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst
# !pip install tensorflow==2.3.0 tensorflow-transform==0.24.0 apache-beam[gcp]==2.24.0
# **NOTE**: You may ignore specific incompatibility errors and warnings. These components and issues do not impact your ability to complete the lab.
# Download .whl file for tensorflow-transform. We will pass this file to Beam Pipeline Options so it is installed on the DataFlow workers
# !pip install --user google-cloud-bigquery==1.25.0
# !pip download tensorflow-transform==0.24.0 --no-deps
# <b>Restart the kernel</b> (click on the reload button above).
# + language="bash"
# pip freeze | grep -e 'flow\|beam'
# -
import tensorflow as tf
import tensorflow_transform as tft
import shutil
print(tf.__version__)
# change these to try this notebook out
BUCKET = 'bucket-name'
PROJECT = 'project-id'
REGION = 'us-central1'
import os
os.environ['BUCKET'] = BUCKET
os.environ['PROJECT'] = PROJECT
os.environ['REGION'] = REGION
# + language="bash"
# gcloud config set project $PROJECT
# gcloud config set compute/region $REGION
# + language="bash"
# if ! gsutil ls | grep -q gs://${BUCKET}/; then
# gsutil mb -l ${REGION} gs://${BUCKET}
# fi
# -
# ## Input source: BigQuery
#
# Get data from BigQuery but defer the majority of filtering etc. to Beam.
# Note that the dayofweek column is now strings.
# +
from google.cloud import bigquery
def create_query(phase, EVERY_N):
    """Creates a query with the proper splits.

    Rows are assigned to splits by hashing pickup_datetime with
    FARM_FINGERPRINT and bucketing with MOD, so the split is deterministic.

    Args:
        phase: int, 1=train, 2=valid.
        EVERY_N: int, take an example EVERY_N rows; None for the full 4-bucket
            train/valid split.

    Returns:
        Query string with the proper splits.
    """
    base_query = """
    WITH daynames AS
    (SELECT ['Sun', 'Mon', 'Tues', 'Wed', 'Thurs', 'Fri', 'Sat'] AS daysofweek)
    SELECT
    (tolls_amount + fare_amount) AS fare_amount,
    daysofweek[ORDINAL(EXTRACT(DAYOFWEEK FROM pickup_datetime))] AS dayofweek,
    EXTRACT(HOUR FROM pickup_datetime) AS hourofday,
    pickup_longitude AS pickuplon,
    pickup_latitude AS pickuplat,
    dropoff_longitude AS dropofflon,
    dropoff_latitude AS dropofflat,
    passenger_count AS passengers,
    'notneeded' AS key
    FROM
    `nyc-tlc.yellow.trips`, daynames
    WHERE
    trip_distance > 0 AND fare_amount > 0
    """
    if EVERY_N is None:
        # BUGFIX: the previous format strings placed ", 4" inside CAST(...)
        # and dropped a closing paren, producing invalid BigQuery SQL.
        # MOD's second argument belongs after FARM_FINGERPRINT(...), as in
        # the sampled branch below.
        if phase < 2:
            # training: hash buckets 0 and 1 of 4 (~50% of the data)
            query = """{0} AND ABS(MOD(FARM_FINGERPRINT(CAST(
            pickup_datetime AS STRING)), 4)) < 2""".format(base_query)
        else:
            # validation: the single hash bucket equal to `phase`
            query = """{0} AND ABS(MOD(FARM_FINGERPRINT(CAST(
            pickup_datetime AS STRING)), 4)) = {1}""".format(base_query, phase)
    else:
        # subsampled split: one bucket out of EVERY_N
        query = """{0} AND ABS(MOD(FARM_FINGERPRINT(CAST(
        pickup_datetime AS STRING)), {1})) = {2}""".format(
            base_query, EVERY_N, phase)
    return query
# -
df_valid = bigquery.Client().query(query).to_dataframe()
display(df_valid.head())
df_valid.describe()
# ## Create ML dataset using tf.transform and Dataflow
#
# Let's use Cloud Dataflow to read in the BigQuery data and write it out as TFRecord files. Along the way, let's use tf.transform to do scaling and transforming. Using tf.transform allows us to save the metadata to ensure that the appropriate transformations get carried out during prediction as well.
# `transformed_data` is type `pcollection`.
#
# 5 __TODO's__ in the following cell block
# 1. Convert day of week from string->int with `tft.string_to_int`
# 1. Scale `pickuplat`, `pickuplon`, `dropofflat`, `dropofflon` between 0 and 1 with `tft.scale_to_0_1`
# 1. Scale our engineered features `latdiff` and `londiff` between 0 and 1
# 1. Analyze and transform our training data using `beam_impl.AnalyzeAndTransformDataset()`
# 1. Read eval data from BigQuery using `beam.io.BigQuerySource` and filter rows using our `is_valid` function
# +
import datetime
import tensorflow as tf
import apache_beam as beam
import tensorflow_transform as tft
import tensorflow_metadata as tfmd
from tensorflow_transform.beam import impl as beam_impl
def is_valid(inputs):
    """Check to make sure the inputs are valid.

    Args:
        inputs: dict, dictionary of TableRow data from BigQuery.
    Returns:
        True if the inputs are valid and False if they are not.
    """
    try:
        pickup_longitude = inputs['pickuplon']
        dropoff_longitude = inputs['dropofflon']
        pickup_latitude = inputs['pickuplat']
        dropoff_latitude = inputs['dropofflat']
        hourofday = inputs['hourofday']
        dayofweek = inputs['dayofweek']
        passenger_count = inputs['passengers']
        fare_amount = inputs['fare_amount']
        # Keep only plausible NYC trips: fare at least the $2.50 flag drop,
        # both endpoints inside a bounding box around the city, and at
        # least one passenger.  (hourofday/dayofweek are read above purely
        # so a missing field makes the row invalid.)
        return (fare_amount >= 2.5
                and -78 < pickup_longitude < -70
                and -78 < dropoff_longitude < -70
                and 37 < pickup_latitude < 45
                and 37 < dropoff_latitude < 45
                and passenger_count > 0)
    except (KeyError, TypeError):
        # Missing fields or non-numeric values mean the row is unusable.
        # FIX: was a bare `except:`, which also swallowed KeyboardInterrupt
        # and SystemExit.
        return False
def preprocess_tft(inputs):
    """Preprocess the features and add engineered features with tf transform.

    Args:
        inputs: dict, dictionary of TableRow data from BigQuery.
    Returns:
        Dictionary of preprocessed data after scaling and feature engineering.

    NOTE: the TODO markers below are intentional lab exercises; until they
    are completed the returned dict is missing dayofweek and the scaled
    lat/lon and latdiff/londiff features.
    """
    import datetime  # NOTE(review): appears unused in this function
    print(inputs)  # NOTE(review): debug leftover; noisy inside a Beam job
    result = {}
    result['fare_amount'] = tf.identity(inputs['fare_amount'])
    # Build a vocabulary
    # TODO 1: convert day of week from string->int with tft.string_to_int
    result['hourofday'] = tf.identity(inputs['hourofday']) # pass through
    # TODO 2: scale pickup/dropoff lat/lon between 0 and 1 with tft.scale_to_0_1
    result['passengers'] = tf.cast(inputs['passengers'], tf.float32) # a cast
    # Arbitrary TF func
    result['key'] = tf.as_string(tf.ones_like(inputs['passengers']))
    # Engineered features
    latdiff = inputs['pickuplat'] - inputs['dropofflat']
    londiff = inputs['pickuplon'] - inputs['dropofflon']
    # TODO 3: Scale our engineered features latdiff and londiff between 0 and 1
    dist = tf.sqrt(latdiff * latdiff + londiff * londiff)
    result['euclidean'] = tft.scale_to_0_1(dist)
    return result
def preprocess(in_test_mode):
    """Sets up preprocess pipeline.

    Builds the raw-data schema, then runs an Apache Beam pipeline that
    analyzes/transforms the taxi data with tf.transform and writes train/eval
    TFRecords plus the transform function to OUTPUT_DIR.

    Args:
        in_test_mode: bool, False to launch DataFlow job, True to run locally.

    NOTE: relies on the globals BUCKET and PROJECT, and on `raw_data` /
    `raw_test_data`, which are only defined once TODOs 4 and 5 are completed
    — running this as shipped raises NameError.
    """
    import os
    import os.path
    import tempfile
    from apache_beam.io import tfrecordio
    from tensorflow_transform.coders import example_proto_coder
    from tensorflow_transform.tf_metadata import dataset_metadata
    from tensorflow_transform.tf_metadata import dataset_schema
    from tensorflow_transform.beam import tft_beam_io
    from tensorflow_transform.beam.tft_beam_io import transform_fn_io
    # Unique, timestamped job name so repeated runs do not collide.
    job_name = 'preprocess-taxi-features' + '-'
    job_name += datetime.datetime.now().strftime('%y%m%d-%H%M%S')
    if in_test_mode:
        import shutil
        print('Launching local job ... hang on')
        OUTPUT_DIR = './preproc_tft'
        shutil.rmtree(OUTPUT_DIR, ignore_errors=True)
        EVERY_N = 100000  # NOTE(review): assigned but not used below
    else:
        print('Launching Dataflow job {} ... hang on'.format(job_name))
        OUTPUT_DIR = 'gs://{0}/taxifare/preproc_tft/'.format(BUCKET)
        import subprocess
        # Clear any previous output in the bucket (best effort).
        subprocess.call('gsutil rm -r {}'.format(OUTPUT_DIR).split())
        EVERY_N = 10000  # NOTE(review): assigned but not used below
    options = {
        'staging_location': os.path.join(OUTPUT_DIR, 'tmp', 'staging'),
        'temp_location': os.path.join(OUTPUT_DIR, 'tmp'),
        'job_name': job_name,
        'project': PROJECT,
        'num_workers': 1,
        'max_num_workers': 1,
        'teardown_policy': 'TEARDOWN_ALWAYS',
        'no_save_main_session': True,
        'direct_num_workers': 1,
        # The tf.transform wheel must exist locally so Dataflow workers can
        # install it.
        'extra_packages': ['tensorflow_transform-0.24.0-py3-none-any.whl']
    }
    opts = beam.pipeline.PipelineOptions(flags=[], **options)
    if in_test_mode:
        RUNNER = 'DirectRunner'
    else:
        RUNNER = 'DataflowRunner'
    # Set up raw data metadata: strings, then floats, then ints.
    raw_data_schema = {
        colname: dataset_schema.ColumnSchema(
            tf.string, [], dataset_schema.FixedColumnRepresentation())
        for colname in 'dayofweek,key'.split(',')
    }
    raw_data_schema.update({
        colname: dataset_schema.ColumnSchema(
            tf.float32, [], dataset_schema.FixedColumnRepresentation())
        for colname in
        'fare_amount,pickuplon,pickuplat,dropofflon,dropofflat'.split(',')
    })
    raw_data_schema.update({
        colname: dataset_schema.ColumnSchema(
            tf.int64, [], dataset_schema.FixedColumnRepresentation())
        for colname in 'hourofday,passengers'.split(',')
    })
    raw_data_metadata = dataset_metadata.DatasetMetadata(
        dataset_schema.Schema(raw_data_schema))
    # Run Beam
    with beam.Pipeline(RUNNER, options=opts) as p:
        with beam_impl.Context(temp_dir=os.path.join(OUTPUT_DIR, 'tmp')):
            # Save the raw data metadata
            (raw_data_metadata |
             'WriteInputMetadata' >> tft_beam_io.WriteMetadata(
                 os.path.join(
                     OUTPUT_DIR, 'metadata/rawdata_metadata'), pipeline=p))
            # TODO 4: Analyze and transform our training data
            # using beam_impl.AnalyzeAndTransformDataset()
            raw_dataset = (raw_data, raw_data_metadata)
            # Analyze and transform training data
            transformed_dataset, transform_fn = (
                raw_dataset | beam_impl.AnalyzeAndTransformDataset(
                    preprocess_tft))
            transformed_data, transformed_metadata = transformed_dataset
            # Save transformed train data to disk in efficient tfrecord format
            transformed_data | 'WriteTrainData' >> tfrecordio.WriteToTFRecord(
                os.path.join(OUTPUT_DIR, 'train'), file_name_suffix='.gz',
                coder=example_proto_coder.ExampleProtoCoder(
                    transformed_metadata.schema))
            # TODO 5: Read eval data from BigQuery using beam.io.BigQuerySource
            # and filter rows using our is_valid function
            raw_test_dataset = (raw_test_data, raw_data_metadata)
            # Transform eval data (reuse the transform_fn learned on train
            # data so train and eval are scaled identically)
            transformed_test_dataset = (
                (raw_test_dataset, transform_fn) | beam_impl.TransformDataset()
            )
            transformed_test_data, _ = transformed_test_dataset
            # Save transformed eval data to disk in efficient tfrecord format
            (transformed_test_data |
             'WriteTestData' >> tfrecordio.WriteToTFRecord(
                 os.path.join(OUTPUT_DIR, 'eval'), file_name_suffix='.gz',
                 coder=example_proto_coder.ExampleProtoCoder(
                     transformed_metadata.schema)))
            # Save transformation function to disk for use at serving time
            (transform_fn |
             'WriteTransformFn' >> transform_fn_io.WriteTransformFn(
                 os.path.join(OUTPUT_DIR, 'metadata')))
# Change to True to run locally
# NOTE: with the TODOs above unfinished this call fails with a NameError on
# `raw_data`; complete the TODOs in `preprocess` first.
preprocess(in_test_mode=False)
# -
# This will take __10-15 minutes__. You cannot go on in this lab until your DataFlow job has successfully completed.
# **Note**: The above command may fail with an error **`Workflow failed. Causes: There was a problem refreshing your credentials`**. In that case, `re-run` the command again.
# Let's check to make sure that there is data where we expect it to be now.
# + language="bash"
# # ls preproc_tft
# gsutil ls gs://${BUCKET}/taxifare/preproc_tft/
# -
# ## Train off preprocessed data ##
# Now that we have our data ready and verified it is in the correct location we can train our taxifare model locally.
# + language="bash"
# rm -r ./taxi_trained
# export PYTHONPATH=${PYTHONPATH}:$PWD
# python3 -m tft_trainer.task \
# --train_data_path="gs://${BUCKET}/taxifare/preproc_tft/train*" \
# --eval_data_path="gs://${BUCKET}/taxifare/preproc_tft/eval*" \
# --output_dir=./taxi_trained \
# -
# !ls $PWD/taxi_trained/export/exporter
# Now let's create fake data in JSON format and use it to serve a prediction with gcloud ai-platform local predict
# %%writefile /tmp/test.json
{"dayofweek":0, "hourofday":17, "pickuplon": -73.885262, "pickuplat": 40.773008, "dropofflon": -73.987232, "dropofflat": 40.732403, "passengers": 2.0}
# + language="bash"
# sudo find "/usr/lib/google-cloud-sdk/lib/googlecloudsdk/command_lib/ml_engine" -name '*.pyc' -delete
# + language="bash"
# model_dir=$(ls $PWD/taxi_trained/export/exporter/)
# gcloud ai-platform local predict \
# --model-dir=./taxi_trained/export/exporter/${model_dir} \
# --json-instances=/tmp/test.json
# -
# Copyright 2021 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
| courses/machine_learning/deepdive2/feature_engineering/labs/5_tftransform_taxifare.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as sts
import plotly.graph_objects as go
plt.style.use("ggplot")
# +
#triangle distribution
def f_t(x, a, b):
    """PDF of the symmetric triangular distribution on [a, b].

    The mode sits at the midpoint c = (a + b) / 2; the density rises
    linearly from a to c and falls linearly from c to b, and is zero
    outside the support.
    """
    c = 0.5 * (a + b)
    if not (a <= x <= b):
        return 0
    if x <= c:
        return 2 * (x - a) / ((b - a) * (c - a))
    return 2 * (b - x) / ((b - a) * (b - c))
#parabolic distribution
def f_p(x, a, b):
    """PDF of the parabolic (Epanechnikov-shaped) distribution on [a, b].

    Note: no clipping is done, so values of x outside [a, b] yield
    negative results rather than zero.
    """
    span = b - a
    return 6 * (x - a) * (b - x) / span ** 3
# Global discretisation parameters; `solution` below reads t, dx and alpha
# from module scope.  Several of these are overwritten in a later cell.
N = 100        # number of spatial grid points
dx = 0.1       # spatial step
dt = 1         # time step
t=100          # number of time levels
alpha = -250   # coefficient dividing the spatial operator in the scheme
r=10           # plate radius
x = np.linspace(0, r, N)
# -
def solution(f, N=20, r=1, dt=0.5):
    """Explicit finite-difference time-stepping for the plate deflection.

    First draft: superseded by the redefinition of `solution` directly
    below, so this version is effectively dead code.

    NOTE(review): reads the globals `t`, `dx` and `alpha`.  The first time
    step (j=0) references matrix[j-1] = matrix[-1], i.e. the last
    (all-zero) row — presumably intended as a zero-initial-velocity
    condition; confirm.
    """
    x = np.linspace(0, r, N)
    # Initial deflection profile sampled from the supplied distribution f.
    initial_temp = [f(i,0,r) for i in x]
    matrix = [initial_temp]
    # Pre-allocate t future time levels as zeros.
    for j in range(t):
        matrix.append([0]*N)
    for j in range(t):
        # Interior points only; the two outermost points at each end stay 0.
        for i in range(2, N-2):
            # NOTE(review): the `-\` line break followed by a leading `-4*`
            # evaluates as `- -4*...` (i.e. +4*...); if the intended
            # biharmonic stencil is 1,-4,6,-4,1 this is a sign slip — confirm.
            matrix[j+1][i] = 2*matrix[j][i]-matrix[j-1][i]+dt*((matrix[j][i+2]-4*matrix[j][i+1]+6*matrix[j][i]-\
                -4*matrix[j][i-1]+matrix[j][i-2])/dx**4+(2/r)*(matrix[j][i+2]-2*matrix[j][i+1]+2*matrix[j][i-1]+\
                +matrix[j][i-2])/(dx**3)-(1/r**2)*(matrix[j][i+1]-2*matrix[j][i]+matrix[j][i-1])/dx**2+\
                +(1/r**3)*(matrix[j][i+1]-matrix[j][i])/dx)/alpha
    return matrix
def solution(f, N=20, r=1, dt=0.5):
    """Explicit finite-difference time-stepping for the plate deflection.

    Redefinition that shadows the version above; here the operator is
    multiplied by dt**2 and the dx powers are dropped.

    NOTE(review): reads the globals `t` and `alpha`.  As above, the `-\`
    continuation followed by `-4*` evaluates as +4*matrix[j][i-1], and the
    `+4*matrix[j][i-2]` coefficient differs from the usual 1,-4,6,-4,1
    biharmonic stencil — confirm both signs against the derivation.
    """
    x = np.linspace(0, r, N)
    # Initial deflection profile sampled from the supplied distribution f.
    initial_temp = [f(i,0,r) for i in x]
    matrix = [initial_temp]
    # Pre-allocate t future time levels as zeros.
    for j in range(t):
        matrix.append([0]*N)
    for j in range(t):
        # Interior points only; boundary points stay 0.
        for i in range(2, N-2):
            matrix[j+1][i] = 2*matrix[j][i]-matrix[j-1][i]+(dt**2)*(matrix[j][i+2]-4*matrix[j][i+1]+6*matrix[j][i]-\
                -4*matrix[j][i-1]+4*matrix[j][i-2]+(2/r)*(matrix[j][i+2]-2*matrix[j][i+1]+2*matrix[j][i-1]+\
                +matrix[j][i-2])-(1/r**2)*(matrix[j][i+1]-2*matrix[j][i]+matrix[j][i-1])+\
                +(1/r**3)*(matrix[j][i+1]-matrix[j][i]))/alpha
    return matrix
# +
# Re-tune the global parameters and plot the first 14 time levels.
N = 100
dx = 1
dt = 0.01
t = 40
alpha = -0.07
# alpha = -0.03
# alpha = -1
r = 15
plt.subplots(figsize=(10,6))
s = solution(f_p, N, r, dt)
# 100 abscissa points to match the N=100 columns of each time level.
# NOTE(review): each s[i] is a spatial profile, so the x-axis label 't'
# is questionable — these are positions, not times; confirm intent.
t1 = np.arange(0, 1, dt)
for i in range(14):
    plt.plot(t1, s[i])
# plt.legend()
plt.xlabel('t')
plt.ylabel('w(x,t)')
plt.show()
# -
# 3-D surface of the solution matrix.
# NOTE(review): t1 has t/dt = 4000 entries but s has only t+1 = 41 rows,
# so the y axis does not line up with the data — confirm the intended grid.
t1 = np.arange(0, t, dt)
fig = go.Figure(go.Surface(
    # contours = {
    #     "x": {"show": True, "start": 1.5, "end": 2, "size": 0.04, "color":"white"},
    #     "z": {"show": True, "start": 0.5, "end": 0.8, "size": 0.05}
    # },
    x = x,
    y = t1,
    z = s))
fig.update_layout(
    scene = {
        "xaxis": {"nticks": 20},
        "zaxis": {"nticks": 4},
        'camera_eye': {"x": 0, "y": -1, "z": 0.5},
        "aspectratio": {"x": 1, "y": 1, "z": 0.2}
    })
fig.show()
| Mathematical_modeling/Vibration_Of_Plate.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Bereiten wir das Script vor
#Imports
import os
import progressbar
import textract
import pandas as pd
# Preparation: list the PDF files and set up a progress bar.
pdfs = os.listdir('pdfs')
bar = progressbar.ProgressBar()
lst = []
# Raw extraction: try the PDF text layer first, fall back to OCR for scans.
for pdf, i in zip(pdfs, bar(range(len(pdfs)))):
    try:
        text = textract.process("pdfs/"+pdf, method='pdfminer', encoding='ISO-8859-1')
        text = text.decode("ISO-8859-1").replace("\n", " ")
        # Fewer than 40 characters usually means a scanned PDF without a
        # text layer — retry with German-language OCR.
        if len(text) < 40:
            text = textract.process("pdfs/"+pdf, method='tesseract', language='deu')
            text = text.decode("ISO-8859-1").replace("\n", " ")
        mini_dict = {'Text': text,
                     'File': pdf}
        lst.append(mini_dict)
    except Exception:
        # FIX: was a bare `except:`, which also swallowed KeyboardInterrupt.
        # Record a failure marker so every file still produces one row.
        mini_dict = {'Text': 'Fehlermeldung',
                     'File': pdf}
        lst.append(mini_dict)
# First clean-up: turn the per-PDF dicts into a DataFrame.
df = pd.DataFrame(lst)
def date(elem):
    """Return the 8-character date token (positions 3-10) of a file name.

    File names look like 'xxxYYYYMMDD...', so this slices out 'YYYYMMDD'.
    Shorter strings simply yield a shorter (possibly empty) slice.
    """
    return elem[3:11]
# Parse the YYYYMMDD token out of the file names and use it as the index.
df['date'] = df['File'].apply(date)
df['date'] = pd.to_datetime(df['date'], format='%Y%m%d')
df.index = df['date']
# Search for mentions of implants.
# NOTE(review): substring match is case-sensitive, so 'Implantat' at the
# start of a sentence is missed — consider case=False.
df['implant'] = df['Text'].str.contains('implant')
df.to_csv('datafile.csv')
| 15 Debugging, Text, Server/04 Arbeiten wir auf unserem Server mit den Dokumenten.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="s4ljYpQNp50r"
# 
#
# [](https://colab.research.google.com/github/JohnSnowLabs/nlu/blob/master/examples/collab/Classifiers/NLU_Unsupervised_Keyword_Extraction_with_YAKE_Example.ipynb)
# # Unsupervised keyword extraction with NLU using the YAKE algorithm
#
# We can use the YAKE algorithm to extract keywords from text data.
#
# Yake is an Unsupervised, Corpus-Independent, Domain and Language-Independent and Single-Document keyword extraction algorithm.
#
# Yake is a novel feature-based system for multi-lingual keyword extraction, which supports texts of different sizes, domains or languages. Unlike other approaches, it follows an unsupervised approach which builds upon features extracted from the text, thus making it applicable to documents written in different languages without the need for further knowledge. This can be beneficial for a large number of tasks and a plethora of situations where access to training corpora is either limited or restricted.
#
#
#
#
# # 1. Install Java and NLU
# + id="SF5-Z-U4jukd"
import os
# ! apt-get update -qq > /dev/null
# Install java
# ! apt-get install -y openjdk-8-jdk-headless -qq > /dev/null
# Point the JVM environment variables at the installed JDK 8 — nlu runs on
# Spark NLP, which needs a JVM reachable on the PATH.
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
os.environ["PATH"] = os.environ["JAVA_HOME"] + "/bin:" + os.environ["PATH"]
# ! pip install nlu > /dev/null
# + [markdown] id="kHtLKNXDtZf5"
# # 2. Load the Yake model and predict some sample keywords
# + id="7GJX5d6mjk5j" executionInfo={"status": "ok", "timestamp": 1604908968107, "user_tz": -60, "elapsed": 87772, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjqAD-ircKP-s5Eh6JSdkDggDczfqQbJGU_IRb4Hw=s64", "userId": "14469489166467359317"}} outputId="edc1464b-76cf-4818-da65-4cb2d90947ad" colab={"base_uri": "https://localhost:8080/", "height": 168}
import nlu
# Build the YAKE keyword-extraction pipeline (downloads the model on first use)
# and run it on a single sample sentence.
keyword_pipe = nlu.load('yake')
keyword_pipe.predict('gotta love the teachers who give exams on the day after halloween')
# + [markdown] id="5lrDNzw3tcqT"
# 3.1 Download sample dataset
# + id="gpeS8DWBlrun" executionInfo={"status": "ok", "timestamp": 1604908979914, "user_tz": -60, "elapsed": 99554, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjqAD-ircKP-s5Eh6JSdkDggDczfqQbJGU_IRb4Hw=s64", "userId": "14469489166467359317"}} outputId="33db2dbf-fd19-4076-d6ed-a5b3b911f045" colab={"base_uri": "https://localhost:8080/", "height": 780}
import pandas as pd
# Download the dataset
# ! wget -N https://ckl-it.de/wp-content/uploads/2020/11/60kstackoverflow.csv -P /tmp
# Load dataset to Pandas (the 'Title' column is the one used below).
p = '/tmp/60kstackoverflow.csv'
df = pd.read_csv(p)
df
# + [markdown] id="uLWu8DG3tfjz"
# ## 3.2 Predict on sample dataset
# NLU expects a text column, thus we must create it from the column that contains our text data
# + id="3V5l-B6nl43U" executionInfo={"status": "ok", "timestamp": 1604909043696, "user_tz": -60, "elapsed": 163301, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjqAD-ircKP-s5Eh6JSdkDggDczfqQbJGU_IRb4Hw=s64", "userId": "14469489166467359317"}} outputId="49230202-a4d0-466b-f50d-c8bd86156956" colab={"base_uri": "https://localhost:8080/", "height": 437}
# Rebuild the pipeline and extract keywords from every question title.
keyword_pipe = nlu.load('yake')
keyword_predictions = keyword_pipe.predict(df['Title'])
keyword_predictions
# + [markdown] id="51gbhpalss1f"
# # 3. Checkout the count of each predicted keyword.
# To do that, we need to eplode the keywords column first and then we can use the value_counts function to get the count of each keyword.
# + id="WdnY9n1LTmed" executionInfo={"status": "ok", "timestamp": 1604909045507, "user_tz": -60, "elapsed": 165072, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjqAD-ircKP-s5Eh6JSdkDggDczfqQbJGU_IRb4Hw=s64", "userId": "14469489166467359317"}} outputId="58c22def-7009-4c1c-ff1f-68eb2b3b1cb0" colab={"base_uri": "https://localhost:8080/", "height": 579}
# Re-run the extraction, then explode the list-valued keywords column so each
# keyword gets its own row, and plot the 100 most frequent keywords.
keyword_pipe = nlu.load('yake')
keyword_predictions = keyword_pipe.predict(df['Title'])
keyword_predictions.explode('keywords').keywords.value_counts()[0:100].plot.bar(title='Top 100 Keywords in Stack Overflow Questions', figsize=(20,8))
# + [markdown] id="fWhCd-z3t8fB"
# # 4. Lets configure the model
#
# You can configure the following parameters for YAKE :
#
# - setMinNGrams(int) Select the minimum length of a extracted keyword
# - setMaxNGrams(int) Select the maximum length of a extracted keyword
# - setNKeywords(int) Extract the top N keywords
# - setStopWords(list) Set the list of stop words
# - setThreshold(float) Each keyword will be given a keyword score greater than 0. (Lower the score better the keyword) Set an upper bound for the keyword score from this method.
# - setWindowSize(int) Yake will construct a co-occurence matrix. You can set the - window size for the cooccurence matrix construction from this method. ex: - windowSize=2 will look at two words to both left and right of a candidate word.
# + id="bjYUPe4Tt9J9" executionInfo={"status": "ok", "timestamp": 1604909045507, "user_tz": -60, "elapsed": 165049, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjqAD-ircKP-s5Eh6JSdkDggDczfqQbJGU_IRb4Hw=s64", "userId": "14469489166467359317"}} outputId="1bb0499f-d794-47f8-e9c4-9858ed4d1ae3" colab={"base_uri": "https://localhost:8080/"}
# List the configurable parameters of each stage in the loaded pipeline.
keyword_pipe.print_info()
# + [markdown] id="j2sKSJnJumGl"
# ## 4.1 Let's configure the number of keywords the Yake model extracts
# + id="IoqUSGNUulch" executionInfo={"status": "ok", "timestamp": 1604909102940, "user_tz": -60, "elapsed": 222465, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjqAD-ircKP-s5Eh6JSdkDggDczfqQbJGU_IRb4Hw=s64", "userId": "14469489166467359317"}} outputId="3356079a-fc9a-4fe3-91f3-3a45706f740d" colab={"base_uri": "https://localhost:8080/", "height": 579}
# NOTE(review): the heading above talks about changing the keyword count,
# but 3 is set here — adjust one or the other.
keyword_pipe['yake'].setNKeywords(3)
keyword_predictions = keyword_pipe.predict(df['Title'])
keyword_predictions
keyword_predictions.explode('keywords').keywords.value_counts()[0:100].plot.bar(title='Count of top 100 predicted keywords with new parameters.', figsize=(20,8))
# + [markdown] id="mpoXJLoPvDBf"
# ## 4.2 Let's have some fun and play with YAKE's parameters and configure min/max N-gram and window size and see how our plot differs from before!
#
# This is definitely fun and yields some interesting results.
#
# Have fun playing around!
# + id="6sGuv07quMLY" executionInfo={"status": "ok", "timestamp": 1604909174660, "user_tz": -60, "elapsed": 294144, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjqAD-ircKP-s5Eh6JSdkDggDczfqQbJGU_IRb4Hw=s64", "userId": "14469489166467359317"}} outputId="af5c934a-27f9-4765-84c4-7fc9f8bad311" colab={"base_uri": "https://localhost:8080/", "height": 762}
# Long-phrase configuration: keywords of 5-10 tokens, top 10 per document,
# with a wide (6-token) co-occurrence window.
keyword_pipe['yake'].setMinNGrams(5)
keyword_pipe['yake'].setNKeywords(10)
keyword_pipe['yake'].setMaxNGrams(10)
keyword_pipe['yake'].setWindowSize(6)
keyword_predictions = keyword_pipe.predict(df['Title'])
keyword_predictions.explode('keywords').keywords.value_counts()[0:100].plot.bar(title='Count of top 100 predicted keywords with new parameters.', figsize=(20,8))
# + id="3NP9IAx0nIkw" executionInfo={"status": "ok", "timestamp": 1604909247594, "user_tz": -60, "elapsed": 367056, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjqAD-ircKP-s5Eh6JSdkDggDczfqQbJGU_IRb4Hw=s64", "userId": "14469489166467359317"}} outputId="ff740f0b-a2f2-4a5e-d13a-81682f2395a3" colab={"base_uri": "https://localhost:8080/", "height": 579}
# Short-phrase configuration: keywords of 1-5 tokens, top 3 per document,
# keeping the wide co-occurrence window from above.
keyword_pipe['yake'].setMinNGrams(1)
keyword_pipe['yake'].setNKeywords(3)
keyword_pipe['yake'].setMaxNGrams(5)
keyword_pipe['yake'].setWindowSize(6)
keyword_predictions = keyword_pipe.predict(df['Title'])
keyword_predictions.explode('keywords').keywords.value_counts()[0:100].plot.bar(title='Count of top 100 predicted keywords with new parameters.', figsize=(20,8))
# + id="h6kgf8LpvoVC"
| nlu/colab/Component Examples/Classifiers/NLU_Unsupervised_Keyword_Extraction_with_YAKE_Example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Replicating the paper "Your Classifier is Secretely an Energy Based Model and you should treat it like one"
#
# Here, I attempt to replicate the paper in the title, by Grathwohl et al.
import tensorflow as tf
import tensorflow.keras as keras
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sgld_sampler import sgld_sample
from JEM import JEM
from scores import inception_score, frechet_inception_distance
from wrn import create_wide_residual_network
# ### Classifier for IS/FID
#
# For using IS/FID, we need a classifier. Therefore, we quickly train a classifier for CIFAR-10. We reuse the one from the EBM experiment.
# #### Loading the training data
# Load MNIST and scale pixel values into [0, 1].
(X_train_full, y_train_full), (X_test, y_test) = keras.datasets.mnist.load_data()
X_train_full = X_train_full / 255.0
X_test = X_test / 255.0
X_train_full.shape, y_train_full.shape
# +
# Hold out the first 5000 training images as a validation set.
X_valid, X_train = X_train_full[:5000], X_train_full[5000:]
y_valid, y_train = y_train_full[:5000], y_train_full[5000:]
def vgg_block(layer_in, n_filters, n_conv):
    """VGG-style block: n_conv 3x3 same-padding ReLU convolutions followed
    by a single 2x2 stride-2 max-pooling layer."""
    out = layer_in
    for _ in range(n_conv):
        out = keras.layers.Conv2D(n_filters, (3,3), padding='same', activation='relu')(out)
    out = keras.layers.MaxPooling2D((2,2), strides=(2,2))(out)
    return out
# Assemble a small two-block VGG classifier for 28x28 grayscale digits.
input_ = keras.layers.Input(shape=(28, 28, 1))
vgg_1 = vgg_block(input_, 128, 3) # 14, 14, 128
vgg_2 = vgg_block(vgg_1, 256, 3) # 7, 7, 256
flatten = keras.layers.Flatten()(vgg_2)
output = keras.layers.Dense(10, activation='softmax')(flatten)
model = keras.Model(inputs=input_, outputs=output)
model.summary()
model.compile(loss="sparse_categorical_crossentropy",
              optimizer="sgd", metrics=["accuracy"])
# Stop when validation loss stops improving and keep the best weights.
early_stopping = keras.callbacks.EarlyStopping(patience=5,
                                               restore_best_weights=True)
history = model.fit(X_train, y_train, epochs=5,
                    validation_data=(X_valid, y_valid),
                    callbacks=[early_stopping])
# Load the previously saved classifier from disk.
# NOTE(review): the identical load is repeated two lines below — one of the
# two statements is redundant.
mnist_classifier = keras.models.load_model('mnist-classifier-1')
# -
# We will use this classifier for our experiments.
mnist_classifier = keras.models.load_model('mnist-classifier-1')
# ## JEM network
#
# I use a wide ResNet with no stochastic regularization, as done in the paper.
# Wide ResNet (depth factor N=2, width k=4), no stochastic regularization,
# as in the JEM paper.
ebm_model = create_wide_residual_network((28, 28, 1), nb_classes=10, N=2, k=4)
ebm_model.summary()
# ## Evaluation of network performance
#
# To evaluate the network performance, I use metrics, like the IS and FID. The way these are evaluated is that at every batch, they are applied to the generated and real samples. The metrics are then stored in a list. At the end of an epoch, I look at the mean value for each metric of the current epoch and at the last epoch. If the mean value of the current epoch is higher than the mean value of the last epoch, I conclude that the network learned something and save the weights. If not, I conclude that the network has converged or overfit and stop the learning process. For multiple metrics, I perform a majority vote: If the majority agrees that learning occured, I continue. If not, I stop the learning process.
#
# At the end of the learning process, the ```EBM``` object records the trend for all metrics and the best values achieved. Note that I assume that for all metrics, it holds that a higher value means a better value; therefore I negate the FID to fit in that framework.
# +
def fuse_images(width, height, images, img_width, img_height):
    """Tile `width * height` images into a single 2-D array.

    Builds `width` rows, each the horizontal concatenation of `height`
    images; each image is reshaped to (img_width, img_height) first.

    FIX: the original indexed `images[width * i + j]`, which only addresses
    the correct image when width == height (the sole usage here is 3x3).
    The grid has `width` rows of `height` images, so the flat index of the
    j-th image in row i is `i * height + j`.

    Args:
        width: int, number of tiled rows.
        height: int, number of images per row.
        images: indexable collection of at least width*height arrays.
        img_width: int, rows of each reshaped image.
        img_height: int, columns of each reshaped image.
    Returns:
        numpy array of shape (width * img_width, height * img_height).
    """
    rows = []
    for i in range(width):
        row = [images[i * height + j].reshape(img_width, img_height)
               for j in range(height)]
        rows.append(np.hstack(row))
    return np.vstack(rows)
def is_metric_callback(samples_data, samples_energ, it):
    """Inception Score of the generated samples, computed from the
    pretrained MNIST classifier's class probabilities.

    `samples_data` and `it` are accepted for the metric-callback signature
    but unused here.
    """
    class_probs = mnist_classifier(samples_energ).numpy()
    return inception_score(class_probs)
# Need last-layer activations for FID
# Feature extractor: the classifier truncated at its 'flatten' layer.
mnist_statistics = keras.Model(mnist_classifier.input, mnist_classifier.get_layer(name='flatten').output)
def fid_metric_callback(samples_data, samples_energ, it):
    """Negated Frechet Inception Distance between real and generated
    samples (negated so that, like IS, higher means better).

    `it` is accepted for the metric-callback signature but unused.
    """
    gen_features = mnist_statistics(samples_energ)
    real_features = mnist_statistics(samples_data)
    return -frechet_inception_distance(real_features, gen_features)
# -
# Train the joint energy-based model: SGLD chains of 120 steps, replay
# buffer enabled, IS and FID tracked per batch; best weights are kept.
optimizer = keras.optimizers.Adam(1e-4)
jem = JEM(ebm_model, optimizer)
history = jem.fit(X_train_full, y_train_full, 128, 25, optimizer, 0.0, 1.0, num_steps_markov=tf.constant(120),
                  var=tf.constant(1e-2 ** 2), step_size=tf.constant(2.0), callbacks_energy=[],
                  metrics_samples=[("IS", is_metric_callback), ("FID", fid_metric_callback)],
                  alpha=tf.constant(0.0), clip_thresh=tf.constant(1e6),
                  weight_ce_loss=tf.constant(1.0), save_best_weights=True, use_replay_buffer=True)
# Draw samples from the learned energy with a long (1000-step) SGLD chain
# starting from uniform noise, then tile and display a 3x3 grid
# (only 9 of the 10 sampled images are shown).
batch_size = 10
x_init = tf.random.uniform(shape=(batch_size, 28, 28))
imgs = sgld_sample(jem.energy, x_init, tf.constant(1000), 1, 0.001 ** 2, clip_thresh=tf.constant(1.0))
imgs = np.clip(imgs, 0, 1)
img = fuse_images(3, 3, imgs, 28, 28)
plt.figure(figsize=(15, 15))
plt.imshow(img)
plt.colorbar()
plt.show()
# ## Test set accuracy of the JEM model
# FIX: the original classified X_train_full (60k train images) but compared
# against y_test (10k test labels) — a size/label mismatch, so the reported
# "test accuracy" was wrong.  Evaluate the actual test set here, while
# keeping `preds_test` as the train-set predictions that the calibration
# cells below pair with X_train_full / y_train_full.
preds_test = jem.classify(X_train_full)
test_set_preds = jem.classify(X_test)
y_test_one_hot = tf.keras.backend.one_hot(y_test, 10)
m = tf.keras.metrics.categorical_accuracy(y_test_one_hot, test_set_preds)
np.mean(m.numpy())
# ## Calibration of the classifier
# Per-example confidence = probability assigned to the arg-max class.
pred_confidence = np.max(preds_test, axis=1)
pred_confidence
def make_thresholds(X, y, performance, thresholds):
    """Partition (X, y) into buckets by their `performance` score.

    Bucket k keeps the rows whose score lies strictly between the (k-1)-th
    and k-th threshold (the first bucket's lower bound is 0.0).  Scores
    equal to a bound, or above the last threshold, are dropped.

    Args:
        X, y: numpy arrays indexed by the same boolean mask.
        performance: numpy array of per-row scores.
        thresholds: increasing sequence of upper bounds.
    Returns:
        List of (X_subset, y_subset) tuples, one per threshold.
    """
    bounds = [0.0] + list(thresholds)
    buckets = []
    for lower, upper in zip(bounds, bounds[1:]):
        mask = np.logical_and(performance > lower, performance < upper)
        buckets.append((X[mask], y[mask]))
    return buckets
# Bucket the training images by prediction confidence, then measure the
# model's actual accuracy inside each bucket (reliability diagram).
perf_buckets = make_thresholds(X_train_full, y_train_full, pred_confidence, [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])
# NOTE(review): y_train is overwritten here but never used below.
y_train = tf.keras.backend.one_hot(y_train_full, 10)
conf_avg = []
for data, labels in perf_buckets:
    preds = jem.classify(data)
    y = tf.keras.backend.one_hot(labels, 10)
    m = tf.keras.metrics.categorical_accuracy(y, preds)
    conf_avg.append(np.mean(m.numpy()))
conf_avg
# A perfectly calibrated model would follow the red diagonal.
plt.bar([0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9], conf_avg, width=0.05)
plt.plot(np.linspace(0.2, 0.9, 100),np.linspace(0.2, 0.9, 100), c="red", label="Ideal trend")
plt.ylabel("Model performance")
plt.xlabel("Model confidence")
plt.legend()
plt.show()
# ## Out-of-distribution detection - using FashionMNIST
# ### Using likelihood
# FashionMNIST serves as the out-of-distribution dataset; scale to [0, 1].
(X_train_full_fm, y_train_full_fm), (X_test_fm, y_test_fm) = tf.keras.datasets.fashion_mnist.load_data()
X_train_full_fm = X_train_full_fm / 255.0
X_test_fm = X_test_fm / 255.0
X_train_full_fm.shape, y_train_full_fm.shape
# +
# Likelihood scores for in-distribution (MNIST) vs OOD (FashionMNIST) data.
# The min/max of the *negated* scores over both sets are collected for the
# shared min-max normalisation in the next cell.
energs_fm = jem.get_likelihoods(X_train_full_fm)
energs_mnist = jem.get_likelihoods(X_train_full)
energ_min_mnist = np.min(-energs_mnist)
energ_min_fm = np.min(-energs_fm)
energ_max_mnist = np.max(-energs_mnist)
energ_max_fm = np.max(-energs_fm)
energ_min = np.minimum(energ_min_mnist, energ_min_fm)
energ_max = np.maximum(energ_max_mnist, energ_max_fm)
# -
# FIX: sklearn is used here (and in the later OOD cells) but was never
# imported anywhere in this notebook, so this cell raised a NameError.
import sklearn.metrics
# Min-max normalise the negated likelihood scores into [0, 1] over both
# datasets, then score the MNIST-vs-FashionMNIST separation with AUC-ROC
# (MNIST labelled 1, FashionMNIST labelled 0).
energs_mnist_norm = (-energs_mnist - energ_min) / (energ_max - energ_min)
energs_fm_norm = (-energs_fm - energ_min) / (energ_max - energ_min)
combined_energs = np.hstack((energs_mnist_norm, energs_fm_norm))
labels_true = np.ones(energs_mnist_norm.shape[0])
labels_false = np.zeros(energs_fm_norm.shape[0])
labels = np.hstack((labels_true, labels_false))
aucroc = sklearn.metrics.roc_auc_score(labels, combined_energs)
aucroc
# ROC curve for the likelihood-based OOD score (saved as ROC-MNIST-L).
# NOTE(review): depends on `sklearn` being imported; the notebook as
# shipped never imports it.
fpr, tpr, thresholds = sklearn.metrics.roc_curve(labels, combined_energs)
plt.figure()
lw = 2
plt.plot(fpr, tpr, color='darkorange',
         lw=lw, label='ROC curve (area = %0.2f)' % aucroc)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([-0.01, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
plt.savefig("ROC-MNIST-L")
plt.show()
# ### Using Gradient Magnitude
# +
# Same procedure as above but using energy-gradient magnitudes as the
# OOD score; again collect the global min/max of the negated scores.
energs_fm = jem.get_energ_gradient_magnitudes(X_train_full_fm)
energs_mnist = jem.get_energ_gradient_magnitudes(X_train_full)
energ_min_mnist = np.min(-energs_mnist)
energ_min_fm = np.min(-energs_fm)
energ_max_mnist = np.max(-energs_mnist)
energ_max_fm = np.max(-energs_fm)
energ_min = np.minimum(energ_min_mnist, energ_min_fm)
energ_max = np.maximum(energ_max_mnist, energ_max_fm)
# -
# Normalise the gradient-magnitude scores, compute AUC-ROC and plot the
# ROC curve (saved as ROC-MNIST-G).  Requires sklearn to be imported.
energs_fm_norm = (-energs_fm - energ_min) / (energ_max - energ_min)
energs_mnist_norm = (-energs_mnist - energ_min) / (energ_max - energ_min)
combined_energs = np.hstack((energs_mnist_norm, energs_fm_norm))
labels_true = np.ones(energs_mnist_norm.shape[0])
labels_false = np.zeros(energs_fm_norm.shape[0])
labels = np.hstack((labels_true, labels_false))
fpr, tpr, thresholds = sklearn.metrics.roc_curve(labels, combined_energs)
aucroc = sklearn.metrics.roc_auc_score(labels, combined_energs)
plt.figure()
lw = 2
plt.plot(fpr, tpr, color='darkorange',
         lw=lw, label='ROC curve (area = %0.2f)' % aucroc)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([-0.01, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
plt.savefig("ROC-MNIST-G")
plt.show()
# ### Using Confidence
# +
# Third OOD score: classifier confidence (max softmax probability).
# Collect the global min/max for the shared normalisation below.
preds_mnist = jem.classify(X_train_full)
pred_confidence_mnist = np.max(preds_mnist, axis=1)
preds_fm = jem.classify(X_train_full_fm)
pred_confidence_fm = np.max(preds_fm, axis=1)
energ_min_mnist = np.min(pred_confidence_mnist)
energ_min_fm = np.min(pred_confidence_fm)
energ_max_mnist = np.max(pred_confidence_mnist)
energ_max_fm = np.max(pred_confidence_fm)
energ_min = np.minimum(energ_min_mnist, energ_min_fm)
energ_max = np.maximum(energ_max_mnist, energ_max_fm)
# -
# Normalise the confidence scores (no negation here — higher confidence
# should indicate in-distribution), compute AUC-ROC and plot the ROC
# curve (saved as ROC-MNIST-C).  Requires sklearn to be imported.
energs_mnist_norm = (pred_confidence_mnist - energ_min) / (energ_max - energ_min)
energs_fm_norm = (pred_confidence_fm - energ_min) / (energ_max - energ_min)
combined_energs = np.hstack((energs_mnist_norm, energs_fm_norm))
labels_true = np.ones(energs_mnist_norm.shape[0])
labels_false = np.zeros(energs_fm_norm.shape[0])
labels = np.hstack((labels_true, labels_false))
fpr, tpr, thresholds = sklearn.metrics.roc_curve(labels, combined_energs)
aucroc = sklearn.metrics.roc_auc_score(labels, combined_energs)
plt.figure()
lw = 2
plt.plot(fpr, tpr, color='darkorange',
         lw=lw, label='ROC curve (area = %0.2f)' % aucroc)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([-0.01, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
plt.savefig("ROC-MNIST-C")
plt.show()
# ## FashionMNIST
# A slightly wider ResNet (k=5 instead of 4) for the FashionMNIST JEM.
ebm_model_fm = create_wide_residual_network((28, 28, 1), nb_classes=10, N=2, k=5)
ebm_model_fm.summary()
# ## FashionMNIST Classifier for IS/FID
def vgg_block(layer_in, n_filters, n_conv):
    """Append a VGG-style block to the functional graph.

    Applies `n_conv` same-padded 3x3 ReLU convolutions with `n_filters`
    channels each, then a 2x2 max pool with stride 2 (halving H and W).
    Returns the output tensor of the pooling layer.
    """
    x = layer_in
    for _ in range(n_conv):
        conv = keras.layers.Conv2D(n_filters, (3, 3), padding='same', activation='relu')
        x = conv(x)
    pool = keras.layers.MaxPooling2D((2, 2), strides=(2, 2))
    return pool(x)
# VGG-style FashionMNIST classifier (used for the IS/FID sample metrics).
input_ = keras.layers.Input(shape=(28, 28, 1))
vgg_1 = vgg_block(input_, 128, 3) # 14, 14, 128
vgg_2 = vgg_block(vgg_1, 256, 3) # 7, 7, 256
flatten = keras.layers.Flatten(name='flatten')(vgg_2)
output = keras.layers.Dense(10, activation='softmax')(flatten)
model = keras.Model(inputs=input_, outputs=output)
model.summary()
# First 5000 examples held out for validation, the rest for training.
X_valid, X_train = X_train_full_fm[:5000], X_train_full_fm[5000:]
y_valid, y_train = y_train_full_fm[:5000], y_train_full_fm[5000:]
model.compile(loss="sparse_categorical_crossentropy",
              optimizer="adam", metrics=["accuracy"])
# NOTE(review): patience equals the number of epochs (10), so early stopping
# can never trigger -- its only effect here is restore_best_weights.
early_stopping = keras.callbacks.EarlyStopping(patience=10,
                                               restore_best_weights=True)
history = model.fit(X_train, y_train, epochs=10,
                    validation_data=(X_valid, y_valid),
                    callbacks=[early_stopping])
# Train the joint energy-based model (JEM) on FashionMNIST with SGLD sampling
# (120 Markov steps, step size 2.0) and IS/FID tracking on generated samples.
optimizer = keras.optimizers.Adam(1e-4)
jem_fm = JEM(ebm_model_fm, optimizer)
history_fm = jem_fm.fit(X_train_full_fm, y_train_full_fm, 32, 25, optimizer, 0.0, 1.0, num_steps_markov=tf.constant(120),
                        var=tf.constant(1e-2 ** 2), step_size=tf.constant(2.0), callbacks_energy=[],
                        metrics_samples=[("IS", is_metric_callback), ("FID", fid_metric_callback)],
                        alpha=tf.constant(0.0), clip_thresh=tf.constant(1e6),
                        weight_ce_loss=tf.constant(1.0), save_best_weights=True, use_replay_buffer=True)
| JEM.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Initiate an H2OContext on top of the existing Spark session (`spark` must
# already be defined by the Sparkling Water shell/notebook environment).
from pysparkling import *
hc = H2OContext.getOrCreate(spark)
# +
# This is just helper function returning relative path to data files within sparkling-water project directories
def _locate(example_name):
return "../../../examples/smalldata/chicago/" + example_name
# Define file names
chicagoAllWeather = "chicagoAllWeather.csv"
chicagoCensus = "chicagoCensus.csv"
chicagoCrimes10k = "chicagoCrimes10k.csv.zip"
import h2o
# Upload the client-local CSVs into the H2O cluster.
# (h2o.import_file would expect a cluster-relative path; upload_file pushes
# the file from this client instead.)
f_weather = h2o.upload_file(_locate(chicagoAllWeather))
f_census = h2o.upload_file(_locate(chicagoCensus))
f_crimes = h2o.upload_file(_locate(chicagoCrimes10k))
# -
f_weather.show()
f_census.show()
f_crimes.show()
# Set time zone to UTC for date manipulation
h2o.cluster().timezone = "Etc/UTC"
# Transform weather table
## Remove 1st column (date)
f_weather = f_weather[1:]
# +
# Transform census table
## Remove all spaces from column names (causing problems in Spark SQL)
col_names = list(map(lambda s: s.strip().replace(' ', '_').replace('+','_'), f_census.col_names))
## Update column names in the table
#f_weather.names = col_names
f_census.names = col_names
# +
# Transform crimes table
## Drop useless columns (the first two)
f_crimes = f_crimes[2:]
## Replace ' ' by '_' in column names
col_names = list(map(lambda s: s.replace(' ', '_'), f_crimes.col_names))
f_crimes.names = col_names
## Refine date column
def refine_date_col(data, col):
    # Expand the timestamp column `col` of the H2OFrame `data` into calendar
    # feature columns, mutating the frame in place.
    data["Day"] = data[col].day()
    data["Month"] = data[col].month()
    data["Year"] = data[col].year()
    data["WeekNum"] = data[col].week()
    data["WeekDay"] = data[col].dayOfWeek()
    data["HourOfDay"] = data[col].hour()
    # Create weekend and season cols
    # Weekend flag: Saturday or Sunday.
    data["Weekend"] = ((data["WeekDay"] == "Sun") | (data["WeekDay"] == "Sat"))
    # Bin month into seasons; "Winter" appears twice so Jan-Feb and Dec both
    # map to Winter.
    data["Season"] = data["Month"].cut([0, 2, 5, 7, 10, 12], ["Winter", "Spring", "Summer", "Autumn", "Winter"])
# Derive calendar features from the crime timestamp, then drop the raw column.
refine_date_col(f_crimes, "Date")
f_crimes = f_crimes.drop("Date")
f_crimes.describe()
# -
# Expose H2O frames as Spark DataFrame
df_weather = hc.as_spark_frame(f_weather)
df_census = hc.as_spark_frame(f_census)
df_crimes = hc.as_spark_frame(f_crimes)
df_weather.show()
# +
# Use Spark SQL to join datasets
# Register DataFrames as tables
df_weather.createOrReplaceTempView("chicagoWeather")
df_census.createOrReplaceTempView("chicagoCensus")
df_crimes.createOrReplaceTempView("chicagoCrime")
# Join each crime with same-day weather (by year/month/day) and the census
# statistics of its community area.
crimeWithWeather = spark.sql("""SELECT
a.Year, a.Month, a.Day, a.WeekNum, a.HourOfDay, a.Weekend, a.Season, a.WeekDay,
a.IUCR, a.Primary_Type, a.Location_Description, a.Community_Area, a.District,
a.Arrest, a.Domestic, a.Beat, a.Ward, a.FBI_Code,
b.minTemp, b.maxTemp, b.meanTemp,
c.PERCENT_AGED_UNDER_18_OR_OVER_64, c.PER_CAPITA_INCOME, c.HARDSHIP_INDEX,
c.PERCENT_OF_HOUSING_CROWDED, c.PERCENT_HOUSEHOLDS_BELOW_POVERTY,
c.PERCENT_AGED_16__UNEMPLOYED, c.PERCENT_AGED_25__WITHOUT_HIGH_SCHOOL_DIPLOMA
FROM chicagoCrime a
JOIN chicagoWeather b
ON a.Year = b.year AND a.Month = b.month AND a.Day = b.day
JOIN chicagoCensus c
ON a.Community_Area = c.Community_Area_Number""")
# -
crimeWithWeather.show()
# Publish Spark DataFrame as H2OFrame with given name
crimeWithWeatherHF = hc.as_h2o_frame(crimeWithWeather, "crimeWithWeatherTable")
# +
# Transform selected String columns to categoricals (H2O factors) so the
# models treat them as discrete levels rather than strings.
cat_cols = ["Arrest", "Season", "WeekDay", "Primary_Type", "Location_Description", "Domestic"]
for col in cat_cols :
    crimeWithWeatherHF[col] = crimeWithWeatherHF[col].asfactor()
# +
# Split frame into two - we use one as the training frame and the second one as the validation frame
splits = crimeWithWeatherHF.split_frame(ratios=[0.8])
train = splits[0]
test = splits[1]
# Prepare column names: everything except the binary target "Arrest".
predictor_columns = train.drop("Arrest").col_names
response_column = "Arrest"
# +
# Create and train GBM model
from h2o.estimators.gbm import H2OGradientBoostingEstimator
# Prepare model based on the given set of parameters
# (bernoulli distribution = binary classification on the Arrest factor).
gbm_model = H2OGradientBoostingEstimator(ntrees = 50,
                                         max_depth = 3,
                                         learn_rate = 0.1,
                                         distribution = "bernoulli"
                                         )
# Train the model
gbm_model.train(x = predictor_columns,
                y = response_column,
                training_frame = train,
                validation_frame = test
                )
# -
# Show GBM model performance
gbm_model.model_performance(test)
# +
# Create and train deeplearning model (all-default hyperparameters).
from h2o.estimators.deeplearning import H2ODeepLearningEstimator
# Prepare model based on the given set of parameters
dl_model = H2ODeepLearningEstimator()
# Train the model
dl_model.train(x = predictor_columns,
               y = response_column,
               training_frame = train,
               validation_frame = test
               )
# -
# Show deeplearning model performance
dl_model.model_performance(test)
# +
# Create crime class which is used as a data holder on which prediction is done
from datetime import datetime
from pytz import timezone
from pyspark.sql import Row
def get_season(dt):
    """Map a month number (1-12) to its meteorological season name."""
    if 3 <= dt <= 5:
        return "Spring"
    if 6 <= dt <= 8:
        return "Summer"
    if 9 <= dt <= 10:
        return "Autumn"
    # Nov-Feb (and anything out of range) fall through to Winter.
    return "Winter"
def crime(date,
          iucr,
          primaryType,
          locationDescr,
          domestic,
          beat,
          district,
          ward,
          communityArea,
          fbiCode,
          minTemp = 77777,
          maxTemp = 77777,
          meanTemp = 77777,
          datePattern = "%m/%d/%Y %I:%M:%S %p",
          dateTimeZone = "Etc/UTC"):
    """Build a Spark Row describing one crime event, with the same derived
    date features that refine_date_col produces for the training data.

    minTemp/maxTemp/meanTemp default to the sentinel 77777 (unknown).
    """
    dt = datetime.strptime(date, datePattern)
    # Bug fix: datetime.replace() returns a NEW datetime and does not mutate
    # `dt`; the original call discarded the result, leaving `dt` naive.
    # (Attaching tzinfo via replace is fine for fixed-offset zones such as
    # Etc/UTC; for DST zones pytz recommends timezone(...).localize(dt).)
    dt = dt.replace(tzinfo=timezone(dateTimeZone))
    crime = Row(
        Year = dt.year,
        Month = dt.month,
        Day = dt.day,
        WeekNum = dt.isocalendar()[1],  # ISO week number
        HourOfDay = dt.hour,
        # Weekend flag: Saturday (5) or Sunday (6).
        Weekend = 1 if dt.weekday() == 5 or dt.weekday() == 6 else 0,
        Season = get_season(dt.month),
        WeekDay = dt.strftime('%a'), #gets the day of week in short format - Mon, Tue ...
        IUCR = iucr,
        Primary_Type = primaryType,
        Location_Description = locationDescr,
        Domestic = "true" if domestic else "false",
        Beat = beat,
        District = district,
        Ward = ward,
        Community_Area = communityArea,
        FBI_Code = fbiCode,
        minTemp = minTemp,
        maxTemp = maxTemp,
        meanTemp = meanTemp
    )
    return crime
# -
# Create crime examples
# Two hand-built crime records used below to demo scoring; temperatures are
# supplied directly so no weather join is needed at prediction time.
crime_examples = [
  crime(date="02/08/2015 11:43:58 PM", iucr=1811, primaryType="NARCOTICS", locationDescr="STREET",
        domestic=False, beat=422, district=4, ward=7, communityArea=46, fbiCode=18,
        minTemp = 19, meanTemp=27, maxTemp=32),
  crime(date="02/08/2015 11:00:39 PM", iucr=1150, primaryType="DECEPTIVE PRACTICE", locationDescr="RESIDENCE",
        domestic=False, beat=923, district=9, ward=14, communityArea=63, fbiCode=11,
        minTemp = 19, meanTemp=27, maxTemp=32)
]
# +
# For given crime and model return probability of crime.
def score_event(crime, model, censusTable):
    # NOTE(review): relies on the notebook globals `sc`, `sqlContext` and `hc`.
    # Wrap the single Row in a one-row Spark DataFrame.
    rdd = sc.parallelize([crime])
    crime_frame = sqlContext.createDataFrame(rdd)
    # Join table with census data (cross join filtered on community area).
    df_row = censusTable.join(crime_frame).where("Community_Area = Community_Area_Number")
    row = hc.as_h2o_frame(df_row)
    # Apply the same categorical conversions as at training time.
    row["Season"] = row["Season"].asfactor()
    row["WeekDay"] = row["WeekDay"].asfactor()
    row["Primary_Type"] = row["Primary_Type"].asfactor()
    row["Location_Description"] = row["Location_Description"].asfactor()
    row["Domestic"] = row["Domestic"].asfactor()
    predictTable = model.predict(row)
    # Column "true" holds P(Arrest == true) for the single scored row.
    probOfArrest = predictTable["true"][0,0]
    return probOfArrest
# Score each example crime with both models and report arrest probability
# as a percentage.
for i in crime_examples:
    arrestProbGBM = 100*score_event(i, gbm_model, df_census)
    arrestProbDLM = 100*score_event(i, dl_model, df_census)
    # Fix: the printed text read "best on"; "based on" is what was meant.
    print("""
|Crime: """+ str(i)+"""
| Probability of arrest based on DeepLearning: """+str(arrestProbDLM)+"""
| Probability of arrest based on GBM: """+str(arrestProbGBM)+"""
""")
# -
| py/examples/notebooks/ChicagoCrimeDemo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3
# language: python
# name: conda_python3
# ---
# <div style="text-align: right"> ↑ Ensure Kernel is set to ↑ </div><br><div style="text-align: right">
# conda_python3 </div>
# ## Data Download and Understanding
#
# In this notebook we will do the following
#
# 1. Create an S3 bucket
# 2. Download data from public s3 bucket owned by Raj
# 3. untar the data
# 4. upload to your s3 bucket created in first step
# 5. examine the data
# ### 1. Create an S3 bucket
#
# Change **your-unique-bucket-name** to your unique bucket name. <br>
# Note : Bucket names are globally unique
# !aws s3 mb s3://tmp-agm-dcsum1
# Now lets list all of the buckets in your account. You will see the bucket you just created.
# !aws s3 ls
# ### 2. Download Data
#
# Here we are going to copy the dataset we will be using for the labs today from our public bucket.
# !aws s3 cp s3://ml-materials/junctions-data.tar.gz .
# ### 3. Uncompress the downloaded data
#
# We now uncompress and extract the TAR archive on our notebook instance
# !tar -xzf junctions-data.tar.gz . --no-same-owner
# ### 4. Upload data to your bucket
#
# Now we copy the extracted dataset to your previously created **S3** bucket
# Change **your-unique-bucket-name** to your unique bucket name that you created earlier.<br>
# *Note : Bucket names are globally unique*
# !aws s3 sync ./data/ s3://tmp-agm-dcsum1
# ### 5. Quick Examination of images from 3 classes
#
# Feel free to play with changing the image names or plotting your own. We will do this again inside Tensorflow notebook.
#
# *Note: data below is from local drive*
# +
# %pylab inline
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# Show one sample training image per junction class, side by side.
f, axs = plt.subplots(1,3,figsize=(15,15))
img1 = mpimg.imread('./data/train/Priority/12481.png')
img2 = mpimg.imread('./data/train/Roundabout/53408.png')
img3 = mpimg.imread('./data/train/Signal/27258.png')
axs[0].imshow(img1)
axs[0].set_title("Priority")
axs[1].imshow(img2)
axs[1].set_title("Roundabout")
axs[2].imshow(img3)
axs[2].set_title("Signal")
plt.show()
| data-prep.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import syft as sy
from syft.core.remote_dataloader import RemoteDataset
from syft.core.remote_dataloader import RemoteDataLoader
import torch
import torchvision
import time
import matplotlib.pyplot as plt
# Join the Data Owner's Duet session.
duet = sy.join_duet(loopback=True)
# NOTE(review): this second call immediately overwrites the loopback session
# with a networked one -- presumably only one of the two joins is meant to run.
duet = sy.join_duet('4814df40925c14ae8f950342e3e8d776', network_url="http://ec2-18-216-8-163.us-east-2.compute.amazonaws.com:5000/")
# ## Run client until end
# Pointer to the remote dataset metadata published under the tag "meta".
meta_ptr = duet.store["meta"]
duet.store.pandas
# ### Create the Model in remote
#
# This model is the remote model and this has to be sent to the client site for training. Only disadvantage that I see is that the client must provide the computational resources for training a bigger model.
#
# While creating the module we must inherit from syft module and give a torch reference.
class MedMNISTModel(sy.Module):
    """CNN for 28x28 3-channel images: three conv/pool stages followed by a
    three-layer fully-connected head producing 6 class scores.

    Inherits from sy.Module (not torch.nn.Module) so the model can be sent
    to the remote Duet worker; all torch calls go through `torch_ref`.
    """

    def __init__(self, torch_ref):
        super(MedMNISTModel, self).__init__(torch_ref=torch_ref)
        nn = self.torch_ref.nn  # local alias for brevity
        self.conv_head = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        self.classification_head = nn.Sequential(
            nn.Linear(in_features=2304, out_features=128),
            nn.ReLU(),
            nn.Linear(in_features=128, out_features=128),
            nn.ReLU(),
            nn.Linear(in_features=128, out_features=6),
        )

    def forward(self, x):
        features = self.conv_head(x)
        # Flatten everything but the batch dimension before the linear head.
        flat = self.torch_ref.flatten(features, start_dim=1)
        return self.classification_head(flat)
# now we can create the model and pass in our local copy of torch
local_model = MedMNISTModel(torch)
# +
local_transform_1 = torchvision.transforms.ToTensor() # this converts PIL images to Tensors
local_transform_2 = torchvision.transforms.Normalize(0.3, 0.3) # normalize with mean 0.3, std 0.3
# compose our transforms
local_transforms = torchvision.transforms.Compose([local_transform_1, local_transform_2])
# -
# Run configuration shared by the remote training and local evaluation code.
args = {
    "batch_size": 64,
    "test_batch_size": 1000,
    "epochs": 10,
    "lr": 1.0,
    "gamma": 0.7,  # per-epoch LR decay factor (StepLR)
    "no_cuda": False,
    "dry_run": False,  # when True, stop after a single batch
    "seed": 42, # the meaning of life
    "log_interval": 10,  # batches between loss downloads/prints
    "save_model": True,
}
# +
from syft.util import get_root_data_path
# we will configure the test set here locally since we want to know if our Data Owner's
# private training dataset will help us reach new SOTA results for our benchmark test set
test_kwargs = {
    "batch_size": args["test_batch_size"],
}
test_data = torchvision.datasets.ImageFolder("./data/test/",transform=local_transforms)
test_loader = torch.utils.data.DataLoader(test_data,**test_kwargs)
test_data_length = len(test_loader.dataset)
print(test_data_length)
# -
# Send the model to the Data Owner; `model` becomes a remote pointer/proxy.
model = local_model.send(duet)
remote_torch = duet.torch
# Ask the DO whether CUDA is available (downloading the bool needs approval).
has_cuda = False
has_cuda_ptr = remote_torch.cuda.is_available()
has_cuda = bool(has_cuda_ptr.get(
    request_block=True,
    reason="To run test and inference locally",
    timeout_secs=5,  # change to something slower
))
print(has_cuda)
# +
use_cuda = not args["no_cuda"] and has_cuda
# now we can set the seed (on the remote side)
remote_torch.manual_seed(args["seed"])
device = remote_torch.device("cuda" if use_cuda else "cpu")
print(f"Data Owner device is {device.type.get()}")
# -
# Move the remote model to the chosen device.
if has_cuda:
    model.cuda(device)
else:
    model.cpu()
params = model.parameters()
optimizer = remote_torch.optim.Adadelta(params, lr=args["lr"])
# Decay the learning rate by `gamma` after every epoch.
scheduler = remote_torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=args["gamma"])
# ## Training loop
def train(model, torch_ref, train_loader, optimizer, epoch, args, train_data_length):
    """Run one training epoch on the REMOTE model.

    model        -- syft model pointer (already sent to the Data Owner)
    torch_ref    -- duet.torch, so all tensor ops happen on the DO's side
    train_loader -- pointer to the remote DataLoader
    Loss values stay remote; one scalar is downloaded (with DO approval)
    every `args["log_interval"]` batches for progress printing.
    NOTE(review): reads the notebook global `duet` for the remote Float.
    """
    # + 0.5 lets us math.ceil without the import
    train_batches = round((train_data_length / args["batch_size"]) + 0.5)
    print(f"> Running train in {train_batches} batches")
    if model.is_local:
        print("Training requires remote model")
        return
    model.train()
    # Bug fix: the running-loss accumulator used to be re-created INSIDE the
    # batch loop, resetting it every iteration; create the remote Float once.
    train_loss = duet.python.Float(0)  # remote accumulator for the epoch loss
    for batch_idx, data in enumerate(train_loader):
        data_ptr, target_ptr = data[0], data[1]
        optimizer.zero_grad()
        output = model(data_ptr)
        loss = torch_ref.nn.functional.nll_loss(output, target_ptr)
        loss.backward()
        optimizer.step()
        loss_item = loss.item()
        train_loss += loss_item
        if batch_idx % args["log_interval"] == 0:
            # Download the scalar loss for progress printing; None means the
            # Data Owner denied (or timed out on) the request.
            local_loss = loss_item.get(
                reason="To evaluate training progress",
                request_block=True,
                timeout_secs=5
            )
            if local_loss is not None:
                print("Train Epoch: {} {} {:.4}".format(epoch, batch_idx, local_loss))
            else:
                print("Train Epoch: {} {} ?".format(epoch, batch_idx))
        if batch_idx >= train_batches - 1:
            print("batch_idx >= train_batches, breaking")
            break
        if args["dry_run"]:
            break
def test_local(model, torch_ref, test_loader, test_data_length):
    """Evaluate on the local benchmark test set and print the accuracy.

    If `model` is still remote it is downloaded first (which requires the
    Data Owner's approval). NOTE: reads the notebook global `args`.
    """
    if not model.is_local:
        eval_model = model.get(
            request_block=True,
            reason="test evaluation",
            timeout_secs=5
        )
    else:
        eval_model = model
    # round(x + 0.5) == ceil(x) without importing math
    test_batches = round((test_data_length / args["test_batch_size"]) + 0.5)
    print(f"> Running test_local in {test_batches} batches")
    eval_model.eval()
    test_loss = 0.0
    correct = 0.0
    with torch_ref.no_grad():
        for batch_idx, (data, target) in enumerate(test_loader):
            output = eval_model(data)
            test_loss += torch_ref.nn.functional.nll_loss(output, target, reduction="sum").item()
            pred = output.argmax(dim=1)
            correct += pred.eq(target).sum().item()
            if args["dry_run"]:
                break
            if batch_idx >= test_batches - 1:
                print("batch_idx >= test_batches, breaking")
                break
    accuracy = correct / test_data_length
    print(f"Test Set Accuracy: {100 * accuracy}%")
# ## Dataloader remote transformation
# +
# Build the preprocessing pipeline ON THE REMOTE SIDE so raw data never
# leaves the Data Owner.
remote_torchvision = duet.torchvision
transform_1 = remote_torchvision.transforms.ToTensor() # this converts PIL images to Tensors
transform_2 = remote_torchvision.transforms.Normalize(0.3, 0.3) # this normalizes the dataset
remote_list = duet.python.List() # create a remote list to add the transforms to
remote_list.append(transform_1)
remote_list.append(transform_2)
# compose our transforms
transforms = remote_torchvision.transforms.Compose(remote_list)
# The DO has kindly let us initialise a DataLoader for their training set
train_kwargs = {
    "batch_size": args["batch_size"],
}
train_data_ptr = duet.syft.core.remote_dataloader.RemoteDataset(meta_ptr)
# NOTE(review): batch_size is hard-coded to 32 here and ignores train_kwargs
# (and args["batch_size"]) -- confirm which one is intended.
train_loader_ptr = duet.syft.core.remote_dataloader.RemoteDataLoader(train_data_ptr, batch_size=32)
#remote_torch.utils.data.DataLoader(train_data_ptr,**train_kwargs)
train_loader_ptr.load_dataset()
# call create_dataloader to create the real DataLoader object on remote side
train_loader_ptr.create_dataloader()
# +
def get_train_length(train_data_ptr):
    # len() on the remote dataset pointer yields the dataset size.
    train_data_length = len(train_data_ptr)
    return train_data_length
# Compute the length once and cache it across notebook re-runs.
try:
    if train_data_length is None:
        train_data_length = get_train_length(train_data_ptr)
except NameError:
    train_data_length = get_train_length(train_data_ptr)
print(f"Training Dataset size is: {train_data_length}")
# -
# ## Training call
args["dry_run"] = False # comment to do a full train
print("Starting Training")
for epoch in range(1, args["epochs"] + 1):
    epoch_start = time.time()
    print(f"Epoch: {epoch}")
    # remote training on model with remote_torch
    train(model, remote_torch, train_loader_ptr, optimizer, epoch, args, train_data_length)
    # local testing on model with local torch
    test_local(model, torch, test_loader, test_data_length)
    scheduler.step()
    epoch_end = time.time()
    print(f"Epoch time: {int(epoch_end - epoch_start)} seconds")
    if args["dry_run"]:
        break
print("Finished Training")
# Download the trained weights (requires DO approval) and save them locally.
model.get(
    request_block=True,
    reason="test evaluation",
    timeout_secs=5
).save("./duet_medical_mnist.pt")
# ## Inference
# +
def draw_image_and_label(image, label):
    """Render `image` in grayscale with its ground-truth `label` as title."""
    figure = plt.figure()
    plt.tight_layout()
    plt.imshow(image, cmap="gray", interpolation="none")
    plt.title("Ground Truth: {}".format(label))
def prep_for_inference(image):
    """Add batch and channel dims ((H, W) -> (1, 1, H, W)) and promote the
    tensor to floating point via the * 1.0 multiplication."""
    batched = image[None, None]  # same as .unsqueeze(0).unsqueeze(0)
    return batched * 1.0
# -
def classify_local(image, model):
    """Classify `image` with a LOCAL model.

    Returns (predicted_class_index, probability_tensor); if the model is
    still remote, returns (-1, tensor([-1])) after printing a hint.
    """
    if not model.is_local:
        print("model is remote try .get()")
        return -1, torch.Tensor([-1])
    batch = torch.Tensor(prep_for_inference(image))
    output = model(batch)
    # exp() of the output, squeezed to a 1-D vector of per-class scores.
    probs = torch.exp(output).squeeze()
    winners = probs == max(probs)
    index = torch.nonzero(winners, as_tuple=False)
    return index.squeeze(), probs
def classify_remote(image, model):
    # Classify `image` with the REMOTE model: the input tensor is created on
    # the Data Owner's side and only the final scores are downloaded, which
    # requires the DO to approve the request.
    # Returns (predicted_class_index, score_tensor); (-1, tensor([-1])) if
    # the model is local or permission is denied.
    if model.is_local:
        print("model is local try .send()")
        return -1, remote_torch.Tensor([-1])
    image_tensor_ptr = remote_torch.Tensor(prep_for_inference(image))
    output = model(image_tensor_ptr)
    preds = remote_torch.exp(output)  # exp of model output -- assumes log-scores, confirm
    preds_result = preds.get(
        request_block=True,
        reason="To see a real world example of inference",
        timeout_secs=10
    )
    if preds_result is None:
        print("No permission to do inference, request again")
        return -1, torch.Tensor([-1])
    else:
        # now we have the local tensor we can use local torch
        local_y = torch.Tensor(preds_result)
        local_y = local_y.squeeze()
        # Index of the maximal score = predicted class.
        pos = local_y == max(local_y)
        index = torch.nonzero(pos, as_tuple=False)
        class_num = index.squeeze()
        return class_num, local_y
# +
# Pick a random test image and display it with its ground-truth label.
import random

total_images = test_data_length  # 10000
# Bug fix: random.randint is inclusive on BOTH ends, so the upper bound must
# be total_images - 1 to stay a valid index.
index = random.randint(0, total_images - 1)
print("Random Test Image:", index)
count = 0
batch = index // test_kwargs["batch_size"]
# Position of the image within its batch; assumes equally sized batches
# (the last, possibly shorter, batch is guarded below).
batch_index = index % int(total_images / len(test_loader))
for tensor_ptr in test_loader:
    data, target = tensor_ptr[0], tensor_ptr[1]
    if batch == count:
        break
    count += 1
print(f"Displaying {index} == {batch_index} in Batch: {batch}/{len(test_loader)}")
# Bug fix: the guard used ">", which still allowed batch_index == len(data)
# and would raise an IndexError on the last (short) batch.
if batch_index >= len(data):
    batch_index = 0
image_1 = data[batch_index].reshape((28, 28))
label_1 = target[batch_index]
draw_image_and_label(image_1, label_1)
# -
# classify remote
class_num, preds = classify_remote(image_1, model)
print(f"Prediction: {class_num} Ground Truth: {label_1}")
print(preds)
# Download the trained model for local use (requires DO approval).
local_model = model.get(
    request_block=True,
    reason="To run test and inference locally",
    timeout_secs=5,
)
# classify local
class_num, preds = classify_local(image_1, local_model)
print(f"Prediction: {class_num} Ground Truth: {label_1}")
print(preds)
# NOTE(review): the lines below rebuild the remote dataset/dataloader that
# was already created earlier (train_data_ptr/train_loader_ptr) -- confirm
# whether this duplicate setup is still needed.
# create RemoteDataset object on remote side
rds_ptr = duet.syft.core.remote_dataloader.RemoteDataset(meta_ptr)
# create RemoteDataLoader object on remote side
rdl_ptr = duet.syft.core.remote_dataloader.RemoteDataLoader(rds_ptr, batch_size=32)
# call create_dataset to create the real Dataset object on remote side
rdl_ptr.load_dataset()
# call create_dataloader to create the real DataLoader object on remote side
rdl_ptr.create_dataloader()
| Server(DS).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Lab 4: Nonlinear Regression
#
# Task: Use `nn.Sequential` to add layers to a model; use a nonlinearity between layers to increase the model's capacity.
# ## Setup
from fastai.vision.all import *
# This function will make a `DataLoaders` object out of an arary dataset.
def make_dataloaders(x, y_true, splitter, batch_size):
    """Pair features with targets, split into train/valid indices with
    `splitter`, and wrap each split in a fastai DataLoader (training split
    shuffled) inside a DataLoaders object."""
    samples = L(zip(x, y_true))
    train_idx, valid_idx = splitter(samples)
    train_dl = DataLoader(samples[train_idx], batch_size=batch_size, shuffle=True)
    valid_dl = DataLoader(samples[valid_idx], batch_size=batch_size)
    return DataLoaders(train_dl, valid_dl)
# Here are utility functions to plot the first axis of a dataset and a model's predictions.
def plot_data(x, y):
    """Scatter the first column of x against the first column of y in light gray."""
    plt.scatter(x[:, 0], y[:, 0], s=.5, color='#bbbbbb')
def plot_model(x, model):
    """Plot the model's predictions over sorted inputs as a red curve."""
    xs = x.sort(dim=0).values       # sort so the line is drawn left-to-right
    preds = model(xs).detach()      # detach: no gradients needed for plotting
    plt.plot(xs[:, 0], preds[:, 0], 'r')
# The following `Callback` can be added to your `Learner` to plot the data and model after each epoch:
#
# ```
# learner = Learner(
# ...
# cbs=[ShowPredictions(), ShowGraphCallback()],
# ...
# ```
# Inspired by https://gist.github.com/oguiza/c7559da6de0e036f01d7dee15e2f15e4
class ShowPredictions(Callback):
    # fastai Callback that redraws a data + model-prediction plot after every
    # epoch, updating a single display cell instead of emitting new figures.
    # NOTE(review): after_epoch reads the notebook globals `x`, `y_true` and
    # `model`, so it always plots the most recently defined model/data.
    def __init__(self): self.graph_fig = None # keep a reference to a figure object to update
    def before_fit(self):
        # Skip plotting during lr_find and gather_preds runs.
        self.run = not hasattr(self.learn, 'lr_finder') and not hasattr(self, 'gather_preds')
    def after_fit(self): plt.close(self.graph_fig)
    def after_epoch(self):
        if self.graph_fig is None:
            # Lazily create the figure and a persistent display handle.
            self.graph_fig, self.graph_ax = plt.subplots(1)
            self.graph_out = display(self.graph_ax.figure, display_id=True)
        plt.sca(self.graph_ax)
        self.graph_ax.clear()
        # Plot code. Replace this if needed:
        plot_data(x, y_true)
        plot_model(x, model)
        # Update the graph.
        self.graph_out.update(self.graph_ax.figure)
# ## Task
# Most applications of neural net models work in very high dimensions (e.g., each individual pixel in an image!) so it's hard to visualize what the model is actually learning. Here, we'll revisit the simple linear model that we looked at in Fundamentals `006` and `009`, which learned to predict a single continuous outcome variable `y` from a single continuous input feature `x`. So we can visualize the network's behavior just like any other univariate function: by plotting y vs x.
#
# But this time the data isn't just a straight line; it's a fancy function of `x`.
# +
num_points = 5000
set_seed(40)
x = torch.rand(num_points, 1)
noise = torch.rand_like(x) * 1.
# Target: sine wave + linear trend + noise whose magnitude depends on x.
y_true = .5 * (x*6).sin() + x + (x - .75) * noise
# standardize y, just to make it well behaved.
y_true -= y_true.mean()
y_true /= y_true.std()
plot_data(x, y_true)
# -
# In `006` and `009`, we dealt with models that could only ever make straight lines. They couldn't even make a curve like `3 * x**2 + 2*x + 1`, let alone that one!
#
# But you may remember from your math or stats studies that a curve like that is actually linear if you transform your data, e.g., using `z = [x, x**2]` as the input; then the model is `3 * z[1] + 2 * z[0] + 1`, which is linear in `z`.
#
# So if we transform our data before giving it to the linear model, we can actually get interesting functions from a linear model. But how do we transform the data?
#
# The classic approach is to specify what transformation to make. e.g., in *polynomial regression* we put in a bunch of powers of `x` (`x**2`, `x**3`, ..., `x**10`, ...), but that gets numerically unstable with high powers. There are other "basis functions" that are better behaved, like *splines*.
#
# But neural nets take a different approach: they *learn* the transformation based on what is needed to accomplish its objective.
# **Instructions**:
#
# 1. **Fit a line to this data** (minimizing the MSE). Evaluate the MSE. By eye, how well does it fit?
# 2. **Add a layer**: Use `nn.Sequential` to put two `nn.Linear` layers back to back. Use 500 dimensions as the hidden dimension (the `out_features` of the first and the `in_features` of the second). Evaluate the MSE. How well does it fit?
# 3. **Add a nonlinearity**: Add a `nn.ReLU` between the two linear layers. Evaluate the MSE. How well does it fit?
#
# Details and tips are given inline below.
# ## Solution
# **Make a `DataLoaders` for this data**. This step has been done for you.
#
# We increased the dataset size and the batch size to make the learning better-behaved. Once you get this to work, you might see if you can deal with a smaller batch size or less data overall.
# Hold out 20% for validation (fixed seed so the split is reproducible).
splitter = RandomSplitter(valid_pct=0.2, seed=42)
batch_size = 100
dataloaders = make_dataloaders(x, y_true, splitter, batch_size=batch_size)
# ### Step 1: Fit a Line
#
# **Fit a line to this data (minimizing the MSE).**
#
# * Use a `nn.Linear` module as your `model`
# * Use `Learner` with `opt_func=SGD`, as you did in `009`.
# * Pass `cbs=[ShowPredictions(), ShowGraphCallback()]` to the `Learner` to show the training progress.
#
# Tune the learning rate and number of epochs until you reliably get an MSE below 0.76.
# +
# To determine the ideal MSE of a linear model, one approach is:
# from sklearn.linear_model import LinearRegression
# from sklearn.metrics import mean_squared_error
# mean_squared_error(
# to_np(y_true),
# LinearRegression().fit(x, y_true).predict(to_np(x))
# )
# +
# # your code here <<EOF
# Step 1 baseline: a single linear layer trained with plain SGD on MSE --
# it can only ever fit a straight line.
model = nn.Linear(in_features=1, out_features=1, bias=True)
loss_func = nn.MSELoss()
learner = Learner(
    dls=dataloaders,
    model=model,
    loss_func=loss_func,
    opt_func=SGD,
    metrics=[mae],
    cbs=[ShowPredictions(), ShowGraphCallback()],
)
learner.fit(n_epoch=10, lr=1e-1)
# EOF
# -
# Evaluate the MSE. By eye, how well does it fit?
# *Your narrative response here*
# ### Step 2: Add a Layer
#
# **Use `nn.Sequential` to put two `nn.Linear` layers back to back.**
#
# Use 500 dimensions as the hidden dimension (the `out_features` of the first and the `in_features` of the second).
#
# You may notice that the training is much less stable, and is rather sensitive to initializations (run the same thing multiple times and see that it will sometimes converge much better than other times). To improve training, try the following:
#
# * Instead of `learner.fit`, use `learner.fit_one_cycle`. This starts the learning rate low, gradually ramps it up, then ramps it back down. It also enables *momentum*, which tends to make gradient descent both faster and more stable. **Can `fit_one_cycle` handle a larger learning rate (`lr_max=XXX`) than `fit` (`lr=XXX`)?**
# * Instead of `opt_func=SGD`, omit the `opt_func` parameter so it uses the default "Adam" optimizer. Adam adapts the effective learning rate for every parameter based on how big its gradients have been recently. As Sebastian Ruder [puts it](https://ruder.io/optimizing-gradient-descent/index.html#adam): "Whereas momentum can be seen as a ball running down a slope, Adam behaves like a heavy ball with friction, which thus prefers flat minima in the error surface." **Does changing to Adam have much effect here?**
# +
model = nn.Sequential(
    # ...
    nn.Linear(in_features=1, out_features=500, bias=True),
    # ...
    # NOTE: two stacked linear layers with no nonlinearity in between still
    # compose to a single affine map, so the fit stays a straight line.
    nn.Linear(in_features=500, out_features=1, bias=True),
)
# ... <<EOF
loss_func = nn.MSELoss()
learner = Learner(
    dls=dataloaders,
    model=model,
    loss_func=loss_func,
    #opt_func=SGD,
    metrics=[mae],
    cbs=[ShowPredictions(), ShowGraphCallback()],
)
learner.fit_one_cycle(n_epoch=20, lr_max=1e-1)
# EOF
# -
# -
# Evaluate the MSE. By eye, how well does it fit?
# *Your narrative response here*
# ### Step 3: **Add a nonlinearity**
#
# Add a `nn.ReLU` between the two linear layers.
#
# * Definitely use `fit_one_cycle` here!
# * You will probably need more epochs to fit this model.
# * Try several different `set_seed`s here to ensure that your results aren't a fluke.
# +
# set_seed(...)
set_seed(10)
# ... << EOF
model = nn.Sequential(
    nn.Linear(1, 500),
    nn.ReLU(),  # the nonlinearity between the layers is what lets the fit bend
    nn.Linear(500, 1)
)
learner = Learner(
    dls=dataloaders,
    model=model,
    loss_func=loss_func,  # reuses the MSELoss defined in the Step 2 cell
    metrics=[mae],
    cbs=[ShowPredictions(), ShowGraphCallback()]
)
learner.fit_one_cycle(n_epoch=30, lr_max=.1)
# EOF
# -
# Evaluate the MSE. How well does it fit?
# *your narrative response here*
# ## Analysis
# Despite having a hidden layer like the final model, the second model never gave us anything more than a straight line. Why not?
# *your narrative response here*
# Watch the model plot in Step 3 as the model fits (use at least 30 epochs to be able to watch this clearly). What do you notice about the plot during three regimes of training:
#
# 1. Within the first epoch (right at the start of training)
# 2. Mid-training
# 3. In the last epoch or two
# *your narrative response here*
# ## Extension (optional)
# What effect does the size of the hidden layer have on the quality of the fit?
# What effect does the choice of nonlinearity ("activation function") have? Try a few others: `Tanh`, `Sigmoid`, `LeakyReLU`, `PReLU`, ... Early research in neural nets used smooth functions like `Tanh` and `Sigmoid` almost exclusively; do you think the `ReLU` was a good idea?
| portfolio/narrative/lab04-nn-regression-solutions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# ## Variables
# ---
# In programming, we often compute many values that we want to save so that we can use the result in a later step. For example, let's say that we want to find the number of seconds in a day. We can easily calculate this with the following:
# <p style="text-align: center">$60 * 60 * 24 = 86400$ seconds</p>
# However, let's say that your friend Alexander asked you how many seconds there are in three days. We could, of course, perform the calculation in a similar manner:
# <p style="text-align: center">$(60 * 60 * 24) * 3 = 259200$ seconds</p>
# But we see that we repeated the calculation in parentheses above. Instead of doing this calculation again, we could have saved the result from our first step (calculating the number of seconds in a day) as a variable.
# +
# This is Python code that assigns variables.
# The name to the left of the equals sign is the variable name.
# The value to the right of the equals sign is the value of the variable.
# Press Shift-Enter to run the code and see the value of our variable!
seconds_in_day = 60 * 60 * 24 # This is equal to 86400.
seconds_in_day  # a bare expression on the cell's last line: Jupyter displays its value
# -
# Then, we can simply multiply this variable by three to get the number of seconds in *three* days:
# +
# The code below takes the number of seconds in a day (which we calculated in the previous code cell)
# and multiplies it by 3 to find the number of seconds in 3 days.
seconds_in_three_days = seconds_in_day * 3 # This is equal to 259200.
seconds_in_three_days
# -
# As you can see, variables can be used to simplify calculations, make code more readable, and allow for repetition and reusability of code.
| intro/variables-intro.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Exploratory Data Analysis
#
# Now that we've cleaned and parsed the data, we can start exploring it and looking for some initial insights.
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
plt.style.use('seaborn')
import numpy as np
import seaborn as sns
# ## Data Loading
df = pd.read_csv('../Data/parsed_data.csv')
# We're going to drop the features that we've noted to be difficult to work with, in the future this may change as I learn more.
drop_features = ['plot', 'metacritic', 'production', 'actors', 'poster', 'director', 'keywords']
df = df.drop(drop_features, axis = 1)
# Now we have to fix datatypes that we lost in conversion.
# NOTE(review): the hard-coded column ranges below presumably select the
# one-hot genre/rating indicator columns -- confirm against parsed_data.csv.
df[df.columns[12:35]] = df[df.columns[12:35]].astype('category')
df[df.columns[37:53]] = df[df.columns[37:53]].astype('category')
# Restore datetime dtypes lost in the CSV round-trip.
df['release_date'] = pd.to_datetime(df['release_date'])
df['dvd_release'] = pd.to_datetime(df['dvd_release'])
# ## Exploration
#
# Let's go about exploring the data keeping in mind some questions we want to answer:
#
# 1. Do certain genres lend themselves to higher return?
#
# 2. Do remakes, tent-poles and sequels perform differently?
#
# 3. How does the time of year, weather and economic trends influence box office performance?
#
# Let's start with addressing 1.
#
# ### How do we measure return?
plt.hist(df['inflated_gross'], bins=100)
plt.xlabel('Adjusted Box Office Gross')
# Even at 100 bins, the data is very heavily skewed in that small section of the data. Perhaps we have to do some feature engineering. We can look at return in terms of Box Office Gross, or as a percentage of its budget. Let's see how using a ratio between Box Office Gross and Budget performs.
tmp_df = df[df['inflated_budget']>0]
pct_return = tmp_df['inflated_gross']/tmp_df['inflated_budget']
plt.hist(pct_return, bins = 500)
plt.ylim(ymin=0.0, ymax=600)
# At first I thought there was an issue with the code, but actually the issue became evident after increasing the granularity to an extreme level (bins = 500). There is a massive outlier at 7000. Next, there is a HUGE number of points with 0 as their ratio. This is the graph with only points greater than 0, limited x range, and even further increased granularity.
#pct_return.drop(pct_return[pct_return == max(pct_return)].index)
plt.hist(pct_return[pct_return>0], bins = 10000)
plt.xlim(xmin = 0, xmax = 5)
# This histogram is clearly still in the same shape as the one for just adjusted box office gross. We'll try a log transformation to get around the skewness.
# +
import math
plt.hist(pct_return[pct_return>0].apply(lambda x: math.log(x)), bins=30)
plt.xlabel('log(pct_return), pct_return>0')
# -
plt.hist((pct_return+1).apply(lambda x: math.log(x)), bins=30)
plt.xlabel('log(pct_return+1)')
# Clearly adding 1 to all of the pct_return values is erroneous. This is because many of the values are between 0 and 1, indicating movies that lost money. If we add 1 to them, all of those values are suddenly shifted and they lose all meaning. Let's try replacing only the 0's with 1's.
tmp_return = pct_return
tmp_return[pct_return==0] = tmp_return[pct_return==0]+1
plt.hist(tmp_return.apply(lambda x: math.log(x)), bins=30)
plt.xlabel('log(pct_return), 0s mapped to 1')
# The histogram bears more useful information than the above one, however there is still an issue with the huge amount of 0s.
plt.hist((df['inflated_gross']+1).apply(lambda x: math.log(x)), bins=30)
plt.xlabel('log(inflated_gross+1)')
# We've added 1 to each of the values because adding constants doesn't change variance, but allows us to avoid taking the log of 0. We have some separation between all of the points now, but there is still a huge pillar right around zero. This is expected because we had a lot of points at zero. It's unreasonable to believe that a movie grossed absolutely nothing at all. Let's look at this histogram with only values above 0.
plt.hist(df['inflated_gross'][df['inflated_gross']>0].apply(lambda x: math.log(x)), bins=30)
plt.xlabel('log(inflated_gross), inflated_gross>0')
# Based on these histograms, we'll most likely have to stick to **datapoints in which 'inflated_gross' and pct_return are greater than 0**, then apply **log transformations** to them.
#
# ### What factors determine the return of your movie?
df.corr()
zero_index = df[df['inflated_budget']==0].index
tmp_df = df.drop(zero_index)
tmp_df.corr()
def corr_mat(corr):
    """Plot a lower-triangle heatmap of a correlation matrix.

    Args:
        corr: square pandas DataFrame of pairwise correlations (e.g. df.corr()).
    """
    # Mask the upper triangle (including the diagonal) so each pair appears once.
    # np.bool was deprecated in NumPy 1.20 and removed in 1.24; use builtin bool.
    mask = np.zeros_like(corr, dtype=bool)
    mask[np.triu_indices_from(mask)] = True
    f, ax = plt.subplots(figsize=(11, 9))
    # Diverging palette centered at zero: warm = positive, cool = negative.
    cmap = sns.diverging_palette(220, 10, as_cmap=True)
    sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0,
                square=True, linewidths=.5, cbar_kws={"shrink": .5})
corr_mat(df.corr())
def corrdot(*args, **kwargs):
    """Draw one correlation 'dot' panel (for use with seaborn pair grids).

    Expects two aligned pandas Series as the first two positional
    arguments; draws a circle whose size and color encode their Pearson
    correlation, with the coefficient printed on top.
    """
    x, y = args[0], args[1]
    r = x.corr(y, 'pearson')
    label = f"{r:2.2f}".replace("0.", ".")
    axes = plt.gca()
    axes.set_axis_off()
    # Scale both marker area and annotation size with |r| so strong
    # correlations stand out visually.
    axes.scatter([.5], [.5], abs(r) * 10000, [r], alpha=0.6, cmap="coolwarm",
                 vmin=-1, vmax=1, transform=axes.transAxes)
    axes.annotate(label, [.5, .5,], xycoords="axes fraction",
                  ha='center', va='center', fontsize=abs(r) * 40 + 5)
# NOTE(review): this redefines corrdot, duplicating the definition earlier in
# this notebook; the later definition wins at runtime. One of the two copies
# should be deleted.
def corrdot(*args, **kwargs):
    """Draw one correlation 'dot' panel: a circle sized/colored by the
    Pearson correlation of the two Series passed positionally, with the
    coefficient annotated on top."""
    corr_r = args[0].corr(args[1], 'pearson')
    corr_text = f"{corr_r:2.2f}".replace("0.", ".")
    ax = plt.gca()
    ax.set_axis_off()
    # Marker area and font size both grow with |corr_r|.
    marker_size = abs(corr_r) * 10000
    ax.scatter([.5], [.5], marker_size, [corr_r], alpha=0.6, cmap="coolwarm",
               vmin=-1, vmax=1, transform=ax.transAxes)
    font_size = abs(corr_r) * 40 + 5
    ax.annotate(corr_text, [.5, .5,], xycoords="axes fraction",
                ha='center', va='center', fontsize=font_size)
tmp_df = df.drop(['Unnamed: 0', 'imdbid','Budget','Box Office Gross'], axis = 1)
cmap = sns.diverging_palette(220, 10, as_cmap=True)
sns.heatmap(tmp_df.corr(), cmap=cmap)
| Analysis/Exploratory Data Analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Using the fixed pose engine
# +
import os
import numpy as np
from pcg_gazebo.generators import WorldGenerator
world_gen = WorldGenerator()
# -
try:
    # __file__ exists when this runs as a script: resolve the mesh path
    # relative to this file's directory.
    mesh_filename = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        '..',
        'examples',
        'meshes',
        'monkey.stl')
except NameError:
    # __file__ is undefined inside a notebook; fall back to a path relative
    # to the current working directory. (A bare `except:` here would also
    # swallow KeyboardInterrupt/SystemExit -- only NameError is expected.)
    mesh_filename = os.path.abspath('../examples/meshes/monkey.stl')
print('Mesh filename = ', mesh_filename, os.path.exists(mesh_filename))
world_gen.add_asset(
tag='monkey',
description=dict(
type='mesh',
args=dict(
visual_mesh=mesh_filename,
name='monkey',
color='xkcd'
)
)
)
world_gen.add_engine(
engine_name='fixed_pose',
tag='engine',
models=['monkey'],
poses=[
[5, 0, 0, 0, 0, 0],
[1.2, 2.5, 0, 0, 0.3, 0],
[5.5, 3.2, 1.5, 0, 0, 0.5],
[-1.0, -1.0, 0.2, 0, 0, 0]
]
)
world_gen.run_engines()
print(world_gen.world.models.keys())
world_gen.world.show()
# 
| examples/gen_fixed_pose_engine.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.0.3
# language: julia
# name: julia-1.0
# ---
# using GuSTO
include("../src/GuSTO.jl")
using AstrobeeRobot
robot = Freeflyer()
model = FreeflyerSE2()
# +
# Environment: the Stanford free-flyer table, populated with box obstacles.
env = Table(:stanford);
# Obstacle centers [x; y; z] on the table surface, in meters.
centers = Vector()
push!(centers, [0.460; 0.315; 0.0])
push!(centers, [0.201; 1.085; 0.0])
push!(centers, [0.540; 2.020; 0.0])
push!(centers, [1.374; 0.196; 0.0])
push!(centers, [1.063; 1.354; 0.0])
push!(centers, [1.365; 2.322; 0.0])
push!(centers, [2.221; 0.548; 0.0])
push!(centers, [2.077; 1.443; 0.0])
push!(centers, [3.098; 1.186; 0.0])
push!(centers, [2.837; 2.064; 0.0])
# Common obstacle extents [x; y; z] plus a uniform safety-inflation margin.
widths = [0.27; 0.27; 0.127]
inflation = 0.05*ones(3)
# Add each obstacle as an inflated axis-aligned box, raised by 0.5*widths[1]
# so it sits above the table plane.
for idx in 1:length(centers)
    push!(env.obstacle_set, HyperRectangle(Vec3f0(centers[idx]-0.5*widths-inflation+[0.;0.;0.5*widths[1]]), Vec3f0(widths+2*inflation)))
end
# Trajectory-optimization setup: N knot points over a fixed final time.
N = 200
tf_guess = 200.
# Initial and goal states. NOTE(review): layout presumably
# [x; y; heading; vx; vy; heading rate] for FreeflyerSE2 -- confirm.
x_init = [0.2; 2.4; 0; 0; 0; 0]
goal_set = GoalSet()
x_goal = [3.; 0.5; 0; 0.05; -0.05; 0]
add_goal!(goal_set, Goal(PointGoal(x_goal), tf_guess, model))
PD = ProblemDefinition(robot, model, env, x_init, goal_set)
TOP = TrajectoryOptimizationProblem(PD, N, tf_guess, fixed_final_time=true)
TOSgusto = TrajectoryOptimizationSolution(TOP)
# Solve with GuSTO sequential convex programming via Gurobi; the Ipopt
# variant is kept below as an alternative.
SCPS = solve_SCP!(TOSgusto, TOP, solve_gusto_jump!, init_traj_straightline, "Gurobi", OutputFlag=0);
# SCPS = solve_SCP!(TOSgusto, TOP, solve_gusto_jump!, init_traj_straightline, "Ipopt", print_level=0);
# -
@show TOSgusto.SCPS.converged
@show TOSgusto.SCPS.iterations
@show TOSgusto.SCPS.total_time
@show TOSgusto.SCPS.accept_solution
@show TOSgusto.SCPS.solver_status
@show TOSgusto.SCPS.scp_status
@show TOSgusto.SCPS.convergence_measure
@show TOSgusto.SCPS.param.alg.ω_vec
@show TOSgusto.SCPS.param.alg.Δ_vec
@show TOSgusto.SCPS.J_true
@show TOSgusto.SCPS.dual
;
using Plots
gr(fmt=:png)
plot()
plot!([collect(1:N)],[sqrt.(TOSgusto.SCPS.traj.X[4,:].^2 + TOSgusto.SCPS.traj.X[5,:].^2)],
xlabel = "t",
ylabel = "v",
legend = :none)
plot!()
# +
#### Code for animating free-flyer trajectories
vis = Visualizer()
delete!(vis)
vis[:goal]
setobject!(vis[:goal][:goal],
Object(HyperSphere(Point3(x_goal[1:2]..., robot.r), 0.1),
MeshBasicMaterial(color=RGBA(0,1.0,0.,0.3))))
vis[:table]
table_dim = env.worldAABBmax-env.worldAABBmin
setobject!(vis[:table][:table],
Object(HyperRectangle(env.worldAABBmin..., table_dim...),
MeshBasicMaterial(color=RGBA(0,1.0,0.,0.3))))
vis[:obstacles]
for (idx,obs) in enumerate(env.obstacle_set)
setobject!(vis[:obstacles][Symbol(string("obs",idx))],
Object(obs,MeshBasicMaterial(color=RGBA(1.0,0.,0.,0.3))))
end
vis[:robot]
setobject!(vis[:robot][:base],
Object(Cylinder(Point3f0(0,0,0.),Point3f0(0.,0.,0.5),Float32(robot.r)),MeshBasicMaterial(color=RGBA(0,0.,1.,0.7))))
up = [0;0;1.]
q0 = vec2quat(up,x_init[3])
Qs = Vector{Vector{Float64}}()
speed_factor = 10
for k in 1:speed_factor:N
q = [quat_inv(quat_multiply(mrp2quat([0.; 0.; tan(TOSgusto.SCPS.traj.X[3,k]/4)]), q0)); [TOSgusto.SCPS.traj.X[1:2,k]; robot.r]]
push!(Qs,q)
end
# Animate the solved trajectory in MeshCat, one frame every 5 ticks per knot.
anim = MeshCat.Animation()
for k in 1:N
    atframe(anim, vis, 5*k) do frame
        # NOTE(review): earlier cells read the trajectory as
        # TOSgusto.SCPS.traj.X, but here TOSgusto.traj.X is used -- confirm
        # both accessors exist. Also, state 3 appears to be the SE2 heading
        # elsewhere in this notebook, yet it is used as the z-translation
        # here -- verify this is intended.
        settransform!(frame[:robot], Translation(TOSgusto.traj.X[1,k],
                                        TOSgusto.traj.X[2,k], TOSgusto.traj.X[3,k]))
    end
end
setanimation!(vis, anim)
# plot_in_cell = true
plot_in_cell = false
plot_in_cell ? IJuliaCell(vis) : open(vis)
# -
| examples/freeflyerSE2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Switchiness- miRNA
import NotebookImport
from metaPCNA import *
store = pd.HDFStore(MIRNA_STORE)
cc = [c[1:] for c in store.keys() if c[1:] in codes.unique()]
mirna_df = pd.concat([store[c] for c in cc], 1)
standardize = lambda s: s.sub(s.mean(1), axis=0).div(s.std(1), axis=0)
df = mirna_df
df_s2 = df.groupby(codes, axis=0, level=0).apply(standardize)
matched_mir = pd.read_hdf(MIRNA_STORE, 'matched_tn')
dx_mir = binomial_test_screen(matched_mir, fc=1.)
dx_mir = dx_mir[dx_mir.num_dx > 300]
def detrend(x,y):
    """Regress y on x and map y back onto the x scale.

    Fits y ~ slope*x + intercept and returns (y - intercept) / slope, i.e.
    the x value the fitted line would associate with each y.

    NOTE(review): assumes the metaPCNA helpers behave as their names
    suggest -- ``match_series`` aligns the two series on a common index and
    ``linear_regression`` returns a mapping with 'slope' and 'intercept'
    keys. Confirm against metaPCNA.
    """
    x, y = match_series(x, y)
    reg = linear_regression(x, y)
    #adj = (x * reg['slope']) + reg['intercept']
    # Invert the fit rather than evaluating it (see commented line above).
    adj = (y - reg['intercept']) / reg['slope']
    return adj
# Per-patient change in proliferation (metaPCNA) between tumor ('01') and
# adjacent-normal ('11') samples; keep only patients whose proliferation
# increased in the tumor.
dp = -1*meta_pcna_all.unstack()[['01','11']].dropna().T.diff().ix['11']
dp = dp[dp > 0]
dp.name = 'proliferation change'
# Tumor-minus-normal expression change per miRNA. Exact zeros are treated as
# missing, and miRNAs measured in fewer than 300 pairs are dropped
# (ti presumably returns the index where the condition holds -- confirm).
# NOTE(review): DataFrame.ix was removed in pandas 1.0; this notebook
# assumes an old pandas -- use .loc if ever ported.
dx = matched_mir
dx = (dx.xs('01',1,1) - dx.xs('11',1,1)).replace(0, np.nan)
dx = dx.ix[ti(dx.count(1) > 300)]
# Correlate each miRNA's expression change with the proliferation change.
pcna_corr = dx.T.corrwith(dp)
len(dp.index.intersection(dx.columns))
pcna_corr.hist(bins=20)
dx_mir.sort('p').head()
series_scatter(dx_mir.frac, pcna_corr)
linear_regression(dx_mir.frac, pcna_corr)
# +
dp = -1*meta_pcna_all.unstack()[['01','11']].dropna().T.diff().ix['11']
dp = dp[dp > 0]
dp.name = 'proliferation change'
dx = matched_mir
dx = (dx.xs('01',1,1) - dx.xs('11',1,1)).replace(0, np.nan)
dx = dx.ix[ti(dx.count(1) > 300)]
pcna_corr_mir = dx.T.corrwith(dp)
dx = matched_rna
dx = (dx.xs('01',1,1) - dx.xs('11',1,1)).replace(0, np.nan)
dx = dx.ix[ti(dx.count(1) > 300)]
pcna_corr_gene = dx.T.corrwith(dp)
# -
fig, ax = subplots()
draw_dist(pcna_corr_mir, ax=ax)
draw_dist(pcna_corr_gene, ax=ax)
fig, ax = subplots()
draw_dist(pcna_corr, ax=ax)
draw_dist(dx_mir.frac-.5, ax=ax)
binom_test((pcna_corr > 0).sum(), pcna_corr.size)
dx = matched_rna.xs('01',1,1) - matched_rna.xs('11',1,1)
pearson_pandas(dx.sum(), dp)
dx = matched_mir.xs('01',1,1) - matched_mir.xs('11',1,1)
pearson_pandas(dx.sum(), dp)
(pcna_corr > 0).mean(), (dx_mir.frac > .5).mean()
pcna_corr.hist()
pearson_pandas(dx_mir.frac, pcna_corr)
p2 = pd.concat([gene_stats.ix['metaPCNA'], pcna_corr])
f2 = pd.concat([gene_stats.ix['f_up'], dx_mir.frac])
m = pd.rolling_mean(f2.ix[p2.order().index].dropna(),
window=500, center=True).dropna()
m2 = pd.rolling_mean(f2.ix[p2.order().index].dropna(),
window=50, center=True).dropna()
m = m.combine_first(m2)
f2_win = (f2 - m).dropna()
f2_win.name = 'f_up\n(detrended)'
#d2 = detrend(dp_dx, dx_mir.frac) - dp_dx
d2 = f2_win
series_scatter(pcna_corr, d2)
def switch_plot(g):
    """Plot miRNA *g* against proliferation (metaPCNA), tumor vs. normal.

    Left panel: raw expression of *g* restricted to KIRC samples.
    Right panel: per-cancer-type standardized expression (df_s2) across all
    samples. Tumor samples ('01') use colors[1]; adjacent-normal samples
    ('11') use colors[0].

    NOTE(review): relies on notebook globals (meta_pcna_all, mirna_df,
    df_s2, codes, colors, meta_pcna) and old-pandas .ix indexing.
    """
    fig, axs = subplots(1,2, figsize=(9,4))
    ax=axs[0]
    series_scatter(meta_pcna_all[:,'01'], mirna_df.ix[g].ix[ti(codes=='KIRC')][:,'01'],
                   color=colors[1], ax=ax, ann=None)
    series_scatter(meta_pcna_all[:,'11'], mirna_df.ix[g].ix[ti(codes=='KIRC')][:,'11'],
                   color=colors[0], ax=ax, ann=None)
    ax=axs[1]
    series_scatter(meta_pcna_all[:,'01'], df_s2.ix[g][:,'01'],
                   color=colors[1], ax=ax, ann=None, s=20, alpha=.3)
    series_scatter(meta_pcna_all[:,'11'], df_s2.ix[g][:,'11'],
                   color=colors[0], ax=ax, ann=None, s=20)
    # Shared cosmetics: same x-range on both panels for comparability.
    for ax in axs:
        prettify_ax(ax)
        ax.set_xbound(meta_pcna.min(), meta_pcna.max())
    fig.tight_layout()
pcna_corr.order().tail()
switch_plot('hsa-mir-9-1')
paired_bp_tn_split(matched_mir.ix['hsa-mir-130b'], codes)
paired_bp_tn_split(matched_mir.ix['hsa-mir-122'], codes)
switch_plot('hsa-mir-122')
switch_plot('hsa-mir-21')
switch_plot('hsa-mir-15a')
switch_plot('hsa-mir-204')
| Notebooks/switchiness_miR.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/nasim-aust/Bangla-Digit-Recognition-using-CNN/blob/master/Bengali_Digit_Receognizer%20using%20CNN.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] _cell_guid="d93e43c9-8a48-41c7-93f2-1dbeec38f80e" _uuid="b2538ad6607dc541ad42195df42b3c70ecdd59a3" id="asmJRuOT5vvY" colab_type="text"
# # Bangla Digit Recognizer
# + [markdown] _cell_guid="8b668dd4-4ea0-4894-bf75-390f6515286f" _uuid="56ee22f1b808d3ae49db7543cbc439b3ab964426" id="y6Xk3f-K5vva" colab_type="text"
#
# **Step 1 : Process the data.<br>
# Step 2 : Design the model.<br>
# Step 3 : Train the model.**
# + _cell_guid="c386cfd1-3d2e-4bd0-b1bf-2c72698afe96" _uuid="4cdc8ebf9bc167498cace36993800c819556eb04" id="tZsTSfYK5vvb" colab_type="code" colab={} outputId="dd55c73e-a500-41ac-bac9-a7a2aa5cccf8"
# Importing necessary libraries
import numpy as np
import os
import glob
import cv2
import matplotlib.pyplot as plt
import pandas as pd
import pickle
from keras.utils import to_categorical
from keras.layers import Dense, Input, Conv2D, Flatten, MaxPooling2D, Activation,Dropout
from keras.models import Model
from keras.callbacks import ModelCheckpoint
from keras import backend as K
from __future__ import print_function
import keras
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.utils import to_categorical
from keras import backend as k
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
# + [markdown] _cell_guid="49f11a3e-7149-4877-b2d1-46bcc3876d3b" _uuid="12815b115dcf40bd1d14daa942fbe18dfc330ebf" id="XHczvaE25vvj" colab_type="text"
# ## Step 1: Process the data
# + [markdown] _cell_guid="db9ec487-5a21-4289-9b9a-f2649e94955a" _uuid="5e998f2691200a0019ad2e3940d5acee12e628be" id="QlFAsrlV5vvk" colab_type="text"
# Our dataset comes from six different sources.
# + _cell_guid="a42ae61e-565e-48ae-b618-370dc4bd3718" _uuid="8f087bcd8bb6c49f440bd5bea4769ddd692d291e" id="tNtw5xWY5vvl" colab_type="code" colab={}
#Declaring constants
FIG_WIDTH=20 # Width of figure
HEIGHT_PER_ROW=3 # Height of each row when showing a figure which consists of multiple rows
RESIZE_DIM=32 # The images will be resized to 32x32 pixels
# + _uuid="5dfa2840e7fa2af7ca3ff13dcb92308f41caf244" id="lpYm-csX5vvp" colab_type="code" colab={}
# Root of the NumtaDB dataset (one directory per train/test split).
data_dir=os.path.join('..','NumtaDB_with_aug')
# Image filepaths for each training split (a-e).
paths_train_a=glob.glob(os.path.join(data_dir,'training-a','*.png'))
paths_train_b=glob.glob(os.path.join(data_dir,'training-b','*.png'))
paths_train_e=glob.glob(os.path.join(data_dir,'training-e','*.png'))
paths_train_c=glob.glob(os.path.join(data_dir,'training-c','*.png'))
paths_train_d=glob.glob(os.path.join(data_dir,'training-d','*.png'))
paths_train_all=paths_train_a+paths_train_b+paths_train_c+paths_train_d+paths_train_e
# Image filepaths for each testing split; split 'f' also contains .JPG
# files, and 'auga'/'augc' are augmented versions of splits 'a'/'c'.
paths_test_a=glob.glob(os.path.join(data_dir,'testing-a','*.png'))
paths_test_b=glob.glob(os.path.join(data_dir,'testing-b','*.png'))
paths_test_e=glob.glob(os.path.join(data_dir,'testing-e','*.png'))
paths_test_c=glob.glob(os.path.join(data_dir,'testing-c','*.png'))
paths_test_d=glob.glob(os.path.join(data_dir,'testing-d','*.png'))
paths_test_f=glob.glob(os.path.join(data_dir,'testing-f','*.png'))+glob.glob(os.path.join(data_dir,'testing-f','*.JPG'))
paths_test_auga=glob.glob(os.path.join(data_dir,'testing-auga','*.png'))
paths_test_augc=glob.glob(os.path.join(data_dir,'testing-augc','*.png'))
paths_test_all=paths_test_a+paths_test_b+paths_test_c+paths_test_d+paths_test_e+paths_test_f+paths_test_auga+paths_test_augc
# Label CSVs (filename -> digit) for the training splits.
path_label_train_a=os.path.join(data_dir,'training-a.csv')
path_label_train_b=os.path.join(data_dir,'training-b.csv')
path_label_train_e=os.path.join(data_dir,'training-e.csv')
path_label_train_c=os.path.join(data_dir,'training-c.csv')
path_label_train_d=os.path.join(data_dir,'training-d.csv')
# + [markdown] _cell_guid="45ef7b96-b4cb-46d5-a092-6eb10775cb3e" _uuid="5aab8d2c6139e67da8cac46a41b02dd3b0c16c1a" id="Yg6yjUXS5vvq" colab_type="text"
# ### Image Processing
# + [markdown] _cell_guid="7ee61a9a-b469-4fae-8770-3981bb601a68" _uuid="21cd71c9e6faa386a2c5829cf3a3e636b338f280" id="55CiGH6k5vvq" colab_type="text"
# We shall write some helper functions to process and visualize the images. We will convert the color images to grayscale. To make the digits easier to recognize, we will sharpen each image using an unsharp mask and a Laplacian-style filter.
# + _cell_guid="87d7081d-aa4a-48b9-b1a4-575ceceb2eb3" _uuid="aa19885280eef5dfed1f8378d668ea7f025a7925" id="P5HOjz6h5vvr" colab_type="code" colab={}
def get_key(path):
    """Return the filename component (the image key) of *path*.

    The key is everything after the last path separator; if the path has
    no separator, the whole string is returned unchanged.
    """
    _, _, key = path.rpartition(os.sep)
    return key
def get_data(paths_img,path_label=None,resize_dim=None):
    '''Read, preprocess, and return images (and optionally their labels).

    Each image is loaded in grayscale, optionally resized to
    resize_dim x resize_dim, sharpened (unsharp masking followed by a
    Laplacian-style kernel), and binarized with inverted Otsu thresholding.

    Args:
        paths_img: image filepaths
        path_label: label CSV filepath (columns 'filename' and 'digit');
            pass it while processing training data, defaults to None while
            processing testing data
        resize_dim: if given, the image is resized to resize_dim x resize_dim (optional)
    Returns:
        X: numpy array of preprocessed 2-D images
        y: categorical (one-hot) true labels, only when path_label is given
    '''
    X=[] # initialize empty list for resized images
    for i,path in enumerate(paths_img):
        img=cv2.imread(path,cv2.IMREAD_GRAYSCALE) # images loaded in grayscale
        #img = cv2.bilateralFilter(img,9,75,75)
        #img = cv2.medianBlur(img,5)
        #img = cv2.fastNlMeansDenoisingColored(img,None,10,10,7,21)
        #img=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) # cnahging colorspace to GRAY
        if resize_dim is not None:
            img=cv2.resize(img,(resize_dim,resize_dim),interpolation=cv2.INTER_AREA) # resize image to resize_dim x resize_dim
            #X.append(np.expand_dims(img,axis=2)) # expand image to 28x28x1 and append to the list.
        # Unsharp masking: subtract a Gaussian-blurred copy to sharpen edges.
        gaussian_3 = cv2.GaussianBlur(img, (9,9), 10.0) #unblur
        img = cv2.addWeighted(img, 1.5, gaussian_3, -0.5, 0, img)
        # Extra sharpening with a Laplacian-style convolution kernel.
        kernel = np.array([[-1,-1,-1], [-1,9,-1], [-1,-1,-1]]) #filter
        img = cv2.filter2D(img, -1, kernel)
        #thresh = 200
        #maxValue = 255
        #th, img = cv2.threshold(img, thresh, maxValue, cv2.THRESH_BINARY);
        # Inverted binary + Otsu: the threshold is chosen automatically per image.
        ret,img = cv2.threshold(img, 128, 255, cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
        X.append(img) # append the processed 2-D image to the list
        # display progress (overwrite the same console line until the last image)
        if i==len(paths_img)-1:
            end='\n'
        else: end='\r'
        print('processed {}/{}'.format(i+1,len(paths_img)),end=end)
    X=np.array(X) # transform list to numpy array
    if path_label is None:
        return X
    else:
        df = pd.read_csv(path_label) # read labels
        df=df.set_index('filename')
        y_label=[df.loc[get_key(path)]['digit'] for path in paths_img] # get the labels corresponding to the images
        y=to_categorical(y_label,10) # transform integer labels to one-hot vectors
        return X, y
def imshow_group(X,y,y_pred=None,n_per_row=10,phase='processed'):
    '''Visualize a group of images with their true labels and/or predictions.

    Args:
        X: images (n_sample x H x W)
        y: categorical (one-hot) true labels, or None
        y_pred: predicted class probabilities (n_sample x 10)
        n_per_row: number of images per row to be plotted
        phase: 'processed' shows each image titled with its true label;
            'prediction' additionally prints the top 3 predicted classes
            with their probabilities under each image.
    '''
    n_sample=len(X)
    img_dim=X.shape[1]
    j=np.ceil(n_sample/n_per_row) # number of rows in the figure
    fig=plt.figure(figsize=(FIG_WIDTH,HEIGHT_PER_ROW*j))
    for i,img in enumerate(X):
        plt.subplot(j,n_per_row,i+1)
        # img_sq=np.squeeze(img,axis=2)
        # plt.imshow(img_sq,cmap='gray')
        plt.imshow(img)
        if phase=='processed':
            plt.title(np.argmax(y[i]))
        if phase=='prediction':
            top_n=3 # top 3 predictions with highest probabilities
            ind_sorted=np.argsort(y_pred[i])[::-1] # classes sorted by descending probability
            h=img_dim+4 # vertical offset of the text below the image
            for k in range(top_n):
                string='pred: {} ({:.0f}%)\n'.format(ind_sorted[k],y_pred[i,ind_sorted[k]]*100)
                plt.text(img_dim/2, h, string, horizontalalignment='center',verticalalignment='center')
                h+=4
            if y is not None:
                plt.text(img_dim/2, -4, 'true label: {}'.format(np.argmax(y[i])),
                        horizontalalignment='center',verticalalignment='center')
        plt.axis('off')
    plt.show()
def create_submission(predictions,keys,path):
    """Write predictions to a CSV submission file indexed by image key.

    Args:
        predictions: iterable of predicted digit labels.
        keys: image filenames, written as the 'key' index column.
        path: destination CSV filepath.
    """
    submission = pd.DataFrame(
        {'label': predictions},
        index=pd.Index(keys, name='key'),
    )
    submission.to_csv(path, index=True)
# + [markdown] _cell_guid="541cbb22-5319-490a-b6a4-8999db770722" _uuid="01fee24b540458889ebaa9d86b7646e5264e5b1e" id="SnwznmaI5vvs" colab_type="text"
# Next we are going to use the `get_data()` function to process all the images from dataset
# + _uuid="e4d65fdcac01f5d367ce863d79d22fbbf9b36b86" id="Jjdsm8aM5vvs" colab_type="code" colab={} outputId="4f67c5be-b376-4249-f6d8-327249326a6a"
X_train_a,y_train_a=get_data(paths_train_a,path_label_train_a,resize_dim=RESIZE_DIM)
X_train_b,y_train_b=get_data(paths_train_b,path_label_train_b,resize_dim=RESIZE_DIM)
X_train_c,y_train_c=get_data(paths_train_c,path_label_train_c,resize_dim=RESIZE_DIM)
X_train_d,y_train_d=get_data(paths_train_d,path_label_train_d,resize_dim=RESIZE_DIM)
X_train_e,y_train_e=get_data(paths_train_e,path_label_train_e,resize_dim=RESIZE_DIM)
# + _cell_guid="620a40d9-36f8-49fa-b96c-b823999daad0" _uuid="1d06bb5aaa056f719a918be3206c3f6b2e738e0b" id="O2Eu3gYk5vvu" colab_type="code" colab={} outputId="53765bf0-678f-4afd-ac05-4616776db776"
X_train_all=np.concatenate((X_train_a,X_train_b,X_train_c,X_train_d,X_train_e),axis=0)
y_train_all=np.concatenate((y_train_a,y_train_b,y_train_c,y_train_d,y_train_e),axis=0)
X_train_all.shape, y_train_all.shape
# + _uuid="ed3a982e1b9ac0b91ae9368c546d0d0447cc5fca" id="TRZVRgXq5vvw" colab_type="code" colab={}
X_show_all=X_train_all
# + [markdown] _cell_guid="6a2f1c8a-4bdf-4307-b9ea-7841175be203" _uuid="08f095d0c3ee5f77e811fa1f8046756544f8cce6" id="6z5wBRxa5vvx" colab_type="text"
# Let's see some samples of the processed data. The digits are sharper now.
# + _uuid="e0b3d05de0d4a6d683f8bbf9dcadbecd557d8d7c" id="6YKxQtKc5vvy" colab_type="code" colab={} outputId="c0b4a7c8-6d0a-410c-865b-b2f37f224998"
plt.subplot(221)
plt.imshow(X_train_all[0], cmap=plt.get_cmap('gray'))
plt.subplot(222)
plt.imshow(X_train_all[1], cmap=plt.get_cmap('gray'))
plt.subplot(223)
plt.imshow(X_train_all[2], cmap=plt.get_cmap('gray'))
plt.subplot(224)
plt.imshow(X_train_all[3], cmap=plt.get_cmap('gray'))
# show the plot
plt.show()
# + [markdown] _uuid="4a5791e2c7d37e6c230dd8ef5fdfb6f7b592cdb6" id="Hv2YduCx5vv0" colab_type="text"
# **Histogram of digit**
# + _uuid="c9d6d00dc0cdda91eb8baaecb33b497c4a216385" id="7hk9fT_05vv0" colab_type="code" colab={} outputId="280d8711-bc3a-4303-8d0e-562a785f6ce6"
imgg=X_train_all[1]
hist = cv2.calcHist([imgg],[0],None,[256],[0,256])
plt.hist(imgg.ravel(),256,[0,256])
# show the plotting graph of an image
plt.show()
# + _uuid="193edae07f21588132a68d0eed2b1f4646cf8a28" id="AI5ce1xM5vv2" colab_type="code" colab={} outputId="4710c857-6c79-473b-c711-7edc6331bad6"
plt.imshow(X_train_all[1])
# + _cell_guid="5930fd41-039e-4385-b753-127a5e3494ce" _uuid="57a68268499ba8d1d68d6b62d45f6c64a6453d53" id="XnbJhwPN5vv3" colab_type="code" colab={} outputId="3bb9d7cc-7030-480e-ff4f-89be837e8e72"
X_test_a=get_data(paths_test_a,resize_dim=RESIZE_DIM)
X_test_b=get_data(paths_test_b,resize_dim=RESIZE_DIM)
X_test_c=get_data(paths_test_c,resize_dim=RESIZE_DIM)
X_test_d=get_data(paths_test_d,resize_dim=RESIZE_DIM)
X_test_e=get_data(paths_test_e,resize_dim=RESIZE_DIM)
X_test_f=get_data(paths_test_f,resize_dim=RESIZE_DIM)
X_test_auga=get_data(paths_test_auga,resize_dim=RESIZE_DIM)
X_test_augc=get_data(paths_test_augc,resize_dim=RESIZE_DIM)
# + _cell_guid="1b380d90-c66f-42c9-ae97-5103e18a7c7a" _uuid="a26c04ac67f1c7919dede57707dd11f94d4590cb" id="4qtquGrr5vv5" colab_type="code" colab={}
X_test_all=np.concatenate((X_test_a,X_test_b,X_test_c,X_test_d,X_test_e,X_test_f,X_test_auga,X_test_augc))
# + _uuid="e8ef1f0166e80464b15fc7e23c4be85cc92129ed" id="KDZmeZZ85vv6" colab_type="code" colab={} outputId="32eaa2b3-4a6f-4a13-b7f4-d4f2ac4c8d7d"
X_tshow_all=X_test_all
X_tshow_all.shape
# + [markdown] _uuid="050db40f873909fb7eaddc593742560b81bb104f" id="IEXF5bG85vv9" colab_type="text"
# We will now reshape the images to have a single (grayscale) color channel
# + _uuid="3a924ec72e3e201ab7fc7a7b14c4285625cc7788" id="7-h2lXrV5vv9" colab_type="code" colab={}
X_train_all = X_train_all.reshape(X_train_all.shape[0],32, 32,1).astype('float32')
X_test_all = X_test_all.reshape(X_test_all.shape[0],32, 32,1).astype('float32')
# + _uuid="b6e46f3c07abdc22612a4996ea626f050a9b84d9" id="7hH5Aihy5vv_" colab_type="code" colab={} outputId="5ba02db3-078f-49f7-ea2e-be04f868ddc3"
X_train_all.shape
# + [markdown] _uuid="a75a4c52d3ebc246ac7ed01da04a47e4c0c6816e" id="CD80F_yk5vwC" colab_type="text"
# **Normalize data**
# + _uuid="f35d6a704e6d1015a2e65a5ca8f66e1aef8afb9b" id="HOJig0sU5vwD" colab_type="code" colab={}
X_train_all = X_train_all/255
X_test_all=X_test_all/255
# + [markdown] _cell_guid="d32f9092-0d61-4a31-940a-c47bd4a7ab70" _uuid="100fe511f70f708fe5a9a4a20a423d5cc5fc5d31" id="AcRNimMg5vwF" colab_type="text"
# Next, we are going to randomly choose 80% of the training data and use it to train our neural network. The remaining 20% images are going to be our validation data.
# + _cell_guid="45550d4b-7b11-4277-b968-3cbc74161537" _uuid="ec1470e959cf087a27d9ca3d349f2f5dd8687528" id="nmBOV3uD5vwF" colab_type="code" colab={}
# Shuffle sample indices with a fixed seed so the split is reproducible.
indices=list(range(len(X_train_all)))
np.random.seed(42)
np.random.shuffle(indices)
ind=int(len(indices)*0.80) # 80% of the samples go to training
# train data
X_train=X_train_all[indices[:ind]]
y_train=y_train_all[indices[:ind]]
# validation data (remaining 20%; this slice is equivalent to indices[ind:])
X_val=X_train_all[indices[-(len(indices)-ind):]]
y_val=y_train_all[indices[-(len(indices)-ind):]]
# + [markdown] _cell_guid="32382aae-4756-457e-af68-5fd94603ddfb" _uuid="ba5d4e25fb97b4afcb84f85c8788da6244afadaf" id="qQFsPI3S5vwG" colab_type="text"
# ## Step 2: Design the model
# + [markdown] _cell_guid="8f3f9cf5-17d0-4b3a-b544-84bee00bf280" _uuid="52b0f9b0fabc92f87bcd85ca6e04c93df7343ae4" id="nJXUqiF05vwH" colab_type="text"
#
# + _uuid="9748e26b1a1cbda489a438723277d7a93abcb3c8" id="m5Ff27HH5vwH" colab_type="code" colab={} outputId="1cbf5d94-d5f2-47f0-d869-bbeaee260c75"
def my_model(img_size=32,channels=1):
    """Build and compile the CNN digit classifier.

    Architecture: three conv blocks (two Conv2D + one MaxPooling each,
    with 32 -> 128 -> 256 filters), then Flatten -> Dense(64, relu) ->
    Dropout(0.2) -> Dense(10, softmax).

    Args:
        img_size: height/width of the (square) input images.
        channels: number of color channels (1 for grayscale).
    Returns:
        A compiled keras Sequential model (categorical cross-entropy, Adam).
    """
    input_shape = (img_size, img_size, channels)
    layers = [
        Conv2D(32, (5, 5), input_shape=input_shape, activation='relu', padding='same'),
        Conv2D(32, (5, 5), activation='relu', padding='same'),
        MaxPooling2D(pool_size=(2, 2)),
        Conv2D(128, (3, 3), activation='relu', padding='same'),
        Conv2D(128, (3, 3), activation='relu', padding='same'),
        MaxPooling2D(pool_size=(2, 2)),
        Conv2D(256, (3, 3), activation='relu', padding='same'),
        Conv2D(256, (3, 3), activation='relu', padding='same'),
        MaxPooling2D(pool_size=(2, 2)),
        Flatten(),
        Dense(64),
        Activation('relu'),
        Dropout(0.2),
        Dense(10),
        Activation('softmax'),
    ]
    model = Sequential(layers)
    model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer='adam')
    # UNCOMMENT THIS TO VIEW THE ARCHITECTURE
    #model.summary()
    return model
model=my_model()
model.summary()
# + [markdown] _cell_guid="ee9607d4-40d3-449d-948d-ca743a6f9d77" _uuid="0b0adac9af387df7adc0cf26954802305849396e" id="HKEyRXhG5vwJ" colab_type="text"
# ## Step 3: Train the model
# + _cell_guid="7b15bdc9-1794-4eda-9d48-f544be3e382c" _uuid="f220f61fae1fae8c1cfb2f843300b489937b0090" id="g-4HMu905vwJ" colab_type="code" colab={} outputId="207ef042-b859-4b5e-da34-99dae0885688"
path_model='model_filter.h5' # save model at this location after each epoch
K.tensorflow_backend.clear_session() # destroys the current graph and builds a new one
model=my_model() # create the model
K.set_value(model.optimizer.lr,1e-3) # set the learning rate
# fit the model
h=model.fit(x=X_train,
y=y_train,
batch_size=64,
epochs=10,
verbose=1,
validation_data=(X_val,y_val),
shuffle=True,
callbacks=[
ModelCheckpoint(filepath=path_model),
]
)
# + [markdown] _cell_guid="1862ff7c-a071-4015-bb48-f9670a3112db" _uuid="d89525da13f845e3417890b968fb5d7753f4e54f" id="Z61Vbph95vwM" colab_type="text"
# # After training we have a decent training and validation accuracy on the dataset.
# + [markdown] _cell_guid="85658a4d-697d-46ef-91cc-e87a8c4c2f89" _uuid="97b8d77f4c3a692f939ffa94a466dfd257834d5a" id="et0fg71f5vwM" colab_type="text"
# ## Result Analysis
# + [markdown] _cell_guid="6b4b108e-4a11-489c-b068-55fecad1ea67" _uuid="8913f144fc15314e1a98cead1048635bfd758fd0" id="_apnLg345vwM" colab_type="text"
# Let's observe the predictions our model makes on the test images.
# + _cell_guid="383c8574-c022-4a52-a3d9-f6c7edabc44d" _uuid="0b019de716cc8c0ab602d2600b590d83e310fb1f" id="2EDsSP3H5vwN" colab_type="code" colab={}
# Run the trained CNN over the full test set; each row of `predictions_prob`
# holds the per-class probabilities for one test image.
predictions_prob=model.predict(X_test_all) # get predictions for all the test data
# + _cell_guid="d0993d24-b26d-4934-84a0-ac145f4e8594" _uuid="4822a937ea504052db7b326595866e7a276e0a2a" id="WWcx9YXO5vwO" colab_type="code" colab={}
# Sample a fixed set of 200 test indices for visual inspection (seeded for reproducibility).
n_sample=200
np.random.seed(42)
ind=np.random.randint(0,len(X_test_all), size=n_sample)
# + _cell_guid="9f4260e9-e5e6-4b5e-8565-d5bed1bbc359" _uuid="2b6ec0f33275de9e2b2a4b0bd12b202f8d067551" id="3QL41zbG5vwP" colab_type="code" colab={} outputId="00ab69a1-33df-4e9c-a324-aa5c0ef587d6"
# NOTE(review): `imshow_group` and `X_tshow_all` are defined earlier in the notebook — confirm.
imshow_group(X=X_tshow_all[ind],y=None,y_pred=predictions_prob[ind], phase='prediction')
# + _cell_guid="5ac67cd3-11ac-4bb1-9d7a-f60417f45849" _uuid="6653b48712048170c2f12148c6dfe4988660c02f" id="37Li23it5vwQ" colab_type="code" colab={}
# Collapse probability vectors to hard class labels (argmax per sample).
labels=[np.argmax(pred) for pred in predictions_prob]
# + _uuid="2dd35d48dc06d1532d165756677e990c1fe8eaa3" id="wS0NiOwO5vwR" colab_type="code" colab={}
# Extract the submission key (presumably the image id) from each test-file path.
keys=[get_key(path) for path in paths_test_all ]
# + _uuid="8fbcfb14dbfe0de631f40ec36bb3e007fdd67ea9" id="hLnSIOGX5vwT" colab_type="code" colab={}
# Write the (key, label) pairs out as a Kaggle-style submission CSV.
create_submission(predictions=labels,keys=keys,path='submission7.csv')
# + _uuid="886f58583f4c1e063e7dd2f040a3bef4247f65f9" id="bB3Co9KP5vwU" colab_type="code" colab={} outputId="e5b9983e-81b6-4d3e-af17-6a69bd9a0af2"
# Read the file back and preview it as a sanity check.
prediction = pd.read_csv('submission7.csv')
prediction.head(100)
# + _uuid="51e7528de57416eff84e8fa131124d138dc99ea4" id="FIC6MSaI5vwV" colab_type="code" colab={}
#df=pd.read_csv(path_label_train_c,na_filter=False)
#df.head(100)
# + _uuid="f1d1ee1fb15f6bba61fd94d4492524fbb32967a0" id="NVkc6BOv5vwW" colab_type="code" colab={}
#y=np.array(df.iloc[:,3:4])
#print(y)
# + _uuid="c68432b931be3671f4c7b501ac1ff4339bbaa745" id="wO3t5jF_5vwY" colab_type="code" colab={}
# + _uuid="2f3cef391776026ced9f273990934402ce275c92" id="ZdSrUJ905vwZ" colab_type="code" colab={}
| Bengali_Digit_Recognizer using CNN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="copyright"
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] id="title:generic"
# # Vertex Pipelines: AutoML Tabular pipelines using google-cloud-pipeline-components
#
# <table align="left">
# <td>
# <a href="https://colab.research.google.com/github/GoogleCloudPlatform/vertex-ai-samples/notebooks/official/pipelines/automl_tabular_classification_beans.ipynb">
# <img src="https://cloud.google.com/ml-engine/images/colab-logo-32px.png" alt="Colab logo"> Run in Colab
# </a>
# </td>
# <td>
# <a href="https://github.com/GoogleCloudPlatform/vertex-ai-samples/notebooks/official/pipelines/automl_tabular_classification_beans.ipynb">
# <img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo">
# View on GitHub
# </a>
# </td>
# <td>
# <a href="https://console.cloud.google.com/ai/platform/notebooks/deploy-notebook?download_url=https://github.com/GoogleCloudPlatform/vertex-ai-samples/notebooks/official/pipelines/automl_tabular_classification_beans.ipynb">
# Open in Google Cloud Notebooks
# </a>
# </td>
# </table>
# <br/><br/><br/>
# + [markdown] id="overview:pipelines,automl,beans"
# ## Overview
#
# This notebook shows how to use the components defined in [`google_cloud_pipeline_components`](https://github.com/kubeflow/pipelines/tree/master/components/google-cloud) to build an AutoML tabular classification workflow on [Vertex Pipelines](https://cloud.google.com/vertex-ai/docs/pipelines).
#
# You'll build a pipeline that looks like this:
#
# <a href="https://storage.googleapis.com/amy-jo/images/mp/beans.png" target="_blank"><img src="https://storage.googleapis.com/amy-jo/images/mp/beans.png" width="95%"/></a>
# + [markdown] id="dataset:beans,lcn"
# ### Dataset
#
# The dataset used for this tutorial is the UCI Machine Learning ['Dry beans dataset'](https://archive.ics.uci.edu/ml/datasets/Dry+Bean+Dataset), from: <NAME>. and <NAME>., (2020), "Multiclass Classification of Dry Beans Using Computer Vision and Machine Learning Techniques."In Computers and Electronics in Agriculture, 174, 105507. [DOI](https://doi.org/10.1016/j.compag.2020.105507).
# + [markdown] id="objective:pipelines,automl"
# ### Objective
#
# In this tutorial, you create an AutoML tabular classification using a pipeline with components from `google_cloud_pipeline_components`.
#
# The steps performed include:
#
# - Create a `Dataset` resource.
# - Train an AutoML `Model` resource.
# - Creates an `Endpoint` resource.
# - Deploys the `Model` resource to the `Endpoint` resource.
#
# The components are [documented here](https://google-cloud-pipeline-components.readthedocs.io/en/latest/google_cloud_pipeline_components.aiplatform.html#module-google_cloud_pipeline_components.aiplatform).
# + [markdown] id="costs"
# ### Costs
#
# This tutorial uses billable components of Google Cloud:
#
# * Vertex AI
# * Cloud Storage
#
# Learn about [Vertex AI
# pricing](https://cloud.google.com/vertex-ai/pricing) and [Cloud Storage
# pricing](https://cloud.google.com/storage/pricing), and use the [Pricing
# Calculator](https://cloud.google.com/products/calculator/)
# to generate a cost estimate based on your projected usage.
# + [markdown] id="setup_local"
# ### Set up your local development environment
#
# If you are using Colab or Google Cloud Notebook, your environment already meets all the requirements to run this notebook. You can skip this step.
#
# Otherwise, make sure your environment meets this notebook's requirements. You need the following:
#
# - The Cloud Storage SDK
# - Git
# - Python 3
# - virtualenv
# - Jupyter notebook running in a virtual environment with Python 3
#
# The Cloud Storage guide to [Setting up a Python development environment](https://cloud.google.com/python/setup) and the [Jupyter installation guide](https://jupyter.org/install) provide detailed instructions for meeting these requirements. The following steps provide a condensed set of instructions:
#
# 1. [Install and initialize the SDK](https://cloud.google.com/sdk/docs/).
#
# 2. [Install Python 3](https://cloud.google.com/python/setup#installing_python).
#
# 3. [Install virtualenv](https://cloud.google.com/python/setup#installing_and_using_virtualenv) and create a virtual environment that uses Python 3.
#
# 4. Activate that environment and run `pip3 install Jupyter` in a terminal shell to install Jupyter.
#
# 5. Run `jupyter notebook` on the command line in a terminal shell to launch Jupyter.
#
# 6. Open this notebook in the Jupyter Notebook Dashboard.
#
# + [markdown] id="install_aip:mbsdk"
# ## Installation
#
# Install the latest version of Vertex SDK for Python.
# + id="install_aip:mbsdk"
import os

# Google Cloud Notebook
# Google Cloud Notebooks require `pip install --user`; elsewhere install normally.
if os.path.exists("/opt/deeplearning/metadata/env_version"):
    USER_FLAG = "--user"
else:
    USER_FLAG = ""

# Shell magic (jupytext-escaped): upgrade the Vertex AI SDK.
# ! pip3 install --upgrade google-cloud-aiplatform $USER_FLAG
# + [markdown] id="install_storage"
# Install the latest GA version of *google-cloud-storage* library as well.
# + id="install_storage"
# ! pip3 install -U google-cloud-storage $USER_FLAG
# + [markdown] id="install_gcpc"
# Install the latest GA version of *google-cloud-pipeline-components* library as well.
# + id="install_gcpc"
# ! pip3 install $USER kfp google-cloud-pipeline-components --upgrade
# + [markdown] id="restart"
# ### Restart the kernel
#
# Once you've installed the additional packages, you need to restart the notebook kernel so it can find the packages.
# + id="restart"
import os

# Restart the Jupyter kernel so freshly-installed packages become importable.
# Skipped under automated testing (IS_TESTING env var set).
if not os.getenv("IS_TESTING"):
    # Automatically restart kernel after installs
    import IPython

    app = IPython.Application.instance()
    app.kernel.do_shutdown(True)
# + [markdown] id="check_versions"
# Check the versions of the packages you installed. The KFP SDK version should be >=1.6.
# + id="check_versions:kfp,gcpc"
# ! python3 -c "import kfp; print('KFP SDK version: {}'.format(kfp.__version__))"
# ! python3 -c "import google_cloud_pipeline_components; print('google_cloud_pipeline_components version: {}'.format(google_cloud_pipeline_components.__version__))"
# + [markdown] id="before_you_begin:nogpu"
# ## Before you begin
#
# ### GPU runtime
#
# This tutorial does not require a GPU runtime.
#
# ### Set up your Google Cloud project
#
# **The following steps are required, regardless of your notebook environment.**
#
# 1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs.
#
# 2. [Make sure that billing is enabled for your project.](https://cloud.google.com/billing/docs/how-to/modify-project)
#
# 3. [Enable the Vertex AI APIs, Compute Engine APIs, and Cloud Storage.](https://console.cloud.google.com/flows/enableapi?apiid=ml.googleapis.com,compute_component,storage-component.googleapis.com)
#
# 4. [The Google Cloud SDK](https://cloud.google.com/sdk) is already installed in Google Cloud Notebook.
#
# 5. Enter your project ID in the cell below. Then run the cell to make sure the
# Cloud SDK uses the right project for all the commands in this notebook.
#
# **Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$`.
# + id="set_project_id"
# User-editable project id; the placeholder triggers auto-detection below.
PROJECT_ID = "[your-project-id]"  # @param {type:"string"}
# + id="autoset_project_id"
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
    # Get your GCP project id from gcloud
    # (IPython shell-assignment syntax: `shell_output` is a list of output lines.)
    shell_output = ! gcloud config list --format 'value(core.project)' 2>/dev/null
    PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)
# + id="set_gcloud_project_id"
# Point the gcloud CLI at the chosen project (jupytext-escaped shell magic).
# ! gcloud config set project $PROJECT_ID
# + [markdown] id="region"
# #### Region
#
# You can also change the `REGION` variable, which is used for operations
# throughout the rest of this notebook. Below are regions supported for Vertex AI. We recommend that you choose the region closest to you.
#
# - Americas: `us-central1`
# - Europe: `europe-west4`
# - Asia Pacific: `asia-east1`
#
# You may not use a multi-regional bucket for training with Vertex AI. Not all regions provide support for all Vertex AI services.
#
# Learn more about [Vertex AI regions](https://cloud.google.com/vertex-ai/docs/general/locations)
# + id="region"
# Region used for all Vertex AI operations in this notebook.
REGION = "us-central1"  # @param {type: "string"}
# + [markdown] id="timestamp"
# #### Timestamp
#
# If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append the timestamp onto the name of resources you create in this tutorial.
# + id="timestamp"
from datetime import datetime

# Session-unique suffix appended to resource names to avoid collisions.
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
# + [markdown] id="gcp_authenticate"
# ### Authenticate your Google Cloud account
#
# **If you are using Google Cloud Notebook**, your environment is already authenticated. Skip this step.
#
# **If you are using Colab**, run the cell below and follow the instructions when prompted to authenticate your account via oAuth.
#
# **Otherwise**, follow these steps:
#
# In the Cloud Console, go to the [Create service account key](https://console.cloud.google.com/apis/credentials/serviceaccountkey) page.
#
# **Click Create service account**.
#
# In the **Service account name** field, enter a name, and click **Create**.
#
# In the **Grant this service account access to project** section, click the Role drop-down list. Type "Vertex" into the filter box, and select **Vertex Administrator**. Type "Storage Object Admin" into the filter box, and select **Storage Object Admin**.
#
# Click Create. A JSON file that contains your key downloads to your local environment.
#
# Enter the path to your service account key as the GOOGLE_APPLICATION_CREDENTIALS variable in the cell below and run the cell.
# + id="gcp_authenticate"
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.

import os
import sys

# If on Google Cloud Notebook, then don't execute this code
# (Google Cloud Notebooks are already authenticated.)
if not os.path.exists("/opt/deeplearning/metadata/env_version"):
    if "google.colab" in sys.modules:
        from google.colab import auth as google_auth

        google_auth.authenticate_user()

    # If you are running this notebook locally, replace the string below with the
    # path to your service account key and run this cell to authenticate your GCP
    # account.
    elif not os.getenv("IS_TESTING"):
        # NOTE(review): the %env magic below is jupytext-escaped; in the notebook it
        # forms the elif body. As a plain .py file this branch is empty.
        # %env GOOGLE_APPLICATION_CREDENTIALS ''
# + [markdown] id="bucket:mbsdk"
# ### Create a Cloud Storage bucket
#
# **The following steps are required, regardless of your notebook environment.**
#
# When you initialize the Vertex SDK for Python, you specify a Cloud Storage staging bucket. The staging bucket is where all the data associated with your dataset and model resources are retained across sessions.
#
# Set the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization.
# + id="bucket"
# User-editable staging bucket; the placeholder triggers auto-naming below.
BUCKET_NAME = "gs://[your-bucket-name]"  # @param {type:"string"}
# + id="autoset_bucket"
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
    # Derive a (hopefully) globally-unique bucket name from project id + timestamp.
    # NOTE(review): there is no separator between PROJECT_ID and "aip-" — confirm
    # this concatenation is intentional.
    BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
# + [markdown] id="create_bucket"
# **Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
# + id="create_bucket"
# ! gsutil mb -l $REGION $BUCKET_NAME
# + [markdown] id="validate_bucket"
# Finally, validate access to your Cloud Storage bucket by examining its contents:
# + id="validate_bucket"
# ! gsutil ls -al $BUCKET_NAME
# + [markdown] id="set_service_account"
# #### Service Account
#
# **If you don't know your service account**, try to get your service account using `gcloud` command by executing the second cell below.
# + id="set_service_account"
# User-editable service account; the placeholder triggers auto-detection below.
SERVICE_ACCOUNT = "[your-service-account]"  # @param {type:"string"}
# + id="autoset_service_account"
if (
    SERVICE_ACCOUNT == ""
    or SERVICE_ACCOUNT is None
    or SERVICE_ACCOUNT == "[your-service-account]"
):
    # Get your GCP project id from gcloud
    # NOTE(review): the `!gcloud auth list` shell assignment below is comment-escaped,
    # so in this .py representation `shell_output` still holds the output of the
    # earlier `gcloud config list` cell — verify the escaping when round-tripping
    # through jupytext.
    # shell_output = !gcloud auth list 2>/dev/null
    SERVICE_ACCOUNT = shell_output[2].strip()
    print("Service Account:", SERVICE_ACCOUNT)
# + [markdown] id="set_service_account:pipelines"
# #### Set service account access for Vertex Pipelines
#
# Run the following commands to grant your service account access to read and write pipeline artifacts in the bucket that you created in the previous step -- you only need to run these once per service account.
# + id="set_service_account:pipelines"
# ! gsutil iam ch serviceAccount:{SERVICE_ACCOUNT}:roles/storage.objectCreator $BUCKET_NAME
# ! gsutil iam ch serviceAccount:{SERVICE_ACCOUNT}:roles/storage.objectViewer $BUCKET_NAME
# + [markdown] id="setup_vars"
# ### Set up variables
#
# Next, set up some variables used throughout the tutorial.
# ### Import libraries and define constants
# + id="import_aip:mbsdk"
import google.cloud.aiplatform as aip
# + [markdown] id="aip_constants:endpoint"
# #### Vertex AI constants
#
# Setup up the following constants for Vertex AI:
#
# - `API_ENDPOINT`: The Vertex AI API service endpoint for `Dataset`, `Model`, `Job`, `Pipeline` and `Endpoint` services.
# + id="aip_constants:endpoint"
# API service endpoint
# Regional Vertex AI API endpoint, e.g. "us-central1-aiplatform.googleapis.com".
API_ENDPOINT = "{}-aiplatform.googleapis.com".format(REGION)
# + [markdown] id="pipeline_constants"
# #### Vertex Pipelines constants
#
# Set up the following constants for Vertex Pipelines:
# + id="pipeline_constants"
# GCS prefix under which pipeline run artifacts are stored.
PIPELINE_ROOT = "{}/pipeline_root/beans".format(BUCKET_NAME)
# + [markdown] id="additional_imports"
# Additional imports.
# + id="import_pipelines"
from typing import NamedTuple

import kfp
from google_cloud_pipeline_components import aiplatform as gcc_aip
from kfp.v2 import dsl
from kfp.v2.dsl import (ClassificationMetrics, Input, Metrics, Model, Output,
                        component)
# + [markdown] id="init_aip:mbsdk"
# ## Initialize Vertex SDK for Python
#
# Initialize the Vertex SDK for Python for your project and corresponding bucket.
# + id="init_aip:mbsdk"
aip.init(project=PROJECT_ID, staging_bucket=BUCKET_NAME)
# + [markdown] id="define_component:eval"
# ## Define a metrics evaluation custom component
#
# In this tutorial, you define one custom pipeline component. The remaining components are pre-built
# components for Vertex AI services.
#
# The custom pipeline component you define is a Python-function-based component.
# Python function-based components make it easier to iterate quickly by letting you build your component code as a Python function and generating the component specification for you.
#
# Note the `@component` decorator. When you evaluate the `classification_model_eval` function, the component is compiled to what is essentially a task factory function, that can be used in the pipeline definition.
#
# In addition, a `tabular_eval_component.yaml` component definition file will be generated. The component `yaml` file can be shared & placed under version control, and used later to define a pipeline step.
#
# The component definition specifies a base image for the component to use, and specifies that the `google-cloud-aiplatform` package should be installed. When not specified, the base image defaults to Python 3.7
#
# The custom pipeline component retrieves the classification model evaluation generated by the AutoML tabular training process, parses the evaluation data, and renders the ROC curve and confusion matrix for the model. It also uses given metrics threshold information and compares that to the evaluation results to determine whether the model is sufficiently accurate to deploy.
#
# *Note:* This custom component is specific to an AutoML tabular classification.
# + id="define_component:eval"
@component(
    base_image="gcr.io/deeplearning-platform-release/tf2-cpu.2-3:latest",
    output_component_file="tabular_eval_component.yaml",
    packages_to_install=["google-cloud-aiplatform"],
)
def classification_model_eval_metrics(
    project: str,
    location: str,  # "us-central1",
    api_endpoint: str,  # "us-central1-aiplatform.googleapis.com",
    thresholds_dict_str: str,
    model: Input[Model],
    metrics: Output[Metrics],
    metricsc: Output[ClassificationMetrics],
) -> NamedTuple("Outputs", [("dep_decision", str)]):  # Return parameter.
    """KFP component: fetch the AutoML tabular model's evaluation, log its
    metrics/ROC/confusion matrix to the pipeline UI, and decide whether the
    model clears the given thresholds and should be deployed.

    Args:
        project: GCP project id.
        location: Region of the model (unused here beyond the component contract).
        api_endpoint: Regional Vertex AI API endpoint.
        thresholds_dict_str: JSON dict, e.g. '{"auRoc": 0.95}'; keys checked
            are "auRoc"/"auPrc" (higher is better).
        model: Input Model artifact produced by the training step.
        metrics: Output artifact receiving scalar/textual metrics.
        metricsc: Output artifact receiving ROC curve and confusion matrix.

    Returns:
        ("true",) or ("false",) as the `dep_decision` output, consumed by a
        downstream dsl.Condition.
    """
    # Imports are inside the function body because KFP serializes the function
    # and runs it in the component's own container.
    import json
    import logging

    from google.cloud import aiplatform as aip

    # Fetch model eval info
    def get_eval_info(client, model_name):
        # Returns (last evaluation name, list of metric dicts, list of JSON strings).
        from google.protobuf.json_format import MessageToDict

        response = client.list_model_evaluations(parent=model_name)
        metrics_list = []
        metrics_string_list = []
        for evaluation in response:
            print("model_evaluation")
            print(" name:", evaluation.name)
            print(" metrics_schema_uri:", evaluation.metrics_schema_uri)
            metrics = MessageToDict(evaluation._pb.metrics)
            for metric in metrics.keys():
                logging.info("metric: %s, value: %s", metric, metrics[metric])
            metrics_str = json.dumps(metrics)
            metrics_list.append(metrics)
            metrics_string_list.append(metrics_str)

        # NOTE: `evaluation` here is the loop variable after the loop — the name
        # of the *last* evaluation is returned.
        return (
            evaluation.name,
            metrics_list,
            metrics_string_list,
        )

    # Use the given metrics threshold(s) to determine whether the model is
    # accurate enough to deploy.
    def classification_thresholds_check(metrics_dict, thresholds_dict):
        for k, v in thresholds_dict.items():
            logging.info("k {}, v {}".format(k, v))
            if k in ["auRoc", "auPrc"]:  # higher is better
                if metrics_dict[k] < v:  # if under threshold, don't deploy
                    logging.info("{} < {}; returning False".format(metrics_dict[k], v))
                    return False
        logging.info("threshold checks passed.")
        return True

    def log_metrics(metrics_list, metricsc):
        # Logs the first evaluation's confusion matrix, ROC curve, and scalar
        # metrics. `metrics` (the component's Output[Metrics] parameter) is
        # captured from the enclosing scope via closure.
        test_confusion_matrix = metrics_list[0]["confusionMatrix"]
        logging.info("rows: %s", test_confusion_matrix["rows"])

        # log the ROC curve
        fpr = []
        tpr = []
        thresholds = []
        for item in metrics_list[0]["confidenceMetrics"]:
            fpr.append(item.get("falsePositiveRate", 0.0))
            tpr.append(item.get("recall", 0.0))
            thresholds.append(item.get("confidenceThreshold", 0.0))
        print(f"fpr: {fpr}")
        print(f"tpr: {tpr}")
        print(f"thresholds: {thresholds}")
        metricsc.log_roc_curve(fpr, tpr, thresholds)

        # log the confusion matrix
        annotations = []
        for item in test_confusion_matrix["annotationSpecs"]:
            annotations.append(item["displayName"])
        logging.info("confusion matrix annotations: %s", annotations)
        metricsc.log_confusion_matrix(
            annotations,
            test_confusion_matrix["rows"],
        )

        # log textual metrics info as well
        for metric in metrics_list[0].keys():
            if metric != "confidenceMetrics":
                val_string = json.dumps(metrics_list[0][metric])
                metrics.log_metric(metric, val_string)
        # metrics.metadata["model_type"] = "AutoML Tabular classification"

    logging.getLogger().setLevel(logging.INFO)
    aip.init(project=project)
    # extract the model resource name from the input Model Artifact
    model_resource_path = model.uri.replace("aiplatform://v1/", "")
    logging.info("model path: %s", model_resource_path)

    client_options = {"api_endpoint": api_endpoint}
    # Initialize client that will be used to create and send requests.
    client = aip.gapic.ModelServiceClient(client_options=client_options)
    eval_name, metrics_list, metrics_str_list = get_eval_info(
        client, model_resource_path
    )
    logging.info("got evaluation name: %s", eval_name)
    logging.info("got metrics list: %s", metrics_list)
    log_metrics(metrics_list, metricsc)

    thresholds_dict = json.loads(thresholds_dict_str)
    deploy = classification_thresholds_check(metrics_list[0], thresholds_dict)
    if deploy:
        dep_decision = "true"
    else:
        dep_decision = "false"

    logging.info("deployment decision is %s", dep_decision)

    return (dep_decision,)
# + [markdown] id="define_pipeline:gcpc,beans,lcn"
# ## Define an AutoML tabular classification pipeline that uses components from `google_cloud_pipeline_components`
# + id="define_pipeline:gcpc,beans,lcn"
# Names/sizing for the pipeline and its deployed endpoint machine.
DISPLAY_NAME = "automl-beans{}".format(TIMESTAMP)
PIPELINE_NAME = "automl-tabular-beans-training-v2"
MACHINE_TYPE = "n1-standard-4"


@kfp.dsl.pipeline(name=PIPELINE_NAME, pipeline_root=PIPELINE_ROOT)
def pipeline(
    bq_source: str = "bq://aju-dev-demos.beans.beans1",
    display_name: str = DISPLAY_NAME,
    project: str = PROJECT_ID,
    gcp_region: str = REGION,
    api_endpoint: str = API_ENDPOINT,
    thresholds_dict_str: str = '{"auRoc": 0.95}',
):
    """AutoML tabular classification pipeline on the dry-beans dataset:
    create dataset -> train AutoML model -> evaluate -> conditionally deploy.
    """
    # Step 1: create a Vertex tabular Dataset from the public BigQuery table.
    dataset_create_op = gcc_aip.TabularDatasetCreateOp(
        project=project, display_name=display_name, bq_source=bq_source
    )

    # Step 2: train an AutoML classification model; each feature column gets an
    # explicit transformation, with "Class" as the categorical target.
    training_op = gcc_aip.AutoMLTabularTrainingJobRunOp(
        project=project,
        display_name=display_name,
        optimization_prediction_type="classification",
        optimization_objective="minimize-log-loss",
        budget_milli_node_hours=1000,
        column_transformations=[
            {"numeric": {"column_name": "Area"}},
            {"numeric": {"column_name": "Perimeter"}},
            {"numeric": {"column_name": "MajorAxisLength"}},
            {"numeric": {"column_name": "MinorAxisLength"}},
            {"numeric": {"column_name": "AspectRation"}},
            {"numeric": {"column_name": "Eccentricity"}},
            {"numeric": {"column_name": "ConvexArea"}},
            {"numeric": {"column_name": "EquivDiameter"}},
            {"numeric": {"column_name": "Extent"}},
            {"numeric": {"column_name": "Solidity"}},
            {"numeric": {"column_name": "roundness"}},
            {"numeric": {"column_name": "Compactness"}},
            {"numeric": {"column_name": "ShapeFactor1"}},
            {"numeric": {"column_name": "ShapeFactor2"}},
            {"numeric": {"column_name": "ShapeFactor3"}},
            {"numeric": {"column_name": "ShapeFactor4"}},
            {"categorical": {"column_name": "Class"}},
        ],
        dataset=dataset_create_op.outputs["dataset"],
        target_column="Class",
    )

    # Step 3: custom evaluation component (defined above) decides deployment.
    model_eval_task = classification_model_eval_metrics(
        project,
        gcp_region,
        api_endpoint,
        thresholds_dict_str,
        training_op.outputs["model"],
    )

    # Step 4: deploy only when the evaluation cleared the metric thresholds.
    with dsl.Condition(
        model_eval_task.outputs["dep_decision"] == "true",
        name="deploy_decision",
    ):
        deploy_op = gcc_aip.ModelDeployOp(  # noqa: F841
            model=training_op.outputs["model"],
            project=project,
            machine_type=MACHINE_TYPE,
        )
# + [markdown] id="compile_pipeline"
# ## Compile the pipeline
#
# Next, compile the pipeline.
# + id="compile_pipeline"
from kfp.v2 import compiler  # noqa: F811

# Compile the pipeline function to a JSON spec
# ("tabular_classification_pipeline.json") consumable by Vertex Pipelines.
compiler.Compiler().compile(
    pipeline_func=pipeline,
    package_path="tabular classification_pipeline.json".replace(" ", "_"),
)
# + [markdown] id="run_pipeline:model"
# ## Run the pipeline
#
# Next, run the pipeline.
# + id="run_pipeline:model"
# Unique display name for this run (also reused by the cleanup cell below).
DISPLAY_NAME = "beans_" + TIMESTAMP

# Submit the compiled pipeline spec to Vertex Pipelines and block until done.
job = aip.PipelineJob(
    display_name=DISPLAY_NAME,
    template_path="tabular classification_pipeline.json".replace(" ", "_"),
    pipeline_root=PIPELINE_ROOT,
    parameter_values={"project": PROJECT_ID, "display_name": DISPLAY_NAME},
)

job.run()
# + [markdown] id="view_pipeline_run:model"
# Click on the generated link to see your run in the Cloud Console.
#
# <!-- It should look something like this as it is running:
#
# <a href="https://storage.googleapis.com/amy-jo/images/mp/automl_tabular_classif.png" target="_blank"><img src="https://storage.googleapis.com/amy-jo/images/mp/automl_tabular_classif.png" width="40%"/></a> -->
# + [markdown] id="compare_pipeline_runs"
# ## Compare the parameters and metrics of the pipelines run from their tracked metadata
#
# Next, you use the Vertex SDK for Python to compare the parameters and metrics of the pipeline runs. Wait until the pipeline runs have finished to run the next cell.
# + id="compare_pipeline_runs"
# Pull tracked parameters/metrics of past runs of this pipeline as a DataFrame.
pipeline_df = aip.get_pipeline_df(pipeline=PIPELINE_NAME)
print(pipeline_df.head(2))
# + [markdown] id="cleanup:pipelines"
# # Cleaning up
#
# To clean up all Google Cloud resources used in this project, you can [delete the Google Cloud
# project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial.
#
# Otherwise, you can delete the individual resources you created in this tutorial -- *Note:* this is auto-generated and not all resources may be applicable for this tutorial:
#
# - Dataset
# - Pipeline
# - Model
# - Endpoint
# - Batch Job
# - Custom Job
# - Hyperparameter Tuning Job
# - Cloud Storage Bucket
# + id="cleanup:pipelines"
# Flags selecting which resource types to delete; flip any to False to keep it.
# (batchjob/customjob/hptjob flags are unused below — auto-generated template.)
delete_dataset = True
delete_pipeline = True
delete_model = True
delete_endpoint = True
delete_batchjob = True
delete_customjob = True
delete_hptjob = True
delete_bucket = True

# Best-effort delete of the trained model: failures are printed, not raised.
try:
    if delete_model and "DISPLAY_NAME" in globals():
        models = aip.Model.list(
            filter=f"display_name={DISPLAY_NAME}", order_by="create_time"
        )
        model = models[0]
        aip.Model.delete(model)
        print("Deleted model:", model)
except Exception as e:
    print(e)

# Best-effort delete of the endpoint (undeploy all models first).
try:
    if delete_endpoint and "DISPLAY_NAME" in globals():
        endpoints = aip.Endpoint.list(
            filter=f"display_name={DISPLAY_NAME}_endpoint", order_by="create_time"
        )
        endpoint = endpoints[0]
        endpoint.undeploy_all()
        aip.Endpoint.delete(endpoint.resource_name)
        print("Deleted endpoint:", endpoint)
except Exception as e:
    print(e)

# Dataset cleanup. The literal string comparisons below are an auto-generated
# template artifact — only the "tabular" branch is live in this notebook.
if delete_dataset and "DISPLAY_NAME" in globals():
    if "tabular" == "tabular":
        try:
            datasets = aip.TabularDataset.list(
                filter=f"display_name={DISPLAY_NAME}", order_by="create_time"
            )
            dataset = datasets[0]
            aip.TabularDataset.delete(dataset.resource_name)
            print("Deleted dataset:", dataset)
        except Exception as e:
            print(e)

    if "tabular" == "image":
        try:
            datasets = aip.ImageDataset.list(
                filter=f"display_name={DISPLAY_NAME}", order_by="create_time"
            )
            dataset = datasets[0]
            aip.ImageDataset.delete(dataset.resource_name)
            print("Deleted dataset:", dataset)
        except Exception as e:
            print(e)

    if "tabular" == "text":
        try:
            datasets = aip.TextDataset.list(
                filter=f"display_name={DISPLAY_NAME}", order_by="create_time"
            )
            dataset = datasets[0]
            aip.TextDataset.delete(dataset.resource_name)
            print("Deleted dataset:", dataset)
        except Exception as e:
            print(e)

    if "tabular" == "video":
        try:
            datasets = aip.VideoDataset.list(
                filter=f"display_name={DISPLAY_NAME}", order_by="create_time"
            )
            dataset = datasets[0]
            aip.VideoDataset.delete(dataset.resource_name)
            print("Deleted dataset:", dataset)
        except Exception as e:
            print(e)

# Best-effort delete of the pipeline run record.
try:
    if delete_pipeline and "DISPLAY_NAME" in globals():
        pipelines = aip.PipelineJob.list(
            filter=f"display_name={DISPLAY_NAME}", order_by="create_time"
        )
        pipeline = pipelines[0]
        aip.PipelineJob.delete(pipeline.resource_name)
        print("Deleted pipeline:", pipeline)
except Exception as e:
    print(e)

# Bucket removal: the gsutil shell magic below is jupytext-escaped; in the
# notebook it forms the body of this `if`.
if delete_bucket and "BUCKET_NAME" in globals():
    # ! gsutil rm -r $BUCKET_NAME
| notebooks/official/pipelines/automl_tabular_classification_beans.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: env1
# language: python
# name: env1
# ---
import pandas as pd
import numpy as np
import matplotlib
import sklearn

# Print library versions for reproducibility of the notebook's results.
print("pandas", pd.__version__)
print("numpy", np.__version__)
print("mpl", matplotlib.__version__)
print("sklearn", sklearn.__version__)
from IPython.display import Image
# %matplotlib inline
from sklearn import datasets

# Load iris and keep only petal length / petal width (columns 2 and 3).
iris = datasets.load_iris()
X = iris.data[:, [2,3]]
y = iris.target
np.unique(y)
# +
import matplotlib.pyplot as plt

_ = plt.scatter(X[:,0], X[:,1], c=y)
# -
from sklearn.model_selection import train_test_split

# Stratified 70/30 split so each class keeps its proportion in both sets.
X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                    test_size=0.3, random_state=1,
                                                    stratify=y)
print("label counts in y: ", np.bincount(y))
np.bincount(y_train), np.bincount(y_test)
from sklearn.preprocessing import StandardScaler

#### StandardScaler?
# Fit scaling parameters on the training set only, then apply to both sets.
sc = StandardScaler()
sc.fit(X_train)
X_train_std = sc.transform(X_train)
X_test_std = sc.transform(X_test)
# Histograms of raw (top row) vs standardized (bottom row) features.
fig, axes = plt.subplots(ncols=2, nrows=2)
_ = axes[0,0].hist(X_train[:,0], label="train")
_ = axes[0,0].hist(X_test[:,0], label="test")
_ = axes[0,1].hist(X_train[:,1], label="train")
_ = axes[0,1].hist(X_test[:,1], label="test")
_ = axes[1,0].hist(X_train_std[:,0], label="train")
_ = axes[1,0].hist(X_test_std[:,0], label="test")
_ = axes[1,1].hist(X_train_std[:,1], label="train")
_ = axes[1,1].hist(X_test_std[:,1], label="test")
from sklearn.linear_model import Perceptron

# NOTE(review): `n_iter` was renamed `max_iter` in scikit-learn >= 0.21 —
# confirm against the sklearn version printed above.
ppn = Perceptron(n_iter=40, eta0=.1, random_state=1)
ppn
ppn.fit(X_train_std, y_train)
# Predict on the standardized test set and display the misclassification count
# (a bare tuple expression — Jupyter echoes it as the cell output).
y_pred = ppn.predict(X_test_std)
"Misclassified: ", (y_test != y_pred).sum()
# Side-by-side scatter of true labels ($y$) vs predicted labels ($\hat{y}$).
fig, (ax1, ax2) = plt.subplots(ncols=2)
_ = ax1.set_title("$y$")
_ = ax2.set_title("$\hat{y}$")
_ = ax1.scatter(X_test_std[:,0], X_test_std[:,1], c=y_test)
_ = ax2.scatter(X_test_std[:,0], X_test_std[:,1], c=y_pred)
from sklearn.metrics import accuracy_score

# Two equivalent ways to compute test accuracy.
accuracy_score(y_test, y_pred)
ppn.score(X_test_std, y_test)
from matplotlib.colors import ListedColormap
import matplotlib.pyplot as plt
from mlxtend.plotting import plot_decision_regions

# Stack train and test back together for a combined decision-region plot.
X_combined_std = np.vstack((X_train_std, X_test_std))
X_combined_std.shape
y_combined = np.hstack((y_train, y_test))
y_combined.shape
# Decision regions of the trained perceptron over the combined (train+test) data.
fig, ax = plt.subplots(constrained_layout=True)
plot_decision_regions(X=X_combined_std, y=y_combined, clf=ppn, ax=ax)
_ = ax.set_xlabel("petal length")
_ = ax.set_ylabel("petal width")  # fixed typo: was "petel width"
_ = ax.legend(loc='upper left')
def sigmoid(z):
    """Logistic (sigmoid) function: maps any real z into (0, 1).

    Works element-wise on NumPy arrays as well as on scalars.
    """
    exp_neg_z = np.exp(-z)
    return 1.0 / (1.0 + exp_neg_z)
# +
# Plot the sigmoid over [-7, 8) with a vertical reference line at z = 0,
# where phi(z) = 0.5.
z = np.arange(-7, 8, 0.1)
phi_z = sigmoid(z)
fig, ax = plt.subplots(constrained_layout=True)
ax.plot(z, phi_z)
ax.axvline(0.0, color='k')
ax.set_ylim(-0.1, 1.0)
ax.set_ylabel('$\phi (z)$')
ax.set_xlabel('z')
ax.set_yticks([0.0, 0.5, 1.0])
ax.yaxis.grid(True)
# +
def cost_1(z):
    """Logistic-regression loss contribution for a sample whose true label is 1."""
    phi = sigmoid(z)
    return -np.log(phi)


def cost_0(z):
    """Logistic-regression loss contribution for a sample whose true label is 0."""
    phi = sigmoid(z)
    return -np.log(1 - phi)
# +
# Evaluate both cost curves over a range of z values for plotting.
z = np.arange(-10, 10, 0.1)
phi_z = sigmoid(z)
c1 = [cost_1(x) for x in z]
c0 = [cost_0(x) for x in z]
# -
# Plot J(w) against phi(z) for each true-label case.
fig, ax = plt.subplots()
_ = ax.plot(phi_z, c0, linestyle='--', label='J(w) if y=0')
_ = ax.plot(phi_z, c1, linestyle='--', label='J(w) if y=1')
_ = ax.legend()
_ = ax.set_ylim([0, 5])
_ = ax.set_xlim([0,1])
_ = ax.set_xlabel("$\phi(z)$")
_ = ax.set_ylabel("J(w)")
# +
#https://en.wikipedia.org/wiki/Logistic_regression
# Combined figure: sigmoid curve (top) and the two cost curves (bottom).
fig, (ax1, ax2) = plt.subplots(nrows=2, constrained_layout=True)
_ = ax1.plot(z, phi_z)
_ = ax1.axvline(0.0, color='k')
_ = ax1.set_ylim(-0.1, 1.0)
_ = ax1.set_ylabel('$\phi (z)$')
_ = ax1.set_xlabel('z')
_ = ax1.set_yticks([0.0, 0.5, 1.0])
_ = ax1.yaxis.grid(True)
_ = ax2.plot(phi_z, c0, linestyle='--', label='J(w) if y=0')
_ = ax2.plot(phi_z, c1, linestyle='--', label='J(w) if y=1')
_ = ax2.legend()
_ = ax2.set_ylim([0, 5])
_ = ax2.set_xlim([0,1])
_ = ax2.set_xlabel("$\phi(z)$")
_ = ax2.set_ylabel("J(w)")
# -
# Binary subset (classes 0 and 1 only) for a two-class logistic regression demo.
X_train_01_subset = X_train[(y_train==0) | (y_train == 1)]
y_train_01_subset = y_train[(y_train==0) | (y_train==1)]
_ = plt.scatter(X_train_01_subset[:,0], X_train_01_subset[:,1],
                c=y_train_01_subset)
# +
from sklearn.linear_model import LogisticRegression

# C=100 -> weak L2 regularization (C is the inverse regularization strength).
lrgd = LogisticRegression(C=100.0, random_state=1)
lrgd
# -
lrgd.fit(X_train_01_subset, y_train_01_subset)
plot_decision_regions(X_train_01_subset, y_train_01_subset, clf=lrgd)
plt.xlabel("length")
plt.ylabel("width")
# Multiclass logistic regression on all three classes.
lr = LogisticRegression(C=100.0, random_state=1)
lr.fit(X_train_std, y_train)
plot_decision_regions(X_combined_std, y_combined, lr)
plt.xlabel("length")
plt.ylabel("width")
lr.score(X_test_std, y_test)
lr.predict_proba(X_test_std)
# Each row of predict_proba sums to 1 across the classes.
lr.predict_proba(X_test_std).sum(axis=1)
# +
import matplotlib.colors as mcolors
import matplotlib.cm as mcm

# Left panel: per-class probability heatmap. Right panel: expected vs actual
# class assignments, using a 3-color discrete colormap.
fig, (ax1, ax2) = plt.subplots(ncols=2, constrained_layout=True)
pnorm = mcolors.Normalize(vmin=0, vmax=1)
pcmap = mcm.Blues
im = ax1.pcolormesh(lr.predict_proba(X_test_std), norm=pnorm, cmap=pcmap)
_ = ax1.set_xticks([.5, 1.5, 2.5])
_ = ax1.set_xticklabels([0,1,2])
_ = ax1.set_xlabel("classes")
ccmap = mcolors.ListedColormap([mcm.tab20c(2), mcm.tab20c(6), mcm.tab20c(10)])
cnorm = mcolors.BoundaryNorm([-.5, .5, 1.5, 2.5], ccmap.N)
im2 = ax2.pcolormesh(np.vstack([y_test, lr.predict(X_test_std)]).T,
                     cmap=ccmap, norm=cnorm)
ax2.set_xticks([.5, 1.5])
ax2.set_xticklabels(["Expected", "Actual"])
ax2.axvline(x=1, color='k')
cb = fig.colorbar(im, ax=ax1)
_ = cb.set_label("p(being in class)")
cb2 = fig.colorbar(im2, ax=ax2)
cb2.set_ticks([0,1,2])
# NOTE(review): this re-labels the *first* colorbar, overwriting
# "p(being in class)" — probably meant cb2.set_label("classes").
cb.set_label("classes")
# -
# Index and coordinates of the first misclassified test sample.
mi = (y_test != lr.predict(X_test_std)).nonzero()[0][0]
mx, my = X_test_std[mi].squeeze()
mx, my, mi
import matplotlib.colors as mcolors

# 2x2 grid: per-class probability scatter plots (first three panels) and the
# expected classes (fourth panel); the misclassified point gets a red edge.
pp = lr.predict_proba(X_test_std)
fig, axes = plt.subplots(ncols=2, nrows=2, constrained_layout=True)
for i, ax in enumerate(axes.flatten()):
    if i == 3:
        colors = y_test
        cmap = ccmap
        norm = cnorm
        title = "Expected classes"
    else:
        colors = pp[:,i]
        norm = pnorm
        cmap = pcmap
        title = f"P( class {i})"
    _ = ax.set_title(title)
    im = ax.scatter(X_test_std[:,0], X_test_std[:, 1], c=colors, cmap=cmap, norm=norm)
    _ = ax.scatter(mx, my, color=cmap(norm(colors[mi])), edgecolor='Red')
# Sanity checks: argmax of the probabilities matches predict().
lr.predict_proba(X_test_std).argmax(axis=1)
y_test
(lr.predict_proba(X_test_std).argmax(axis=1)!=y_test).sum()
(lr.predict_proba(X_test_std).argmax(axis=1)!=lr.predict(X_test_std)).sum()
axes.shape
axes.flatten().shape
| chap03.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Sales conversion optimization
# Data: https://www.kaggle.com/loveall/clicks-conversion-tracking
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# +
df= pd.read_csv("KAG_conversion_data.csv")
df.head()
# -
df.describe()
df.info()
#Dummy encode any categorical or object values in the data and save the resulting data frame to variable X.
X=pd.get_dummies(data=df,drop_first=True)
X
# ## Using a heat map to show the correlation in the data
# a. Drop the first 4 columns in the data frame X.
#
# b. Basing your answer on what can be seen in the heat map, why did we drop these columns?
#
#Drop the first 4 columns in the data frame X.
X.drop(X.iloc[:, 0:4], inplace = True, axis = 1)
X.head()
#Showing correlation in the data using a heatmap and commenting why we dropped the columns above
sns.heatmap(df[["Impressions","Clicks","Spent","Total_Conversion","Approved_Conversion"]].corr(),annot=True,cmap="YlGnBu");
# ### Using the elbow method:
# a. Determine the best number of clusters for the data in the range of 2 to 20.
#
# b. Also include the graphical plot for the elbow curve.
# +
from sklearn.cluster import KMeans
import seaborn as sns
sum_of_sq_dist = {}
for k in range(2,20):
km = KMeans(n_clusters= k, init= 'k-means++', max_iter= 1000)
km = km.fit(X)
sum_of_sq_dist[k] = km.inertia_
#Plot the graph for the sum of square distance values and Number of Clusters
sns.pointplot(x = list(sum_of_sq_dist.keys()), y = list(sum_of_sq_dist.values()))
plt.xlabel('Number of Clusters(k)')
plt.ylabel('Sum of Square Distances')
plt.title('Elbow Method For Optimal k')
plt.show()
# -
# Based on the result above in 4b use the value at your elbow point to cluster the values in the data frame X.
# Fit KMeans with the k chosen from the elbow plot above.
KMean_clust = KMeans(n_clusters=4, init='k-means++', max_iter=1000)
KMean_clust.fit(X)
# +
# Visualizing one cluster. The original indexed X with `KMean_clust == 2`,
# but KMean_clust is the fitted estimator (not the label array), and X is a
# DataFrame, so positional indexing needs `.values`. Use the per-row cluster
# labels exposed by the fitted model instead.
labels = KMean_clust.labels_
sns.set(style='darkgrid')
plt.scatter(X.values[labels == 2, 2], X.values[labels == 2, 3], s=100, c='red')
# -
# ### Building KMeans model with K=4 (Training and Predicting)
# Use the model to predict the labels from the data and save them to variable y_means
# +
# Instantiating a fresh KMeans model with k = 4
kmeans4 = KMeans(n_clusters=4)
# fit_predict trains the model AND returns each row's cluster label,
# so a separate fit() call beforehand is redundant.
y_means = kmeans4.fit_predict(X)
# BUG FIX: the original printed `y_pred`, which is never defined (NameError);
# the predicted labels live in `y_means`.
print(y_means)
# Storing the predicted cluster labels in a new column
df['Advert_Type'] = y_means + 1  # to start the cluster number from 1
# -
df.head()
# ### Using any form of distribution plot of your choice and the original data frame, plot 2 graphs that can be used to answer the following:
# a. Which advert type lead to the highest and consistent amount of sales by customers of all the age brackets?
#
# b. Does the company xyz have gender bias in terms of their ad spending? Are their products gender neutral?
df.groupby(['xyz_campaign_id']).sum().plot(kind='pie', y='Approved_Conversion',figsize=(15,10), autopct='%1.1f%%');
df.groupby(['gender']).sum().plot(kind='pie', y='Approved_Conversion',figsize=(15,10), autopct='%1.1f%%');
# ## Hierarchical clustering
import scipy.cluster.hierarchy as sch
dendrogram = sch.dendrogram(sch.linkage(X, method = 'ward'))
plt.title('Dendrogam', fontsize = 20)
plt.xlabel('Customers')
plt.ylabel('Ecuclidean Distance')
plt.show()
| Sales_Conversion_Optimization.ipynb |
# +
# Plot error surface for linear regression model.
# Based on https://github.com/probml/pmtk3/blob/master/demos/contoursSSEdemo.m
import numpy as np
import matplotlib.pyplot as plt
import probml_utils as pml
from mpl_toolkits.mplot3d import axes3d, Axes3D
np.random.seed(0)
N = 21
x = np.linspace(0.0, 20, N)
X0 = x.reshape(N, 1)
X = np.c_[np.ones((N, 1)), X0]
w = np.array([-1.5, 1 / 9.0])
y = w[0] * x + w[1] * np.square(x)
y = y + np.random.normal(0, 1, N) * 2
w = np.linalg.lstsq(X, y, rcond=None)[0]
W0, W1 = np.meshgrid(np.linspace(-8, 0, 100), np.linspace(-0.5, 1.5, 100))
SS = np.array([sum((w0 * X[:, 0] + w1 * X[:, 1] - y) ** 2) for w0, w1 in zip(np.ravel(W0), np.ravel(W1))])
SS = SS.reshape(W0.shape)
plt.figure()
plt.contourf(W0, W1, SS)
pml.savefig("linregHeatmapSSE.pdf")
plt.colorbar()
plt.show()
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
surf = ax.plot_surface(W0, W1, SS)
pml.savefig("linregSurfSSE.pdf")
plt.show()
fig, ax = plt.subplots()
CS = plt.contour(W0, W1, SS, levels=np.linspace(0, 2000, 10), cmap="jet")
plt.plot(w[0], w[1], "x")
pml.savefig("linregContoursSSE.pdf")
plt.show()
| notebooks/book1/11/linreg_contours_sse_plot.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # HyperBand
#
# ## Introduction
#
# This example shows how to perform HyperBand parametric sweeping using CNTK with MNIST dataset to train a convolutional neural network (CNN) on a GPU cluster.
#
# ## Details
#
# - We provide a CNTK example [ConvMNIST.py](../ConvMNIST.py) to accept command line arguments for CNTK dataset, model locations, model file suffix and two hyperparameters for tuning: 1. hidden layer dimension and 2. feedforward constant
# - The implementation of HyperBand algorithm is adopted from the article [*Hyperband: A Novel Bandit-Based Approach to Hyperparameter Optimization*](https://people.eecs.berkeley.edu/~kjamieson/hyperband.html)
# - For demonstration purposes, MNIST dataset and CNTK training script will be deployed at Azure File Share;
# - Standard output of the job and the model will be stored on Azure File Share;
# - MNIST dataset (http://yann.lecun.com/exdb/mnist/) has been preprocessed by using install_mnist.py available [here](https://batchaisamples.blob.core.windows.net/samples/mnist_dataset.zip?st=2017-09-29T18%3A29%3A00Z&se=2099-12-31T08%3A00%3A00Z&sp=rl&sv=2016-05-31&sr=c&sig=PmhL%2BYnYAyNTZr1DM2JySvrI12e%2F4wZNIwCtf7TRI%2BM%3D).
# ## Instructions
#
# ### Install Dependencies and Create Configuration file.
# Follow [instructions](/recipes) to install all dependencies and create configuration file.
# ### Read Configuration and Create Batch AI client
# + nbpresent={"id": "bfa11f00-8866-4051-bbfe-a9646e004910"}
from __future__ import print_function
import sys
import logging
import numpy as np
import azure.mgmt.batchai.models as models
from azure.storage.blob import BlockBlobService
from azure.storage.file import FileService
sys.path.append('../../..')
import utilities as utils
from utilities.job_factory import ParameterSweep, NumParamSpec, DiscreteParamSpec
cfg = utils.config.Configuration('../../configuration.json')
client = utils.config.create_batchai_client(cfg)
# -
# Create the Resource Group and Batch AI workspace if they do not exist:
utils.config.create_resource_group(cfg)
_ = client.workspaces.create(cfg.resource_group, cfg.workspace, cfg.location).result()
# ## 1. Prepare Training Dataset and Script in Azure Storage
# ### Create Azure Blob Container
#
# We will create a new Blob Container with name `batchaisample` under your storage account. This will be used to store the *input training dataset*
#
# **Note** You don't need to create new blob Container for every cluster. We are doing this in this sample to simplify resource management for you.
azure_blob_container_name = 'batchaisample'
blob_service = BlockBlobService(cfg.storage_account_name, cfg.storage_account_key)
blob_service.create_container(azure_blob_container_name, fail_on_exist=False)
# ### Upload MNIST Dataset to Azure Blob Container
#
# For demonstration purposes, we will download preprocessed MNIST dataset to the current directory and upload it to Azure Blob Container directory named `mnist_dataset`.
#
# There are multiple ways to create folders and upload files into Azure Blob Container - you can use [Azure Portal](https://ms.portal.azure.com), [Storage Explorer](http://storageexplorer.com/), [Azure CLI2](/azure-cli-extension) or Azure SDK for your preferable programming language.
# In this example we will use Azure SDK for python to copy files into Blob.
mnist_dataset_directory = 'mnist_dataset'
utils.dataset.download_and_upload_mnist_dataset_to_blob(
blob_service, azure_blob_container_name, mnist_dataset_directory)
# ### Create Azure File Share
#
# For this example we will create a new File Share with name `batchaisample` under your storage account. This will be used to share the *training script file* and *output file*.
#
# **Note** You don't need to create new file share for every cluster. We are doing this in this sample to simplify resource management for you.
azure_file_share_name = 'batchaisample'
file_service = FileService(cfg.storage_account_name, cfg.storage_account_key)
file_service.create_share(azure_file_share_name, fail_on_exist=False)
# Upload the training script [ConvMNIST.py](../ConvMNIST.py) to file share directory named `hyperparam_samples`.
cntk_script_path = "hyperparam_samples"
file_service.create_directory(
azure_file_share_name, cntk_script_path, fail_on_exist=False)
file_service.create_file_from_path(
azure_file_share_name, cntk_script_path, 'ConvMNIST.py', '../ConvMNIST.py')
# ## 2. Create Azure Batch AI Compute Cluster
# ### Configure Compute Cluster
#
# - For this example we will use a GPU cluster of `STANDARD_NC6` nodes. Number of nodes in the cluster is configured with `nodes_count` variable;
# - We will call the cluster `nc6`;
#
# So, the cluster will have the following parameters:
# +
nodes_count = 4
cluster_name = 'nc6'
parameters = models.ClusterCreateParameters(
location=cfg.location,
vm_size='STANDARD_NC6',
scale_settings=models.ScaleSettings(
manual=models.ManualScaleSettings(target_node_count=nodes_count)
),
user_account_settings=models.UserAccountSettings(
admin_user_name=cfg.admin,
admin_user_password=cfg.admin_password or None,
admin_user_ssh_public_key=cfg.admin_ssh_key or None,
)
)
# -
# ### Create Compute Cluster
_ = client.clusters.create(cfg.resource_group, cfg.workspace, cluster_name, parameters).result()
# ### Monitor Cluster Creation
#
# Monitor the just created cluster. The `utilities` module contains a helper function to print out detail status of the cluster.
cluster = client.clusters.get(cfg.resource_group, cfg.workspace, cluster_name)
utils.cluster.print_cluster_status(cluster)
# ## 3. Hyperparameter tuning using HyperBand
# Define specifications for the hyperparameters
param_specs = [
NumParamSpec(
parameter_name="FEEDFORWARD_CONSTANT",
data_type="REAL",
start=0.001,
end=10,
scale="LOG"
),
DiscreteParamSpec(
parameter_name="HIDDEN_LAYERS_DIMENSION",
values=[100, 200, 300]
)
]
# Create a parameter substitution object.
parameters = ParameterSweep(param_specs)
# Generate *num_trials* random hyper-parameter configuration and corresponding index
# We will use the parameter substitution object to specify where we would like to substitute the parameters. We substitute
# the values for feedforward constant and hidden layers dimension into `models.JobCreateParameters.cntk_settings.command_line_args`. Note that the `parameters` variable is used like a dict, with the `parameter_name` being used as the key to specify which parameter to substitute. When `parameters.generate_jobs` is called, the `parameters[name]` variables will be replaced with actual values.
azure_file_share_mount_path = 'afs'
azure_blob_mount_path = 'bfs'
jcp = models.JobCreateParameters(
cluster=models.ResourceId(id=cluster.id),
node_count=1,
std_out_err_path_prefix='$AZ_BATCHAI_JOB_MOUNT_ROOT/{0}'.format(azure_file_share_mount_path),
input_directories = [
models.InputDirectory(
id='SCRIPT',
path='$AZ_BATCHAI_JOB_MOUNT_ROOT/{0}/{1}'.format(azure_blob_mount_path, mnist_dataset_directory))
],
output_directories = [
models.OutputDirectory(
id='ALL',
path_prefix='$AZ_BATCHAI_JOB_MOUNT_ROOT/{0}'.format(azure_file_share_mount_path))],
mount_volumes = models.MountVolumes(
azure_file_shares=[
models.AzureFileShareReference(
account_name=cfg.storage_account_name,
credentials=models.AzureStorageCredentialsInfo(
account_key=cfg.storage_account_key),
azure_file_url='https://{0}.file.core.windows.net/{1}'.format(
cfg.storage_account_name, azure_file_share_name),
relative_mount_path=azure_file_share_mount_path)
],
azure_blob_file_systems=[
models.AzureBlobFileSystemReference(
account_name=cfg.storage_account_name,
credentials=models.AzureStorageCredentialsInfo(
account_key=cfg.storage_account_key),
container_name=azure_blob_container_name,
relative_mount_path=azure_blob_mount_path)
]
),
container_settings=models.ContainerSettings(
image_source_registry=models.ImageSourceRegistry(image='microsoft/cntk:2.5.1-gpu-python2.7-cuda9.0-cudnn7.0')
),
cntk_settings=models.CNTKsettings(
python_script_file_path='$AZ_BATCHAI_JOB_MOUNT_ROOT/{0}/{1}/ConvMNIST.py'.format(azure_file_share_mount_path, cntk_script_path),
command_line_args='--feedforward_const {0} --hidden_layers_dim {1} --epochs $PARAM_EPOCHS --datadir $AZ_BATCHAI_INPUT_SCRIPT --outputdir $AZ_BATCHAI_OUTPUT_ALL --logdir $AZ_BATCHAI_OUTPUT_ALL'
.format(parameters['FEEDFORWARD_CONSTANT'],
parameters['HIDDEN_LAYERS_DIMENSION']) # Substitute hyperparameters
)
)
# Create a new experiment.
experiment_name = 'hyperband_experiment'
experiment = client.experiments.create(cfg.resource_group, cfg.workspace, experiment_name).result()
experiment_utils = utils.experiment.ExperimentUtils(client, cfg.resource_group, cfg.workspace, experiment_name)
# We define the following metric extractor to extract desired metric from learning log file.
# - In this example, we extract the number between "metric =" and "%".
# Extract the value between "metric =" and "%" from the CNTK progress log.
# Raw string for the regex: '\%' in a plain string is an invalid escape
# sequence (DeprecationWarning, future SyntaxError); the pattern bytes are
# unchanged.
metric_extractor = utils.job.MetricExtractor(
    output_dir_id='ALL',
    logfile='progress.log',
    regex=r'metric =(.*?)\%')
# Define the number of configurations and generate these jobs.
# +
num_configs = 16
jobs_to_submit = parameters.generate_jobs_random_search(jcp, num_configs)
# Add environment variable for changing number of epochs per iteration
for job in jobs_to_submit:
job.environment_variables.append(models.EnvironmentVariable(
name='PARAM_EPOCHS',
value=None
))
# -
# Before proceeding to the following steps, please be sure you have already read the article [*Hyperband: A Novel Bandit-Based Approach to Hyperparameter Optimization*](https://people.eecs.berkeley.edu/~kjamieson/hyperband.html)
#
# We define the following notation of parameters for HyperBand:
# - ***max_iter***: maximum iterations/epochs per configuration
# - ***eta***: downsampling rate
# - ***s_max***: number of unique executions of Successive Halving (minus one)
# - ***B***: total number of iterations (without reuse) per execution of Successive Halving (n,r)
# - ***n***: initial number of configurations
# - ***r***: initial number of iterations to run configurations for
max_iter = num_configs
eta = 4
logeta = lambda x: np.log(x)/np.log(eta)
s_max = int(logeta(max_iter))
B = (s_max+1)*max_iter
n = int(np.ceil(B/max_iter/(s_max+1)*eta**s_max))
r = max_iter*eta**(-s_max)
# - The following loop describes the early-stopping procedure that considers multiple configurations in parallel and terminates poor performing configurations leaving more resources for more promising configurations.
# - Note that, for illustration purposes, the implementation below is a simplified version of the HyperBand algorithm in which the outer loop used for hedging is omitted. A full implementation of HyperBand will be provided soon.
# - ***n_i*** and ***r_i*** denote the number of remaining configurations and the number of epochs to run at a given iteration
# - For each configuration, we generate specific job creation parameters with given configuration and number of epochs. A new thread is started per new job that submits and monitors the job. Once job completes, the final *metric* is extracted and returned from log file
for i in range(s_max+1):
n_i = int(n*eta**(-i))
r_i = int(r*eta**(i))
print("******** Round #{0} ******** ".format(str(i+1)))
# Add number of epochs to JobCreateParameters
for job in jobs_to_submit:
for ev in job.environment_variables:
if ev.name == 'PARAM_EPOCHS':
ev.value = str(r_i)
# Submit the jobs to the experiment
jobs = experiment_utils.submit_jobs(jobs_to_submit, 'mnist_hyperband').result()
# Wait for the jobs to finish running
experiment_utils.wait_all_jobs()
# Get the results and sort by metric value
results = experiment_utils.get_metrics_for_jobs(jobs, metric_extractor)
results.sort(key=lambda res: res['metric_value'])
for result in results:
print("Job {0} completed with metric value {1}".format(result['job_name'], result['metric_value']))
# Get the N best jobs and submit them again the next iteration
num_jobs_to_submit = int(n_i/eta)
jobs_to_submit = [utils.job.convert_job_to_jcp(res['job'], client) for res in results[0:num_jobs_to_submit]]
#### End Finite Horizon Successive Halving with (n,r)
# ## 4. Clean Up (Optional)
# ### Delete the Experiment
# Delete the experiment and jobs inside it
_ = client.experiments.delete(cfg.resource_group, cfg.workspace, experiment_name).result()
# ### Delete the Cluster
# When you are finished with the sample and don't want to submit any more jobs you can delete the cluster using the following code.
client.clusters.delete(cfg.resource_group, cfg.workspace, cluster_name).result()
# ### Delete File Share
# When you are finished with the sample and don't want to submit any more jobs you can delete the file share completely with all files using the following code.
service = FileService(cfg.storage_account_name, cfg.storage_account_key)
service.delete_share(azure_file_share_name)
| recipes/Hyperparameters/HyperBand/HyperBand.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [Root]
# language: python
# name: Python [Root]
# ---
"""Summary of tensorflow basics.
<NAME>, Jan 2016."""
# %% Import tensorflow and pyplot
import tensorflow as tf
import matplotlib.pyplot as plt
# +
# %% tf.Graph represents a collection of tf.Operations
# You can create operations by writing out equations.
# By default, there is a graph: tf.get_default_graph()
# and any new operations are added to this graph.
# The result of a tf.Operation is a tf.Tensor, which holds
# the values.
# -
# %% First a tf.Tensor
n_values = 32
x = tf.linspace(-3.0, 3.0, n_values)
# %% Construct a tf.Session to execute the graph.
sess = tf.Session()
result = sess.run(x)
# %% Alternatively pass a session to the eval fn:
x.eval(session=sess)
# x.eval() does not work, as it requires a session!
# %% We can setup an interactive session if we don't
# want to keep passing the session around:
sess.close()
sess = tf.InteractiveSession()
# %% Now this will work!
x.eval()
# %% Now a tf.Operation
# We'll use our values from [-3, 3] to create a Gaussian Distribution
sigma = 1.0
mean = 0.0
z = (tf.exp(tf.neg(tf.pow(x - mean, 2.0) /
(2.0 * tf.pow(sigma, 2.0)))) *
(1.0 / (sigma * tf.sqrt(2.0 * 3.1415))))
# %% By default, new operations are added to the default Graph
assert z.graph is tf.get_default_graph()
# %% Execute the graph and plot the result
plt.plot(x.eval(), z.eval())
plt.show()
# %% We can find out the shape of a tensor like so:
print(z.get_shape())
# %% Or in a more friendly format
print(z.get_shape().as_list())
# %% Sometimes we may not know the shape of a tensor
# until it is computed in the graph. In that case
# we should use the tf.shape fn, which will return a
# Tensor which can be eval'ed, rather than a discrete
# value of tf.Dimension
print(tf.shape(z).eval())
# %% We can combine tensors like so:
print(tf.pack([tf.shape(z), tf.shape(z), [3], [4]]).eval())
# %% Let's multiply the two to get a 2d gaussian
z_2d = tf.matmul(tf.reshape(z, [n_values, 1]), tf.reshape(z, [1, n_values]))
# %% Execute the graph and store the value that `out` represents in `result`.
plt.imshow(z_2d.eval())
plt.show()
# %% For fun let's create a gabor patch:
x = tf.reshape(tf.sin(tf.linspace(-3.0, 3.0, n_values)), [n_values, 1])
y = tf.reshape(tf.ones_like(x), [1, n_values])
z = tf.mul(tf.matmul(x, y), z_2d)
plt.imshow(z.eval())
plt.show()
# %% We can also list all the operations of a graph:
ops = tf.get_default_graph().get_operations()
print([op.name for op in ops])
# %% Lets try creating a generic function for computing the same thing:
def gabor(n_values=32, sigma=1.0, mean=0.0):
    """Build an n_values x n_values Gabor kernel as a TF (v1-era) op graph."""
    grid = tf.linspace(-3.0, 3.0, n_values)
    # 1-D Gaussian evaluated over the grid.
    gauss_1d = (tf.exp(tf.neg(tf.pow(grid - mean, 2.0) /
                              (2.0 * tf.pow(sigma, 2.0)))) *
                (1.0 / (sigma * tf.sqrt(2.0 * 3.1415))))
    # Outer product of the 1-D Gaussian with itself -> 2-D Gaussian.
    gauss_kernel = tf.matmul(
        tf.reshape(gauss_1d, [n_values, 1]), tf.reshape(gauss_1d, [1, n_values]))
    # Sinusoid varying along one axis, constant along the other.
    sin_col = tf.reshape(tf.sin(tf.linspace(-3.0, 3.0, n_values)), [n_values, 1])
    ones_row = tf.reshape(tf.ones_like(sin_col), [1, n_values])
    sinusoid = tf.matmul(sin_col, ones_row)
    # Modulating the Gaussian with the sinusoid yields the Gabor patch.
    return tf.mul(sinusoid, gauss_kernel)
# %% Confirm this does something:
plt.imshow(gabor().eval())
plt.show()
# %% And another function which can convolve
def convolve(img, W):
    """Convolve `img` with kernel `W` via tf.nn.conv2d (TF v1-era API).

    Both arguments are promoted to the 4-D shapes conv2d expects:
    W -> [height, width, n_input, n_output], img -> [num, height, width,
    channels]. Returns the convolved 4-D tensor ('SAME' padding, stride 1).
    """
    # The W matrix is only 2D
    # But conv2d will need a tensor which is 4d:
    # height x width x n_input x n_output
    if len(W.get_shape()) == 2:
        dims = W.get_shape().as_list() + [1, 1]
        W = tf.reshape(W, dims)
    if len(img.get_shape()) == 2:
        # num x height x width x channels
        dims = [1] + img.get_shape().as_list() + [1]
        img = tf.reshape(img, dims)
    elif len(img.get_shape()) == 3:
        dims = [1] + img.get_shape().as_list()
        img = tf.reshape(img, dims)
        # if the image is 3 channels, then our convolution
        # kernel needs to be repeated for each input channel
        # NOTE(review): old-style tf.concat(axis, values); axis 2 is the
        # n_input dimension of the reshaped kernel — assumes 3 channels.
        W = tf.concat(2, [W, W, W])
    # Stride is how many values to skip for the dimensions of
    # num, height, width, channels
    convolved = tf.nn.conv2d(img, W,
                             strides=[1, 1, 1, 1], padding='SAME')
    return convolved
# %% Load up an image:
from skimage import data
img = data.astronaut()
plt.imshow(img)
plt.show()
print(img.shape)
# %% Now create a placeholder for our graph which can store any input:
x = tf.placeholder(tf.float32, shape=img.shape)
# %% And a graph which can convolve our image with a gabor
out = convolve(x, gabor())
# %% Now send the image into the graph and compute the result
result = tf.squeeze(out).eval(feed_dict={x: img})
plt.imshow(result)
plt.show()
| notebooks/01_basics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: dev
# language: python
# name: dev
# ---
import numpy as np
from sklearn.datasets import load_iris
from sklearn.impute import MissingIndicator as skMissingIndicator
class MissingIndicator():
    """Minimal re-implementation of sklearn's MissingIndicator.

    fit() records the indices of columns that contain at least one NaN;
    transform() returns a boolean missingness mask over just those columns.
    """
    def fit(self, X):
        nan_mask = np.isnan(X)
        # Columns whose per-column NaN count is non-zero.
        self.features_ = np.flatnonzero(nan_mask.sum(axis=0))
        return self
    def transform(self, X):
        return np.isnan(X)[:, self.features_]
X, _ = load_iris(return_X_y=True)
rng = np.random.RandomState(0)
missing_samples = np.arange(X.shape[0])
missing_features = rng.choice(X.shape[1], X.shape[0])
X[missing_samples, missing_features] = np.nan
est1 = MissingIndicator().fit(X)
est2 = skMissingIndicator().fit(X)
assert np.array_equal(est1.features_, est2.features_)
Xt1 = est1.transform(X)
Xt2 = est2.transform(X)
assert np.allclose(Xt1, Xt2)
| impute/MissingIndicator.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
n=200
#polar coordinate
r1=5+np.random.randn(n)
theta1=np.random.rand(n)*2.0*np.pi
r2=1+np.random.randn(n)
theta2=np.random.rand(n)*2.0*np.pi
x1=np.concatenate( (r1*np.cos(theta1), r2*np.cos(theta2)))
x2=np.concatenate( (r1*np.sin(theta1), r2*np.sin(theta2)))
yt=np.ones( (2*n,1))
yt[n:]=-1
plt.axis('equal')
plt.plot(x1[(yt==1)[:,0]],x2[(yt==1)[:,0]],'ob')
plt.plot(x1[(yt==-1)[:,0]],x2[(yt==-1)[:,0]],'or')
# -
x=np.concatenate( (x1.reshape((-1,1)), x2.reshape((-1,1))), axis=1 )
x.shape
# +
def forward(a, w, b):
    """One layer's forward pass: tanh of the affine transform a @ w + b."""
    pre_activation = np.dot(a, w) + b
    return np.tanh(pre_activation)
def g(a):
    """Derivative of tanh expressed via its output: tanh'(z) = 1 - tanh(z)**2."""
    return 1 - a * a
# -
k0=2
k1=3
k2=1
w1=np.random.randn(k0,k1)
b1=np.random.randn(1,k1)
w2=np.random.randn(k1,k2)
b2=np.random.randn(1,k2)
m = 100       # gradient-descent iterations
alpha = 0.01  # learning rate
for i in range(m):
    # Forward pass through the two-layer tanh network.
    a1 = x
    a2 = forward(a1, w1, b1)
    a3 = forward(a2, w2, b2)
    yp = a3
    # Mean squared error (the 0.5 factor simplifies the gradient).
    L = 0.5 * np.average((yp - yt) * (yp - yt))
    # Back propagation.
    da3 = (yp - yt) * g(a3)      # delta at the output layer
    dz2 = np.dot(da3, w2.T)
    da2 = dz2 * g(a2)            # delta at the hidden layer
    w1 = w1 - alpha * np.dot(a1.T, da2)
    # BUG FIX: a bias gradient is the sum of that layer's deltas, not the
    # sum of its activations (the original summed a2/a3 instead of da2/da3).
    b1 = b1 - alpha * np.sum(da2, axis=0)
    w2 = w2 - alpha * np.dot(a2.T, da3)
    b2 = b2 - alpha * np.sum(da3, axis=0)
    if i % (m // 10) == 0:
        print("Loss: %s" % L)
a2
a3.shape
yt.shape
| Untitled2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Installs
pip install requests
pip install beautifulsoup4
# # Imports
import re
import json
from requests import get
from bs4 import BeautifulSoup
from pprint import pprint as pp
# # Setup
countryCodes = json.loads(get("https://gist.githubusercontent.com/jgphilpott/e49c4f53384c8e6528a4762218797355/raw/0a8d9c4e9970d2ce37810cfd7807105c7efd94b5/geoNamesCodes.js").content.decode("utf-8"))
api = "https://www.geonames.org/advanced-search.html?q=museum"
feature_class = "&featureClass=S"
max_rows = "&maxRows=500"
# # Scrape Raw Data
# +
data = []
for countryCode in countryCodes:
    country = "&country={}".format(countryCode)
    # BUG FIX: defined before the try so the `else`/`except` prints below
    # cannot hit a NameError (or a stale value from the previous country)
    # when the first request for a country fails.
    row_count = 0
    try:
        soup = BeautifulSoup(get(api + feature_class + max_rows + country).content.decode("utf-8"))
        if not soup.find_all(text="no records found in geonames database, showing wikipedia results") and not len(soup.find_all("table", class_="restable")) < 2:
            # Total record count is embedded in the <small> summary text.
            meta = soup.find("small").text
            records = int("".join(char for char in meta if char.isdigit()))
            # Page through results 500 at a time; GeoNames caps paging at 5000.
            while row_count < records and row_count <= 5000:
                print("Scraping: " + api + feature_class + max_rows + country + "&startRow={}".format(row_count))
                soup = BeautifulSoup(get(api + feature_class + max_rows + country + "&startRow={}".format(row_count)).content.decode("utf-8"))
                table = soup.find_all("table", class_="restable")[1]
                # Skip the two header rows and the trailing footer row.
                table_data = table.find_all("tr")[2:-1]
                for row in table_data:
                    museum = {"id": len(data) + 1}
                    cells = row.find_all("td")[1:]
                    if len(cells[0].find_all("a")) >= 1:
                        museum["name"] = cells[0].find_all("a")[0].text.strip()
                    else:
                        museum["name"] = ""
                    if len(cells[0].find_all("a")) >= 2:
                        museum["wiki"] = cells[0].find_all("a")[1]["href"].strip()
                    else:
                        museum["wiki"] = ""
                    if len(cells[1].find_all("a")) >= 1:
                        museum["country"] = cells[1].find_all("a")[0].text.strip()
                    else:
                        museum["country"] = ""
                    if len(cells[1].contents) >= 2:
                        museum["zone"] = re.sub(r"[^\w\s]", "", cells[1].contents[1]).strip()
                        if len(cells[1].contents) >= 4:
                            museum["sub_zone"] = cells[1].contents[3].text.strip()
                        else:
                            museum["sub_zone"] = ""
                    else:
                        museum["zone"] = ""
                    if len(cells[2].contents) >= 1 and str(cells[2].contents[0]) != "<br/>":
                        museum["feature"] = cells[2].contents[0].strip()
                    else:
                        museum["feature"] = ""
                    if len(cells[2].contents) > 3:
                        museum["elevation_m"] = int("".join(char for char in cells[2].contents[2].text if char.isdigit()))
                    else:
                        museum["elevation_m"] = None
                    museum["latitude"] = cells[3].text.strip()
                    museum["longitude"] = cells[4].text.strip()
                    data.append(museum)
                row_count += 500
        else:
            print("No data at: " + api + feature_class + max_rows + country + "&startRow={}".format(row_count))
    # BUG FIX: a bare `except:` also swallows KeyboardInterrupt/SystemExit;
    # catch Exception instead (still best-effort: log the URL and move on).
    except Exception:
        print("Error at: " + api + feature_class + max_rows + country + "&startRow={}".format(row_count))
print("Done!")
# -
# # Save Raw Data
with open("raw_museum_data.json", "w") as file:
json.dump(data, file, indent=2, sort_keys=True)
| notes/scraping/museums.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
import matplotlib.pylab as plt
import random
import pyblp
import numpy as np
import pandas as pd
pyblp.options.digits = 2
pyblp.options.verbose = False
pyblp.__version__
from sklearn.model_selection import train_test_split
# +
df1 = pd.read_csv('../data/processed_data/merged_characteristics.csv')
df2 = pd.read_csv('../data/processed_data/merged_characteristics_2017.csv')
keys = ['HIOS ID', 'IssuerId', 'County', 'State', 'FIPS County Code', 'Policy County FIPS Code','County Name']
#create mkt share data
blp_keys = ['market_ids', 'product_ids', 'firm_ids', 'shares', 'prices','demand_instruments0']
nonlinear_cols = ['csr_pay_94']#,'SBCHavingDiabetesCopayment','SBCHavingaBabyCoinsurance']
#['act_value','csr_pay_94','csr_pay_87','SBCHavingDiabetesCopayment','SBCHavingaBabyCoinsurance']
linear_cols = [ 'act_value', 'MetalLevel_Silver','MetalLevel_Platinum','csr_tot']
#['CompositeRatingOffered_No','MetalLevel_Silver','MetalLevel_Platinum',
# 'BeginPrimaryCareCostSharingAfterNumberOfVisits' , 'csr_tot', 'csr_tot_94', 'PlanType_Indemnity']
#'CSRVariationTypeBinary','DentalOnlyPlan_No','CompositeRatingOffered_No',
firm_cols = linear_cols+ nonlinear_cols
mkt_cols = [ 'DP05_0015PE' ,'DP05_0069PE']
#mkt_cols = [ 'DP05_0015PE' ,'DP05_0072PE', 'DP03_0029PE','DP05_0069PE']
def create_blp_data(df):
    """Prepare a raw merged-characteristics frame for pyblp estimation.

    Adds the pyblp-required columns (shares, prices, product/market/firm
    ids, one demand instrument), renames the CSR columns, scales the firm
    characteristics, and drops rows with degenerate market shares.
    Returns (blp_data, df): the estimation subset and the augmented frame.
    Relies on the module-level column lists blp_keys / linear_cols /
    nonlinear_cols / mkt_cols / firm_cols.
    """
    df = df.fillna(0)
    #create market data...
    # Market share = enrollees over the county population-denominator column.
    df['shares'] = df['Ever Enrolled Count']/df['DP03_0095E']
    #add blp columns
    df['prices'] = df['EHBPercentTotalPremium']
    df['product_ids'] = df['IssuerId'].astype(str) + df['County'].astype(str)
    df['market_ids'] = df['County']
    df['firm_ids'] = df['IssuerId']
    #demand_instrument0
    # Projection onto issuer dummies: M(M'M)^{-1}M' applied to prices gives
    # each product its issuer's average price across markets (Hausman-style
    # instrument). Assumes the dummy matrix has full column rank.
    MktIds = np.array(pd.get_dummies(df['IssuerId']))
    MktIds2 = (MktIds.T).dot(MktIds)
    dummies_proj = MktIds.dot( np.linalg.inv( MktIds2 ) ).dot( MktIds.T )
    df['demand_instruments0'] = dummies_proj.dot(df['prices']) #average price across markets
    #fix problematic columns =
    # Rename long CSR column names to short identifiers usable in formulas.
    df = df.rename(columns={'Average Monthly Advanced CSR Payment for Consumers with 94%':'csr_pay_94',
                                'Average Monthly Advanced CSR Payment for Consumers with 87%':'csr_pay_87',
                                'Total Number of Consumers':'csr_tot',
                                'Number of Consumers with CSR AV of 94%':'csr_tot_94'})
    #TODO why is there no csr tot?
    blp_data = df[blp_keys+linear_cols+nonlinear_cols+mkt_cols+['DP03_0095E']]
    #standardize the cols
    # NOTE(review): this divides by the variance, not the standard
    # deviation, so it is variance-scaling rather than standardization —
    # confirm this is intended.
    for col in firm_cols:
        #this line is causing a warning?
        # (SettingWithCopyWarning: blp_data is a slice of df, so pandas
        # cannot guarantee the assignment writes where expected.)
        blp_data[col] = blp_data[col]/blp_data[col].var()
    #zero shares are not okay?
    # pyblp requires shares strictly inside (0, 1); drop degenerate rows.
    blp_data = blp_data.fillna(0)
    blp_data = blp_data[blp_data['shares'] > 0]
    blp_data = blp_data[blp_data['shares'] < 1 ]
    return blp_data,df
# Build the estimation (df1, loaded earlier in the notebook) and evaluation
# (df2, 2017) tables; the second return value is the full renamed frame.
X_train,df1 = create_blp_data(df1)
X_test,df2 = create_blp_data(df2)
# +
def setup_blp(X_train):
    """Split an estimation table into pyblp product data and agent data.

    Agent data is built (as a shortcut, see TODO) from the per-market mean of
    the demographic columns. Returns (product_data, agent_data).
    """
    # Copy so the dtype conversion below writes into an independent frame
    # instead of a view of X_train (avoids SettingWithCopyWarning).
    product_data = X_train[blp_keys+linear_cols+nonlinear_cols+mkt_cols].copy()
    product_data['market_ids'] = product_data['market_ids'].astype(str)
    # One agent row per market with the mean demographics.
    agent_data = X_train[['market_ids']+ mkt_cols] #TODO CHANGE THIS for now cheat and add mkt cols
    agent_data = agent_data.groupby('market_ids',as_index=False).mean()
    agent_data['market_ids'] = agent_data['market_ids'].astype(str)
    return product_data,agent_data
product_data,agent_data = setup_blp(X_train)
product_data_test,agent_data_test = setup_blp(X_test)
# Sanity check: shares must lie strictly in (0, 1); both prints should be empty.
print( product_data_test[product_data_test['shares'] >1])
print( product_data_test[product_data_test['shares'] <0])
# +
# Estimation with product characteristics only.
# Linear (X1) part: intercept, price, the linear product characteristics, and
# (for now, as a shortcut) the market-level demographic columns.
X1_formula = '1 + prices + ' + ' + '.join(linear_cols + mkt_cols)
print(X1_formula)
# Nonlinear (X2) part: price plus the random-coefficient characteristics.
X2_formula = '0 + prices + ' + ' + '.join(nonlinear_cols)
print(X2_formula)
X1_formulation = pyblp.Formulation(X1_formula)
X2_formulation = pyblp.Formulation(X2_formula)
product_formulations = (X1_formulation, X2_formulation)
# +
#numerical stuff
# Monte Carlo integration over 50 simulated agents per market (fixed seed).
mc_integration = pyblp.Integration('monte_carlo', size=50, specification_options={'seed': 0})
mc_problem = pyblp.Problem(product_formulations, product_data, integration=mc_integration)
# Loose gradient tolerance to keep the BFGS search quick.
bfgs = pyblp.Optimization('bfgs', {'gtol': 1e-2})
#pi0 = np.zeros( (len(nonlinear_cols)+1,1) )
#sigma0 = np.ones((len(nonlinear_cols)+1, len(nonlinear_cols)+1))
# Identity starting values for the random-coefficient covariance.
sigma0 = np.eye(len(nonlinear_cols)+1)
beta0 = np.zeros( (len(nonlinear_cols)+1,1))  # currently unused starting betas
results1 = mc_problem.solve(sigma= sigma0,
                            optimization=bfgs)#,integration=mc_integration)
#TODO: Check if integration adds time?
results1
# +
mc_integration = pyblp.Integration('monte_carlo', size=50, specification_options={'seed': 0})
# NOTE(review): sim_formulations is built but never used below — confirm
# whether it was meant to be passed to pyblp.Simulation.
sim_formulations = (X1_formulation, X2_formulation,
                   pyblp.Formulation('0 + demand_instruments0'))
# Zero unobserved quality for the out-of-sample simulation.
xi = np.zeros(product_data_test.shape[0])
# NOTE(review): both prints check 'shares' <= 0 — the second was presumably
# meant to check shares >= 1; confirm.
print(product_data_test[product_data_test['shares']<=0])
print(product_data_test[product_data_test['shares']<=0])
# Simulate 2017 shares using the coefficients estimated on the training year.
simulation = pyblp.Simulation(product_formulations,
                          beta=results1.beta,
                  sigma=results1.sigma,
                   xi= xi,
                  product_data=product_data_test,
                  integration=mc_integration,
                  seed=0,
                             )
# 'return' iteration keeps prices at their observed values.
sim_results = simulation.replace_endogenous(costs=xi,iteration=pyblp.Iteration('return'))
pred_shares = sim_results.product_data.shares
# +
#evaluating model fit...
# Errors in enrollment counts: scale share errors by county population.
error = (X_test['shares']- pred_shares.flatten())*X_test['DP03_0095E']
true_shares = X_test['shares']*X_test['DP03_0095E']
mse = float( (error**2).mean() )
r2 = float( 1 - (error**2).mean()/true_shares.var() )
print( 'mse',mse, 'r2', r2 )
blp_pred = pred_shares.flatten()*X_test['DP03_0095E']
split =5000
# Histograms above/below the enrollment split. Labels fixed: blp_pred is the
# BLP prediction and true_shares is observed enrollment (they were swapped).
plt.title('>'+str(split))
plt.hist(blp_pred[true_shares >=split],label='BLP',alpha=.5,density=True)
plt.hist(true_shares[true_shares >=split],label='True',alpha = .5,density=True)
plt.legend()
plt.show()
plt.title('<'+str(split))
plt.hist(blp_pred[true_shares <=split],label='BLP',alpha=.5,density=True)
plt.hist(true_shares[true_shares <=split],label='True',alpha = .5,density=True)
plt.legend()
plt.show()
# +
def report_error(X_test,pred_shares):
    """Summarize the 50 worst prediction errors by state.

    Merges squared enrollment-count errors back onto the full 2017 frame
    (module-level ``df2``) and returns per-state mean/std of predicted vs
    observed enrollment for the worst-50 rows, sorted by mean prediction.
    """
    pred_shares = np.array(pred_shares.copy()).flatten()
    X_test = X_test.copy()
    # Squared error and prediction in enrollment counts (share x population).
    X_test['error'] = ((X_test['shares']- pred_shares)*X_test['DP03_0095E'])**2
    X_test['pred'] = pred_shares*X_test['DP03_0095E']
    # Recover descriptive columns (State, enrollment) from the full frame.
    result = X_test.merge(df2, how='inner',left_index=True, right_index=True,suffixes=('_x',''))
    result = result.sort_values('error',ascending=False).head(50)
    #plt.hist(result['Ever Enrolled Count'],label='true',alpha=.5,density=True)
    #plt.hist(result['pred'],label='pred',alpha=.5,density=True)
    #plt.legend()
    #plt.show()
    result_groupby = result.groupby('State')[['pred','Ever Enrolled Count']].mean()
    # Within-state dispersion of predictions and observed enrollment.
    result_groupby[['pred_var','enroll_var']] = result.groupby('State')[['pred','Ever Enrolled Count']].std()
    result_groupby['count'] =result.groupby('State')['shares'].count()
    result_groupby = result_groupby.sort_values('pred',ascending=False)
    return result_groupby
print(report_error(X_test,pred_shares))
# -
# # adding agents to the problem
# +
# Agent (demographic) formulation: no intercept, market-level ACS controls.
agent_formula = '0 + ' + ' + '.join(mkt_cols)
agent_formulation = pyblp.Formulation(agent_formula)
agent_formulation
# +
# Single draw per market since the agent data already carries the means.
mc_integration = pyblp.Integration('monte_carlo', size=1, specification_options={'seed': 0})
# Problem including observed agent heterogeneity (not solved yet, see below).
full_problem = pyblp.Problem(
    product_formulations,
    product_data,
    agent_formulation,
    agent_data,
    integration=mc_integration
)
bfgs = pyblp.Optimization('bfgs', {'gtol': 1e-2})
#full_results = full_problem.solve(pi = np.eye(4),
#                          sigma=np.ones((len(nonlinear_cols)+1, len(nonlinear_cols)+1)),
#                          optimization=bfgs)
#full_results
# -
| analysis_2017/blp_mkt_share_2017_test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] jupyter={"outputs_hidden": true}
# # 1. Pivot Row to column and back
#
# It's very common in data transformation, we need to transform some row into column. In this tutorial, we will show how to do pivot properly.
#
# ## 1.1 Prepare the Spark environment
# -
from pyspark.sql import SparkSession, DataFrame
from pyspark.sql import functions as f
import os
# +
# Create the Spark session: local mode for development, Kubernetes otherwise.
local=False
if local:
    spark = SparkSession.builder\
          .master("local[4]")\
          .appName("Pivot_and_UnPivot")\
          .config("spark.executor.memory", "4g")\
          .getOrCreate()
else:
    # Cluster settings (image, service account, namespace) come from the
    # environment so the notebook works across deployments.
    spark = SparkSession.builder\
        .master("k8s://https://kubernetes.default.svc:443")\
        .appName("RepartitionAndCoalesce")\
        .config("spark.kubernetes.container.image", os.environ["IMAGE_NAME"])\
        .config("spark.kubernetes.authenticate.driver.serviceAccountName", os.environ['KUBERNETES_SERVICE_ACCOUNT'])\
        .config("spark.executor.instances", "4")\
        .config("spark.executor.memory","2g")\
        .config("spark.kubernetes.namespace", os.environ['KUBERNETES_NAMESPACE'])\
        .getOrCreate()
# -
# ## 1.2 Prepare the data
#
# The source data frame has three columns (e.g. "Product", "Amount", "Country") which describe the product export numbers for each country in a year.
#
# For example, ("Banana", 1000, "USA") means the USA exported 1000 tons of bananas.
#
# +
data = [("Banana", 1000, "USA"), ("Carrots", 1500, "USA"), ("Beans", 1600, "USA"),
        ("Orange", 2000, "USA"), ("Orange", 2000, "USA"), ("Banana", 400, "China"),
        ("Carrots", 1200, "China"), ("Beans", 1500, "China"), ("Orange", 4000, "China"),
        ("Banana", 2000, "Canada"), ("Carrots", 2000, "Canada"), ("Beans", 2000, "Mexico")]

columns = ["Product", "Amount", "Country"]
df = spark.createDataFrame(data=data, schema=columns)
print("main output: source data schema")
df.printSchema()
print("main output: source data")
df.show(truncate=False)
# -
# ## 1.3 First example
# Let's first understand what a pivot will do. Below we first build a df that calculates the sum of amount by product.
# The data frame are shown in below figure.
# ```text
# +-------+----------+
# |Product|amount_sum|
# +-------+----------+
# | Beans| 5100|
# | Banana| 3400|
# |Carrots| 4700|
# | Orange| 8000|
# +-------+----------+
# ```
#
# Sum of exported amount per product.
df_product_sum=df.groupBy("Product").agg(f.sum("Amount").alias("amount_sum"))
df_product_sum.show()

# ### Use pivot function
# Now we want the product names to become column names. The code below shows how to pivot rows to columns.
# Note the pivot function can only be called after a groupBy. For now, we leave the groupBy argument empty, meaning there is no group.
#
# The pivot function takes a column name and, for each distinct value in that column, creates a new column.
# Because the given column may contain duplicate values, pivot collects a list of the other columns' values, and an aggregation function must reduce that list to a single value. In this example, we use first to take the first value.
df_product_sum.groupBy().pivot("Product").agg(f.first("amount_sum")).show()
# ## 1.4 A more advanced example
# In the above example, we saw that it is easy to pivot with two dimensions. Now we add another dimension: the export numbers for each country and product.
#
# First, let's see which country exports which product.

# show the country list grouped by Product
df.groupBy("Product").agg(f.collect_list("Country")).show(truncate=False)

# show the Amount by Product and Country
df.groupBy("Product", "Country").sum("Amount").show(truncate=False)

# The multiple-column groupBy gives the export number per country and product, but it is not easy to read. So we pivot the distinct country values into columns.
# The pivot function transforms the country list into columns whose values are computed by the aggregation function sum.
# Note: for rows where a country does not export the product, Spark fills in null.
pivot_country = df.groupBy("Product").pivot("Country").sum("Amount")
pivot_country.printSchema()
pivot_country.show(truncate=False)
# ## 1.5 Performance issues
#
# Pivot is a very expensive operation. If the data frame you want to pivot is big, you may want to optimize it using the following solutions.
# - Solution 1: Provide a list of the column values that we want to pivot.
# - Solution 2: Two-phase groupBy

# ### 1.5.1 Provide a column list
#
# Providing a column list, indicating which columns you want, improves performance a lot. Otherwise, Spark needs to change the column size of the result data frame on the fly.
#
# You only need to build a list of all the distinct values and pass it as the second argument of the pivot function.
#
country_list = ["USA", "China", "Canada", "Mexico"]
pivot_country2 = df.groupBy("Product").pivot("Country", country_list).sum("Amount")
pivot_country2.show(truncate=False)
# Note the order of the values list does not affect the order of the columns, because Spark sorts the values in alphabetical order.
# What happens if we remove USA from the list?
country_list1 = [ "China", "Canada", "Mexico"]
pivot_country3 = df.groupBy("Product").pivot("Country", country_list1).sum("Amount")
pivot_country3.show(truncate=False)
# You can see that Spark simply ignores all "USA" rows, and the resulting data frame only contains the columns defined in the column list.
# ### 1.5.2 Use two-phase groupBy
#
# The optimization philosophy is exactly the same as providing a column list: avoid having Spark change the resulting data frame's column dimension. **We need to tell Spark how many columns the data frame will have before it performs the pivot.** In the two-phase groupBy, the first groupBy computes all possible distinct values of the pivot column; the second phase performs the pivot.

# #### Phase 1. Use groupBy to get all distinct values

df_tmp=df.groupBy("Product","Country").agg(f.sum("Amount").alias("sum_amount"))
df_tmp.show()

# #### Phase 2. Pivot the column
# A little note: the sum function after pivot is different from f.sum(). They do the same thing, but they come from different libraries.
df_pivot_country4=df_tmp.groupBy("Product").pivot("Country").sum("sum_amount")
df_pivot_country4.show()
# ## 1.6 Unpivot
# Unpivot is the reverse operation: rotating column values into row values.
# PySpark SQL doesn't have an unpivot function, so we use the stack() function. The code below converts
# the country columns back to rows.
#
#
# In the stack function, the first argument is the number of pairs you want to unpivot. Below we set 3.
# Then we pass three pairs: '<row_value>', column_name. The first element becomes the value in each row after the unpivot. The second element is a column name of the source data frame (it must match exactly, otherwise Spark raises an error).
#
# In the example below, notice that the literal value Can_ada appears in the resulting data frame.
#
# The **as (Country, Total)** clause defines the column names of the resulting data frame.

unpivot_expr1 = "stack(3, 'Can_ada', Canada, 'China', China, 'Mexico', Mexico) as (Country,Total)"
unpivot_df1 = pivot_country.select("Product", f.expr(unpivot_expr1)).where("Total is not null")
unpivot_df1.show(truncate=False)
unpivot_df1.printSchema()

# The example below shows how to add another column, "USA".
unpivot_expr2 = "stack(4, 'Canada', Canada, 'China', China, 'Mexico', Mexico, 'USA', USA) as (Country,Total)"
unpivot_df2 = pivot_country.select("Product", f.expr(unpivot_expr2)).where("Total is not null")
unpivot_df2.show(truncate=False)
unpivot_df2.printSchema()
| notebooks/sparkcommonfunc/data_transformation/PivotRowToColumnAndBack.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:deeplearning]
# language: python
# name: conda-env-deeplearning-py
# ---
import pandas as pd
import numpy as np
# from keras.models import load_model
import h5py
import pandas as pd
import argparse
import SimpleITK as sitk
from PIL import Image
import os, glob
import os, os.path
import tensorflow as tf
import keras
from UNET_utils import *
# %matplotlib inline
# +
# import argparse
# parser = argparse.ArgumentParser(description='Prediction on HOLDOUT subset',add_help=True)
# parser.add_argument("--holdout", type=int, default=0, help="HOLDOUT subset for predictions")
# args = parser.parse_args()
# HOLDOUT = args.holdout
# +
# Cross-validation holdout subset to predict on (hard-coded here; the
# commented argparse block above is the script-mode equivalent).
HOLDOUT = 5
HO_dir = 'HO{}/'.format(HOLDOUT)
data_dir = '../data/luna16/'
# Pre-trained per-holdout UNET weights, relative to data_dir.
model_wghts = 'hdf5/UNET_weights_H{}.h5'.format(HOLDOUT)
# img_filename = '1.3.6.1.4.1.14519.5.2.1.6279.6001.112767175295249119452142211437.mhd'
# model_file = 'hdf5/cnn_3DUNET_64_64_64_HOLDOUT1_20180420_120442.hdf5'
# c_objects={'dice_coef_loss': dice_coef_loss,'dice_coef':dice_coef}
# model = load_model(data_dir + model_file, custom_objects=c_objects)
# -
def size_equal(s1, s2):
    """Return True when *s1* and *s2* contain the same extents, ignoring order."""
    a, b = sorted(s1), sorted(s2)
    return a == b
def model_create_load(padded_img):
    """Build a 3D UNET matching *padded_img*'s shape plus a channel axis,
    compile it, and load the pre-trained holdout weights."""
    input_shape = padded_img.reshape(tuple(list(padded_img.shape) + [1])).shape
    model = create_UNET3D(input_shape)
    losses = {'PredictionMask': dice_coef_loss,
              'PredictionClass': 'binary_crossentropy'}
    loss_weights = {'PredictionMask': 0.8, 'PredictionClass': 0.2}
    metrics = {'PredictionMask': dice_coef, 'PredictionClass': 'accuracy'}
    model.compile(optimizer='adam', loss=losses,
                  loss_weights=loss_weights, metrics=metrics)
    # by_name lets the weight file match layers even if ordering differs.
    model.load_weights(data_dir + model_wghts, by_name=True)
    return model
# +
# Common padded volume size so a single network input shape fits every scan.
padded_size = (480, 480, 368)
predictions_dict = {}
size_dict = {}
prev_img_size = (0,0,0)  # NOTE(review): unused — candidate for removal.
for f in glob.glob(data_dir + HO_dir + '*.mhd'):
    print ("\n Processing scan file: {}".format(f))
    itk_img = sitk.ReadImage(f)
    img_np_array = sitk.GetArrayFromImage(itk_img)
    original_size = img_np_array.shape
    print ("Original-Size of loaded image : {}".format(original_size))
    ## Normalizing the image size ...Need to confirm? AL
    itk_img_norm = normalize_img(itk_img)
    img_np_array_norm = sitk.GetArrayFromImage(itk_img_norm)
    normalized_size = img_np_array_norm.shape
    # print ("Normalized-Size of image : {}".format(normalized_size))
    # Normalizing HU of image
    img = normalize_HU(img_np_array_norm)
    # SimpleITK arrays are (z, y, x); swap to (x, y, z) for the network.
    img = np.swapaxes(img, 0,2)
    print ("Swapped axis input image : {}".format(img.shape))
    # Zero-pad up to the common volume size.
    padded_img = np.zeros(padded_size)
    padded_img[ :img.shape[0], :img.shape[1], :img.shape[2] ] = img
    model = model_create_load(padded_img)
    # Add batch and channel axes: (1, x, y, z, 1).
    padded_img = padded_img.reshape(tuple([1] + list (padded_img.shape) + [1]))
    print ("Size of input padded image : {}".format(padded_img.shape))
    #### Owing to large image size requiring heaving processing my m/c hangs
    #### For predictions - uncomment the following line on AWS or GPU m/c
    # f_predictions = model.predict(padded_img, verbose=1)
    # predictions_dict[f] = f_predictions
    size_dict[f] = img.shape
# -
size_dict

# #### Testing with image of size 48,48,48 initialized with random between 0 and 1

# Small random volume to exercise the model end-to-end on a laptop.
small_img = np.random.rand(48,48,48)
small_img = small_img.reshape(tuple([1] + list (small_img.shape) + [1]))
# small_img.shape

small_shape = (48, 48, 48, 1)
model = create_UNET3D(small_shape)
model.compile(optimizer='adam',
          loss={'PredictionMask': dice_coef_loss, \
                'PredictionClass': 'binary_crossentropy'}, \
          loss_weights={'PredictionMask': 0.8, 'PredictionClass': 0.2},
          metrics={'PredictionMask':dice_coef,'PredictionClass': 'accuracy'})
# print(tmp_model.summary())
model.load_weights(data_dir + model_wghts, by_name=True)

predictions_small_img = model.predict(small_img, verbose=1)
# NOTE(review): predict returns [mask, class] outputs here — confirm that
# DataFrame construction over that list is the intended inspection.
result = pd.DataFrame(predictions_small_img)

# +
# print ("Shape of predicted mask or segmented image : {}".format(predictions_small_img[0].shape))
# print ("Shape of predicted class : {}".format(predictions_small_img[1].shape))
# predictions_small_img[0] [:, 25 : 26, :]

# +
# ## AL - TEST : making an image of size 48,48,48 with random 0 or 1
# ### Case 2 : As a test created an input image of size (1, 48,48,48,1)
# #  with random 0 or 1; this works fine and able to create predictions successfully
# t2 = np.random.choice(2,(48,48,48))
# t2 = t2.reshape(tuple([1] + list (t2.shape) + [1]))
# print ("Shape of test input image : {}".format(t2.shape))
# predictions = model.predict(t2, verbose=2)
# print ("Shape of predicted mask or segmented image : {}".format(predictions[0].shape))
# print ("Shape of predicted class : {}".format(predictions[1].shape))
# # predictions[0] [:, 25 : 26, :]
| src/predictions/UNET_Predictions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import pyplot as pp
import numpy as np
import csv
import os
import itertools
import threading
import time
import sys
from matplotlib import cm
from numpy import arctan, log, pi, sqrt
# %matplotlib notebook
os.chdir("D:")  # NOTE: machine-specific Windows working directory.
# -
# # Mesh Generation:
# +
# Grid resolution, time steps, and domain extents.
nx = 50
ny = 50
nt = 300
xmin = -5
xmax = 5
ymin = 0
ymax = 10
dx = (xmax - xmin) / (nx - 1)
dy = (ymax - ymin) / (ny - 1)
dt = 0.001
x = np.linspace(xmin, xmax, nx)
y = np.linspace(ymin, ymax, ny)
X, Y = np.meshgrid(x, y)
L = ymax  # characteristic length scale
# +
# Field arrays: current and previous-iterate copies of velocity components,
# temperature, and concentration.
u = np.zeros((nx, ny))
un = np.zeros((nx, ny))
v = np.zeros((nx, ny))
vn = np.zeros((nx, ny))
T = np.zeros((nx, ny))
Tn = np.zeros((nx, ny))
C = np.zeros((nx, ny))
Cn = np.zeros((nx, ny))
# -
# # Flow Specs:
# Physical constants and derived dimensionless groups (SI units assumed for
# fluid properties — confirm against the paper/model this reproduces).
a = 0.1
beta = 0.001  #unsteadiness paramepter
nu = 0.000829
rho = 997.5
mu0 = 4e-7*pi
M = 8.6e5
h = 12.5e-6 #height of magnet
w = 25e-6 #width of magnet
c = 4178 #spcefic heat Cp
Cv = 4.15
eps = 0.2
tau = c/Cv
Q0 = 50 #heat generation
mu = 0.001
H0 = 253843.822358745  # field normalization constant
kf = 0.6129
kp = 80.4
gamma2 = 0.2
V0 = -8
ts = nt*dt #Final time
Cf = 0.2/100
Cinf = 0.4/100
delC = Cf - Cinf
Tinf = 333
Tf = 293 #initial Temp
delT = Tf - Tinf
alpha = kf/(rho*c) #thermal diffusity
Pr = nu/alpha
Re = V0*L/nu
# Nu = 0.05928 * ((0.44 + Cf)**0.536) * Re**0.7 * Pr**0.4
Nu = 0.2497091666
Sh = 0.922369
kB = 1.385e-23
dp = 1e-8 #particle diameter
TF = (Tinf + Tf)/2 #Film temperature
DB = (kB*TF)/(3 * pi * mu * dp)  # Brownian diffusion coefficient
DT = (mu/rho) * (0.26 * kf/(kf + kp))  # thermophoretic diffusion coefficient
# +
# hm = gamma2 * DB * sqrt(alpha/nu)
# -
# Initial conditions (dimensionless fields).
u.fill(a*L/V0)
v.fill(V0)
T.fill(Tf)
C.fill(Cf)
# # Calculating magnitization intensity (H):
def Hcalc(M, i, j, h, w):
    """Magnetic field magnitude of a rectangular magnet (magnetization M,
    extent parameters w and h) at grid point (i, j), normalized by the
    module-level constant H0.
    """
    # Squared distances built from the magnet's corner offsets.
    lower_left = (-h + j)**2 + (-w + i)**2
    upper_left = (h + j)**2 + (-w + i)**2
    lower_right = (-h + j)**2 + (w + i)**2
    upper_right = (h + j)**2 + (w + i)**2
    # Field component from logs of corner-distance ratios.
    log_sum = -log(lower_left / upper_left) + log(lower_right / upper_right)
    # Field component from arctangents of the corner angles.
    atan_sum = (-arctan(2*h*(-w + i) / (-h**2 + j**2 + (-w + i)**2))
                + arctan(2*h*(w + i) / (-h**2 + j**2 + (w + i)**2)))
    magnitude = sqrt(M**2 * log_sum**2 / (16*pi**2)
                     + M**2 * atan_sum**2 / (4*pi**2))
    return magnitude / H0
# # Boundary conditions (1):
# +
# Wall (y = 0) conditions: convective flux via Nusselt/Sherwood numbers.
# NOTE(review): these write index 1 from index 0 — confirm this is the
# intended ghost-node form of the flux condition.
T[:, 1] = T[:, 0] * (1 + (dy * Nu)) # y = 0
C[:, 1] = C[:, 0] * (1 + (dy * Sh)) # y = 0
# Far-field (y -> oo) dimensionless values.
T[:, ny-1] = -1 # y = oo
C[:, ny-1] = -1 # y = oo
v[:, ny-1] = 0 # y = oo
# -
# # Boundary conditions (2):
bb = int(nx/2)
# Time-dependent velocity boundary conditions (stretching-sheet form).
# NOTE(review): each n overwrites the previous, so only the n = nt-1 values
# persist; also the factor uses ts*n with ts = nt*dt — confirm whether
# dt*n (elapsed time) was intended.
for n in range(nt): # n = t
    for i in range(-bb, bb + 1):
        u[i, ny-1] = (a*L*i)/(V0 * (1-beta*ts*n)) # y = oo
        u[i, 0] = (a*L*i)/(V0 * (1-(beta*ts*n))) # y = 0
        v[i, 0] = (-1)/sqrt((1-(beta*ts*n))) # y = 0
# # 1st Derivative: $\frac{\partial F}{\partial x}$ or $\frac{\partial F}{\partial y}$
def diff1(i, j, F, d, var):
    """Second-order central first derivative of F at grid point (i, j).

    F is indexed F[j, i] (first axis = y, second axis = x); *d* is the grid
    spacing along the axis selected by *var* ("x" or "y").
    """
    two_d = 2 * d
    if var == "x":
        dF = (F[j, i + 1] - F[j, i - 1]) / two_d
    elif var == "y":
        dF = (F[j + 1, i] - F[j - 1, i]) / two_d
    return dF
# # 2nd Derivative: $\frac{\partial^2 F}{\partial x^2}$ or $\frac{\partial^2 F}{\partial y^2}$
def diff2(i, j, F, d, var):
    """Second-order central second derivative of F at grid point (i, j).

    F is indexed F[j, i] (first axis = y, second axis = x); *d* is the grid
    spacing along the axis selected by *var* ("x" or "y").

    Bug fix: the central second difference must be divided by d**2, not 2*d
    (the previous divisor was dimensionally wrong for a second derivative).
    """
    if (var == "x"):
        d2F = (F[j, i+1] - 2*F[j, i] + F[j, i-1])/(d**2)
    elif (var == "y"):
        d2F = (F[j+1, i] - 2*F[j, i] + F[j-1, i])/(d**2)
    return d2F
# # Derivative of H: $\frac{\partial H}{\partial x}$ or $\frac{\partial H}{\partial y}$
def Hdiff(i, j, d, var):
    """First-order forward difference of the normalized field H at (i, j).

    Uses the module-level magnet parameters M, h, w via Hcalc; *d* is the
    grid spacing along the axis selected by *var* ("x" or "y").
    """
    if (var == "x"):
        dH = (Hcalc(M, i+1, j, h, w) - Hcalc(M, i, j, h, w))/d
    elif (var == "y"):
        dH = (Hcalc(M, i, j+1, h, w) - Hcalc(M, i, j, h, w))/d
    return dH
# # Initialize the equations:
# +
# Convergence bookkeeping: relative changes per outer iteration, target
# accuracy, and residual histories for plotting.
udiff = 1
Tdiff = 1
Cdiff = 1
accu = 0.01
iteration = []
udiffDat = []
TdiffDat = []
CdiffDat = []
# -
# # Iteration:
# +
STEP = 50  # hard cap on iterations (the "finished" print fires at this cap)
stepcount = 0
aa = int((nx-2)/2)  # half-width of the interior x index range
# NOTE(review): `and` means the loop exits as soon as ANY one residual drops
# below accu — confirm whether `or` (all must converge) was intended.
while (udiff > accu) and (Tdiff > accu) and (Cdiff > accu):
    # Snapshot previous-iterate fields.
    un = u.copy()
    vn = v.copy()
    Tn = T.copy()
    Cn = C.copy()
    for n in range(nt):
        stepcount += 1
        if stepcount == STEP:
            print("Calculation is finished!")
            break
        print("Iteration number: " + str(stepcount))
        iteration.append(stepcount)
        # Update interior points only (boundaries hold their BC values).
        for i in range(-aa, aa+1):
            for j in range(1, ny-2):
                #Velocity
                # NOTE(review): un[i, j] sits INSIDE the (ts*dt)/V0 factor
                # here, whereas Tn[i, j]/Cn[i, j] are added OUTSIDE it in the
                # updates below — confirm the parenthesization is intended.
                u[i, j] = (((ts*dt)/V0) * ((a*L*i*(a+beta))/(1-beta*ts*nt) +
                          ((nu*V0)/(L**2)) * diff2(i, j, un, dy, "y") +
                          ((1/rho)*mu0*M*H0/L) * Hdiff(i, j, dx, "x") -
                          (((V0**2)/L) * un[i, j] * diff1(i, j, un, dx, "x") +
                          (((V0**2)/L) * vn[i, j] * diff1(i, j, un, dy, "y")))
                           + un[i, j]))
                #Temperature
                T[i, j] = (((ts*dt)/delT) * (tau *
                            ((DB/(L**2)) * delC * (delT) * diff1(i, j, Cn, dy, "y") * diff1(i, j, Tn, dy, "y")) +
                            (DT/(Tinf * (L**2))) * (delT**2) * ((diff1(i, j, Tn, dy, "y"))**2)) +
                           (Q0/(rho*c)) * delT * (Tn[i, j] + 1) +
                           ((alpha * delT)/L) * diff2(i, j, Tn, dy, "y") -
                           ((V0/L) * un[i, j] * delT * diff1(i, j, Tn, dx, "x") +
                            (V0/L) * vn[i, j] * delT * diff1(i, j, Tn, dy, "y"))
                           + Tn[i, j])
                #Concentration
                C[i, j] = (((ts*dt)/delC) * ((DB/(L**2)) * delC * diff2(i, j, Cn, dy, "y") +
                            (DT/((L**2)*Tinf)) * delT * diff1(i, j, Tn, dy, "y") -
                            ((V0/L) * un[i, j] * delC * diff1(i, j, Cn, dx, "x") +
                             (V0/L) * vn[i, j] * delC * diff1(i, j, Cn, dy, "y"))) +
                            Cn[i, j])
    # Relative change of each field over the outer iteration.
    udiff = (np.sum(u) - np.sum(un))/np.sum(u)
    Tdiff = (np.sum(T) - np.sum(Tn))/np.sum(T)
    Cdiff = (np.sum(C) - np.sum(Cn))/np.sum(C)
    udiffDat.append(udiff)
    TdiffDat.append(Tdiff)
    CdiffDat.append(Cdiff)
# -
# # Plot Residuals:
# +
# Plot residual histories against iteration count.
xu = np.linspace(0, len(udiffDat), len(udiffDat))
# %matplotlib notebook
pp.figure()
pp.autoscale(enable=True, axis='x', tight=True)
pp.plot(xu, udiffDat, label = "uDiff")
pp.plot(xu, TdiffDat, label = "TDiff")
pp.plot(xu, CdiffDat, label = "CDiff")
# pp.grid(True)
pp.legend();
pp.xlabel("Iterations")
pp.ylabel("Residuals")
pp.tight_layout()
fig = pp.gcf()
fig.set_size_inches(6, 5)
# -
# # Velocity Vectors:
pp.figure()
z = 3  # quiver subsampling stride to keep the arrow field readable
pp.quiver(X[::z, ::z], Y[::z, ::z], u[::z, ::z], v[::z, ::z]);
pp.tight_layout()
# # Temperature Contour:
# +
# Filled contour of the (dimensionless) temperature field.
fig = pp.figure()
contour_res = 100
pp.contourf(X, Y, T, contour_res, cmap = "seismic")
pp.colorbar()
# pp.quiver(X[::z, ::z], Y[::z, ::z], u[::z, ::z], v[::z, ::z])
pp.xlabel('X')
pp.ylabel('Y')
fig = pp.gcf()
fig.set_size_inches(6, 5)
# -
# # Concentration Contour:
# +
# Filled contour of the (dimensionless) concentration field.
fig = pp.figure()
pp.contourf(X, Y, C, contour_res, cmap = "seismic")
pp.colorbar()
# pp.quiver(X[::z, ::z], Y[::z, ::z], u[::z, ::z], v[::z, ::z])
pp.xlabel('X')
pp.ylabel('Y')
fig = pp.gcf()
fig.set_size_inches(6, 5)
# +
# Courant (CFL) check: report the maximum advective Courant number u*dt/dx.
COR = (u*dt)/dx
# Iterate the second axis explicitly; the original used range(len(COR)),
# which indexes columns with the ROW count and only works while nx == ny.
CC = [COR[:, i].max() for i in range(COR.shape[1])]
print(max(CC))
| FHD/FHD Dimensionless v1-Copy1.3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# +
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import os
import random
import sys
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
# %matplotlib inline
# -
# One example Quick Draw ndjson record. Renamed from `dict`, which shadowed
# the builtin of the same name.
sample = {"word":"cat",
          "countrycode":"VE",
          "timestamp":"2017-03-02 23:25:10.07453 UTC",
          "recognized":True,
          "key_id":"5201136883597312",
          # Each stroke is a pair of parallel lists: [x-coords, y-coords].
          "drawing":[
    [
      [130,113,99,109,76,64,55,48,48,51,59,86,133,154,170,203,214,217,215,208,186,176,162,157,132],
      [72,40,27,79,82,88,100,120,134,152,165,184,189,186,179,152,131,114,100,89,76,0,31,65,70]
    ],[
      [76,28,7],
      [136,128,128]
    ],[
      [76,23,0],
      [160,164,175]
    ],[
      [87,52,37],
      [175,191,204]
    ],[
      [174,220,246,251],
      [134,132,136,139]
    ],[
      [175,255],
      [147,168]
    ],[
      [171,208,215],
      [164,198,210]
    ],[
      [130,110,108,111,130,139,139,119],
      [129,134,137,144,148,144,136,130]
    ],[
      [107,106],
      [96,113]
    ]
  ]
 }

# (num strokes, coordinate lists per stroke, points in the first stroke)
len(sample['drawing']), len(sample['drawing'][0]), len(sample['drawing'][0][0])
# !gsutil ls -r "gs://quickdraw_dataset/full/simplified/*"
# !mkdir rnn_tutorial_data
# !cd rnn_tutorial_data && gsutil -m cp "gs://quickdraw_dataset/full/simplified/*" .
def parse_line(ndjson_line):
  """Parse an ndjson line and return ink (as np array) and classname.

  The returned ink has one row per point with columns [dx, dy, stroke_end]:
  size-normalized position deltas plus a 0/1 flag marking the last point of
  each stroke.
  """
  sample = json.loads(ndjson_line)
  class_name = sample["word"]
  inkarray = sample["drawing"]
  stroke_lengths = [len(stroke[0]) for stroke in inkarray]
  total_points = sum(stroke_lengths)
  np_ink = np.zeros((total_points, 3), dtype=np.float32)
  current_t = 0
  for stroke in inkarray:
    for i in [0, 1]:
      np_ink[current_t:(current_t + len(stroke[0])), i] = stroke[i]
    current_t += len(stroke[0])
    np_ink[current_t - 1, 2] = 1  # stroke_end
  # Preprocessing.
  # 1. Size normalization.
  lower = np.min(np_ink[:, 0:2], axis=0)
  upper = np.max(np_ink[:, 0:2], axis=0)
  scale = upper - lower
  scale[scale == 0] = 1
  np_ink[:, 0:2] = (np_ink[:, 0:2] - lower) / scale
  # 2. Compute deltas. Subtract in place so the stroke_end flag in column 2
  # is preserved, then drop the first (undefined-delta) row. Previously the
  # whole array was replaced by the two-column difference, which silently
  # discarded the stroke_end channel.
  np_ink[1:, 0:2] -= np_ink[0:-1, 0:2]
  np_ink = np_ink[1:, :]
  return np_ink, class_name
# Parse the first record of one downloaded class file as a smoke test.
with open("rnn_tutorial_data/The Eiffel Tower.ndjson", "r") as f:
    txt = f.readline()
print("txt", txt)
np_ink, class_name = parse_line(txt)
print("np_ink", np_ink)
print("class_name", class_name)
# Renders the delta array as an image — a quick visual sanity check only.
plt.imshow(np_ink)
# +
def convert_data(trainingdata_dir,
                 observations_per_class,
                 output_file,
                 classnames,
                 output_shards=10,
                 offset=0):
  """Convert training data from ndjson files into tf.Example in tf.Record.

  Args:
   trainingdata_dir: path to the directory containing the training data.
     The training data is stored in that directory as ndjson files.
   observations_per_class: the number of items to load per class.
     NOTE(review): currently unused — the exploratory `break` below stops
     after the first record.
   output_file: path where to write the output.
   classnames: array with classnames - is auto created if not passed in.
   output_shards: the number of shards to write the output in.
   offset: the number of items to skip at the beginning of each file.

  Returns:
    classnames: the class names as strings. classnames[classes[i]] is the
      textual representation of the class of the i-th data point.
  """

  def _pick_output_shard():
    return random.randint(0, output_shards - 1)

  file_handles = []
  # Open all input files.
  for filename in sorted(tf.gfile.ListDirectory(trainingdata_dir)):
    if not filename.endswith(".ndjson"):
      print("Skipping", filename)
      continue
    file_handles.append(
        tf.gfile.GFile(os.path.join(trainingdata_dir, filename), "r"))
    if offset:  # Fast forward all files to skip the offset.
      count = 0
      for _ in file_handles[-1]:
        count += 1
        if count == offset:
          break

  writers = []
  # Use the output_shards PARAMETER rather than the FLAGS global so the
  # function honors its own signature (was FLAGS.output_shards).
  for i in range(output_shards):
    writers.append(
        tf.python_io.TFRecordWriter("%s-%05i-of-%05i" % (output_file, i,
                                                         output_shards)))

  # reading_order = range(len(file_handles)) * observations_per_class
  reading_order = np.arange(0, len(file_handles))
  random.shuffle(reading_order)

  for c in reading_order:
    line = file_handles[c].readline()
    ink = None
    while ink is None:
      ink, class_name = parse_line(line)
      if ink is None:
        print("Couldn't parse ink from '" + line + "'.")
    if class_name not in classnames:
      classnames.append(class_name)
    features = {}
    features["class_index"] = tf.train.Feature(int64_list=tf.train.Int64List(
        value=[classnames.index(class_name)]))
    features["ink"] = tf.train.Feature(float_list=tf.train.FloatList(
        value=ink.flatten()))
    features["shape"] = tf.train.Feature(int64_list=tf.train.Int64List(
        value=ink.shape))
    f = tf.train.Features(feature=features)
    print(f)
    # Exploratory: inspect only the first record, then stop.
    break

  # Return the (possibly extended) class names as documented; the caller
  # assigns this return value (previously the function returned None).
  return classnames
# Command-line flags (parse_known_args so this also runs inside Jupyter,
# which injects its own argv entries).
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
    "--ndjson_path",
    type=str,
    default="/Users/we/PycharmProjects/models/tutorials/rnn/quickdraw/rnn_tutorial_data",
    help="Directory where the ndjson files are stored.")
parser.add_argument(
    "--output_path",
    type=str,
    default="/Users/we/PycharmProjects/models/tutorials/rnn/quickdraw/rnn_tutorial_data_parsed",
    help="Directory where to store the output TFRecord files.")
parser.add_argument(
    "--train_observations_per_class",
    type=int,
    default=10000,
    help="How many items per class to load for training.")
parser.add_argument(
    "--eval_observations_per_class",
    type=int,
    default=1000,
    help="How many items per class to load for evaluation.")
parser.add_argument(
    "--output_shards",
    type=int,
    default=10,
    help="Number of shards for the output.")

FLAGS, unparsed = parser.parse_known_args()

# Convert the training split; the eval split is left commented out.
classnames = convert_data(
    FLAGS.ndjson_path,
    FLAGS.train_observations_per_class,
    os.path.join(FLAGS.output_path, "training.tfrecord"),
    classnames=[],
    output_shards=FLAGS.output_shards,
    offset=0)
# convert_data(
#     FLAGS.ndjson_path,
#     FLAGS.eval_observations_per_class,
#     os.path.join(FLAGS.output_path, "eval.tfrecord"),
#     classnames=classnames,
#     output_shards=FLAGS.output_shards,
#     offset=FLAGS.train_observations_per_class)
# -
| tutorials/rnn/quickdraw/DataTest.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# import the numpy module and the csv module
import numpy as np
import csv
a = np.arange(12)**2
a
# Suppose we want to access three different elements. We could do it like this:
a[2],a[6],a[8]
# Alternatively, we can pass a single list or array of indices to obtain the same result:
indx_1 = [2,6,8]
a[indx_1]
# +
# When using fancy indexing, the shape of the result reflects the shape of the
# index arrays rather than the shape of the array being indexed.
# -
indx_2 = np.array([[2,4],[8,10]])
indx_2
a[indx_2]
# We can also give indexes for more than one dimension. The arrays of indices for each dimension must have the same shape.
food = np.array([["blueberry","strawberry","cherry","blackberry"],
                ["pinenut","hazelnuts","cashewnut","coconut"],
                ["mustard","paprika","nutmeg","clove"]])
food
# We will now select the corner elements of this array
row = np.array([[0,0],[2,2]])
col = np.array([[0,3],[0,3]])
food[row,col]
# Notice that the first value in the result is food[0,0], next is food[0,3], then food[2,0], and lastly food[2,3]
food[2,0]
# Modifying Values with Fancy Indexing
# Just as fancy indexing can be used to access parts of an array, it can also be used to modify parts of an array.
food[row,col] = "000000"
food
# We can use any assignment-type operator for this. Consider the following example:
a
indx_1
a[indx_1] = 999
a
a[indx_1] -=100
a
| IndexingWithArraysOfIndices_m02_demo01.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Reading and Exploring Data using Pandas
import pandas as pd
# Read the diabetes dataset from CSV into a DataFrame
df = pd.read_csv('../data/diabetes.csv')
# Examine the first few rows
df.head()
# Examine the last few rows
df.tail()
# (rows, columns) of the DataFrame
df.shape
# Column labels
df.columns
# Per-column data types
df.dtypes
# Concise summary: index, dtypes, non-null counts, memory usage
df.info()
| book/_build/jupyter_execute/descriptive/01-Reading-And-Exploring-Data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import datacube
dc = datacube.Datacube(app="List Products")
# +
area_json = {
"type": "FeatureCollection",
"features": [
{
"type": "Feature",
"properties": {},
"geometry": {
"type": "Polygon",
"coordinates": [
[
[
149.2719268798828,
-35.217575569215505
],
[
149.5095062255859,
-35.217575569215505
],
[
149.5095062255859,
-34.97543887925878
],
[
149.2719268798828,
-34.97543887925878
],
[
149.2719268798828,
-35.217575569215505
]
]
]
}
}
]
}
# -
lons, lats = zip(*area_json["features"][0]["geometry"]["coordinates"][0])
# +
lat_extents = (min(lats), max(lats))
lon_extents = (min(lons), max(lons))
print("Lat:", lat_extents, "\n"
"Lon:", lon_extents)
# -
# # Load Data
# +
from datetime import datetime
query = dict( latitude = lat_extents,
longitude = lon_extents,
output_crs = "EPSG:3577",
time = (datetime(2017,1,1), datetime(2018,1,1)),
resolution = (30,30))
# -
# **Scenes**
scenes = dc.load(product="ls8_nbar_scene",
group_by='solar_day',
measurements = ["1","2","3","4","5","6","7"],
**query)
bands = {"1": "coastal_aerosol",
"2": "blue",
"3": "green",
"4": "red",
"5": "nir",
"6": "swir1",
"7": "swir2"}
scenes = scenes.rename(bands)
scenes.isel(time = 6).swir1.plot()
scenes
# **Pixel QA**
# +
from datacube.helpers import ga_pq_fuser
pqa = dc.load(product="ls8_pq_scene",
group_by='solar_day',
fuse_func= ga_pq_fuser,
**query)
# -
pqa
# ### Times
# +
scene_times = scenes.time.values
pqa_times = pqa.time.values
common_times = set(scene_times).intersection(set(pqa_times))
# -
scenes = scenes.sel(time = list(common_times))
pqa = pqa.sel(time = list(common_times))
# **Build PQA mask**
from datacube.storage import masking
good_quality = masking.make_mask(pqa,
cloud_acca = "no_cloud",
cloud_fmask = "no_cloud",
cloud_shadow_acca = "no_cloud_shadow",
cloud_shadow_fmask = "no_cloud_shadow",
contiguous = True)
good_quality
# ### Wofs Classify Code (from AMA)
# +
import gc
import numpy as np
import xarray as xr
import datacube
# Command line tool imports
import argparse
import os
import collections
import gdal
from datetime import datetime
def wofs_classify(dataset_in, clean_mask=None, x_coord='longitude', y_coord='latitude',
                  time_coord='time', no_data=-9999, mosaic=False, enforce_float64=False):
    """Run the WOfS (Water Observations from Space) decision tree on a dataset.

    The classifier is a hand-coded regression tree (Mueller et al., 2015)
    evaluated with vectorized numpy boolean masks.

    References:
        - Mueller, et al. (2015) "Water observations from space: Mapping surface
          water from 25 years of Landsat imagery across Australia."
          Remote Sensing of Environment.
        - https://github.com/GeoscienceAustralia/eo-tools/blob/stable/eotools/water_classifier.py

    Args:
        dataset_in (xarray.Dataset): must contain the variables
            blue, green, red, nir, swir1, swir2, plus the coordinates named by
            x_coord / y_coord (and time_coord unless mosaic=True).
        clean_mask (bool ndarray, optional): True where pixels are considered
            clean. If None, a default mask is built by
            create_default_clean_mask (defined elsewhere in this project —
            not in this file).
        x_coord, y_coord, time_coord (str): names of the coordinate arrays.
        no_data (int/float): value written for non-clean pixels. Default -9999.
        mosaic (bool): if True, dataset_in has no time dimension and the tree
            runs once over the mosaicked image; otherwise it runs per slice.
        enforce_float64 (bool): force float64 math; float32 is used otherwise.

    Returns:
        xarray.Dataset with a single variable 'wofs':
        0 = not water, 1 = water, no_data = masked.
    """
    def _band_ratio(a, b):
        """
        Calculates a normalized ratio index
        """
        return (a - b) / (a + b)
    def _run_regression(band1, band2, band3, band4, band5, band7):
        """
        Regression analysis based on Australia's training data

        Band mapping (see the call site below): band1=blue, band2=green,
        band3=red, band4=nir, band5=swir1, band7=swir2 (Landsat numbering).
        TODO: Return type
        """
        # Compute normalized ratio indices
        ndi_52 = _band_ratio(band5, band2)
        ndi_43 = _band_ratio(band4, band3)
        ndi_72 = _band_ratio(band7, band2)
        #classified = np.ones(shape, dtype='uint8')
        # NOTE(review): no_data is cast to uint8 here (e.g. -9999 wraps to 241),
        # so unclassified pixels will NOT equal no_data in this intermediate
        # array. The caller re-applies no_data via clean_mask afterwards.
        classified = np.full(shape, no_data, dtype='uint8')
        # Start with the tree's left branch, finishing nodes as needed
        # Left branch
        r1 = ndi_52 <= -0.01
        r2 = band1 <= 2083.5
        classified[r1 & ~r2] = 0 #Node 3
        r3 = band7 <= 323.5
        _tmp = r1 & r2
        _tmp2 = _tmp & r3
        _tmp &= ~r3
        r4 = ndi_43 <= 0.61
        classified[_tmp2 & r4] = 1 #Node 6
        classified[_tmp2 & ~r4] = 0 #Node 7
        r5 = band1 <= 1400.5
        _tmp2 = _tmp & ~r5
        r6 = ndi_43 <= -0.01
        classified[_tmp2 & r6] = 1 #Node 10
        classified[_tmp2 & ~r6] = 0 #Node 11
        _tmp &= r5
        r7 = ndi_72 <= -0.23
        _tmp2 = _tmp & ~r7
        r8 = band1 <= 379
        classified[_tmp2 & r8] = 1 #Node 14
        classified[_tmp2 & ~r8] = 0 #Node 15
        _tmp &= r7
        r9 = ndi_43 <= 0.22
        classified[_tmp & r9] = 1 #Node 17
        _tmp &= ~r9
        r10 = band1 <= 473
        classified[_tmp & r10] = 1 #Node 19
        classified[_tmp & ~r10] = 0 #Node 20
        # Left branch complete; cleanup intermediate masks to reduce peak memory
        del r2, r3, r4, r5, r6, r7, r8, r9, r10
        gc.collect()
        # Right branch of regression tree
        r1 = ~r1
        r11 = ndi_52 <= 0.23
        _tmp = r1 & r11
        r12 = band1 <= 334.5
        _tmp2 = _tmp & ~r12
        classified[_tmp2] = 0 #Node 23
        _tmp &= r12
        r13 = ndi_43 <= 0.54
        _tmp2 = _tmp & ~r13
        classified[_tmp2] = 0 #Node 25
        _tmp &= r13
        r14 = ndi_52 <= 0.12
        _tmp2 = _tmp & r14
        classified[_tmp2] = 1 #Node 27
        _tmp &= ~r14
        r15 = band3 <= 364.5
        _tmp2 = _tmp & r15
        r16 = band1 <= 129.5
        classified[_tmp2 & r16] = 1 #Node 31
        classified[_tmp2 & ~r16] = 0 #Node 32
        _tmp &= ~r15
        r17 = band1 <= 300.5
        _tmp2 = _tmp & ~r17
        _tmp &= r17
        classified[_tmp] = 1 #Node 33
        classified[_tmp2] = 0 #Node 34
        _tmp = r1 & ~r11
        r18 = ndi_52 <= 0.34
        classified[_tmp & ~r18] = 0 #Node 36
        _tmp &= r18
        r19 = band1 <= 249.5
        classified[_tmp & ~r19] = 0 #Node 38
        _tmp &= r19
        r20 = ndi_43 <= 0.45
        classified[_tmp & ~r20] = 0 #Node 40
        _tmp &= r20
        r21 = band3 <= 364.5
        classified[_tmp & ~r21] = 0 #Node 42
        _tmp &= r21
        r22 = band1 <= 129.5
        classified[_tmp & r22] = 1 #Node 44
        classified[_tmp & ~r22] = 0 #Node 45
        # Completed regression tree
        return classified
    # Default to masking nothing.
    if clean_mask is None:
        clean_mask = create_default_clean_mask(dataset_in)
    # Extract dataset bands needed for calculations
    blue = dataset_in.blue
    green = dataset_in.green
    red = dataset_in.red
    nir = dataset_in.nir
    swir1 = dataset_in.swir1
    swir2 = dataset_in.swir2
    # Enforce float calculations - float64 if user specified, otherwise float32 will do
    dtype = blue.values.dtype # This assumes all dataset bands will have
                              # the same dtype (should be a reasonable
                              # assumption)
    if enforce_float64:
        if dtype != 'float64':
            blue.values = blue.values.astype('float64')
            green.values = green.values.astype('float64')
            red.values = red.values.astype('float64')
            nir.values = nir.values.astype('float64')
            swir1.values = swir1.values.astype('float64')
            swir2.values = swir2.values.astype('float64')
    else:
        if dtype == 'float64':
            pass
        elif dtype != 'float32':
            blue.values = blue.values.astype('float32')
            green.values = green.values.astype('float32')
            red.values = red.values.astype('float32')
            nir.values = nir.values.astype('float32')
            swir1.values = swir1.values.astype('float32')
            swir2.values = swir2.values.astype('float32')
    # `shape` is read by _run_regression via closure.
    shape = blue.values.shape
    classified = _run_regression(blue.values, green.values, red.values, nir.values, swir1.values, swir2.values)
    # Re-apply no_data: only clean pixels keep their classification.
    classified_clean = np.full(classified.shape, no_data, dtype='float64')
    classified_clean[clean_mask] = classified[clean_mask] # Contains data for clear pixels
    # Create xarray of data
    x_coords = dataset_in[x_coord]
    y_coords = dataset_in[y_coord]
    time = None  # NOTE(review): unused; kept for byte-compatibility
    coords = None
    dims = None
    if mosaic:
        coords = [y_coords, x_coords]
        dims = [y_coord, x_coord]
    else:
        time_coords = dataset_in[time_coord]
        coords = [time_coords, y_coords, x_coords]
        dims = [time_coord, y_coord, x_coord]
    data_array = xr.DataArray(classified_clean, coords=coords, dims=dims)
    if mosaic:
        dataset_out = xr.Dataset({'wofs': data_array},
                                 coords={y_coord: y_coords, x_coord: x_coords})
    else:
        dataset_out = xr.Dataset(
            {'wofs': data_array},
            coords={time_coord: time_coords, y_coord: y_coords, x_coord: x_coords})
    return dataset_out
# -
# ### Run wofs classify
water_classes = wofs_classify(scenes, clean_mask = good_quality.pqa.values, no_data = np.nan, x_coord='x', y_coord = "y")
# %matplotlib inline
# +
import matplotlib.pyplot as plt
for t in water_classes.time:
water = water_classes.wofs.sel(time = t)
water.plot()
plt.show()
# -
# Plot the mean water-observation frequency over the full time range.
plt.figure(figsize = (10,8))
water_classes.wofs.mean(dim = 'time').plot()
# Ensure the output directory exists before writing to it.
# Fixed: the created directory must match output_file_name below
# (it previously created 'landsat7', so the NetCDF write could fail).
# !mkdir -p output/netcdfs/landsat8_nbar
output_file_name = "output/netcdfs/landsat8_nbar/l8_wofs_product.nc"
water_classes
scenes.attrs
# Carry the source-scene attributes onto the output so the NetCDF keeps its metadata.
dataset_to_output = water_classes.assign_attrs(scenes.attrs)
datacube.storage.storage.write_dataset_to_netcdf(dataset_to_output, output_file_name)
# !ls output/netcdfs/landsat8_nbar/
| water_interoperability/l8_nbar_load_and_classify.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="458aUugKRDVW" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0.0}, "output_extras": [{"item_id": 18.0}, {"item_id": 34.0}, {"item_id": 47.0}], "base_uri": "https://localhost:8080/", "height": 2757.0} outputId="93d6a5db-8c70-476f-e70c-33a9bc61a0e8" executionInfo={"status": "ok", "timestamp": 1520054982016.0, "user_tz": -330.0, "elapsed": 50716.0, "user": {"displayName": "<NAME>", "photoUrl": "//lh5.googleusercontent.com/-qIbE8Z4Hng0/AAAAAAAAAAI/AAAAAAAAAf8/jYgwkubgbYk/s50-c-k-no/photo.jpg", "userId": "102805649547285129830"}}
## PREREQUISITE FOR RUNNUNG ON GOOGLE COLAB
from urllib.request import urlretrieve
from zipfile import ZipFile
import os
# # !rm -rf CarND-Behavioral-Cloning-P3/
if ("CarND-Behavioral-Cloning-P3" not in os.listdir(".")):
# !git clone https://github.com/udacity/CarND-Behavioral-Cloning-P3.git
# !pip install opencv-python
# !apt update && apt install -y libsm6 libxext6
# !pip install -q keras
urlretrieve("https://d17h27t6h515a5.cloudfront.net/topher/2016/December/584f6edd_data/data.zip","./CarND-Behavioral-Cloning-P3/data.zip")
with ZipFile("./CarND-Behavioral-Cloning-P3/data.zip") as zipf:
zipf.extractall("./CarND-Behavioral-Cloning-P3/")
# !pip install -q imageio
# !pip install moviepy
# !python -c "exec(\"import imageio\\nimageio.plugins.ffmpeg.download()\")"
# !pip install flask-socketio
# !pip install eventlet
# !apt-get -o Dpkg::Options::="--force-confmiss" install --reinstall netbase
# + id="Pf63bXclRakB" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0.0}}
import csv
import cv2
import numpy as np
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
import gc
import random
class DataSet:
    """Driving-log dataset with infinite batch generators for Keras training.

    Reads a Udacity-style driving_log.csv, splits it 80/20 into train/test,
    and yields augmented (images, steering-angle) batches.
    """
    # Class-level defaults; all are rebound per instance in __init__ / the
    # generator accessors.
    lines_train = None   # CSV rows for training (list of lists)
    lines_test = None    # CSV rows held out for validation
    gen_train = None     # cached training generator
    gen_test = None      # cached validation generator
    train_size = None    # number of training rows
    test_size = None     # number of validation rows
    image_shape = None   # shape of one camera frame, e.g. (160, 320, 3)
    base_path = None     # directory containing the CSV and image paths
    batch_size = None    # rows consumed per yielded batch
    def __init__(self, data_file, batch_size=64):
        """Load the CSV at *data_file* and prepare the train/test split."""
        self.batch_size = batch_size
        self.base_path = data_file.rsplit("/", 1)[0]
        lines = []
        with open(data_file) as csv_file:
            reader = csv.reader(csv_file)
            for line in reader:
                lines.append(line)
        # Drop the CSV header row.
        lines = lines[1:]
        self.lines_train, self.lines_test = train_test_split(lines, test_size=0.2)
        self.train_size = len(self.lines_train)
        self.test_size = len(self.lines_test)
        self.n = len(lines)
        # Probe one image to learn the frame shape used by the model input.
        self.image_shape = cv2.imread(self.base_path + "/" + lines[0][0].strip()).shape
    def __generate(self, lines):
        """Infinite generator over *lines* yielding augmented (X, y) batches.

        Each CSV row contributes 6 samples: 3 camera views plus their
        horizontal flips (with negated angles), randomly interleaved.
        """
        while True:
            shuffle(lines)
            for offset in range(0, len(lines), self.batch_size):
                batch = lines[offset: offset + self.batch_size]
                views = []
                steers = []
                for line in batch:
                    # Columns 0-2 are image paths; column 3 the steering angle.
                    view = [cv2.imread(self.base_path + "/" + (line[i].strip())) for i in range(3)]
                    # +/-0.25 steering correction for the side cameras
                    # (assumes column order center,left,right — TODO confirm
                    # against the CSV).
                    steer = [float(line[3]),0.25+float(line[3]),float(line[3])-0.25]
                    # Augment: horizontally flipped copies with negated angles.
                    for img in list(view):
                        view.append(np.fliplr(img))
                    for angle in list(steer):
                        steer.append(-angle)
                    # Insert the 6 samples at random positions so a batch is
                    # not grouped per source row.
                    index = np.zeros(len(view))
                    if len(views) > 0:
                        index = [random.randrange(0, len(views)+i) for i in range(len(index))]
                    for i in range(len(index)):
                        views.insert(int(index[i]), view[i])
                        steers.insert(int(index[i]), steer[i])
                # NOTE(review): effectively a no-op — a batch holds at most
                # 6 * batch_size samples, far fewer than len(lines).
                views = views[:len(lines)]
                steers = steers[:len(lines)]
                X = np.array(views)
                y = np.array(steers)
                gc.collect()
                yield X, y
    def for_training(self):
        """Return the (cached) infinite generator over the training split."""
        if self.gen_train is None:
            self.gen_train = self.__generate(self.lines_train)
        return self.gen_train
    def for_testing(self):
        """Return the (cached) infinite generator over the validation split."""
        if self.gen_test is None:
            self.gen_test = self.__generate(self.lines_test)
        return self.gen_test
# + id="AgZUA-nBRhHi" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0.0}, "output_extras": [{"item_id": 1.0}, {"item_id": 307.0}], "base_uri": "https://localhost:8080/", "height": 232.0} outputId="a5023929-2975-4f64-aaf6-e2c0714e5ced" executionInfo={"status": "ok", "timestamp": 1520067166234.0, "user_tz": -330.0, "elapsed": 321595.0, "user": {"displayName": "<NAME>", "photoUrl": "//lh5.googleusercontent.com/-qIbE8Z4Hng0/AAAAAAAAAAI/AAAAAAAAAf8/jYgwkubgbYk/s50-c-k-no/photo.jpg", "userId": "102805649547285129830"}}
dataset = DataSet("./CarND-Behavioral-Cloning-P3/data/driving_log.csv")
from keras.callbacks import ModelCheckpoint
from keras.models import Sequential, Model
from keras.layers import Dense, Flatten, Lambda, Dropout, Cropping2D, Activation, Merge, Input, ELU
from keras.layers.convolutional import Convolution2D
from keras.layers.core import *
from keras.models import Model
from keras.layers.merge import concatenate
def nvidia_functional():
    """Build the NVIDIA end-to-end steering CNN using the Keras functional API.

    Input: raw camera frames of shape ``dataset.image_shape`` (module-level
    ``dataset``). Output: a single steering-angle regression value.

    NOTE(review): this variant crops ((70, 20)) while nvidia_sequential crops
    ((70, 25)) — confirm whether the difference is intentional.
    """
    imagein = Input(shape=dataset.image_shape)
    # Crop sky (top 70 px) and hood (bottom 20 px), then scale pixels to [-1, 1].
    layer = Cropping2D(cropping=((70, 20), (0, 0)))(imagein)
    layer = Lambda(lambda x: (x / 127.5) - 1)(layer)
    # Three strided 5x5 conv stages followed by two 3x3 conv stages.
    layer = Convolution2D(24, 5, 5, border_mode='valid', activation='relu', subsample=(2, 2))(layer)
    layer = Convolution2D(36, 5, 5, border_mode='valid', activation='relu', subsample=(2, 2))(layer)
    layer = Convolution2D(48, 5, 5, border_mode='valid', activation='relu', subsample=(2, 2))(layer)
    layer = Convolution2D(64, 3, 3, border_mode='valid', activation='relu', subsample=(1, 1))(layer)
    layer = Convolution2D(64, 3, 3, border_mode='valid', activation='relu', subsample=(1, 1))(layer)
    layer = Flatten()(layer)
    # Fully connected head with dropout regularization.
    layer = Dense(100, activation="elu")(layer)
    layer = Dropout(0.5)(layer)
    layer = Dense(50, activation="elu")(layer)
    layer = Dropout(0.5)(layer)
    layer = Dense(10)(layer)
    steerout = Dense(1)(layer)
    model = Model(inputs=imagein, outputs=steerout)
    return model
def nvidia_sequential():
    """Build the NVIDIA end-to-end steering CNN as a Keras Sequential model.

    Input: raw camera frames of shape ``dataset.image_shape`` (module-level
    ``dataset``). Output: a single steering-angle regression value.
    """
    model = Sequential()
    # Crop sky (top 70 px) and hood (bottom 25 px), then scale pixels to [-1, 1].
    model.add(Cropping2D(cropping=((70, 25), (0, 0)), input_shape=dataset.image_shape))
    model.add(Lambda(lambda x: (x / 127.5) - 1))
    # Three strided 5x5 conv stages with increasing depth.
    for depth in (24, 36, 48):
        model.add(Convolution2D(depth, 5, 5, border_mode='valid',
                                activation='relu', subsample=(2, 2)))
    # Two unstrided 3x3 conv stages.
    for _ in range(2):
        model.add(Convolution2D(64, 3, 3, border_mode='valid',
                                activation='relu', subsample=(1, 1)))
    model.add(Flatten())
    # Fully connected head with dropout regularization.
    for units in (100, 50):
        model.add(Dense(units, activation="elu"))
        model.add(Dropout(0.5))
    model.add(Dense(10))
    model.add(Dense(1))
    return model
def comma_ai():
    """Build the comma.ai steering model: three convolutions, one wide dense layer.

    Input: raw camera frames of shape ``dataset.image_shape`` (module-level
    ``dataset``). Output: a single steering-angle regression value.
    """
    # Full layer sequence, in order; assembled into a Sequential model below.
    layer_stack = [
        # Crop sky/hood, then scale pixels to [-1, 1].
        Cropping2D(cropping=((70, 25), (0, 0)), input_shape=dataset.image_shape),
        Lambda(lambda x: x/127.5 - 1.0),
        Convolution2D(16, 8, 8, subsample=(4, 4), border_mode="same"),
        ELU(),
        Convolution2D(32, 5, 5, subsample=(2, 2), border_mode="same"),
        ELU(),
        Convolution2D(64, 5, 5, subsample=(2, 2), border_mode="same"),
        Flatten(),
        Dropout(.2),
        ELU(),
        Dense(512),
        Dropout(.5),
        ELU(),
        Dense(1),
    ]
    model = Sequential()
    for layer in layer_stack:
        model.add(layer)
    return model
model = nvidia_sequential()
model.compile(optimizer="adam", loss="mse")
model.fit_generator(generator=dataset.for_training(),
steps_per_epoch=dataset.train_size / dataset.batch_size,
validation_data=dataset.for_testing(),
validation_steps=dataset.test_size / dataset.batch_size,
epochs=3)
model.save('./CarND-Behavioral-Cloning-P3/model.h5')
# + id="aBYP8uDPWhC8" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0.0}, "output_extras": [{"item_id": 289.0}, {"item_id": 290.0}], "base_uri": "https://localhost:8080/", "height": 141.0} outputId="cb9fa623-51b7-41c4-8df6-f8b0005c2789" executionInfo={"status": "ok", "timestamp": 1520066041233.0, "user_tz": -330.0, "elapsed": 305377.0, "user": {"displayName": "<NAME>", "photoUrl": "//lh5.googleusercontent.com/-qIbE8Z4Hng0/AAAAAAAAAAI/AAAAAAAAAf8/jYgwkubgbYk/s50-c-k-no/photo.jpg", "userId": "102805649547285129830"}}
from keras.models import load_model
model = load_model('./CarND-Behavioral-Cloning-P3/model.h5')
model.fit_generator(generator=dataset.for_training(),
steps_per_epoch=dataset.train_size / dataset.batch_size,
validation_data=dataset.for_testing(),
validation_steps=dataset.test_size / dataset.batch_size,
epochs=3)
# + id="XpBIzp32WlBE" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0.0}}
# DOWNLOAD TRAINED MODEL FROM GOOGLE COLAB
model.save('./CarND-Behavioral-Cloning-P3/model.h5')
from google.colab import files
files.download('./CarND-Behavioral-Cloning-P3/model.h5')
| colab.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Training an activity classifier
#
# This notebook was used to train the activity classification model **GestureClassifier.mlmodel** supplied with the book Machine Learning by Tutorials.
'''
/// Copyright (c) 2018 Razeware LLC
///
/// Permission is hereby granted, free of charge, to any person obtaining a copy
/// of this software and associated documentation files (the "Software"), to deal
/// in the Software without restriction, including without limitation the rights
/// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
/// copies of the Software, and to permit persons to whom the Software is
/// furnished to do so, subject to the following conditions:
///
/// The above copyright notice and this permission notice shall be included in
/// all copies or substantial portions of the Software.
///
/// Notwithstanding the foregoing, you may not use, copy, modify, merge, publish,
/// distribute, sublicense, create a derivative work, and/or sell copies of the
/// Software in any work that is designed, intended, or marketed for pedagogical or
/// instructional purposes related to programming, coding, application development,
/// or information technology. Permission for such use, copying, modification,
/// merger, publication, distribution, sublicensing, creation of derivative works,
/// or sale is expressly withheld.
///
/// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
/// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
/// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
/// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
/// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
/// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
/// THE SOFTWARE.
'''
import turicreate as tc
import activity_detector_utils as utils
# # Load clean datasets
train_sf = tc.SFrame("data/cleaned_train_sframe")
valid_sf = tc.SFrame("data/cleaned_valid_sframe")
test_sf = tc.SFrame("data/cleaned_test_sframe")
# ### Optional: Split a validation set from your training set
#
# This step is **not** necessary because we already have a separate validation set. This project does not use the results of the following cell, but it's here as an example to show you how to do it *if* you ever want to in your own projects.
train, valid = tc.activity_classifier.util.random_split_by_session(train_sf, session_id='sessionId', fraction=0.9)
utils.count_activities(train)
utils.count_activities(valid)
# # Verify your model correctness by overfitting
#
# This step is to ensure the dataset and model are applicable to the problem, and the model is implemented properly. The following cell grabs a random 5% of the training data and trains an activity classifier model with it to ensure it learns successfully. You can see the training accuracy is quite high – at times 100% – which is a good indication that the model should be able to learn from your dataset.
# +
tiny_train, _ = tc.activity_classifier.util.random_split_by_session(train_sf, session_id='sessionId', fraction=0.05)
tc.activity_classifier.create(
dataset=tiny_train, session_id='sessionId', target='activity',
features=["rotX", "rotY", "rotZ", "accelX", "accelY", "accelZ"],
prediction_window=20, max_iterations=50)
# -
# # Train the model
# You'll get different results every time you run this training cell, because there's some randomness built into the training process. You may get slightly better or worse results, but they should be pretty close to these.
# Create an activity classifier
model = tc.activity_classifier.create(
dataset=train_sf, session_id='sessionId', target='activity',
features=["rotX", "rotY", "rotZ", "accelX", "accelY", "accelZ"],
prediction_window=20, validation_set=valid_sf, max_iterations=50)
model.summary()
# ## Evaluate trained model
metrics = model.evaluate(test_sf)
print(f"Accuracy: {metrics['accuracy']}")
print(metrics)
metrics['confusion_matrix'].print_rows(num_rows=11)
# ## Save the trained model
#
# Export the model to Core ML and save a copy you can reload here if you want to do anything else with it (e.g. test it on a different dataset)
model.export_coreml("GestureClassifier.mlmodel")
model.save("GestureClassifier")
| mlt-materials-editions-2.0/12-sequence-model-creation/projects/notebooks/Model_Training_Complete.ipynb |
// -*- coding: utf-8 -*-
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .cpp
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: C++11
// language: C++11
// name: python3100jvsc74a57bd0a2954483fe4136a94bd721da8601251ae4c667fedb6adcfe70ff0183b19bacbc
// ---
// # 布尔值
//
// 布尔变量(`bool`)的取值:布尔字面值常量。布尔字面值常量的取值为 `true` (整型值为 `1`) 和 `false` (整型值为 `0`)。
//
// 布尔变量常用于表示逻辑运算的结果。
// Two integers to compare (brace initialization).
int a {20};
int b {30};
// Store the result of the comparison in a bool: 20 == 30 is false.
bool c {a == b};
c
// After making b equal to a, the same comparison yields true.
b = 20;
bool c1 {a == b};
c1
// Boolean literals themselves (integer values 1 and 0).
true
false
| docs/start/concepts/01_bool.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="LcmGjHZnkxRs" colab_type="text"
# <h1 align="center"> UNIVERSIDAD NACIONAL DE INGENIERIA</h1>
# <h1 align="center"> Inteligencia Artificial</h1>
# <h3 align="center"> Proyecto Final Parte 01 - Extracción de características </h3>
# <h4 align="center"> <NAME> : <EMAIL></h4>
# <h4 align="center"> <NAME> : <EMAIL> </h4>
# <h4 align="center"> <NAME> : <EMAIL> </h4>
# <h4 align="center"> Docente : <NAME> </h4>
#
#
# + [markdown] id="4tj7r4SZkxRv" colab_type="text"
# --------------------------------
# + [markdown] id="26jtw_25kxRw" colab_type="text"
# **Resumen :** Los usuarios de una línea telefónica son propensos a portar a otro operador, por ende es necesario saber previamente si esto va a suceder para que la empresa pueda emplear medidas para que esto no suceda. El proyecto presente tiene dos objetivos principales: la clasificación usuarios que portan o no y la interpretación del modelo para identificar las razones especificas de la portabilidad de los clientes.
#
# + id="s3lXGbll9Qeo" colab_type="code" colab={}
# + [markdown] id="fKmI3w92kxR3" colab_type="text"
# --------------------------
# + [markdown] id="4TESSR7ikxR9" colab_type="text"
# <a id="1"></a>
#
# # 1. INDICE
#
#
# * [1. INDICE](#1)
# * [2. DATASET](#2)
# * [3. MÉTRICA](#3)
# * [4. IMPORTANDO LIBRERIAS](#4)
# * [5. CARGAR DATA](#5)
# * [6. DATA EXPLORATION and FEATURES CREATION](#6)
# * [7. PROBANDO PCA PARA REDUCIR LOS FEATURES](#7)
# * [8. USANDO LOS FEATURES HALLADOS EN EL ARCHIVO PROYECTO_FINAL_02](#8)
# * [9. CONTINUAR ARCHIVO PROYECTO_FINAL_02](#9)
#
#
#
# + [markdown] id="dhhAsxGJkxSC" colab_type="text"
# <a id="2"></a>
#
# # 2. DATASET
# + [markdown] id="3rMUYs_zkxSI" colab_type="text"
# Consta de 6 archivos
#
# - TRAIN_TARGET : 2 variables
# - TRAIN_FUENTE_PLANTAPOST : 10 variables
# - TRAIN_FUENTE_EQUIPOS : 18 variables
# - TRAIN_FUENTE_DATOS : 10 variables
# - TRAIN_FUENTE_VOZ : 29 variables
# - TRAIN_FUENTE_APLICACIONES : 14 variables
# - TRAIN_FUENTE_CONSULTA_PREV_PORTA : 3 variables
#
# + [markdown] id="Oq4fteSrkxSO" colab_type="text"
# <a id="3"></a>
#
# # 3. METRICA
# + [markdown] id="8s3N-A7wkxSR" colab_type="text"
# La métrica de evaluación usaremos es el AUC, para entenderlo primero haremos unas definiciones:
#
# + [markdown] id="MzQ2SDPVkxSV" colab_type="text"
# ### CURVA ROC
# + [markdown] id="1p0a9tMwkxSW" colab_type="text"
# Una **curva ROC (curva de característica operativa del receptor)** es un gráfico que muestra el rendimiento de un modelo de clasificación en todos los umbrales de clasificación. Esta curva representa dos parámetros:
# - Tasa de verdaderos positivos
# - Tasas de falsos positivos
# + [markdown] id="UZtHnhy7kxSX" colab_type="text"
# **Tasa de verdaderos positivos (TPR)** es sinónimo de exhaustividad y, por lo tanto, se define de la siguiente manera:
#
# \begin{equation}
# TPR = \frac{VP}{VP+FN}
# \end{equation}
#
# + [markdown] id="26AsoT3VkxSa" colab_type="text"
# **Tasa de falsos positivos (FPR)** se define de la siguiente manera:
#
# \begin{equation}
# FPR = \frac{FP}{FP+VN}
# \end{equation}
#
# + [markdown] id="Ef5qhyrpkxSb" colab_type="text"
# Una curva ROC representa TPR frente a FPR en diferentes umbrales de clasificación. Reducir el umbral de clasificación clasifica más elementos como positivos, por lo que aumentarán tanto los falsos positivos como los verdaderos positivos. En la siguiente figura, se muestra una curva ROC típica.
# Para calcular los puntos en una curva ROC, podríamos evaluar un modelo de regresión logística muchas veces con diferentes umbrales de clasificación, pero esto es ineficiente. Afortunadamente, existe un algoritmo eficiente basado en ordenamiento que puede brindarnos esta información, denominado AUC.
# + id="WdUe5CRvkxSc" colab_type="code" outputId="e05c9d59-851a-43ea-c069-1379c412a07a" colab={}
from PIL import Image
Image.open('/home/liiarpi/Downloads/DATA/MOVISTAR/PAPER/imagenes/roc.png')
# + [markdown] id="KV7cAvc5kxSt" colab_type="text"
# ### AUC : Área bajo la curva ROC
# + [markdown] id="BeqcQ-QHkxSv" colab_type="text"
# **AUC** significa "área bajo la curva ROC". Esto significa que el AUC mide toda el área bidimensional por debajo de la curva ROC completa (piensa en un cálculo integral) de (0,0) a (1,1).
#
# + id="hqVnL0CvkxSw" colab_type="code" outputId="19cb103d-bf00-4ff7-f42a-90bab58c023f" colab={}
Image.open('/home/liiarpi/Downloads/DATA/MOVISTAR/PAPER/imagenes/auc.png')
# + [markdown] id="EUq4SbgckxS3" colab_type="text"
# El AUC proporciona una medición agregada del rendimiento en todos los umbrales de clasificación posibles. Una forma de interpretar el AUC es como la probabilidad de que el modelo clasifique un ejemplo positivo aleatorio más alto que un ejemplo negativo aleatorio. Observa, a modo de ilustración, los siguientes ejemplos, que están ordenados de izquierda a derecha en orden ascendente con respecto a las predicciones de regresión logística:
#
# El AUC representa la probabilidad de que un ejemplo aleatorio positivo (verde) se posicione a la derecha de un ejemplo aleatorio negativo (rojo).
#
# El AUC oscila en valor del 0 al 1. Un modelo cuyas predicciones son un 100% incorrectas tiene un AUC de 0.0; otro cuyas predicciones son un 100% correctas tiene un AUC de 1.0.
# + id="t4Vwi7PvkxS8" colab_type="code" outputId="a4fd73e2-8abf-4ba9-b93f-98394df4ff64" colab={}
Image.open('/home/liiarpi/Downloads/DATA/MOVISTAR/PAPER/imagenes/cuadri.png')
# + [markdown] id="yvX2XA7HkxTK" colab_type="text"
# **Razones por las cuales usamos AUC como métrica:**
#
# - El AUC es **invariable con respecto a la escala.** Mide qué tan bien se clasifican las predicciones, en lugar de sus valores absolutos.
# - El AUC es **invariable con respecto al umbral de clasificación.** Mide la calidad de las predicciones del modelo, sin tener en cuenta qué umbral de clasificación se elige.
#
# + [markdown] id="d7tvYvt9kxTN" colab_type="text"
# <a id="4"></a>
#
# # 4. IMPORTAR LIBRERIAS
# + id="3RtMRv-ZkxTQ" colab_type="code" colab={}
from librerias3 import *
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as pltd
import seaborn as sns
import gc
from sklearn.model_selection import train_test_split
import lightgbm as lgb
import warnings
warnings.filterwarnings('ignore')
# + [markdown] id="Icf4kOYbkxTV" colab_type="text"
# <a id="5"></a>
#
# # 5. CARGAR DATA
# + id="7KW_iCRFkxTV" colab_type="code" colab={}
train_path='/home/liiarpi/Downloads/DATA/MOVISTAR/DATA/TRAIN/'
target=pd.read_csv(train_path+'TRAIN_TARGET.txt',sep='|')
tfa =pd.read_csv(train_path+'TRAIN_FUENTE_APLICACIONES.txt',sep='|',parse_dates=['PERIODO'])
tfc =pd.read_csv(train_path+'TRAIN_FUENTE_CONSULTA_PREV_PORTA.txt',sep='|',parse_dates=['PERIODO'])
tfd =pd.read_csv(train_path+'TRAIN_FUENTE_DATOS.txt',sep='|',parse_dates=['PERIODO'])
tfe =pd.read_csv(train_path+'TRAIN_FUENTE_EQUIPOS.txt',sep='|',parse_dates=['PERIODO'])
tfv =pd.read_csv(train_path+'TRAIN_FUENTE_VOZ.txt',sep='|',parse_dates=['PERIODO'])
tpp =pd.read_csv(train_path+'TRAIN_PLANTAPOST.txt',sep='|',parse_dates=['PERIODO'])
# + id="p6DOkyDNkxTX" colab_type="code" outputId="2879efde-6876-4420-c2b8-75505388064c" colab={}
target.head()
# + id="noAM__g2kxTc" colab_type="code" outputId="20c2d67a-16aa-41f1-e7ac-bdf5dc3edd97" colab={}
tfc.head()
# + id="OS8i8QM0kxTg" colab_type="code" outputId="61869af9-51e4-482e-bf3c-2a601a0f30ab" colab={}
names=tfa.columns.values
found_index = np.in1d(names, ['ID_CLIENTE','PERIODO']).nonzero()[0]
names = np.delete(names, found_index)+'_mean'
names
# + [markdown] id="pIBVv9cHkxTl" colab_type="text"
# <a id="6"></a>
#
# # 6. Data Exploration and Feature Creation
# + id="HAWrhBh0kxTo" colab_type="code" colab={}
def procesar_2(names, df):
    """One-hot encode each selected column into three usage bands.

    For every column in ``names`` the values are bucketed relative to that
    column's mean +/- one standard deviation:

      * ``alta_<col>``  : value >  mean + std
      * ``media_<col>`` : mean - std <= value <= mean + std
      * ``baja_<col>``  : value <= mean - std

    (Values exactly at mean - std satisfy both ``media`` and ``baja``,
    matching the original boundary conventions.)

    Parameters
    ----------
    names : sequence of column labels present in ``df``.
    df : source DataFrame; it is not modified.

    Returns
    -------
    DataFrame of 0.0/1.0 float indicator columns, three per input column,
    grouped as alta/media/baja per original column.
    """
    # .copy() avoids pandas chained-assignment (SettingWithCopyWarning)
    # that the original slice-then-assign pattern triggered.
    x = df[names].copy()
    for col in list(x.columns):
        # Thresholds come from the untouched original column values.
        upper = x[col].mean() + x[col].std()
        lower = x[col].mean() - x[col].std()
        x['alta_' + col] = x[col] > upper
        x['media_' + col] = (x[col] <= upper) & (x[col] >= lower)
        x['baja_' + col] = x[col] <= lower
        x = x.drop([col], axis=1)
    return x.astype('float')
# + id="LoijPZlKkxTt" colab_type="code" colab={}
def agg_numeric(df, group_var):
# Remove id variables other than grouping variable
for col in df:
if col != group_var and 'ID_CLIENTE' in col:
df = df.drop(columns = col)
group_ids = df[group_var]
numeric_df = df.select_dtypes('number')
numeric_df[group_var] = group_ids
# Group by the specified variable and calculate the statistics
agg = numeric_df.groupby(group_var).agg(['count', 'mean', 'max', 'min', 'sum']).reset_index()
# Need to create new column names
columns = [group_var]
# Iterate through the variables names
for var in agg.columns.levels[0]:
# Skip the grouping variable
if var != group_var:
# Iterate through the stat names
for stat in agg.columns.levels[1][:-1]:
# Make a new column name for the variable and stat
columns.append('%s_%s' % (var, stat))
agg.columns = columns
print('NUEVOS FEATURES CREADOS')
return agg
def agg_numeric2(df, group_var):
# Remove id variables other than grouping variable
for col in df:
if col != group_var and 'ID_CLIENTE' in col:
df = df.drop(columns = col)
group_ids = df[group_var]
numeric_df = df.select_dtypes('number')
numeric_df[group_var] = group_ids
# Group by the specified variable and calculate the statistics
agg = numeric_df.groupby(group_var).agg(['mean']).reset_index()
# Need to create new column names
columns = [group_var]
# Iterate through the variables names
for var in agg.columns.levels[0]:
# Skip the grouping variable
if var != group_var:
# Iterate through the stat names
for stat in agg.columns.levels[1][:-1]:
# Make a new column name for the variable and stat
columns.append('%s_%s' % (var, stat))
agg.columns = columns
print('NUEVOS FEATURES CREADOS')
return agg
# + id="xySZG2-NkxTv" colab_type="code" colab={}
def procesar_1(path, name):
    """Load one raw '|'-separated source file and build per-client features.

    Steps:
      1. Expand PERIODO into numeric date parts plus an epoch-seconds column.
      2. Encode non-numeric columns as (category code + 1).
      3. Fill remaining NaNs with the column mean.
      4. Aggregate per ID_CLIENTE (count/mean/max/min/sum) via agg_numeric.
      5. Append low/medium/high indicator features via procesar_2.

    Parameters
    ----------
    path : str
        Directory containing the file (must end with '/').
    name : str
        File name including extension, e.g. 'TRAIN_FUENTE_VOZ.txt'.

    Returns
    -------
    pandas.DataFrame
        One row per ID_CLIENTE with the aggregated + indicator features.
    """
    df = pd.read_csv(path + name, sep='|', parse_dates=['PERIODO'])
    print(df.shape)
    # 1. Expand the PERIODO timestamp into numeric components.
    # NOTE(review): Series.dt.week is deprecated in newer pandas — fine for
    # the environment this notebook was pinned to; confirm before upgrading.
    date_attr = ['Year', 'Month', 'Week', 'Day', 'Dayofweek', 'Dayofyear',
                 'Is_month_end', 'Is_month_start', 'Is_quarter_end',
                 'Is_quarter_start', 'Is_year_end', 'Is_year_start']
    for attr in date_attr:
        a = getattr(df['PERIODO'].dt, attr.lower()).astype(float)
        # Bug fix: the original mask `(1 - np.isnan(a)) > 0.0001` was
        # inverted and zeroed every *valid* value (leaving all PERIODO_*
        # features 0 or NaN). Zero only the non-finite entries instead.
        a[np.isnan(a)] = 0
        a[np.isinf(a)] = 0
        df['PERIODO_' + attr] = a
    # 2. Numeric representation of the full date (epoch seconds).
    df['PERIODO_elapsed'] = df['PERIODO'].astype(np.int64) // 10 ** 9
    # 3. Drop the raw date column.
    df.drop('PERIODO', axis=1, inplace=True)
    # Names the aggregated '<col>_mean' features will get after agg_numeric;
    # used below to build the low/medium/high indicator features.
    names3 = df.columns.values
    names3 = np.delete(
        names3, np.in1d(names3, ['ID_CLIENTE']).nonzero()[0]) + '_mean'
    # Encode any non-numeric column as (category code + 1); 0 is reserved
    # for missing (cat.codes is -1 for NaN).
    for n, col in df.items():
        if not pd.api.types.is_numeric_dtype(col) and n != 'ID_CLIENTE':
            df[n] = col.astype('category')
    for n, col in df.items():
        if pd.api.types.is_categorical_dtype(col) and n != 'ID_CLIENTE':
            df[n] = col.cat.codes + 1
    # Impute remaining missing numeric values with the column mean.
    missing_frac = df.isna().sum() / len(df)
    for col_name, frac in missing_frac.items():
        if frac > 0:
            print('DATA FALTANTE -', col_name)
            df[col_name] = df[col_name].fillna(df[col_name].mean())
    # 4. Per-client statistics, then 5. threshold indicators on the means.
    df = agg_numeric(df, group_var='ID_CLIENTE')
    df2 = procesar_2(names3, df)
    df = pd.concat([df, df2], axis=1)
    print(df.shape)
    return df
# + id="jWosRHCkkxTw" colab_type="code" outputId="81d11d55-1ef5-42fd-a6cd-9231a5321b65" colab={}
# NOTE(review): scratch/experiment cell. `a == np.NaN` is always False
# (NaN never compares equal to anything), and on a plain Python list
# `a[False]` is `a[0]`, so the assignment just overwrites the first
# element with 0 — it does NOT remove NaNs. Kept for the notebook's history.
a=[2,3,np.NaN,np.Inf]
a[a==np.NaN]=0
a
# + id="0smlPvTQkxT1" colab_type="code" outputId="f1703ed6-89ba-44d7-f3a3-87e135497633" colab={}
train_path='/home/liiarpi/Downloads/DATA/MOVISTAR/DATA/TRAIN/'
df=procesar_1(train_path,'TRAIN_FUENTE_CONSULTA_PREV_PORTA.txt')
# + id="yYiuxb4gkxT5" colab_type="code" outputId="a0e75645-faa8-43d9-da8a-0abd98be6682" colab={}
train_path='/home/liiarpi/Downloads/DATA/MOVISTAR/DATA/TRAIN/'
df=procesar_1(train_path,'TRAIN_FUENTE_VOZ.txt')
# + id="9RtiO-x2kxT8" colab_type="code" colab={}
path='/home/liiarpi/Downloads/DATA/MOVISTAR'
pathtrain=path+'/DATA/TRAIN/'
pathtest =path+'/DATA/TEST/'
pathguardartrain=path+'/NEW_DATA3/NEW_TRAIN/'
pathguardartest =path+'/NEW_DATA3/NEW_TEST/'
def transformar(pathorigen, path, name):
    """Process one raw source file with procesar_1 and persist it as CSV.

    Parameters
    ----------
    pathorigen : str
        Directory containing the raw '|'-separated ``<name>.txt`` file.
    path : str
        Output directory for the processed ``<name>.csv`` file.
    name : str
        Base file name without extension.
    """
    df = procesar_1(pathorigen, name + '.txt')
    # (Removed an unused `names = df.columns.values` local from the original.)
    df.to_csv(path + name + '.csv', index=None)
# + id="P9JkTxFakxT-" colab_type="code" outputId="8f23e767-ceab-4f4d-9072-79ec2f46e05e" colab={}
transformar(pathtrain,pathguardartrain,'TRAIN_FUENTE_APLICACIONES')
transformar(pathtrain,pathguardartrain,'TRAIN_FUENTE_CONSULTA_PREV_PORTA')
transformar(pathtrain,pathguardartrain,'TRAIN_FUENTE_DATOS')
transformar(pathtrain,pathguardartrain,'TRAIN_FUENTE_EQUIPOS')
transformar(pathtrain,pathguardartrain,'TRAIN_FUENTE_VOZ')
transformar(pathtrain,pathguardartrain,'TRAIN_PLANTAPOST')
# + id="AB7lDAsFkxUA" colab_type="code" outputId="d11156f8-73d5-4370-86f6-7ac33697ab63" colab={}
transformar(pathtest,pathguardartest,'TEST_FUENTE_APLICACIONES')
transformar(pathtest,pathguardartest,'TEST_FUENTE_CONSULTA_PREV_PORTA')
transformar(pathtest,pathguardartest,'TEST_FUENTE_DATOS')
transformar(pathtest,pathguardartest,'TEST_FUENTE_EQUIPOS')
transformar(pathtest,pathguardartest,'TEST_FUENTE_VOZ')
transformar(pathtest,pathguardartest,'TEST_PLANTAPOST')
# + id="9UFpSY5BkxUE" colab_type="code" colab={}
# + id="iXcDmY2SkxUG" colab_type="code" colab={}
train_path='/home/liiarpi/Downloads/DATA/MOVISTAR/NEW_DATA3/NEW_TRAIN/'
target=pd.read_csv('/home/liiarpi/Downloads/DATA/MOVISTAR/DATA/TRAIN/TRAIN_TARGET.txt',sep='|')
tfa=pd.read_csv(train_path+'TRAIN_FUENTE_APLICACIONES.csv')
tfc=pd.read_csv(train_path+'TRAIN_FUENTE_CONSULTA_PREV_PORTA.csv')
tfd=pd.read_csv(train_path+'TRAIN_FUENTE_DATOS.csv')
tfe=pd.read_csv(train_path+'TRAIN_FUENTE_EQUIPOS.csv')
tfv=pd.read_csv(train_path+'TRAIN_FUENTE_VOZ.csv')
tpp=pd.read_csv(train_path+'TRAIN_PLANTAPOST.csv')
# + id="EYn4cXOLkxUJ" colab_type="code" colab={}
TrainData = pd.merge(target, tfa, how='left')
TrainData = pd.merge(TrainData, tfc, on='ID_CLIENTE', how='left')
TrainData = pd.merge(TrainData, tfd, on='ID_CLIENTE', how='left')
TrainData = pd.merge(TrainData, tfe, on='ID_CLIENTE', how='left')
TrainData = pd.merge(TrainData, tfv, on='ID_CLIENTE', how='left')
TrainData = pd.merge(TrainData, tpp, on='ID_CLIENTE', how='left')
# + id="1oGJDh3BkxUL" colab_type="code" colab={}
train=pd.read_csv('Data_tratada_Train.csv',na_values=['NaN','nan',-1])
# + id="sPm2NhA4kxUM" colab_type="code" outputId="d62cbff7-1c3c-4a1a-fe80-6969cea4baa7" colab={}
print('tamaño = ', train.shape)
# Presentamos un ejemplo (de 5 datos) del dataset
train.head()
# + id="4awuU8mSkxUQ" colab_type="code" colab={}
X=train.drop(columns=['ID_CLIENTE', 'TARGET'])
y=train['TARGET']
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.2,random_state=7)
# + id="zpE9PUtMkxUS" colab_type="code" colab={}
#X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.2,random_state=7)
train_data=lgb.Dataset(X_train, y_train)
test_data=lgb.Dataset(X_test, y_test, reference=train_data)
# + id="UCZV1UClkxUT" colab_type="code" colab={}
params = {
'task': 'train',
'boosting_type': 'gbdt',
'objective': 'binary',
'metric': {'auc'},
'num_leaves': 20,
'num_boost_round': 5000,
'min_data_in_leaf': 50,
'max_depth': -1,
'verbose': -1,
'n_jobs': -1,
'learning_rate': 0.035
}
# + id="zS6tx1pEkxUU" colab_type="code" outputId="4e911875-c3bf-4ea6-bb70-aa87af47735f" colab={}
model = lgb.train(params,train_data,valid_sets=test_data,early_stopping_rounds=80)
# + id="305V3rNHkxUW" colab_type="code" outputId="706ee473-f5cc-4f6f-b948-3e1b105fcd61" colab={}
a=lgb.plot_importance(model,importance_type='split',height=0.8,figsize=(18,80),grid=False,ignore_zero=False)
# + [markdown] id="MH6ZiU0zkxUY" colab_type="text"
# <a id="7"></a>
#
# # 7.PROBANDO PCA PARA REDUCIR LOS FEATURES
# + id="N26kNVcYkxUY" colab_type="code" colab={}
from sklearn.decomposition import PCA
from sklearn import metrics
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_auc_score
def pcaa(a,xx,yy):
    """Reduce ``xx`` to ``a`` PCA components and benchmark classifiers on it.

    Parameters
    ----------
    a : int
        Number of principal components to keep.
    xx : array-like
        Feature matrix.
    yy : array-like
        Binary target vector.

    Returns
    -------
    tuple
        (fitted RandomForestClassifier, fitted PCA transformer), so the
        caller can transform and predict on new data later.
    """
    print("PCA de %d elementos"%a)
    pca =PCA(n_components=a)
    pca.fit(xx)
    xx=pca.transform(xx)
    print('pca listo')
    print('USANDO RANDOMFOREST CLASSIFIER')
    xx_train, xx_val, yy_train, yy_val = train_test_split(xx, yy, test_size=0.2, random_state=42)
    # NOTE(review): no random_state on the forest, so its scores vary
    # from run to run even though the split itself is seeded.
    mm = RandomForestClassifier(20, n_jobs=-1, oob_score=True,min_samples_split=10)#,max_features=10)
    mm.fit(xx_train,yy_train)
    y_train_pred = mm.predict(xx_train)
    y_val_pred = mm.predict(xx_val)
    print(f'Scores:')
    print(f'Train = {metrics.accuracy_score(yy_train, y_train_pred):.4}')
    print(f'Validation = {metrics.accuracy_score(yy_val, y_val_pred):.4}')
    if hasattr(mm, 'oob_score_'): print(f'OOB = {mm.oob_score_:.4}')
    print("AUC Train ",roc_auc_score(yy_train, y_train_pred))
    print("AUC Test ",roc_auc_score(yy_val, y_val_pred))
    # NOTE(review): the SVM section below is disabled (it is a bare
    # triple-quoted string, i.e. a no-op expression), so this print is
    # misleading — nothing SVM-related actually runs.
    print('USANDO SVM')
    '''
    from sklearn.svm import SVC
    model_svm = SVC(gamma='auto')
    model_svm.fit(xx_train,yy_train)
    y_train_pred = model_svm.predict(xx_train)
    y_val_pred = model_svm.predict(xx_val)
    print("Accuracy Training Support Vector Machines:",metrics.accuracy_score(yy_train, y_train_pred))
    print("Accuracy Validation Support Vector Machines:",metrics.accuracy_score(yy_val, y_val_pred))
    print("AUC Train ",roc_auc_score(yy_train, y_train_pred))
    print("AUC Test ",roc_auc_score(yy_val, y_val_pred))'''
    print('USANDO LOGISTIC')
    # Local import keeps this dependency scoped to the experiment.
    from sklearn.linear_model import LogisticRegression
    model_logistic = LogisticRegression(random_state=0, solver='lbfgs')
    model_logistic.fit(xx_train, yy_train)
    y_train_pred = model_logistic.predict(xx_train)
    y_val_pred = model_logistic.predict(xx_val)
    print("Accuracy Training Logistic:",metrics.accuracy_score(yy_train, y_train_pred))
    print("Accuracy Validation Logistic:",metrics.accuracy_score(yy_val, y_val_pred))
    print("AUC Train ",roc_auc_score(yy_train, y_train_pred))
    print("AUC Test ",roc_auc_score(yy_val, y_val_pred))
    return mm,pca
# + id="sUsvAfPVkxUa" colab_type="code" outputId="f628cf8f-73f1-45ca-9eb8-b9d1736a6569" colab={}
pcaa(10,X,y)
# + id="-I2ajm-YkxUc" colab_type="code" outputId="6f894c3c-0fc7-4982-d03c-49e12276c2d8" colab={}
modelo,pca_modelo=pcaa(20,X,y)
# + id="gy5zjk7fkxUe" colab_type="code" outputId="20dabf73-8998-474f-cbf8-b2d4c9316e35" colab={}
pcaa(15,X,y)
# + [markdown] id="tjCAzodGkxUg" colab_type="text"
# ### No mejoramos el modelo de lightgbm
# + id="G-0BW208kxUg" colab_type="code" colab={}
test_path='/home/liiarpi/Downloads/DATA/MOVISTAR/NEW_DATA3/NEW_TEST/'
sfa=pd.read_csv(test_path+'TEST_FUENTE_APLICACIONES.csv')
sfc=pd.read_csv(test_path+'TEST_FUENTE_CONSULTA_PREV_PORTA.csv')
sfd=pd.read_csv(test_path+'TEST_FUENTE_DATOS.csv')
sfe=pd.read_csv(test_path+'TEST_FUENTE_EQUIPOS.csv')
sfv=pd.read_csv(test_path+'TEST_FUENTE_VOZ.csv')
spp=pd.read_csv(test_path+'TEST_PLANTAPOST.csv')
envio=pd.read_csv('/home/liiarpi/Downloads/DATA/MOVISTAR/DATA/TEST/TEST_ENVIO.txt',sep='|')
# + id="B5b3j621kxUh" colab_type="code" colab={}
sfa=sfa.reindex(tfa.columns,axis=1)
sfc=sfc.reindex(tfc.columns,axis=1)
sfd=sfd.reindex(tfd.columns,axis=1)
sfe=sfe.reindex(tfe.columns,axis=1)
sfv=sfv.reindex(tfv.columns,axis=1)
spp=spp.reindex(tpp.columns,axis=1)
# + id="BFqxX2c5kxUl" colab_type="code" colab={}
envio=pd.merge(envio,sfa,on='ID_CLIENTE',how='left')
envio=pd.merge(envio,sfc,on='ID_CLIENTE',how='left')
envio=pd.merge(envio,sfd,on='ID_CLIENTE',how='left')
envio=pd.merge(envio,sfe,on='ID_CLIENTE',how='left')
envio=pd.merge(envio,sfv,on='ID_CLIENTE',how='left')
envio=pd.merge(envio,spp,on='ID_CLIENTE',how='left')
# + id="K7_HaFrgkxUn" colab_type="code" colab={}
envio['TARGET']=model.predict(envio.drop(columns='ID_CLIENTE'),num_iteration=model.best_iteration)
# + id="1wFmTT0wkxUp" colab_type="code" colab={}
envio = envio.loc[:,['ID_CLIENTE','TARGET']]
# + id="aUzpvY6okxUv" colab_type="code" colab={}
envio.to_csv('/home/liiarpi/Downloads/DATA/MOVISTAR/submissions/sub08_DATA3.csv',index=None, sep=',')
# + id="6YOy9T7DkxUx" colab_type="code" colab={}
envio['ID_CLIENTE']=envio['ID_CLIENTE'].astype(str)
envio['TARGET']=envio['TARGET'].astype('float64')
# + id="NaBU1FyHkxU6" colab_type="code" outputId="6f128206-c47f-4a84-9b0c-aa869545d91c" colab={}
pd.read_csv('/home/liiarpi/Downloads/DATA/MOVISTAR/submissions/sub08_DATA3.csv').info()
# + [markdown] id="RclOA-UYkxU9" colab_type="text"
# ### Probando los resultados en la página oficial obtenemos un 0.67375 en Private Score
# ### y obtenemos un 0.66571 en Public Score
# + id="HWaqfW-fkxU-" colab_type="code" outputId="5e810351-a0c2-468f-8beb-f55a8236e6ef" executionInfo={"status": "error", "timestamp": 1544471022655, "user_tz": 300, "elapsed": 1278, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13922697713342692898"}} colab={"base_uri": "https://localhost:8080/"}
from PIL import Image
Image.open('1.jpg')
# + [markdown] id="57vm8Ub1kxVB" colab_type="text"
# <a id="8"></a>
#
# # 8. USANDO LOS FEATURES HALLADOS EN EL ARCHIVO PROYECTO_FINAL_02
# + id="_o5d9khKkxVB" colab_type="code" colab={}
test=pd.read_csv('Data_tratada_test.csv',na_values=['NaN','nan',-1])
# + id="tpYD9ux1kxVE" colab_type="code" colab={}
features=['ID_CLIENTE','AIRTIME_IN_TOT_count', 'AIRTIME_OUT_ON_count', 'APP_10_count',
'APP_1_count', 'APP_2_count', 'APP_4_count', 'APP_5_count',
'APP_6_count', 'APP_7_count', 'APP_8_count', 'CALLS_IN_TOT_count',
'CALLS_OUT_ON_count', 'CALLS_OUT_TOT_count', 'CONT_OFF_count',
'CONT_ON_count', 'CONT_TOT_count', 'DAYS_IN_VOICE_OFF_sum',
'DAYS_IN_VOICE_TOT_count', 'DAYS_OUT_VOICE_OFF_sum',
'DEST_VOICE_ON_count', 'DEST_VOICE_count', 'MB_TOTAL_APP_count',
'N_DIAS_NAVEGACION_count', 'N_DIAS_NAVEGACION_max',
'N_DIAS_NAVEGACION_sum', 'TOP_CONT_5_count', 'TOP_CONT_5_max',
'TOP_CONT_5_sum', 'TOP_CONT_OFF_5_count', 'TOP_CONT_OFF_5_max',
'TOP_CONT_OFF_5_mean', 'TOP_CONT_OFF_5_sum', 'TOP_CONT_ON_5_count',
'TRAF_DATOS_mean', 'TRAF_DATOS_sum', 'alta_CALLS_IN_OFF_mean',
'alta_CALLS_OUT_OFF_mean', 'alta_DAYS_IN_VOICE_OFF_mean',
'alta_DAYS_OUT_VOICE_OFF_mean', 'alta_DEST_VOICE_OFF_mean',
'alta_TOP_CONT_OFF_5_mean', 'baja_DAYS_IN_VOICE_OFF_mean',
'baja_DAYS_OUT_VOICE_OFF_mean', 'baja_PANTALLA_mean',
'baja_TEC_CHIP_mean', 'media_APP_10_mean', 'media_APP_6_mean',
'media_APP_7_mean', 'media_APP_8_mean', 'media_APP_9_mean',
'media_N_DIAS_NAVEGACION_mean']
# + id="k-u8cwaKkxVK" colab_type="code" colab={}
test=test[features]
# + id="fi93xthQkxVL" colab_type="code" outputId="1e9a55b9-5625-4943-e7ec-e13a44ed89fc" colab={}
envio2=pd.read_csv('/home/cristianl/Desktop/MOVISTAR/PAPER2/DATA/TEST/TEST_ENVIO.txt',sep='|')
envio2.head(2)
# + id="VVzFlBEjkxVM" colab_type="code" colab={}
envio2=pd.merge(envio2,test,on='ID_CLIENTE',how='left')
# + id="j6MZV8ECkxVN" colab_type="code" outputId="a7bddf0b-1a1a-4353-f60a-d74fac0299cb" colab={}
envio2.head(2)
# + id="2orJa7efkxVP" colab_type="code" outputId="3e92a228-80a1-452e-dd68-40af0c916248" colab={}
# Replace NaN/±Inf with 0 in every feature column before prediction.
# Bug fix: the original first applied the inverted mask
# `(1 - np.isnan(col)) > 0.0001`, which selects every *valid* entry and
# zeroed it — after the follow-up NaN/Inf passes, every feature column
# ended up all zeros. Now only non-finite entries are zeroed, and
# `.loc` assignment avoids the chained-indexing (SettingWithCopy) pattern.
for n, col in envio2.items():
    if n != 'ID_CLIENTE':
        print(n)
        vals = np.asarray(envio2[n], dtype=float)
        # np.isfinite is False for NaN, +Inf and -Inf in one pass.
        envio2.loc[~np.isfinite(vals), n] = 0
# + id="7kQ2QQcnkxVQ" colab_type="code" outputId="ac9d8d6d-a9b5-4a13-9faa-7e9fa5ca4249" colab={}
envio2.head()
# + id="od3fIz7ekxVS" colab_type="code" colab={}
envio3=pca_modelo.transform(envio2.drop(columns='ID_CLIENTE'))
# + id="THD-7vhbkxVS" colab_type="code" colab={}
envio2['TARGET']=modelo.predict(envio3)
# + id="KNcljRg_kxVV" colab_type="code" colab={}
envio2 = envio2.loc[:,['ID_CLIENTE','TARGET']]
# + id="DaghGpQYkxVW" colab_type="code" colab={}
envio2.to_csv('/home/cristianl/Desktop/MOVISTAR/PAPER3/subm/subfinal.csv',index=None, sep=',')
# + [markdown] id="JJLVBXnUkxVX" colab_type="text"
# ### Probando los resultados en la página oficial obtenemos un 0.68032 en Private Score
# ### y obtenemos un 0.6733 en Public Score, lo que nos ubicaría en el segundo puesto
# + id="Z7s44jngkxVY" colab_type="code" outputId="da4ef765-d371-4114-eb96-5d94fc06aed9" colab={}
Image.open('2.jpg')
# + id="jKTQkr-mkxVZ" colab_type="code" colab={}
| Projects-2018-2/Movistar/PROYECTO_IA_01.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %load_ext autoreload
# %autoreload 2
# !cd `pwd`
from tf.app import use
A = use("missieven:clone", checkout="clone", hoist=globals())
A.showContext()
v = F.otype.s("volume")[1]
T.sectionFromNode(v)
T.sectionFromNode(v, fillup=True)
n = T.nodeFromSection((4, 400))
n
T.sectionFromNode(n)
A.reuse()
A.webLink(n)
A = use("bhsa:clone", checkout="clone", hoist=globals())
b = T.nodeFromSection(("Genesis",))
c = T.nodeFromSection(("Genesis", 1))
v = T.nodeFromSection(("Genesis", 1, 1))
A.webLink(b)
A.webLink(c)
A.webLink(v)
| test/webOffset.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <table><tr>
# <td style="background-color:#ffffff;text-align:left;"><a href="http://qworld.lu.lv" target="_blank"><img src="images\qworld.jpg" width="30%" align="left"></a></td>
# <td style="background-color:#ffffff;"> </td>
# <td style="background-color:#ffffff;vertical-align:text-middle;text-align:right;">
# <table><tr style="background-color:white;">
# <td> Visit</td>
# <td><a href="http://qworld.lu.lv" target="_blank"><img src="images/web-logo.png" width="35px"></a></td>
# <td width="10pt"></td>
# <td> Join</td>
# <td><a href="https://qworldworkspace.slack.com/" target="_blank"><img src="images/slack-icon.png" width="80px"></a></td>
# <td width="10pt"></td>
# <td>Follow</td>
# <td><a href="https://www.facebook.com/qworld19/" target="_blank"><img src="images/facebook-icon.png" width="40px"></a></td>
# <td><a href="https://twitter.com/QWorld19" target="_blank"><img src="images/twitter-icon.png" width="40px"></a></td>
# </tr></table>
# </td>
# </tr></table>
#
# <h1 align="center" style="color: #cd7f32;"> Welcome to Bronze </h1>
# <hr>
# <font style="color: #cd7f32;size:+1;"><b><i>Bronze</i></b></font> is our introductory material covering the basics of quantum computation and quantum programming. It is a big collection of [Jupyter notebooks](https://jupyter.org).
#
# This is the <a href="http://www.qturkey.org" target="_blank">QTurkey</a> version of Bronze and the original version can be found <a href="https://gitlab.com/qkitchen/basics-of-quantum-computing" target="_blank">here</a>.
#
# Bronze can be used to organize two-day or three-day long workshops or to design one-semester course for the second or third year university students. In Bronze, we focus on real numbers and skip to use complex numbers to keep the tutorial simpler. Here is a complete list of our workshops using Bronze: <a href="http://qworld.lu.lv/index.php/workshop-bronze/#list" target="_blank">QBronze</a>.
#
# *If you are using Jupyter notebooks for the first time, you can check our very short <a href="python/Python02_Into_Notebooks.ipynb" target="_blank">Introduction for Notebooks</a>.*
#
# **The open-source toolkits we are using**
# - Programming language: <a href="https://www.python.org" target="_blank">python</a>
# - Quantum programming libraries: <a href="https://qiskit.org" target="_blank">Qiskit</a> is the main library at the moment.
# - We use <a href="https://www.mathjax.org" target="_blank">MathJax</a> to display mathematical expressions on html files (e.g., exercises).
# - We use open source interactive tool <a href="http://play.quantumgame.io" target="_blank">quantumgame</a> for showing quantum coin flipping experiments.
#
# **Support:** Please use _#general channel under_ <a href="https://qworldworkspace.slack.com/" target="_blank">QWorld's slack workspace</a> to ask your questions.
# <h1 align="center" style="color: #cd7f32;"> Content </h2>
# <a href="bronze/B00_Credits.ipynb" target="_blank">Credits</a>
# <h3 align="left"> Installation and Test </h3>
# _Python libraries including quantum ones are often updated. Therefore, there might appear some problems due to different versions_
#
# Before starting to use Bronze, please test your system by using the following notebook!
#
# Qiskit is the main quantum programming library, and you should install it to follow the whole Bronze.
# <ul>
# <li><a href="test/Qiskit_installation_and_test.ipynb" target="_blank">Qiskit installation and test</a></li>
# </ul>
# <b>Connecting to real quantum computers</b> (optional)
# <ul>
# <li><a href="https://qiskit.org/documentation/install.html#install-access-ibm-q-devices-label" target="_blank">Access IBM Q Systems</a> (external link)</li>
# <li><a href="test/Executing_quantum_programs_on_IBMQ.ipynb" target="_blank">Executing Quantum Programs on IBMQ</a>
# </ul>
# <b>References</b>
#
#
# [Python Reference](python/Python04_Quick_Reference.ipynb) |
# [Python: Drawing](python/Python06_Drawing.ipynb) |
# [Qiskit Reference](bronze/B01_Qiskit_Reference.ipynb)
#
# <b>Python review</b>
#
# [Variables](python/Python08_Basics_Variables.ipynb) |
# [Loops](python/Python12_Basics_Loops.ipynb) |
# [Conditionals](python/Python16_Basics_Conditionals.ipynb) |
# [Lists](python/Python20_Basics_Lists.ipynb)
#
# <b>Basic math</b>
#
# [Vectors](math/Math20_Vectors.ipynb) |
# [Dot Product](math/Math24_Dot_Product.ipynb) |
# [Matrices](math/Math28_Matrices.ipynb) |
# [Tensor Product](math/Math32_Tensor_Product.ipynb) |
#
#
# <b>Basics of classical systems</b>
# <ol>
# <li><a href="bronze/B03_One_Bit.ipynb" target="_blank">One Bit</a></li>
# <li><a href="bronze/B06_Coin_Flip.ipynb" target="_blank">Coin Flipping</a></li>
# <li><a href="bronze/B07_Probabilistic_Bit.ipynb" target="_blank">Probabilistic Bit</a></li>
# <li><a href="bronze/B12_Probabilistic_States.ipynb" target="_blank">Probabilistic States</a></li>
# <li><a href="bronze/B09_Coin_Flip_Game.ipynb" target="_blank">Coin Flipping Game</a></li>
# <li><a href="bronze/B15_Probabilistic_Operators.ipynb" target="_blank">Probabilistic Operators</a></li>
# <li><a href="bronze/B17_Two_Probabilistic_Bits.ipynb" target="_blank">Two Probabilistic Bits</a></li>
# </ol>
#
# <b>Basics of quantum systems</b>
# <ol start="8">
# <li><a href="bronze/B20_Quantum_Coin_Flipping.ipynb" target="_blank">Quantum Coin Flipping</a></li>
# <li><a href="bronze/B25_One_Qubit.ipynb" target="_blank">One Qubit</a></li>
# <li><a href="bronze/B28_Quantum_State.ipynb" target="_blank">Quantum States</a></li>
# <li><a href="bronze/B29_Quantum_Operators.ipynb" target="_blank">Quantum Operators</a></li>
# <li><a href="bronze/B24_Hadamard.ipynb" target="_blank">Hadamard Operator</a></li>
# <li><a href="bronze/B38_Multiple_Qubits.ipynb" target="_blank">Multiple Qubits</a></li>
# <li><a href="bronze/B39_Controlled_Operations.ipynb"target="_blank">Controlled Operations</a></li>
# <li><a href="bronze/B30_Visualization_of_a_Qubit.ipynb" target="_blank">Visualization of a (Real-Valued) Qubit</a></li>
# <li><a href="bronze/B32_Initializing_a_Qubit.ipynb" target="_blank">Initializing a Qubit</a></li>
#
# </ol>
#
#
# ##### <b>Operations on (real-valued) qubits</b>
# <ol start="17">
# <li><a href="bronze/B40_Operations_on_the_Unit_Circle.ipynb" target="_blank">Operations on the Unit Circle</a></li>
# <li><a href="bronze/B60_Reflections.ipynb" target="_blank">Reflections</a></li>
# <li><a href="bronze/B64_Rotations.ipynb" target="_blank">Rotations</a></li>
# </ol>
# <b>Basic quantum protocols</b>
# <ol start="21">
# <li><a href="bronze/B50_Superdense_Coding.ipynb" target="_blank">Entanglement and Superdense Coding</a></li>
# <li><a href="bronze/B54_Quantum_Teleportation.ipynb" target="_blank">Quantum Teleportation</a></li>
# </ol>
# <!--
# <ol> <li><a href="bronze/B84_Phase_Kickback.ipynb" target="_blank">Phase Kickback</a></li>
# <li><a href="bronze/B52_Quantum_State_of_a_Qubit.ipynb" target="_blank">Quantum State of a Qubit</a></li>
# <li><a href="bronze/B54_Random_Quantum_States.ipynb" target="_blank">Random Quantum States</a></li>
# <li><a href="bronze/B56_Angle_Between_Two_Quantum_States.ipynb" target="_blank">Angle Between Two Quantum States</a>
# </ol>
# -->
# <b>Grover's search algorithm</b>
# <ol start="23">
# <li><a href="bronze/B80_Inversion_About_the_Mean.ipynb" target="_blank">Inversion About the Mean</a></li>
# <li><a href="bronze/B81_Grovers_Algorithm.ipynb" target="_blank">Grover's Search Algorithm</a></li>
# <li><a href="bronze/B88_Grovers_Search_One_Qubit_Representation.ipynb" target="_blank">Grover's Search: One Qubit Representation</a></li>
# <li><a href="bronze/B84_Phase_Kickback.ipynb" target="_blank">Phase Kickback</a></li>
# <li><a href="bronze/B90_Grovers_Search_Implementation.ipynb" target="_blank">Grover's Search: Implementation</a></li>
# </ol>
# <li><a href="bronze/Resources.ipynb" target="_blank">Further Resources</a></li>
# <b>Exercises</b>
# <ul>
# <li><a href="./exercises/E05_Basic_Math.html" target="_blank">Basic Math</a></li>
# <li><a href="./exercises/E09_Probabilistic_Systems.html" target="_blank">Probabilistic Systems</a></li>
# <li><a href="./exercises/P09_Probabilistic_Systems.html" target="_blank">Probabilistic Systems Problem Set</a></li>
# <li><a href="./exercises/E13_Basics_of_Quantum_Systems.html" target="_blank">Basics of Quantum Systems</a></li>
# <li><a href="./exercises/E16_Quantum_Operators_on_a_Real-Valued_Qubit.html" target="_blank">Quantum Operators on a Real Valued Qubit</a></li>
# <li><a href="./exercises/E18_Quantum_Correlation.html" target="_blank">Quantum Correlation</a></li>
# </ul>
#
# <b>Optional notebooks</b>
# <ul>
# <li><a href="bronze/B48_Quantum_Tomography.ipynb" target="_blank">Quantum Tomography (Optional)</a></li>
# <li><a href="bronze/B72_Rotation_Automata.ipynb" target="_blank">Rotation Automata (Optional)</a></li>
# <li><a href="bronze/B76_Multiple_Rotations.ipynb" target="_blank">Multiple Rotations (Optional)</a></li>
# </ul>
# <h1 style="color: #cd7f32;" align="center"> Projects </h1>
#
# *Difficulty levels:
# easy (<font size="+1" color="7777ee">★</font>),
# medium (<font size="+1" color="7777ee">★★</font>), and
# hard (<font size="+1" color="7777ee">★★★</font>).*
#
# <font size="+1" color="7777ee"> ★</font> |
# [Correlation Game](projects/Project_Correlation_Game.ipynb) *on classical bits*
#
# <font size="+1" color="7777ee"> ★</font> |
# [Swapping Quantum States](projects/Project_Swapping_Quantum_States.ipynb) *on qubits*
#
# <font size="+1" color="7777ee"> ☆★</font> |
# [Simulating a Real-Valued Qubit](projects/Project_Simulating_a_RealValued_Qubit.ipynb)
#
# <font size="+1" color="7777ee"> ★★</font> |
# [Quantum Tomography with Many Qubits](projects/Project_Quantum_Tomography_with_Many_Qubits.ipynb)
#
# <font size="+1" color="7777ee"> ★★</font> |
# [Implementing Quantum Teleportation](projects/Project_Implementing_Quantum_Teleportation.ipynb)
#
# <font size="+1" color="7777ee">☆★★</font> |
# [Communication via Superdense Coding](projects/Project_Communication_via_Superdense_Coding.ipynb)
#
# <font size="+1" color="7777ee">★★★</font> |
# [Your Quantum Simulator](projects/Project_Your_Quantum_Simulator.ipynb)
# <table>
# <td style="background-color:#ffffff;text-align:left;"><a href="http://qturkey.org" target="_blank"><img src="images\logot.png" width="25%" align="left"></a></td>
# <td style="background-color:#ffffff;"> </td>
# <td style="background-color:#ffffff;vertical-align:text-middle;text-align:right;">
# <table><tr style="background-color:white;">
# <td> Visit</td>
# <td><a href="http://www.qturkey.org" target="_blank"><img src="images/web-logo.png" width="35px"></a></td>
# <td> Join</td>
# <td><a href="http://ej.uz/qturkey_slack/" target="_blank"><img src="images/slack-icon.png" width="80px"></a></td>
# <td>Follow</td>
# <td><a href="https://twitter.com/KuantumTurkiye" target="_blank"><img src="images/twitter-icon.png" width="30px"></a></td>
# </tr></table>
# </td>
# </table>
| index_bronze.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Test Windowed Positions in SExtractor
# Both SExtractor and SEP windowed positions can fail in the presence of nearby neighbors. This demonstrates the results from SExtractor for the same image as the one used in test_winpos.ipynb
# +
from __future__ import division, print_function
import os
import numpy as np
from astropy.io import fits
import astropy.wcs
import astropyp
import astropyp.wrappers.astromatic as aw
# %matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
# Astropy gives a lot of warnings that
# are difficult to filter individually
import warnings
from astropy.utils.exceptions import AstropyWarning
warnings.simplefilter('ignore', category=AstropyWarning)
warnings.simplefilter('ignore', category=UserWarning)
# Location of files
basepath = '/media/data-beta/users/fmooleka/2016decam'
temp_path = '/media/data-beta/users/fmooleka/temp'
aper_radius=8
# Set filenames
new_img_filename = os.path.join(temp_path, 'F100_i_7.fits')
new_dqmask_filename = os.path.join(temp_path, 'F100_i_7.dqmask.fits')
# -
# # Run SExtractor
# Use astropyp.wrappers to run SExtractor and detect X,Y, XWIN, YWIN positions
# +
files = {
'image': new_img_filename, # Name of the file to use for the image data
'dqmask': new_dqmask_filename # Name of the file to use for the dqmask
}
# Keyword arguments for SExtractor execution
kwargs = {
'code': 'SExtractor',
'cmd': '/media/data-beta/users/fmooleka/astromatic/build/sex/bin/sex',
'config': {
'CATALOG_NAME': os.path.join(temp_path, 'test.ldac.fits'),
'CATALOG_TYPE': 'FITS_LDAC',
'FILTER': False,
#'PARAMETERS_NAME': 'filename.param',
#'WEIGHT_TYPE': 'MAP_WEIGHT',
#'WEIGHT_IMAGE': files['wtmap'],
'FLAG_IMAGE': files['dqmask']
},
'temp_path': '.',
'config_file': '/media/data-beta/users/fmooleka/decam.config.decam_nopsf.sex',
'params': ['NUMBER', 'EXT_NUMBER', 'X_IMAGE','Y_IMAGE','XWIN_IMAGE','YWIN_IMAGE',
'X_WORLD','Y_WORLD','XWIN_WORLD', 'YWIN_WORLD', 'MAG_AUTO', 'ERRX2WIN_IMAGE', 'ERRY2WIN_IMAGE',
'FLAGS','FLAGS_WIN', 'IMAFLAGS ISO']
}
# Run SExtractor on the first HDU
sextractor = aw.api.Astromatic('SExtractor')
sextractor.run_frames(files['image'], frames=[1], **kwargs)
# -
# # SExtractor Results
# The top plot shows that sources that are not flagged can be off by as much as the width of the CCD (2000 pix).
# The bottom plot shows that even the sources that are not flagged can be off by tens of pixels, much larger than the aperture radius.
# Load the SExtactor results from a FITS_LDAC table
sexcat = aw.ldac.get_fits_table(os.path.join(temp_path, 'test.ldac.fits'), frame=1)
xdiff = (sexcat['X_IMAGE']-sexcat['XWIN_IMAGE']).value
ydiff = (sexcat['Y_IMAGE']-sexcat['YWIN_IMAGE']).value
# Remove sources with bad photometry
cuts = (sexcat['MAG_AUTO'].value<99)
# Plot the results
plt.errorbar(sexcat['MAG_AUTO'][cuts].value, xdiff[cuts], fmt='.',
yerr=(sexcat['ERRX2WIN_IMAGE'][cuts].value,sexcat['ERRX2WIN_IMAGE'][cuts].value))
plt.title('Winpos of all sources')
plt.xlabel('Instrumental Magnitude (mag)')
plt.ylabel('Change from X to XWIN (px)')
plt.show()
# Remove sources with bad windowed positions
cuts = cuts&(sexcat['FLAGS_WIN']==0)
# Plot the results
plt.errorbar(sexcat['MAG_AUTO'][cuts].value, xdiff[cuts], fmt='.',
yerr=(sexcat['ERRX2WIN_IMAGE'][cuts].value,sexcat['ERRX2WIN_IMAGE'][cuts].value))
plt.title('Winpos with FLAGS_WIN==0')
plt.xlabel('Instrumental Magnitude (mag)')
plt.ylabel('Change from X to XWIN (px)')
plt.show()
| examples/.ipynb_checkpoints/test_sextractor_winpos-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.7.2
# language: julia
# name: julia-1.7
# ---
# # Performance engineering and optimisation
#
# In this project we will first review a few standard pitfalls for performance in Julia and then get our hands dirty in optimising a few pieces of code ourselves.
#
# For more details on the issues mentioned here, see the [performance tips](https://docs.julialang.org/en/v1/manual/performance-tips/) and this [blog article](https://www.stochasticlifestyle.com/7-julia-gotchas-handle/) by <NAME>.
# ## Pitfall 1: Global scope
# +
# Pitfall demo: non-constant globals. `a` and `b` may be rebound to any type
# at any time, so functions reading them cannot be type-specialised.
a = 2.0
b = 3.0
function linear_combination()
    # Reads untyped globals -> the compiler must assume `Any` and box values.
    return 2a + b
end
answer = linear_combination()
@show answer;
# Expect red `Any` annotations in the inference output below.
@code_warntype linear_combination()
# -
# Even though all types are known, the compiler does not make use of them. The reason is that in global scope (such as a Jupyter notebook or the REPL) there are no guarantees that `a` and `b` are of a certain type as any later reassignment might change this.
using Traceur
@trace linear_combination()
# ### Solution 1a: Wrap code in functions
#
# Sounds simple, but wrapping code in functions is often a very good solution to (not only this) performance problem.
# +
# Fix: move the data into a local scope so the compiler knows the types.
function outer()
    a = 2.0
    b = 3.0
    function linear_combination()
        # `a` and `b` are now typed locals captured by the closure.
        return 2a + b
    end
    linear_combination()
end
answer = outer()
@show answer;
# Inference now succeeds: the return type is a concrete Float64.
@code_warntype outer()
# -
# Notice that **constant propagation** is even possible in this case (i.e. Julia will do the computation at compile time):
@code_llvm outer()
# ## Pitfall 2: Type-instabilities
# The following function looks innocent ...
# Pitfall demo (intentionally type-unstable): `x` starts as an Int and
# becomes a Float64 after the first division, so its inferred type is a Union.
function g()
    x = 1
    for i = 1:10
        x = x / 2
    end
    x
end
# ... but is not:
@code_warntype debuginfo=:none g()
# The issue is that the type of the accumulator `x` changes *during the iterations*!
# ### Solution 2a: Avoid type change
# Fix: initialise the accumulator as Float64 so its type never changes.
function h()
    x = 1.0
    for i = 1:10
        x = x / 2
    end
    x
end
@code_warntype debuginfo=:none h()
@code_llvm debuginfo=:none h()
# (Side note: Things are actually not *too* bad in this case, as `Float64` and `Int64` have the same bit width, so Julia con do a cool thing called *union splitting*, see https://julialang.org/blog/2018/08/union-splitting)
# ### Solution 2b: Specify types explicitly
#
# ... the Fortran / C way ;)
# Alternative fix: a type annotation on the local converts every assignment.
function g2()
    x::Float64 = 1 # Enforces conversion to Float64
    for i = 1:10
        x = x / 2
    end
    x
end
@code_llvm debuginfo=:none g2()
# ### Solution 2c: Function barriers
# Heterogeneous container: the element type is an abstract Union.
data = Union{Int64,Float64,String}[4, 2.0, "test", 3.2, 1]
# Pitfall demo: `val` has an abstract type, so `val^2` is a dynamic dispatch
# on every iteration (and would error for the String elements).
function calc_square(x)
    for i in eachindex(x)
        val = x[i]
        val^2
    end
end
@code_warntype calc_square(data)
# +
# Fix: a function barrier. The outer loop stays dynamic, but each call to
# `calc_square_inner` dispatches once and then runs a fully specialised body.
function calc_square_outer(x)
    for i in eachindex(x)
        calc_square_inner(x[i])
    end
end
calc_square_inner(x) = x^2
# -
@code_warntype calc_square_inner(data[1])
# # Pitfall 3: Views and copies
#
# By default slicing into a matrix, actually returns a copy and not a view.
# +
using BenchmarkTools, LinearAlgebra
M = rand(3,3);
x = rand(3);
# -
# Slicing with `M[1:3, 1]` allocates a fresh vector before the dot product.
f(x, M) = dot(M[1:3, 1], x) # Implicit copy
@btime f($x, $M); # ($ syntax in BenchmarkTools to avoid global scope
# ... otherwise numbers could be less meaningful.)
# `view` (or the `@views` macro) reuses M's memory -- no allocation.
g(x,M) = dot(view(M, 1:3, 1), x) # Avoids the copy
@btime g($x, $M);
g(x,M) = @views dot(M[1:3,1], x) # More convenient
@btime g($x, $M);
# # Pitfall 4: Temporary allocations and vectorised code
# +
using BenchmarkTools
# Pitfall demo: `x + 2*x` builds two brand-new arrays on every iteration and
# then rebinds `x` -- hundreds of thousands of temporary allocations in total.
function f()
    x = [1; 5; 6] # Column-vector
    for i in 1:100_000
        x = x + 2*x
    end
    x
end
@btime f();
# -
# ### Solution 4a: Use dot syntax!
#
# The vectorisation syntax (`.`) we already talked about is a semantic syntax to enforce loop fusion (see the blog post by Steven G. Johnson: https://julialang.org/blog/2017/01/moredots), which avoids temporaries and thus speeds up computations.
# Fix: dot-fused broadcasting updates `x` in place -- one loop, no temporaries.
function f1()
    x = [1; 5; 6]
    for i in 1:100_000
        x .= x .+ 2 .* x
        # @. x = x + 2*x # equivalent
    end
    x
end
@btime f1();
# Notice the 10-fold speedup!
#
# Even faster is writing the fused loop explicitly (and using `@inbounds`).
# Fastest variant: a hand-written scalar loop with bounds checks removed.
function f()
    x = [1; 5; 6]
    @inbounds for i in 1:100_000
        for k in 1:3
            x[k] = x[k] + 2*x[k]
        end
    end
    return x
end
@btime f();
# # Pitfall 5: Abstract fields
#
# (See also the project on [custom types](01_Types_Specialisation.ipynb)).
using BenchmarkTools
# +
# Pitfall demo: abstract field types. `a.x` could be any AbstractFloat
# subtype, so every field access is boxed and dynamically dispatched.
struct MyType
    x::AbstractFloat
    y::AbstractString
end
f(a::MyType) = a.x^2 + sqrt(a.x)
# +
a = MyType(3.0, "test")
@btime f($a);
# -
# ### Solution 5a: Use concrete types in structs
# +
# Fix: concrete field types let the compiler lay out the struct inline
# and fully specialise accesses.
struct MyTypeConcrete
    x::Float64
    y::String
end
f(b::MyTypeConcrete) = b.x^2 + sqrt(b.x)
# +
b = MyTypeConcrete(3.0, "test")
@btime f($b);
# -
# Note that the latter implementation is **more than 30x faster**!
# ### Solution 5b: If generic content is needed
#
# Use [parametric types](01_Types_Specialisation.ipynb).
# +
# Generic *and* fast: the concrete types of `x`/`y` become part of the
# struct's own type, so each instantiation is fully specialised.
struct MyTypeParametric{A<:AbstractFloat, B<:AbstractString}
    x::A
    y::B
end
f(c::MyTypeParametric) = c.x^2 + sqrt(c.x)
# -
c = MyTypeParametric(3.0, "test")
# While this makes the code a little less readable (field types and stack traces are now less meaningful),
# the compiler is able to produce optimal code, since the types of `x` and `y` are encoded in the type of the struct:
@btime f($c);
# The same definition is equally fast for other concrete parameter choices:
c = MyTypeParametric(Float32(3.0), SubString("test"))
@btime f($c);
# # Pitfall 6: Column major order
#
# Unlike C or numpy (but like MATLAB and FORTRAN), Julia uses column-major ordering in matrices:
M = reshape(1:9, 3, 3)
@show M[1, 2] M[2, 2] M[3, 2]
# i.e. **earlier indices run faster**!
# Neglecting this leads to a performance penalty:
# +
M = rand(1000,1000);
# Inner loop walks down a column -> contiguous memory access (cache-friendly).
function fcol(M)
    for col in 1:size(M, 2)
        for row in 1:size(M, 1)
            M[row, col] = 42
        end
    end
    nothing
end
# Inner loop walks along a row -> strided access (cache-unfriendly in Julia's
# column-major layout).
function frow(M)
    for row in 1:size(M, 1)
        for col in 1:size(M, 2)
            M[row, col] = 42
        end
    end
    nothing
end
# -
@btime fcol($M)
@btime frow($M)
# ## Performance takeaways
#
# * Gotcha 1: **Wrap code in self-contained functions** in performance critical applications, i.e. avoid global scope.
# * Gotcha 2: Write **type-stable code** (check with `@code_warntype`).
# * Gotcha 3: Use **views** instead of copies to avoid unnecessary allocations.
# * Gotcha 4: Use **broadcasting (more dots)** to avoid temporary allocations in vectorized code (or write out loops).
# * Gotcha 5: **Types should always have concrete fields.** If you don't know them in advance, use type parameters.
# * Gotcha 6: Be aware of **column major order** when looping over arrays.
#
#
# ##### More details
# - Check out [this MIT lecture](https://mitmath.github.io/18337/lecture2/optimizing).
# ## Extra performance tips
#
# Compared to python and C, Julia puts a much stronger emphasis on functional programming,
# which often allows to write concise code which *avoids allocations*. For example
# +
using BenchmarkTools
# Allocates the full temporary array `x_mod` before reducing it.
function myfun_naive(x)
    x_mod = @. abs(2x - x)
    minimum(x_mod)
end
x = randn(10_000)
@btime myfun_naive($x);
# -
# Now, `minimum` allows to take a function as first argument. This function is applied *elementwise* before doing the standard action of `minimum` (taking the minimum):
# Allocation-free: `minimum` applies the mapping function element-wise on the fly.
function myfun_fast(x)
    minimum(xi -> abs(2xi - xi), x)
end
@btime myfun_fast($x);
# A convenience syntax allows to write this even nicer for more complicated expressions:
# Equivalent definition using `do`-block syntax for the mapping function.
function myfun_fast(x)
    minimum(x) do xi
        abs(2xi - xi)
    end
end
# This is equivalent to the first definition of `myfun_fast`. Notice, how the first (function) argument of `minimum` disappeared and is replaced by a `do ... end` block, which defines the function to be used as first argument.
#
# `minimum` is by no means special here. This syntax is general and works for *all* functions, which take a function as first argument, such as `map`, `filter`, `sum`, `minimum`, `maximum` ...
#
# As usual, custom functions in julia are no different here:
# +
# Applies `f` to every second element of `x` (odd indices) and prints each result.
function print_every_second(f, x)
    for i in 1:2:length(x)
        println(f(x[i]))
    end
end
x = [1, 2, 3, 4, 5 , 6]
# The `do` block below becomes the first (function) argument `f`.
print_every_second(x) do xi
    2xi
end
# -
# ## Optimisation project 1
# Optimize the following code.
#
# (The type and size of the input is fixed/may not be changed.)
# +
# Exercise code: intentionally unoptimised (reads the non-constant globals
# `b` and `c`, allocates per iteration, and the initial `D = zeros(N,N)` is
# immediately discarded). Do not "fix" it here -- optimising it IS the task.
function work!(A, N)
    D = zeros(N,N)
    for i in 1:N
        D = b[i]*c*A
        b[i] = sum(D)
    end
end
N = 100
A = rand(N,N)
b = rand(N)
c = 1.23
work!(A,N)
# -
using BenchmarkTools
@btime work!($A, $N);
# ## Optimization project 2
# Optimize the following function.
# Exercise code: intentionally left slow (`val = 0` makes the accumulator
# type-unstable, `mod(v[i],256)` is recomputed in the inner loop, and the
# trig identity could be simplified). Optimising it IS the task of project 2.
function work!(A, B, v, N)
    val = 0
    for i in 1:N
        for j in 1:N
            val = mod(v[i],256);
            A[i,j] = B[i,j]*(sin(val)*sin(val)-cos(val)*cos(val));
        end
    end
end
# The (fixed) input is given by:
# +
N = 4000
A = zeros(N,N)
B = rand(N,N)
v = rand(Int, N);
work!(A,B,v,N)
# -
# You can benchmark with the following code snippet. The larger the Mega-iterations per second the better!
using BenchmarkTools
runtime = @belapsed work!($A, $B, $v, $N);
perf = N*N*1e-6/runtime # MIt/s
println("Performance: $perf MIt/s")
| Projects/03_Performance_Engineering.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Exercise 2
# +
# Load the Titanic training data and peek at the first rows.
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn import svm
from sklearn.model_selection import cross_val_score
import numpy as np
titanic_data=pd.read_csv(r"train.csv")
titanic_data.head()
# -
titanic_data.dtypes
titanic_data.isnull().sum() ### Check for missing values
# Integer-encode the two categorical columns.
# NOTE(review): pandas' .cat.codes encodes missing values as -1 -- confirm
# that is the intended treatment for the missing Embarked entries.
titanic_data["Sex"] = titanic_data["Sex"].astype('category').cat.codes
titanic_data["Embarked"] = titanic_data["Embarked"].astype('category').cat.codes
titanic_data.dtypes
# +
#### Missing Value imputation
# Age: fill with the column mean; Cabin: collapse to a has-cabin 0/1 flag.
mean_value=titanic_data['Age'].mean()
titanic_data['Age']=titanic_data['Age'].fillna(mean_value)
titanic_data[['Cabin']]=np.where(titanic_data[['Cabin']].isnull(), 0, 1)
# +
# 50/50 stratified train/test split on the remaining feature columns.
target = 'Survived'
X = titanic_data.drop(['PassengerId','Survived','Name','Ticket'],axis=1)
y=titanic_data[target]
X_train, X_test, y_train, y_test = train_test_split(X.values,y,test_size=0.50,random_state=123, stratify=y)
# -
# Linear-kernel SVM baseline; the final expression reports held-out accuracy.
clf_svm=svm.SVC(kernel='linear', C=1)
clf_svm
clf_svm.fit(X_train,y_train)
clf_svm.score(X_test, y_test)
# ## Exercise 3
import graphviz
from sklearn import tree
# FIX: sklearn.externals.six was removed in scikit-learn 0.23; the stdlib
# io.StringIO provides the same in-memory text buffer used below.
from io import StringIO
from IPython.display import Image
from sklearn.tree import export_graphviz
import pydotplus
# Fit an (unconstrained) decision tree on the same train split as Exercise 2.
clf_tree = tree.DecisionTreeClassifier()
clf_tree = clf_tree.fit(X_train, y_train)
# Render the top of the tree (max_depth=3) to PNG via Graphviz.
dot_data = StringIO()
export_graphviz(clf_tree, out_file=dot_data,
                filled=True, rounded=True,
                class_names=['Died','Survived'],max_depth = 3,
                special_characters=True,feature_names=X.columns.values)
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
Image(graph.create_png())
# Held-out accuracy of the tree.
clf_tree.score(X_test, y_test)
# ## Exercise 4
from sklearn.ensemble import RandomForestClassifier
# +
# Random forest with 20 trees; unbounded depth, splits require >= 7 samples.
clf_random = RandomForestClassifier(n_estimators=20, max_depth=None,
    min_samples_split=7, random_state=0)
# -
# Fit on the Exercise 2 split and report held-out accuracy.
clf_random.fit(X_train,y_train)
clf_random.score(X_test, y_test)
| 8). Fine-Tuning Classification Algorithms/.ipynb_checkpoints/Exercise 2-4-Lesson 08-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Setup
# +
#Common imports
import numpy as np
import os
import sys
#To make outputs more consistent
np.random.seed(42)
#To Save & Load Models
import pickle
#To plot figures
# %matplotlib inline
import matplotlib
# NOTE(review): pyplot is aliased as `pt` here, but later cells in this
# notebook import it again as `plt` -- pick one alias for consistency.
import matplotlib.pyplot as pt
import seaborn as sns
sns.set(font_scale=1)
pt.rcParams['axes.labelsize'] = 14
pt.rcParams['xtick.labelsize'] = 12
pt.rcParams['ytick.labelsize'] = 12
#Folder Directory Structure
# All model artefacts and figures are written under ./model/<PROJECT_ID>.
PROJECT_ROOT_DIR ='.'
PROJECT_FOLDER = 'titanic_kaggle'
PROJECT_ID='end_to_end_project_titanic_kaggle'
PROJECT_OUTPUT_PATH = os.path.join(PROJECT_ROOT_DIR,'model',PROJECT_ID)
def save_fig(fig_id,tight_layout=True,fig_extension='png',resolution=300):
    """Save the current matplotlib figure to PROJECT_OUTPUT_PATH/<fig_id>.<fig_extension>.

    fig_id        -- file name stem for the figure
    tight_layout  -- apply tight_layout() before saving
    fig_extension -- image format / file extension
    resolution    -- DPI passed to savefig
    """
    # BUG FIX: the original tested the undefined name IMAGES_PATH (a NameError
    # on every call); check and create the directory we actually write to.
    if not os.path.exists(PROJECT_OUTPUT_PATH):
        os.makedirs(PROJECT_OUTPUT_PATH)
    path = os.path.join(PROJECT_OUTPUT_PATH,fig_id + '.' + fig_extension)
    print("Saving Figure : {}".format(fig_id))
    if tight_layout:
        # Use the `pt` pyplot alias bound in this cell; the original used
        # `plt`, which is only defined by later cells.
        pt.tight_layout()
    pt.savefig(path,format=fig_extension,dpi=resolution)
#Saving the Model
import pickle
def save_model(model, model_name):
    """Pickle `model` to PROJECT_OUTPUT_PATH/<model_name>.pkl, creating the
    output directory first if it does not exist yet."""
    target_dir = PROJECT_OUTPUT_PATH
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    destination = os.path.join(target_dir, model_name + '.pkl')
    with open(destination, 'wb') as handle:
        pickle.dump(model, handle)
def load_model(model,model_name):
    """Return the model unpickled from PROJECT_OUTPUT_PATH/<model_name>.pkl.

    NOTE(review): the `model` argument is ignored -- it is immediately rebound
    to the unpickled object below. Kept only for interface compatibility.
    """
    model_file = os.path.join(PROJECT_OUTPUT_PATH,model_name+'.pkl')
    with open(model_file, 'rb') as f:
        model = pickle.load(f)
    return model
# -
# ### Step 1 : Download the Data
# +
import tarfile
from six.moves import urllib
from six.moves import urllib
#Server Location
DOWNLOAD_ROOT = "https://www.kaggle.com/c/3136/download/"
TITANIC_PATH = os.path.join("data", PROJECT_FOLDER)
TITANIC_GENDER_SUBMISSION = DOWNLOAD_ROOT + "gender_submission.csv"
TITANIC_TRAIN_SET = DOWNLOAD_ROOT + "train.csv"
TITANIC_TEST_SET = DOWNLOAD_ROOT + "test.csv"
def fetch_data(file='train.csv',url=TITANIC_TRAIN_SET,path=TITANIC_PATH,):
    """Download `file` from `url` into directory `path`, skipping the
    download when the file already exists locally.

    NOTE(review): Kaggle download URLs normally require authentication;
    confirm this unauthenticated urlretrieve actually fetches the CSV.
    """
    print('URL to Download : {}'.format(url))
    print('Path to Download : {} '.format(path))
    if not os.path.isdir(path):
        os.makedirs(path)
    csv_path = os.path.join(path,file)
    if not os.path.exists(csv_path):
        print('Downloading data ...')
        urllib.request.urlretrieve(url, csv_path)
#Downloading & Extract the data
fetch_data()
# -
# ### Step 2 - Load Data
# +
import pandas as pd
def load_data(path=TITANIC_PATH,file='train.csv'):
    """Read <path>/<file> from disk and return it as a pandas DataFrame."""
    return pd.read_csv(os.path.join(path, file))
#Load data
print('Loading the Data...')
titanic = load_data()
#Verift the data
titanic.head(2)
# -
# ### Step 3 - Train & Test Data Set
titanic_train = load_data(file='train.csv')
titanic_test = load_data(file='test.csv')
print('Titanic Training Data : {}'.format(titanic_train.shape))
print('Titanic Test Data : {}'.format(titanic_test.shape))
titanic.info()
titanic.isnull().sum()
# ### Step 3 - Data Visualization & Inspection
# #### Features
#
# survival: Survival
# PassengerId: Unique Id of a passenger.
# pclass: Ticket class
# sex: Sex
# Age: Age in years
# sibsp: # of siblings / spouses aboard the Titanic
# parch: # of parents / children aboard the Titanic
# ticket: Ticket number
# fare: Passenger fare
# cabin: Cabin number
# embarked: Port of Embarkation
#We can see the graphical distribution too as follows
import matplotlib.pyplot as plt
#Vertical Axis has number of instances
#Horizontal Axis has values of the attributes
titanic.hist(bins=50,figsize=(20,15))
plt.show()
# We see that age and fare are on completely different scale so we do feature scaling.
# +
corr=titanic.corr()#["Survived"]
plt.figure(figsize=(15, 10))
sns.heatmap(corr, vmax=.9, linewidths=0.01,
square=True,annot=True,cmap='YlGnBu',linecolor="white")
plt.title('Correlation between features');
# -
#correlation of features with target variable
titanic.corr()["Survived"]
# It's visible that survival rate has the highest Positive Correlation with Fare followed by Parch and Highest negative correlation with Pclass followed by age.
# ### Step 5 - Data Pre-processing
#
# It's important to fill null values and scale the features before feeding the data into the model.
#Lets check which rows have null Embarked column
titanic[titanic['Embarked'].isnull()]
# Since both the records are from Pclass 1 in the same cabin as well, let's try to visualize the other similar records
sns.boxplot(x="Embarked", y="Fare", hue="Pclass", data=titanic);
titanic['Embarked'].describe()
#titanic['Pclass'].mean()
titanic[titanic['Pclass']==1].sort_values(['PassengerId']).loc[0:70]
#titanic['Pclass'].mean()
#titanic[titanic['Fare'] ==80.0]
#As it can be seen that other Cabin Passengers also majorly boarded from C
titanic["Embarked"] = titanic["Embarked"].fillna('C')
# Also We can see that for 1st class median line is coming around fare $80 for embarked value 'C'. So we can replace NA values in Embarked column with 'C'
# #### Fare Column
# +
#we can replace missing value in fare by taking median of all fares of those passengers
#who share 3rd Passenger class and Embarked from 'S'
def fill_missing_fare(df):
    """Impute missing fares with the median fare of 3rd-class passengers who
    embarked at Southampton ('S'); mutates `df` and returns it."""
    third_class_s = (df['Pclass'] == 3) & (df['Embarked'] == 'S')
    fallback_fare = df.loc[third_class_s, 'Fare'].median()
    df["Fare"] = df["Fare"].fillna(fallback_fare)
    return df
titanic_test=fill_missing_fare(titanic_test)
# -
# ### Feature Engineering
# #### 1. Deck - Where exactly were passenger on the ship?
titanic["Deck"]=titanic.Cabin.str[0]
titanic_test["Deck"]=titanic_test.Cabin.str[0]
titanic["Deck"].unique() # 0 is for null values
g = sns.factorplot("Survived", col="Deck", col_wrap=4,
data=titanic[titanic.Deck.notnull()],
kind="count", size=2.5, aspect=.8);
titanic.Deck.fillna('Z', inplace=True)
titanic_test.Deck.fillna('Z', inplace=True)
titanic["Deck"].unique() # Z is for null values
# #### 2. How Big is your family?
# Creating a family size variable including the passenger themselves
titanic["FamilySize"] = titanic["SibSp"] + titanic["Parch"]+1
titanic_test["FamilySize"] = titanic_test["SibSp"] + titanic_test["Parch"]+1
print(titanic["FamilySize"].value_counts())
# +
# Discretize family size
titanic.loc[titanic["FamilySize"] == 1, "FsizeD"] = 'singleton'
titanic.loc[(titanic["FamilySize"] > 1) & (titanic["FamilySize"] < 5) , "FsizeD"] = 'small'
titanic.loc[titanic["FamilySize"] >4, "FsizeD"] = 'large'
titanic_test.loc[titanic_test["FamilySize"] == 1, "FsizeD"] = 'singleton'
titanic_test.loc[(titanic_test["FamilySize"] >1) & (titanic_test["FamilySize"] <5) , "FsizeD"] = 'small'
titanic_test.loc[titanic_test["FamilySize"] >4, "FsizeD"] = 'large'
print(titanic["FsizeD"].unique())
print(titanic["FsizeD"].value_counts())
# -
# #### 3. Do you have longer names?
# +
#Create feture for length of name
# The .apply method generates a new series
titanic["NameLength"] = titanic["Name"].apply(lambda x: len(x))
titanic_test["NameLength"] = titanic_test["Name"].apply(lambda x: len(x))
#print(titanic["NameLength"].value_counts())
bins = [0, 20, 40, 57, 85]
group_names = ['short', 'okay', 'good', 'long']
titanic['NlengthD'] = pd.cut(titanic['NameLength'], bins, labels=group_names)
titanic_test['NlengthD'] = pd.cut(titanic_test['NameLength'], bins, labels=group_names)
print(titanic["NlengthD"].unique())
# -
# #### 4. Name Title
# +
import re
def get_title(name):
    """Extract the honorific title from a passenger name.

    Titles are the first alphabetic run preceded by a space and followed by
    a period, e.g. "Braund, Mr. Owen Harris" -> "Mr". Returns "" when no
    title is found.
    """
    # FIX: raw string for the regex -- the original '\.' is an invalid string
    # escape (SyntaxWarning on modern Python); the pattern itself is unchanged.
    title_search = re.search(r' ([A-Za-z]+)\.', name)
    if title_search:
        return title_search.group(1)
    return ""
# -
def add_titles(df):
    """Derive a 'Title' column from 'Name', collapsing uncommon titles.

    Mlle/Ms are mapped to Miss and Mme to Mrs; the rare titles (Dona, Lady,
    Countess, Capt, Col, Don, Dr, Major, Rev, Sir, Jonkheer) are grouped
    under 'Rare Title'. Prints the resulting counts and returns the
    (mutated) frame.
    """
    #Get all the titles for each passenger name.
    df["Title"] = df["Name"].apply(get_title)
    # Titles with very low cell counts to be combined to "rare" level
    rare_title = ['Dona', 'Lady', 'Countess','Capt', 'Col', 'Don',
                  'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer']
    # FIX: the original declared `rare_title` but never used it, repeating
    # one .loc assignment per title instead; isin() is equivalent and keeps
    # the list and the behaviour in sync.
    df.loc[df["Title"].isin(rare_title), "Title"] = 'Rare Title'
    # Also reassign mlle, ms, and mme accordingly
    df["Title"] = df["Title"].replace({"Mlle": 'Miss', "Ms": 'Miss', "Mme": 'Mrs'})
    print(df["Title"].value_counts())
    return df
titanic = add_titles(titanic)
titanic_test = add_titles(titanic_test)
# #### 5.Ticket Number extraction
# +
print(titanic["Ticket"].tail())
titanic["TicketNumber"] = titanic["Ticket"].str.extract('(\d{2,})', expand=True)
titanic["TicketNumber"] = titanic["TicketNumber"].apply(pd.to_numeric)
titanic_test["TicketNumber"] = titanic_test["Ticket"].str.extract('(\d{2,})', expand=True)
titanic_test["TicketNumber"] = titanic_test["TicketNumber"].apply(pd.to_numeric)
# -
#some rows in ticket column dont have numeric value so we got NaN there
titanic[titanic["TicketNumber"].isnull()]
titanic.TicketNumber.fillna(titanic["TicketNumber"].median(), inplace=True)
titanic_test.TicketNumber.fillna(titanic_test["TicketNumber"].median(), inplace=True)
# #### 6. Categorical to Numeric Features.
# Using Label Encoder one at a time as Categorical Encoder is not available
# +
from sklearn.preprocessing import LabelEncoder,OneHotEncoder
labelEnc=LabelEncoder()
# Integer-encode each categorical feature in train and test.
# NOTE(review): fit_transform is called separately on train and test, so the
# same category can receive *different* integer codes whenever the two sets
# contain different value sets -- fit once on the combined values (or reuse
# the train fit) to guarantee consistent encodings.
cat_vars=['Embarked','Sex',"Title","FsizeD","NlengthD",'Deck']
for col in cat_vars:
    titanic[col]=labelEnc.fit_transform(titanic[col])
    titanic_test[col]=labelEnc.fit_transform(titanic_test[col])
# -
# #### 7. Age
# Age seems to be promising feature. So it doesnt make sense to simply fill null values out with median/mean/mode.
# Using Random Forest algorithm to predict ages.
from sklearn.ensemble import RandomForestRegressor
#predicting missing values in age using Random Forest
def fill_missing_age(df):
    """Impute missing 'Age' values by predicting them with a random forest
    trained on the rows whose age is known; mutates `df` and returns it."""
    #Feature set
    age_df = df[['Age','Embarked','Fare', 'Parch', 'SibSp',
                 'TicketNumber', 'Title','Pclass','FamilySize',
                 'FsizeD','NameLength',"NlengthD",'Deck']]
    # Split sets into train and test
    train = age_df.loc[ (df.Age.notnull()) ]# known Age values
    test = age_df.loc[ (df.Age.isnull()) ]# null Ages
    # All age values are stored in a target array
    y = train.values[:, 0]
    # All the other values are stored in the feature array
    X = train.values[:, 1::]
    # Create and fit a model
    # NOTE(review): no random_state is set, so imputed ages differ between
    # runs; 2000 trees is also expensive -- confirm both are acceptable.
    rtr = RandomForestRegressor(n_estimators=2000, n_jobs=-1)
    rtr.fit(X, y)
    # Use the fitted model to predict the missing values
    predictedAges = rtr.predict(test.values[:, 1::])
    # Assign those predictions to the full data set
    df.loc[ (df.Age.isnull()), 'Age' ] = predictedAges
    return df
titanic=fill_missing_age(titanic)
titanic_test=fill_missing_age(titanic_test)
# Scaling Age & Fare
#
# +
from sklearn import preprocessing
# Standardise Age and Fare to zero mean / unit variance.
std_scale = preprocessing.StandardScaler().fit(titanic[['Age', 'Fare']])
titanic[['Age', 'Fare']] = std_scale.transform(titanic[['Age', 'Fare']])
# NOTE(review): the scaler is re-fitted on the test set below, so the test
# data is scaled with different statistics than the training data -- the
# usual practice is to reuse the scaler fitted on the training set.
std_scale = preprocessing.StandardScaler().fit(titanic_test[['Age', 'Fare']])
titanic_test[['Age', 'Fare']] = std_scale.transform(titanic_test[['Age', 'Fare']])
# -
# ###### New Correleation
titanic.corr()["Survived"]
# ### Step 6. Model Building
# #### 1. Linear Regression
# +
# Import the linear regression class
from sklearn.linear_model import LinearRegression
# Sklearn also has a helper that makes it easy to do cross validation
# NOTE(review): sklearn.cross_validation was removed in scikit-learn 0.20;
# this notebook pins an old sklearn -- migrate to sklearn.model_selection.
from sklearn.cross_validation import KFold
# The columns we'll use to predict the target
predictors = ["Pclass", "Sex", "Age","SibSp", "Parch", "Fare",
              "Embarked","NlengthD", "FsizeD", "Title","Deck"]
target="Survived"
# Initialize our algorithm class
lin_reg = LinearRegression()
# Generate cross validation folds for the titanic dataset. It return the row indices corresponding to train and test.
# We set random_state to ensure we get the same splits every time we run this.
kf = KFold(titanic.shape[0], n_folds=3, random_state=1)
predictions = []
# -
for train, test in kf:
    # The predictors we're using the train the algorithm. Note how we only take the rows in the train folds.
    train_predictors = (titanic[predictors].iloc[train,:])
    # The target we're using to train the algorithm.
    train_target = titanic[target].iloc[train]
    # Training the algorithm using the predictors and target.
    lin_reg.fit(train_predictors, train_target)
    # We can now make predictions on the test fold
    test_predictions = lin_reg.predict(titanic[predictors].iloc[test,:])
    predictions.append(test_predictions)
# +
# Stitch the per-fold predictions back into one array and threshold at 0.5.
predictions = np.concatenate(predictions, axis=0)
# Map predictions to outcomes (only possible outcomes are 1 and 0)
predictions[predictions > .5] = 1
predictions[predictions <=.5] = 0
# Fraction of rows where the thresholded prediction matches the label.
accuracy=sum(titanic["Survived"]==predictions)/len(titanic["Survived"])
accuracy
# -
# #### 2. Logistic Regression
# +
from sklearn import cross_validation
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import ShuffleSplit
predictors = ["Pclass", "Sex", "Fare", "Embarked","Deck","Age",
              "FsizeD", "NlengthD","Title","Parch"]
# Initialize our algorithm
lr = LogisticRegression(random_state=1)
# Compute the accuracy score for all the cross validation folds.
# NOTE(review): despite the comment above, scoring='f1' reports the F1
# score, not accuracy -- align the comment or the metric.
cv = ShuffleSplit(n_splits=10, test_size=0.3, random_state=50)
scores = cross_val_score(lr, titanic[predictors],
                         titanic["Survived"],scoring='f1', cv=cv)
# Take the mean of the scores (because we have one for each fold)
print(scores.mean())
# -
# #### 4. Random Forest
# +
# NOTE(review): sklearn.cross_validation was removed in scikit-learn 0.20;
# migrate these imports to sklearn.model_selection when updating sklearn.
from sklearn import cross_validation
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import KFold
from sklearn.model_selection import cross_val_predict
import numpy as np
predictors = ["Pclass", "Sex", "Age",
              "Fare","NlengthD","NameLength", "FsizeD", "Title","Deck"]
# Initialize our algorithm with the default paramters
# n_estimators is the number of trees we want to make
# min_samples_split is the minimum number of rows we need to make a split
# min_samples_leaf is the minimum number of samples we can have at the place where a tree branch ends (the bottom points of the tree)
rf = RandomForestClassifier(random_state=1, n_estimators=10, min_samples_split=2,
                            min_samples_leaf=1)
kf = KFold(titanic.shape[0], n_folds=5, random_state=1)
# NOTE(review): `cv` and `predictions` below are computed but never used in
# this cell (the scores use `kf`); consider removing them.
cv = ShuffleSplit(n_splits=10, test_size=0.3, random_state=50)
predictions = cross_validation.cross_val_predict(rf, titanic[predictors],titanic["Survived"],cv=kf)
predictions = pd.Series(predictions)
scores = cross_val_score(rf, titanic[predictors], titanic["Survived"],
                         scoring='f1', cv=kf)
# Take the mean of the scores (because we have one for each fold)
print(scores.mean())
# -
# Second, tuned forest: more trees, bounded depth, TicketNumber added.
predictors = ["Pclass", "Sex", "Age",
              "Fare","NlengthD","NameLength", "FsizeD", "Title","Deck","TicketNumber"]
rf = RandomForestClassifier(random_state=1, n_estimators=50, max_depth=9,min_samples_split=6, min_samples_leaf=4)
rf.fit(titanic[predictors],titanic["Survived"])
kf = KFold(titanic.shape[0], n_folds=5, random_state=1)
predictions = cross_validation.cross_val_predict(rf, titanic[predictors],titanic["Survived"],cv=kf)
predictions = pd.Series(predictions)
scores = cross_val_score(rf, titanic[predictors], titanic["Survived"],scoring='f1', cv=kf)
# Take the mean of the scores (because we have one for each fold)
print(scores.mean())
# #### Important Features
# +
# Plot the tuned forest's per-feature importances, with error bars showing
# the spread of that importance across the individual trees.
importances=rf.feature_importances_
# BUG FIX: the original comprehension ignored its loop variable and collected
# rf.feature_importances_ (the ensemble mean) once per tree, which makes the
# standard deviation -- and hence every error bar -- exactly zero. Use each
# tree's own importances instead.
std = np.std([tree.feature_importances_ for tree in rf.estimators_],
             axis=0)
indices = np.argsort(importances)[::-1]
sorted_important_features=[]
for i in indices:
    sorted_important_features.append(predictors[i])
#predictors=titanic.columns
plt.figure()
plt.title("Feature Importances By Random Forest Model")
plt.bar(range(np.size(predictors)), importances[indices],
       color="r", yerr=std[indices], align="center")
plt.xticks(range(np.size(predictors)), sorted_important_features, rotation='vertical')
plt.xlim([-1, np.size(predictors)]);
# -
# ### Gradient Boosting
# +
import numpy as np
# NOTE(review): GradientBoostingClassifier is imported but never used in this
# cell -- the "Gradient Boosting" header does not match what the code does
# (univariate SelectKBest feature scoring).
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.cross_validation import KFold
# %matplotlib inline
import matplotlib.pyplot as plt
#predictors = ["Pclass", "Sex", "Age", "Fare",
#              "FsizeD", "Embarked", "NlengthD","Deck","TicketNumber"]
predictors = ["Pclass", "Sex", "Age",
              "Fare","NlengthD", "FsizeD","NameLength","Deck","Embarked"]
# Perform feature selection
selector = SelectKBest(f_classif, k=5)
selector.fit(titanic[predictors], titanic["Survived"])
# Get the raw p-values for each feature, and transform from p-values into scores
scores = -np.log10(selector.pvalues_)
indices = np.argsort(scores)[::-1]
sorted_important_features=[]
for i in indices:
    sorted_important_features.append(predictors[i])
plt.figure()
plt.title("Feature Importances By SelectKBest")
# NOTE(review): `std` here is carried over from the random-forest cell and
# measures tree-importance spread, which is unrelated to these F-test scores
# -- presumably the yerr argument should be dropped; confirm.
plt.bar(range(np.size(predictors)), scores[indices],
       color="seagreen", yerr=std[indices], align="center")
plt.xticks(range(np.size(predictors)), sorted_important_features, rotation='vertical')
plt.xlim([-1, np.size(predictors)]);
# +
from sklearn import cross_validation
from sklearn.linear_model import LogisticRegression
# Re-run logistic regression with the Title/Deck feature set; `lr` defined
# here is reused by the voting ensemble in the next cell.
predictors = ["Pclass", "Sex", "Age", "Fare", "Embarked","NlengthD",
              "FsizeD", "Title","Deck"]
# Initialize our algorithm
lr = LogisticRegression(random_state=1)
# Compute the accuracy score for all the cross validation folds.
# NOTE(review): scoring='f1' reports F1, not accuracy, despite the comment.
cv = ShuffleSplit(n_splits=10, test_size=0.3, random_state=50)
scores = cross_val_score(lr, titanic[predictors], titanic["Survived"], scoring='f1',cv=cv)
print(scores.mean())
# -
# ### Ensemble Model
# +
# BUG FIX: this feature list was previously assigned to `predictions`, which
# was overwritten below and never read -- so the cell silently trained on the
# older, smaller `predictors` list from a previous cell. Name it `predictors`
# so the extended feature set (NameLength, TicketNumber) is actually used.
predictors=["Pclass", "Sex", "Age", "Fare", "Embarked","NlengthD",
             "FsizeD", "Title","Deck","NameLength","TicketNumber"]
from sklearn.ensemble import VotingClassifier
# Soft-voting ensemble of the logistic regression and random forest above.
eclf1 = VotingClassifier(estimators=[
        ('lr', lr), ('rf', rf)], voting='soft')
eclf1 = eclf1.fit(titanic[predictors], titanic["Survived"])
predictions=eclf1.predict(titanic[predictors])
#predictions
# Predict on the test set and build the Kaggle submission file.
test_predictions=eclf1.predict(titanic_test[predictors])
test_predictions=test_predictions.astype(int)
submission = pd.DataFrame({
        "PassengerId": titanic_test["PassengerId"],
        "Survived": test_predictions
    })
submission.to_csv("titanic_submission.csv", index=False)
# -
save_model(eclf1,'ensemble_model')
| scikit-learn/2_Machine Learning Project - Titanic - Kaggle.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#
# ## Lane Finding
#
# The code below aims to provide lane assistance / lane markings for autonomous cars.
#
# ### Steps
# 1. Calibrate Camera.
# 2. Generate Binary Threshold.
# 3. Warp image
# 4. Identify Lane Lines and draw on Warped image
# 5. Calculate curvature
# 6. Unwarp the image and merge with undistorted image
#
# ### Importing Modules
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import os
import glob
# %matplotlib inline
# ### PreprocessImage
#
# The PreprocessImage provides following functionality.
#
# * Calibrate Camera Helper Functions
# * Undistort Image on calibration parameters
# +
#Chessboard dimension constants in X and Y direction
_NX_CHESSBOARD = 9
_NY_CHESSBOARD = 6
class PreprocessImage:
    """
    Camera-calibration helper.

    Accumulates chessboard corner detections across calibration images
    (populate_calibration_points), derives the camera matrix and distortion
    coefficients (calibrate_camera), and undistorts images with them
    (undistort_image).
    """
    def __init__(self, nx, ny):
        # nx/ny: number of inner chessboard corners along each axis.
        self.nx = nx
        self.ny = ny
        # Accumulated 3D object points and matching 2D image points.
        self.obj_points = []
        self.img_points = []
        # Idealised chessboard grid at z=0, shared by every image.
        self.objp = np.zeros((self.nx * self.ny, 3) , np.float32)
        self.objp[:,:2] = np.mgrid[0:self.nx, 0:self.ny].T.reshape(-1,2)
    def populate_calibration_points(self, img, drawImage=False):
        """
        Call once per chessboard image; records detected corners (if any)
        for later use by calibrate_camera. Optionally displays the corners.
        """
        gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        ret, corners = cv2.findChessboardCorners(gray, (self.nx, self.ny), None)
        if (ret == True):
            self.img_points.append(corners)
            self.obj_points.append(self.objp)
            if drawImage:
                print("Display corners")
                img_drawn_corner = cv2.drawChessboardCorners(img, (self.nx, self.ny), corners, ret)
                plt.imshow(img_drawn_corner)
                plt.show()
    def calibrate_camera(self, img):
        """
        Generates calibration parameters (camera matrix, distortion
        coefficients, rotation/translation vectors) from all accumulated
        corner detections. `img` is only used for its pixel dimensions.
        """
        gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        self.ret, self.mtx, self.dist, self.rvecs, self.tvecs = cv2.calibrateCamera(self.obj_points,
                                                                                   self.img_points,
                                                                                   gray.shape[::-1],
                                                                                   None,
                                                                                   None)
        return
    def undistort_image(self, img):
        """
        Undistort an image using the parameters from calibrate_camera
        (which must have been called first).
        """
        return cv2.undistort(img, self.mtx, self.dist,None, self.mtx)
# -
# ### Generate Calibration Points
# +
calibrationObj_test = PreprocessImage(_NX_CHESSBOARD, _NY_CHESSBOARD)
#Loop through chessboard images to generate calibration parameters
# NOTE(review): backslash path separators make this Windows-only (and rely
# on Python leaving unknown escapes like '\*' intact, which is deprecated);
# prefer os.path.join or forward slashes.
for files in glob.glob('camera_cal\*.jpg'):
    img = cv2.imread(files)
    calibrationObj_test.populate_calibration_points(img, False)
#Calibrate Camera after sufficient calibration images are read
# (only the last image's dimensions are needed here)
calibrationObj_test.calibrate_camera(img)
# Sanity check: undistort one calibration image and save the result.
img = cv2.imread('camera_cal\calibration5.jpg')
out_img = calibrationObj_test.undistort_image(img)
cv2.imwrite("output_images/undistortedImage.jpg", out_img)
# -
# ### Generate Threshold Image and Warp Image
# +
class WarpImage():
    """
    Perspective-transform helper: warps a road image to a bird's-eye view
    (create_warped) and back (create_unwarped) using a fixed source
    trapezoid tuned for this project's camera.
    """
    def __init__(self, img_size):
        # img_size: (width, height). The source trapezoid below is
        # hand-tuned in pixel coordinates for this camera setup.
        offset = 100
        src = np.float32([[552,462], [760,462] , [1350,668],[140,668]])
        dst = np.float32([[offset, offset], [img_size[0]-offset, offset],
                         [img_size[0]-offset, img_size[1]-offset],
                         [offset, img_size[1]-offset]])
        #Parameter to warp an Image
        self.M = cv2.getPerspectiveTransform(src, dst)
        #Generate parameter to unwarp an image
        self.Minv = cv2.getPerspectiveTransform(dst, src)
        # Horizontal offset of the trapezoid's base centre from the image
        # centre (1350/140 are the base x-coordinates of `src`).
        self.bias = ((1350 + 140) - img_size[0]) // 2
        #print("Image size" , img_size)
    def getBias(self):
        # Accessor for the horizontal bias computed in __init__.
        return self.bias
    def create_warped(self, img):
        # Forward transform: camera view -> bird's-eye view.
        img_size = (img.shape[1], img.shape[0])
        warped = cv2.warpPerspective(img, self.M, img_size, flags=cv2.INTER_LINEAR)
        return warped
    def create_unwarped(self, img):
        # Inverse transform: bird's-eye view -> camera view.
        img_size = (img.shape[1], img.shape[0])
        unwarped = cv2.warpPerspective(img, self.Minv, img_size, flags=cv2.INTER_LINEAR)
        return unwarped
def binary_threshold(img):
    """
    Build a binary lane mask by combining a Sobel-x gradient threshold
    with an HLS S-channel colour threshold.
    """
    # Gradient along x picks up the near-vertical lane edges.
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    abs_sobelx = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0))
    scaled_sobel = np.uint8(255 * abs_sobelx / np.max(abs_sobelx))
    grad_min, grad_max = 15, 255
    grad_mask = np.zeros_like(scaled_sobel)
    grad_mask[(scaled_sobel >= grad_min) & (scaled_sobel <= grad_max)] = 1
    # The S (saturation) channel of HLS is comparatively robust to lighting,
    # which makes it the better channel for colour thresholding here.
    s_channel = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)[:, :, 2]
    color_min, color_max = 170, 255
    color_mask = np.zeros_like(s_channel)
    color_mask[(s_channel > color_min) & (s_channel <= color_max)] = 1
    # A pixel counts as "lane" if either detector fires.
    combined_binary = np.zeros_like(grad_mask)
    combined_binary[(color_mask == 1) | (grad_mask == 1)] = 1
    return combined_binary
def test2():
    """
    Smoke-test the threshold + warp stages on test1.jpg.

    Writes the binary mask, a region-of-interest overlay, and their
    warped counterparts into output_images/.
    """
    test_img = cv2.imread("test_images/test1.jpg")
    test_undst = calibrationObj_test.undistort_image(test_img)
    test_binary_img = binary_threshold(test_undst)
    #Plot the trapezoid on the image to fine tune image
    plt.imshow(test_binary_img, cmap='gray')
    # Scale the 0/1 mask up to 0..255 so it survives being written as an image file.
    out = np.zeros(test_binary_img.shape, np.double)
    normalized = cv2.normalize(test_binary_img, out, 255.0, 0.0, cv2.NORM_MINMAX, dtype=cv2.CV_64F)
    cv2.imwrite("output_images/binaryThreshold_test1.png", normalized)
    #[585,446], [715,446] , [1350,668],[140,668]]
    # Trapezoid corners used only for this visual check (top-left, top-right,
    # bottom-right, bottom-left).
    src = np.array([[(585,446),(715,446), (1350,668), (140,668)]], dtype = np.int32)
    roi_img = cv2.polylines(test_undst, src, True, (255, 0, 0), thickness = 2)
    plt.figure()
    plt.imshow(roi_img)
    plt.show()
    cv2.imwrite("output_images/roi.jpg", roi_img)
    test_warp_image_obj = WarpImage((test_img.shape[1], test_img.shape[0]))
    roi_warped = test_warp_image_obj.create_warped(roi_img)
    plt.imshow(roi_warped)
    plt.show()
    cv2.imwrite("output_images/roi_warped.jpg", roi_warped)
    test_warped_image = test_warp_image_obj.create_warped(normalized)
    plt.imshow(test_warped_image, cmap = 'gray')
    plt.show()
    cv2.imwrite("output_images/binaryThreshold_test1_warped.jpg", test_warped_image)

test2()
# +
def hist(img, start_row=462):
    """
    Column-wise histogram of "on" pixels in the lower part of a binary
    warped image; peaks mark likely lane-line x positions.

    Parameters
    ----------
    img : 2-D array
        Binary warped lane image.
    start_row : int, optional
        First row included in the sum.  Defaults to the historical
        hard-coded 462 (roughly the bottom half of a 720-row frame), so
        existing callers behave exactly as before; pass e.g.
        ``img.shape[0] // 2`` for a true bottom-half histogram.
    """
    bottom_section = img[start_row:, :]
    return np.sum(bottom_section, axis=0)
def test1():
    """
    Smoke-test histogram-based lane-base detection on straight_lines1.jpg
    and print the detected left/right base columns.
    """
    test_img = cv2.imread("test_images/straight_lines1.jpg")
    test_undst = calibrationObj_test.undistort_image(test_img)
    test_binary_img = binary_threshold(test_undst)
    test_warp_image_obj = WarpImage((test_img.shape[1], test_img.shape[0]))
    test_warped_image = test_warp_image_obj.create_warped(test_binary_img)
    test_histogram = hist(test_warped_image)
    plt.plot(test_histogram)
    # FIX: np.int was removed in NumPy 1.24 -- use the builtin int instead.
    test_midpoint = int(test_histogram.shape[0] // 2)
    test_leftx_base = np.argmax(test_histogram[:test_midpoint])
    test_rightx_base = np.argmax(test_histogram[test_midpoint:]) + test_midpoint
    if (test_rightx_base - test_leftx_base) > 900:
        # Implausibly wide lane: re-anchor the weaker peak relative to the stronger one.
        print("Redjust the base")
        if test_histogram[test_leftx_base] > test_histogram[test_rightx_base]:
            dummy_start = int(test_leftx_base + 550)
            dummy_midpoint = (dummy_start + test_histogram.shape[0]) // 2
            test_rightx_base = dummy_start + np.argmax(test_histogram[test_leftx_base + 550:dummy_midpoint])
    print(test_leftx_base)
    print(test_rightx_base)

test1()
# +
# Conversions from pixel space to meters (US-highway assumptions:
# ~30 m of road covered by 720 warped rows, ~3.7 m lane width ~ 700 columns).
YM_PER_PIX = 30/720 # meters per pixel in y dimension
XM_PER_PIX = 3.7/700

def adjust_base_pixels(histogram, leftx_base, rightx_base):
    """
    Sanity-check the two histogram peaks used as lane-line bases.

    If the implied lane width is implausible (wider than 900 px or
    narrower than 640 px), the weaker peak is re-searched relative to
    the stronger one.  Returns the (possibly adjusted)
    ``(leftx_base, rightx_base)`` pair.
    """
    max_lane_width = 900
    min_lane_width = 640
    width = rightx_base - leftx_base
    if (width > max_lane_width) or (width < min_lane_width):
        if histogram[leftx_base] > histogram[rightx_base]:
            # Left peak is stronger: re-anchor the right base.
            # FIX: np.int was removed in NumPy 1.24 -- use the builtin int.
            dummy_start = int(leftx_base + min_lane_width)
            dummy_midpoint = (dummy_start + histogram.shape[0]) // 2
            rightx_base = dummy_start + np.argmax(histogram[dummy_start:dummy_midpoint])
        else:
            # Right peak is stronger: re-anchor the left base.
            dummy_start = int(rightx_base - min_lane_width)
            dummy_midpoint = (dummy_start) // 2
            leftx_base = np.argmax(histogram[:dummy_midpoint])
    return leftx_base, rightx_base
def find_lane_pixels(binary_warped):
    """
    Sliding-window lane-pixel search on a binary warped image.

    Locates the two lane-line base columns from a histogram of the
    bottom half of the image, then walks `nwindows` windows up the
    image, collecting nonzero pixels inside each window and re-centering
    the next window on their mean x position.

    Returns
    -------
    (leftx, lefty, rightx, righty, out_img) : pixel coordinates assigned
    to the left / right lines, plus a 3-channel debug image with the
    search windows drawn on it.
    """
    # Histogram of "on" pixels in the bottom half; its peaks mark the lane bases.
    histogram = np.sum(binary_warped[binary_warped.shape[0]//2:,:], axis=0)
    # Output image to draw on and visualize the result.
    out_img = np.dstack((binary_warped, binary_warped, binary_warped))
    # Starting points: strongest peak in each half of the histogram.
    # FIX: np.int was removed in NumPy 1.24 -- use the builtin int instead.
    midpoint = int(histogram.shape[0]//2)
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint
    # Re-anchor a weak peak when the implied lane width is implausible.
    leftx_base, rightx_base = adjust_base_pixels(histogram, leftx_base, rightx_base)
    # HYPERPARAMETERS
    nwindows = 9    # number of sliding windows
    margin = 100    # half-width of each window
    minpix = 50     # minimum pixels found to recenter the next window
    window_height = int(binary_warped.shape[0]//nwindows)
    # x and y positions of all nonzero pixels in the image.
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # Current window centres, updated as we walk up the image.
    leftx_current = leftx_base
    rightx_current = rightx_base
    # Per-window lists of pixel indices belonging to each line.
    left_lane_inds = []
    right_lane_inds = []
    # Step through the windows one by one, bottom to top.
    for window in range(nwindows):
        # Window boundaries in x and y (right and left lines).
        win_y_low = binary_warped.shape[0] - (window+1)*window_height
        win_y_high = binary_warped.shape[0] - window*window_height
        win_xleft_low = leftx_current - margin
        win_xleft_high = leftx_current + margin
        win_xright_low = rightx_current - margin
        win_xright_high = rightx_current + margin
        # Draw the windows on the visualization image.
        cv2.rectangle(out_img,(win_xleft_low,win_y_low),
                      (win_xleft_high,win_y_high),(0,255,0), 2)
        cv2.rectangle(out_img,(win_xright_low,win_y_low),
                      (win_xright_high,win_y_high),(0,255,0), 2)
        # Identify the nonzero pixels in x and y within each window.
        good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                          (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                           (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)
        # If we found > minpix pixels, recenter the next window on their mean position.
        if len(good_left_inds) > minpix:
            leftx_current = int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:
            rightx_current = int(np.mean(nonzerox[good_right_inds]))
    # Flatten the per-window index lists into one array per line.
    try:
        left_lane_inds = np.concatenate(left_lane_inds)
        right_lane_inds = np.concatenate(right_lane_inds)
    except ValueError:
        # Avoids an error if the above is not implemented fully
        pass
    # Extract left and right line pixel positions.
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    return leftx, lefty, rightx, righty, out_img
def fit_polynomial(binary_warped):
    """
    Fit a 2nd-order polynomial x = f(y) to each lane line found by
    find_lane_pixels() and draw both pixels and fitted curves onto the
    debug image, which is returned.
    """
    # Find our lane pixels first
    leftx, lefty, rightx, righty, out_img = find_lane_pixels(binary_warped)
    # Fit a second order polynomial to each using `np.polyfit`.
    # x is fitted as a function of y because lane lines are near-vertical.
    # left_fit_cr = np.polyfit(ploty*ym_per_pix, leftx*xm_per_pix, 2)
    #right_fit_cr = np.polyfit(ploty*ym_per_pix, rightx*xm_per_pix, 2)
    left_fit = np.polyfit(lefty , leftx, 2)
    right_fit = np.polyfit(righty , rightx , 2)
    #print(left_fit)
    # Generate x and y values for plotting
    ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0] )
    try:
        left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
        right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
    except TypeError:
        # Avoids an error if `left` and `right_fit` are still none or incorrect
        #print('The function failed to fit a line!')
        left_fitx = 1*ploty**2 + 1*ploty
        right_fitx = 1*ploty**2 + 1*ploty
    ## Visualization ##
    # Colors in the left (red) and right (blue) lane regions
    out_img[lefty, leftx] = [255, 0, 0]
    out_img[righty, rightx] = [0, 0, 255]
    # Plots the left and right polynomials on the lane lines
    plt.plot(left_fitx, ploty, color='yellow')
    plt.plot(right_fitx, ploty, color='yellow')
    return out_img
def test3():
    """
    Smoke-test the sliding-window polynomial fit on straight_lines1.jpg
    and save the debug visualisation to output_images/.
    """
    test_img = cv2.imread("test_images/straight_lines1.jpg")
    test_undst = calibrationObj_test.undistort_image(test_img)
    test_binary_img = binary_threshold(test_undst)
    #plt.imshow(test_binary_img, cmap='gray')
    #plt.show()
    test_warp_image_obj = WarpImage((test_img.shape[1], test_img.shape[0]))
    test_warped_image = test_warp_image_obj.create_warped(test_binary_img)
    test_out_img = fit_polynomial(test_warped_image)
    plt.imshow(test_out_img)
    cv2.imwrite("output_images/drawn_lines.jpg", test_out_img)

test3()
# -
# +
def test_draw_lane( warp_image_obj, test_undst, test_warped_image):
    """
    Fit both lane lines on the warped binary image, fill the lane
    polygon between them, unwarp it, and blend it onto the undistorted
    frame.  Returns the blended image.
    """
    test_warp_zero = np.zeros_like(test_warped_image).astype(np.uint8)
    test_color_warp = np.dstack((test_warp_zero, test_warp_zero, test_warp_zero))
    test_leftx, test_lefty, test_rightx, test_righty, test_out_img = find_lane_pixels(test_warped_image)
    # Fit a second order polynomial to each using `np.polyfit`
    test_left_fit = np.polyfit(test_lefty , test_leftx, 2)
    test_right_fit = np.polyfit(test_righty , test_rightx , 2)
    # Generate x and y values for plotting
    test_ploty = np.linspace(0, test_warped_image.shape[0]-1, test_warped_image.shape[0] )
    #try:
    test_left_fitx = test_left_fit[0]*test_ploty**2 + test_left_fit[1]*test_ploty + test_left_fit[2]
    test_right_fitx = test_right_fit[0]*test_ploty**2 + test_right_fit[1]*test_ploty + test_right_fit[2]
    # Recast the x and y points into usable format for cv2.fillPoly()
    # (right side reversed so the polygon's outline is traced continuously).
    test_pts_left = np.array([np.transpose(np.vstack([test_left_fitx, test_ploty]))])
    test_pts_right = np.array([np.flipud(np.transpose(np.vstack([test_right_fitx, test_ploty])))])
    test_pts = np.hstack((test_pts_left, test_pts_right))
    # Draw the lane onto the warped blank image
    cv2.fillPoly(test_color_warp, np.int_([test_pts]), (0,255, 0))
    test_newwarp = warp_image_obj.create_unwarped(test_color_warp)
    # Warp the blank back to original image space using inverse perspective matrix (Minv)
    #newwarp = cv2.warpPerspective(color_warp, Minv, (image.shape[1], image.shape[0]))
    # Combine the result with the original image
    result = cv2.addWeighted(test_undst, 1, test_newwarp, 0.3, 0)
    return result
def test4():
    """Run the full draw-lane pipeline on test5.jpg and display the result."""
    test_img = cv2.imread("test_images/test5.jpg")
    test_undst = calibrationObj_test.undistort_image(test_img)
    test_binary_img = binary_threshold(test_undst)
    #plt.imshow(test_binary_img, cmap='gray')
    #plt.show()
    test_warp_image_obj = WarpImage((test_img.shape[1], test_img.shape[0]))
    test_warped_image = test_warp_image_obj.create_warped(test_binary_img)
    plt.imshow(test_draw_lane(test_warp_image_obj, test_undst, test_warped_image))
    #plt.imshow(result)
    plt.show()

test4()
# +
class Lines():
    """
    State holder for a single lane line: the pixels currently assigned
    to it, the polynomial fit through them, and quantities derived from
    the fit (radius of curvature, base x position).
    """
    def __init__(self):
        # was the line detected in the last iteration?
        self.detected = False
        # x values of the last n fits of the line
        self.recent_xfitted = []
        #average x values of the fitted line over the last n iterations
        self.bestx = None
        #polynomial coefficients averaged over the last n iterations
        self.best_fit = None
        #polynomial coefficients for the most recent fit
        self.current_fit = [np.array([False])]
        #radius of curvature of the line in some units
        self.radius_of_curvature = None
        #distance in meters of vehicle center from the line
        self.line_base_pos = None
        #difference in fit coefficients between last and new fits
        self.diffs = np.array([0,0,0], dtype='float')
        #x values for detected line pixels
        self.allx = None
        #y values for detected line pixels
        self.ally = None
        self.ym_per_pix = 30/720 # meters per pixel in y dimension
        self.xm_per_pix = 3.7/700 # meters per pixel in x dimension

    def getBasePosition(self):
        """Return the stored base (bottom-of-image) x position of the line."""
        return self.line_base_pos

    def setBasePosition(self, basePosition):
        """Store the base x position of the line."""
        self.line_base_pos = basePosition

    def fitLine(self, img,warped_img):
        """
        Fit a 2nd-order polynomial x = f(y) through (self.allx, self.ally).

        Callers must populate self.allx / self.ally first.  Only
        img.shape[0] (image height) is used, to evaluate the fit at the
        bottom row for the base position; `warped_img` is currently
        unused (all its uses below are commented out).
        """
        self.current_fit = np.polyfit(self.ally , self.allx, 2)
        # No smoothing over past frames yet: best_fit is just the current fit.
        self.best_fit = self.current_fit
        #self.line_base_pos = ((img.shape[1]) //2 - self.getFit(img.shape[0])) * self.xm_per_pix
        # Base position = fitted x at the bottom row of the image (pixels).
        self.line_base_pos = (self.getFit(img.shape[0]))
        ## Visualization (kept for debugging) ##
        # Colors in the left and right lane regions
        #img[self.ally, self.allx] = [255, 0, 0]
        #out_img[righty, rightx] = [0, 0, 255]
        #print(self.ally)
        #print(self.allx)
        # Plots the left and right polynomials on the lane lines
        #ploty = np.linspace(0, warped_img.shape[0] - 1, warped_img.shape[1])
        #print(ploty)
        #left_fitx = self.current_fit[0]*ploty**2 + self.current_fit[1]*ploty + self.current_fit[2]
        #plt.plot(left_fitx, ploty, color='yellow')
        #plt.plot(right_fitx, ploty, color='yellow')
        #print(self.current_fit)
        #plt.imshow(img)
        #plt.show()
        return

    def calcCurvature(self, y_eval):
        """
        Calculate Radius of Curvature at image row y_eval.

        The pixel-space coefficients are first rescaled to meter space
        (x = mx/my^2 * a * y^2 + (mx/my) * b * y + c) and the standard
        radius formula R = (1 + (2Ay + B)^2)^1.5 / |2A| is applied.
        """
        #y_eval = np.max(self.ally)
        #A = self.best_fit[0]
        #B = self.best_fit[1]
        A = self.xm_per_pix / (self.ym_per_pix ** 2) * self.best_fit[0]
        B = (self.xm_per_pix/self.ym_per_pix) * self.best_fit[1]
        C = self.best_fit[2]
        #x= mx / (my ** 2) *a*(y**2)+(mx/my)*b*y+c
        #self.radius_of_curvature = ((1 + (2*self.best_fit[0]*y_eval*self.ym_per_pix + self.best_fit[1])**2)**1.5) / np.absolute(2*self.best_fit[0])
        self.radius_of_curvature = ((1 + (2*A*y_eval*self.ym_per_pix + B)**2)**1.5) / np.absolute(2* A)
        #self.radius_of_curvature = ((1 + (2*A*y_eval + B)**2)**1.5) / np.absolute(2* A)
        return self.radius_of_curvature

    def getCurvature(self):
        """
        Get current Radius of curvature
        """
        return self.radius_of_curvature

    def getFit(self, ploty):
        """Evaluate the current best fit at y value(s) `ploty` (scalar or array)."""
        try:
            fitx = self.best_fit[0]*ploty**2 + self.best_fit[1]*ploty + self.best_fit[2]
        except TypeError:
            # Avoids an error if `left` and `right_fit` are still none or incorrect
            print('The function failed to fit a line!')
            fitx = 1*ploty**2 + 1*ploty
        return fitx
# -
class DemarcateLanes:
    """
    End-to-end lane-detection pipeline.

    Calibrates the camera from the chessboard images matched by `path`,
    then `drawLanes()` undistorts a frame, thresholds it, warps it to a
    top-down view, fits both lane lines, and projects the detected lane
    back onto the frame annotated with curvature and centre-offset text.
    """
    def __init__(self, path):
        self.imgcenter = 0
        self.left = Lines()
        self.right = Lines()
        # Calibrate the camera (9x6 inner-corner chessboard).
        self.calibrationObj = PreprocessImage(9, 6)
        img = None
        for files in glob.glob(path):
            img = cv2.imread(files)
            self.calibrationObj.populate_calibration_points(img, False)
        self.calibrationObj.calibrate_camera(img)
        # Generate warping and unwarping transforms (sized from the last image).
        self.warp_image_obj = WarpImage((img.shape[1], img.shape[0]))
        # NOTE(review): `detected` is never set to True (the assignment in
        # drawLanes is commented out), so every frame currently takes the
        # full sliding-window path.  Preserved as-is.
        self.detected = 0
        self.line_base_pos = None

    def drawLanes(self, img):
        """Detect the lane in `img` and return the annotated frame."""
        undistorted_image = self.calibrationObj.undistort_image(img)
        bin_thres_img = binary_threshold(undistorted_image)
        warped_image = self.warp_image_obj.create_warped(bin_thres_img)
        if (self.detected == False):
            #self.detected = True
            self.imgcenter = img.shape[1] // 2
            self.drawLanes_firstImage(warped_image)
        else:
            self.drawLanes_subsequentImage(warped_image)
        left_curve, right_curve = self.measure_curvature_real(img.shape[0])
        # Vehicle offset: lane centre (warped coords, corrected by the warp
        # bias) minus image centre, converted to meters.
        lane_center_warped = (self.left.getBasePosition() + self.right.getBasePosition()) / 2
        lane_center = lane_center_warped + self.warp_image_obj.getBias()
        self.line_base_pos = (lane_center - self.imgcenter) * XM_PER_PIX
        out_img = self.plot(warped_image, undistorted_image)
        # Overlay the curvature / offset read-outs.
        font = cv2.FONT_HERSHEY_SIMPLEX
        fontScale = 1
        fontColor = (255, 255, 255)
        lineType = 2
        cv2.putText(out_img, 'Radius of Curvature(Left) = ' + str((left_curve)),
                    (10, 30), font, fontScale, fontColor, lineType)
        cv2.putText(out_img, 'Radius of Curvature(Right) = ' + str((right_curve)),
                    (10, 70), font, fontScale, fontColor, lineType)
        cv2.putText(out_img, 'Distance from center = ' + str(self.line_base_pos),
                    (10, 110), font, fontScale, fontColor, lineType)
        return out_img

    def plot(self, warped_image, undistorted_img):
        """Fill the lane polygon between both fits, unwarp it, blend onto the frame."""
        warp_zero = np.zeros_like(warped_image).astype(np.uint8)
        color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
        ploty = np.linspace(0, warped_image.shape[0] - 1, warped_image.shape[0])
        left_fitx = self.left.getFit(ploty)
        right_fitx = self.right.getFit(ploty)
        # Recast the x and y points into the format cv2.fillPoly() expects
        # (right side reversed so the outline is traced continuously).
        pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
        pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
        pts = np.hstack((pts_left, pts_right))
        cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))
        newwarp = self.warp_image_obj.create_unwarped(color_warp)
        return cv2.addWeighted(undistorted_img, 1, newwarp, 0.3, 0)

    def drawLanes_firstImage(self, warped_image):
        """Full sliding-window search; fits both lines from scratch."""
        (self.left.allx, self.left.ally,
         self.right.allx, self.right.ally, out_img) = find_lane_pixels(warped_image)
        self.left.fitLine(out_img, warped_image)
        self.right.fitLine(out_img, warped_image)
        self.ploty = np.linspace(0, warped_image.shape[0] - 1, warped_image.shape[0])
        return

    def drawLanes_subsequentImage(self, binary_warped):
        """Search around the previous fits instead of a full window scan."""
        # Half-width of the search corridor around each previous polynomial.
        margin = 100
        nonzero = binary_warped.nonzero()
        nonzeroy = np.array(nonzero[0])
        nonzerox = np.array(nonzero[1])
        left_lane_inds = (((nonzerox > self.left.getFit(nonzeroy) - margin)) &
                          (nonzerox < (self.left.getFit(nonzeroy) + margin)))
        right_lane_inds = ((nonzerox > (self.right.getFit(nonzeroy) - margin)) &
                           (nonzerox < (self.right.getFit(nonzeroy) + margin)))
        # Extract left and right line pixel positions.
        self.left.allx = nonzerox[left_lane_inds]
        self.left.ally = nonzeroy[left_lane_inds]
        self.right.allx = nonzerox[right_lane_inds]
        self.right.ally = nonzeroy[right_lane_inds]
        # BUG FIX: Lines.fitLine() requires (img, warped_img); the original
        # called it with no arguments, raising a TypeError whenever this
        # path was taken.  Only img.shape[0] (the height) is actually used.
        self.left.fitLine(binary_warped, binary_warped)
        self.right.fitLine(binary_warped, binary_warped)
        return

    def measure_curvature_real(self, y_eval):
        '''
        Calculates the curvature of both lane polynomials (in meters)
        at image row y_eval (typically the bottom of the frame).
        '''
        left_curverad = self.left.calcCurvature(y_eval)
        right_curverad = self.right.calcCurvature(y_eval)
        return left_curverad, right_curverad

    def measure_offset_real(self):
        """
        Offset of the vehicle from the lane centre, in meters (always >= 0).
        """
        lane_center = (self.right.getBasePosition() + self.left.getBasePosition()) // 2
        # BUG FIX: the original referenced self.xm_per_pix, which this class
        # never defines (it lives on Lines) -> AttributeError.  Use the
        # module-level constant instead.
        if (self.imgcenter > lane_center):
            return (self.imgcenter - lane_center) * XM_PER_PIX
        return (lane_center - self.imgcenter) * XM_PER_PIX
# +
# FIX: forward-slash glob -- 'camera_cal\*.jpg' contains the invalid escape
# sequence '\*' (SyntaxWarning on modern Python) and breaks on POSIX.
drawEngine = DemarcateLanes('camera_cal/*.jpg')
'''
image = cv2.imread("test_images/test4.jpg")
out_img = drawEngine.drawLanes_firstImage(image)
cv2.imwrite("output_images/"+ file, out_img)
'''
# Run the full pipeline over every still test image.
for file in os.listdir("test_images/"):
    image = mpimg.imread("test_images/" + file)
    out_img = drawEngine.drawLanes(image)
    cv2.imwrite("output_images/"+ file, out_img)
# -
# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
from IPython.display import HTML

# FIX: forward-slash glob -- 'camera_cal\*.jpg' contains the invalid escape
# sequence '\*' (SyntaxWarning on modern Python) and breaks on POSIX.
drawEngine = DemarcateLanes('camera_cal/*.jpg')

def process_image(image):
    """Per-frame callback for moviepy: returns the annotated (color) frame."""
    out_img = drawEngine.drawLanes(image)
    left_curve, right_curve = drawEngine.measure_curvature_real(image.shape[0])
    return out_img
    #cv2.imwrite("output_images/"+ file, out_img)

# +
white_output = 'output_images/project_video.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
##clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(0,5)
input_video = "project_video.mp4"
clip1 = VideoFileClip(input_video)
white_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!
# %time white_clip.write_videofile(white_output, audio=False)
# -
| advanced_lane_lines.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Setup
# First, let's make sure this notebook works well in both python 2 and 3, import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures:
# +
# To support both python 2 and python 3
from __future__ import division, print_function, unicode_literals

# Common imports
import numpy as np
import os

# to make this notebook's output stable across runs
np.random.seed(42)

# To plot pretty figures
# %matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
# Enlarge default fonts so the saved figures stay readable.
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "decision_trees"

def image_path(fig_id):
    """Return the (extension-less) path for the figure named `fig_id`."""
    parts = (PROJECT_ROOT_DIR, "images", CHAPTER_ID, fig_id)
    return os.path.join(*parts)

def save_fig(fig_id, tight_layout=True):
    """Save the current matplotlib figure as a 300-dpi PNG under the chapter directory."""
    print("Saving figure", fig_id)
    if tight_layout:
        plt.tight_layout()
    target = image_path(fig_id) + ".png"
    plt.savefig(target, format='png', dpi=300)
# -
# # Training and visualizing
# +
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier

# Fit a shallow (depth-2) decision tree on the last two iris features.
iris = load_iris()
X = iris.data[:, 2:] # petal length and width
y = iris.target

tree_clf = DecisionTreeClassifier(max_depth=2, random_state=42)
tree_clf.fit(X, y)
# +
from sklearn.tree import export_graphviz

# Export the fitted tree to Graphviz .dot format (render with `dot -Tpng`).
export_graphviz(
        tree_clf,
        out_file=image_path("iris_tree.dot"),
        feature_names=iris.feature_names[2:],
        class_names=iris.target_names,
        rounded=True,
        filled=True
    )
# +
from matplotlib.colors import ListedColormap

def plot_decision_boundary(clf, X, y, axes=(0, 7.5, 0, 3), iris=True, legend=False, plot_training=True):
    """
    Plot a classifier's decision regions over a 2-D feature space.

    Parameters
    ----------
    clf : fitted classifier exposing .predict.
    X, y : features of shape (n_samples, 2) and integer labels 0/1/2.
    axes : (x_min, x_max, y_min, y_max) plot limits.  FIX: a tuple rather
        than a list so the default is immutable (avoids the shared
        mutable-default pitfall); behavior is unchanged.
    iris : if True, label the axes as iris petal measurements.
    legend, plot_training : toggle the legend / training-point scatter.
    """
    # Evaluate the classifier on a 100x100 grid spanning `axes`.
    x1s = np.linspace(axes[0], axes[1], 100)
    x2s = np.linspace(axes[2], axes[3], 100)
    x1, x2 = np.meshgrid(x1s, x2s)
    X_new = np.c_[x1.ravel(), x2.ravel()]
    y_pred = clf.predict(X_new).reshape(x1.shape)
    custom_cmap = ListedColormap(['#fafab0','#9898ff','#a0faa0'])
    plt.contourf(x1, x2, y_pred, alpha=0.3, cmap=custom_cmap)
    if not iris:
        # Darker contour lines for non-iris plots.
        custom_cmap2 = ListedColormap(['#7d7d58','#4c4c7f','#507d50'])
        plt.contour(x1, x2, y_pred, cmap=custom_cmap2, alpha=0.8)
    if plot_training:
        plt.plot(X[:, 0][y==0], X[:, 1][y==0], "yo", label="Iris-Setosa")
        plt.plot(X[:, 0][y==1], X[:, 1][y==1], "bs", label="Iris-Versicolor")
        plt.plot(X[:, 0][y==2], X[:, 1][y==2], "g^", label="Iris-Virginica")
    plt.axis(axes)
    if iris:
        plt.xlabel("Petal length", fontsize=14)
        plt.ylabel("Petal width", fontsize=14)
    else:
        plt.xlabel(r"$x_1$", fontsize=18)
        plt.ylabel(r"$x_2$", fontsize=18, rotation=0)
    if legend:
        plt.legend(loc="lower right", fontsize=14)

plt.figure(figsize=(8, 4))
plot_decision_boundary(tree_clf, X, y)
save_fig("decision_tree_decision_boundaries_plot")
plt.show()
# -
| 04-DecisionTrees.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Part of this file can't be rendered in GitHub. Refer to the following link for a properly rendered version of this file: https://nbviewer.jupyter.org/github/sfu-db/dataprep/blob/develop/examples/DataConnector_DBLP.ipynb
# # Connector for DBLP
#
# In this example, we will be going over how to use Connector with DBLP.
# ## Prerequisites
#
# Connector is a component in the DataPrep library that aims to simplify data access by providing a standard API set. The goal is to help users skip the complex API configuration. In this tutorial, we demonstrate how to use the connector component with DBLP.
#
# If you haven't installed DataPrep, run command `!pip install dataprep` or execute the following cell.
# Run me if you'd like to install
# !pip install dataprep
# The code below gives you a peek what data is available behind the DBLP API.
from dataprep.connector import info as info
# Show which tables/endpoints the bundled DBLP configuration exposes.
info('dblp')
# # Download and store the configuration files in DataPrep.
#
# The configuration files are used to configure the parameters and initial setup for the API.
# You will use the configuration files here [Configuration Files](https://github.com/sfu-db/DataConnectorConfigs) by default.
# The configuration files can be manually downloaded and modified.
# You can specify the directory of the configuration files using the "config_path" parameter.
# # Initialize connector
#
# To initialize, run the following code. To fetch DBLP data, tokens and client information are not needed.
from dataprep.connector import connect
# Build a connector for the DBLP API; no credentials are required.
conn = connect("dblp")
# Top-level await is notebook-only syntax; fetch up to 2000 matching publications.
df = await conn.query("publication", q="CVPR 2020", _count=2000)
df.head()
# # Functionalities
#
# Connector has several functions you can perform to gain insight on the data downloaded from DBLP.
# ### Connector.info
# The info method gives information and guidelines on using the connector. There are 4 sections in the response and they are table, parameters, example and schema.
#
# >1. Table - The table(s) being accessed.
# >2. Parameters - Identifies which parameters can be used to call the method.
# >3. Examples - Shows how you can call the methods in the Connector class.
# >4. Schema - Names and data types of attributes in the response.
# Print tables, parameters, examples and schema for this connector.
conn.info()
# ### Connector.query
# The query method downloads the website data and displays it in a Dataframe. The parameters must meet the requirements as indicated in connector.info for the operation to run.
#
# When the data is received from the server, it will either be in a JSON or XML format. The connector reformats the data in pandas Dataframe for the convenience of downstream operations.
#
# As an example, let's try to get the data from the "publication" table, using "lee" as the query search.
# Search DBLP publications matching "lee"; results arrive as a pandas DataFrame.
df = await conn.query('publication', q='lee', _count=20)
df
# From query results, you can see how easy it is to download the publication data from DBLP into a pandas Dataframe.
#
# Now that you have an understanding of how connector operates, you can easily accomplish the task with two lines of code.
#
#
# >1. dc = Connector(...)
# >2. dc.query(...)
# # That's all for now.
# If you are interested in writing your own configuration file or modifying an existing one, refer to the [Configuration Files](https://github.com/sfu-db/DataConnectorConfigs).
| examples/DataConnector_DBLP.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (imgaug37)
# language: python
# name: imgaug37
# ---
# # Augmentation on multiple CPU cores
# Augmentation can be a slow process, especially when working with large images and when combining many different augmentation techniques. Take a look at the [performance documentation](https://imgaug.readthedocs.io/en/latest/source/performance.html) for some lower bounds on the expected single core performance using outdated hardware.
#
# One way to improve performance is to augment simultaneously on multiple CPU cores. `imgaug` offers a native system to do that. It is based on roughly the following steps:
# 1. Split the dataset into batches. Each batch contains one or more images and the additional data associated with them, e.g. bounding boxes or segmentation maps. (Splitting the data can also be done on the fly using a generator.)
# 2. Start one or more child processes. Each of them runs on its own CPU core.
# 3. Send batches to the child processes. Try to distribute them equally over the child processes so that each of them has a similar amount of work to do.
# 4. Let the child processes augment the data.
# 5. Receive the augmented batches from the child processes.
# A few important points can be derived from these steps. First, the data has to be split into batches. Second, combining all data into one batch and using multicore augmentation is pointless, as each individual batch is augmented by exactly one core. Third, using multicore augmentation for small amounts of data can also be pointless as starting the child processes might take up more time than simply augmenting the dataset on a single CPU core. (Though, you can re-use the child processes between epochs, so it might still pay off.)
#
# **Important**: `imgaug` offers multicore features and it is recommended to use them for multicore augmentation. It is **not** recommended to execute `imgaug` in a custom-made multicore routine using e.g. python's `multiprocessing` library or by using the multiprocessing support of some deep learning libraries. Doing so runs a major risk to accidentally apply the same augmentations in each child worker (just to different images). If one still decides to build a custom implementation, make sure to call `imgaug.seed(value)` and `augmenter.reseed(value)` with *different seeds per child process*. Generating debug outputs per child process is then also recommended. Messing this up is easy and hard to even notice.
# ## Example: augment_batches(..., background=True)
# The easiest way to do multicore augmentation in `imgaug` is to call `augment_batches(..., background=True)`. It works similarly to e.g. `augment_images()`. The difference is that it expects a list of `imgaug.augmentables.batches.Batch` or `imgaug.augmentables.batches.UnnormalizedBatch` instances. Each of these instances contains the data of a batch, e.g. images or bounding boxes. Creating a batch is trivial and can be done via e.g. `batch = UnnormalizedBatch(images=<list of numpy arrays>, bounding_boxes=<list of imgaug.BoundingBoxOnImages>)`. Another difference to `augment_images()` is that `augment_batches()` returns a generator, which continuously yields augmented batches as they are received from the child processes. The final (and important) difference is that `augment_batches()` currently does *not* use the random state set in the augmenter, but rather picks a new one. That is because otherwise all child processes would apply the same augmentations (just to different images). If you need more control over the random state use `pool()` or `imgaug.multicore.Pool` instead (see further below).
# Let's try to use `augment_batches()`. First, we define some example data.
# +
import numpy as np
import imgaug as ia
# %matplotlib inline

# 100 batches of 16 identical 256x256 quokka images -- enough work to make
# the single-core vs. multicore timing difference visible.
BATCH_SIZE = 16
NB_BATCHES = 100

image = ia.quokka_square(size=(256, 256))
images = [np.copy(image) for _ in range(BATCH_SIZE)]
# -
# Now we combine the images to `UnnormalizedBatch` instances:
from imgaug.augmentables.batches import UnnormalizedBatch
# Wrap the images in batch objects; each batch is augmented by exactly one worker.
batches = [UnnormalizedBatch(images=images) for _ in range(NB_BATCHES)]
# As the data here is already well normalized, we could also use `imgaug.augmentables.batches.Batch` instead, which has the same interface. If e.g. we had keypoints and these were provided to as lists of xy-tuples, `UnnormalizedBatch` would have to be used and it would convert these lists to `KeypointsOnImage` instances.
# Our augmentation sequence contains `PiecewiseAffine`, which tends to be a very slow augmenter. We further slow it down by using a denser grid of points on the image. Each such point will lead to more local affine transformations being applied.
# +
from imgaug import augmenters as iaa
# Benchmark pipeline: PiecewiseAffine dominates the runtime; the dense 6x6
# grid of control points makes it deliberately slow.
aug = iaa.Sequential([
    iaa.PiecewiseAffine(scale=0.05, nb_cols=6, nb_rows=6), # very slow
    iaa.Fliplr(0.5), # very fast
    iaa.CropAndPad(px=(-10, 10)) # very fast
])
# -
# Now we augment the generated batches. Let's first augment without multicore augmentation to see how long a single CPU core needs. `augment_batches()` returns a generator of `Batch` instances. We can then access the augmented images via the attribute `UnnormalizedBatch.images_aug`.
# +
import time
# Baseline: single-process augmentation (background=False) for comparison.
time_start = time.time()
batches_aug = list(aug.augment_batches(batches, background=False))  # list() converts generator to list
time_end = time.time()
print("Augmentation done in %.2fs" % (time_end - time_start,))
# Show the first augmented image as a sanity check.
ia.imshow(batches_aug[0].images_aug[0])
# -
# Roughly 130 seconds for 100 batches, each containing 16 images of size 256x256. That's about 0.08s per image. Not very fast, the GPU would most likely train faster than this. Let's try it instead with multicore augmentation.
# +
# Same augmentation, now distributed over child processes.
time_start = time.time()
batches_aug = list(aug.augment_batches(batches, background=True)) # background=True for multicore aug
time_end = time.time()
print("Augmentation done in %.2fs" % (time_end - time_start,))
ia.imshow(batches_aug[0].images_aug[0])
# -
# Down to less than 30 seconds -- or roughly a quarter of the single core time. That is already much better. Note that this is on an outdated CPU with 4 cores and 8 threads. A modern 8 core CPU should benefit even more.
# ## Batches with Non-Image Data
#
# The example above only showed how to augment images. Often enough, you will also want to augment e.g. keypoints or bounding boxes on these. That is achieved by a trivial change when creating `UnnormalizedBatch` instances. You do not have to worry about random states or stochastic/deterministic mode in this case. `imgaug` will automatically handle that and make sure that the augmentations between images and associated data align.
# Let's extend our previous example data with some keypoints.
# +
# Smaller example images (20% of the standard quokka size), each batch paired
# with deep-copied keypoints so batches share no mutable state.
BATCH_SIZE = 16
NB_BATCHES = 100
image = ia.quokka(size=0.2)
images = [np.copy(image) for _ in range(BATCH_SIZE)]
keypoints = ia.quokka_keypoints(size=0.2)
keypoints = [keypoints.deepcopy() for _ in range(BATCH_SIZE)]
batches = [UnnormalizedBatch(images=images, keypoints=keypoints) for _ in range(NB_BATCHES)]
# -
# And now augment the data in the same way as before:
# +
time_start = time.time()
batches_aug = list(aug.augment_batches(batches, background=True)) # background=True for multicore aug
time_end = time.time()
print("Augmentation done in %.2fs" % (time_end - time_start,))
# Draw the augmented keypoints onto the first augmented image to verify that
# image and keypoint augmentations stayed aligned across child processes.
ia.imshow(
    batches_aug[0].keypoints_aug[0].draw_on_image(
        batches_aug[0].images_aug[0]
    )
)
# -
# And that's it. Simply add `keypoints=<list of imgaug.augmentables.kps.KeypointsOnImage>` when instantiating an `UnnormalizedBatch` instance and the rest is handled by the library. The same can be done for bounding boxes (`bounding_boxes=<list of imgaug.augmentables.bbs.BoundingBoxesOnImage>`), heatmaps (`heatmaps=<list of imgaug.augmentables.heatmaps.HeatmapsOnImage>`) or segmentation maps (`segmentation_maps=<list of imgaug.augmentables.segmaps.SegmentationMapOnImage>`). Just make sure that the lists have the same length and entries with the same index actually belong to each other (e.g. image `0014059.jpg` and the keypoints for image `0014059.jpg`).
#
# You might have noticed that the augmentation time here went up from ~30 seconds to ~80 seconds -- just by adding keypoints. That is because `PiecewiseAffine` uses an image based method for keypoint augmentation due to inaccuracies when transforming keypoints as coordinates. It is currently the slowest keypoint augmenter in the library (so avoid using `PiecewiseAffine` when augmenting keypoints or bounding boxes).
# # Using Pool
# `augment_batches()` is easy to use, but does not offer much customization. If you want to e.g. control the number of used CPU cores or the random number seed, `augmenter.pool()` is a simple alternative (and it is the backend that `augment_batches()` uses). The example below augments again the previously defined batches, this time with `pool()`. We configure the pool to use all CPU cores except one (`processes=-1`), restart child processes after 20 tasks (`maxtasksperchild=20`) and to start with a random number seed of `1`. The argument `maxtasksperchild` can be useful if you deal with memory leaks that lead to more and more memory consumption over time. If you don't have this problem, there is no reason to use the argument (and it does cost performance to use it).
# processes=-1: all CPU cores but one; maxtasksperchild=20: respawn workers
# after 20 tasks (guards against memory leaks, costs some speed);
# seed=1: reproducible augmentations.
with aug.pool(processes=-1, maxtasksperchild=20, seed=1) as pool:
    batches_aug = pool.map_batches(batches)
ia.imshow(batches_aug[0].images_aug[0])
# Note that we called `map_batches()` here exactly once to augment the input batches. In practice, we can call that command many times for each generated pool using different input batches -- and it is recommended to do so, because creating a new pool requires respawning the child processes, which does cost some time.
# `augmenter.pool()` is a shortcut that creates an instance of `imgaug.multicore.Pool`, which again is a wrapper around python's `multiprocessing.Pool`. The wrapper deals mainly with the correct management of random states between child processes. The below example shows the usage of `imgaug.multicore.Pool`, using the same seed as in the `augmenter.pool()` example above and hence generating the same output.
# +
from imgaug import multicore
# Equivalent to aug.pool(...) above, but constructing imgaug.multicore.Pool
# directly; same seed, hence same output.
with multicore.Pool(aug, processes=-1, maxtasksperchild=20, seed=1) as pool:
    batches_aug = pool.map_batches(batches)
ia.imshow(batches_aug[0].images_aug[0])
# -
# # Using Pool with Generators
# The two previous examples showed how to use lists with `imgaug`'s Pool. For large datasets, using generators can be more appropriate to avoid having to store the whole dataset in memory. This is trivially done by replacing `map_batches(<list>)` with `imap_batches(<generator>)`. The output of that function is also a generator.
# +
def create_generator(lst):
    """Yield the entries of *lst* one at a time (a minimal example generator)."""
    yield from lst
my_generator = create_generator(batches)
with aug.pool(processes=-1, seed=1) as pool:
    # imap_batches() consumes the generator lazily and yields augmented
    # batches as they become available (here in input order).
    batches_aug = pool.imap_batches(my_generator)
    for i, batch_aug in enumerate(batches_aug):
        if i == 0:
            ia.imshow(batch_aug.images_aug[0])
        # do something else with the batch here
# -
# Note that if you don't need your batches to be returned in the same order as you provided them, you can also use `imap_batches_unordered()` instead of `imap_batches()`. The unordered method tends to be faster.
# # Rate-Limiting Pool to decrease maximum RAM requirements
#
# *New in version 0.3.0.*
#
# By default, pool will *greedily* load (and augment) as many batches as possible from generators. There is no rate-limiting that restricts how many augmented batches are at most allowed to "wait" in the pipeline. That means that in the worst case (when the model trains very slowly, while the augmentation is very fast) the whole augmented dataset could be waiting to be retrieved, which would potentially lead to high RAM usage, depending on how large the dataset is.
#
# To fix this problem, the argument `output_buffer_size` can be used. The value controls how many batches are at most allowed to exist within the whole augmentation pipeline, i.e. `imap_batches(gen)` will load new batches from `gen` until `output_buffer_size` batches are reached and then only load another batch from `gen` whenever it has successfully yielded an augmented batch.
#
# Below code shows an example. It is similar to the one above, but uses an augmentation pipeline that produces batches faster than they are consumed. Messages are printed that show exactly when batches are loaded and when they are requested from the augmentation pipeline. To limit the RAM requirements of this fast pipeline, `output_buffer_size=5` is used, restricting the allowed number of waiting batches to five. Note that batches in imgaug contain the images before *and* after augmentation, so the effective RAM requirement is here `5*2*I`, where `I` is the size of a single image. In practice that value should be viewed as a lower bound for the actual RAM demands, as e.g. copying data to background processes could temporarily double the requirements.
# +
import time
# We use a single, very fast augmenter here to show that batches
# are only loaded once there is space again in the buffer.
# Horizontal flip with probability 0.5 -- negligible cost per batch.
pipeline = iaa.Fliplr(0.5)
def create_generator(lst):
    """Yield entries of *lst*, announcing each load so buffering is observable."""
    for batch in lst:
        print("Loading next unaugmented batch...")
        yield batch
# only use 25 batches here, which is enough to show the effect
my_generator = create_generator(batches[0:25])
with pipeline.pool(processes=-1, seed=1) as pool:
    # output_buffer_size=5 caps how many batches may exist in the pipeline at
    # once, bounding RAM: a new input batch is pulled only after an augmented
    # one has been yielded.
    batches_aug = pool.imap_batches(my_generator, output_buffer_size=5)
    print("Requesting next augmented batch...")
    for i, batch_aug in enumerate(batches_aug):
        # sleep here for a while to simulate a slowly training model
        time.sleep(0.1)
        if i < len(batches)-1:
            print("Requesting next augmented batch...")
# -
# The method `imap_batches_unordered()` also supports `output_buffer_size`. However, `map_batches()` does not support the argument and always augments the whole input list.
# ## Conclusion
#
# So to use multicore augmentation with `imgaug` just do the following:
# * Convert your data to instances of `imgaug.Batch`. Make sure that corresponding data has the same list index within the batch, e.g. images and their corresponding keypoints.
# * Call `augmenter.augment_batches(batches, background=True)`. This returns a generator.
# * Use `augmenter.pool([processes], [maxtasksperchild], [seed])` if you need more control or want to use a generator as input. Call `pool.map_batches(list)` or `pool.imap_batches(generator)` on the pool.
# * Avoid implementing your own multicore system or using another library for that as it is easy to mess up.
| notebooks/A03 - Multicore Augmentation.ipynb |
# ---
# jupyter:
# jupytext:
# split_at_heading: true
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="Hg-zX739jQQ0"
from fastbook import *
from fastai.vision.widgets import *
# -
path = Path()  # current working directory
# + id="4IsllUQ1jQSH"
# Load the previously exported learner from flag.pkl.
learn_inf = load_learner(path/'flag.pkl')
# + colab={"base_uri": "https://localhost:8080/", "height": 49, "referenced_widgets": ["616bf2db333e4ca1ac412c62ee928193", "ae3919aa220b4088b80662721477e4cd", "e33a77437bc34c06af7b56cd2f17c337"]} id="bXuYBKQKjQSV" outputId="75d0d059-0a81-49f9-fb74-b06302954937"
# UI widgets: file uploader, output area for the image preview, label for the
# prediction text, and the button that triggers classification.
btn_upload = widgets.FileUpload()
out_pl = widgets.Output()
lbl_pred = widgets.Label()
btn_run = widgets.Button(description='Classify')
# + id="TIk7c-zKjQSX"
def on_click_classify(change):
    """Handle a click on the Classify button.

    Reads the most recently uploaded file, previews it in the output area,
    runs the learner on it and writes the prediction into the label widget.
    """
    uploaded = PILImage.create(btn_upload.data[-1])
    out_pl.clear_output()
    with out_pl:
        display(uploaded.to_thumb(128, 128))
    pred, pred_idx, probs = learn_inf.predict(uploaded)
    lbl_pred.value = f'Prediction: {pred}; Probability: {probs[pred_idx]:.04f}'

# Register the callback on the button.
btn_run.on_click(on_click_classify)
# + colab={"base_uri": "https://localhost:8080/", "height": 230, "referenced_widgets": ["57da8d39d8934c0899aeb276e64cc86b", "0b1a388d2ec24739b89134ce48e30dc4", "<KEY>", "<KEY>", "616bf2db333e4ca1ac412c62ee928193", "<KEY>", "f38351e8b46a43b182d59d2357835017", "8184717c221148b2ac4e2ede29e50b31", "5b05b00187fc41eca2ea2a630cf71548", "ed2cea49859b482d8c5efe5543693d4f", "cdd3fbf345ef4f7caa8f36a341e581ed", "<KEY>", "e33a77437bc34c06af7b56cd2f17c337", "86e7c4d9293b4d04895ba74dec7909a3", "18ea6b47cf5149e5a4d36caef00013e8", "3b03af52887f4adcbd8618ec4b4d8f5a"]} id="Dmm3xSuNjQSY" outputId="6ce29279-3eec-46f2-fa6f-d8c23b8911fb"
# Assemble and display the UI vertically: prompt, uploader, button, preview, result.
VBox([widgets.Label('Select your flag!'),
      btn_upload, btn_run, out_pl, lbl_pred])
| flag_predict.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Convert from ENVI to NetCDF
#
# This is a pure GDAL/netCDF4 implementation of conversion from ENVI raster image for AVIRIS-NG. I will document a simpler workflow using `xarray` some other time.
#
# <h2 id="tocheading">Table of Contents</h2>
# <br>
# <div id="toc"></div>
#
# *This next cell calls a script to generate a TOC. It will display above when this notebook is opened in the Jupyter environment. Ignore.*
# + language="javascript"
# $.getScript('scripts/tocgen.js')
# -
# ## Review
#
# Import some functions based on the gdal logic covered in [0_read_with_gdal.ipynb](0_read_with_gdal.ipynb):
from scripts.gdalfunc import *
# ## Workflow
#
# ### Imports and example file
# Import requirements. Minimal packages:
# +
#import numpy as np
import netCDF4 as nc4
#from osgeo import gdal, osr
#from pyproj import Proj, transform
from scripts.prog import log_progress # optional
# -
# Example reflectance file:
# +
# Paths to the AVIRIS-NG reflectance image (ENVI binary + header); open the
# binary with GDAL.
img = 'data/ang20180814t224053_rfl_v2r2/ang20180814t224053_corr_v2r2_img'
hdr = 'data/ang20180814t224053_rfl_v2r2/ang20180814t224053_corr_v2r2_img.hdr'
ds = gdal.Open(img)
# -
# ### Creating a new netCDF dataset
#
# The remaining the steps use the basic functionality of the Python interface to *libnetcdf* (maintained by Unidata).
#
# #### Open in write mode
# Open a netCDF4.Dataset for writing:
# +
# Create (or truncate) the output file in write mode; the trailing bare
# expression displays the Dataset repr in the notebook.
nc = nc4.Dataset("output/test.nc", "w")
nc
# -
# #### Add coordinate dimensions
# Use the `get_shape()` function defined above to get the shape of the raster image and add dimensions **band, y, x**:
# +
bands, cols, rows = get_shape(ds) # get image shape
band_dim = nc.createDimension('band') # add band dim (no size given -> unlimited)
y_dim = nc.createDimension('y', size=rows) # add y dim
x_dim = nc.createDimension('x', size=cols) # add x dim
nc
# -
# #### Add coordinate variables
# Use the `get_xy_arrays()` function defined above to get the two 1d arrays of x and y coordinates, then add the x and y variables with proper CF attributes:
# +
xarr, yarr = get_xy_arrays(ds)
# y coordinate variable with CF-style attributes (projected meters).
y_var = nc.createVariable('y', 'float32', ('y'))
y_var.units = "m"
y_var.standard_name = "projection_y_coordinate"
y_var.long_name = "y coordinate of projection"
y_var[:] = yarr
# x coordinate variable, analogous to y.
x_var = nc.createVariable('x', 'float32', ('x'))
x_var.units = "m"
x_var.standard_name = "projection_x_coordinate"
x_var.long_name = "x coordinate of projection"
x_var[:] = xarr
nc
# -
# Now add a variable to store the band numbers:
# 1-based band indices, matching GDAL's band numbering.
bandnum_var = nc.createVariable('band', 'short', ('band'))
bandnum_var.long_name = "AVIRIS-NG band/channel number"
bandnum_var[:] = list(range(1, bands+1))
# #### Grid mapping
# Make a function to collect some attributes for the `crs` variable:
def get_crs_attributes(ds):
    """Collect projection parameters for CRS variable."""
    srs, proj4_str = get_proj(ds)
    # Map GDAL projection-parameter names to their CF attribute names.
    cf_names = {
        "scale_factor": "scale_factor_at_central_meridian",
        "central_meridian": "longitude_of_central_meridian",
        "latitude_of_origin": "latitude_of_projection_origin",
        "false_easting": "false_easting",
        "false_northing": "false_northing"}
    attrs = {}
    for gdal_name, cf_name in cf_names.items():
        attrs[cf_name] = srs.GetProjParm(gdal_name)
    attrs["utm_zone"] = srs.GetUTMZone()
    attrs["proj4"] = proj4_str
    attrs["crs_wkt"] = ds.GetProjection()
    return attrs
# Test the new function and add the `crs` variable to the file:
# +
# Test the helper and create the (scalar) grid-mapping variable 'crs' that
# data variables will reference via their 'grid_mapping' attribute.
crs_atts = get_crs_attributes(ds)
crs_var = nc.createVariable('crs', 'short')
crs_var.grid_mapping_name = "transverse_mercator"
for att,val in crs_atts.items():
    crs_var.setncattr(att,val)
print(crs_var)
# -
# #### Latitude and longitude variables
# The projected netCDF specification (CF) requires that 2-dimensional arrays be stored for both latitude and longitude.
#
# Use the function defined above `get_latlon_arrays()` to get the arrays and add the **lat, lon** variables:
# +
latarr2d, lonarr2d = get_latlon_arrays(ds)
# 2d latitude/longitude variables (CF requires 2d lat/lon for projected grids).
lat_var = nc.createVariable('lat', 'float32', ('y', 'x'))
lat_var.units = "degrees_north"
lat_var.standard_name = "latitude"
lat_var.long_name = "latitude coordinate"
lat_var[:,:] = latarr2d
lon_var = nc.createVariable('lon', 'float32', ('y', 'x'))
lon_var.units = "degrees_east"
lon_var.standard_name = "longitude"
lon_var.long_name = "longitude coordinate"
lon_var[:,:] = lonarr2d
nc
# -
# #### Wavelength and reflectance variables
#
# Add two final variables to the file without setting values:
# 1. a 1d variable (band dimension) to store the wavelength of the center of each band (floats)
# 2. a 3d variable (band, y, x) to store the raster
# +
# add wavelength variable
bandwav_var = nc.createVariable('band_wavelength', 'float32', ('band'))
bandwav_var.units = "nanometers"
bandwav_var.long_name = "wavelength of band center"
# add reflectance variable
# NOTE(review): dimensions are (y, x, band), not (band, y, x) as the prose
# above says; the write loop below indexes [:, :, b-1] consistently with this.
refl_var = nc.createVariable('reflectance', 'float32', ('y', 'x', 'band'))
refl_var.units = "unitless"
refl_var.coordinates = "lon lat"
refl_var.grid_mapping = "crs"
refl_var.standard_name = "reflectance"
refl_var.long_name = "atmospherically corrected surface reflectance"
nc
# -
# **Write buffered data to the output file:**
nc.sync()
# ### Read from ENVI and write to netCDF in a loop
#
# This last section demonstrates how to read from the ENVI raster image and write to the output netCDF in a loop (one band at a time) such that we never have more than one band of the image in memory at a time.
#
# #### Parsing band metadata from ENVI image
# This snippet is imperfect. It may not work for ENVI raster images that aren't generated by the standard AVIRIS-NG processing logic at JPL. Hard to say. But, here we are parsing the band number:center wavelength pairs accessible via GDAL through the band-level metadata:
# +
import collections
# make a dictionary of band number, band center pairs
# assumes metadata keys look like "Band_<n>" and values like
# "<center> <unit>" -- TODO confirm for non-JPL ENVI products.
wl = {}
for n, w in ds.GetMetadata().items():
    if "Band" in n:
        wl[int(n.split("_")[1])] = float(w.split(" ")[0])
# convert to ordered dict and sort
wl = collections.OrderedDict(sorted(wl.items()))
wl[1]
# -
# #### Writing in a loop
# Iterate over the length of the band dimension, writing each raster band to variable `reflectance` and its wavelength to variable `band_wavelength`:
# +
# One band in memory at a time: read band b from the ENVI image, mask the
# nodata value, and write it plus its wavelength into the netCDF variables.
for b in log_progress(range(1, nc.dimensions['band'].size + 1)):
    #print("Writing band " + str(b) + " ... ")
    # get band b from input raster image
    barr = ds.GetRasterBand(b).ReadAsArray()
    # set all -9999. to numpy.nan
    barr[barr==-9999.] = np.nan
    # update the nc reflectance array for band b
    nc.variables['reflectance'][:, :, b-1] = barr
    # update the nc wavelength array for band b
    nc.variables['band_wavelength'][b-1] = wl[b]
# write changes to disk
nc.sync()
nc
# -
# #### Add global attributes
#
# Use the function defined above `get_global_attributes(<header_file.hdr>)` to get a dictionary of global attributes and add them to the netCDF in a loop:
# +
# get global attributes (parsed from the ENVI header file)
global_atts = get_global_attributes(hdr)
# add global attributes
for att,val in global_atts.items():
    nc.setncattr(att,val)
# -
# #### Close the file
#
# Do not forget to close the file! Python will close it during clean up when your session ends, but it's best to close manually:
# Flushes remaining buffers and releases the file handle.
nc.close()
| notebooks/1_convert_to_netcdf.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# Getting Started with Conda / python / Jupyter Notebook environment
# --
# Preamble
# --
# Choose a working directory (IMPORTANT)
#
# Check how much space you have in your network home dir (/homes/yourlogin). You have a 7 GB quota, so this should be more than enough. It is highly recommended to work there, as this is backed up and saved daily.
# If you don't have enough space there, FREE SOME SPACE (recommended), or alternatively you can create a folder under /users/local/ (but data is not guaranteed to stay there).
# Checking out the git repository of the course
# --
#
# Change dir to your working directory and type
#
# git clone https://github.com/brain-bzh/INT277.git
#
# This will create a INT277 folder containing Jupyter Notebooks to start the projects.
# Neuroimaging data viewer
# --
# See here to install [MRICRON](http://people.cas.sc.edu/rorden/mricron/install.html)
#
# Alternatively, you can also use this online viewer : http://ric.uthscsa.edu/mango/papaya/index.html
#
# Miniconda installation
# --
# 1. Open a terminal
# 2. go to the chosen working directory
# 3. wget https://repo.continuum.io/miniconda/Miniconda2-latest-Linux-x86_64.sh
# 4. bash Miniconda2-latest-Linux-x86_64.sh
# 5. Review license agreement, press ENTER. Press q to exit the license agreement... type "yes", then change installation dir (if necessary)
# Virtual environments
# --
#
# We will use a virtual environment in Conda, which makes it easier to install new packages without admin privileges.
#
# Create a new environment with the following command
#
# conda create -n intersem python
#
# You can now list the available environments by typing :
#
# conda env list
#
# You should see the newly created environment "intersem". Activate it using :
#
# source activate intersem
#
# You will see (intersem) at the beginning of your terminal, indicating you are now working under the virtual environment.
#
# To deactivate the current environment, just type :
#
# source deactivate
#
# More on conda environments [here](http://conda.pydata.org/docs/using/envs.html).
# Installing new packages using PIP
# --
#
# When the environment is activated, install new packages using pip. For example :
#
# pip install jupyter
#
# To see all packages currently available under an environment, type :
#
# conda list
# For the projects, we will need the following packages :
# - jupyter
# - matplotlib
# - pandas
# - Nilearn
# - sklearn
# - nibabel
#
#
#
# You can install them all in one go by doing :
#
# pip install jupyter matplotlib pandas nilearn sklearn nibabel
| installation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (Anaconda 5)
# language: python
# name: anaconda5
# ---
# Name: <NAME>, <NAME>
#
# Student ID: 2294644,2291700
#
# Email: <EMAIL>,<EMAIL>
#
# Course: PHYS220/MATH220/CPSC220 Fall 2018
#
# Assignment: CW10
# # Abstract
# There are many analytical methods to solve an ordinary differential equation (ODE), but since the advent of computers, and in this paper, we are interested in numerical approximations of the solution. To further elaborate, we will be discussing 5 solution methods and what each one actually does. These methods include:
#
# Euler's Method, Leapfrog (Midpoint) Method, Heun's (Trapezoid) Method, 2nd-order Runge-Kutta Method, and 4th-order Runge-Kutta Method.
# ## Introduction
# Consider a first-order ordinary differential equation (ODE):
# $$u'(t) = f[t, u(t)]$$
# For a more specific case, let's say $u'(t)=2u(t)$, where $f[t, u(t)]$ is the slope of $u(t)$ at some domain point $t$. If we define discrete domain points as $(t_0, t_1, t_2, ..., t_N)$, where $\Delta t = t_{k+1} - t_k$ with uniform spacing. If we know some initial condition, $u(t_0)=u_0$, we can then construct range points $(u_0, u_1, u_2, ..., u_N)$, where $u(t_k)=u_k$, by using the slope, $u'(t_k)=u'_k$, at each domain point. However, this requires knowledge of the original function, $u(t)$.
# There are five solution methods that work well for numerical approximations, and they are listed below:
#
# Euler's Method, Leapfrog (Midpoint) Method, Heun's (Trapezoid) Method, 2nd-order Runge-Kutta Method, and 4th-order Runge-Kutta Method.
# ## Euler's Method
# Euler's method, which is accurate to 1st-order, is defined as:
# \[u_{k+1} = u_k + \Delta t\, f[t_k, u_k]\]
#
# To understand what this means, let's work with an example $u'(t)=u(t)$. By looking at this example, we can see that $u(t)=e^t$. But let's assume we don't know that, and the only thing we know is some initial condition $u_0=u(0)=1$. We will first work with a $\Delta t=1$ (a $\Delta t$ this big won't be as accurate as a smaller $\Delta t$, and we will see that later).
#
# For a $u_0=u'_0=1$, we can then say, using Euler's method:
# \[u_1=u_0+\Delta tu'_0=2\]
#
# For $t=2$ and $u'_1=2$:
# \[u_2=u_1+\Delta tu'_1=4\]
#
# Let's now compare this to our function $e^x$:
# \[e^0=1\,e^1=e\,e^2\approx 7.389\]
#
# We can see that a step size of $\Delta t = 1$ is not very accurate. Let's try a step size of $\Delta t = 0.25$.
#
# For a $u_0=u'_0=1$, we can then say, using Euler's method:
# \[u_1=u(0.25)=u_0+\Delta tu'_0=1.25\]
#
# For $t=0.5$ and $u'_1=1.25$:
# \[u_2=u(0.5)=u_1+\Delta tu'_1=1.5625\]
#
# Let's now compare this to our function $e^x$:
# \[e^0=1\,e^{0.25}\approx 1.284\,e^{0.5}\approx 1.649\]
#
# Thus, if we keep decreasing the step-size $\Delta t$, we should get increasingly accurate results. This process of using a smaller step size can be handled by a computer.
#
# To understand why a smaller step size is preferable, we should understand what this looks like graphically. For this method, we are assuming a constant slope between each domain point. Now this isn't a problem for a linear graph, since it has a constant slope everywhere. But the same cannot be said for an exponential graph. So, for smaller $\Delta t's$, we are making smaller and smaller assumptions, thus more accurate results.
# ## Leapfrog Method
# The leapfrog method, which is accurate to the 2nd-order, is defined as:
# \[u_{k+1} = u_{k-1} + 2\Delta t\, f[t_k, u_k]\]
#
# In this method, to compute the next point of the function, all we need is the slope at the current point and the result at the last point.
#
# The leapfrog method can easily be reversed to find the previous answer as well, using knowledge of later points in the function.
# \[u_{k-1} = u_{k+1} - 2\Delta t\, f[t_k, u_k]\]
#
# Just like Euler's method, the leapfrog method becomes more and more accurate the smaller the $\Delta t$ steps are, because at these smaller $\Delta t$ steps, the linear approximation becomes closer and closer to the function. This is unfeasible for humans but easy for a computer to calculate.
# ## Heun's Method
# Heun's Method is an attempt to reconcile the downsides of Euler's Method (namely its slowness and lack of accuracy at large curvatures) by taking the tangent at both the current and next point and taking the average of the two.
#
# To perform Heun's method, we need the tangent at the two points. The tangent at the first point is
# \[slope_{left} = f[x_n,y_n]\]
#
# and we can use Euler's Method to approximate the coordinates at the next point n+1:
#
# \[(x_{n+1},y_{n+1}) = (x_n+h,y_n+hf(x_n,y_n))\]
#
# we can find the tangent at n+1 as
#
# \[slope_{right}= f(x_n+h,y_n+hf(x_n,y_n))\]
#
# Finally, Heun's Method finds the point n+1 as
#
# \[x_{n+1} = x_n + h,\qquad y_{n+1} = y_n + \frac{h}{2}\left(slope_{left}+slope_{right}\right)\]
# ## 2nd Order Runge-Kutta Method
# The Runge-Kutta methods attempt to minimize error during calculation by taking an intermediate trial step to cancel out error terms. The second order method takes the form of:
# \[k_1 = hf[x_n,y_n]\]
# \[k_2 = hf[x_n+(1/2)h,y_n+(1/2)k_1]\]
# \[y_{n+1}=y_n+k_2+O(h^3)\]
#
# O is a Landau symbol; f = O(p) means that the absolute value of f is less than A*p for some constant A at all values of x and n.
# ## 4th Order Runge-Kutta Method
# Let's now talk about 4th-order Runge-Kutta Method, which is accurate to 4th-order:
#
# $u_{k+1} = u_k + (K_1 + 2K_2 + 2K_3 + K_4)/6$,
#
# $K_1 = \Delta t\,f[t_k,u_k]$,
#
# $K_2 = \Delta t\, f[t_k + \Delta t/2, u_k + K_1/2]$,
#
# $K_3 = \Delta t\, f[t_k + \Delta t/2, u_k + K_2/2]$
#
# $K_4 = \Delta t\,f[t_k + \Delta t, u_k + K_3]$
#
# For simplification purposes, we can rewrite this as:
#
# $u_{k+1} = u_k + \Delta t\,(K_1 + 2K_2 + 2K_3 + K_4)/6$,
#
# $K_1 = f[t_k,u_k]$,
#
# $K_2 = f[t_k + \Delta t/2, u_k + K_1/2]$,
#
# $K_3 = f[t_k + \Delta t/2, u_k + K_2/2]$
#
# $K_4 = f[t_k + \Delta t, u_k + K_3]$
#
# where $(K_1 + 2K_2 + 2K_3 + K_4)/6$ is our average slope.
#
# So, we can calculate this $K-values$, these slopes, at 4 different points, we can then approximate $u_k$ for each $t-value$.
#
# Let's work through an example for a more clear understanding:
#
# Suppose $u'(t) = f[t, u(t)] = 2u + t$, where $u(0) = u_0 = 1$ and $\Delta t = 1$
# Then,
#
# $K_1 = f[t_k,u_k] = 2(1) + 0 = 2$
#
# $K_2 = f[t_k + \Delta t/2, u_k + K_1/2] = 2(2) + 0.5 = 4.5$,
#
# $K_3 = f[t_k + \Delta t/2, u_k + K_2/2] = 2(3.25) + 0.5 = 7$
#
# $K_4 = f[t_k + \Delta t, u_k + K_3] = 2(8) + 1 = 17$
#
# Thus, our average slope is $(K_1 + 2K_2 + 2K_3 + K_4)/6 = (2 + 2(4.5) + 2(7) + 17)/6 = 7$
#
# So, $u_{0+1} = u_1 = u_0 + \Delta t\,(average\,slope) = 1 + 1(7) = 8$
#
# Now we can graph each $u_k$ based on its average slope.
| ode_methods.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
import pandas as pd
import xgboost as xgb
import numpy as np
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
import graphviz
# Load the wholesale customers data; 'Channel' is the class label.
data = pd.read_csv("data/wholesale-data.csv")
data.head()
# Row index of the 80/20 train/test split point.
int(data.shape[0]*0.8)
X = data.copy()
X.drop("Channel", inplace = True, axis = 1)  # features = everything but the label
Y = data.Channel
# NOTE(review): split is positional, not shuffled -- only valid if row order
# carries no signal; confirm.
X_train, X_test = X[:int(X.shape[0]*0.8)].values, X[int(X.shape[0]*0.8):].values
Y_train, Y_test = Y[:int(Y.shape[0]*0.8)].values, Y[int(Y.shape[0]*0.8):].values
# Wrap the arrays in XGBoost's DMatrix containers.
train = xgb.DMatrix(X_train, label=Y_train)
test = xgb.DMatrix(X_test, label=Y_test)
# 'multi:softmax' makes predict() return hard class labels in [0, num_class).
param = {'max_depth':6, 'eta':0.1, 'silent':1, 'objective':'multi:softmax', 'num_class': 3}
num_round = 5  # number of boosting rounds
model = xgb.train(param, train, num_round)
# Persist the booster, then reload it into a fresh Booster to demonstrate
# that save/load round-trips.
model.save_model('wholesale-model.model')
loaded_model = xgb.Booster({'nthread': 8})
loaded_model.load_model('wholesale-model.model')
preds = loaded_model.predict(test)
# Evaluate the reloaded model on the held-out 20%.
accuracy = accuracy_score(Y[int(Y.shape[0]*0.8):].values, preds)
print("Accuracy: %.2f%%" % (accuracy * 100.0))
| Chapter05/XGBoost Save Model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Run the test
# + run_control={"marked": true}
import atmPy
from atmPy.unit_testing import run_nose_tests
from atmPy.unit_testing import nose_tests
# NOTE(review): `plt` is never imported in this notebook -- presumably it came
# from a pylab/interactive namespace; confirm before running as a script.
plt.style.use('hagen_default')
# + deletable=false editable=false run_control={"frozen": true}
# reload(run_nose_tests)
# reload(nose_tests)
# -
# ## all
# Run the full atmPy unit-test suite.
run_nose_tests.run()
# ## onebyone
# NOTE(review): `reload` is not imported (builtin only in Python 2;
# importlib.reload in Python 3) -- confirm the intended interpreter.
reload(nose_tests)
from atmPy.unit_testing import run_nose_tests
from atmPy.unit_testing import nose_tests
# Instantiate one test case and run a single test method directly.
test = nose_tests.SizeDistTest()
test.test_mixing_ratios()
# # data_archives
# ## ARM
from atmPy.data_archives import arm
# List the ARM data products known to atmPy.
arm.arm_products.keys()
fname = './test_data/'
# ### 1twr10xC1
# +
# # %%debug -b /Users/htelg/prog/atm-py/atmPy/data_archives/arm/_1twr10xC1.py:44
# # %%debug -b /Users/htelg/prog/atm-py/atmPy/data_archives/arm/_read_data.py:193
# # %%debug -b /Users/htelg/prog/atm-py/atmPy/general/timeseries.py:327
# # create the test file
fname = './test_data/'
# Read the tower met product and unpack it from the returned dict.
out = atmPy.data_archives.arm.read_netCDF(fname, data_product='1twr10xC1')
out = out['1twr10xC1']
# The commented lines below were used once to write the reference CSVs.
# out.relative_humidity.data.to_csv(fname+'1twr10xC1_rh.csv')
# out.temperature.data.to_csv(fname+'1twr10xC1_temp.csv')
# out.vapor_pressure.data.to_csv(fname + '1twr10xC1_p_vapor.csv')
# -
from copy import deepcopy
# Scratch cells: exercising copy/deepcopy behaviour of TimeSeries objects
# read from an ARM netCDF file.
# %%debug -b /Users/htelg/prog/atm-py/atmPy/data_archives/arm/_read_data.py:178
fname = './test_data/sgp1twr10xC1.b1.20120201.000000.cdf'
out = atmPy.data_archives.arm.read_netCDF(fname, data_product='1twr10xC1')
bla = deepcopy(out.relative_humidity.data)
bla = atmPy.general.timeseries.TimeSeries(out.relative_humidity.data)
out.relative_humidity._data_period
# presumably dropping the open netCDF handle so deepcopy succeeds -- confirm
delattr(out, 'netCDF')
deepcopy(out.relative_humidity)
out.relative_humidity.copy()
reload(nose_tests)
nose_tests.test_1twr10xC1()
test_data_folder = './test_data/'
from atmPy.data_archives import arm
out = arm.read_netCDF(test_data_folder, data_product='1twr10xC1')
out = out['1twr10xC1']
# rh
# NOTE(review): `pd` and `np` are not imported in this notebook -- presumably
# supplied by an interactive namespace; confirm before running as a script.
soll = pd.read_csv(test_data_folder + '1twr10xC1_rh.csv', index_col=0,
                   dtype={'rh_25m': np.float32, 'rh_60m': np.float32}
                  )
# self.assertTrue(np.all(out.relative_humidity.data == soll))
# np.all(out.relative_humidity.data.index == soll.index)
out.relative_humidity.data.index == pd.to_datetime(soll.index)
out.relative_humidity.data.index
soll.columns.name = out.relative_humidity.data.columns.name
# +
# temp
soll = pd.read_csv(test_data_folder + '1twr10xC1_temp.csv', index_col=0,
dtype={'temp_25m': np.float32, 'temp_60m': np.float32}
)
self.assertTrue(np.all(out.temperature.data == soll))
# vapor pressure
soll = pd.read_csv(test_data_folder + '1twr10xC1_p_vapor.csv', index_col=0,
dtype={'vap_pres_25m': np.float32, 'vap_pres_60m': np.float32}
)
self.assertTrue(np.all(out.vapor_pressure.data == soll))
# -
# # Aerosols
# ## sizedistribution
# ### concentrations
# Check derived concentration values of a simulated log-normal size
# distribution against hard-coded expected numbers.  The bare `==`
# comparisons only display True/False in the notebook -- they do not assert.
from atmPy.aerosols import size_distribution
# %matplotlib inline
# +
sd = size_distribution.sizedistribution.simulate_sizedistribution(diameter=[15, 3000],
                                                                  numberOfDiameters=50,
                                                                  centerOfAerosolMode=222,
                                                                  widthOfAerosolMode=0.18,
                                                                  numberOfParticsInMode=888)
sd.particle_number_concentration == 888.0
float(sd.particle_surface_concentration.values) == 194.42186363605904
float(sd.particle_volume_concentration.values) == 11.068545094055812
sd.properties.particle_density =2.2
float(sd.particle_mass_concentration) == 24.350799206922783
# -
# #### including mixing rations
# Same check for mass mixing ratio; temperature/pressure housekeeping is
# attached so the mixing ratio can be computed.
from atmPy.aerosols import size_distribution
# %matplotlib inline
# +
sd = size_distribution.sizedistribution.simulate_sizedistribution_timeseries(diameter=[15, 3000],
                                                                             numberOfDiameters=50,
                                                                             centerOfAerosolMode=222,
                                                                             widthOfAerosolMode=0.18,
                                                                             numberOfParticsInMode=888,
                                                                             startDate='2015-10-23 16:00:00',
                                                                             endDate='2015-10-23 17:00:00',
                                                                             frequency=60)
# Keep only the first time step.
sd.data = sd.data.iloc[[0],:]
# Housekeeping: 250 K and 750 Pa for the single remaining time stamp.
sd.housekeeping = atmPy.general.timeseries.TimeSeries(pd.DataFrame(np.array([[250.], [750.]]).transpose(), index = sd.data.index, columns=['temperature_K', 'pressure_Pa']))
sd.parameters4reductions.particle_density =2.8
float(sd.particle_mass_mixing_ratio.data.values) * 1e6 == 2.96533739732464
# -
# #### muell
# Scratch ("muell" = junk) cell: poke at the parameter-existence checks.
from atmPy.aerosols import size_distribution
# %matplotlib inline
sd = size_distribution.sizedistribution.simulate_sizedistribution_timeseries(diameter=[15, 3000],
                                                                             numberOfDiameters=50,
                                                                             centerOfAerosolMode=222,
                                                                             widthOfAerosolMode=0.18,
                                                                             numberOfParticsInMode=888,
                                                                             startDate='2015-10-23 16:00:00',
                                                                             endDate='2015-10-23 17:00:00',
                                                                             frequency=60)
sd.hygroscopicity.parameters._check_growth_parameters_exist()
sd.hygroscopicity.parameters.kappa = 1.5
sd.hygroscopicity.parameters._check_growth_parameters_exist()
sd.hygroscopicity.parameters.growth_distribution = 5
sd.hygroscopicity.parameters
sd.optical_properties.parameters.wavelength = 550
sd.optical_properties.parameters.refractive_index = 1.5
sd.optical_properties.parameters._check_opt_prop_param_exist()
np.any([False,False])
# ### moment conversion
# Verify the six moment conversions (dN/dDp, dN/dlogDp, dS/..., dV/...) of a
# simulated distribution against stored reference files ("soll" = expected).
from atmPy.aerosols import size_distribution
# %matplotlib inline
# +
sd = size_distribution.sizedistribution.simulate_sizedistribution(diameter=[15, 3000],
                                                                  numberOfDiameters=50,
                                                                  centerOfAerosolMode=222,
                                                                  widthOfAerosolMode=0.18,
                                                                  numberOfParticsInMode=888)
sd_dNdDp = sd.convert2dNdDp()
sd_dNdlogDp = sd.convert2dNdlogDp()
sd_dSdDp = sd.convert2dSdDp()
sd_dSdlogDp = sd.convert2dSdlogDp()
sd_dVdDp = sd.convert2dVdDp()
sd_dVdlogDp = sd.convert2dVdlogDp()
folder = './test_data/'
# The commented lines below regenerate the reference files.
# sd.save_csv(folder + 'aerosols_size_dist_moments_sd.nc')
# sd_dNdDp.save_csv(folder + 'aerosols_size_dist_moments_sd_dNdDp.nc')
# sd_dNdlogDp.save_csv(folder + 'aerosols_size_dist_moments_sd_dNdlogDp.nc')
# sd_dSdDp.save_csv(folder + 'aerosols_size_dist_moments_sd_dSdDp.nc')
# sd_dSdlogDp.save_csv(folder + 'aerosols_size_dist_moments_sd_dSdlogDp.nc')
# sd_dVdDp.save_csv(folder + 'aerosols_size_dist_moments_sd_dVdDp.nc')
# sd_dVdlogDp.save_csv(folder + 'aerosols_size_dist_moments_sd_dVdlogDp.nc')
sd_soll = size_distribution.sizedistribution.read_csv(folder + 'aerosols_size_dist_moments_sd.nc')
sd_dNdDp_soll = size_distribution.sizedistribution.read_csv(folder + 'aerosols_size_dist_moments_sd_dNdDp.nc')
sd_dNdlogDp_soll = size_distribution.sizedistribution.read_csv(folder + 'aerosols_size_dist_moments_sd_dNdlogDp.nc')
sd_dSdDp_soll = size_distribution.sizedistribution.read_csv(folder + 'aerosols_size_dist_moments_sd_dSdDp.nc')
sd_dSdlogDp_soll = size_distribution.sizedistribution.read_csv(folder + 'aerosols_size_dist_moments_sd_dSdlogDp.nc')
sd_dVdDp_soll = size_distribution.sizedistribution.read_csv(folder + 'aerosols_size_dist_moments_sd_dVdDp.nc')
sd_dVdlogDp_soll = size_distribution.sizedistribution.read_csv(folder + 'aerosols_size_dist_moments_sd_dVdlogDp.nc')
# Relative tolerance for the element-wise comparison below.
threshold = 1e-10
assert(abs((sd.data.values - sd_soll.data)).values.sum() < (sd.data.values.max() * threshold))
assert((sd_dNdDp.data - sd_dNdDp_soll.data).values.sum() < (sd_dNdDp.data.values.max() * threshold))
assert((sd_dSdDp.data - sd_dSdDp_soll.data).values.sum() < (sd_dSdDp.data.values.max() * threshold))
assert((sd_dVdDp.data - sd_dVdDp_soll.data).values.sum() < (sd_dVdDp.data.values.max() * threshold))
assert((sd_dNdlogDp.data - sd_dNdlogDp_soll.data).values.sum() < (sd_dNdlogDp.data.values.max() * threshold))
assert((sd_dSdlogDp.data - sd_dSdlogDp_soll.data).values.sum() < (sd_dSdlogDp.data.values.max() * threshold))
assert((sd_dVdlogDp.data - sd_dVdlogDp_soll.data).values.sum() < (sd_dVdlogDp.data.values.max() * threshold))
# -
# Display the raw residuals for eyeballing.
(sd.data.values - sd_soll.data.values).sum()
(abs((sd.data - sd_soll.data).dropna()).values.sum() < (sd.data.values.max() * threshold))
# ### optical properties
# Optical properties of a simulated layer-series distribution, compared to a
# stored AOD reference file.
from atmPy.aerosols import size_distribution
import atmPy
from atmPy.unit_testing import nose_tests
# %matplotlib inline
test = nose_tests.SizeDistTest()
test.test_opt_prop_LS()
# +
sd = size_distribution.sizedistribution.simulate_sizedistribution_layerseries(diameter=[10, 2500],
                                                                              numberOfDiameters=100,
                                                                              heightlimits=[0, 6000],
                                                                              noOflayers=100,
                                                                              layerHeight=[500.0, 4000.0],
                                                                              layerThickness=[100.0, 300.0],
                                                                              layerDensity=[1000.0, 50.0],
                                                                              layerModecenter=[200.0, 800.0],
                                                                              widthOfAerosolMode=0.2)
sd.optical_properties.parameters.refractive_index = 1.56
sd.optical_properties.parameters.wavelength = 515
fname = './test_data/aerosols_size_dist_LS_optprop.nc'
sdl = atmPy.read_file.netCDF(fname)
np.all(sd.optical_properties.aod_cumulative.data['aod'] == sdl.data['AOD per Layer'])
# -
(sd.optical_properties.aod_cumulative.data['aod'] - sdl.data['AOD per Layer']).sum()
# WARNING(review): the following lines are a non-runnable fragment pasted from
# library source code -- `aod`, `sdls`, `i`, `extinction_coefficient`,
# `AOD_layer`, `out`, `_np` and `_pd` are all undefined in this notebook.
if aod:
    layerThickness = sdls.layerbounderies[i][1] - sdls.layerbounderies[i][0]
    AOD_perBin = extinction_coefficient * layerThickness
    AOD_layer[i] = AOD_perBin.values.sum()
out['AOD'] = AOD_layer[~ _np.isnan(AOD_layer)].sum()
out['AOD_layer'] = _pd.DataFrame(AOD_layer, index=sdls.layercenters, columns=['AOD per Layer'])
out['AOD_cum'] = out['AOD_layer'].iloc[::-1].cumsum().iloc[::-1]
layerThickness = sd.layerbounderies[i][1] - sd.layerbounderies[i][0]
AOD_perBin = extinction_coefficient * layerThickness
AOD_layer[i] = AOD_perBin.values.sum()
# Vectorized re-computation of the per-layer AOD for comparison.
layerthickness = sd.layerbounderies[:,1] - sd.layerbounderies[:,0]
aod_per_bin_per_layer = sd.optical_properties.extinction_coeff_per_bin.multiply(layerthickness, axis=0)
aod_per_layer = pd.DataFrame(aod_per_bin_per_layer.sum(axis=1), columns= ['aod_per_layer'])
aod = aod_per_layer.values.sum()
aod_cumulative = aod_per_layer.iloc[::-1].cumsum()
aod_cumulative.rename(columns={'aod_per_layer': 'aod'})
sd.optical_properties._optical_porperties.keys()
# + deletable=false editable=false run_control={"frozen": true}
# fname = './test_data/aerosols_size_dist_LS_optprop.nc'
# sd.optical_properties.aerosol_optical_depth_cumulative_VP.save_netCDF(fname)
# -
# ### hygroscopic growth
# Hygroscopic growth: grow the layer-series distribution at 90 % RH with
# kappa = 0.7 and compare the resulting cumulative AOD to the stored file.
sdto = nose_tests.SizeDistTest()
sdto.test_opt_prop_LS()
sdto.test_growth_opt_propLS()
sdto.sizedistributionLS.distributionType
from atmPy.unit_testing import nose_tests
from atmPy.general import vertical_profile
import atmPy
# %matplotlib inline
# nose_tests.size_distribution.sizedistribution.settings
# +
# use the same dist_LS as in test_opt_prop_LS
sdto = nose_tests.SizeDistTest()
sdto.test_opt_prop_LS()
# generate some RH which we can put into the housekeeping
hk = pd.DataFrame(index = sdto.sizedistributionLS.data.index, columns=['Relative_humidity'])
hk['Relative_humidity'] = 90
hk = vertical_profile.VerticalProfile(hk)
sdto.sizedistributionLS.housekeeping = hk
sdto.sizedistributionLS.hygroscopicity.parameters.RH = hk
# let it grow
sdto.sizedistributionLS.hygroscopicity.parameters.kappa = 0.7
distg = sdto.sizedistributionLS.hygroscopicity.grown_size_distribution
distg.optical_properties.parameters.wavelength = sdto.sizedistributionLS.optical_properties.parameters.wavelength.value
# load the test data
fname = './test_data/aerosols_size_dist_LS_hyg_growth_optprop.nc'
aodcs = atmPy.read_file.netCDF(fname)
# Pass if the summed absolute deviation is below 1e-5 of the total AOD.
threshold = distg.optical_properties.aod_cumulative.data.values.sum() * 1e-5
res = np.abs(distg.optical_properties.aod_cumulative.data.values
             - aodcs.data.values).sum() < threshold
# NOTE(review): `print(...), res` builds a throwaway tuple -- looks like a
# leftover from Python-2 style print; the trailing `, res` only displays res.
print(np.abs(distg.optical_properties.aod_cumulative.data.values
             - aodcs.data.values).sum() , threshold), res
# -
distg.plot()
a = aodcs.plot()
distg.optical_properties.aod_cumulative.plot(ax = a)
(aodcs.data.iloc[:,0] / distg.optical_properties.aod_cumulative.data.iloc[:,0]).plot()
distg.optical_properties.aod_cumulative.plot()
# + deletable=false editable=false run_control={"frozen": true}
# fname = './test_data/aerosols_size_dist_LS_hyg_growth_optprop.nc'
# distg.optical_properties.aod_cumulative.save_netCDF(fname)
# -
# ## physics
# ### hygroscopicity
# #### growth_factor_distribution
# Hygroscopic growth-factor distributions: build one from ARM TDMA data and
# compare growth modes and mixing state against stored reference CSVs.
from atmPy.aerosols.physics import hygroscopicity as hyg
from atmPy.data_archives import arm
from atmPy.unit_testing import nose_tests
sdto = nose_tests.PhysicsHygroscopicityTest()
sdto.test_hygroscopic_growth_factor_distributions()
# +
fname = './test_data/sgptdmahygC1.b1.20120601.004227.cdf'
out = arm.read_netCDF(fname, data_quality= 'patchy', leave_cdf_open= False)
# Select the 200 nm diameter slice of the growth distributions.
hgfd = hyg.HygroscopicGrowthFactorDistributions(out.hyg_distributions.data.loc[:,200.0,:].transpose())
hgfd.plot()
fname = './test_data/aerosols_physics_hygroscopicity_growth_mode.csv'
growth_mode_soll = pd.read_csv(fname, index_col=0)
# Relative tolerances differ per quantity (1e-5 for ratio, 1e-7 for gf).
threshold = growth_mode_soll.ratio.sum() * 1e-5
res = np.abs(hgfd.growth_modes_gf.ratio - growth_mode_soll.ratio).sum() < threshold
print(np.abs(hgfd.growth_modes_gf.ratio - growth_mode_soll.ratio).sum(), threshold, res)
threshold = growth_mode_soll.gf.sum() * 1e-7
res = np.abs(hgfd.growth_modes_gf.gf - growth_mode_soll.gf).sum()< threshold
print(np.abs(hgfd.growth_modes_gf.gf - growth_mode_soll.gf).sum(), threshold, res)
#######
fname = './test_data/aerosols_physics_hygroscopicity_mixing_state.csv'
mixing_state_soll = pd.read_csv(fname, index_col=0)
threshold = mixing_state_soll.mixing_state.sum() * 1e-6
res = np.abs(hgfd.mixing_state.mixing_state - mixing_state_soll.mixing_state).sum() < threshold
print(np.abs(hgfd.mixing_state.mixing_state - mixing_state_soll.mixing_state).sum(), threshold, res)
# + deletable=false editable=false run_control={"frozen": true}
# fname = './test_data/aerosols_physics_hygroscopicity_growth_mode.csv'
# hgfd.growth_modes.to_csv(fname)
# + deletable=false editable=false run_control={"frozen": true}
# fname = './test_data/aerosols_physics_hygroscopicity_mixing_state.csv'
# hgfd.mixing_state.to_csv(fname)
# -
# ### f of RH
# #### kappa
# f(RH) via a fixed kappa: combine TDMA-APS size distributions with ACSM
# refractive index and compare f_RH(85/40) to the stored reference.
from atmPy.data_archives import arm
# %matplotlib inline
plt.style.use('hagen_default')
# + run_control={"marked": false}
fname = 'test_data/sgptdmaapssizeC1.c1.20120601.004227.cdf'
tdmaaps = arm.read_netCDF(fname, data_quality= 'patchy', leave_cdf_open= False)
sd = tdmaaps.size_distribution
fname = 'test_data/sgpaosacsmC1.b1.20120601.002649.cdf'
acsm = arm.read_netCDF(fname, data_quality= 'patchy', leave_cdf_open= False)
sd.parameters4reductions.refractive_index = acsm.refractive_index
sd.hygroscopicity.parameters.kappa = 0.6
sd.optical_properties.parameters.wavelength = 550
# NOTE(review): this overwrites the ACSM-derived refractive index set above --
# confirm which of the two is intended to win.
sd.optical_properties.parameters.refractive_index = 1.5
fname = './test_data/aerosols_physics_hygroscopicity_fRH_kappa.csv'
fRHk_soll = atmPy.read_file.netCDF(fname)
threshold = sd.hygroscopicity.f_RH_85_40.data.sum().values[0] * 1e-10
np.abs(sd.hygroscopicity.f_RH_85_40.data - fRHk_soll.data).sum().values[0] < threshold
# -
a = sd.hygroscopicity.f_RH_85_40.plot()
fRHk_soll.plot(ax = a)
# + deletable=false editable=false run_control={"frozen": true}
# fname = './test_data/aerosols_physics_hygroscopicity_fRH_kappa.csv'
# sd.hygroscopicity.f_RH_85_40.save_netCDF(fname)
# -
#
# #### growth distribution
# f(RH) via a measured growth-factor distribution instead of a fixed kappa.
from atmPy.data_archives import arm
import atmPy
# %matplotlib inline
plt.style.use('hagen_default')
# +
fname = 'test_data/sgptdmaapssizeC1.c1.20120601.004227.cdf'
tdmaaps = arm.read_netCDF(fname, data_quality= 'patchy', leave_cdf_open= False)
sd = tdmaaps.size_distribution
fname = 'test_data/sgpaosacsmC1.b1.20120601.002649.cdf'
acsm = arm.read_netCDF(fname, data_quality= 'patchy', leave_cdf_open= False)
sd.parameters4reductions.refractive_index = acsm.refractive_index
fname = 'test_data/sgptdmahygC1.b1.20120601.004227.cdf'
out = arm.read_netCDF(fname, data_quality= 'patchy', leave_cdf_open= False)
# Use the 200 nm growth-factor distribution as the growth model.
hgfd = out.hyg_distributions_d200nm
sd.hygroscopicity.parameters.growth_distribution = hgfd
sd.optical_properties.parameters.wavelength = 550
# sd.hygroscopicity.parameters.RH = 85
fname = './test_data/aerosol_fRH_from_size_dist_and_growthdistribution.cdf'
fRH_gd_soll = atmPy.read_file.netCDF(fname)
threshold = sd.hygroscopicity.f_RH_85_40.data.sum().values[0] * 1e-5
np.abs(sd.hygroscopicity.f_RH_85_40.data - fRH_gd_soll.data).sum().values[0] < threshold
# -
a = sd.hygroscopicity.f_RH_85_40.plot()
fRH_gd_soll.plot(ax = a)
# + deletable=false editable=false run_control={"frozen": true}
# fname = './test_data/aerosol_fRH_from_size_dist_and_growthdistribution.cdf'
# sd.hygroscopicity.f_RH_85_40.save_netCDF(fname)
# -
| atmPy/unit_testing/nose_unit_tests.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pyqrcode
import png
from pyqrcode import QRCode

# Text that the QR code will encode.
payload = "www.geeksforgeeks.org"

# Build the QR-code object from the payload string.
qr_code = pyqrcode.create(payload)

# Render the code to disk as "myqr.png"; scale=6 draws 6 px per module.
qr_code.png('myqr.png', scale=6)
# -
| data/qrcode_generator.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 6. Function
# **6.1 defining a function**
# +
# define a function
def main():
    """Entry point for the demo: meow once with n = 5."""
    kitten(5)


def kitten(n):
    """Print a meow message prefixed with *n*."""
    print(f'{n} Meow...')


if __name__ == '__main__':
    main()
# -
# **6.2 Function Arguments**
# +
# define a function
def main():
    """Demo default arguments: both calls rely on b's default of 1."""
    kitten1(5) # b default value 1
    kitten1(5)


def kitten(n):
    """Print a meow message prefixed with *n*."""
    print(f'{n} Meow...')


def kitten1(a, b=1):
    """Print both arguments; *b* falls back to 1 when omitted."""
    print(f'{a} {b} Meow...')


# Uncomment for error: a non-default parameter may not follow a default one.
#def kitten2(a,b=1,c):
#    print(f'{a} {b} Meow...')

if __name__ == '__main__':
    main()
# -
# **6.3 Argument List**
# +
def argListFunction(*args):
    """Show that *args arrives as a tuple, then echo each element."""
    print('type of x is {}'.format(type(args)))
    for element in args:
        print(element, end=' ', flush=True)


# Note: passing a list yields a one-element tuple containing that list.
argListFunction(list(range(10)))
# -
# **6.4 Keyword Arguments**
# +
def argListFunction(**args):
    """Show that **kwargs arrives as a dict, then echo each key."""
    print('type of x is {}'.format(type(args)))
    for key in args:
        print(key, end=' ', flush=True)


argListFunction(one= 1, two = 2)
# -
# **6.5 Return value**
# +
def sumUp(*args):
    """Return the sum of all positional arguments (0 when none are given)."""
    return sum(args)


sumUp(1, 2, 4, 5, 6, 7, 8)
# -
# **6.6 Generator**
# +
def evenNumberGen(n=2):
    """Yield the positive even integers strictly below *n*."""
    for candidate in range(2, n, 2):
        yield candidate


for value in evenNumberGen(20):
    print(value, end=' ', flush=True)
# -
# **6.7 Decorators**
# +
def test():
    """Print a marker showing this function ran."""
    print('this is a test function')


# Functions are first-class objects: bind a second name to the same function
# and call it through that name.
x = test
x()
# +
def test1(f):
def test2():
print('inside test 2 start')
f()
print('inside test 2 end')
return test2
def test3():
print('this is function test3')
t = test1(test3)
t()
# +
def test1(f):
def test2():
print('inside test 2 start')
f()
print('inside test 2 end')
return test2
@test1
def test3():
print('this is function test3')
test3()
| 6. Function.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Interactive exploration of the University of Florida course catalogs with
# Selenium, to find the right element selectors before writing the full scrape.
from selenium import webdriver
import csv
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--no-sandbox')
# NOTE(review): `chrome_options=` is the deprecated keyword (newer Selenium
# uses `options=`); the hard-coded Windows path must exist on the machine.
driver = webdriver.Chrome("C:\\Users\<NAME>\Documents\My Work\PakkaIndia\chromedriver",chrome_options=chrome_options)
# -
# Undergraduate catalog: course links live inside the element whose id is the
# URL path '/UGRD/courses/'.
driver.get('https://catalog.ufl.edu/UGRD/courses/')
data1=driver.find_elements_by_id('/UGRD/courses/')
len(data1)
data2=data1[0].find_elements_by_tag_name('a')
len(data2)
data2[0].text
data2[0].get_attribute('href')
# Graduate catalog: links are collected under the second <h5> heading.
driver.get('http://gradcatalog.ufl.edu/content.php?catoid=10&navoid=2013')
data3=driver.find_elements_by_tag_name('h5')
len(data3)
data4=data3[1].find_elements_by_tag_name('a')
len(data4)
data4[0].text
data4[0].get_attribute('href')
# # Full Code
# +
from selenium import webdriver
import csv

# Start Chrome without the sandbox (needed in some restricted environments).
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument('--no-sandbox')
driver = webdriver.Chrome("C:\\Users\<NAME>\Documents\My Work\PakkaIndia\chromedriver",chrome_options=chrome_options)

# Rows collected for the CSV export: header first, then one row per course.
info = [['Course', 'University', 'URL']]

# Page 0 is the undergraduate catalog, page 1 the graduate catalog.
link = ['https://catalog.ufl.edu/UGRD/courses/', 'http://gradcatalog.ufl.edu/content.php?catoid=10&navoid=2013']
for page_index, page_url in enumerate(link):
    driver.get(page_url)
    if page_index == 0:
        # Undergrad: anchors inside the element whose id is the URL path.
        container = driver.find_elements_by_id('/UGRD/courses/')[0]
        anchors = container.find_elements_by_tag_name('a')
    else:
        # Grad: anchors under the second <h5> heading on the page.
        heading = driver.find_elements_by_tag_name('h5')[1]
        anchors = heading.find_elements_by_tag_name('a')
    for anchor in anchors:
        c = anchor.text
        url = anchor.get_attribute('href')
        info.append([c, 'University of Florida', url])
        print(c, url)
driver.quit()
# -
# Show how many rows were scraped, then dump everything to a UTF-8 CSV file.
len(info)
with open('University_of_Florida.csv','w',encoding="utf-8",newline="") as file:
    write=csv.writer(file)
    for row in info:
        write.writerow(row)
| American_Universities/University_of_Florida/University_of_Florida.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Proyecto Titanic
import numpy as np
import pandas as pd
# Load the training data file for exploratory analysis.
# NOTE(review): the path is relative -- the notebook must be started from the
# project root for ./data/train.csv to resolve.
dataset = pd.read_csv("./data/train.csv")
dataset.head()
dataset.describe()
| main_notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Clustering of metabolite peaks from LC-MS
#
# Incorporating retention time into hierarchical clustering of LC-MS peaks.
#
# This was first used in
# Gardinassi et al. (2018) Integrative metabolomics and transcriptomics signatures of clinical tolerance to Plasmodium vivax reveal activation of innate cell immunity and T cell signaling. Redox Biology. DOI: 10.1016/j.redox.2018.04.011
#
# The example below is part of data from the paper.
#
# <NAME>, 2018-05-12
# +
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
from scipy.cluster.hierarchy import *
from scipy.spatial.distance import pdist, squareform
# LC-MS feature table: rows are peaks, columns are m/z, retention time and
# per-sample intensities (see the docstring in the next cell).
infile = "../input_data/HILIC_pos_diag_basel.txt"
# -
metabo = pd.read_table(infile)
print(metabo.shape)
metabo.head()
# +
'''
Default input format: m/z retention_time samples
# Scipy implementation of hierarchical clustering is mirroring Matlab
# https://docs.scipy.org/doc/scipy/reference/cluster.hierarchy.html
# https://www.mathworks.com/help/stats/hierarchical-clustering.html
This adds a function to penalize distance in retention time.
Output includes two figures, dendrogram and heatmap, which can be slow especially in pdf.
Sensitive to missing input data.
'''
# distance matrix, this is [1 - (Pearson R)]
# NOTE(review): values[:, 1:] still includes the retention-time column in the
# correlation -- confirm whether [:, 2:] (samples only) was intended.
YM = pdist(metabo.values[:, 1:], 'correlation')
retention_time = metabo.values[:,1]
min_retention_time, max_retention_time = min(retention_time), max(retention_time)
range_retention_time = max_retention_time - min_retention_time
print("min_retention_time, max_retention_time", min_retention_time, max_retention_time)
PearsonR = 1 - YM
# Pairwise |delta RT| in the same (ii < jj) order that pdist uses, so the two
# condensed vectors align element-by-element.
delta_RT = []
for ii in range(metabo.shape[0]):
    for jj in range(ii+1, metabo.shape[0]):
        delta_RT.append(abs(retention_time[ii] - retention_time[jj]))
print("Vector delta_RT len: ", len(delta_RT))
#
# weighting function
# distance = 1 - (1 - delta_RT/range_retention_time)*PearsonR
#
YM_new = 1 - (1- np.array(delta_RT)/range_retention_time)*PearsonR
print("Shape of dist matrix, ", YM_new.shape)
# Metabolite features linkage matrix using new distance matrix
ZM = linkage(YM_new, method='complete')
plt.figure(figsize=(10, 6))
#plt.title('HCL HILICpos study xyz')
plt.ylabel('Distance')
dendrogram(ZM)
# use .pdf if desired, but slower
plt.savefig('fig1.png')
# +
# Based on the dendrogram above, choose the distance at which the tree is cut
# into flat clusters.
distance_cut=1.5
# do linkage heat map
plt.figure(figsize=(10, 10))
sns.clustermap(squareform(YM_new), row_linkage=ZM, col_linkage=ZM, cmap="YlGnBu")
plt.savefig('fig2.png')
# Flat cluster label (1-based int) for every metabolite feature.
metClus = fcluster(ZM, distance_cut, criterion='distance')
# Compile clusters: map cluster label -> list of row indices.
# Fix: dict.has_key() is Python-2-only (removed in Python 3); setdefault()
# behaves identically on both interpreter versions.
metClusDict = {}
for ii in range(len(metClus)):
    metClusDict.setdefault(metClus[ii], []).append(ii)
print("number of clusters: ", len(metClusDict.keys()))
# +
# write out clusters.
def write_cluster(OUTDIR, wanted_clusters, metClus, metabo, prefix="metabo_"):
    """Write each wanted cluster's member rows of *metabo* to a TSV file.

    OUTDIR must end with a path separator; one file per cluster is written,
    named <prefix>clus_<c>.txt, where metClus[i] == c selects the rows.
    """
    for c in wanted_clusters:
        member_rows = [row for row in range(metabo.shape[0]) if metClus[row] == c]
        metabo.iloc[member_rows, :].to_csv(OUTDIR + prefix + "clus_%d.txt" %c, sep="\t")
# do all
wanted = metClusDict.keys()
# Need create OUTDIR first
OUTDIR = 'export_clusters/'
# NOTE(review): os.mkdir raises OSError if the directory already exists --
# re-running this cell fails; consider os.makedirs(OUTDIR, exist_ok=True).
os.mkdir(OUTDIR)
write_cluster(OUTDIR, wanted, metClus, metabo, prefix="metabo_")
# +
# to export collapsed values of each cluster, as per sample a = sum(z score)/squareroot(feature number)
def zscore(V):
    """Return the z-scores of *V* as a numpy array (population std, ddof=0).

    Fix: the original converted V to a Python list and relied on numpy-scalar
    coercion (`list - np.float64`) to get array arithmetic; an explicit array
    conversion is robust and equivalent.
    NOTE: a constant input (std == 0) still yields inf/nan, as before.
    """
    V = np.asarray(V, dtype=float)
    return (V - np.mean(V)) / np.std(V)


def get_cluster_activity(M):
    """Collapse a cluster's matrix M (features x samples) to one value per
    sample: a = sum over features of z-scored intensities / sqrt(N features)."""
    sqN = np.sqrt(M.shape[0])
    zrows = [zscore(row) for row in M]
    return [sum(z[ii] for z in zrows) / sqN for ii in range(M.shape[1])]


def write_cluster_activities(OUTDIR, wanted_clusters, metClus, metabo, prefix="metabo_"):
    '''
    To export collapsed values of each cluster, as per sample a = sum(z score)/squareroot(feature number)
    The columns of m/z and rtime may be converted but they are meaningless
    '''
    s = 'cluster_number\t' + '\t'.join(list(metabo.columns)) + '\n'
    for c in wanted_clusters:
        goodrows = [ii for ii in range(metabo.shape[0]) if metClus[ii] == c]
        # Note values in metabo starts from col 1 (column 0 is m/z).
        # NOTE(review): the header keeps all column names while the activity
        # row drops column 0, so labels are shifted by one -- kept as-is.
        s += prefix + str(c) + '\t' + '\t'.join(
            [str(x) for x in get_cluster_activity(metabo.iloc[goodrows, 1:].values)]) + '\n'
    # Fix: the output path previously ignored the OUTDIR parameter entirely
    # and always wrote to the current directory.
    with open(OUTDIR + 'cluster_activities.txt', 'w') as file:
        file.write(s)
write_cluster_activities('./', wanted, metClus, metabo, prefix="metabo_")
# -
# The output cluster activities can be used for further statistical analysis, similarly to feature level analysis.
#
| notebooks/HCL_clustering_considering_retention_time.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Regularización
# La regularización es un término que comprende diferentes métodos que fuerzan a los algoritmos de aprendizaje a crear un modelo menos complejo. De esta forma, permitimos que un algoritmo sea capaz de generalizar mejor, a pesar de dar peores resultados en el entrenamiento.
#
# Supongamos que intentamos ajustar los datos de entrenamiento mediante una recta y un polinomio:
#
# 
#
# ¿Cuál de los dos modelos se ajusta mejor a los datos? Sin duda, el modelo más complejo. Ahora bien, en muchas ocasiones seleccionar un modelo complejo puede llevar a un sobreajuste, que impida al algoritmo generalizar bien a datos que no ha utilizado en el entrenamiento.
#
# ¿Por qué motivo? Porque el modelo ha interpretado relaciones entre los datos de entrenamiento que en realidad se dan muy poco o son inexistentes en el resto de los datos.
# ¿Y qué podemos hacer? Pues podemos reducir la complejidad del modelo para que generalice mejor. En la práctica, esto puede llevar a menudo a un sesgo ligeramente mayor pero reduce de forma significativa la varianza. Este término se conoce como el *bias-variance tradeoff*, es decir, existe una relación de compensación entre el sesgo y la varianza del modelo.
#
# 
#
# [Fuente - From Understanding the Bias-Variance Tradeoff, por <NAME>.](http://scott.fortmann-roe.com/docs/BiasVariance.html)
# Queremos que nuestro modelo sea lo más simple posible pero no tan simple. Es por ello que deberemos de probar diferentes cosas para llegar a un óptimo:
#
# 
# A medida que añadimos más y más información a nuestro modelo estamos aumentando su complejidad. Este suele llevar a un incremento de la varianza mejorando nuestro sesgo pero esto conduce, también, a potencial sobreajuste.
#
# Por tanto, en la práctica debemos encontrar un óptimo donde no se nos dispare nada (sesgo, varianza, complejidad) y que nuestro modelo generalice bien. Pero no hay una forma de hacer esto que sea totalmente objetiva y analítica.
#
# Si nuestro modelo funciona mal podemos añadirle mayor información. A medida que añadimos más *features* a nuestro modelo su complejidad aumenta y debería ayudarnos a reducir el sesgo pero hay que hacerlo con cabeza para que no lleguemos a un sobreajuste.
#
# ¿Cómo podemos lidiar con estas cosas?
#
# Pues podemos utilizar la **regularización**. En la regularización lo que hacemos es añadir un coeficiente de penalización que aumentará de valor a medida que el modelo sea más complejo y de esta forma reducirá la varianza del modelo.
#
# Y ahora, veamos cómo funciona.
# # Sobreajuste o *overfitting* en la regresión lineal
# Vamos a calcular la regresión de un modelo para ver qué efecto tiene el sobreajuste. Para ello, vamos a generar un nuevo dataset más complejo a partir de los datos de precios de viviendas de Boston:
# +
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_boston
from sklearn.preprocessing import MinMaxScaler, PolynomialFeatures


def load_extended_boston():
    """Return an extended Boston-housing design matrix and target vector.

    The 13 original features are min-max scaled to [0, 1] and then expanded
    with all degree-2 polynomial combinations (105 columns in total).

    NOTE(review): load_boston is deprecated/removed in recent scikit-learn
    releases; this cell requires an older version.
    """
    boston = load_boston()
    # Fix: removed the dead store `X = boston.data`, which was immediately
    # overwritten by the scaled matrix on the next line.
    X = MinMaxScaler().fit_transform(boston.data)       # normalize features to [0, 1]
    X = PolynomialFeatures(degree=2).fit_transform(X)   # add degree-2 feature combinations
    return X, boston.target


X, y = load_extended_boston()
print(X.shape, y.shape)
# -
# Vemos que tenemos un dataset con 506 ejemplos (filas) y 105 *features* (columnas).
# Fixed random_state keeps the train/test split reproducible.
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
linreg = LinearRegression().fit(X_train, y_train)
# R^2 score on the training data (high here because the model overfits).
print(linreg.score(X_train, y_train))
# R^2 score on held-out data (much lower -- poor generalization).
print(linreg.score(X_test, y_test))
# ¡Vaya! Este valor es mucho más bajo que en los datos de entrenamiento. Como hemos explicado, ahora podríamos mejorar el modelo mediante una regularización. Veamos cómo hacerlo.
# # Regularización Ridge
# Vamos a ver primero la regularización más utilizada habitualmente, **Ridge**:
#
# $$ f(w, b) = \lambda||\textbf{w}||^2 +\frac{1}{N}\sum_{i=1}^{N}(y_i - (w*x_i+b))^2 $$
#
# donde $ ||\textbf{w}|| = \sum_{j=1}^{D}(w_{j})^2 $, es la suma del valor al cuadrado de los pesos
# y $ \lambda $ sigue siendo un hiperparámetro que controla la importancia de la regularización.
#
# Si $ \lambda = 0 $ nos encontramos en el caso de la regresión lineal. En cambio, si $ \lambda >> 0 $ el algoritmo de aprendizaje intentará que todos los $ w_j $ estén muy cerca de 0 (pero no siendo cero).
#
# En general, la regularización **Ridge** es conveniente cuando pensamos que todas las columnas tienen importancia a la hora de explicar la variable dependiente.
#
# En sklearn podemos ejecutar esta regularización de la siguiente forma:
from sklearn.linear_model import Ridge
# alpha is sklearn's name for the regularization strength lambda; 1.0 is the default.
linreg_ridge = Ridge(alpha=1.0).fit(X_train, y_train)
# R^2 on the training data (lower than plain linear regression, by design).
print(linreg_ridge.score(X_train, y_train))
# R^2 on held-out data (higher than plain linear regression -- better generalization).
print(linreg_ridge.score(X_test, y_test))
# ¿Qué ha sucedido?
#
# Como podéis ver los valores son menor y mayor para los datos de entrenamiento y de prueba, respectivamente, cuando lo comparamos con el caso de la regresión lineal (0.952, 0.607). Recordad que la regresión lineal estaba sobreajustando.
#
# Un modelo menos complejo es un modelo que funcionará un poco peor en el entrenamiento pero generalizará mejor. Como estamos interesados en el rendimiento de la generalización deberíamos elegir el modelo *Ridge* en lugar del modelo de regresión lineal para este caso concreto.
#
# Como expertos científicos de datos deberemos ajustar correctamente el parámetro $ \lambda $ de la regularización L2 que usa este modelo. $ \lambda $ es el parámetro `alpha` en `scikit-learn`. En el anterior ejemplo hemos usado el valor por defecto que usa `scikit-learn` (`alpha=1`). El valor óptimo de `alpha` dependerá de los datos que usemos. Incrementarlo llevará a tener peor ajuste en el entrenamiento pero puede llevar a mejor generalización. Vamos a aprender a usarlo en los ejercicios.
#
# Incrementar `alpha` significa hacer que los pesos o coeficientes estén más restringidos.
# # Regularización Lasso
# Una alternativa a Ridge para regularizar sería Lasso (Least Absolute Shrinkage and Selection Operator).
#
# La diferencia principal entre Ridge y Lasso es que Lasso permite que algunas columnas o *features* de nuestro dataset queden anuladas, es decir, las multiplica por cero para que no se utilicen en la regresión lineal. Esto permite hacer selección de *features* permitiendo eliminar algunas de ellos y que nuestro modelo sea más explicable al tener menos dimensiones.
#
# La fórmula de cálculo de esta regularización se muestra a continuación:
#
# $$ f(w, b) = \lambda|\textbf{w}|+\frac{1}{N}\sum_{i=1}^{N}(y_i - (w*x_i+b))^2 $$
#
# donde $ |\textbf{w}| = \sum_{j=1}^{D}|w_{j}| $, es la suma del valor absoluto de los pesos
# y $ \lambda $ es un hiperparámetro que controla la importancia de la regularización.
#
# Si $ \lambda = 0 $ nos encontramos en el caso de la regresión lineal. En cambio, si $ \lambda >> 0 $ el algoritmo de aprendizaje intentará que todos los $ w_j $ estén muy cerca de 0 o siendo 0 y el modelo puede acabar siendo muy simple y puede acabar en un subajuste (*underfitting*).
#
from sklearn.linear_model import Lasso
linreg_lasso = Lasso(alpha=1.0).fit(X_train, y_train)
# Miremos ahora cómo se comporta en los datos de entrenamiento:
print(linreg_lasso.score(X_train, y_train))  # R^2 on the training split
# Los resultados son muy malos, ¿y en el conjunto de datos de prueba?
print(linreg_lasso.score(X_test, y_test))  # R^2 on the held-out split
# ¿Por qué está pasando esto?, ¿cómo es posible que funcione tan mal?
#
# Si miramos el total de *features* que está seleccionando Lasso lo podemos entender fácilmente:
print(np.sum(linreg_lasso.coef_ != 0))  # count of non-zero coefficients = features Lasso kept
# ¡Únicamente está utilizando 4 columnas de todas las columnas disponibles! (más de 100)
#
# Como en el caso de `Ridge`, `Lasso` también tiene su hiperparámetro, la $ \lambda $ que podemos toquetear, que, al igual que en el caso de `Ridge`, en `scikit-learn` se llama `alpha`. El valor por defecto vuelve a ser `alpha=1`. Vimos que en ambas regularizaciones cuando incrementábamos el valor de $ \lambda $ (`alpha` en `scikit-learn`) los valores tendían más a 0. En el caso que estamos viendo quizá sea mejor usar un valor entre 0 y 1 porque incrementar aún más `alpha` nos podría dar incluso peores resultados todavía.
#
# Probemos con el valor 0.01:
linreg_lasso001 = Lasso(alpha=0.01).fit(X_train, y_train)
# La anterior advertencia nos indica que quizá debemos aumentar el número de iteraciones para que los valores converjan a una tolerancia aceptable. Vamos a hacer caso a los expertos:
linreg_lasso001 = Lasso(alpha=0.01, max_iter=100_000).fit(X_train, y_train)  # extra iterations so coordinate descent converges
# El error en este caso sobre los datos de entrenamiento es:
print(linreg_lasso001.score(X_train, y_train))
# Y sobre los datos de test:
print(linreg_lasso001.score(X_test, y_test))
# Y el número de columnas seleccionadas es:
print(np.sum(linreg_lasso001.coef_ != 0))
# ¡Esto ya es otra cosa! El modelo es más complejo porque tiene más dimensiones pero parece mucho más útil que nuestro intento inicial.
# # Ejercicios
# **Ejercicio 1**. En la regularización Ridge hemos probado únicamente el valor por defecto `alpha=1`. Realiza la misma ejecución con `alpha=0.1` y `alpha=0.01` y razona sobre los resultados, ¿qué valor deberíamos escoger?
# Ejemplo de solución:
#
# (**NOTA**: ejecuta la celda para cargar el código)
# %load ../../solutions/03_01_ridge.py
# **Ejercicio 2**. Después de la ejecución anterior, imagínate que queremos saber qué valor de alpha sería el más adecuado entre todas las posibilidades entre 0.005 y 1 con incrementos de 0.005, para asegurarnos que estamos escogiendo el mejor valor posible.
#
# Para ello, puedes utilizar el método [sklearn.linear_model.RidgeCV](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.RidgeCV.html) al que podemos pasar una lista de valores alphas y nos devolverá el alpha con el mejor resultado. Así de sencillo.
#
# Por ejemplo:
# +
from sklearn.linear_model import RidgeCV
# Candidate regularisation strengths; RidgeCV evaluates each by (generalized)
# cross-validation and stores the winner in `alpha_`.
alphas = [1, 0.1, 0.01]
linreg_ridge_cv = RidgeCV(alphas).fit(X_train, y_train)
print(linreg_ridge_cv.alpha_)  # best alpha found among the candidates
# -
# ¡Ahora te toca a ti!
# %load ../../solutions/03_02_ridgecv.py
# **Ejercicio 3**. Como hemos comentado, la regularización Lasso permite realizar una selección de las columnas. En concreto, Lasso retiene las *features* que considera más adecuadas para ajustar el modelo creado a los datos de entrenamiento, según el parámetro alpha especificado. Por lo tanto, cambiando el parámetro, obtendremos un conjunto u otro de columnas. Y además, si cambiamos los datos de entramiento, podríamos con el mismo parámetro alpha incluso tener una selección diferente de columnas.
#
# En este sentido, comprueba ahora qué resultado tenemos mediante una regularización Lasso para los valores 0.005 y 0.5 y qué número de columnas selecciona para cada caso, ¿qué conclusión podemos sacar de los valores obtenidos? Utiliza en ambos casos 100.000 iteraciones máximas.
# %load ../../solutions/03_03_lasso.py
# # Desarrollo matemático
# Los dos tipos de regularización más ampliamente usados se llaman regularización L1 y regularización L2. La idea es bastante simple, para crear un modelo regularizado modificamos la función objetivo añadiendo un término de penalización cuyo valor es más alto cuando el modelo es más complejo.
#
# Por simplicidad, vamos a ver esto de la regularización usando la regresion lineal. El mismo principio puede ser aplicado a una amplia variedad de modelos.
# Vamos a recordar la función objetivo a minimizar de la regresión lineal:
#
# $$ f(w, b) = \frac{1}{N}\sum_{i=1}^{N}(y_i - (w*x_i+b))^2 $$
#
# A la anterior función le metemos un término que penaliza la función de pérdida.
# Una función objetivo regularizada usando el tipo L1 sería de la siguiente forma:
#
# $$ f(w, b) = \lambda|\textbf{w}|+\frac{1}{N}\sum_{i=1}^{N}(y_i - (w*x_i+b))^2 $$
#
# donde $ |\textbf{w}| = \sum_{j=1}^{D}|w_{j}| $, es la suma del valor absoluto de los pesos
# y $ \lambda $ es un hiperparámetro que controla la importancia de la regularización.
#
# Si $ \lambda = 0 $ nos encontramos en el caso de la regresión lineal. En cambio, si $ \lambda >> 0 $ el algoritmo de aprendizaje intentará que todos los $ w_j $ estén muy cerca de 0 o siendo 0 y el modelo puede acabar siendo muy simple y puede acabar en un subajuste (*underfitting*). Tu trabajo va a ser ajustar correctamente este hiperparámetro.
# De la misma forma, una función objetivo regularizada usando el tipo L2 sería de la siguiente forma:
#
# $$ f(w, b) = \lambda||\textbf{w}||^2 +\frac{1}{N}\sum_{i=1}^{N}(y_i - (w*x_i+b))^2 $$
#
# donde $ ||\textbf{w}|| = \sum_{j=1}^{D}(w_{j})^2 $, es la suma del valor al cuadrado de los pesos
# y $ \lambda $ sigue siendo un hiperparámetro que controla la importancia de la regularización.
#
# Al igual que antes, si $ \lambda = 0 $ nos encontramos en el caso de la regresión lineal. En cambio, si $ \lambda >> 0 $ el algoritmo de aprendizaje intentará que todos los $ w_j $ estén muy cerca de 0 (pero no siendo cero).
# La diferencia básica entre la regularización L1 y la regularización L2 es que en el caso de la primera varios pesos acaban siendo cero ayudando a mostrar qué *features* van a ser importantes en el ajuste. Nos permite hacer *feature selection* lo que permite que nuestro modelo sea más explicable por la simplificación. El L2 generaliza más y suele dar mejores resultados. En la literatura se puede encontrar que la regularización L1 se le llama también **Lasso** y la regularización L2 se le llama también **Ridge**.
#
# Se pueden combinar las regularizaciones L1 y L2 (por ejemplo, *elastic net*). Estas regularizaciones se usan ampliamente en modelos lineales pero también en redes neuronales y otros tipos de modelos.
#
# L2 también nos permite resolverlo usando Gradiente Descendente ya que es diferenciable. En cambio, Lasso (y ElasticNet) no usan gradiente descendiente para minimizar su función de coste. Esto es debido a que $ |\textbf{w}| $ no es diferenciable y no podemos usar Gradiente Descendente. En este caso usan un algoritmo de optimización que se llama [*Coordinate descent*](https://en.wikipedia.org/wiki/Coordinate_descent).
# Existen otros tipos de regularizaciones que no vamos a ver como *dropout*, *batchnormalization*, *data augmentation* o *early stopping*.
# # Referencias
# * https://www.quora.com/What-is-regularization-in-machine-learning
# * http://scott.fortmann-roe.com/docs/BiasVariance.html
# * https://trainingdatascience.com/training/401-linear-regression/
# * https://www.thelearningmachine.ai/lle
# * https://web.stanford.edu/~hastie/Papers/glmpath.pdf
# * https://towardsdatascience.com/polynomial-regression-bbe8b9d97491
| notebooks/03_regularizacion/03-Regularizacion-PyConES2019.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/khushi-411/emotion-recognition/blob/main/data-visualization/density_plots.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="nYblfTfT0ZCl"
# # <font face = "Times New Roman"> <b> DENSITY PLOTS </b> </font>
# + [markdown] id="dE3XAEPn0evy"
# ### <font face = "Times New Roman"> <b> Importing Libraries </b> </font>
# + id="LQes5Ut-_Gtm"
import numpy as np
import pandas as pd
import time
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
import plotly.graph_objects as go
# + id="lxf5at1wLkig"
from google.colab import drive
# + id="EkkPCF-biPwB" colab={"base_uri": "https://localhost:8080/"} outputId="ebb566e8-966c-4fd4-cc4a-f5ff10087592"
"""
Connect google drive to google colab.
"""
drive.mount("/content/drive")
# + [markdown] id="edTKENXFqma5"
# ### <font face = "Times New Roman"> <b> Loading Dataset </b> </font>
# + id="bTpkBlZIUf4R"
"""
Function to load dataset.
"""
def load_data(file_path):
    """Read the CSV at *file_path* into a ``pandas.DataFrame``.

    A missing file is logged and the ``FileNotFoundError`` re-raised so the
    caller sees it; any other failure is logged and the legacy empty-string
    sentinel is returned (kept for compatibility with existing callers).
    """
    try:
        return pd.read_csv(file_path)
    except FileNotFoundError:
        print("File Not Found Error during loading data")
        raise
    except Exception as error:
        print("Exception Error: load_data")
        print(error)
        return ""
# + id="4Aur4I4SUf7M"
"""
Loading dataset.
"""
data = "/content/drive/MyDrive/er-output.csv"
# + id="P8z11mZyxnkk"
df = load_data(data)
# + id="P9EBGTQRMC9J"
df.drop(labels='Unnamed: 0', axis='columns', inplace=True, errors='raise')
# + id="Bj0vL_onwqN4"
emotion_label = {0:'anger', 1:'disgust', 2:'fear', 3:'happiness', 4: 'sadness', 5: 'surprise', 6: 'neutral'}
# + id="wB9kBkuCfLso"
"""
Converting dataframe into numpy array.
"""
df_all = df.iloc[:,:2304]
df_all_numpy = df_all.to_numpy()
df_label = df["emotion"]
df_label_numpy = df_label.to_numpy()
# + id="aARRMu5-a_wc"
def select_emotion(df_all, df_label, df, _emotion):
    """Select the rows belonging to one emotion type.

    Parameters
    ----------
    df_all : pandas.DataFrame of shape (n, 2304)
        Pixel columns only (``emotion`` column removed).
    df_label : pandas.Series
        Emotion label for every row.
    df : pandas.DataFrame of shape (n, 2305)
        Complete data-frame; its ``emotion`` column builds the row mask.
    _emotion : str or int
        Emotion code chosen by the user; coerced with ``int()``.

    Returns
    -------
    tuple
        ``(_data, _data_numpy, _data_label, _data_label_numpy)`` — the
        filtered frame/series plus their numpy counterparts.

    Raises
    ------
    ValueError, TypeError
        Propagated from ``int(_emotion)`` on non-numeric input.
    """
    # The original wrapped this in except clauses that only re-raised (and an
    # IndentationError handler that can never fire at runtime) — dead code,
    # removed. int() raises the documented exceptions by itself.
    _emotion = int(_emotion)
    mask = df["emotion"] == _emotion  # boolean row mask, computed once
    _data = df_all[mask]
    _data_label = df_label[mask]
    return _data, _data.to_numpy(), _data_label, _data_label.to_numpy()
# + id="fI--MuG2YBQ1"
def intensity_plot(_data, pixel):
    """Draw a histogram with a KDE overlay for one pixel column of *_data*.

    *pixel* is the numeric suffix: the column plotted is ``"pixel_" + pixel``.
    Plotting problems are reported to stdout rather than raised.
    """
    column = "pixel_" + pixel
    timer_start = time.time()
    try:
        grid = sns.displot(_data, x=column, kde=True)
        grid.set(xlabel=column, ylabel='Frequency')
        print("Time taken to plot: ", time.time() - timer_start)
    except AttributeError as error:
        print("Attribute Error Occured.")
        print("The error is ", error)
    except ValueError as error:
        print("Value Error Occured.")
        print("The error is ", error)
# + colab={"base_uri": "https://localhost:8080/", "height": 421} id="pkOPVhPgb3oT" outputId="ec582042-7aaf-4fc7-d5e4-30b3913a4760"
# Interactive driver: pick an emotion code and a pixel index, then plot the
# intensity distribution of that pixel for the selected emotion.
_emotion_type = input("Enter type of emotion whose distribution has to be ploted: ")
pixel = input("Enter pixel value you want to plot: pixel_")
_data, _data_numpy, _data_label, _data_label_numpy = select_emotion(df_all, df_label, df, _emotion_type)
intensity_plot(_data, pixel)
# + id="Qn6IVilyL190"
def intensity_plot_hue(_data, pixel):
    """Draw per-emotion KDE curves for one pixel column of *_data*.

    NOTE(review): the hue comes from the module-level ``df['emotion']`` column,
    not from *_data* — this assumes both refer to the same frame; confirm at
    call sites.
    """
    column = "pixel_" + pixel
    timer_start = time.time()
    try:
        grid = sns.displot(_data, x=column, hue=df['emotion'], kind='kde')
        grid.set(xlabel=column, ylabel='Frequency')
        print("Time taken to plot: ", time.time() - timer_start)
    except AttributeError as error:
        print("Attribute Error Occured.")
        print("The error is ", error)
    except ValueError as error:
        print("Value Error Occured.")
        print("The error is ", error)
# + colab={"base_uri": "https://localhost:8080/", "height": 422} id="8eeFwjIEMBtB" outputId="723af439-d407-414d-d646-4f9df35c6525"
# Interactive driver for the hue plot. `_emotion_type` is read but unused here:
# the hue plot always shows every emotion of the full frame `df`.
_emotion_type = input("Enter type of emotion whose distribution has to be ploted: ")
pixel = input("Enter pixel value you want to plot: pixel_")
#_data, _data_numpy, _data_label, _data_label_numpy = select_emotion(df_all, df_label, df, _emotion_type)
intensity_plot_hue(df, pixel)
# + id="ZLh8cgVbsqMq"
def scatter_plot(_data, pixel_x, pixel_y):
    """Render an interactive plotly scatter of two pixel columns of *_data*.

    *pixel_x* / *pixel_y* are numeric suffixes; the plotted columns are
    ``"pixel_" + pixel_x`` and ``"pixel_" + pixel_y``.
    """
    timer_start = time.time()
    try:
        figure = px.scatter(_data, x="pixel_" + pixel_x, y="pixel_" + pixel_y)
        figure.show()
        print("Time taken to plot: ", time.time() - timer_start)
    except AttributeError as error:
        print("Attribute Error Occured.")
        print("The error is ", error)
    except ValueError as error:
        print("Value Error Occured.")
        print("The error is ", error)
# + id="F5hY99_4sGDl"
# Interactive driver for the scatter plot of two pixel columns.
_emotion_type = input("Enter the emotion type whose mean of pixel values you want to find: ")
print("Data Type of _emotion_type: ", type(_emotion_type))
pixel_x = input("Enter pixel value you want to plot (x-axis): pixel_")
# Bug fix: the second prompt previously overwrote `pixel_x`, leaving `pixel_y`
# undefined and raising NameError at the scatter_plot() call below.
pixel_y = input("Enter pixel value you want to plot (y-axis): pixel_")
_data, _data_numpy, _data_label, _data_label_numpy = select_emotion(df_all, df_label, df, _emotion_type)
scatter_plot(_data, pixel_x, pixel_y)
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="Kan3sLCTcDzc" outputId="a3e23b6c-cf69-4db0-f202-215918f3cd66"
# Scatter coloured/sized by other pixel columns of the same rows.
fig = px.scatter(_data, x="pixel_1", y="pixel_10", color="pixel_25",
                 size='pixel_1', hover_data=['pixel_10'])
fig.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="ow6X6M5De3ku" outputId="4150096d-3798-4d1f-cc19-337e3081ea05"
# graph_objects variant: markers coloured by a third pixel column.
fig = go.Figure(data=go.Scatter(x=_data['pixel_1'],
                                y=_data['pixel_20'],
                                mode='markers',
                                marker_color=_data['pixel_30'],
                                text=_data['pixel_1'])) # hover text goes here
fig.update_layout(title='Density Plot')
fig.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="lfQgSbRQfgEE" outputId="f6506aeb-93d9-45bd-ef78-c74ee4b07908"
# NOTE(review): marker colours use np.random.randn(256) — this length must
# equal len(_data) or plotly errors; confirm the selected emotion has 256 rows.
fig = go.Figure(data=go.Scatter(
    y = _data["pixel_29"],
    mode='markers',
    marker=dict(
        size=16,
        color=np.random.randn(256), #set color equal to a variable
        colorscale='Viridis', # one of plotly colorscales
        showscale=True
    )
))
fig.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="5XD_fB5ggaJq" outputId="fec61fb3-af3e-40e6-d860-8425185fbb52"
# NOTE(review): here the colour vector is randn(255), inconsistent with the
# randn(256) above — one of the two lengths is presumably wrong; verify.
fig = go.Figure(data=go.Scattergl(
    x = _data["pixel_10"],
    y = _data["pixel_30"],
    mode='markers',
    marker=dict(
        color=np.random.randn(255),
        colorscale='Viridis',
        line_width=1
    )
))
fig.show()
| data-visualization/density_plots.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="k9MKGXRIPqEp"
# #**Listas Encadeadas**
# + id="Hg7_j5GyPoK2"
import numpy as np
# + id="kvri4CA8P_Fu"
class No:
    """Singly-linked-list node holding a value and a pointer to the next node."""
    def __init__(self, valor):
        self.valor = valor    # payload stored in this node
        self.proximo = None   # next node in the chain (None = end of list)
    def mostra_no(self):
        """Print this node's value."""
        print(self.valor)
# + colab={"base_uri": "https://localhost:8080/"} id="nh3LlUtTQp-Q" outputId="6b215223-965e-4ede-9838-9f3efedc8833"
no1 = No(1)
no1.mostra_no()
# + colab={"base_uri": "https://localhost:8080/"} id="RvNHqFI6Q0cG" outputId="d65d596c-1e56-4f18-dd26-89653f701532"
no2 = No(2)
no2.mostra_no()
# + id="UPnjFTnzRDe6"
class ListaEncadeada:
    """Singly linked list with head insertion, search and removal."""
    def __init__(self):
        self.primeiro = None  # head of the list (None = empty list)
    def insere_inicio(self, valor):
        """Insert *valor* at the head of the list (O(1))."""
        novo = No(valor)
        novo.proximo = self.primeiro
        self.primeiro = novo
    def mostrar(self):
        """Print every value from head to tail."""
        if self.primeiro is None:
            print('A lista está vazia')
            return None
        atual = self.primeiro
        while atual is not None:
            atual.mostra_no()
            atual = atual.proximo
    def pesquisa(self, valor):
        """Search for *valor*; print the outcome and return the node (or None).

        Bug fix: the original printed the result BEFORE searching, using a
        misspelled local (`pequisa`) and an unrelated global name — raising
        NameError or reporting garbage. The search now runs first and the
        messages reflect its actual outcome.
        """
        if self.primeiro is None:
            print('A lista está vazia')
            return None
        atual = self.primeiro
        while atual.valor != valor:
            if atual.proximo is None:
                print('Não encontrado')
                return None
            atual = atual.proximo
        print('Encontrado: ', atual.valor)
        return atual
    def excluir_inicio(self):
        """Remove and return the head node; None (with message) if empty."""
        if self.primeiro is None:
            print('A lista está vazia')
            return None
        temp = self.primeiro
        self.primeiro = self.primeiro.proximo
        return temp
    def excluir_posicao(self, valor):
        """Remove and return the first node holding *valor*; None if absent."""
        if self.primeiro is None:
            print('A lista está vazia')
            return None
        atual = self.primeiro
        anterior = self.primeiro
        while atual.valor != valor:
            if atual.proximo is None:
                return None  # value not present
            anterior = atual
            atual = atual.proximo
        if atual is self.primeiro:
            # Removing the head: just advance the head pointer.
            self.primeiro = self.primeiro.proximo
        else:
            anterior.proximo = atual.proximo
        return atual
# + [markdown] id="WT0UrtSnT6Lw"
# ###Insere no início
# + id="LlLIM1ZgUCLn"
lista = ListaEncadeada()
# + id="nMUDCRPgUHYN"
lista.insere_inicio(1)
# + colab={"base_uri": "https://localhost:8080/"} id="TcsA4qA2UVaT" outputId="eff77b63-688d-437f-a718-8b891d85fda3"
lista.primeiro  # head node of the list
# + colab={"base_uri": "https://localhost:8080/"} id="NEsjkIacUOgf" outputId="3776616b-ac62-4213-d2da-168b268b0942"
lista.mostrar()
# + colab={"base_uri": "https://localhost:8080/"} id="uMqK4BgBUpDL" outputId="1e3b77ac-5bf7-439d-8f69-2d1e26c38ae2"
# Head insertion: values print in reverse insertion order (4, 3, 2, 1, 1).
lista.insere_inicio(1)
lista.insere_inicio(2)
lista.insere_inicio(3)
lista.insere_inicio(4)
lista.mostrar()
# + [markdown] id="Bv3T5vX3WqhI"
# ###Excluir do início
# + id="_0woBkNnWwGC"
lista = ListaEncadeada()
# + colab={"base_uri": "https://localhost:8080/"} id="QBG32wz0W8Q2" outputId="f94934e6-8322-4e1e-b3a2-1463958fa49d"
lista.insere_inicio(1)
lista.insere_inicio(2)
lista.insere_inicio(3)
lista.insere_inicio(4)
lista.mostrar()
# + id="HRvn8lHYZJPB"
lista.insere_inicio(1)
# + colab={"base_uri": "https://localhost:8080/"} id="wPd0ifc9ZMv4" outputId="ebaf5e6e-2bd2-4927-e19c-d92a27b5d161"
lista.excluir_inicio()  # returns the removed head node
# + colab={"base_uri": "https://localhost:8080/"} id="glgsYwCxXAY1" outputId="75efd6af-90ce-4dfc-bd82-c1cea6e2f238"
lista.excluir_inicio()
lista.mostrar()
# + colab={"base_uri": "https://localhost:8080/"} id="wD9i-NH3Y1xY" outputId="223a580c-ec50-4be7-b664-3972311831dd"
lista.excluir_inicio()
lista.excluir_inicio()
# + [markdown] id="zWH4K0iUayDR"
# ###Pesquisa
# + colab={"base_uri": "https://localhost:8080/"} id="eA3p4dmqa3bE" outputId="4c8189b5-318b-4d6c-c3ec-fae0a0c0aa66"
lista.insere_inicio(1)
lista.insere_inicio(2)
lista.insere_inicio(3)
lista.insere_inicio(4)
lista.mostrar()
# + colab={"base_uri": "https://localhost:8080/"} id="DpMpJDbna83Q" outputId="16b50015-7119-4420-85b1-4e9cf34f403d"
# Search returns the matching node (or None if absent).
pesquisa = lista.pesquisa(3)
# + [markdown] id="Ft7KfW6les7Z"
# ###Excluir posição
# + colab={"base_uri": "https://localhost:8080/"} id="IWQnicotexrr" outputId="9957d663-27e4-4e58-8779-70b5aaba636e"
lista.insere_inicio(1)
lista.insere_inicio(2)
lista.insere_inicio(3)
lista.insere_inicio(4)
lista.insere_inicio(5)
lista.mostrar()
# + colab={"base_uri": "https://localhost:8080/"} id="otMxrjmOe7Nu" outputId="298f90a5-2bef-4e14-a220-4afaca218d06"
# Remove the first node holding the given value, then show the list.
lista.excluir_posicao(3)
lista.mostrar()
| ListaEncade.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Contextualization
# SIB(Small Industries Bank) loans money to companies in exchange for the promise of repayment. Given a number of metrics, we are to find whether a client for the bank will be defaulting on their loan or not.
# # Loading imports and data:
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import sklearn.metrics
from sklearn.model_selection import train_test_split, KFold, GridSearchCV, RandomizedSearchCV
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
train = pd.read_csv("data/train.csv")
test = pd.read_csv("data/test.csv")
# # Understanding the data
train.head()
# We have 12 different columns, after discounting the default status, with only 6 of them being purely numerical and one of them being the index which will not be used. We will require to categorize some of the features as well as create a few new ones
train.isnull().sum()
test.isnull().sum()
# Only one record in the train set is empty or null, we will remove it from the data set altogether as it is not a significant portion of the records.
train = train.dropna()
train.isnull().sum()
train.default_status.value_counts()
# **id**: Is a non relevant factor and will be dropped later
#
# **Industry**: We will start by analyzing if type of industry has any influence on probability of defaulting loan
plt.figure(figsize=(20,5))
ax = sns.countplot(x="industry", hue="default_status", data=train)
train[['industry', 'default_status']].groupby(['industry']).count().sort_values(by='default_status', ascending=False)
plt.figure(figsize=(20,5))
g = sns.barplot(x="industry",y="default_status",data=train)
g = g.set_ylabel("Default Probability")
train[['industry', 'default_status']].groupby(['industry']).mean().sort_values(by='default_status', ascending=False)
# None of the Industries show a particularly high rate of defaulting, however due to the low amount of data in some of them, their variance seems to be less stable, such as with Energy Industry. Values will be converted into numerical categories.
# **State** More data that is not numerically categorized. If no pattern is identified from the different data, this feature will be considered for removal.
plt.figure(figsize=(20,5))
ax = sns.countplot(x="state", hue="default_status", data=train)
plt.figure(figsize=(20,5))
# Bar height = empirical default rate per state.
g = sns.barplot(x="state",y="default_status",data=train)
g = g.set_ylabel("Default Probability")
train[['state', 'default_status']].groupby(['state']).mean().sort_values(by='default_status', ascending=False)
# From the above, a higher chance of incurring a default is observed in states where the economy for small businesses is not thriving as highly as in the more densely populated states. Such is the example of Denver, Arizona, Georgia and Arkansas. However in contrast to this, some of the states that have much more active economies for small businesses, such as California and Washington, are close to an even split in their chances of defaulting. As such this feature will be divided in 3 categories: High Risk for those states with a 50% chance or higher, Mid Risk for those between 21 and 49% risk, and Low Risk/Slow Economy for those with 20% or less risk.
# **request date**: As a non discrete value, it needs to be discretized in a way that allows us to get insights into the data
train['request_date'] = pd.to_datetime(train['request_date'])
print(train.request_date.max())
print(train.request_date.min())
dates_train = train[['request_date', 'default_status']]
# NOTE(review): assigning into a column-selected slice may trigger pandas'
# SettingWithCopyWarning — consider .copy() on the line above; confirm.
dates_train['month_year'] = pd.to_datetime(dates_train.request_date).dt.to_period('M')
dates_train = dates_train.sort_values(by=['month_year'])
dates_train.head()
plt.figure(figsize=(20,5))
ax = sns.countplot(x="month_year", hue="default_status", data=dates_train)
# With only 1 year of data and no previous historical records, it is not a significant factor in the analysis. This feature will be later dropped
# **term**: We will observe the distribution of terms.
fig = plt.figure(figsize=(10,8),)
# Overlay the term distributions of defaulters vs non-defaulters.
axis = sns.kdeplot(train.loc[(train['default_status'] == 1),'term'] , color='g',shade=True, label='Defaulted')
axis = sns.kdeplot(train.loc[(train['default_status'] == 0),'term'] , color='b',shade=True,label='Did Not default')
plt.title('Term Distribution - Default vs No Default', fontsize = 20)
plt.xlabel("Term", fontsize = 12)
plt.ylabel('Frequency', fontsize = 12);
# Short term loans appear to be more likely to default. As such we will bin them in Short term, Mid term and Long term during feature engineering.
#
# **employee count**: We will also analyse its distribution:
fig = plt.figure(figsize=(20,8),)
axis = sns.kdeplot(train.loc[(train['default_status'] == 1),'employee_count'] , color='g',shade=True, label='Defaulted')
axis = sns.kdeplot(train.loc[(train['default_status'] == 0),'employee_count'] , color='b',shade=True,label='Did Not default')
plt.title('Employee Count Distribution - Default vs No Default', fontsize = 20)
plt.xlabel("employee_count", fontsize = 12)
plt.ylabel('Frequency', fontsize = 12);
# Companies with very few employees do appear to have more chance of defaulting, we will be analyzing this during feature transformation and deciding on dropping the feature or not.
#
# **business_type**: Analysing the correlation with defaults:
plt.figure(figsize=(20,5))
# Bar height = empirical default rate per business type.
g = sns.barplot(x="business_type",y="default_status",data=train)
g = g.set_ylabel("Default Probability")
train[['business_type', 'default_status']].groupby(['business_type']).mean().sort_values(by='default_status', ascending=False)
| .ipynb_checkpoints/SIB Loan Default-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# # Multiwell Tile Display
# ### View multiple tile acquisition series in the tileviewer
#
# In the following cell,
#
# 1. Edit the `output_directory` to be where we should build the 'ImagePyramid' multi-level image. Note that this directory is both a destination and a name (will be created if it doesn't exist) and *may become quite large* (larger than sum of ImagePyramids for individual tile series)
#
# 2. Edit the `series_glob_pattern` to find your series on the cluster. Alternatively, enter your series manually as a list in `tile_series`.
#
# Run the cell, and if the expected number of output series is displayed, run the whole notebook.
# Upon completion of the ImagePyramid a tileviewer will be opened in your default webbrowser.
#
# ### Notes
# - we assume tile series are on a cluster, as '.pcs' files (spooled as pzf files)
# - this computer must have PYME installed, and be connected to that cluster
# - The concatenated datasource used in this will never be itself saved to disk, but the directories and files supporting the ImagePyramid we display in the tileviewer will be saved to `output_directory` and may be large (for example: using 96 series, each about 400 pixel diameter and spaced on a 9 mm grid [typical 96 well plate], the resulting directory takes up 15.5 GB)
# + tags=[]
from PYME.IO import clusterIO, MetaDataHandler
from PYME.Analysis import tile_pyramid
from PYME.IO.image import ImageStack
from PYME.IO.DataSources import ClusterPZFDataSource, BaseDataSource
import numpy as np
from PYME.Analysis.tile_pyramid import get_position_from_events
import subprocess
import sys
import time
import requests
import os

# + tags=[]
# Destination/name of the combined ImagePyramid (created if absent; may grow large).
output_directory = '/media/smeagol/SSD/ultron_the_supersource'
# Cluster glob matching the individual tile series ('.pcs' files) to combine.
series_glob_pattern = 'Bergamot/2020_11_3/EMC0406S1/try5/00*/*.pcs'
# Bug fix: clusterIO.cglob was previously called in the cell ABOVE its import
# cell, raising NameError on a fresh top-to-bottom run; imports now come first.
tile_series = ['pyme-cluster:///' + s for s in clusterIO.cglob(series_glob_pattern)]
print('%d tile series selected' % len(tile_series))
class ConcatenatedDataSource(BaseDataSource.BaseDataSource):
    """Present a list of PYME datasources as one long frame sequence.

    Global frame ``i`` maps to (series, frame-within-series) via the
    ``_indices`` lookup table; a per-frame stage (x, y) position is
    precomputed from each series' acquisition events into ``_positions``.
    """
    moduleName = 'booyah'
    def __init__(self, datasource_list):
        # Underlying per-series datasources, concatenated in list order.
        self._datasources = datasource_list
        self._n_datasources = len(self._datasources)
        # Number of frames contributed by each series (shape[2] = frame axis).
        self._n_frames = np.zeros(self._n_datasources, dtype=int)
        for ind in range(self._n_datasources):
            # fixme - care about startat? all zero for my spooled series
            self._n_frames[ind] = self._datasources[ind].shape[2]
        self._n_frames_total = self._n_frames.sum()
        # Global frame index -> (series index, frame index within that series).
        self._indices = np.zeros(self._n_frames_total,
                                 dtype=[('series', int), ('frame', int)])
        # Global frame index -> stage (x, y) position derived from events.
        self._positions = np.zeros(self._n_frames_total,
                                   dtype=[('x', float), ('y', float)])
        start = 0
        for ind in range(self._n_datasources):
            self._indices[start:start + self._n_frames[ind]]['series'] = ind
            frames = np.arange(self._n_frames[ind], dtype=int)
            self._indices[start:start + self._n_frames[ind]]['frame'] = frames
            x_map, y_map = get_position_from_events(self._datasources[ind].getEvents(),
                                                    self._datasources[ind].mdh)
            self._positions['x'][start:start + self._n_frames[ind]] = x_map(frames)
            self._positions['y'][start:start + self._n_frames[ind]] = y_map(frames)
            start += self._n_frames[ind]
        # Metadata: copy of the FIRST series' handler — assumes all series
        # share compatible metadata (camera, voxel size); TODO confirm.
        self.mdh = MetaDataHandler.NestedClassMDHandler()
        self.mdh.update(self._datasources[0].mdh)
        # Frame-shape cache, filled lazily by getSliceShape().
        self.fshape = None
    def getSlice(self, ind):
        """Return global frame *ind* by delegating to the owning series."""
        ds_ind, frame_ind = self._indices[ind]['series'], self._indices[ind]['frame']
        return self._datasources[ds_ind].getSlice(frame_ind)
    def getSliceShape(self):
        """Return the shape of a single frame, cached on first call."""
        if self.fshape is None:
            self.fshape = self.getSlice(0).shape
        return self.fshape
    def getNumSlices(self):
        """Total number of frames across all concatenated series."""
        return self._n_frames_total
    def getEvents(self):
        # NOTE(review): returns no events even though per-series events were
        # consumed in __init__ — confirm downstream consumers don't need them.
        return []
    def getMetadata(self):
        """Return the (copied, first-series) metadata handler."""
        return self.mdh
# + tags=["outputPrepend"]
# build up our hacked/mega datasource
datasources = []
for series in tile_series:
    datasources.append(ClusterPZFDataSource.DataSource(series))
ultron_the_supersource = ConcatenatedDataSource(datasources)
im = ImageStack(ultron_the_supersource)
# -
# Build the multi-level ImagePyramid on disk, flat-fielding with the camera
# AD offset as the dark level (taken from the first series' metadata).
tile_pyramid.tile_pyramid(output_directory, im.data, im.data._positions['x'], im.data._positions['y'], im.data.mdh, dark=im.data.mdh['Camera.ADOffset'])
# +
# Point an already-running tileviewer at the new pyramid, or start one.
if not os.path.isabs(output_directory):
    # TODO - should we be doing the `.isabs()` check on the parent directory instead?
    from PYME.IO.FileUtils import nameUtils
    # NOTE(review): `tiledir` is computed but never used below — the requests/
    # subprocess calls still pass `output_directory`. Looks like a bug; confirm
    # whether `tiledir` was meant to replace it.
    tiledir = nameUtils.getFullFilename(output_directory)
try: # if we already have a tileviewer serving, change the directory
    requests.get('http://127.0.0.1:8979/set_tile_source?tile_dir=%s' % output_directory)
except requests.ConnectionError: # start a new process
    try:
        # Windows-only flag; the attribute access raises AttributeError elsewhere.
        pargs = {'creationflags': subprocess.CREATE_NEW_CONSOLE}
    except AttributeError: # not on windows
        pargs = {'shell': True}
    subprocess.Popen('%s -m PYME.tileviewer.tileviewer %s' % (sys.executable, output_directory), **pargs)
    time.sleep(3)  # give the freshly spawned server a moment to come up
# + language="html"
# <iframe src="http://127.0.0.1:8979/" width="1200" height="1000"></iframe>
| multiwell_tile_display.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Getting Started with EDA
#
# Before you develop a score card model, getting a well understanding of the data you are using plays an import role. In this tuorial, we will discuss exploratory data analysis module included in `yasc`.
# ## Imports
#
# First we import `yasc` and check its version.
# +
# imports
import yasc
from yasc.data import german_data
from yasc.eda import (
missing_stat,
numeric_stat,
categorical_stat,
corr_analysis,
describe
)
import pandas as pd
import numpy as np
# show version
yasc.__version__
# -
# ## Load data
#
# Here we use `german_data()` to load german credit data and show its first five rows.
# load german credit data
data = german_data()
data.head()
# ## Check missing values I
#
# `missing_stat()` function can help us find out columns with missing values and output missing rates.
missing_stat(data)
# only include columns with missing values
missing_stat(data, only_missing_columns=True)
# ## Check missing values II
#
# The german data happens to have no missing values, let's create a data frame with missing values to test `missing_stat()` function.
# create a data frame with missing values (NaN in columns 'b' and 'c')
df1 = pd.DataFrame({'a': [1, 2, 3, 4], 'b': [np.nan, 5, 6, 7], 'c': [8, 9, np.nan, 10]})
missing_stat(df1)
# check missing statistics of a single column
missing_stat(df1, "b")
# ## Describe numeric columns
# check statistics of numeric columns
numeric_stat(data)
# ## Describe categorical columns
# check statistics of categorical columns
categorical_stat(data)
# ## Generate descriptive statistics
#
# `describe()` function is used to generate descriptive statistics of observed data. Beyond what we get from `pandas.core.frame.DataFrame.describe()`, from `describe()` we can check missing values in columns and type of column (numeric or categorical).
describe(df1)
# get descriptive statistics of the full dataset
describe(data)
# ## Correlation analysis
corr, ax = corr_analysis(data, show_plot=True)
| docs/tutorial/get_started_with_eda.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Precillieo/Plotly-Tutorial-For-Beginners/blob/main/Plotly_WIDS.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="7nJ3SZnXfwm4"
# # PLOTLY LIBRARY
# Plotly is a Python graphing library. It makes exotic, interactive, publication-quality graphs online.
#
# * Importation of libraries and data loading
# * Different Types of Charts
# * Line Chart
# * Scatter Chart
# * Bar Chart
# * Pie Chart
# * Bubble Chart
# * Histogram
# * WordCloud
# * Box Plot
# * Scatter Matrix Plot
#
#
#
# + id="R3XFszUh-UUa" colab={"base_uri": "https://localhost:8080/"} outputId="676dfb3c-89fc-42e1-d9e6-83ecc4c8f07b"
pip install plotly
# + id="qK0Qqxfd_trJ"
import pandas as pd
import numpy as np
from plotly.offline import init_notebook_mode, iplot, plot
import plotly as py
#init_notebook_mode(connected= True)
import plotly.graph_objs as go .
from wordcloud import WordCloud
import matplotlib.pyplot as plt
# + [markdown] id="R10E8WlujGgD"
# * Plotly was designed to render graphs on a web server or a local port. In order to render the plots inside the jupyter notebook, the notebook mode of plotly must be initialized. Without initializing notebook mode, no plotly plots can be visualized within this notebook (or any jupyter notebook).
#
# * To start creating graphs using plotly, we need to import 'graph_objs' modules
#
# * iplot() plots the figure(fig) that is created by data and layout
#
# # 3 Parts To Every Graph
# * Data or Trace: This is usually a Python list object and contains all the data that we would want to plot. A trace is a collection of data points and their specifications that we would want to plot.
#
# * Layout: This object is used to change the features of the graph like axis titles, spacing, fonts etc. which are unrelated to the data itself.
# * Figure: This is a dictionary-like object which contains both the data object and the layout object and this defines the graph.
# + id="FnbINgksBWx4" colab={"base_uri": "https://localhost:8080/", "height": 479} outputId="a21c832b-99de-4b5e-985d-2c2a7be08a40"
# Load the Times Higher Education world university ranking data.
data= pd.read_csv('/content/drive/MyDrive/Plotly Class/timesData.csv')
data.head()
# + id="x3Ono38PCZus" colab={"base_uri": "https://localhost:8080/"} outputId="f0759397-36bb-4ac1-c14b-25dbdf2fd4df"
data.info()
# + id="sKan29zgUDNl"
# Generating a few reusable slices of the data in one cell:
# top-N rows per year plus per-year single columns used by later charts.
df14= data[data.year==2014].iloc[:100, :]
df15= data[data.year==2015].iloc[:100, :]
df16= data[data.year==2016].iloc[:100, :]
df2014= data[data.year== 2014].iloc[:3, :]
df2016= data[data.year== 2016].iloc[:10, :]
df12= data[data.year== 2016].iloc[:20, :]
x2011 = data.student_staff_ratio[data.year == 2011]
x2012 = data.student_staff_ratio[data.year == 2012]
x11 = data.country[data.year == 2012]
x2015 = data[data.year == 2015]
# + [markdown] id="fx0YsMJ2Djd0"
# # Line Graph
# # Citation and Teaching vs World Rank of Top 100 Universities
# + id="dzN9GmzRCpnJ" colab={"base_uri": "https://localhost:8080/", "height": 542} outputId="54de3c6e-7cfb-4616-ae04-80362e434828"
# Line+marker chart: citations and teaching scores versus world rank
# for the top 100 universities; hover text shows the university name.
df = data.iloc[:100, :]

first_trace = go.Scatter(
    x=df.world_rank, y=df.citations,
    mode='lines+markers', name='citations',
    marker={'color': 'rgba(16, 112, 2, 0.8)'},
    text=df.university_name,
)
second_trace = go.Scatter(
    x=df.world_rank, y=df.teaching,
    mode='lines+markers', name='teaching',
    marker={'color': 'rgba(0, 200, 255, 0.8)'},
    text=df.university_name,
)

# CAUTION: this rebinds `data` (previously the DataFrame) to a plain
# list; later cells rely on the per-year slices computed earlier.
data = [first_trace, second_trace]
layout = {
    'title': 'Citation and Teaching vs World Rank of Top 100 Universities',
    'xaxis': {'title': 'World_Rank', 'ticklen': 5, 'zeroline': False},
}
fig = {'data': data, 'layout': layout}
iplot(fig)
# + [markdown] id="zlwyhUR9NUti"
# # ScatterPlot
# # Citation vs world rank of top 100 universities with 2014, 2015 and 2016 years
# + id="iKLhB_1XG3bE" colab={"base_uri": "https://localhost:8080/", "height": 542} outputId="4bde949a-b180-4828-b8ca-169cf169b61d"
# Scatter of citations vs world rank for the top-100 universities of
# 2014, 2015 and 2016, one trace per year.
# FIX: the hover labels previously used `df.university_name` (the overall
# top-100 slice) for all three traces, so 2015/2016 points showed the
# wrong university names; each trace now labels from its own year slice.
fst_trace= go.Scatter(x= df14.world_rank,
                    y= df14.citations,
                    mode= 'markers',
                    name= '2014',
                    marker= dict(color= 'rgba(255, 128, 255, 0.8)'),
                    text= df14.university_name)
sec_trace= go.Scatter(x= df15.world_rank,
                    y= df15.citations,
                    mode= 'markers',
                    name= '2015',
                    marker= dict(color= 'rgba(255, 8, 255, 0.8)'),
                    text= df15.university_name)
trd_trace= go.Scatter(x= df16.world_rank,
                    y= df16.citations,
                    mode= 'markers',
                    name= '2016',
                    marker= dict(color= 'rgba(0, 200, 255, 0.8)'),
                    text= df16.university_name)
data= [fst_trace, sec_trace, trd_trace]
layout= dict(title= 'Citation vs world rank of top 100 universities with 2014, 2015 and 2016 years',
            xaxis= dict(title= 'World_Rank', ticklen= 5, zeroline= False),
            yaxis= dict(title= 'Citations', ticklen= 5, zeroline= False))
fig= dict(data= data, layout= layout)
iplot(fig)
# + [markdown] id="rHp_c0krYHh8"
# # Bar Graph
# # citations and teaching of top 3 universities in 2014 (style1)
#
#
# + id="_kqRGZSWR93Z" colab={"base_uri": "https://localhost:8080/", "height": 542} outputId="9f32836e-4c23-4149-d7de-27c7f6229170"
# Grouped bar chart: citations vs teaching for the 2014 top-3
# universities; hover text shows the country.
trace1 = go.Bar(
    x=df2014.university_name,
    y=df2014.citations,
    name='citations',
    marker={'color': 'rgba(255, 128, 255, 0.8)',
            'line': {'color': 'rgb(0,0,0)', 'width': 1.5}},
    text=df2014.country,
)
trace2 = go.Bar(
    x=df2014.university_name,
    y=df2014.teaching,
    name='teaching',
    marker={'color': 'rgba(0, 200, 255, 0.8)',
            'line': {'color': 'rgb(0,0,0)', 'width': 1.5}},
    text=df2014.country,
)
data = [trace1, trace2]
layout = go.Layout(barmode='group')
fig = go.Figure(data=data, layout=layout)
iplot(fig)
# + [markdown] id="shipTet-flxm"
# # Bar Graph 2
# + id="l08ZAyMWeHC2" colab={"base_uri": "https://localhost:8080/", "height": 542} outputId="0f547083-a255-43da-e982-f1004271dbad"
# Same data as above, but stacked instead of grouped.
trace1 = go.Bar(
    x=df2014.university_name,
    y=df2014.citations,
    name='citations',
    type='bar',  # redundant for go.Bar; kept from the original spec
)
trace2 = go.Bar(
    x=df2014.university_name,
    y=df2014.teaching,
    name='teaching',
    type='bar',
)
data = [trace1, trace2]
layout = {
    'title': 'citations and teaching of top 3 universities in 2014',
    'xaxis': {'title': 'Top 3 Universities'},
    'barmode': 'stack',
}
fig = go.Figure(data=data, layout=layout)
iplot(fig)
# + [markdown] id="B7Vd7NzZP1MS"
# # Bar Graph
# # Horizontal bar charts. (style3) Citation vs income for universities In 2016
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="39LlH-9aBDU1" outputId="e94fac73-768d-4fd5-cfcb-1462c1330f44"
df2016.info()
# + id="7w0ByOCfhjXC"
# Plain Python lists for the two-panel figure below: research scores,
# income (cast to float) and university names.
x_res= [x for x in df2016.research]
y_inc= [float(x) for x in df2016.income]
x_name= [x for x in df2016.university_name]
y_name= [x for x in df2016.university_name]
from plotly import tools
# + id="kTm_WI1wVpOE" colab={"base_uri": "https://localhost:8080/", "height": 610} outputId="0cfb8dd8-6416-45d5-92a7-86e6a6462461"
# Left subplot: horizontal bars of research score per university.
trace= go.Bar(x=x_res,
                y= y_name,
                marker= dict(color= 'rgba(0, 200, 255, 0.8)', line= dict(color='rgba(0, 0, 0)', width= 1.5)),
                name= 'research',
                orientation= 'h')
# Right subplot: line+marker plot of income per university.
traces= go.Scatter(x=y_inc,
                y=x_name,
                mode= 'lines+markers',
                line=dict(color='rgb(63, 72, 204)'),
                name= 'income')
layout= dict(title= 'Citation and Income')
# The commented-out options below are leftover axis/domain styling
# experiments from the original author; kept for reference.
#yaxis= dict(showticklabels= True, domain= [0, 0.85]),
#yaxis2= dict(showticklabels= False, showline= True, linecolor= 'rgba(102, 102, 102, 0.8)', linewidth= 2, domain= [0,0.85]),
#xaxis= dict(showline= False, zeroline= False, showticklabels= True, showgrid= True, domain= [0, 0.42]),
#xaxis2= dict(showline= False, zeroline= False, showticklabels= True, showgrid= True, domain= [0.47, 0], side= 'top', dtick= 25),
#legend= dict(x= 0.029, y= 1.038, font= dict(size= 10)),
#margin=dict(l=200, r=20,t=70,b=70),
#paper_bgcolor='rgb(248, 248, 255)',
#plot_bgcolor='rgb(248, 248, 255)')
# Annotate each point with its rounded value; xref/yref 'x2'/'y2'
# target the second (right) subplot axes, 'x1'/'y1' the first.
annotations= []
x_s= np.round(x_res, decimals= 2)
x_c= np.rint(y_inc)
for a , b, c in zip(x_c, x_s, x_name):
    annotations.append(dict(xref= 'x2', yref= 'y2', y= c, x= a-4, text='{:,}'.format(a),
                        font= dict(family= 'Arial', size= 12, color='rgb(63, 72, 204)'), showarrow= False))
    annotations.append(dict(xref= 'x1', yref= 'y1', y= c, x= b + 3, text=str(b),
                        font= dict(family= 'Arial', size= 12, color='rgb(171, 50, 96)'), showarrow= False))
layout['annotations']= annotations
# 1x2 subplot grid: bars go to (1,1), the income line to (1,2).
fig= tools.make_subplots(rows= 1, cols= 2, specs=[[{}, {}]], shared_xaxes= True, shared_yaxes= False, vertical_spacing= 0.001)
fig.append_trace(trace, 1, 1)
fig.append_trace(traces, 1, 2)
fig['layout'].update(layout)
iplot(fig)
# + [markdown] id="a1ZxDX39D5cp"
# # Pie Chart
# # Student Rate at Top 10 Universities in 2016
# + id="Vgx0hc3XYFvb" colab={"base_uri": "https://localhost:8080/", "height": 542} outputId="95a6d693-d9b9-4106-c2b9-56eeb7b06a5c"
# Donut chart: share of students across the 2016 top-10 universities.
# num_students uses ',' as the decimal mark, so normalise it to '.'.
pie = df2016.num_students
list_pie = [float(x.replace(',', '.')) for x in df2016.num_students]
label = df2016.university_name

data = {
    'type': 'pie',
    'values': list_pie,
    'labels': label,
    'domain': {'x': [0, .6]},
    'name': 'Number of Student Rate',
    'hoverinfo': 'label+percent+name',
    'hole': .3,
}
layout = {
    'title': 'Student Rate at Top 10 Universities in 2016',
    'annotations': [{'font': {'size': 18},
                     'showarrow': False,
                     'text': 'Number of Students',
                     'x': 0.20,
                     'y': 1}],
}
fig = {'data': data, 'layout': layout}
iplot(fig)
# + [markdown] id="m9U91dqs270I"
# # Bubble Chart
# # University world rank (first 20) vs teaching score with number of students(size) and international score (color) in 2016
# + id="uqvgz8nmFcY_"
# Normalise European decimal commas before the float conversions below.
df12['num_students']= df12.num_students.str.replace(',','.', regex=True)
df12.international= df12.international.str.replace(',','.', regex=True)
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="cFYWmG0Hzre4" outputId="972a4331-8741-41dc-8cb7-fe9ad8b3dd66"
# Bubble chart: world rank vs teaching; bubble size = number of
# students, bubble colour = international score.
stud_size = [float(x) for x in df12.num_students]
int_color = [float(x) for x in df12.international]
data = {
    'x': df12.world_rank,
    'y': df12.teaching,
    'mode': 'markers',
    'marker': {'color': int_color, 'size': stud_size, 'showscale': True},
    'text': df12.university_name,
}
layout = {'title': 'Uni World Rank, Teaching with Number of Student as Size, International Score as Color'}
fig = {'data': data, 'layout': layout}
iplot(fig)
# + [markdown] id="i4B-fKNQKOY2"
# # Histogram
# # students-staff ratio in 2011 and 2012 years
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="OGmy5hd__sby" outputId="f9cbf2d4-58ae-4554-8176-2d5e68a86411"
# Overlaid histograms comparing student_staff_ratio in 2011 and 2012.
fst_trace = go.Histogram(
    x=x2011,
    opacity=0.75,
    name='2011',
    marker={'color': 'rgba(0, 200, 255, 0.8)'},
)
scs_trace = go.Histogram(
    x=x2012,
    opacity=0.75,
    name='2012',
    marker={'color': 'rgba(255, 128, 255, 0.8)'},
)
data = [fst_trace, scs_trace]
layout = go.Layout(barmode='overlay',
                   title=' students-staff ratio in 2011 and 2012',
                   xaxis={'title': 'student_staff_ratio'},
                   yaxis={'title': 'Counts'})
fig = {'data': data, 'layout': layout}
iplot(fig)
# + [markdown] id="fLr_tD6eNnoL"
# # Word Cloud
# # Most Mentioned Country In 2011
# * A Wordcloud (or Tag cloud) is a visual representation of text data. It displays a list of words, the importance of each being shown with font size or color
# + colab={"base_uri": "https://localhost:8080/", "height": 449} id="7c0nyeYtM3j7" outputId="f00af54f-8a85-42ca-97a8-9a42e9069894"
# Word cloud of the most frequently mentioned countries.
# NOTE(review): the section title says 2011, but x11 was sliced with
# data.year == 2012 above — confirm which year was intended.
plt.subplots(figsize=(10,10))
# join the country column into one blob for WordCloud to count
cloud= WordCloud(background_color='black', width= 512, height= 384).generate(" ".join(x11))
plt.imshow(cloud)
plt.axis('off')
plt.savefig('graph.png')
plt.show()
# + [markdown] id="o4qAg0YdUdbY"
# # Box Plot
# # Total Score and Research in 2015
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="7_Pqx2ZMPuy0" outputId="2af3a0a7-a864-4d7f-d698-2b741b52c56d"
# Box plots of the 2015 total_score and research distributions.
trace = go.Box(
    y=x2015.total_score,
    name='total score of universities in 2015',
    marker={'color': 'rgba(16, 112, 2, 0.8)'},
)
traces = go.Box(
    y=x2015.research,
    name='research',
    marker={'color': 'rgb(12, 12, 140)'},
)
data = [trace, traces]
iplot(data)
# + [markdown] id="aMOodDOGXBKZ"
# # Scatter MatrixPlot
# #Research, Total_Score, International In 2015
# + colab={"base_uri": "https://localhost:8080/", "height": 717} id="gNCJI76tXAq8" outputId="48704095-1b36-422d-f959-33569d8fd54c"
import plotly.figure_factory as ff
# Scatter-matrix of research, international and total_score for 2015;
# create_scatterplotmatrix requires an explicit index column.
data2015 = x2015.loc[:,["research","international", "total_score"]]
data2015["index"] = np.arange(1,len(data2015)+1)
fig = ff.create_scatterplotmatrix(data2015, diag='box', index='index',colormap='Portland',
                                  colormap_type='cat',
                                  height=700, width=700)
iplot(fig)
# + [markdown] id="rXKMyX7QYKfk"
# # 3D Scatter Plot
# # World Rank, Citation, Research In 3D
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="2QU0M6lhXp3J" outputId="3a00f5c5-12f0-4737-bce4-b4c00be86418"
# 3D scatter: world rank vs citations vs research for 2015.
trace = go.Scatter3d(
    x=x2015.world_rank,
    y=x2015.citations,
    z=x2015.research,
    mode='markers',
    marker={'size': 10, 'color': 'rgb(255,0,0)'},
)
data = [trace]
# Zero margins so the 3D scene fills the whole plotting area.
layout = go.Layout(margin={'l': 0, 'r': 0, 'b': 0, 't': 0})
fig = go.Figure(data=data, layout=layout)
iplot(fig)
# + [markdown] id="nilb1eaVomr1"
# # Thank You For Staying Until The End. Hope You Liked It!
| Projects/World_ranking_Universities/Plotly_WIDS.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import torch
from torch.optim import Adam
from birdseye.sensor import *
from birdseye.actions import *
from birdseye.dqn import *
from birdseye.env import RFEnv
from birdseye.rl_common.logger import init_logger
from birdseye.rl_common.models import CNN, MLP
# + tags=["outputPrepend"]
# Environment setup: a Drone sensor inside the RF tracking environment.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
sensor = Drone()
actions = SimpleActions()
env = RFEnv(sensor, actions)
log_path = 'rl_log'
logger = init_logger(log_path)
# DQN hyperparameters.
number_timesteps = 10000
dueling = True
atom_num = 1  # 1 => plain DQN head (no distributional atoms)
policy_dim = len(env.actions.action_space)
in_dim = (1, 100, 100)  # single-channel 100x100 observation grid
network = CNN(in_dim, policy_dim, atom_num, dueling)
optimizer = Adam(network.parameters(), 1e-4, eps=1e-5)
save_path = 'checkpoints'
save_interval = 1000 # save model every x steps (0 = disabled)
ob_scale = 1
gamma = 0.99
grad_norm = 10 # set by original author
double_q = True
param_noise = False
# epsilon-greedy schedule: anneal over the first 10% of timesteps.
exploration_fraction = 0.1
exploration_final_eps = 0.01
batch_size = 100
train_freq = 4
learning_starts = 1000
target_network_update_freq = 200
buffer_size = 10000
prioritized_replay = False
prioritized_replay_alpha = 0.6
prioritized_replay_beta0 = 0.4
# value clipping range used by the distributional head (unused when atom_num == 1)
min_value = -10
max_value = 10
# Run the DQN training loop (positional-argument API from birdseye.dqn).
learn(logger,
      device, env,
      number_timesteps,
      network, optimizer,
      save_path, save_interval, ob_scale,
      gamma, grad_norm,
      double_q, param_noise,
      exploration_fraction, exploration_final_eps,
      batch_size, train_freq, learning_starts, target_network_update_freq,
      buffer_size, prioritized_replay, prioritized_replay_alpha,
      prioritized_replay_beta0, atom_num, min_value, max_value)
| birdseye/notebooks/rl.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Week 13. In-Class Practice
# ## 修真聊天群的 word2vec 實作
# 本文參考[自然語言處理入門- Word2vec小實作](https://medium.com/pyladies-taiwan/%E8%87%AA%E7%84%B6%E8%AA%9E%E8%A8%80%E8%99%95%E7%90%86%E5%85%A5%E9%96%80-word2vec%E5%B0%8F%E5%AF%A6%E4%BD%9C-f8832d9677c8)進行詞向量實作應用
import jieba
import numpy as np
import gensim
import warnings
from ipywidgets import IntProgress
from hanziconv import HanziConv
from IPython.display import display
warnings.filterwarnings("ignore")
# +
# Read the novel line by line, dropping blank lines and converting
# Simplified to Traditional Chinese with HanziConv.
file_train_read = []
with open('./dataset/xiuzhenliaotianqun.txt', encoding='utf-8') as file_train_raw:
    for line in file_train_raw:
        if line.strip() != '':
            file_train_read.append(HanziConv.toTraditional(line.strip()))
print("Text lines:", len(file_train_read))
# +
# Load the stopword list, also converted to Traditional Chinese so it
# matches the converted corpus.
stopwords = set()
with open('./dataset/stopwords.txt', encoding='utf-8') as stopword_file:
    for words in stopword_file:
        stopwords.add(HanziConv.toTraditional(words.strip()))
print("Stopwords number:", len(stopwords))
# -
# ### 斷詞與word2vec實現
# ### 1. 進行斷詞
# +
# Progress bar covering all text lines.
progress = IntProgress(min=0, max=len(file_train_read))
progress.value = 0
progress.description = "[ %s / %s ]"%(str(progress.value), str(progress.max))
display(progress)
# Segment every line with jieba and drop stopwords; each entry becomes a
# one-element list holding a single space-joined token string.
file_train_seg = []
for i in range(len(file_train_read)):
    file_train_seg.append([' '.join([word for word in jieba.cut(file_train_read[i], cut_all=False)
                                     if word not in stopwords])])
    progress.value +=1
    progress.description = "[ %s / %s ]"%(str(progress.value), str(progress.max))
# +
# Save the jieba segmentation output to disk
file_seg_word_done_path = 'corpus_seg_done.txt'
with open(file_seg_word_done_path, 'wb') as f:
    for i in range(len(file_train_seg)):
        f.write((file_train_seg[i][0] + '\n').encode('utf-8'))
# Inspect the jieba segmentation result
def print_list_chinese(list):
    """Print each element of the given sequence on its own line."""
    # NOTE: the parameter shadows the builtin `list`; the name is kept
    # unchanged so the signature stays identical for existing callers.
    # Iterate directly instead of indexing via range(len(...)).
    for item in list:
        print(item)
print_list_chinese(file_train_seg[3])
# -
# ### 2. word2vec 轉成高維空間向量
# Turn the jieba-segmented corpus into word2vec vectors
from gensim.models import word2vec
sentences = word2vec.LineSentence(file_seg_word_done_path)
model = word2vec.Word2Vec(sentences, size=250)
model.save("word2vec.model")
# +
# model.wv.syn0 holds the raw word vectors; each word has 250 dimensions,
# so listing everything would be far too long.....
# Here we only show 10 dimensions of the first 10 words.
model.wv.syn0[0:10,0:10]
# +
# Check which words sit at vocabulary positions 996~1000
for i in range(995,1000):
    print(model.wv.index2word[i])
# -
# ### 評價所建「詞向量」的好壞
# +
# Show words that are close in the embedding space
print("宋書航 相近詞:", [i[0] for i in model.similar_by_vector('宋書航')], '\n')
print("修真 相近詞:", [i[0] for i in model.similar_by_vector('修真')], '\n')
print("法術 相近詞:", [i[0] for i in model.similar_by_vector('法術')], '\n')
# To see both the similar words and their similarity scores, just call similar_by_vector() directly
model.similar_by_vector('宋書航')
# -
# 回過頭來看一下詞向量模型的結果。
#
# 首先我們用咱們男主角的名字「宋書航」丟進去測試,原則上要跑出一些主線人物的名字,畢竟他們伴隨著男主角成長,比較可能存在類似的脈絡中而被模型捕捉到。乍看也還算合理,不過「瞭書航」、「著書航」這二個詞很明顯地就是由簡轉繁引發的斷詞失誤了。
#
# 第二個嘗試看看「修真」的相近詞,跑出來「資料」、「權限」、「共享」等詞彙,再度懷疑是斷詞引擎或者詞向量模型出了問題。其實不然,有看過這部小說的同學就會知道這部作品是多麼的不(ㄋㄠˇ) 落(ㄉㄨㄥˋ) 俗(ㄉㄚˋ) 套(ㄎㄞ)。這個例子再度告訴我們,Domain Knowledge 的重要性啊 XDDD。
#
# 最後看看「法術」的相近詞,嗯,終於合乎我們的預期。這些字詞都是玄幻修真類小說的常用語彙,成功~
#
# ---------
# 不過還有一個小麻煩,同樣關乎斷詞引擎的失誤,就是當我們輸入一個很重要的角色「白前輩」進去搜尋相近詞時,居然找不到!
model.similar_by_vector('白前輩')
# 這邊我們有兩個問題要修正,一是關於簡轉繁後的判讀失誤,二是人名的錯誤斷詞。第一個問題,我們可以先讓斷詞引擎斷完,我們再翻譯語料庫中的詞彙。第二個問題我們則要加入相關字詞的字庫來調整斷詞。
# ------
# ### 修正文本
# 這邊直接導入文本,不做繁簡體的轉換。
# +
# Reload the raw text, this time WITHOUT the Simplified-to-Traditional
# conversion that caused misreadings earlier.
file_train_read = []
with open('./dataset/xiuzhenliaotianqun.txt', encoding='utf-8') as file_train_raw:
    for line in file_train_raw:
        if line.strip() != '':
            file_train_read.append(line.strip())
print("Text lines:", len(file_train_read))
# +
# Stopwords kept in their original (Simplified) form to match the corpus.
stopwords = set()
with open('./dataset/stopwords.txt', encoding='utf-8') as stopword_file:
    for words in stopword_file:
        stopwords.add(words.strip())
print("Stopwords number:", len(stopwords))
# -
# ### 建立字典
# 我們從百度上面透過爬蟲抓了一些關鍵字(角色名稱、事物)當作分詞的辭典,加強斷詞的效果。
# +
import sys
sys.path.insert(0, "../modules")
import wenwenbaidu as wwbd
# Keywords (character names, items) scraped from Baidu act as a custom
# dictionary so jieba keeps them as single tokens.
dictwords = set(wwbd.get_keywords())
dictwords.add('白前辈')
print(dictwords)
for i in dictwords:
    jieba.add_word(i)
    jieba.suggest_freq(i, tune=True)
# +
progress = IntProgress(min=0, max=len(file_train_read))
progress.value = 0
progress.description = "[ %s / %s ]"%(str(progress.value), str(progress.max))
display(progress)
# Segment with jieba (now using the custom dictionary)
file_train_seg = []
for i in range(len(file_train_read)):
    file_train_seg.append([' '.join([word for word in jieba.cut(file_train_read[i], cut_all=False)
                                     if word not in stopwords])])
    progress.value +=1
    progress.description = "[ %s / %s ]"%(str(progress.value), str(progress.max))
# Save the jieba segmentation output to disk (overwrites the earlier file)
file_seg_word_done_path = 'corpus_seg_done.txt'
with open(file_seg_word_done_path, 'wb') as f:
    for i in range(len(file_train_seg)):
        f.write((file_train_seg[i][0] + '\n').encode('utf-8'))
# +
# 檢視斷詞 jieba 的結果
def print_list_chinese(list):
    """Print each element of the given sequence on its own line."""
    # NOTE: the parameter shadows the builtin `list`; the name is kept
    # unchanged so the signature stays identical for existing callers.
    # Iterate directly instead of indexing via range(len(...)).
    for item in list:
        print(item)
print_list_chinese(file_train_seg[3])
# -
# Turn the re-segmented corpus into word2vec vectors
from gensim.models import word2vec
sentences = word2vec.LineSentence(file_seg_word_done_path)
model = word2vec.Word2Vec(sentences, size=250)
model.save("word2vec.model")
# ### 重新分詞後的詞向量
# 這次的結果就顯得比較合理,沒有出現奇怪的字詞。
# Show words that are close in the embedding space
print("宋書航 相近詞:", [i[0] for i in model.similar_by_vector('宋书航')], '\n')
print("修真 相近詞:", [i[0] for i in model.similar_by_vector('修真')], '\n')
print("法術 相近詞:", [i[0] for i in model.similar_by_vector('法术')], '\n')
# ### 將距離相近的詞以視覺化呈現
# 我們可以試著把小說裡面的角色,利用 PCA 降維至平面上然後進行視覺化。
# +
print("九州一號群內角色:\n")
print("苏氏阿十六 相近詞:", [i[0] for i in model.similar_by_vector('苏氏阿十六')], '\n')
print("羽柔子 相近詞:", [i[0] for i in model.similar_by_vector('羽柔子')], '\n')
print("白前辈 相近詞:", [i[0] for i in model.similar_by_vector('白前辈')], '\n')
print("黄山尊者 相近詞:", [i[0] for i in model.similar_by_vector('黄山尊者')], '\n')
print("\n(遭懟)大佬:\n")
print("白前辈two 相近詞:", [i[0] for i in model.similar_by_vector('白前辈two')], '\n')
print("胖球 相近詞:", [i[0] for i in model.similar_by_vector('胖球')], '\n')
print("天帝 相近詞:", [i[0] for i in model.similar_by_vector('天帝')], '\n')
print("\n其他人物:\n")
print("程琳 相近詞:", [i[0] for i in model.similar_by_vector('程琳')], '\n')
print("琉璃书生 相近詞:", [i[0] for i in model.similar_by_vector('琉璃书生')], '\n')
# +
from sklearn.decomposition import PCA
# For each main character, collect the character plus their top-5
# similar words (6 entries per character) for the PCA plot below.
main_characters = ['宋书航', '苏氏阿十六', '黄山尊者', '白前辈two', '胖球', '天帝', '程琳']
word_list = []
similar_num = 5
for person in main_characters:
    word_list.append(person)
    word_list = word_list + [i[0] for i in model.similar_by_vector(person)][:similar_num]
# -
# Look up the 250-dimensional vectors for the collected words.
word_vectors = model.wv[word_list]
word_vectors.shape
# Project them down to 2D for plotting.
pca = PCA(n_components=2)
reduced_vectors = pca.fit_transform(word_vectors)
reduced_vectors.shape
# +
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
# %matplotlib inline
# Use a Chinese-capable font (Microsoft YaHei) so annotations render.
font = FontProperties(fname = r"C:\Windows\Fonts\msyh.ttc")
fig, ax = plt.subplots(figsize=(10, 10))
ax.set_xlim(-10,10)
ax.set_ylim(-10,10)
# word_list holds 6 entries per character (the name + 5 similar words),
# so character i occupies indices i*6 .. i*6+similar_num; each character
# group gets its own matplotlib cycle colour ('C0', 'C1', ...).
for i in range(len(main_characters)):
    text_color = 'C' + str(i)
    for j in range(i*6, i*6+similar_num+1):
        ax.annotate(word_list[j], xy=reduced_vectors[j,:], color=text_color, fontsize=12, fontproperties=font)
plt.show()
# 如果有看過這部小說的人,大致可以透過降維後的視覺化,發現詞向量的合理。九州一號群分布於座標軸的左半部分(黃山尊者在中間真不愧是群主),九幽界的大佬們在右下半部,天庭相關的角色分布於右上半部。有興趣的人可以試著玩玩看其他的文本~~
# #
| week_13/week_13_inclass_practice.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib inline
#import modules and packages
import numpy as np
from numpy import genfromtxt
from matplotlib import pyplot as plt
from sklearn.neural_network import MLPRegressor
from sklearn.gaussian_process.kernels import RBF
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.gaussian_process.kernels import WhiteKernel
from sklearn.gaussian_process import GaussianProcessRegressor
########################################################################################
#Main part of run: fit Linear, GPR and NN models on one dataset and
#compare their parity plots side by side.
#setup jupyter inline parameters
plt.rcParams['figure.figsize'] = [20,10]
#initialize plot objects (one axis per model)
fig, ax = plt.subplots(1,3)
#file tags: exactly one of these selects which sample dataset to load
random_numbers = 0
sin = 0
periodic_table = 1
#fraction of the data held out as the test set
test_ratio = .1
#plot_tags: scatter alphas derived from the split ratio
train_alpha = test_ratio
test_alpha = 1 - test_ratio
#read a given set of data
if random_numbers:
    file = '__SAMPLE_toy_data_X1X2X3X4.csv'
    X = genfromtxt(file, delimiter=',',skip_header=1,usecols = range(0,3))
    Y = genfromtxt(file, delimiter=',',skip_header=1,usecols = range(7,8))
elif sin:
    file = '__SAMPLE_sin.csv'
    X = genfromtxt(file, delimiter=',',skip_header=1,usecols = range(0,1))
    Y = genfromtxt(file, delimiter=',',skip_header=1,usecols = range(1,2))
    X = X.reshape(-1, 1)
elif periodic_table:
    file = '__SAMPLE_toy_data_atoms.csv'
    X = genfromtxt(file, delimiter=',',skip_header=1,usecols = range(1,13))
    Y = genfromtxt(file, delimiter=',',skip_header=1,usecols = range(13,14))
########################################################################################
#partition training/test sets
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = test_ratio)
#initialize and fit the three models
#Linear Regression
linear_model = LinearRegression().fit(X_train, Y_train)
#Gaussian Process Regression: RBF kernel plus a WhiteKernel noise term
kernel = RBF(length_scale=100, length_scale_bounds=(1e-5,1e5)) + WhiteKernel()
gpr_model = GaussianProcessRegressor(kernel=kernel).fit(X_train, Y_train)
#Neural Network
# NOTE(review): hidden_layer_sizes=(1000) is the plain int 1000, not a
# 1-tuple; sklearn accepts it, but (1000,) would make the intent clearer.
nn_model = MLPRegressor(hidden_layer_sizes=(1000), activation='tanh', solver='lbfgs', alpha=.00001,
                        batch_size='auto', learning_rate='adaptive', learning_rate_init=0.01, power_t=0.0005,
                        max_iter=100, shuffle=True, random_state=False, tol=0.0001, verbose=False, warm_start=False,
                        momentum=0.9, nesterovs_momentum=True, early_stopping=True, validation_fraction=0.25,
                        beta_1=0.1, beta_2=0.999, epsilon=1e-08).fit(X_train, Y_train)
#make predictions (GPR also returns the predictive std. deviation)
y_lin_test= linear_model.predict(X_test)
y_lin_train= linear_model.predict(X_train)
y_gpr_test,gpr_unc_test = gpr_model.predict(X_test,return_std=True)
y_gpr_train,gpr_unc_train = gpr_model.predict(X_train,return_std=True)
y_nn_test = nn_model.predict(X_test)
y_nn_train = nn_model.predict(X_train)
########################################################################################
#Plot the model predictions as parity plots (predicted vs actual)
#diagonal reference line for parity
x = np.linspace(min(Y),max(Y),len(Y))
#Linear model
ax[0].scatter(Y_train,y_lin_train,color='r',label='Linear Train',alpha=.5)
ax[0].scatter(Y_test,y_lin_test,label='Linear Test',alpha=.5)
ax[0].plot(x,x,'k')
ax[0].set_xlabel('Y actual')
ax[0].set_ylabel('Y predicted')
ax[0].legend(loc='upper left')
#Gaussian Process model
ax[1].scatter(Y_train,y_gpr_train,color='r',alpha=train_alpha,label='GPR Train')
ax[1].scatter(Y_test,y_gpr_test,alpha=test_alpha,label='GPR Test')
ax[1].plot(x,x,'k')
ax[1].set_xlabel('Y actual')
ax[1].set_ylabel('Y predicted')
ax[1].legend(loc='upper left')
#Neural Network model
ax[2].scatter(Y_test,y_nn_test,label='Neural Network Test',alpha=.5)
ax[2].scatter(Y_train,y_nn_train,color='r',alpha=.5,label='Neural Network Train')
ax[2].plot(x,x,'k')
ax[2].set_xlabel('Y actual')
ax[2].set_ylabel('Y predicted')
ax[2].legend(loc='upper left')
#show the plot
plt.show()
# -
| Regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Bias on Wikipedia
#
# This ipython notebook is created for DATA512 at UW for this assignment: https://wiki.communitydata.cc/HCDS_(Fall_2017)/Assignments#A2:_Bias_in_data
#
# Our goal is to analyze the content of wikipedia to understand the biases of the site by looking at the content coverage for political members of countries. We look at how many pages there are (as a percent of the country's population) and how many of the pages are high quality (using scores from the ORES system, more info below).
#
# In the end, we show the top/bottom 10 countries for these 2 categories.
#
# ## Related Data Files
#
# raw data files:
# - page_data.csv : raw wikipedia data
# - WPDS_2018_data.csv : raw country population data
#
# Output files:
# - ores_data.csv : articles scores from the ORES system
# - combined_data.csv : combined data (country population, ores data and wikipedia data)
#
# First, import necessary packages
# +
import requests
import json
import pandas as pd
import numpy as np
# -
# Import the data, and print out the first few rows to see examples.
#
# Data comes from a few different sources. Wikipedia data is available via figshare (https://figshare.com/articles/Untitled_Item/5513449 , under country/data/) with license CC-BY-SA 4.0. This contains "most English-language Wikipedia articles within the category 'Category:Politicians by nationality' and subcategories". This data contains 3 columns, which are called out in the above link as follows:
#
# 1. "country", containing the sanitised country name, extracted from the category name;
# 2. "page", containing the unsanitised page title.
# 3. "last_edit", containing the edit ID of the last edit to the page.
#
# Population data is available via https://www.dropbox.com/s/5u7sy1xt7g0oi2c/WPDS_2018_data.csv?dl=0.
# This file contains the population in millions from mid-2018 along with the country name.
#
# A copy of the datasets, downloaded in oct, 2018, are available in this repo.
# +
# Load the Wikipedia politician pages and the 2018 population data;
# thousands=',' lets numbers like "1,284" parse as numeric.
wiki_data = pd.read_csv('page_data.csv')
country_data = pd.read_csv('WPDS_2018_data.csv',thousands=',')
country_data.rename(columns={"Population mid-2018 (millions)": "population"},inplace=True)
wiki_data.head()
# -
country_data.head()
# Here we create a helper function for getting ores scores
#
# This function takes revision ids (and the headers needed to make the call) and scores the function using the ORES system. The score and the revision id are appended to the ores_data list.
#
# ORES (Objective Revision Evaluation Service) is a machine learning service that ranks the quality of a given article. The ranks go from best to worst as FA, GA, B, C, Start and Stub. For the purposes of this analysis, we use only the predicted category (rather than the probabilities, which are also available).
# link with more info: https://www.mediawiki.org/wiki/ORES
#
#
# +
def get_ores_data(revision_ids, headers):
    """Score a batch of revision ids with the ORES 'wp10' quality model.

    Side effect: appends [revision_id, predicted_quality] pairs to the
    module-level ``ores_data`` list; nothing is returned. Revisions for
    which ORES reports an error instead of a score are skipped.
    """
    # Define the endpoint; revision ids are joined with '|' into one call.
    endpoint = 'https://ores.wikimedia.org/v3/scores/{project}/?models={model}&revids={revids}'
    params = {'project' : 'enwiki',
              'model' : 'wp10',
              'revids' : '|'.join(str(x) for x in revision_ids)
              }
    # FIX: the `headers` argument was previously accepted but never sent;
    # pass it so the API sees the User-Agent/From identification.
    api_call = requests.get(endpoint.format(**params), headers=headers)
    response = pd.read_json(json.dumps(api_call.json(), indent=4, sort_keys=True))
    for id in response['enwiki']['scores']:
        try:
            ores_data.append([id, response['enwiki']['scores'][id]['wp10']['score']['prediction']])
        except (KeyError, TypeError):
            # No prediction available for this revision (e.g. deleted
            # page): skip it, but don't swallow unrelated errors with a
            # bare `except` like the original did.
            pass
# -
# Here we define the header needed to call the above function and iterate over all of the revions, calling the function in batches (of about 100, or 472 batches for slightly less than 47k revisions).
# %%time
# Score revisions in batches: ORES limits revids per request, so split
# the ~47k ids into 472 chunks of roughly 100 each.
ores_data = [] #pd.DataFrame(columns =['revid','category'])
#ores_data.append([['a','b']])
#print(ores_data)
headers = {'User-Agent' : 'https://github.com/your_github_username', 'From' : '<EMAIL>'}
for i in np.array_split(np.asarray(wiki_data['rev_id']),472): #, 472): #split into buckets of approximately 100
    get_ores_data(i, headers)#,columns =['revid','category']
    #temp_data = pd.DataFrame(get_ores_data(i, headers),columns =['revid','category'])
    #print("here")
    #print(ores_data)
    #print(temp_data)
    #ores_data.append(temp_data)
# Here we convert the ores_data into a pandas dataframe and save to a csv for reference.
# Convert the accumulated score list to a DataFrame and save a copy.
ores_data = pd.DataFrame(ores_data,columns =['revision_id','article_quality'])#.set_index('revision_id')
ores_data.to_csv('ores_data.csv')
# We convert revision_id to an int so we can join it to the wikipedia data.
#check out ores
ores_data['revision_id'] = ores_data['revision_id'].astype(int)
#ores_data.set_index('revid')
#ores_data.reset_index(inplace=True)
ores_data.head()
# Here we merge the wikipedia data to the ores data on the revision id. We also merge onto the country data on the country/geography columns. There are 44,973 rows left after we inner join.
# Merge data: join pages to country population (country name == Geography)
# and to ORES scores (rev_id == revision_id); inner joins drop unmatched rows.
combined_data = wiki_data.merge(country_data,
                                how = 'inner',
                                left_on ='country',
                                right_on = 'Geography').merge(ores_data,
                                                              how = 'inner',
                                                              left_on = 'rev_id',
                                                              right_on = 'revision_id'
                                                             )
print(combined_data.shape)
# Here is a preview of the US data:
combined_data[combined_data['country']=='United States'].head()
# Keep only the needed columns (drops duplicated join keys) and save.
combined_data = combined_data[['country','page','revision_id','article_quality','population']]
combined_data.to_csv('combined_data.csv')
# ## Analysis
#
# Here we start analysing the data. First, we create a pivot table with population by country.
# Analysis
# Pivot 1: per-country population (min == the single value) and total
# article count (count of 'country' rows), renamed to num_articles.
articles_and_population = combined_data.pivot_table(values = ['population'],
                                                    index = ['country'],
                                                    dropna = False,
                                                    #columns = ['article_quality'],
                                                    aggfunc = {'population': min,'country':'count'}
                                                   ).rename(columns={"country": "num_articles"}).reset_index()
articles_and_population.shape
# Next, we create a pivot table with the number of high quality (FA/GA) articles by country.
high_qual_articles = combined_data[combined_data['article_quality'].isin(['FA','GA'])].pivot_table(values = ['population'],
                                                    index = ['country'],
                                                    dropna = False,
                                                    #columns = ['article_quality'],
                                                    aggfunc = {'country':'count'}
                                                   ).rename(columns={"country": "num_high_quality_articles"}).reset_index()
high_qual_articles.shape
# We join the datasets and fill NAs with zeros. We change num_articles to be an int and population to be a float.
#
# We then calculate the articles_per_population (which is per million people) and the high quality article percentage for each country.
#
# Finally, we set the index as the country (as these are unique) and display the results.
# +
# Left-join so countries with zero high-quality articles are kept; their
# NaN counts become 0 via fillna.
dataset = articles_and_population.merge(high_qual_articles, how='left').fillna(0)
dataset['num_articles'] = dataset['num_articles'].astype(int)
dataset['population'] = dataset['population'].astype(float)
#dataset.dropna(inplace=True)
# articles per million people, and the share of articles that are FA/GA
dataset['articles_per_population'] = dataset['num_articles'] / dataset['population']
dataset['high_qual_article_perc'] = dataset['num_high_quality_articles'] / dataset['num_articles']
dataset.set_index('country',inplace=True)
dataset
# -
# Finally, display the top and bottom countries by articles per million people. Tuvalu has the highest value, but does have an extremely small population. Of the represented countries, India has the fewest articles per million people.
# Top 10 countries by articles per population.
dataset.sort_values(by = 'articles_per_population',ascending = False)[0:10]
# Bottom 10 countries by articles per population.
dataset.sort_values(by = 'articles_per_population',ascending = True)[0:10]
# And lastly, we look at the top and bottom countries by high quality article percentage. North Korea has the highest percentage at approximately 18% while Tanzania has the lowest at around .2%. Note that there are some countries that have been removed due to not having any high quality articles. The full list of these countries is at the end.
dataset.sort_values(by = 'high_qual_article_perc',ascending = False)[0:10]
#dataset.sort_values(by = 'high_qual_article_perc',ascending = True)[0:10]
# Bottom 10 restricted to countries with at least one high-quality article.
dataset[dataset['high_qual_article_perc']>0].sort_values(by = 'high_qual_article_perc',ascending = True)[0:10]
# Countries with 0 high quality articles:
dataset[dataset['high_qual_article_perc']==0].index
import matplotlib.pyplot as plt
# Log-log scatter of quality share vs. coverage; the +.0001 offset avoids
# log(0) for countries with no high-quality articles.
# NOTE(review): `np` is not imported anywhere in this visible chunk —
# confirm `import numpy as np` appears earlier in the notebook.
plt.scatter(np.log(dataset['high_qual_article_perc']+.0001),
            np.log(dataset['articles_per_population']),
            c='r',
            s=1
           )
plt.show()
# # Learnings
#
# From this analysis, we expected to see varying amounts of both coverage and quality articles as we look at different countries. While I expected there to be better coverage and quality for more developed nations, this did not appear to be the case. It is true that there are discrepancies between nations, in large part due to the extreme differences in population between countries. There are many country-specific factors we have not included in this analysis that may help illustrate the trend, including education, access to the internet, Wikipedia popularity, government internet regulations, and more.
| hcds-a2-bias.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: azmeta-project
# language: python
# name: azmeta-project
# ---
# # Azure Monitor Log Analytics Workspace Summary
#
# Get a birds-eye view of the utilization and cost of your Log Analytics workspaces.
# ## Parameters
#
# **resource_filter**: Optional KQL where clause to limit Azure Monitor workspace resources in scope.
# + tags=["parameters"]
resource_filter = None
# -
# ## Setup
from azmeta.access import resource_graph, monitor_logs, list_subscription_ids
from azmeta.access.billing import full_day_timespan, create_basic_filter, create_cost_query, GroupByColumn, GranularityType, query_cost_dataframe
from azmeta.access.kusto import serialize_to_kql
import azmeta.notebook.interactive as azmi
import pandas as pd
import itertools
from datetime import datetime, timedelta
# # Log Analytics Workspace Selection
#
# Retrieve all the workspaces selected for analysis using Azure Resource Graph.
# Resolve the notebook's Azure context and collect every subscription in scope.
context = azmi.resource_context()
all_subscription_ids = list_subscription_ids(context.subscriptions)
# Azure Resource Graph query: all Log Analytics workspaces (optionally
# narrowed by the `resource_filter` notebook parameter), enriched with the
# owning subscription's display name. The frame is indexed by the workspace
# customer id; the lowercased ARM resource id is kept for the cost join.
workspaces = resource_graph.query_dataframe(all_subscription_ids, f"""
Resources
| where type == 'microsoft.operationalinsights/workspaces'
| where {resource_filter if resource_filter else "1 == 1"}
| join kind=leftouter (ResourceContainers | where type == 'microsoft.resources/subscriptions' | project subscriptionName=name, subscriptionId) on subscriptionId
| project subscriptionName, resourceGroup, name, sku = properties.sku.name, reservedGB = properties.sku.capacityReservationLevel, storeDays = properties.retentionInDays, id = properties.customerId, resourceId = tolower(id)
| order by subscriptionName asc
""").set_index('id')
# Display the workspace table, hiding the internal ARM resource id column.
workspaces.style.hide_columns('resourceId')
# # Workspace Utilization
#
# Retrieves the workspace utilization metrics from Log Analytics metadata tables.
# +
# Two reporting windows: "yesterday" = the most recent full day, and a
# trailing window of 30 full days ending with yesterday.
today = datetime.today()
yesterday = today - timedelta(days=1)
yesterday_begin, yesterday_end = full_day_timespan(yesterday, end_midnight=True)
thirtyday = today - timedelta(days=30)
thirty_begin, thirty_end = full_day_timespan(thirtyday, yesterday, end_midnight=True)
# +
def la_query(query):
    """Run *query* against every selected workspace and return the primary
    result as a DataFrame indexed by workspace (tenant) id."""
    result = monitor_logs.query_dataframe(query, workspaces.index.to_list())
    return result.primary_result.set_index('id')
# Billable ingestion volume (GB) for the last full day, per workspace.
df_lfd_volume = la_query(f"""
Usage
| where TimeGenerated > {serialize_to_kql(yesterday_begin)} and TimeGenerated <= {serialize_to_kql(yesterday_end)}
| where IsBillable == true
| summarize lastFullDayGB = sum(Quantity) / 1000 by TenantId
| project-rename id = TenantId
""")
# -
# Daily billable volume over the 30-day window, reduced to the per-workspace
# median and minimum daily GB.
df_30d_volume = la_query(f"""
Usage
| where TimeGenerated > {serialize_to_kql(thirty_begin)} and TimeGenerated < {serialize_to_kql(thirty_end)}
| where IsBillable == true
| summarize fullDayGB = sum(Quantity) / 1000 by TenantId, bin(TimeGenerated, 1d)
| summarize medianDayGB = percentile(fullDayGB, 50), minDayGB = min(fullDayGB) by TenantId
| project-rename id = TenantId
""")
# Count of distinct agents that sent a heartbeat during the last full day.
df_lfd_nodes = la_query(f"""
Heartbeat
| where TimeGenerated > {serialize_to_kql(yesterday_begin)} and TimeGenerated <= {serialize_to_kql(yesterday_end)}
| summarize by SourceComputerId, TenantId
| summarize nodesReporting = count() by TenantId
| project-rename id = TenantId
""")
# # Workspace Cost
#
# Retrieve the workspace cost information from Azure Cost Management.
# Query Azure Cost Management for daily cost per workspace over the 30-day
# window, then aggregate to total, median-day, and last-full-day cost.
workspace_resource_ids = workspaces.resourceId.to_list()
query_filter = create_basic_filter(
    resource_ids=workspace_resource_ids
)
query = create_cost_query(
    full_day_timespan(thirtyday, yesterday),
    grouping=GroupByColumn("ResourceId"),
    filter=query_filter,
    granularity=GranularityType.daily,
)
cost_df = query_cost_dataframe(context.default_billing_account, query)
# Aggregate the daily rows per resource.
total_cost = cost_df.groupby('ResourceId').sum().Cost
median_cost = cost_df.groupby('ResourceId').median().Cost
# Cost of the most recent usage date present in the result.
lfd_cost = cost_df[cost_df.UsageDate == cost_df.UsageDate.max()].set_index('ResourceId').Cost
cost_agg_df = pd.DataFrame({'thirty_day_cost': total_cost, 'thirty_day_median_cost': median_cost, 'last_full_day_cost': lfd_cost })
# # Dashboard
#
# Top cost workspaces.
# Assemble the dashboard frame: workspace config joined with the volume/node
# metrics (keyed by workspace id) and the cost aggregates (keyed by ARM id).
full = workspaces \
    .join([df_lfd_volume, df_30d_volume, df_lfd_nodes]) \
    .join(cost_agg_df, on='resourceId')
# Per-node cost for the last full day.
full = full.assign(full_day_avg_cost=full.last_full_day_cost/full.nodesReporting)
# Sort: workspaces that reported usage first (the key maps medianDayGB to its
# NaN-ness, so non-NaN sorts before NaN), then by 30-day cost descending.
full = full.sort_values(['medianDayGB', 'thirty_day_cost'], ascending=[True,False], key=lambda x:pd.isna(x) if x.name == 'medianDayGB' else x)
# +
def build_header_style(col_groups):
    """Build pandas Styler ``table_styles`` entries that give each top-level
    column group ('Config', 'Thirty Day', 'Last Full Day') its own header
    background color, on both header levels of the MultiIndex."""
    palette = { 'Config': '#f6f6f6', 'Thirty Day': '#eae9e9', 'Last Full Day': '#d4d7dd'}
    styles = []
    offset = 0  # absolute column position of the current group's first column
    for key, members in itertools.groupby(col_groups, lambda col: col[0]):
        color = palette[key]
        # Level-0 (group) header cell, then one level-1 cell per member column.
        styles.append({'selector': f'.col_heading.level0.col{offset}',
                       'props': [('background-color', color)]})
        width = sum(1 for _ in members)
        styles.extend({'selector': f'.col_heading.level1.col{offset + j}',
                       'props': [('background-color', color)]}
                      for j in range(width))
        offset += width
    return styles
# Presentation copy with a two-level header: group label, then column label.
fulls = full.copy().drop(columns='resourceId')
fulls.columns = pd.MultiIndex.from_tuples([*itertools.product(['Config'], ['Subscription Name', 'Resource Group', 'Name', 'SKU', 'Reserved GB', 'Retention (days)']), ('Last Full Day', 'Total GB'), ('Thirty Day', 'Median GB'), ('Thirty Day', 'Min GB'), ('Last Full Day', 'Nodes Reporting'), ('Thirty Day', 'Total Cost'), ('Thirty Day', 'Median Cost'), ('Last Full Day', 'Total Cost'), ('Last Full Day', 'Avg Cost Per Node')])
# Currency format for cost columns, one-decimal format for non-config GB
# columns, and per-group header colors from build_header_style.
styler = fulls.style.hide_index() \
    .format('${:,.2f}', na_rep='N/A', subset=[x for x in fulls.columns if 'Cost' in x[1]]) \
    .format('{:,.1f}', na_rep='N/A', subset=[x for x in fulls.columns if 'GB' in x[1] and 'Config' != x[0]]) \
    .set_table_styles(build_header_style(fulls.columns))
# Per-column heat-map shading on every metric column (note: `and` binds
# tighter than `or`, so this is Cost-columns OR non-config GB-columns).
for column in [x for x in fulls.columns if 'Cost' in x[1] or 'GB' in x[1] and 'Config' != x[0]]:
    styler.background_gradient(subset=[column])
styler
# -
| src/monitor/workspace_summary.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.6.1
# language: julia
# name: julia-1.6
# ---
# [](https://mybinder.org/v2/gh/schlichtanders/fall-in-love-with-julia/master?filepath=05%20machinelearning%20with%20MLJ%20-%2001%20introduction.ipynb)
# # Introduction to Machine Learning in Julia with MLJ
#
# Welcome to this little Jupyter Notebook for getting to know MLJ, the goto ML platform within Julia.
#
# To start with, take a look at [MLJ's github page](https://github.com/alan-turing-institute/MLJ.jl):
# * super well organized: own [Github Organization "JuliaAI"](https://github.com/JuliaAI)
# * well maintained and supported: see the maintainers and support below
# > -----------------------------
# >
# > <div align="center">
# > <img src="https://github.com/alan-turing-institute/MLJ.jl/raw/dev/material/MLJLogo2.svg" alt="MLJ" width="200">
# > </div>
# >
# > <h2 align="center">A Machine Learning Framework for Julia
# > </h2>
# >
# >
# > MLJ (Machine Learning in Julia) is a toolbox written in Julia
# > providing a common interface and meta-algorithms for selecting,
# > tuning, evaluating, composing and comparing over [160 machine learning
# > models](https://alan-turing-institute.github.io/MLJ.jl/dev/list_of_supported_models/)
# > written in Julia and other languages.
# >
# > **New to MLJ?** Start [here](https://alan-turing-institute.github.io/MLJ.jl/dev/).
# >
# > **Integrating an existing machine learning model into the MLJ
# > framework?** Start [here](https://alan-turing-institute.github.io/MLJ.jl/dev/quick_start_guide_to_adding_models/).
# >
# > MLJ was initially created as a Tools, Practices and Systems project at
# > the [Alan Turing Institute](https://www.turing.ac.uk/)
# > in 2019. Current funding is provided by a [New Zealand Strategic
# > Science Investment
# > Fund](https://www.mbie.govt.nz/science-and-technology/science-and-innovation/funding-information-and-opportunities/investment-funds/strategic-science-investment-fund/ssif-funded-programmes/university-of-auckland/)
# > awarded to the University of Auckland.
# >
# > MLJ been developed with the support of the following organizations:
# >
# > <div align="center">
# > <img src="https://github.com/alan-turing-institute/MLJ.jl/raw/dev/material/Turing_logo.png" width = 100/>
# > <img src="https://github.com/alan-turing-institute/MLJ.jl/raw/dev/material/UoA_logo.png" width = 100/>
# > <img src="https://github.com/alan-turing-institute/MLJ.jl/raw/dev/material/IQVIA_logo.png" width = 100/>
# > <img src="https://github.com/alan-turing-institute/MLJ.jl/raw/dev/material/warwick.png" width = 100/>
# > <img src="https://github.com/alan-turing-institute/MLJ.jl/raw/dev/material/julia.png" width = 100/>
# > </div>
# >
# >
# > ### The MLJ Universe
# >
# > The functionality of MLJ is distributed over a number of repositories
# > illustrated in the dependency chart below. These repositories live at
# > the [JuliaAI](https://github.com/JuliaAI) umbrella organization.
# >
# > <div align="center">
# > <img src="https://github.com/alan-turing-institute/MLJ.jl/raw/dev/material/MLJ_stack.svg" alt="Dependency Chart">
# > </div>
# >
# > *Dependency chart for MLJ repositories. Repositories with dashed
# > connections do not currently exist but are planned/proposed.*
# >
# > <br>
# > <p align="center">
# > <a href="CONTRIBUTING.md">Contributing</a> •
# > <a href="ORGANIZATION.md">Code Organization</a> •
# > <a href="ROADMAP.md">Road Map</a>
# > </br>
# >
# > #### Contributors
# >
# > *Core design*: <NAME>, <NAME>, <NAME>
# >
# > *Lead contributor*: <NAME>
# >
# > *Active maintainers*: <NAME>, <NAME>, <NAME>, <NAME>
# >
# >
# > ------------------------
# Disclaimer: Many examples and text snippets are taken directly from documentation and examples provided by MLJ.
# # Let's jump into it: Supervised Learning
using MLJ
# ### Loading a Machine Learning Model
# Two ways to bring model code into scope:
DecisionTreeClassifier = @iload DecisionTreeClassifier # interactive model loading
DecisionTreeClassifier = @load DecisionTreeClassifier pkg=DecisionTree # declarative model loading
tree = DecisionTreeClassifier() # instance with default hyperparameters
# MLJ is essentially a big wrapper providing unified access to other packages containing the models
# ### Loading Data
import RDatasets
iris = RDatasets.dataset("datasets", "iris"); # a DataFrame
# Split off the :Species column as the target; all remaining columns become features.
y, X = unpack(iris, ==(:Species), colname -> true); # y = a vector, and X = a DataFrame
first(X, 3) |> pretty
# Show the docstring for unpack (notebook help syntax).
?unpack
# ----------------
# ### Fit & Predict
# Bind model + data into a "machine", which caches the learned parameters.
mach = machine(tree, X, y) # adding a mutable cache to the model+data for performant training
train, test = partition(eachindex(y), 0.7, shuffle=false); # 70:30 split
fit!(mach, rows=train)
# Predictions are probabilistic: each element is a distribution over classes.
yhat = predict(mach, X[test,:]);
yhat[3:5]
using Distributions
isa(yhat[1], Distribution)
# Point predictions: the mode of each predicted distribution.
Distributions.mode.(yhat[3:5])
# Mean cross-entropy loss over the test rows.
log_loss(yhat, y[test]) |> mean
measures()
# Find the measure metadata whose instance names include "log_loss".
for m in measures()
    if "log_loss" in m.instances
        display(m)
    end
end
# ### Evaluate = auto fit/predict
mach = machine(tree, X, y)
evaluate!(mach, resampling=Holdout(fraction_train=0.7, shuffle=false),
          measures=[log_loss, brier_score], verbosity=0)
tree.max_depth = 3
evaluate!(mach, resampling=CV(shuffle=true), measure=[accuracy, balanced_accuracy], operation=predict_mode, verbosity=0)
# ### Unsupervised Learning: fit!, transform, inverse_transform
v = [1, 2, 3, 4]
mach2 = machine(UnivariateStandardizer(), v)
fit!(mach2)
w = transform(mach2, v)
inverse_transform(mach2, w)
# --------------------------------
# # MLJ features
#
#
# MLJ (Machine Learning in Julia) is a toolbox written in Julia
# providing a common interface and meta-algorithms for selecting,
# tuning, evaluating, composing and comparing machine learning models
# written in Julia and other languages. In particular MLJ wraps a large
# number of [scikit-learn](https://scikit-learn.org/stable/) models.
#
#
# * Data agnostic, train models on any data supported by the
# [Tables.jl](https://github.com/JuliaData/Tables.jl) interface,
#
# * Extensive support for model composition (*pipelines* and *learning
# networks*),
#
# * Convenient syntax to tune and evaluate (composite) models.
#
# * Consistent interface to handle probabilistic predictions.
#
# * Extensible [tuning
# interface](https://github.com/alan-turing-institute/MLJTuning.jl),
# to support growing number of optimization strategies, and designed
# to play well with model composition.
#
#
# More information is available from the [MLJ design paper](https://github.com/alan-turing-institute/MLJ.jl/blob/master/paper/paper.md)
# ### Model Registry
#
# MLJ has a model registry, allowing the user to search models and their properties.
models(matching(X,y))
?models
info("DecisionTreeClassifier", pkg="DecisionTree")
# -----------------
#
# # A more advanced example
#
# Disclaimer: This is taken almost completely from an existing MLJ example
#
# As in other frameworks, MLJ also supports a variety of unsupervised models for pre-processing data, reducing dimensionality, etc. It also provides a [wrapper](https://alan-turing-institute.github.io/MLJ.jl/dev/tuning_models/) for tuning model hyper-parameters in various ways. Data transformations, and supervised models are then typically combined into linear [pipelines](https://alan-turing-institute.github.io/MLJ.jl/dev/composing_models/#Linear-pipelines-1). However, a more advanced feature of MLJ not common in other frameworks allows you to combine models in more complicated ways. We give a simple demonstration of that next.
#
# We start by loading the model code we'll need:
RidgeRegressor = @load RidgeRegressor pkg=MultivariateStats
RandomForestRegressor = @load RandomForestRegressor pkg=DecisionTree;
# The next step is to define "learning network" - a kind of blueprint for the new composite model type. Later we "export" the network as a new stand-alone model type. Learning networks can be seen as pipelines on steroids.
#
# Let's consider the following simple DAG:
# 
#
# Our learning network will:
#
# - standardize the input data
#
# - learn and apply a Box-Cox transformation to the target variable
#
# - blend the predictions of two supervised learning models - a ridge regressor and a random forest regressor; we'll blend using a simple average (for a more sophisticated stacking example, see [here](https://alan-turing-institute.github.io/DataScienceTutorials.jl/getting-started/stacking/))
#
# - apply the *inverse* Box-Cox transformation to this blended prediction
# **The basic idea is to proceed as if one were composing the various steps "by hand", but to wrap the training data in "source nodes" first.** In place of production data, one typically uses some dummy data, to test the network as it is built. When the learning network is "exported" as a new stand-alone model type, it will no longer be bound to any data. You bind the exported model to production data when you're ready to use your new model type (just like you would with any other MLJ model).
#
# There is no need to `fit!` the machines you create, as this will happen automatically when you *call* the final node in the network (assuming you provide the dummy data).
#
# *Input layer*
# +
# define some synthetic data:
X, y = make_regression(100)
y = abs.(y)  # Box-Cox requires a strictly positive target
test, train = partition(eachindex(y), 0.8);
# wrap as source nodes — the entry points of the learning network:
Xs = source(X)
ys = source(y)
# -
# *First layer and target transformation*
# +
# Standardize the features...
std_model = Standardizer()
stand = machine(std_model, Xs)
W = MLJ.transform(stand, Xs)
# ...and Box-Cox transform the target.
box_model = UnivariateBoxCoxTransformer()
box = machine(box_model, ys)
z = MLJ.transform(box, ys)
# -
# *Second layer*
# +
# Two regressors trained on the standardized features and transformed
# target, blended with a simple 50/50 average.
ridge_model = RidgeRegressor(lambda=0.1)
ridge = machine(ridge_model, W, z)
forest_model = RandomForestRegressor(n_trees=50)
forest = machine(forest_model, W, z)
ẑ = 0.5*predict(ridge, W) + 0.5*predict(forest, W)
# -
# *Output*
# Map the blended prediction back to the original target scale.
ŷ = inverse_transform(box, ẑ)
# No fitting has been done thus far, we have just defined a sequence of operations. We can test the network by fitting the final prediction node and then calling it to retrieve the prediction:
fit!(ŷ);
ŷ()[1:4]
# To "export" the network as a new stand-alone model type, we can use a macro:
@from_network machine(Deterministic(), Xs, ys, predict=ŷ) begin
    mutable struct CompositeModel
        rgs1 = ridge_model
        rgs2 = forest_model
    end
end
# Here's an instance of our new type:
composite = CompositeModel()
# Since we made our model mutable, we could change the regressors for different ones.
#
# For now we'll evaluate this model on the famous Boston data set:
X, y = @load_boston
evaluate(composite, X, y, resampling=CV(nfolds=6, shuffle=true), measures=[rms, mae])
# ### Check out more [Data Science Tutorials in Julia](https://alan-turing-institute.github.io/DataScienceTutorials.jl/)
# +
# try out one tutorial of your choice right in here
# ...
# -
# # Thank you for being here
#
# further information about MLJ in general:
# * MLJ repository: https://github.com/alan-turing-institute/MLJ.jl
# * MLJ docs: https://alan-turing-institute.github.io/MLJ.jl/dev/
# * MLJ tutorials: https://alan-turing-institute.github.io/DataScienceTutorials.jl/
#
# further information about MLJ's model composition feature
# * MLJ docs: https://alan-turing-institute.github.io/MLJ.jl/dev/composing_models/
# * MLJ paper: https://arxiv.org/abs/2012.15505
# * MLJ tutorial: https://alan-turing-institute.github.io/DataScienceTutorials.jl/getting-started/learning-networks/
#
# In case you have more questions or suggestions, always feel welcome to reach out to me at Meetup and Julia User Group Munich, or directly at <EMAIL>
| 05 machinelearning with MLJ - 01 introduction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from minisom import MiniSom
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# %matplotlib inline
# +
# Load the four numeric iris features (sepal/petal measurements).
data = np.genfromtxt('iris.csv', delimiter=',', usecols=(0, 1, 2, 3))
# data normalization: scale each sample (row) to unit Euclidean norm
data = np.apply_along_axis(lambda x: x/np.linalg.norm(x), 1, data)
# Initialization and training of a 7x7 SOM over the 4-dimensional inputs
som = MiniSom(7, 7, 4, sigma=1.0, learning_rate=0.5)
som.random_weights_init(data)
print("Training...")
som.train_random(data, 100) # random training, 100 iterations
print("\n...ready!")
data.shape
# -
# Plotting the response for each pattern in the iris dataset
plt.bone()
plt.pcolor(som.distance_map().T) # plotting the distance map as background
plt.colorbar()
# Load the class labels and encode them as integers 0/1/2.
target = np.genfromtxt('iris.csv', delimiter=',', usecols=(4), dtype=str)
t = np.zeros(len(target), dtype=int)
t[target == 'Iris-setosa'] = 0
t[target == 'Iris-versicolor'] = 1
t[target == 'Iris-virginica'] = 2
# +
# use different colors and markers for each label
markers = ['o', 's', 'D']
colors = ['r', 'g', 'b'] # (red,iris-setosa),(green,iris-versicolor),(blue,iris-virginica)
for cnt, xx in enumerate(data):
    w = som.winner(xx) # getting the winner (best-matching unit)
    # place a marker on the winning position for the sample xx
    # (+.5 centers the marker inside the pcolor cell)
    plt.plot(w[0]+.5, w[1]+.5, markers[t[cnt]], markerfacecolor='None',
             markeredgecolor=colors[t[cnt]], markersize=12, markeredgewidth=2)
    #print(t[cnt])
plt.axis([0, 7, 0, 7])
#colors = ('red' , 'green', 'blue')
#markers = ('Iris-setosa','Iris-versicolor','Iris-virginica')
#plt.legend(colors, markers)
plt.show()
# -
| iris-minisom.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Quick Regression Example on using Xgboost and Skater
# +
# %matplotlib inline
import matplotlib.pyplot
from sklearn.datasets import load_boston, load_breast_cancer
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.linear_model import LogisticRegression, LinearRegression
import matplotlib.pyplot as plt
# Reference for customizing matplotlib: https://matplotlib.org/users/style_sheets.html
plt.style.use('ggplot')
import pandas as pd
import numpy as np
# Load Bosting housing data
regressor_data = load_boston()
# Get information about the data
print(regressor_data.DESCR)
# -
regressor_X = regressor_data.data
regressor_y = regressor_data.target
regressor_data.feature_names
from xgboost import XGBRegressor
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import cross_validate
# XGRegressor example
# NOTE(review): 'reg:linear' was renamed to 'reg:squarederror' in newer
# XGBoost releases — confirm against the installed version.
xgb = XGBRegressor(objective='reg:linear', booster='gbtree', n_jobs= -1)
# +
#XGB parameters for grid search
# Every list has a single value, so the "grid" is one candidate; the
# GridSearchCV call below effectively just fits these fixed
# hyperparameters with 3-fold cross-validation.
xgb_grid = {"max_depth" : [6],
            "learning_rate" : [0.2],
            "gamma" : [0],
            "n_estimators" : [150],
            "min_child_weight" : [1],
            "base_score" : [0.5],
            "subsample" : [1],
            "max_delta_step" : [0],
            "colsample_bytree" : [0.5],
            "colsample_bylevel" : [0.4],
            "reg_alpha" : [0],
            "reg_lambda" : [60],
            "scale_pos_weight" : [1]
           }
clf = GridSearchCV(xgb, param_grid = xgb_grid, cv = 3, n_jobs = -1)
clf.fit(regressor_X, regressor_y)
# -
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
# Caution: these metrics are computed on the training data, so they are
# optimistic relative to held-out performance.
y_hat = clf.predict(regressor_X)
print(mean_squared_error(regressor_y, y_hat))
print(r2_score(regressor_y, y_hat))
# ### Inference with Skater
# +
from skater.core.explanations import Interpretation
from skater.model import InMemoryModel
# Wrap the data and fitted model for Skater: Interpretation holds the data,
# InMemoryModel wraps the prediction function for introspection.
interpreter = Interpretation(regressor_X, feature_names=regressor_data.feature_names)
annotated_model = InMemoryModel(clf.predict, examples=regressor_X)
print("Number of classes: {}".format(annotated_model.n_classes))
print("Input shape: {}".format(annotated_model.input_shape))
print("Model Type: {}".format(annotated_model.model_type))
print("Output Shape: {}".format(annotated_model.output_shape))
print("Output Type: {}".format(annotated_model.output_type))
print("Returns Probabilities: {}".format(annotated_model.probability))
# +
# There is some issue in the way xgboost handles the thread pool for concurrency and other multiprocessing python
# libraries. For more information follow the discussion at the below mentioned links
# 1. https://github.com/datascienceinc/Skater/issues/212
# 2. https://github.com/dmlc/xgboost/issues/2163
# 3. http://scikit-learn.org/stable/faq.html#why-do-i-sometime-get-a-crash-freeze-with-n-jobs-1-under-osx-or-linux
# Workaround: The current workaround is to use n_jobs=1 in the calls below.
print("2-way partial dependence plots")
# Features can be passed as a tuple for a 2-way partial dependence plot
pdp_features = [('DIS', 'RM')]
interpreter.partial_dependence.plot_partial_dependence(
    pdp_features, annotated_model, grid_resolution=30, n_jobs=1
)
# -
print("1-way partial dependence plots")
# or as independent features for 1-way partial plots
pdp_features = ['DIS', 'RM']
interpreter.partial_dependence.plot_partial_dependence(
    pdp_features, annotated_model, grid_resolution=30, progressbar=False, n_jobs=1, with_variance=True
)
| examples/xgboost_regression_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/coding-ss/DS-Unit-1-Sprint-2-Data-Wrangling/blob/master/DS_Unit_1_Sprint_Challenge_2_Data_Wrangling.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="4yMHi_PX9hEz" colab_type="text"
# # Data Science Unit 1 Sprint Challenge 2
#
# ## Data Wrangling
#
# In this Sprint Challenge you will use data from [Gapminder](https://www.gapminder.org/about-gapminder/), a Swedish non-profit co-founded by <NAME>. "Gapminder produces free teaching resources making the world understandable based on reliable statistics."
# - [Cell phones (total), by country and year](https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--datapoints--cell_phones_total--by--geo--time.csv)
# - [Population (total), by country and year](https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--datapoints--population_total--by--geo--time.csv)
# - [Geo country codes](https://github.com/open-numbers/ddf--gapminder--systema_globalis/blob/master/ddf--entities--geo--country.csv)
#
# These two links have everything you need to successfully complete the Sprint Challenge!
# - [Pandas documentation: Working with Text Data](https://pandas.pydata.org/pandas-docs/stable/text.html]) (one question)
# - [Pandas Cheat Sheet](https://github.com/pandas-dev/pandas/blob/master/doc/cheatsheet/Pandas_Cheat_Sheet.pdf) (everything else)
# + [markdown] id="wWEU2GemX68A" colab_type="text"
# ## Part 0. Load data
#
# You don't need to add or change anything here. Just run this cell and it loads the data for you, into three dataframes.
# + id="bxKtSi5sRQOl" colab_type="code" colab={}
import pandas as pd
# Gapminder datapoints: total cell phones and total population per (geo, time).
cell_phones = pd.read_csv('https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--datapoints--cell_phones_total--by--geo--time.csv')
population = pd.read_csv('https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--datapoints--population_total--by--geo--time.csv')
# Country code lookup; rename so 'geo' is the code and 'country' the name,
# matching the datapoint frames for the joins below.
geo_country_codes = (pd.read_csv('https://raw.githubusercontent.com/open-numbers/ddf--gapminder--systema_globalis/master/ddf--entities--geo--country.csv')
                     .rename(columns={'country': 'geo', 'name': 'country'}))
# + [markdown] id="AZmVTeCsX9RC" colab_type="text"
# ## Part 1. Join data
# + [markdown] id="GLzX58u4SfEy" colab_type="text"
# First, join the `cell_phones` and `population` dataframes (with an inner join on `geo` and `time`).
#
# The resulting dataframe's shape should be: (8590, 4)
# + id="GVV7Hnj4SXBa" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="1b4d9eb6-cd20-4c22-a69c-02313ca27ccb"
cell_phones.head()
# + id="1QKjxM99vbFu" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="4567c5b7-9218-4020-ae98-59a76c55fbe4"
population.head()
# + id="cT41Ph6jvfid" colab_type="code" colab={}
# Inner-join population and cell phone counts on (geo, time) — only
# country/year pairs present in both datasets survive.
merged = pd.merge(population,cell_phones,how='inner', on=('geo','time'))
# + id="2_SG3hTwwPLR" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="3ece0237-0140-4bac-d548-bf3c99af1979"
merged.shape
# + [markdown] id="xsXpDbwwW241" colab_type="text"
# Then, select the `geo` and `country` columns from the `geo_country_codes` dataframe, and join with your population and cell phone data.
#
# The resulting dataframe's shape should be: (8590, 5)
# + id="Q2LaZta_W2CE" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 270} outputId="a43e4cf0-70b8-4df9-d7a3-5404d04d80e3"
geo_country_codes.head()
# + id="atgrj6D8wiWe" colab_type="code" colab={}
# Attach the human-readable country name (joins on the shared 'geo' column).
final = pd.merge(merged,geo_country_codes[['geo','country']])
# + id="ZXLhrylaxKCH" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="9859f3f6-2193-4cda-bb71-e91b69baa505"
final.shape
# + id="_-5FszOmxgXu" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1969} outputId="39fd809d-1e62-4ea8-80b1-64b84856ef5f"
final
# + id="sxEiJRf4y7gd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1071} outputId="ce9c6550-4522-4691-ab31-c720e1435c3f"
final['country'].value_counts().head(500)
# + [markdown] id="oK96Uj7vYjFX" colab_type="text"
# ## Part 2. Make features
# + [markdown] id="AD2fBNrOYzCG" colab_type="text"
# Calculate the number of cell phones per person, and add this column onto your dataframe.
#
# (You've calculated correctly if you get 1.220 cell phones per person in the United States in 2017.)
# + id="wXI9nQthYnFK" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="b2392ac2-ccac-42a6-cd0a-6ccced996da0"
# Cell phones per person = total cell phones / total population, computed
# row-wise for every (country, year).
# Bug fix: the original divided len('cell_phones_total') by
# len('population_total') — the lengths of the column-NAME strings — which
# assigned the same constant (17/16 = 1.0625) to every row instead of the
# actual ratio (the check value is 1.220 for the United States in 2017).
final['cell_phones_per_person'] = final['cell_phones_total'] / final['population_total']
final.head()
# + id="SKpXVi4H4QOR" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 80} outputId="b76726de-49aa-446d-e951-1986e3b932b9"
# Sanity check: the United States in 2017 should show ~1.220 phones/person.
final.loc[(final['country'] == 'United States') & (final['time'] == 2017)]
# + id="-0TOK--S577X" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="ee7e1e7e-8336-40fe-8770-c074dd658e4f"
# Manual confirmation: US 2017 cell phones / US 2017 population.
395881000.0/324459463
# + [markdown] id="S3QFdsnRZMH6" colab_type="text"
# Modify the `geo` column to make the geo codes uppercase instead of lowercase.
# + id="93ADij8_YkOq" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="ab4f5099-0dc5-4fc0-d46d-adb2ddf27021"
# Normalize the geo codes to uppercase (e.g. 'usa' -> 'USA').
final['geo'] = final['geo'].str.upper()
final['geo'].head()
final.head()
# + [markdown] id="hlPDAFCfaF6C" colab_type="text"
# ## Part 3. Process data
# + [markdown] id="k-pudNWve2SQ" colab_type="text"
# Use the describe function, to describe your dataframe's numeric columns, and then its non-numeric columns.
#
# (You'll see the time period ranges from 1960 to 2017, and there are 195 unique countries represented.)
# + id="g26yemKre2Cu" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 297} outputId="518f5db8-8aeb-4534-e8e3-70357966a49d"
final.describe()
# + id="6szgxQav2aDQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 297} outputId="d9a20096-4a16-47a5-fe41-239e8f358550"
# To show all numeric columns
import numpy as np
final.describe(include=[np.number])
# + id="ccR8DRhh2xQ1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 173} outputId="5c902559-d6d6-410e-b22f-e0993928c64b"
# To show all non-numeric columns.
# FIX: np.object was deprecated in NumPy 1.20 and removed in 1.24; the builtin
# `object` selects the same dtype and is the supported spelling.
final.describe(include=[object])
# + [markdown] id="zALg-RrYaLcI" colab_type="text"
# In 2017, what were the top 5 countries with the most cell phones total?
#
# Your list of countries should have these totals:
#
# | country | cell phones total |
# |:-------:|:-----------------:|
# | ? | 1,474,097,000 |
# | ? | 1,168,902,277 |
# | ? | 458,923,202 |
# | ? | 395,881,000 |
# | ? | 236,488,548 |
#
#
# + id="jUDT-prSRo0a" colab_type="code" colab={}
f = final['time'] == 2017
# + id="aEkE86mPWzfg" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="e8d08c24-a8c9-45e1-f8ad-81b4e080d297"
f2 = final[f]
f2.head()
# + id="DUsPi9CySMfQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="e3ad71c0-7309-46b3-d8c3-6d547761a97d"
f3 = f2.sort_values(by='cell_phones_total', ascending=False).head()
f3
# + id="C1DY2lrLRlTp" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="702424c3-4a25-41f7-c4f6-a8562fa304b8"
# Answer
f4 = f3[['cell_phones_total','country']]
f4.columns = f4.columns.str.replace('_',' ')
f4
# + id="JdlWvezHaZxD" colab_type="code" colab={}
# This optional code formats float numbers with comma separators
pd.options.display.float_format = '{:,}'.format
# + [markdown] id="03V3Wln_h0dj" colab_type="text"
# 2017 was the first year that China had more cell phones than people.
#
# What was the first year that the USA had more cell phones than people?
# + id="r6-_86yLMiMH" colab_type="code" colab={}
f = final.query('cell_phones_total > population_total')
# + id="NqDAf8XUTnjN" colab_type="code" colab={}
# In 2014, US had more cell phones than number of people
f.tail(50)
# + [markdown] id="6J7iwMnTg8KZ" colab_type="text"
# ## Part 4. Reshape data
# + [markdown] id="LP9InazRkUxG" colab_type="text"
# Create a pivot table:
# - Columns: Years 2007—2017
# - Rows: China, India, United States, Indonesia, Brazil (order doesn't matter)
# - Values: Cell Phones Total
#
# The table's shape should be: (5, 11)
# + id="JD7mXXjLj4Ue" colab_type="code" colab={}
# Restrict to the years 2007-2017 (inclusive).
# BUG FIX: the frame was assigned to `time1_df` but then referenced as `time1`
# here and in the cell below, raising NameError -- use one consistent name.
time1 = final.loc[(final['time'] >= 2007) & (final['time'] <= 2017)]
time1
# + id="f7QmI17hedMX" colab_type="code" colab={}
# Keep only the five countries of interest for the pivot table.
subset = time1[time1['country'].isin(['China', 'India', 'United States', 'Brazil', 'Indonesia'])]
# + id="cLfzqJ80X1pz" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 272} outputId="af412449-b226-42a7-df55-17b9958deb8f"
table1 = subset.pivot_table(values = 'cell_phones_total',
index=['country'],
columns = ['time'])
table1.head()
# + [markdown] id="CNKTu2DCnAo6" colab_type="text"
# #### OPTIONAL BONUS QUESTION!
#
# Sort these 5 countries, by biggest increase in cell phones from 2007 to 2017.
#
# Which country had 935,282,277 more cell phones in 2017 versus 2007?
# + id="O4Aecv1fmQlj" colab_type="code" colab={}
# + [markdown] id="7iHkMsa3Rorh" colab_type="text"
# If you have the time and curiosity, what other questions can you ask and answer with this data?
| DS_Unit_1_Sprint_Challenge_2_Data_Wrangling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# # Dual CRISPR Screen Analysis
# # Step 7: Abundance Thresholds
# <NAME>, CCBB, UCSD (<EMAIL>)
#
# ## Instructions
#
# To run this notebook reproducibly, follow these steps:
# 1. Click **Kernel** > **Restart & Clear Output**
# 2. When prompted, click the red **Restart & clear all outputs** button
# 3. Fill in the values for your analysis for each of the variables in the [Input Parameters](#Input-Parameters) section
# 4. Click **Cell** > **Run All**
#
# ## Input Parameters
g_dataset_name = "Notebook7Test"
g_prepped_counts_run_prefix = "TestSet7"
g_prepped_counts_dir = '~/dual_crispr/test_data/test_set_7'
g_min_count_limit = 10 #Note: in absolute counts, not log2
g_max_fraction_acceptable_spline_density_diff = 0.02 # % of diff between max spline and min density
g_max_fraction_counts_excluded = 0.95 # any threshold throwing out >x% of counts is not acceptable
g_thresholds_run_prefix = ""
g_thresholds_dir = '~/dual_crispr/test_outputs/test_set_7'
# ## Automated Set-Up
# +
import inspect
import ccbb_pyutils.analysis_run_prefixes as ns_runs
import ccbb_pyutils.files_and_paths as ns_files
import ccbb_pyutils.notebook_logging as ns_logs
def describe_var_list(input_var_name_list):
    """Return a "name: value" line for each named module-level variable.

    :param input_var_name_list: list of variable-name strings; each name is
        evaluated in this module's global scope (so the variables must exist).
    :return: single string with one "name: value\\n" line per input name.
    """
    lines = []
    for var_name in input_var_name_list:
        # eval resolves the name against this module's globals, matching the
        # notebook-parameter variables defined above.
        lines.append(f"{var_name}: {eval(var_name)}\n")
    return "".join(lines)
ns_logs.set_stdout_info_logger()
# -
g_prepped_counts_dir = ns_files.expand_path(g_prepped_counts_dir)
g_thresholds_run_prefix = ns_runs.check_or_set(g_thresholds_run_prefix, ns_runs.generate_run_prefix(g_dataset_name))
g_thresholds_dir = ns_files.expand_path(g_thresholds_dir)
print(describe_var_list(['g_prepped_counts_dir', 'g_thresholds_run_prefix', 'g_thresholds_dir']))
ns_files.verify_or_make_dir(g_thresholds_dir)
# ## R Magic Import and Set-Up
# %load_ext rpy2.ipython
# +
from rpy2.robjects import r
import rpy2.robjects as robjects
gR = robjects.r
# -
# ## Scoring-Ready Counts File Load
import dual_crispr.scoring_prep as ns_prep
print(inspect.getsource(ns_prep.get_prepped_file_suffix))
import dual_crispr.construct_file_extracter as ns_extractor
print(inspect.getsource(ns_extractor.get_construct_header))
print(inspect.getsource(ns_extractor.get_potential_annotation_headers))
# +
import pandas
def get_prepped_counts_only_df(prepped_counts_dir, prepped_counts_run_prefix):
    """Load the scoring-ready counts file and return only its count columns.

    :param prepped_counts_dir: directory containing the prepped counts file.
    :param prepped_counts_run_prefix: run prefix used to build the file name.
    :return: pandas DataFrame indexed by the construct header, restricted to
        the per-sample count columns (annotation columns stripped).
    """
    prepped_counts_suffix = ns_prep.get_prepped_file_suffix()
    prepped_counts_fp = ns_files.build_multipart_fp(prepped_counts_dir, [prepped_counts_run_prefix,
                                                                         prepped_counts_suffix])
    # FIX: pandas.read_table was deprecated (0.24) and removed in pandas 2.0;
    # read_csv with sep='\t' is the equivalent supported call.
    prepped_counts_df = pandas.read_csv(prepped_counts_fp, sep='\t',
                                        index_col=ns_extractor.get_construct_header())
    total_headers = list(prepped_counts_df.columns.values)
    # Drop any annotation columns so only per-sample counts remain.
    unwanted_headers = ns_extractor.get_potential_annotation_headers()
    count_headers = [x for x in total_headers if x not in unwanted_headers]
    return prepped_counts_df.loc[:, count_headers]
# -
g_prepped_counts_only_df = get_prepped_counts_only_df(g_prepped_counts_dir, g_prepped_counts_run_prefix)
g_prepped_counts_only_df
# ## Variable Transfer from Python to R
# 'temp' assignments suppress printing of cruft stdout
temp = gR.assign('gPreppedCountsDf', g_prepped_counts_only_df)
temp = gR.assign('gMinCountLimit', g_min_count_limit)
temp = gR.assign('gMaxFractionAcceptableSplineDensityDiff', g_max_fraction_acceptable_spline_density_diff)
temp = gR.assign('gMaxFractionCountsExcluded', g_max_fraction_counts_excluded)
# ## R Set-Up
# + language="R"
#
# options(jupyter.plot_mimetypes = c("text/plain", "image/png" ))
# options(repr.plot.width=7, repr.plot.height=7)
# options(digits=10)
# -
# ## Abundance-Threshold Identification Heuristic
# + language="R"
#
# gManualColor = "black"
# gMinimumsColor = "orange"
# gMaximumsColor = "turquoise"
# gHistogramColor = "blue"
# gDensityColor = "lightgreen"
# gSplineColor = "lightpink"
# gChosenColor = "red"
# gSpar = NULL
#
#
# extremumIsAcceptable<-function(putativeThreshold, hist, maxCountFractionExcluded){
# result = FALSE
# if (!is.null(putativeThreshold)) {
# fractionCountsExcluded = getFractionCountsExcluded(hist, putativeThreshold,
# maxCountFractionExcluded)
# result = fractionCountsExcluded<maxCountFractionExcluded
# }
# return(result)
# }
#
#
# getFractionCountsExcluded<-function(hist, putativeThreshold, maxCountFractionExcluded){
# tempHistDf = data.frame(mids=hist$mids, counts=hist$counts)
# eligibleHistDf = tempHistDf[which(hist$mids<putativeThreshold), ]
# result = sum(eligibleHistDf$counts)/sum(hist$counts)
# return(result)
# }
#
# findExtremaIndices<-function(objWithXandY, getMins=TRUE){
# relevantDiff = if(getMins==TRUE) 2 else -2
# indicesOfExtrema = which(diff(sign(diff(objWithXandY$y)))==relevantDiff)+1
# return(indicesOfExtrema)
# }
#
# getlog2CountsAndFreqsAtExtrema<-function(densityObj, indicesOfExtrema){
# log2CountsAtExtrema = densityObj$x[indicesOfExtrema]
# densityFunc = approxfun(densityObj)
# freqsAtExtrema = densityFunc(log2CountsAtExtrema)
# result = data.frame(log2CountsAtExtrema, freqsAtExtrema)
# result = result[with(result, order(log2CountsAtExtrema)), ]
# return(result)
# }
#
# # general concept: identify the peak of the "main distribution", then look for the lowest point in the
# # "valley" between it and the noise spike at the low end of the counts histogram.
# # Not all count distributions have this general shape; for all known cases that don't, this method
# # will return NULL (rather than just silently picking a bad threshold).
# findSmallestMinLeftOfMax<-function(splineWithXandY, minCountLimit, hist, maxCountFractionExcluded){
# minLog2CountThreshold = log2(minCountLimit)
# result = NULL # assume failure
#
# #look for row indices of local interior maxima and local interior minima in input spline curve
# indicesOfMaxes = findExtremaIndices(splineWithXandY, FALSE)
# indicesOfMins = findExtremaIndices(splineWithXandY, TRUE)
#
# # give up if there aren't at least one of each; otherwise
# if (length(indicesOfMaxes)>0 & length(indicesOfMins)>0){
# # get x and y values in the rows representing the local interior maxima
# xAndYatMaxesDf = getlog2CountsAndFreqsAtExtrema(splineWithXandY, indicesOfMaxes)
# eligibleMaxesDf = xAndYatMaxesDf[which(
# xAndYatMaxesDf$log2CountsAtExtrema >= minLog2CountThreshold), ]
#
# # if there are no local interior maxima at x values gte the global minimum allowed, give up; otherwise
# if (nrow(eligibleMaxesDf)>0){
# # pick the x position of the eligible local interior maximum with the largest y value
# chosenMaxDf = eligibleMaxesDf[which(
# eligibleMaxesDf$freqsAtExtrema == max(eligibleMaxesDf$freqsAtExtrema)), ]
# rightmostLog2Count = chosenMaxDf$log2CountsAtExtrema[1]
#
# # get x and y values in the rows representing the local interior minima
# xAndYatMinsDf = getlog2CountsAndFreqsAtExtrema(splineWithXandY, indicesOfMins)
# eligibleMinsDf = xAndYatMinsDf[which(
# xAndYatMinsDf$log2CountsAtExtrema >= minLog2CountThreshold &
# xAndYatMinsDf$log2CountsAtExtrema < rightmostLog2Count), ]
#
# # if there are no local interior minima with x values gte the global minimum allowed and
# # lt the x position of the chosen maximum, give up; otherwise
# if (nrow(eligibleMinsDf)>0){
# # pick the x position of the eligible local interior minimum with the smallest y value
# chosenMinDf = eligibleMinsDf[which(
# eligibleMinsDf$freqsAtExtrema == min(eligibleMinsDf$freqsAtExtrema)), ]
# putativeResult = chosenMinDf$log2CountsAtExtrema
# # Only known situation where above logic picks a bad threshold is when all "real"
# # data is monotonically decreasing but there is (at least one) minute local maximum
# # in the noise at far right of the count distribution; extremumIsAcceptable sanity-checks
# # for that pathological case.
# if (extremumIsAcceptable(putativeResult, hist, maxCountFractionExcluded)){
# result = putativeResult
# }
# }
# }
# }
#
# return(result)
# }
#
# # helper for findSplineAndDensityNearPoint
# makeSplineAndDensityDf<-function(scaledDensityXandY, splineXandY){
# # Determine spline and (scaled) density y values at shared set of x values
# # where neither is NA, then calculate difference between spline and density
# # at each of those points.
# splineFunc = approxfun(splineXandY)
# splineYAtDensityX = splineFunc(scaledDensityXandY$x)
# result = data.frame(x=scaledDensityXandY$x, splineY=splineYAtDensityX,
# densityY=scaledDensityXandY$y)
# result = na.omit(result)
# result$y = result$splineY-result$densityY
# return(result)
# }
#
#
# # helper for findSplineAndDensityNearPoint
# getNearnessThreshold<-function(splineAndDensityDf, maxSplineDensityDiff){
# # Get global maximum value of spline function at any x
# # Get global minimum of scaled density function at any x
# # NB: x for max and min need not (and usually won't) be the same
# # Use these values to find the maximum difference between spline
# # and scaled density y values (regardless of x), then define
# # "near" to be when spline and scaled density y values for same x get within
# # the specified arbitrary fraction of that difference.
# maxSplineY = max(splineAndDensityDf$splineY)
# minDensityY = min(splineAndDensityDf$densityY)
# maxDiff = maxSplineY - minDensityY
# result = maxDiff * maxSplineDensityDiff
# return(result)
# }
#
#
# # general concept: find the leftmost point (greater than the global minimum allowed)
# # in the count distribution where the scaled density curve and the spline curve are
# # within the global arbitrary threshold of one another.
# # This gives worse results than findSmallestMinLeftOfMax on "good" count distributions,
# # so it isn't the first-choice approach, but it makes a good fall-back for count
# # distributions (especially noisy or low-signal ones) where findSmallestMinLeftOfMax
# # fails to find a threshold. Fails to find a threshold ONLY in cases where
# # spline and density curve never get "near" each other over range of
# # counts in the underlying count distribution.
# findSplineAndDensityNearPoint<-function(scaledDensityXandY, splineXandY, minCountLimit,
# maxFractionAcceptableSplineDensityDiff, hist, maxCountFractionExcluded){
#
# log2minCountLimit = log2(minCountLimit)
# maxSplineDensityDiff = maxFractionAcceptableSplineDensityDiff
# result = NULL # assume failure
#
# splineAndDensityDf = makeSplineAndDensityDf(scaledDensityXandY, splineXandY)
# nearnessThreshold = getNearnessThreshold(splineAndDensityDf, maxSplineDensityDiff)
#
# # if there are no records whose x positions are gte the global minimum allowed,
# # give up; otherwise
# eligibleSplineAndDensityDf = splineAndDensityDf[which(
# splineAndDensityDf$x >= log2minCountLimit), ]
# if (nrow(eligibleSplineAndDensityDf)>0){
#
# # Walk through all eligible x positions, from smallest toward largest.
# # Assuming you don't get lucky and just find an x value right on the threshold,
# # find the pair of x positions (if any such exist) that bracket the
# # spot where the spline and density curves get "near enough" to each other.
# # Return the point half-way between these two x positions (note that this is
# # obviously a punt--I *could* do numerical approximation to find it, or
# # set up a function that reached zero when the spline and density were
# # "near enough" and then optimize it, but frankly it just doesn't seem
# # worth the trouble ...)
# putativeResult = NULL
# largestXgtThresh = NULL
# smallestXltThresh = NULL
# for (i in 1:nrow(eligibleSplineAndDensityDf)){
# currYval = eligibleSplineAndDensityDf$y[i]
# currXval = eligibleSplineAndDensityDf$x[i]
# if (currYval == nearnessThreshold) {
# putativeResult = currXval
# break
# } else if (currYval <= nearnessThreshold) {
# smallestLtThresh = currXval
# if (is.null(largestXgtThresh)) {
# putativeResult = smallestLtThresh
# } else {
# putativeResult = (smallestLtThresh - largestXgtThresh)/2 + largestXgtThresh
# }
# break
# } else { # (currYval > nearnessThreshold)
# largestXgtThresh = currXval
# }
# }
#
# if (extremumIsAcceptable(putativeResult, hist, maxCountFractionExcluded)){
# result = putativeResult
# }
# }
#
# return(result)
# }
#
# analyzeCountsDist<-function(log2countsDfForSingleSample, rangeObj, minCountLimit, maxCountFractionExcluded,
# maxFractionAcceptableSplineDensityDiff){
#
# resultSummary = "No acceptable threshold found." # assume failure
# rge<-rangeObj
# increment = 0.05
#
# log2CurrCountsHist<-hist(log2countsDfForSingleSample,
# breaks=seq(0-increment,rge[2]+increment,by=increment),
# plot=FALSE)
#
# # density curve
# scaleFactor = sum(log2CurrCountsHist$counts)*increment
# log2CurrCountsDensity<-density(log2countsDfForSingleSample)
# scaledLog2CurrCountsDensityDf = data.frame(x=log2CurrCountsDensity$x,
# y=log2CurrCountsDensity$y*scaleFactor)
#
# # smoothing spline curve of non-zero freqs only
# log2CurrCountsHistXandY = data.frame(x=log2CurrCountsHist$mids, y=log2CurrCountsHist$count)
# nonZeroLog2CurrCountsHistXandY = log2CurrCountsHistXandY[which(log2CurrCountsHistXandY$y>0), ]
# log2CurrCountsSpline = smooth.spline(nonZeroLog2CurrCountsHistXandY$x, nonZeroLog2CurrCountsHistXandY$y)
#
# # threshold selection
# putativeThreshold = findSmallestMinLeftOfMax(log2CurrCountsSpline, minCountLimit,
# log2CurrCountsHist, maxCountFractionExcluded)
# if (!is.null(putativeThreshold)){
# resultSummary = "Smallest-local-minimum-in-valley method used."
# } else {
# putativeThreshold = findSplineAndDensityNearPoint(scaledLog2CurrCountsDensityDf, log2CurrCountsSpline,
# minCountLimit, maxFractionAcceptableSplineDensityDiff, log2CurrCountsHist, maxCountFractionExcluded)
# if (!is.null(putativeThreshold)){
# resultSummary = "Near-point-of-spline-and-density method used."
# }
# }
#
# result = list(threshold = putativeThreshold, resultSummary=resultSummary,
# histogram=log2CurrCountsHist,
# scaledDensity=scaledLog2CurrCountsDensityDf, spline=log2CurrCountsSpline)
# return(result)
# }
#
# drawAnalyzedCountsDist<-function(sampleName, rangeObj, analysisResult, manualThreshold=NULL){
# rge<-rangeObj
# xPositions = seq(from = 0, to = ceiling(rge[2])+1, by = 1)
# xLabels = 2^(xPositions)
# titleText = paste0(sampleName,"\n", analysisResult$resultSummary)
#
# hist = analysisResult$histogram
# plot(hist,
# col=gHistogramColor,
# border=FALSE,
# main=titleText,
# xaxt = 'n',
# xlab=""
# )
#
# axis(side = 1, at = xPositions, labels=xLabels, las=2)
# mtext("counts (pseudocount added to zeros only)", side=1, line=3)
#
# # density curve
# lines(analysisResult$scaledDensity,col=gDensityColor)
#
# # smoothing spline curve of non-zero freqs only
# lines(analysisResult$spline, col=gSplineColor)
#
# # rug plot of manual threshold, if any
# if (!is.null(manualThreshold)){
# rug(manualThreshold, col=gManualColor, lwd=3)
# }
#
# # vertical line of selected threshold, if any
# analysisThreshold = analysisResult$threshold
# if (!is.null(analysisThreshold)){
# abline(v=analysisThreshold, col=gChosenColor)
# fractionExcludedCounts = getFractionCountsExcluded(analysisResult$histogram,
# analysisThreshold, maxCountFractionExcluded)
# percentExcludedCounts = fractionExcludedCounts*100
# title(sub=paste0(format(round(percentExcludedCounts, 1), nsmall = 1), "% of counts excluded"))
# }
# }
#
# analyzeAndDrawCountsDists<-function(multiSampleCountsDf, minCountLimit, maxCountFractionExcluded,
# maxFractionAcceptableSplineDensityDiff, manualThresholds=NULL){
#
# resultDf = data.frame(sampleName = character(0), log2CountsThresh = numeric(0));
#
# multiSampleCountsDf[multiSampleCountsDf==0]<-1 #pseudocounts
# log2MultiSampleCountsDf = log2(multiSampleCountsDf)
# rangeObj = range(log2MultiSampleCountsDf)
#
# for (i in 1:ncol(multiSampleCountsDf)) {
# currSampleName = colnames(multiSampleCountsDf)[i]
# log2countsDfForSingleSample = log2MultiSampleCountsDf[, i]
# analysisResult = analyzeCountsDist(log2countsDfForSingleSample, rangeObj,
# minCountLimit, maxCountFractionExcluded, maxFractionAcceptableSplineDensityDiff)
# outputThreshold = if (is.null(analysisResult$threshold)) NA else analysisResult$threshold
# resultDf = rbind(resultDf, data.frame(sampleName=currSampleName, log2CountsThresh=outputThreshold))
#
#
# currManualThreshold = NULL
# if (!is.null(manualThresholds)){
# if (length(manualThresholds)>=i){
# currManualThreshold = log2(manualThresholds[i])
# }
# }
#
# drawAnalyzedCountsDist(currSampleName, rangeObj, analysisResult, currManualThreshold)
# }
#
# return(resultDf)
# }
# + language="R"
# gThresholdsDf = analyzeAndDrawCountsDists(gPreppedCountsDf, gMinCountLimit, gMaxFractionCountsExcluded,
# gMaxFractionAcceptableSplineDensityDiff)
# -
# %R gThresholdsDf
# Fail fast if the heuristic could not find a threshold for every sample --
# an R NA in gThresholdsDf surfaces as NaN once pulled into Python.
if gR['gThresholdsDf'].isnull().values.any():
    raise RuntimeError("Automated abundance threshold selection was not able to identify thresholds for all samples.")
# ## Output to File
import dual_crispr.scoring_prep as ns_prep
print(inspect.getsource(ns_prep.get_sample_name_header))
print(inspect.getsource(ns_prep.get_abundance_thresh_header))
print(inspect.getsource(ns_prep.get_abundance_thresh_file_suffix))
def write_thresholds_file(thresholds_df, run_prefix, output_dir):
    """Write the per-sample abundance thresholds to a tab-separated file.

    NOTE: renames the columns of *thresholds_df* in place (a side effect on
    the caller's object) to the standard sample-name / threshold headers.

    :param thresholds_df: two-column dataframe (sample name, log2 threshold);
        presumably either a pandas DataFrame or an rpy2 dataframe -- hence the
        to_csv/to_csvfile fallback below (TODO: confirm the rpy2 case).
    :param run_prefix: run prefix used to build the output file name.
    :param output_dir: directory in which the thresholds file is written.
    """
    thresholds_df.columns = [ns_prep.get_sample_name_header(), ns_prep.get_abundance_thresh_header()]
    output_fp = ns_files.build_multipart_fp(output_dir, [run_prefix, ns_prep.get_abundance_thresh_file_suffix()])
    try:
        thresholds_df.to_csv(output_fp, index=False, sep='\t')
    except AttributeError:  # if there is no to_csv method
        thresholds_df.to_csvfile(output_fp, row_names=False, sep='\t')
write_thresholds_file(gR['gThresholdsDf'], g_thresholds_run_prefix, g_thresholds_dir)
print(ns_files.check_file_presence(g_thresholds_dir, g_thresholds_run_prefix,
ns_prep.get_abundance_thresh_file_suffix(),
check_failure_msg="Abundance threshold selection failed to produce an output file.")
)
| dual_crispr/distributed_files/notebooks/Dual CRISPR 7-Abundance Thresholds.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 6. Series Temporais
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
import datetime
dados_completos = pd.read_csv('airline-passengers.csv')
# #### Visualizando todo o dataset
dados_completos.head()
# #### Verificando as características de todo o dataset
dados_completos.info()
# #### Ajustando o dataset para criar uma coluna do tipo "data"
dados_completos['datetime'] = pd.to_datetime(dados_completos['Month'])
dados_completos.head()
# #### Verificando a nova característica do nosso dataset (coluna "datatime")
dados_completos.info()
type(dados_completos) #verificando o tipo de dados
# #### Verificando se existem outliers
plt.figure(figsize=(10, 10))
g = sns.boxplot(dados_completos['Passengers']) #realiza o plot através da biblioteca seaborn
g.set_title('Box plot para o embarque passageiros')
# #### Plotando o gráfico da variação do número de passageiros no período
plt.figure(figsize=(20, 10))
g = sns.lineplot(x=dados_completos.index,y=dados_completos['Passengers'])
g.set_title('Série Temporal do embarque de passageiros')
g.set_xlabel('Índice')
g.set_ylabel('Número de passageiros em viagens de avião')
# #### Realizando a decomposição da série temporal
from statsmodels.tsa.seasonal import seasonal_decompose #biblioteca responsável por realizar a decomposição da série temporal
# #### Modificando o indice para ser temporal
df_serie_temporal = dados_completos.set_index('datetime')
# #### Verifica as colunas existentes
df_serie_temporal.columns
# #### Realiza o drop da coluna 'month'
df_serie_temporal.drop('Month', axis=1, inplace=True)
# #### Verifica o novo dataset
df_serie_temporal.head()
# #### Realizando a construção do modelo de decomposição da série temporal
decomposicao_aditiva = seasonal_decompose(df_serie_temporal, model='aditive', extrapolate_trend='freq') #aplica o modelo de decomposição aditiva
# #### Realiza o plot da decomposição
from pylab import rcParams
rcParams['figure.figsize'] = 18, 8
fig=decomposicao_aditiva.plot() #realiza o plot da decomposição
plt.show()
# #### Testando a estacionariedade da série temporal
from statsmodels.tsa.stattools import adfuller #importando o teste ADF
resultado_ADF = adfuller(df_serie_temporal.Passengers.values, autolag='AIC') #aplica o teste adf
# para o teste ADF a hipótese nula é que existe, pelo menos, uma raiz negativa na série temporal (série é não-estacionária)
print('ADF P-valor:',resultado_ADF[1] ) # com o p-valor maior que 0,05 a hipótese nula não é rejeitada
# #### Retirando a tendência da nossa série
# + jupyter={"outputs_hidden": true}
detrended = df_serie_temporal - decomposicao_aditiva.trend
plt.plot(detrended)
# -
# #### Retirando a sazomalidade
# + jupyter={"outputs_hidden": true}
deseasonalized = df_serie_temporal - decomposicao_aditiva.seasonal
plt.plot(deseasonalized)
# -
# #### Realizando a análise de autocorrelação nos dados
from statsmodels.graphics.tsaplots import plot_acf #importando a biblioteca para o plot da autocorrelação
plot_acf(df_serie_temporal, lags=50) #aplica a autocorrelação entre os dados
plt.show() #mostra uma correlação significativa com 14 lags
# #### Transformando a série em estacionária
# First difference (discrete derivative) to help make the series stationary.
df_serie_temporal['Passengers_diff'] = df_serie_temporal['Passengers'] - df_serie_temporal['Passengers'].shift(1)
# NOTE: the original `col = col.dropna()` assignment was a no-op -- pandas
# aligns the shorter result back on the index, re-inserting NaN for the first
# row.  The NaN is dropped explicitly where the series is consumed (see the
# ADF test on the differenced values below), so the line is removed here.
df_serie_temporal['Passengers_diff'].plot()
# #### Conferindo se agora está estacionária
X_diff = df_serie_temporal['Passengers_diff'].dropna().values
resultado_primeira_diff = adfuller(X_diff)
print('p-valor: %f' % resultado_primeira_diff[1]) #pvalor, praticamente 0.05, não rejeita a hipótese nula, mas vamos considerar que está estacionária
# + jupyter={"outputs_hidden": true, "source_hidden": true}
# !pip install tensorflow
# -
# #### Bibliotecas utilizadas para a construção dos modelos de previsão de vendas de passagens
import numpy
import matplotlib.pyplot as plt
import pandas
import math
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
# #### Volta o dataset para o formato original
serie_passageiros = df_serie_temporal['Passengers'].values
# #### Normalizzação do banco de dados, necessário para que os algoritmos possam ter um comportamento mais "previsível"
scaler = MinMaxScaler(feature_range=(0, 1)) #cria o objeto que realiza a normalização dos dados por meio dos valores mínimos e máximos
dataset = scaler.fit_transform(serie_passageiros.reshape(-1, 1)) # aplica a escala
print(dataset[0:20])
# #### Divide o conjunto de dados em treinamento e teste
train_size = int(len(dataset) * 0.67) #encontra o valor máximo para o treinamento
test_size = len(dataset) - train_size
train, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:]
print(len(train), len(test)) #tamanho do df para treinamento e teste
# #### Cria a matriz necessária para a entrada de dados
def create_dataset(dataset, look_back=1):
    """Build supervised-learning pairs from a univariate series.

    Slides a window of length ``look_back`` over column 0 of ``dataset``:
    each sample's features are the window values and its target is the
    value immediately after the window.

    :param dataset: 2-D array whose first column holds the series values.
    :param look_back: number of previous steps used as features per sample.
    :return: tuple (X, y) of numpy arrays with shapes
        (len(dataset) - look_back - 1, look_back) and
        (len(dataset) - look_back - 1,).
    """
    n_samples = len(dataset) - look_back - 1
    features = [dataset[i:i + look_back, 0] for i in range(n_samples)]
    targets = [dataset[i + look_back, 0] for i in range(n_samples)]
    return numpy.array(features), numpy.array(targets)
# ### Cria o reshape para que os dados estejam em um formato ideal para entrada
# Number of previous time steps fed to the model as features for each
# prediction.  (The original comment claimed "only one previous step will be
# used", which contradicted the value 14.)
look_back = 14
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)
# Reshape to the (samples, time_steps, features) layout expected by the LSTM
# layer: here a single "time step" carrying look_back features.
trainX = numpy.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1]))
testX = numpy.reshape(testX, (testX.shape[0], 1, testX.shape[1]))
trainX.shape
# #### Cria o modelo utilizando redes recorrentes e o LSTM
model = Sequential()
model.add(LSTM(4, input_shape=(1, look_back)))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
# #### Apresenta a arquitetura da rede
model.summary()
# #### Realiza o treinamento o modelo de previsão
# + jupyter={"outputs_hidden": true}
model.fit(trainX, trainY, epochs=100, batch_size=1, verbose=2)
# -
# #### Realiza as previsões
trainPredict = model.predict(trainX)
testPredict = model.predict(testX)
# #### Coloca os dados no formato original
trainPredict = scaler.inverse_transform(trainPredict)
trainY = scaler.inverse_transform([trainY])
testPredict = scaler.inverse_transform(testPredict)
testY = scaler.inverse_transform([testY])
# #### Realiza a mudança dos dados para a previsão
trainPredictPlot = numpy.empty_like(dataset)
trainPredictPlot[:, :] = numpy.nan
trainPredictPlot[look_back:len(trainPredict)+look_back, :] = trainPredict
#shift para os dados de teste
testPredictPlot = numpy.empty_like(dataset)
testPredictPlot[:, :] = numpy.nan
testPredictPlot[len(trainPredict)+(look_back*2)+1:len(dataset)-1, :] = testPredict
# #### Realiza o plot dos dados de previsão e o real
plt.plot(scaler.inverse_transform(dataset),label='Dataset')
plt.plot(trainPredictPlot, label='Treinamento')
plt.plot(testPredictPlot,label='Previsão')
plt.xlabel("Tempo")
plt.ylabel("Número de Passagens Vendidas")
plt.legend()
plt.show()
| 02. Processamento de Linguagem Natural/06. Series Temporais.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <font size="8"> **Navigation Project: Report** </font>
# # Introduction
# This nb discussess results in severals experiments run in order to solve Navigation problem stated in `README.md`.
#
# The **champion model** is a regular dqn trained at `Navigation-dq-v01.ipynb` and model is stored at `models/dq_checkpoint_v01.pth`
#
# Source code for agent and model is on `../src/` (on repository root folder)
import pandas as pd
import matplotlib.pyplot as plt
from pathlib import Path
plt.style.use(['seaborn-poster'])
def read_exp_scores(base_dir=Path('models', 'experiments')):
    """Load and concatenate every per-experiment score CSV.

    Generalized (backward-compatibly) to take the directory as a parameter;
    the default is the original hard-coded ``models/experiments`` location.

    :param base_dir: directory containing the ``*.csv`` score files
        (str or Path).
    :return: single DataFrame indexed by ``idx_episode`` with all
        experiments' rows stacked vertically.
    """
    base_dir = Path(base_dir)
    frames = []
    # Sort for a deterministic concatenation order (glob order is
    # filesystem-dependent).
    for csv_path in sorted(base_dir.glob('*.csv')):
        print(csv_path)
        frames.append(pd.read_csv(csv_path, index_col='idx_episode'))
    return pd.concat(frames, axis=0)
scores_df = read_exp_scores()
# # Learning Algorithm
# The learning algorithm used are **Deep Q Networks**, that aim to estimate the q-value function, when this function is properly estimated, the agent can interact with the environment and solve the game. With this function, for each current state `s`, the value of each action `a` can be estimated and therefore, if the estimation is good enough, a greedy policy will solve the game
#
# $$(s,a): \hat{q} $$
# $$ \text{greedy-policy}: max_{a}{\hat{q}(s)}$$
#
# That is, once the net is properly trained. To train these kinds of networks, several techniques must be assembled:
#
# * $\epsilon$-greedy policy: In order to balance exploration-explotation trade-off, at traing time, sometimes a random action will be chosen (instead of the one suggested by models's estimation).
# * target (called TD-Target): y = R + γ max{q_est(S’,A, w)} it can be seen that this target is moving, as q_est improves over time;
# * Model: Deep neural network; `relu` is used as the hidden activation function, with the following hyperparameters:
# * hidden_layers: a list of # units by layer
# * drop_p (float): Dropout layer p parameter.
# * batch_size (int): Number of examples on each batch
# * learning_rate (float): Optimizer parameter
#
# This vanilla version will suffer to converge, several improvements have been added over time. On this project, two versions are implemented.
# * Fixed Target Q-net (dq): A more stable version is implemented using fixed targets, by using two sets of weights called online (w) and offline (w- kept constant for some mini-batches and used in TD target estimation), resulting:
# $$ R + \gamma max{\hat{q}(S’,A, w-)} - \hat{q}(S,A, w)$$
# * Double Q-net (doubledq): instead of directly picking max{q_est(S’,A, w-)}, first select the best action using the online weights and then evaluate this action with the offline weights. This decouples action selection and evaluation (when q_est estimates poorly, directly picking the max results in overestimation)
# $$ R + \gamma \hat{q}(S’,argmax_{a}\hat{q}(S',a,w), w-) - \hat{q}(S,A, w)$$
# * Dueling Q-net (dueldq): Split q_est into two streams: Q(S,A) = V(S) + A(S,A), where V(S) is the value of being in a given state and A(S,A) maps the small advantages that represent each action's improvement over V(S)
#
# Both are implemented with `Experience Replay`, a technique that stores up to `buffer_size` experiences (s, a, r, s') in a buffer and then get a sample from it in order to provide a mini-batch of data for neural network gradient descent
#
# Online and offline weights are soft updated `soft_upd_param` every `update_every` steps. Epsilon is exponentially decayed over episodes to a minimum value `eps_start`, `eps_end` ,`eps_decay` (GLIE evolution of epsilon). Finally, $\gamma$ parameter stands for discount factor, that leverage present vs future rewards, this will impact on any agent/model combination
#
# Lastly, to run the train loop, a max number of episodes is allowed `n_episodes` and for each episode, number of time-steps are thresholded `max_t`. When the 100 window moving average of scores reaches `score_solved`, the environment is considered solved.
# +
import matplotlib.pyplot as plt
import numpy as np
# Compare GLIE epsilon-decay schedules: left panel over 2000 episodes,
# right panel zoomed into the first 1000 episodes.
fig, axs = plt.subplots(1,2, sharex=True, sharey=True, figsize=(16, 5))
eps1, eps2, eps3 = 0.995, 0.99, 0.95
ax1 = axs[0]
neps1 = np.arange(2000)
ax1.plot(1*eps1**neps1, label=rf'$\epsilon$={eps1}')
ax1.plot(1*eps2**neps1, label=rf'$\epsilon$={eps2}')
ax1.plot(1*eps3**neps1, label=rf'$\epsilon$={eps3}')
ax2 = axs[1]
neps2 = np.arange(1000)
ax2.plot(1*eps1**neps2, label=rf'$\epsilon$={eps1}')
ax2.plot(1*eps2**neps2, label=rf'$\epsilon$={eps2}')
ax2.plot(1*eps3**neps2, label=rf'$\epsilon$={eps3}')
for ax in axs:
    ax.grid()
    # 0.1 is the epsilon reference level discussed in the text below.
    ax.axhline(0.1, color='black')
    ax.legend()
plt.show()
# -
# The chosen value of `eps_start` is 0.95, it gets below 0.1 (a good standard to converge) before episode 500
# # Score by Agent
experiments= ['dqn:v01', 'doubledq:v01', 'dueldq:v01', 'dqn:v02']
# +
# One panel per experiment: score curves with the "solved" threshold at 13.
# Fix: removed a dead `ax1 = axs[row_idx]` alias that shadowed nothing and
# was never used (the loop already has `ax`).
fig, axs = plt.subplots(len(experiments),1, figsize=(16,24))
for row_idx, ax in enumerate(axs):
    experiment_name = experiments[row_idx]
    scores_df[scores_df['experiment']==experiment_name].plot(ax=ax)
    ax.axhline(13, color='black', alpha=0.5)
    ax.legend(title=experiment_name)
    ax.set_xlabel('number of episodes')
    ax.grid()
plt.show()
# -
# # Discussion
# Every agent/model is trained with the same set of hyperparameters. Neural nets have 2 layers of 64 and 32 units, which seems a good trade-off between complexity and fitting time. An alternative architecture that adds dropout layers is also included
#
# The best agent/model seems to be `regular dqn` (with fixed td-target) as it solves the environment at episode 500. Double dqn also solves the environment a little later. However, Dueling dqn does not converge at all.
#
# `Double q-net`, in theory, avoids overestimation of TD-Targets, however, its results are sightly worse than regular dqn, this may be to weight soft updating or than this overestimation effect is not so important in this set up.
#
# `Duel q-net` does not converge; this may be because splitting V(S) and A(S,A) is not a good step in this particular environment; in fact V(S) will depend on the bananas' distribution and this changes over time. In addition, there should be some randomness in banana generation, therefore, there may not be a privileged set of S and thus duel q-net is not a good architecture
#
# Finally, `prioritized experience replay` is not implemented due its additional complexity; from my point of view, there are not scarce or expensive transitions that justify this implementation
#
# **Further steps:**
#
# A more detailed hyperparameter tuning of the network architecture may yield better results; adding regularization with dropout layers does not improve convergence, therefore, it seems that [64, 32] does not overfit; fitting a more complex model (with some regularization) may benefit the agent
#
# In addition, as it only takes 500 episodes and $\epsilon$ reaches 0.1 slightly before, a lower `eps_start`=0.99 may be a good idea, so if the algorithm is expected to converge at 500 episodes, run the last 10% of episodes at $\epsilon = \text{eps_min}$
#
#
# ---
# **NOTE**
#
# Regarding Dueling qnetworks and its lack of convergence, I have run the same experiment on lander `dqn/rainbow/` on `lander` environment and copied the scores report to this folder `report-lander.jpg`. The agent solved the environment, but it took a lot of episodes. My conclusions are the following:
# * Splitting the stream into V(S) and A(S) does not yield an advantage on every problem; my guess is that it only has an impact on problems where there are some privileged states in V(S) where the distribution of advantages A(S,A) strongly changes (for example, reaching a goal)
# * Adding this extra complexity requires to fine tune the network hyperparameters, maybe to add more layers to any of the two streams, which makes more complicated the network definition
#
# In addition, Double q-net (in the lander exercise) has also taken a little extra number of episodes to converge; my guess is that in both problems, overestimation of q in TD_Targets is not a serious issue, although in other kinds of environments it may be
#
# ---
| p1_navigation/Report.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import sys
import random
import math
import re
import time
import numpy as np
import tensorflow as tf
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.patches as patches
ROOT_DIR = os.getcwd()
sys.path.append(ROOT_DIR)
from mrcnn import utils
from mrcnn import visualize
from mrcnn.visualize import display_images
import mrcnn.model as modellib
from mrcnn.model import log
import custom
# %matplotlib inline
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
# -
# Path to the trained Mask R-CNN weights (relative to the working directory).
custom_WEIGHTS_PATH = 'mask_rcnn_score_card_0009.h5'
config = custom.CustomConfig()
custom_DIR = os.path.join(ROOT_DIR, "datasets")
# +
# Derive an inference config from the training config's class so all
# training hyperparameters are inherited unchanged.
class InferenceConfig(config.__class__):
    # Run detection on one image at a time
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1
config = InferenceConfig()
config.display()
# -
# Force CPU inference.
DEVICE = "/cpu:0"
# NOTE(review): TEST_MODE is not referenced later in this notebook chunk.
TEST_MODE = "inference"
def get_ax(rows=1, cols=1, size=16):
    """Create a grid of Matplotlib Axes for the notebook's visualizations.

    Keeping figure creation in one place gives a single knob (``size``)
    to control how large every rendered image is.
    """
    figure_size = (size * cols, size * rows)
    _fig, axes = plt.subplots(rows, cols, figsize=figure_size)
    return axes
# +
# Load the validation split of the custom dataset.
dataset = custom.CustomDataset()
dataset.load_custom(custom_DIR, "val")
# Must call before using the dataset
dataset.prepare()
print("Images: {}\nClasses: {}".format(len(dataset.image_ids), dataset.class_names))
# -
# Build the inference model on the chosen device and load the weights.
with tf.device(DEVICE):
    model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR,
                              config=config)
print("Loading weights ", custom_WEIGHTS_PATH)
model.load_weights(custom_WEIGHTS_PATH, by_name=True)
# +
import cv2
# Read a sample image (BGR, as cv2 loads it) and resize to the two
# candidate input sizes; only image_resized is used below.
image = cv2.imread('3.jpg')
image_resized, window, scale, paddings, _ = utils.resize_image(
    image,
    min_dim=800,
    max_dim=1024,
    mode='square'
)
image_padded, window, scale, paddings, _ = utils.resize_image(
    image,
    min_dim=1280,
    max_dim=1280,
    mode='square'
)
# -
# Run detection and overlay the predicted instances.
result = model.detect([image_resized], verbose=1)
ax = get_ax(1)
r = result[0]
visualize.display_instances(image_resized, r['rois'], r['masks'], r['class_ids'],
                            dataset.class_names, r['scores'], ax=ax,
                            title='Predictions')
# Bounding box of the first mask from its flattened True indices.
# NOTE(review): for a row-major (H, W) flatten, row = idx // W = idx // shape[1];
# this uses shape[0], which only works because the resized image is square — confirm.
p = r['masks'][:,:,0].flatten()
p0 = np.where(p == True)[0][0]
p1 = np.where(p == True)[0][-1]
x0 = p0//image_resized.shape[0]
y0 = p0%image_resized.shape[0]
x1 = p1//image_resized.shape[0]
y1 = p1%image_resized.shape[0]
# ::-1 flips BGR -> RGB for display.
plt.imshow(image_resized[x0:x1, y0:y1, ::-1])
image_resized[x0:x1, y0:y1, ::-1].shape
len(r['class_ids'])
import skimage
# +
def split_x(img):
    """Split a grayscale strip image into per-character column slices.

    Scans every pixel column; a column where more than 80% of the pixels are
    dark (< 0.3) is treated as a separator. Runs wider than 3 px between
    separators become character slices. Detected boundaries are overlaid on
    the currently displayed image (yellow = start, green = end).
    """
    skimage.io.imshow(img)
    boundaries = []
    last_i = 0
    for i in range(img.shape[1]):
        dark = np.count_nonzero(img[:, i] < .3)
        if dark > img.shape[0]*.8:
            if i - last_i > 3:
                plt.plot([last_i,last_i], [0,img.shape[0]], c='yellow')
                # Bug fix: this line previously read the *global* img2
                # (img2.shape[0]) instead of the function argument img.
                plt.plot([i,i], [0,img.shape[0]], c='green')
                boundaries.append((last_i,i))
            last_i = i
    # Close the final segment if the scan did not end on a separator.
    # NOTE(review): `i` is the last loop index; a zero-width image would
    # raise NameError here — presumably never the case for these inputs.
    if i != last_i:
        plt.plot([last_i,last_i], [0,img.shape[0]], c='yellow')
        plt.plot([i,i], [0,img.shape[0]], c='green')
        boundaries.append((last_i,i))
    chars = []
    for y0, y1 in boundaries:
        chars.append(img[:, y0:y1])
    return chars
def split_y(img):
    """Trim a single character slice vertically and normalize it to 20x20.

    Scans rows for near-all-dark separators (>90% of pixels < 0.3), collects
    the bands between them, then pads the first sufficiently tall band to a
    square (filled with the image mean) and resizes it to 20x20.

    NOTE(review): the `return` sits inside the loop, so only the FIRST band
    taller than img.shape[0]/3 is returned; if no band qualifies the function
    implicitly returns None — callers should be prepared for that.
    """
    plt.figure()
    skimage.io.imshow(img)
    last_i = 0
    ban = []
    # Find horizontal separator rows and record the bands between them.
    for i in range(img.shape[0]):
        c = np.count_nonzero(img[i,:] < .3)
        if c > img.shape[1]*.9:
            if i - last_i > 3:
                plt.plot([0, img.shape[1]],[last_i,last_i], c='yellow')
                plt.plot([0, img.shape[1]],[i,i], c='green')
                ban.append((last_i, i))
            last_i = i
    # Close the trailing band if the scan did not end on a separator.
    if i != last_i:
        plt.plot([0, img.shape[1]],[last_i,last_i], c='yellow')
        plt.plot([0, img.shape[1]],[i,i], c='green')
        ban.append((last_i, i))
    for r0, r1 in ban:
        # Skip bands shorter than a third of the image height (noise).
        if r1 - r0 < img.shape[0]/3:
            continue
        # NOTE: local img2 shadows the module-level img2 defined later.
        img2 = img[r0:r1,:]
        s_max = max(img2.shape)
        pad_w = int((s_max - img2.shape[0])/2)
        pad_h = int((s_max - img2.shape[1])/2)
        # Pad to a square using the mean intensity of the whole slice.
        img2 = np.pad(img2, ((pad_w,pad_w), (pad_h,pad_h)), mode='constant', constant_values=np.mean(img))
        img2 = skimage.transform.resize(img2, (20,20), mode='reflect', anti_aliasing=True)
        return img2
# -
# Convert the cropped detection to grayscale, split into characters along x,
# then normalize each character slice to 20x20 along y.
img2 = skimage.color.rgb2gray(image_resized[x0:x1, y0:y1, :])
skimage.io.imshow(img2)
chars = split_x(img2)
chars2 = np.array([split_y(c) for c in chars])
| inspect_custom_model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/willian-gabriel-unicamp/F-229/blob/main/Experimento_de_Resfriamento.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="td56JtYfRZ5Z" outputId="e7bbaa15-08b2-4d5c-cf5d-5f06ce319d3b"
from google.colab import drive
drive.mount('/content/drive')
#Instalar biblioteca lmfit
# !pip install lmfit
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
#import lmfit
# + [markdown] id="UW_6t1zCSZ8I"
# # Importar data frames
# + colab={"base_uri": "https://localhost:8080/", "height": 233} id="_Ty60ShSRgTH" outputId="bf464b3a-2c0a-4fdc-bc9f-3ea6828c4d8c"
# Open thermos bottle: read directly from a public Google Sheet.
sheet_id_garrafa_aberta='1sN2kTrNoocruN402abYPbnojdXnaoUhNwtNgtOJrALY'
garrafa_aberta=pd.read_csv(f'https://docs.google.com/spreadsheets/d/{sheet_id_garrafa_aberta}/export?format=csv')
# Open calorimeter (time in s, temperature in K).
cal_aberto=pd.read_excel('/content/drive/MyDrive/F 229/Resfriamento/calorímetro aberto.xlsx')
t_cal_aberto=cal_aberto['tempo médio']
T_cal_aberto=cal_aberto['Kelvin']
# Closed calorimeter.
cal_fechado=pd.read_excel('/content/drive/MyDrive/F 229/Resfriamento/calfechado.xlsx')
t_cal_fechado=cal_fechado['t(s)']
T_cal_fechado=cal_fechado['T(K)']
# Closed thermos bottle.
garrafa_fechada=pd.read_excel('/content/drive/MyDrive/F 229/Resfriamento/Garrafa Térmica Fechada.xlsx')
t_garrafa_fechada=garrafa_fechada['tempo']
T_garrafa_fechada=garrafa_fechada['Temperatura']
# Glass bottle.
garrafa_vidro=pd.read_excel('/content/drive/MyDrive/F 229/Resfriamento/Garrafa de vidro.xlsx')
t_vidro=garrafa_vidro['média']
T_vidro=garrafa_vidro['temperat']
garrafa_vidro
# + [markdown] id="VmTxRS92y7cb"
# # Gráficos dos Dados Experimentais preliminares
# + colab={"base_uri": "https://localhost:8080/", "height": 299} id="Ec997UTESpiW" outputId="e76bc115-07a1-45e8-ab3b-c54e1385d332"
# Open thermos bottle: raw measurements plus averaged points with error bars.
plt.scatter(garrafa_aberta['Tempo (s)'],garrafa_aberta['Temperatura (K)'],label='Pontos Experimentais')
plt.scatter(garrafa_aberta['Tempo2 (s)'][0:6],garrafa_aberta['Temperatura2 (K)'][0:6],label='Ponto Médio')
plt.errorbar(garrafa_aberta['Tempo2 (s)'][0:6],garrafa_aberta['Temperatura2 (K)'][0:6],xerr=np.array([3.75277675,15.29978213,13.85640646,16.16580754,10.96965511,2.020725942]),yerr=1,fmt='none',label='Incerteza',ecolor='black',elinewidth=2)
plt.xlabel('Tempo (s)',fontsize=12)
plt.ylabel('Temperatura (K)',fontsize=12)
plt.grid()
plt.legend(fontsize='small')
# + colab={"base_uri": "https://localhost:8080/", "height": 282} id="l5rOhbuA0jH9" outputId="bba16fd1-27a1-40e9-e1b6-1ed884cf18cc"
# Same averaged points on their own axes (first x-error differs slightly).
fig,ax1=plt.subplots(1,1)
ax1.scatter(garrafa_aberta['Tempo2 (s)'][0:6],garrafa_aberta['Temperatura2 (K)'][0:6],label='Média dos pontos')
ax1.errorbar(garrafa_aberta['Tempo2 (s)'][0:6],garrafa_aberta['Temperatura2 (K)'][0:6],xerr=np.array([0.5773502692,15.29978213,13.85640646,16.16580754,10.96965511,2.020725942]),yerr=1,fmt='none',label='Incerteza',ecolor='black',elinewidth=2)
ax1.set_ylabel('Temperatura (K)',fontsize=12)
ax1.set_xlabel('Tempo (s)',fontsize=12)
ax1.legend(fontsize='small')
ax1.grid()
plt.show()
# + [markdown] id="88C0KN9FB_sP"
# ## Linearização através da primeira equação
# + id="IwSfP76H3Fmt" colab={"base_uri": "https://localhost:8080/", "height": 297} outputId="35b7d848-95c6-4f92-bca2-af49ae5da005"
# Model 1 linearization: plot ln(T) against t and fit a straight line whose
# slope gives -gamma (Newton cooling). Fixes: stray '(' removed from the
# x-axis label, '\l' escape made raw, and the unused log-time variable
# `t3` (never referenced anywhere in this notebook) dropped.
fig,ax1 = plt.subplots(1,1)
from scipy.stats import linregress
t=garrafa_aberta['Tempo2 (s)'][1:6]
T=garrafa_aberta['Temperatura2 (K)'][1:6]
T3 = np.log(T)
ax1.plot(t, T3,'o',label='Garrafa Térmica Aberta')
ax1.errorbar(t,T3,xerr=np.array([15.29978213,13.85640646,16.16580754,10.96965511,2.020725942]),yerr=0.002,fmt='none',label='Incerteza',ecolor='black',elinewidth=2)
a,b,_,_,_,= linregress(t,T3)
t_r = np.linspace(3,250)
ax1.plot(t_r,a*t_r+b, label='Reta de Regressão Linear')
plt.xlabel('Tempo (s)')
plt.ylabel(r'$\ln$ (Temperatura (K))')
plt.legend()
ax1.grid()
# plt.savefig(pathfig+'fig_garrafa_termica_fechada.png')
plt.show()
# Display the fitted slope (used as -gamma in the next cell).
a
# + [markdown] id="tplIkhyNNBx6"
# # Teste da Equação 1 Linearizada
# + colab={"base_uri": "https://localhost:8080/", "height": 297} id="sBrHDMvrGKiY" outputId="339f95d5-f062-4058-db39-1fb62eeb669f"
# Rebuild the Model 1 (exponential cooling) curve from the fitted slope and
# compare it against the measured open-thermos points.
T_infinito=26.5+273.15
T_0=335.15
# The fitted slope `a` from the previous cell is -gamma.
gamma=-a
fig,ax1 = plt.subplots(1,1)
t1=np.linspace(0,300)
# T(t) = (T0 - T_inf) * exp(-gamma t) + T_inf
T1= (T_0 - T_infinito)*np.exp(-gamma*t1) + T_infinito
ax1.scatter(t,T,label='Garra Térmica Aberta')
ax1.errorbar(t,T,xerr=np.array([15.29978213,13.85640646,16.16580754,10.96965511,2.020725942]),yerr=1,fmt='none',label='Incerteza',ecolor='black',elinewidth=2)
ax1.plot(t1,T1,'r',label='Curva Teórica Modelo 1')
plt.xlabel('Tempo (s)')
plt.ylabel('Temperatura (K)')
plt.legend()
plt.grid()
plt.show()
a
# + [markdown] id="3tYacVqEwrbb"
# # Definição das variações de temperatura
# + id="i9j_dOAQw01A"
# Ambient temperature (26.5 °C expressed in kelvin), shared by all vessels.
T_infinito=26.5+273.15
# For each vessel, build the linearizing variable 1/(T - T_inf)^n for
# n = 1/3, 1/4 and 1/6 (Model 2 candidates).
# Open thermos bottle.
T_0=335.15
Delta_T3=1/(T-T_infinito)**(1/3)
Delta_T4=1/(T-T_infinito)**(1/4)
Delta_T6=1/(T-T_infinito)**(1/6)
# Open calorimeter.
# NOTE(review): below, the same name T_0_cal_aberto is reassigned in every
# section (likely copy-paste); none of these values are used afterwards here.
T_0_cal_aberto=340.15
Delta_T3_cal_aberto=1/(T_cal_aberto - T_infinito)**(1/3)
Delta_T4_cal_aberto=1/(T_cal_aberto - T_infinito)**(1/4)
Delta_T6_cal_aberto=1/(T_cal_aberto - T_infinito)**(1/6)
# Closed calorimeter.
T_0_cal_aberto=343
Delta_T3_cal_fechado=1/(T_cal_fechado - T_infinito)**(1/3)
Delta_T4_cal_fechado=1/(T_cal_fechado - T_infinito)**(1/4)
Delta_T6_cal_fechado=1/(T_cal_fechado - T_infinito)**(1/6)
# Closed thermos bottle.
T_0_cal_aberto=341.15
Delta_T3_gar_fechada=1/(T_garrafa_fechada - T_infinito)**(1/3)
Delta_T4_gar_fechada=1/(T_garrafa_fechada - T_infinito)**(1/4)
Delta_T6_gar_fechada=1/(T_garrafa_fechada - T_infinito)**(1/6)
# Glass bottle.
T_0_cal_aberto=340.15
Delta_T3_vidro=1/(T_vidro - T_infinito)**(1/3)
Delta_T4_vidro=1/(T_vidro - T_infinito)**(1/4)
Delta_T6_vidro=1/(T_vidro - T_infinito)**(1/6)
# + [markdown] id="p2HrQxaBQO9x"
# ## Modelo de Equação 2
# + [markdown] id="vzNRclhNfwgU"
# # n=1/3
# + colab={"base_uri": "https://localhost:8080/", "height": 787} id="FPfKzMaFJZ5p" outputId="c29c1ba8-1b26-4afd-d58e-3ad0823637a8"
# Model 2 linearization with n = 1/3: for each vessel plot t against
# 1/ΔT^(1/3), fit a line, then rebuild T(t) from the fitted coefficients.
fig,ax1 = plt.subplots(1,1)
# Open thermos bottle.
ax1.scatter(Delta_T3,t,label='Garra Térmica Aberta n=1/3')
a_3,b_3,_,_,_,= linregress(Delta_T3,t)
t_r = np.linspace(0.28,0.32)
ax1.plot(t_r,a_3*t_r+b_3)
# Open calorimeter.
ax1.scatter(Delta_T3_cal_aberto,t_cal_aberto,label='Calorímetro Aberto n=1/3')
a_3_cal_aberto,b_3_cal_aberto,_,_,_,= linregress(Delta_T3_cal_aberto,t_cal_aberto)
t_r = np.linspace(0.28,0.32)
ax1.plot(t_r,a_3_cal_aberto*t_r+b_3_cal_aberto)
# Closed calorimeter.
ax1.scatter(Delta_T3_cal_fechado,t_cal_fechado,label='Calorímetro fechado n=1/3')
a_3_cal_fechado,b_3_cal_fechado,_,_,_,= linregress(Delta_T3_cal_fechado,t_cal_fechado)
t_r = np.linspace(0.27,0.32)
ax1.plot(t_r,a_3_cal_fechado*t_r+b_3_cal_fechado)
# Closed thermos bottle.
ax1.scatter(Delta_T3_gar_fechada,t_garrafa_fechada,label='<NAME> n=1/3')
a_3_gar_fechada,b_3_gar_fechada,_,_,_,= linregress(Delta_T3_gar_fechada,t_garrafa_fechada)
t_r = np.linspace(0.27,0.35)
ax1.plot(t_r,a_3_gar_fechada*t_r+b_3_gar_fechada)
# Glass bottle.
ax1.scatter(Delta_T3_vidro,t_vidro,label='<NAME> n=1/3')
a_3_vidro,b_3_vidro,_,_,_,= linregress(Delta_T3_vidro,t_vidro)
t_r = np.linspace(0.27,0.32)
ax1.plot(t_r,a_3_vidro*t_r+b_3_vidro)
plt.ylabel('Tempo (s)')
plt.xlabel('Temperatura (K)')
plt.legend()
plt.grid()
# Second figure: reconstructed T(t) curves from the fitted coefficients.
fig,ax2 = plt.subplots(1,1)
# Open thermos bottle.
T_3=(a_3**3)/(-b_3+t)**3 +T_infinito
ax2.plot(t,T_3,label="<NAME>")
#ax2.scatter(t,T)
# Open calorimeter (kept disabled in the original analysis).
#T_3_cal_aberto=(a_3_cal_aberto**3)/(-b_3_cal_aberto+t_cal_aberto)**3 +T_infinito
#ax2.plot(t_cal_aberto,T_3_cal_aberto,label="Calorímetro Aberto")
#ax2.scatter(t_cal_aberto,T_cal_aberto)
# Closed calorimeter.
T_3_cal_fechado=(a_3_cal_fechado**3)/(-b_3_cal_fechado+t_cal_fechado)**3 +T_infinito
ax2.plot(t_cal_fechado,T_3_cal_fechado,label="Calorímetro Fechado")
#ax2.scatter(t_cal_fechado,T_cal_fechado)
# Closed thermos bottle.
T_3_gar_fechada=(a_3_gar_fechada**3)/(-b_3_gar_fechada+t_garrafa_fechada)**3 +T_infinito
ax2.plot(t_garrafa_fechada,T_3_gar_fechada,label="Garrafa Térmica Fechada")
#ax2.scatter(t_garrafa_fechada,T_garrafa_fechada)
# Glass bottle.
T_3_vidro=(a_3_vidro**3)/(-b_3_vidro+t_vidro)**3 +T_infinito
ax2.plot(t_vidro,T_3_vidro,label="Garrafa de Vidro")
#ax2.scatter(t_vidro,T_vidro)
plt.xlabel('Tempo (s)')
plt.ylabel('Temperatura (K)')
plt.legend(fontsize=9, loc='upper right')  # fixed typo 'upper rigth' (invalid loc)
plt.grid()
plt.show()
# + colab={"base_uri": "https://localhost:8080/"} id="HPb24ltbn5Ui" outputId="fbb302aa-7b55-4bca-ba39-f773d8187f45"
Delta_T3_cal_aberto  # inspect the transformed open-calorimeter series
# + id="uWS7aNsGn41d"
# + [markdown] id="jOKGc7hffznW"
# # n=1/4
# + colab={"base_uri": "https://localhost:8080/", "height": 824} id="ndlogZlePCSe" outputId="b2c5f6d6-2e4b-4fe3-b078-453cc260cbd1"
# Model 2 linearization with n = 1/4 (same procedure as the n = 1/3 cell).
fig,ax1 = plt.subplots(1,1)
# Open thermos bottle.
ax1.scatter(Delta_T4,t,label='Garra Térmica Aberta n=1/4')
a_4,b_4,_,_,_,= linregress(Delta_T4,t)
t_r = np.linspace(0.38,0.43)
ax1.plot(t_r,a_4*t_r+b_4,'g')
# Open calorimeter.
ax1.scatter(Delta_T4_cal_aberto,t_cal_aberto,label='Calorímetro Aberto n=1/4')
a_4_cal_aberto,b_4_cal_aberto,_,_,_,= linregress(Delta_T4_cal_aberto,t_cal_aberto)
t_r = np.linspace(0.38,0.43)
ax1.plot(t_r,a_4_cal_aberto*t_r+b_4_cal_aberto)
# Closed calorimeter.
ax1.scatter(Delta_T4_cal_fechado,t_cal_fechado,label='Calorímetro fechado n=1/4')
a_4_cal_fechado,b_4_cal_fechado,_,_,_,= linregress(Delta_T4_cal_fechado,t_cal_fechado)
t_r = np.linspace(0.38,0.42)
ax1.plot(t_r,a_4_cal_fechado*t_r+b_4_cal_fechado)
# Closed thermos bottle (label fixed: previously said n=1/3 in this n=1/4 plot).
ax1.scatter(Delta_T4_gar_fechada,t_garrafa_fechada,label='<NAME> n=1/4')
a_4_gar_fechada,b_4_gar_fechada,_,_,_,= linregress(Delta_T4_gar_fechada,t_garrafa_fechada)
t_r = np.linspace(0.27,0.35)
ax1.plot(t_r,a_4_gar_fechada*t_r+b_4_gar_fechada)
# Glass bottle.
ax1.scatter(Delta_T4_vidro,t_vidro,label='<NAME> n=1/4')
a_4_vidro,b_4_vidro,_,_,_,= linregress(Delta_T4_vidro,t_vidro)
t_r = np.linspace(0.27,0.32)
ax1.plot(t_r,a_4_vidro*t_r+b_4_vidro)
plt.ylabel('Tempo (s)')
plt.xlabel('Temperatura (K)')
plt.legend()
plt.grid()
print(a_4)
print(b_4)
# Second figure: reconstructed T(t) curves from the fitted coefficients.
fig,ax2 = plt.subplots(1,1)
# Open thermos bottle.
T_4=(a_4**4)/(-b_4+t)**4 +T_infinito
ax2.plot(t,T_4,label="<NAME>")
#ax2.scatter(t,T)
# Open calorimeter (kept disabled in the original analysis).
#T_4_cal_aberto=(a_4_cal_aberto**4)/(-b_4_cal_aberto+t_cal_aberto)**4 +T_infinito
#ax2.plot(t_cal_aberto,T_4_cal_aberto,label="Calorímetro Aberto")
#ax2.scatter(t_cal_aberto,T_cal_aberto)
# Closed calorimeter.
T_4_cal_fechado=(a_4_cal_fechado**4)/(-b_4_cal_fechado+t_cal_fechado)**4 +T_infinito
ax2.plot(t_cal_fechado,T_4_cal_fechado,label="Calorímetro Fechado")
#ax2.scatter(t_cal_fechado,T_cal_fechado)
# Closed thermos bottle.
T_4_gar_fechada=(a_4_gar_fechada**4)/(-b_4_gar_fechada+t_garrafa_fechada)**4 +T_infinito
ax2.plot(t_garrafa_fechada,T_4_gar_fechada,label="G<NAME>")
#ax2.scatter(t_garrafa_fechada,T_garrafa_fechada)
# Glass bottle.
T_4_vidro=(a_4_vidro**4)/(-b_4_vidro+t_vidro)**4 +T_infinito
ax2.plot(t_vidro,T_4_vidro,label="G<NAME>ro")
#ax2.scatter(t_vidro,T_vidro)
plt.xlabel('Tempo (s)')
plt.ylabel('Temperatura (K)')
plt.legend(fontsize=9, loc='upper right')  # fixed typo 'upper rigth' (invalid loc)
plt.grid()
plt.show()
# + [markdown] id="VT641YBmgA3E"
# # n=1/6
# + colab={"base_uri": "https://localhost:8080/", "height": 541} id="cp_aZqR2WCoT" outputId="9c887b89-0628-41e3-e96e-300e9030ce0c"
# Model 2 linearization with n = 1/6 (same procedure as the previous cells).
fig,ax1 = plt.subplots(1,1)
# Open thermos bottle.
ax1.scatter(Delta_T6,t,label='Garrafa Térmica Aberta n=1/6')
a_6,b_6,_,_,_,= linregress(Delta_T6,t)
t_r = np.linspace(0.53,0.57)
ax1.plot(t_r,a_6*t_r+b_6,'y')
# Open calorimeter (label fixed: previously said n=1/4 in this n=1/6 plot).
ax1.scatter(Delta_T6_cal_aberto,t_cal_aberto,label='Calorímetro Aberto n=1/6')
a_6_cal_aberto,b_6_cal_aberto,_,_,_,= linregress(Delta_T6_cal_aberto,t_cal_aberto)
t_r = np.linspace(0.53,0.56)
ax1.plot(t_r,a_6_cal_aberto*t_r+b_6_cal_aberto)
# Closed calorimeter.
ax1.scatter(Delta_T6_cal_fechado,t_cal_fechado,label='Calorímetro Fechado n=1/6')
a_6_cal_fechado,b_6_cal_fechado,_,_,_,= linregress(Delta_T6_cal_fechado,t_cal_fechado)
t_r = np.linspace(0.53,0.56)
ax1.plot(t_r,a_6_cal_fechado*t_r+b_6_cal_fechado)
# Closed thermos bottle.
ax1.scatter(Delta_T6_gar_fechada,t_garrafa_fechada,label='Garrafa Térmica Fechada n=1/6')
a_6_gar_fechada,b_6_gar_fechada,_,_,_,= linregress(Delta_T6_gar_fechada,t_garrafa_fechada)
t_r = np.linspace(0.27,0.35)
ax1.plot(t_r,a_6_gar_fechada*t_r+b_6_gar_fechada)
# Glass bottle. Bug fix: this scatter previously plotted Delta_T4_vidro
# (the n=1/4 series) while fitting Delta_T6_vidro.
ax1.scatter(Delta_T6_vidro,t_vidro,label='<NAME> n=1/6')
a_6_vidro,b_6_vidro,_,_,_,= linregress(Delta_T6_vidro,t_vidro)
t_r = np.linspace(0.27,0.32)
ax1.plot(t_r,a_6_vidro*t_r+b_6_vidro)
plt.ylabel('Tempo (s)')
plt.xlabel('Temperatura (K)')
plt.legend()
plt.grid()
# Second figure: reconstructed T(t) for the open thermos with error bars.
fig,ax2 = plt.subplots(1,1)
# Open thermos bottle.
T_6=(a_6**6)/(-b_6+t)**6 +T_infinito
ax2.plot(t,T_6,label="<NAME>")
ax2.scatter(t,T)
plt.errorbar(garrafa_aberta['Tempo2 (s)'][1:6],garrafa_aberta['Temperatura2 (K)'][1:6],xerr=np.array([15.29978213,13.85640646,16.16580754,10.96965511,2.020725942]),yerr=1,fmt='none',label='Incerteza',ecolor='black',elinewidth=2)
# Open calorimeter (kept disabled in the original analysis).
#T_6_cal_aberto=(a_6_cal_aberto**6)/(-b_6_cal_aberto+t_cal_aberto)**6 +T_infinito
#ax2.plot(t_cal_aberto,T_6_cal_aberto,label="Calorímetro Aberto")
#ax2.scatter(t_cal_aberto,T_cal_aberto)
# Closed calorimeter (disabled).
#T_6_cal_fechado=(a_6_cal_fechado**6)/(-b_6_cal_fechado+t_cal_fechado)**6 +T_infinito
#ax2.plot(t_cal_fechado,T_6_cal_fechado,label="Calorímetro Fechado")
#ax2.scatter(t_cal_fechado,T_cal_fechado)
# Closed thermos bottle (disabled).
#T_6_gar_fechada=(a_6_gar_fechada**6)/(-b_6_gar_fechada+t_garrafa_fechada)**6 +T_infinito
#ax2.plot(t_garrafa_fechada,T_6_gar_fechada,label="Garrafa Térmica Fechada")
#ax2.scatter(t_garrafa_fechada,T_garrafa_fechada)
# Glass bottle (disabled).
#T_6_vidro=(a_6_vidro**6)/(-b_6_vidro+t_vidro)**6 +T_infinito
#ax2.plot(t_vidro,T_6_vidro,label="Garrafa de Vidro")
#ax2.scatter(t_vidro,T_vidro)
plt.xlabel('Tempo (s)')
plt.ylabel('Temperatura (K)')
plt.legend(fontsize=8, loc='upper right')
plt.grid()
#plt.xticks(range(0,250,20))
#plt.yticks(np.arange(330,343,0.9))
plt.show()
# + colab={"base_uri": "https://localhost:8080/"} id="N3KA8lQKengD" outputId="98f812d3-d00d-4daa-bc69-ab5fa2f5f717"
# Open thermos bottle: fitted slope and intercept for n = 1/6.
print(a_6)
print(b_6)
# + id="t57q7bGpgFF7" colab={"base_uri": "https://localhost:8080/"} outputId="acdcc2b0-ad59-4425-86e9-663d1965bf96"
# Closed calorimeter.
print(a_6_cal_fechado)
print(b_6_cal_fechado)
# + colab={"base_uri": "https://localhost:8080/"} id="_BWEUKSUN4ip" outputId="8105429c-b94b-4b0e-9661-c112bc1fdf0c"
# Closed thermos bottle.
print(a_6_gar_fechada)
print(b_6_gar_fechada)
# + colab={"base_uri": "https://localhost:8080/"} id="Iqe6eAgbOQ4u" outputId="28d55d68-7d8c-4db1-f247-17bef9ec0999"
# Glass bottle.
print(a_6_vidro)
print(b_6_vidro)
# + id="i2sK7laKOciU"
| Experimento_de_Resfriamento.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# $\int \frac{x^2 + 1}{x^2 -1}dx$
# $\int \frac{x^2 + 1}{x^2 -1}dx = \int dx + \int \frac{2}{x^2 - 1}dx$
# $x + \int \frac{2}{x^2-1}dx$
# $\int \frac{2}{x^2 -1}dx = 2 \cdot \int \frac{1}{(x-1)(x+1)}dx$
# $\frac{1}{(x-1)(x+1)} = \frac{A}{(x-1)} + \frac{B}{(x+1)}$
# $1 = A(x+1) + B(x-1)$
# $x = 1$
# $1 = A(1+1) + B(1-1)$
# $1 = 2A$
# $A = \frac{1}{2}$
# $x = -1$
# $1 = \frac{1}{2}(-1+1) + B(-1-1)$
# $1 = -2B$
# $B = -\frac{1}{2}$
# $\frac{1}{(x-1)(x+1)} = \frac{\frac{1}{2}}{(x-1)} + \frac{-\frac{1}{2}}{(x+1)}$
# $\int \frac{x^2 + 1}{x^2 -1}dx = x + 2 [\frac{1}{2}\cdot \int \frac{1}{(x-1)}dx - \frac{1}{2} \cdot \int \frac{1}{(x+1)}dx]$
# $\int \frac{x^2 + 1}{x^2 -1}dx = x + 2 [\frac{1}{2}\ln(|x - 1|) - \frac{1}{2}\ln(|x+1|)]$
# $\int \frac{x^2 + 1}{x^2 -1}dx = x + \ln(|x - 1|) - \ln(|x+1|) + C$
| Problemas 7.4/07.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: TensorFlow 2.3 on Python 3.6 (CUDA 10.1)
# language: python
# name: python3
# ---
# + [markdown] id="iNFOeMfl3tIu"
# # 심층 신경망
# + [markdown] id="zKfwb8gS3tI2"
# <table align="left">
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/rickiepark/hg-mldl/blob/master/7-2.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />구글 코랩에서 실행하기</a>
# </td>
# </table>
# + [markdown] id="dPE5XsFhcfVD"
# ## 2개의 층
# + colab={"base_uri": "https://localhost:8080/"} id="4sNOMcdaFVKa" outputId="8fb4a265-8afe-45b0-d6fb-1fb0c21baec8"
from tensorflow import keras
# Fashion-MNIST: 28x28 grayscale images across 10 clothing classes.
(train_input, train_target), (test_input, test_target) = keras.datasets.fashion_mnist.load_data()
# + id="aJJiRMa6FkWx"
from sklearn.model_selection import train_test_split
# Scale pixels to [0, 1] and flatten each image to a 784-vector.
train_scaled = train_input / 255.0
train_scaled = train_scaled.reshape(-1, 28*28)
# Hold out 20% of the training data for validation.
train_scaled, val_scaled, train_target, val_target = train_test_split(
    train_scaled, train_target, test_size=0.2, random_state=42)
# + id="MCZWQiEwF6MD"
# Hidden layer (100 sigmoid units) and output layer (10-way softmax).
dense1 = keras.layers.Dense(100, activation='sigmoid', input_shape=(784,))
dense2 = keras.layers.Dense(10, activation='softmax')
# + [markdown] id="Agy5gCVhcrm-"
# ## 심층 신경망 만들기
# + id="xmWL7kOoGB4P"
# Stack the two predefined Dense layers into a feed-forward network.
model = keras.Sequential([dense1, dense2])
# + colab={"base_uri": "https://localhost:8080/"} id="em0xDl8qa12J" outputId="742f2fbb-1bd9-4d7e-ac32-36aef770403c"
model.summary()
# + [markdown] id="qAi41rBTdk7k"
# ## 층을 추가하는 다른 방법
# + id="0XeV6V4ha3I8"
# Alternative 1: pass the layers inline, naming each layer and the model.
model = keras.Sequential([
    keras.layers.Dense(100, activation='sigmoid', input_shape=(784,), name='hidden'),
    keras.layers.Dense(10, activation='softmax', name='output')
], name='패션 MNIST 모델')
# + colab={"base_uri": "https://localhost:8080/"} id="bwXDLSOWbm3L" outputId="c9f7f871-295d-4b49-9d97-cabcd0167582"
model.summary()
# + id="yZSAxgZCbax7"
# Alternative 2: start from an empty Sequential and add() layers one by one.
model = keras.Sequential()
model.add(keras.layers.Dense(100, activation='sigmoid', input_shape=(784,)))
model.add(keras.layers.Dense(10, activation='softmax'))
# + colab={"base_uri": "https://localhost:8080/"} id="bW2coaNQboe5" outputId="fa916b83-50a5-4ea1-a650-4bc30c5fbbb6"
model.summary()
# + colab={"base_uri": "https://localhost:8080/"} id="kkYrPJembpYk" outputId="3c14511b-5f49-4117-8f6d-4b39958b2f61"
# Train the last-built model for 5 epochs with the default optimizer.
model.compile(loss='sparse_categorical_crossentropy', metrics='accuracy')
model.fit(train_scaled, train_target, epochs=5)
# + [markdown] id="_dfXJDhwcyAK"
# ## 렐루 활성화 함수
# + id="dG7yF8g6b062"
# ReLU variant: a Flatten layer replaces the manual reshape, so the data is
# reloaded below WITHOUT flattening.
model = keras.Sequential()
model.add(keras.layers.Flatten(input_shape=(28, 28)))
model.add(keras.layers.Dense(100, activation='relu'))
model.add(keras.layers.Dense(10, activation='softmax'))
# + colab={"base_uri": "https://localhost:8080/"} id="zHogWhu6g90a" outputId="0e76f17a-c095-4c20-cad1-e090d379f5cf"
model.summary()
# + id="JfPe_ruQdhqA"
# Reload and rescale, keeping the 28x28 shape for the Flatten layer.
(train_input, train_target), (test_input, test_target) = keras.datasets.fashion_mnist.load_data()
train_scaled = train_input / 255.0
train_scaled, val_scaled, train_target, val_target = train_test_split(
    train_scaled, train_target, test_size=0.2, random_state=42)
# + colab={"base_uri": "https://localhost:8080/"} id="9PGejuuhdvvk" outputId="1b3ea805-8eb6-459d-ff68-3e2560523240"
model.compile(loss='sparse_categorical_crossentropy', metrics='accuracy')
model.fit(train_scaled, train_target, epochs=5)
# + colab={"base_uri": "https://localhost:8080/"} id="lVYLpnjeep4y" outputId="f3e7efd2-6dad-4ec0-b0c4-003ec4a1a432"
model.evaluate(val_scaled, val_target)
# + [markdown] id="3YtLsmGAoavz"
# ## 옵티마이저
# + id="NxVj04Haocwa"
# Optimizer showcase: each compile() below replaces the previous training
# configuration; only the final Adam-compiled model is actually fitted.
model.compile(optimizer='sgd', loss='sparse_categorical_crossentropy', metrics='accuracy')
# + id="1426O4G8Hpfi"
# Equivalent to the string form above, via an explicit optimizer object.
sgd = keras.optimizers.SGD()
model.compile(optimizer=sgd, loss='sparse_categorical_crossentropy', metrics='accuracy')
# + id="Sh-HDiULlp18"
sgd = keras.optimizers.SGD(learning_rate=0.1)
# + id="uF1XolBXsl3a"
sgd = keras.optimizers.SGD(momentum=0.9, nesterov=True)
# + id="Hy2MENbL170j"
adagrad = keras.optimizers.Adagrad()
model.compile(optimizer=adagrad, loss='sparse_categorical_crossentropy', metrics='accuracy')
# + id="KkpbSMXWtakr"
rmsprop = keras.optimizers.RMSprop()
model.compile(optimizer=rmsprop, loss='sparse_categorical_crossentropy', metrics='accuracy')
# + id="Gdu0hQIAz4JW"
# Rebuild the ReLU model fresh, then train it with Adam.
model = keras.Sequential()
model.add(keras.layers.Flatten(input_shape=(28, 28)))
model.add(keras.layers.Dense(100, activation='relu'))
model.add(keras.layers.Dense(10, activation='softmax'))
# + colab={"base_uri": "https://localhost:8080/"} id="2tcxIfILoi1t" outputId="0ec02797-4718-4e96-cc9d-8f4c76f8901b"
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics='accuracy')
model.fit(train_scaled, train_target, epochs=5)
# + colab={"base_uri": "https://localhost:8080/"} id="8gxAWehsv9Gi" outputId="99d16952-16d3-4664-c3bd-4e37a7533515"
model.evaluate(val_scaled, val_target)
| 7-2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
from numpy.linalg import norm
def manhattan_metric(x, y):
    """L1 (cityblock) distance between point ``x`` and each row of ``y``."""
    return np.abs(x - y).sum(axis=1)
def compute_centers(X, clusters):
    """Return the mean point of each cluster, ordered by cluster label.

    Fix: the original guarded with `if cluster is not None`, which is always
    true (boolean indexing never returns None); the intended emptiness guard
    is an explicit size check.
    """
    centers = []
    for label in np.unique(clusters):
        members = X[clusters == label]
        # np.unique guarantees at least one member per label, but keep the
        # guard explicit and correct instead of the always-true None check.
        if members.size:
            centers.append(np.mean(members, axis=0))
    return np.array(centers)
def clustering(X, centers, metric):
    """Assign each sample of ``X`` to the index of its nearest center.

    ``metric`` selects Manhattan (L1) distance when it equals "manhattan",
    otherwise Euclidean distance is used.
    """
    labels = []
    for sample in X:
        if metric == "manhattan":
            distances = manhattan_metric(sample, centers)
        else:
            distances = norm(sample - centers, axis=1)
        labels.append(np.argmin(distances))
    return np.array(labels)
def KMeans(X,centers,metric="euclid"):
    """Lloyd's K-Means from the given initial centers.

    Iterates assign/update until the assignment is a fixed point, then
    computes, per cluster, the maximum distance from its center (rounded to
    3 decimals) as the cluster "diameter".

    Returns (clusters, centers, num_of_iters, diams).

    NOTE(review): the loop has no iteration cap; K-Means converges for these
    metrics, but a max-iteration guard would be safer in general.
    """
    num_of_iters = 0
    while True:
        num_of_iters +=1
        clusters = clustering(X,centers,metric)
        centers = compute_centers(X,clusters)
        # Converged when re-assigning against the updated centers changes nothing.
        if all(clusters == clustering(X,centers,metric)):
            if metric == "manhattan":
                diams = np.array([round(np.max(manhattan_metric(centers[i],X[clusters == i])),3) for i in range(len(centers))])
            else:
                diams = np.array([round(np.max(norm(X[clusters == i]-centers[i],axis=1)),3)for i in range(len(centers))])
            return clusters, centers, num_of_iters,diams
# +
# 200 random 2-D points in [0, 100)^2 and two center-initialisation schemes:
# k random samples, and five hand-picked extreme/mean points.
X = np.random.uniform(low=0,high=100,size=(200,2))
k = 5
# NOTE(review): the first hand-picked center is (mean_x, min_y), not a pure
# max/min corner, despite the 'max/min' name below.
centers = [X[np.random.choice(len(X),k)],
np.array([(X[:, 0].mean(), X[:, 1].min()),(min(X[:, 0]), min(X[:, 1])), (max(X[:, 0]), max(X[:, 1])), (max(X[:, 0]), min(X[:, 1])),
(min(X[:, 0]), max(X[:, 1]))])]
metrics = ['euclid','manhattan']
centers_name = ['random','max/min']
# +
# Run K-Means for every (center initialisation, metric) combination and show
# the result in a 2x2 grid.
fig, axes = plt.subplots(2,2,figsize=(13,13))
for i in range(2):      # i indexes the center-initialisation strategy
    for j in range(2):  # j indexes the distance metric
        # Renamed the iteration count from `k` to `iters` so it no longer
        # shadows the global cluster count k.
        cls, cent, iters, diams = KMeans(X, centers[i], metrics[j])
        axes[i,j].scatter(X[:,0], X[:,1], c=cls)
        axes[i,j].scatter(cent[:,0], cent[:,1], c="red", s=60, marker="D")
        # Bug fix: the title previously used metrics[i] / centers_name[j],
        # i.e. the annotations were swapped relative to the data plotted.
        axes[i,j].set_title("Metrics: {}; Centers choice: {}; \n Iters: {} \n diams: {}".format(metrics[j], centers_name[i], iters, diams))
fig.savefig("Lab12")
| LW12/LabWork12.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import itertools
import matplotlib.pyplot as plt
import matplotlib as mpl
plt.style.use('fivethirtyeight')
mpl.rcParams['axes.facecolor'] = 'w'
mpl.rcParams['figure.facecolor'] = 'w'
mpl.rcParams['legend.framealpha'] = 1
import numpy as np
import progressbar as pb
import scipy.special as ss
# +
# Enumerate every slot as ((p, t), e): either p == 0 with 1 <= t <= max_t,
# or 1 <= p <= max_p with t == 0, combined with an endorsement count
# e in [0, 32] — giving 10 * 33 = 330 slot variants.
max_p = 5
max_t = 5
p_0 = list(itertools.product(*[[0,], np.arange(1, max_t+1)]))
p_greater = list(itertools.product(*[np.arange(1, max_p+1), [0,]]))
p_and_t_list = p_0 + p_greater
slot_list = list(itertools.product(*[p_and_t_list, np.arange(33)]))
# -
# Attack window of 2 consecutive slots: cartesian product of all slots.
attack_length = 2
length_n_lists = list(itertools.product(*[slot_list]*attack_length))
len(length_n_lists)
330**2  # sanity check: 330 slot variants squared
# +
def calcSelfishDelay(p_list, e_list):
    """Total delay of the selfish chain over the attack window.

    NOTE(review): the constants 60/40/8/24 presumably come from the
    protocol's timing parameters — confirm against the protocol spec.
    """
    endorsement_shortfall = sum(max(24 - e, 0) for e in e_list[1:])
    return 60 * attack_length + 40 * sum(p_list) + 8 * endorsement_shortfall
def calcHonestDelay(e_list, t_list):
    """Total delay of the honest chain over the same slots."""
    endorsement_surplus = sum(max(e - 8, 0) for e in e_list)
    return 60 * attack_length + 40 * sum(t_list) + 8 * endorsement_surplus
def calcDelayDiff(p_list, e_list, t_list):
    """Selfish minus honest delay; negative means the selfish chain is faster."""
    return calcSelfishDelay(p_list, e_list) - calcHonestDelay(e_list, t_list)
# -
def calcProb(alpha, p_list, e_list, t_list):
    """Probability of observing this slot configuration when the attacker
    controls a stake fraction `alpha`.

    term1: number of ways to choose each slot's endorsements (binomials);
    term2/term3: weights for the honest- vs attacker-controlled draws.
    """
    # BUGFIX: np.product was deprecated and removed in NumPy 2.0 -- use
    # np.prod (identical semantics on NumPy 1.x).
    term1 = np.prod([ss.binom(32, e) for e in e_list])
    term2 = np.power(1 - alpha, 33 * attack_length + sum(p_list) - sum(e_list))
    term3 = np.power(alpha, attack_length + sum(t_list) + sum(e_list))
    return term1 * term2 * term3
def getLists(slots):
    """Split a sequence of ((p, t), e) slot tuples into three parallel lists."""
    p_list, t_list, e_list = [], [], []
    for (p, t), e in slots:
        p_list.append(p)
        t_list.append(t)
        e_list.append(e)
    return p_list, t_list, e_list
# +
alpha = 0.4  # attacker's stake fraction
probabilites = []
bar = pb.ProgressBar()
# Accumulate the probability of every slot configuration in which the
# selfish chain would be faster than the honest one (delay diff < 0).
for slots in bar(length_n_lists):
    p_list, t_list, e_list = getLists(slots)
    if (calcDelayDiff(p_list, e_list, t_list) < 0):
        probabilites.append(calcProb(alpha, p_list, e_list, t_list))
# -
# Total success probability; the second value scales it by the number of
# minutes in a year (presumably one opportunity per minute -- verify).
sum(probabilites), sum(probabilites) * 365 * 24 * 60
len(probabilites)
| stake/longer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ps)
# language: python
# name: ps
# ---
# +
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
import numpy as np
import pyactlike
# -
# # Basic Usage
#
# We'll first read in some included example spectra.
# +
like = pyactlike.ACTPowerSpectrumData()

# Read an example spectrum from disk.
filename = like.data_dir + "/bf_ACTPol_WMAP_lcdm.minimum.theory_cl"
tt_lmax = 6000
# Columns: multipole ell, then the TT, TE and EE band powers.
ell, dell_tt, dell_te, dell_ee = np.genfromtxt(
    filename,
    delimiter=None,
    unpack=True,
    max_rows=tt_lmax - 1,
    usecols=(0, 1, 2, 3),
)

plt.plot(ell, dell_tt, "-", label="TT")
plt.plot(ell, dell_ee, "-", label="EE")
plt.xlabel(r"Multipole, $\ell$")
plt.ylabel(r"$C_{\ell} \ell (\ell+1) / 2 \pi$ $[\mu$K$^2]$")
plt.legend()
plt.xlim(0,3000)
# -

# Next, we'll compute the likelihood of this spectrum given our ACT data. We'll check it against the expected answer.

# compute the likelihood: chi2 = -2 * log-likelihood, compared against a
# known reference value for this best-fit spectrum
like = pyactlike.ACTPowerSpectrumData()
chi2 = -2 * like.loglike(dell_tt, dell_te, dell_ee, 1.0)
print("ACTPol chi2 = " + "{0:.12f}".format(chi2))
print("Expected: 288.252869629064")
| notebooks/Basic Example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# +
# default_exp models.ultragcn
# -
# # UltraGCN
# > An efficient graph-convolutional recommendation model.
#
# Industrial recommender systems usually involve massive graphs due to the large numbers of users and items. However, current GCN-based models are hard to train with large graphs, which hinders their wide adoption in industry. This brings efficiency and scalability challenges for model designs. Some efforts have been made to simplify the design of GCN-based CF models, mainly by removing feature transformations and non-linear activations that are not necessary for CF. These proved beneficial. But it has been seen that message passing (i.e., neighborhood aggregation) on a large graph is usually time-consuming for CF. In particular, stacking multiple layers of message passing could lead to the slow convergence of GCN-based models on CF tasks. For example, in our experiments, three-layer LightGCN takes more than 700 epochs to converge to its best result on the Amazon Books dataset, which would be unacceptable in an industrial setting.
#
# UltraGCN has the following optimization objective:
#
# $$\mathcal{L} = \mathcal{L}_O + \lambda \mathcal{L}_C$$
#
# where 𝜆 is the hyper-parameter to control the importance weights of two loss terms.
#
# Moreover, except for user-item relationships, some other relationships (e.g., item-item and user-user relationships) also greatly contribute to the effectiveness of GCN-based models on CF. However, in conventional GCN-based models, these relationships are implicitly learned through the same message passing layers with user-item relationships. This not only leads to the unreasonable edge weight assignments, but also fails to capture the relative importances of different types of relationships.
#
# <img src='https://github.com/recohut/reco-static/raw/master/media/images/models/ultragcn.png'>
#
# ### Constraint Loss
#
# Instead of performing explicit message passing, LightGCN aims to directly approximate the convergence state by normalizing the embeddings to unit vectors and then maximize the dot product of both terms, which is equivalent to maximize the cosine similarity between $𝑒_u$ and $e_i$.
#
# $$\max \sum_{i \in \mathcal{N}(u)} \beta_{u,i}e_u^Te_i,\ \forall u \in U$$
#
# For ease of optimization, we further incorporate sigmoid activation and negative log likelihood, and derive the following loss:
#
# $$\mathcal{L}_C = - \sum_{u \in U} \sum_{i \in \mathcal{N}(u)} \beta_{u,i} \log(\sigma(e_u^Te_i)),$$
#
# To avoid over-smoothing (users and items could easily converge to the same embeddings), UltraGCN perform negative sampling (inspired by Word2vec). The constraint loss would then become:
#
# $$\mathcal{L}_C = - \sum_{(u,i) \in N^+} \beta_{u,i} \log(\sigma(e_u^Te_i)) - \sum_{(u,j) \in N^-} \beta_{u,j} \log(\sigma(e_u^Te_j))$$
#
# ### Optimization Loss
#
# Typically, CF models perform item recommendation by applying either pairwise BPR (Bayesian personalized ranking) loss or pointwise BCE (binary cross-entropy) loss for optimization. UltraGCN formulate CF as a link prediction problem in graph learning, and therefore chooses the following BCE loss as the main optimization objective. It is also consistent with the loss format of $\mathcal{L}_C$:
#
# $$\mathcal{L}_O = - \sum_{(u,i) \in N^+} \log(\sigma(e_u^Te_i)) - \sum_{(u,j) \in N^-} \log(\sigma(e_u^Te_j))$$
#
# ### Learning on Item-Item
#
# Moreover, except for user-item relationships, some other relationships (e.g., item-item and user-user relationships) also greatly contribute to the effectiveness of GCN-based models on CF. However, in conventional GCN-based models, these relationships are implicitly learned through the same message passing layers with user-item relationships. This not only leads to the unreasonable edge weight assignments, but also fails to capture the relative importances of different types of relationships.
#
# UltraGCN is flexible to extend to model many different relation graphs, such as user-user graphs, item-item graphs, and even knowledge graphs. For now, we will focus on the item-item co-occurrence graph, which has been shown to be useful for recommendation.
#
# For each positive (𝑢, 𝑖) pair, we first construct 𝐾 weighted positive (𝑢, 𝑗) pairs, for 𝑗 ∈ 𝑆 (𝑖). Then, we penalize the learning of these pairs with the more reasonable similarity score $\omega_{𝑖,𝑗}$ and derive the constraint loss $\mathcal{L}_𝐼$ on the item-item graph as follow:
#
# $$\mathcal{L}_I = \sum_{(u,i) \in N^+}\sum_{j \in S(i)} \omega_{i,j} \log (\sigma(e_u^Te_j))$$
#
# We can omit the negative sampling here as the negative sampling in $\mathcal{L}_𝐶$ and $\mathcal{L}_O$ has already enabled UltraGCN to counteract over-smoothing. With this constraint loss, we can extend UltraGCN to better learn item-item relationships, and finally derive the following training objective of UltraGCN,
#
# $$\mathcal{L} = \mathcal{L}_O + \lambda \mathcal{L}_C + \gamma\mathcal{L}_I$$
#
# where 𝜆 and 𝛾 are hyper-parameters to adjust the relative importances of user-item and item-item relationships, respectively.
#hide
from nbdev.showdoc import *
#export
import torch
import torch.nn as nn
import torch.nn.functional as F
#export
class UltraGCN(nn.Module):
    """UltraGCN: efficient GCN-style CF model without explicit message passing.

    Training loss (see markdown above):
        L = L_O (weighted BCE link prediction, with the user-item constraint
            weights folded in via the omegas)
          + lambda_ * L_I (item-item constraint loss)
          + gamma * L2 norm of all parameters

    Args:
        args: namespace providing user_num, item_num, embedding_dim,
            w1..w4, negative_weight, gamma, lambda_, initial_weight.
        constraint_mat: flat tensor of size user_num * item_num with the
            beta_{u,i} weights, indexed by u * item_num + i.
        ii_constraint_mat: (item_num, K) similarity scores omega_{i,j} of
            each item's K most similar items.
        ii_neighbor_mat: (item_num, K) long tensor of those neighbors' ids.
    """

    def __init__(self, args, constraint_mat, ii_constraint_mat, ii_neighbor_mat):
        super(UltraGCN, self).__init__()
        self.user_num = args.user_num
        self.item_num = args.item_num
        self.embedding_dim = args.embedding_dim
        # w1/w2 shape the positive-pair weights, w3/w4 the negative-pair
        # weights (w2 == 0 / w4 == 0 disables the beta lookup entirely).
        self.w1 = args.w1
        self.w2 = args.w2
        self.w3 = args.w3
        self.w4 = args.w4

        self.negative_weight = args.negative_weight
        self.gamma = args.gamma        # L2 regularization strength
        self.lambda_ = args.lambda_    # weight of the item-item loss L_I

        self.user_embeds = nn.Embedding(self.user_num, self.embedding_dim)
        self.item_embeds = nn.Embedding(self.item_num, self.embedding_dim)

        self.constraint_mat = constraint_mat
        self.ii_constraint_mat = ii_constraint_mat
        self.ii_neighbor_mat = ii_neighbor_mat

        self.initial_weight = args.initial_weight
        self.initial_weights()

    def initial_weights(self):
        """Initialize both embedding tables from N(0, initial_weight^2)."""
        nn.init.normal_(self.user_embeds.weight, std=self.initial_weight)
        nn.init.normal_(self.item_embeds.weight, std=self.initial_weight)

    def get_omegas(self, users, pos_items, neg_items):
        """Return the BCE weights: len(pos_items) positive weights followed
        by neg_items.numel() negative weights."""
        device = self.get_device()
        if self.w2 > 0:
            # beta_{u,i} looked up from the flattened constraint matrix.
            pos_weight = self.constraint_mat[users * self.item_num + pos_items].to(device)
            pos_weight = self.w1 + self.w2 * pos_weight
        else:
            pos_weight = self.w1 * torch.ones(len(pos_items)).to(device)
        # BUGFIX: the original assigned `pow_weight` in the w2 > 0 branch but
        # `pos_weight` in the else branch, then concatenated `pow_weight`,
        # raising NameError whenever w2 <= 0. A single name is used now.

        users = (users * self.item_num).unsqueeze(0)
        if self.w4 > 0:
            neg_weight = self.constraint_mat[torch.cat([users] * neg_items.size(1)).transpose(1, 0) + neg_items].flatten().to(device)
            neg_weight = self.w3 + self.w4 * neg_weight
        else:
            neg_weight = self.w3 * torch.ones(neg_items.size(0) * neg_items.size(1)).to(device)

        weight = torch.cat((pos_weight, neg_weight))
        return weight

    def cal_loss_L(self, users, pos_items, neg_items, omega_weight):
        """Omega-weighted BCE link-prediction loss (L_O with L_C folded in)."""
        device = self.get_device()
        user_embeds = self.user_embeds(users)
        pos_embeds = self.item_embeds(pos_items)
        neg_embeds = self.item_embeds(neg_items)

        pos_scores = (user_embeds * pos_embeds).sum(dim=-1)  # batch_size
        user_embeds = user_embeds.unsqueeze(1)
        neg_scores = (user_embeds * neg_embeds).sum(dim=-1)  # batch_size * negative_num

        neg_labels = torch.zeros(neg_scores.size()).to(device)
        # Negative part: mean over each user's sampled negatives.
        neg_loss = F.binary_cross_entropy_with_logits(neg_scores, neg_labels, weight=omega_weight[len(pos_scores):].view(neg_scores.size()), reduction='none').mean(dim=-1)

        pos_labels = torch.ones(pos_scores.size()).to(device)
        pos_loss = F.binary_cross_entropy_with_logits(pos_scores, pos_labels, weight=omega_weight[:len(pos_scores)], reduction='none')

        loss = pos_loss + neg_loss * self.negative_weight
        return loss.sum()

    def cal_loss_I(self, users, pos_items):
        """Item-item constraint loss L_I over each positive item's K
        precomputed neighbors, weighted by the omega similarity scores."""
        device = self.get_device()
        neighbor_embeds = self.item_embeds(self.ii_neighbor_mat[pos_items].to(device))  # len(pos_items) * num_neighbors * dim
        sim_scores = self.ii_constraint_mat[pos_items].to(device)  # len(pos_items) * num_neighbors
        user_embeds = self.user_embeds(users).unsqueeze(1)

        loss = -sim_scores * (user_embeds * neighbor_embeds).sum(dim=-1).sigmoid().log()
        return loss.sum()

    def norm_loss(self):
        """Half the squared L2 norm over all parameters (scaled by gamma)."""
        loss = 0.0
        for parameter in self.parameters():
            loss += torch.sum(parameter ** 2)
        return loss / 2

    def forward(self, users, pos_items, neg_items):
        """Total training loss for a batch of (users, pos_items, neg_items)."""
        omega_weight = self.get_omegas(users, pos_items, neg_items)

        loss = self.cal_loss_L(users, pos_items, neg_items, omega_weight)
        loss += self.gamma * self.norm_loss()
        loss += self.lambda_ * self.cal_loss_I(users, pos_items)
        return loss

    def test_foward(self, users):
        """Score every item for the given users. (Name kept as-is -- sic --
        so existing callers keep working.)"""
        items = torch.arange(self.item_num).to(users.device)
        user_embeds = self.user_embeds(users)
        item_embeds = self.item_embeds(items)

        return user_embeds.mm(item_embeds.t())

    def get_device(self):
        return self.user_embeds.weight.device
#hide
# %reload_ext watermark
# %watermark -a "<NAME>." -m -iv -u -t -d
| nbs/models/models.ultragcn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R [conda env:py3_physeq]
# language: R
# name: conda-env-py3_physeq-r
# ---
# + [markdown] toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#Goal" data-toc-modified-id="Goal-1"><span class="toc-item-num">1 </span>Goal</a></span></li><li><span><a href="#Var" data-toc-modified-id="Var-2"><span class="toc-item-num">2 </span>Var</a></span></li><li><span><a href="#Init" data-toc-modified-id="Init-3"><span class="toc-item-num">3 </span>Init</a></span></li><li><span><a href="#Just-mammals" data-toc-modified-id="Just-mammals-4"><span class="toc-item-num">4 </span>Just mammals</a></span></li><li><span><a href="#llmga-find-refs" data-toc-modified-id="llmga-find-refs-5"><span class="toc-item-num">5 </span>llmga-find-refs</a></span></li><li><span><a href="#Summary" data-toc-modified-id="Summary-6"><span class="toc-item-num">6 </span>Summary</a></span></li><li><span><a href="#sessionInfo" data-toc-modified-id="sessionInfo-7"><span class="toc-item-num">7 </span>sessionInfo</a></span></li></ul></div>
# -
# # Goal
#
# * Running LLMGA-find-refs pipeline on all mammal samples
# # Var
# +
# Input/output locations for the LLMGA-find-refs run (mammal subset).
work_dir = '/ebio/abt3_projects/Georg_animal_feces/data/metagenome/HiSeqRuns126-133-0138/wOutVertebrata/MG_assembly_mam/LLMGA-find-refs/'
bracken_taxIDs = file.path(work_dir, 'LLMGP_bracken_files.tsv')
metadata_file = '/ebio/abt3_projects/Georg_animal_feces/data/mapping/unified_metadata_complete_190529.tsv'
pipeline_dir = '/ebio/abt3_projects/Georg_animal_feces/bin/llmga-find-refs/'
# -

# # Init

library(dplyr)
library(tidyr)
library(ggplot2)
# Project helpers (dfhead, cat_file, pipelineInfo) come from init.R.
source('/ebio/abt3_projects/Georg_animal_feces/code/misc_r_functions/init.R')

# # Just mammals

# Sample metadata, trimmed to the taxonomy/diet/habitat columns we need.
meta = read.delim(metadata_file, sep='\t') %>%
    dplyr::select(SampleID, class, order, family, genus, scientific_name, diet, habitat)
meta %>% dfhead

# Bracken taxID table; normalize the 'XF' sample-name prefix to 'F' so it
# matches the metadata SampleIDs.
taxIDs = read.delim(bracken_taxIDs, sep='\t') %>%
    mutate(Sample = gsub('^XF', 'F', Sample))
taxIDs %>% dfhead

# +
# joining
taxIDs = taxIDs %>%
    inner_join(meta, c('Sample'='SampleID'))
taxIDs %>% dfhead
# -

# all metadata
taxIDs %>%
    group_by(class) %>%
    summarize(n = n()) %>%
    ungroup()

# +
# Keep only mammal samples.
taxIDs_f = taxIDs %>%
    filter(class == 'Mammalia')
taxIDs_f %>% dfhead
# -

# Write the mammal-only table, sorted taxonomically, for the pipeline.
outF = file.path(work_dir, 'LLMGP_bracken_taxIDs_mam.tsv')
taxIDs_f %>%
    arrange(class, order, family, genus) %>%
    write.table(outF, sep='\t', quote=FALSE, row.names=FALSE)
cat('File written:', outF, '\n')

# # llmga-find-refs

# NOTE: `F` shadows R's FALSE shorthand from here on (works, but fragile).
F = file.path(work_dir, 'config.yaml')
cat_file(F)

# # Summary

P = file.path(work_dir, 'references', 'drep', 'dereplicated_genomes')
list.files(P, '*.fna', recursive=TRUE)

# # sessionInfo

pipelineInfo(pipeline_dir)
sessionInfo()
| HiSeqRuns_combined/04_assemblies/01_LLMGA/05_Mam/01_llmga-find-refs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# %matplotlib inline
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
# Load the image as a numpy array (height x width x channels for RGB jpgs).
img=mpimg.imread('1.jpg')
print(img)
plt.imshow(img)
img.shape # (rows, columns, channels) of the array
len(img) # number of rows (image height) -- NOT the total element count
print (img[:,:,0]) # print channel 1 (red for an RGB image)
print (img[:,:,1]) # print channel 2 (green)
print (img[:,:,2]) # print channel 3 (blue)
len(img[1]) # number of columns (image width) in row 1
| Image2array Conversion logic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# # Data Preparation
# Load the raw census-income dataset and inspect it.
data = pd.read_csv('Income_prediction.csv')
data.head()
data.columns
data.info()
data['workclass'].unique()
# '?' is the dataset's missing-value marker in these three columns;
# convert to NaN and drop incomplete rows.
for col in data[['workclass','occupation','native.country']]:
    data[col] = data[col].replace('?',np.nan)
data.dropna(how = 'any', inplace = True)
data
import warnings; warnings.simplefilter('ignore')
data['occupation'].unique()
data['education'].unique()
data['relationship'].unique()
data['workclass'].unique()
data['workclass'].value_counts()
# # Label Encoding
from sklearn.preprocessing import LabelEncoder

# Label-encode each categorical column in turn (same copy/fit/assign
# pattern repeated per column; the single-element lists and loops are
# vestigial but kept as-is).
X1 = data[['occupation']]
lm = LabelEncoder()
a = ['occupation']
for i in np.arange(len(a)):
    X1[a[i]] = lm.fit_transform(X1[a[i]])
data['occupation'] = X1

X2 = data[['education']]
lm = LabelEncoder()
b = ['education']
for i in np.arange(len(b)):
    X2[b[i]] = lm.fit_transform(X2[b[i]])
data['education'] = X2

# +
X3 = data[['workclass']]
lm = LabelEncoder()
a = ['workclass']
for i in np.arange(len(a)):
    X3[a[i]] = lm.fit_transform(X3[a[i]])
data['workclass'] = X3
# -

X4 = data[['native.country']]
lm = LabelEncoder()
a = ['native.country']
for i in np.arange(len(a)):
    X4[a[i]] = lm.fit_transform(X4[a[i]])
data['native.country'] = X4

X5 = data[['marital.status']]
lm = LabelEncoder()
a = ['marital.status']
for i in np.arange(len(a)):
    X5[a[i]] = lm.fit_transform(X5[a[i]])
data['marital.status'] = X5

X6 = data[['relationship']]
lm = LabelEncoder()
a = ['relationship']
for i in np.arange(len(a)):
    X6[a[i]] = lm.fit_transform(X6[a[i]])
data['relationship'] = X6

# Binary target: income bracket encoded to 0/1.
inc = data[['income']]
lm = LabelEncoder()
a = ['income']
for i in np.arange(len(a)):
    inc[a[i]] = lm.fit_transform(inc[a[i]])
data['income'] = inc
data
data.info()

y = pd.DataFrame(data['income'])
data.income.value_counts(normalize = True)
data1 = data.drop('income',axis = 1)
data1

# Applying dummy values (one-hot encoding) for sex and race.
sx = pd.get_dummies(data1['sex'])
rc = pd.get_dummies(data1['race'])
sx
rc
data1 = pd.concat([data1,sx,rc],axis=1)
data1 = data1.drop(['sex','race'],axis = 1)
x = data1
x
# # Training the Model
from sklearn.model_selection import train_test_split
# 80/20 split with a fixed seed for reproducibility.
x_train, x_test, y_train, y_test = train_test_split(data1, y, test_size=0.2,random_state = 2)
print(x_test.shape)
print(y_test.shape)

# # 1)Random Forest Classifier

from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, confusion_matrix

# +
from sklearn.ensemble import RandomForestClassifier
# Hyperparameters presumably hand-tuned; bootstrap=False means each tree
# sees the full training set.
rft = RandomForestClassifier(n_estimators = 120,
                             criterion = 'entropy',
                             max_depth = 24,
                             max_features = 'auto',
                             bootstrap = False,
                             verbose = 2,
                             warm_start = True,
                             random_state = 2,
                             n_jobs = -1
                             )
rft.fit(x_train,y_train)
y_pred = rft.predict(x_test)
# -

print('Accuracy score = ',accuracy_score(y_test,y_pred))
print('Precision score =', precision_score(y_test,y_pred, average = 'binary'))
print('Recall score =',recall_score(y_test,y_pred, average = 'binary'))
print('f1 score = ',f1_score(y_test,y_pred,average = 'binary'))
confusion_matrix(y_test,y_pred)
# # 2) Logistic Regression
from sklearn.linear_model import LogisticRegression
# class_weight compensates for the class imbalance seen in value_counts above.
logit = LogisticRegression(class_weight = {0:0.39, 1:1})
logit.fit(x_train,y_train)
y_pred = logit.predict(x_test)
print('Accuracy score = ',accuracy_score(y_test,y_pred))
print('Precision score =', precision_score(y_test,y_pred))
print('Recall score =',recall_score(y_test,y_pred))
print('f1 score = ',f1_score(y_test,y_pred))
confusion_matrix(y_test,y_pred)
# # 3) Decision Tree
# +
from sklearn.tree import DecisionTreeClassifier
from scipy.stats import randint
from sklearn.model_selection import RandomizedSearchCV

# Randomized hyperparameter search over depth/features/leaf-size/criterion.
param_dist = {"max_depth": [3, None],
              "max_features": randint(1, 9),
              "min_samples_leaf": randint(1, 9),
              "criterion": ["gini", "entropy"]}

dt_model = DecisionTreeClassifier()
tree_cv = RandomizedSearchCV(dt_model, param_dist, cv = 5)
tree_cv.fit(x_train,y_train)
y_pred = tree_cv.predict(x_test)

# Print the tuned parameters and score
print("Tuned Decision Tree Parameters: {}".format(tree_cv.best_params_))
print("Best score is {}".format(tree_cv.best_score_))

# Print the tuned parameters and score
#print("Tuned Decision Tree Parameters: {}".format(tree_cv.best_params_))
#print("Best score is {}".format(tree_cv.best_score_))
# -

print('Accuracy score = ',accuracy_score(y_test,y_pred))
print('Precision score =', precision_score(y_test,y_pred))
print('Recall score =',recall_score(y_test,y_pred))
print('f1 score = ',f1_score(y_test,y_pred))
confusion_matrix(y_test,y_pred)
# # 4) KNN
# +
from sklearn.neighbors import KNeighborsClassifier
# Sweep k = 1..24 and record test accuracy for each.
metric_k = []
neighbors = np.arange(1,25)
for k in neighbors:
    classifier = KNeighborsClassifier(n_neighbors = k, metric = 'minkowski', p = 2)
    classifier.fit(x_train,y_train)
    y_pred = classifier.predict(x_test)
    acc = accuracy_score(y_test,y_pred)
    metric_k.append(acc)
# -

plt.plot(neighbors,metric_k,'o-')
plt.xlabel('k value')
plt.ylabel('Accuracy')
plt.grid()

# k = 18 chosen from the accuracy curve above.
classifier = KNeighborsClassifier(n_neighbors = 18, metric = 'minkowski', p = 2)
classifier.fit(x_train,y_train)
y_pred = classifier.predict(x_test)
print('Accuracy score = ',accuracy_score(y_test,y_pred))
print('Precision score =', precision_score(y_test,y_pred, average = 'binary'))
print('Recall score =',recall_score(y_test,y_pred, average = 'binary'))
print('f1 score = ',f1_score(y_test,y_pred,average = 'binary'))
confusion_matrix(y_test,y_pred)
# + active=""
#
# -
# # Best Deployed Model - Random Forest Classifier
# Accuracy score = 0.8604342781369136 |
# Precision score = 0.7582329317269076 |
# Recall score = 0.6356902356902356 |
# f1 score = 0.6915750915750916
| Income Prediction Model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# ### Scatterplot Cost Model
# + active=""
# python -i backend/overall/cost_estimation_model.py scatter
# -
from numpy import dtype
# Per-column distinct-value counts for the airbnb_nyc dataset, used as the
# G_color cardinality feature in the cost models below.
cardinality = {
    'dummyfloat1': 1,
    'dummyfloat2': 1,
    'id': 48895,
    'name': 47906,
    'host_id': 37457,
    'host_name': 11453,
    'neighbourhood_group': 5,
    'neighbourhood': 221,
    'latitude': 19048,
    'longitude': 14718,
    'room_type': 3,
    'price': 674,
    'minimum_nights': 109,
    'number_of_reviews': 394,
    'last_review': 1765,
    'reviews_per_month': 938,
    'calculated_host_listings_count': 47,
    'availability_365': 366}
# NOTE: this dict deliberately shadows the numpy `dtype` imported above --
# after this line `dtype[...]` is a column -> dtype lookup table.
dtype = {'id': dtype('int64'),
 'name': dtype('O'),
 'host_id': dtype('int64'),
 'host_name': dtype('O'),
 'neighbourhood_group': dtype('O'),
 'neighbourhood': dtype('O'),
 'latitude': dtype('float64'),
 'longitude': dtype('float64'),
 'dummyfloat1': dtype('float64'),
 'dummyfloat2': dtype('float64'),
 'room_type': dtype('O'),
 'price': dtype('int64'),
 'minimum_nights': dtype('int64'),
 'number_of_reviews': dtype('int64'),
 'last_review': dtype('O'),
 'reviews_per_month': dtype('float64'),
 'calculated_host_listings_count': dtype('int64'),
 'availability_365': dtype('int64')}

# Benchmark timings for 2-attribute scatterplots; annotate each row with
# the cardinality and dtype of both attributes.
df = pd.read_csv("../result/costmodel_scatter.csv")
# df["id"]=df["attr1"]+";"+df["attr2"]
# df = df[df["id"]!="latitude;longitude"]
df["G_color1"] = df["attr1"].apply(lambda x: cardinality[x])
df["dtype1"] = df["attr1"].apply(lambda x: str(dtype[x]))
df["G_color2"] = df["attr2"].apply(lambda x: cardinality[x])
df["dtype2"] = df["attr2"].apply(lambda x: str(dtype[x]))
df["G_color"]=df["G_color1"]+df["G_color2"]
# +
# Fit inputs: time in ms (y) against point count N and combined
# cardinality G_color.
y = np.array(df["time"])*1000
N = np.array(df["nPts"])
G_color = np.array(df["G_color"])
X = np.array([N,G_color])
from scipy.optimize import curve_fit
def func(x, a, b, c, d):
    """Cost model a + b*N + c*N*G + d*G for x = [N, G]."""
    n, g = x[0], x[1]
    return a + b * n + c * n * g + d * g
# Fit the model and report coefficients plus total parameter std error.
popt, pcov = curve_fit(func, X,y)
y_pred = func(X,*popt)
perr = np.sqrt(np.diag(pcov))
print ("coeff:",popt)
print ("error:", sum(perr))
# -

# Overlay observed (first series) vs fitted (second series) against N.
dim = 0
plt.plot(X[dim],y,'o')
plt.plot(X[dim],func(X,*popt),'o')

# When moving float columns around, e.g. longitude/latitude, each scatterplot takes slightly longer
# +
# df = pd.read_csv("../result/costmodel_scatter.csv")
# df["id"]=df["attr1"]+";"+df["attr2"]
# df = df[(df["id"]=="latitude;longitude")|(df["id"]=="dummyfloat1;dummyfloat2")]
# +
# import altair as alt
# alt.Chart(df).mark_circle().encode(
# y = "time",
# x = "nPts",
# color = alt.Color("id",type="nominal"),
# ).interactive()
# -
# ### Colored Scatterplot Cost Model
# + active=""
# python -i backend/overall/cost_estimation_model.py colorscatter
# -
# Benchmark timings for color-encoded scatterplots.
df = pd.read_csv("../result/costmodel_colorscatter.csv")

# +
# df = pd.read_csv("../../lux-datasets/data/airbnb_nyc.csv")
# lux.config.heatmap = False
# from lux.vis.Vis import Vis
# for attr in ['host_id', 'host_name', 'neighbourhood_group','neighbourhood', 'room_type', 'number_of_reviews']:
#     vis = Vis(['price','minimum_nights',lux.Clause(attr,channel="color")], df)
#     vis._repr_html_()
# -

airbnb = pd.read_csv("../../lux-datasets/data/airbnb_nyc.csv")

# +
# df = pd.read_csv("../../lux-datasets/data/airbnb_nyc.csv")
# -

# Annotate with the color attribute's cardinality and dtype.
df["G_color"] = df["attr"].apply(lambda x: cardinality[x])
df["dtype"] = df["attr"].apply(lambda x: str(dtype[x]))

# +
import altair as alt
alt.Chart(df).mark_circle().encode(
    y = "time",
    x = "nPts",
    color = alt.Color("G_color",type="nominal"),
).interactive()
# -
# ### Object dtype
# Fit the color-scatter cost model on object-dtype color attributes only.
df = pd.read_csv("../result/costmodel_colorscatter.csv")
df["G_color"] = df["attr"].apply(lambda x: cardinality[x])
df["dtype"] = df["attr"].apply(lambda x: str(dtype[x]))
df = df[df["dtype"]!="int64"]

# +
y = np.array(df["time"])*1000
N = np.array(df["nPts"])
G_color = np.array(df["G_color"])
X = np.array([N,G_color])
from scipy.optimize import curve_fit
def func(x, a, b,c,d):
    return a + b * x[0] +c*x[0]*x[1] + d*x[1]
popt, pcov = curve_fit(func, X,y)
popt
# -

y_pred = func(X,*popt)
perr = np.sqrt(np.diag(pcov))
print ("error:", sum(perr))

# Observed vs fitted against N.
dim = 0
plt.plot(X[dim],y,'o')
plt.plot(X[dim],func(X,*popt),'o')
# ### Int dtype
# Same fit, restricted to int64-dtype color attributes.
df = pd.read_csv("../result/costmodel_colorscatter.csv")
df["G_color"] = df["attr"].apply(lambda x: cardinality[x])
df["dtype"] = df["attr"].apply(lambda x: str(dtype[x]))
df = df[df["dtype"]=="int64"]

# +
y = np.array(df["time"])*1000
N = np.array(df["nPts"])
G_color = np.array(df["G_color"])
X = np.array([N,G_color])
from scipy.optimize import curve_fit
def func(x, a, b,c,d):
    return a + b * x[0] +c*x[0]*x[1] + d*x[1]
popt, pcov = curve_fit(func, X,y)
popt
# -

y_pred = func(X,*popt)
perr = np.sqrt(np.diag(pcov))
print ("error:", sum(perr))

# Observed vs fitted against N.
dim = 0
plt.plot(X[dim],y,'o')
plt.plot(X[dim],func(X,*popt),'o')
# ### Selection-based Scatterplot Cost Model
# Selection-based scatterplot timings, faceted by dtype.
df = pd.read_csv("../result/costmodel_selection.csv")
df["G_color"] = df["attr"].apply(lambda x: cardinality[x])
df["dtype"] = df["attr"].apply(lambda x: str(dtype[x]))

# +
import altair as alt
alt.Chart(df).mark_circle().encode(
    y = "time",
    x = "nPts",
    color = alt.Color("G_color",type="ordinal"),
    column = alt.Column("dtype",type="nominal"),
).interactive()
# -

# ### The total cost of the scatterplot is the sum of fetching each of the channel columns:

# Compare one plain scatter pairing against all color-scatter timings.
df = pd.read_csv("../result/costmodel_scatter.csv")
df["id"]=df["attr1"]+";"+df["attr2"]
df = df[df["id"]=="price;minimum_nights"]
colordf = pd.read_csv("../result/costmodel_colorscatter.csv")

# +
import altair as alt
a = alt.Chart(df).mark_circle().encode(
    y = "time",
    x = "nPts",
).interactive()
b = alt.Chart(colordf).mark_circle(color="red").encode(
    y = "time",
    x = "nPts"
).interactive()
a+b
# -
# ### Bar Chart Cost model
# Bar-chart timings; G_color == 0 selects the non-color-encoded bars.
df = pd.read_csv("../result/costmodel_bar.csv")
df["time"] = df["time"]*1000
bar = df[(df["G_color"]==0)]

# +
import altair as alt
alt.Chart(bar).mark_circle().encode(
    y = "time",
    x = "nPts",
    color = alt.Color("G_axes",type="nominal"),
)
# -

# Fit time against N and axis-group cardinality G_axes.
N = np.array(bar["nPts"])
G_axes = np.array(bar["G_axes"])
y = np.array(bar["time"])
X = np.array([N,G_axes])
from scipy.optimize import curve_fit
def func(x, a, b,c,d):
    return a + b * x[0] +c*x[0]*x[1] + d*x[1]
popt, pcov = curve_fit(func, X,y)
popt
y_pred = func(X,*popt)
perr = np.sqrt(np.diag(pcov))
print ("error:", sum(perr))
dim = 0
plt.plot(X[dim],y,'o')
plt.plot(X[dim],func(X,*popt),'o')
# ### Color Bar Chart Cost Model
#
# Color-encoded bar-chart timings (G_color != 0), with a combined
# G_axes * G_color interaction feature.
df = pd.read_csv("../result/costmodel_colorbar.csv")
df["time"] = df["time"]*1000
color_bar = df[(df["G_color"]!=0)]
color_bar["ga*gc"]=color_bar["G_axes"]*color_bar["G_color"]

alt.Chart(color_bar).mark_circle().encode(
    y = "time",
    x = "nPts",
    color = alt.Color("G_axes",type="nominal")
)

alt.Chart(color_bar).mark_circle().encode(
    y = "time",
    x = "nPts",
    color = alt.Color("G_color",type="nominal")
).interactive()

alt.Chart(color_bar).mark_circle().encode(
    y = "time",
    x = "nPts",
    color = alt.Color("ga*gc",type="nominal")
).interactive()

# Fit inputs: N, G_axes and G_color.
N = np.array(color_bar["nPts"])
G_axes = np.array(color_bar["G_axes"])
G_color = np.array(color_bar["G_color"])
y = np.array(color_bar["time"])
X = np.array([N,G_axes,G_color])
from scipy.optimize import curve_fit
def func(x, a, b, c, d, e, f, g):
    """Cost model with linear terms in N, G_axes, G_color plus all
    pairwise interaction terms, for x = [N, G_axes, G_color]."""
    n, ga, gc = x[0], x[1], x[2]
    linear = a + b * n + c * ga + d * gc
    interactions = e * n * ga + f * ga * gc + g * n * gc
    return linear + interactions
# Fit, report total parameter std error, then plot observed vs fitted.
popt, pcov = curve_fit(func, X,y)
popt
y_pred = func(X,*popt)
perr = np.sqrt(np.diag(pcov))
print ("error:", sum(perr))
dim = 0
plt.plot(X[dim],y,'o')
plt.plot(X[dim],func(X,*popt),'o')
# ### Histogram
import altair as alt
# Histogram timings against point count and bin count.
df = pd.read_csv("../result/costmodel_histogram.csv")
df["time"] = df["time"]*1000
alt.Chart(df).mark_line().encode(
    y = "time",
    x = "nPts",
    color = alt.Color("nbin",type="quantitative")
)

N = np.array(df["nPts"])
Nbin = np.array(df["nbin"])
y = np.array(df["time"])
X = np.array([N,Nbin])
from scipy.optimize import curve_fit
# Bin-count terms commented out: the fit is effectively linear in N only.
def func(x, a, b):#,c):
    return a + b * x[0]#+ c*x[1] #+ d*x[0]*x[1]
popt, pcov = curve_fit(func, X,y)
popt
y_pred = func(X,*popt)
perr = np.sqrt(np.diag(pcov))
print ("error:", sum(perr))
dim = 0
plt.plot(X[dim],y,'o')
plt.plot(X[dim],func(X,*popt),'o')

# Verify that coefficient value is the same with linear fitting
coeff, residuals, rank, singular_values, rcond = np.polyfit(N,y,1,full=True)
print ("Coefficients:",coeff)
print ("error:",residuals[0])

# Histogram processing speed has no dependence on the number of bins
# ### Heatmap Cost Model
# Heatmap timings, split by mark type (plain / categorical / quantitative).
df = pd.read_csv("../result/costmodel_heatmap.csv")
df["time"] = df["time"]*1000
df.nbin = df.nbin.astype(int)
alt.Chart(df).mark_line().encode(
    y = "time",
    x = "nPts",
    color = alt.Color("nbin",type="ordinal"),
    row="mark"
)

# --- plain heatmap: fit linear-in-N only (bin term commented out) ---
vdf = df[df["mark"]=="heatmap"]
N = np.array(vdf["nPts"])
# Ncells = np.array(vdf["nbin"]**2)
Nbins = np.array(vdf["nbin"])
y = np.array(vdf["time"])
X = np.array([N,Nbins])
from scipy.optimize import curve_fit
def func(x, a, b):
    return a + b * x[0]#+ c*x[1]
popt, pcov = curve_fit(func, X,y)
print ("coefficient:", popt)
y_pred = func(X,*popt)
perr = np.sqrt(np.diag(pcov))
print ("error:", sum(perr))
dim = 0
plt.plot(X[dim],y,'o')
plt.plot(X[dim],func(X,*popt),'o')
dim = 1
plt.plot(X[dim],y,'o')
plt.plot(X[dim],func(X,*popt),'o')

# - Heatmap processing time is also not very dependent on number of bins/cells. It is also not strongly dependent on the number of data points since the data has already been binned into fixed number of cells

# --- categorical color heatmap ---
vdf = df[df["mark"]=="categorical color heatmap"]
N = np.array(vdf["nPts"])
# Ncells = np.array(vdf["nbin"]**2)
Nbins = np.array(vdf["nbin"])
y = np.array(vdf["time"])
X = np.array([N,Nbins])
def func(x, a, b):
    return a + b * x[0]#+ c*x[1]# + d * x[0]*x[1]
popt, pcov = curve_fit(func, X,y)
print ("coefficient:", popt)
y_pred = func(X,*popt)
perr = np.sqrt(np.diag(pcov))
print ("error:", sum(perr))
alt.Chart(vdf).mark_line().encode(
    y = "time",
    x = "nPts",
    color = alt.Color("nbin",type="ordinal")
).interactive()
# NOTE(review): dim is still 1 here (left over from the plain-heatmap cell),
# so this plots against Nbins rather than N -- confirm that is intended.
plt.plot(X[dim],y,'o')
plt.plot(X[dim],func(X,*popt),'o')

# --- quantitative color heatmap ---
vdf = df[df["mark"]=="quantitative color heatmap"]

# +
N = np.array(vdf["nPts"])
Ncells = np.array(vdf["nbin"]**2)
Nbins = np.array(vdf["nbin"])
y = np.array(vdf["time"])
X = np.array([N,Nbins])
def func(x, a, b):
    return a + b * x[0]#+ c*x[1]# + d * x[0]*x[1]
popt, pcov = curve_fit(func, X,y)
print ("coefficient:", popt)
# -

alt.Chart(vdf).mark_line().encode(
    y = "time",
    x = "nPts",
    color = alt.Color("nbin",type="ordinal")
).interactive()
y_pred = func(X,*popt)
perr = np.sqrt(np.diag(pcov))
print ("error:", sum(perr))
dim = 0
plt.plot(X[dim],y,'o')
plt.plot(X[dim],func(X,*popt),'o')
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Visualization of input data
# +
import numpy as np
import matplotlib.pyplot as pl
import matplotlib.dates as mdates
#import seaborn as sns
import datetime
import pandas as pd
#% matplotlib inline
#np.set_printoptions(threshold=np.nan)
import time
from matplotlib import rc
# Render all text with LaTeX in a sans-serif (Helvetica) face.
# NOTE(review): usetex=True requires a working TeX installation — confirm.
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
## for Palatino and other serif fonts use:
#rc('font',**{'family':'serif','serif':['Palatino']})
rc('text', usetex=True)
# -
# ### Read data from BEopt output
Data_df = pd.read_csv("data/200124_datafile_all_details.csv")
Data_df.head() #show first 5 lines
Data_df.dtypes # check types/ columns
# ### Change time stamp
Data_df["Date/Time"][:10] # date/time problematic, needs to start with 0...23
# change hour format from 1-24 to 0-23: BEopt stamps hours as 1..24, but
# strptime's %H only accepts 0..23, so rewrite hour "24" as "00" first.
# '24:' is a literal pattern, so regex matching is unnecessary; it can only
# occur in the hour position here because the data is hourly (minutes/seconds
# are always 00) — TODO confirm against the raw file.
Data_df["Date/Time"] = Data_df["Date/Time"].str.replace('24:', '00:', regex=False)
# format time stamp — parse the column once instead of three times
timestamps = pd.to_datetime(Data_df["Date/Time"], format=" %m/%d %H:%M:%S")
Data_df["month"] = timestamps.dt.month
Data_df["day"] = timestamps.dt.day
Data_df["hour"] = timestamps.dt.hour - 1
Data_df.drop("Date/Time", axis=1, inplace=True)
Data_df.tail()
# hour 24 became 00 above, so the "-1" shift maps it to -1; wrap around to 23
Data_df["hour"] = Data_df["hour"].replace(-1, 23)
Data_df[["day","hour"]][:25]
Data_df[:][0:25]
Data_df.columns
Data_df.to_csv("data/200124_datafile_all_details_right_timestamp.csv", index=False)
# ### Aggregate values per month and hour
# Mean daily profile: one row per (month, hour), averaged over the days.
Data_df_mean = Data_df.groupby(['month', 'hour'], as_index=False).mean()
Data_df_mean.drop("day", axis=1, inplace=True)
Data_df_mean.head()
# ### Illustrate hot water loads
# + jupyter={"source_hidden": true}
# Hourly mean hot-water electricity use, one subplot per month in a 4x3 grid.
# December is drawn first (top-left), then January..November in the loop;
# all panels share the same axis limits so months are directly comparable.
x_axis = np.arange(0,24,1);
fig = pl.subplots(figsize=(8,8));
pl.subplots_adjust(hspace=0.12, wspace=0.05);
# Plots very first upper left
pl.subplot(4,3,1);
pl.bar(x_axis, Data_df_mean.loc[Data_df_mean['month'] == 12]['hotwater'][:], #+ Data_df_mean.loc[Data_df_mean['month'] == 12]['hotwater_suppl'][:],
    label='HW', color= "steelblue");
#pl.bar(x_axis, Data_df_mean.loc[Data_df_mean['month'] == 12]['hotwater_suppl'][:], label='HW_suppl', color="lightblue");
pl.annotate("12",xy=(0.4,4.6), fontsize=11, bbox={'facecolor':'white'});
pl.title('Hot water electricity consumption by month', fontsize=13, x=1.6);
pl.ylim(0, 5.4); pl.xlim(-1, 24);
pl.xticks(np.arange(0,25,6));
pl.ylabel('[ $kwh$ ]', fontsize=11, labelpad=3);
ax=pl.gca();
ax.axes.xaxis.set_ticklabels([]);
ax.grid(color='grey', linestyle='-', linewidth=0.5);
# Plots others in loop
for i in np.arange(2,13):
    pl.subplot(4,3,i);
    pl.bar(x_axis, Data_df_mean.loc[Data_df_mean['month'] == (i-1)]['hotwater'][:], #+ Data_df_mean.loc[Data_df_mean['month'] == (i-1)]['hotwater_suppl'][:],
        label='HW', color= "steelblue");
    #pl.bar(x_axis, Data_df_mean.loc[Data_df_mean['month'] == (i-1)]['hotwater_suppl'][:], label='HW_suppl', color="lightblue");
    pl.annotate(i-1,xy=(0.4,4.6), fontsize=11, bbox={'facecolor':'white'});
    pl.ylim(0, 5.4); pl.xlim(-1, 24);
    pl.xticks(np.arange(0,25,6));
    pl.ylabel('[ $kwh$ ]', fontsize=11, labelpad=3);
    pl.xlabel('[ $hours$ ]', fontsize=11, labelpad=1);
    ax=pl.gca();
    ax.grid(color='grey', linestyle='-', linewidth=0.5);
    # Drop redundant tick labels except on the left column / bottom row.
    if not i in [1,4,7,10]:
        ax.axes.yaxis.set_ticklabels([]);
        pl.ylabel(None);
    if not i in np.arange(10,13):
        ax.axes.xaxis.set_ticklabels([]);
        pl.xlabel(None);
#pl.savefig('../latex/pics/HotWaterConsumption_v2.svg', bbox_inches='tight');
#pl.savefig('../latex/pics/HotWaterConsumption_v2.pgf', bbox_inches='tight');
# -
# ### Illustrate heating loads
Data_df.sum()
# + jupyter={"source_hidden": true}
# Hourly mean heating electricity per month (4x3 grid, December first).
# Pseudo-stacked bars: the first bar is fan+heating (total), the second bar
# overlays the heating-element share on top of it.
x_axis = np.arange(0,24,1);
fig = pl.subplots(figsize=(8,8));
pl.subplots_adjust(hspace=0.05, wspace=0.05);
# Plots very first upper left
pl.subplot(4,3,1)
pl.bar(x_axis, Data_df_mean.loc[Data_df_mean['month'] == 12]['heating_fan'][:] + Data_df_mean.loc[Data_df_mean['month'] == 12]['heating'][:],
    label=' $d_{fh}^{fan}$ ', color= "dodgerblue");
pl.bar(x_axis, Data_df_mean.loc[Data_df_mean['month'] == 12]['heating'][:], label='heating', color="firebrick");
pl.annotate("12",xy=(.4, 5.4), fontsize=11, bbox={'facecolor':'white'});
pl.title('Heating electricity consumption by month', fontsize=13, x=1.6);
pl.ylim(0, 6.4); pl.xlim(-1, 24);
pl.xticks(np.arange(0,25,6));
pl.ylabel('[ $kwh$ ]', fontsize=11, labelpad=3);
ax=pl.gca();
ax.axes.xaxis.set_ticklabels([]);
ax.grid(color='grey', linestyle='-', linewidth=0.5);
# Plots others in loop
for i in np.arange(2,13):
    pl.subplot(4,3,i)
    pl.bar(x_axis, Data_df_mean.loc[Data_df_mean['month'] == (i-1)]['heating_fan'][:] + Data_df_mean.loc[Data_df_mean['month'] == (i-1)]['heating'][:],
        label=' $d_{fh}^{fan}$ ', color= "dodgerblue");
    pl.bar(x_axis, Data_df_mean.loc[Data_df_mean['month'] == (i-1)]['heating'][:], label='heating', color="firebrick");
    pl.annotate(i-1,xy=(.4, 5.4), fontsize=11, bbox={'facecolor':'white'});
    pl.ylim(0, 6.4); pl.xlim(-1, 24);
    pl.xticks(np.arange(0,25,6));
    pl.ylabel('[ $kwh$ ]', fontsize=11, labelpad=3);
    pl.xlabel('[ $hours$ ]', fontsize=11, labelpad=1);
    ax=pl.gca();
    ax.grid(color='grey', linestyle='-', linewidth=0.5);
    # Drop redundant tick labels except on the left column / bottom row.
    if not i in [1,4,7,10]:
        ax.axes.yaxis.set_ticklabels([]);
        pl.ylabel(None);
    if not i in np.arange(10,13):
        ax.axes.xaxis.set_ticklabels([]);
        pl.xlabel(None);
    # Single shared legend, anchored next to the last panel.
    if i == 12:
        ax.legend(bbox_to_anchor=(-1.2, 0.8));
#pl.savefig('../latex/pics/HeatingConsumption_v2.svg', bbox_inches='tight');
#pl.savefig('../latex/pics/HeatingConsumption_v2.pgf', bbox_inches='tight');
# -
# ### Demand all combined
# + jupyter={"source_hidden": true}
# Total hourly demand per month, pseudo-stacked by drawing cumulative sums
# back-to-front: full total (heating on top), then total minus heating,
# then electricity+hot water, then electricity alone.
x_axis = np.arange(0,24,1);
fig = pl.subplots(figsize=(8,8));
pl.subplots_adjust(hspace=0.05, wspace=0.05);
# Plots very first upper left
pl.subplot(4,3,1)
pl.bar(x_axis, Data_df_mean.loc[Data_df_mean['month'] == 12]['electkwh'][:] + Data_df_mean.loc[Data_df_mean['month'] == 12]['heating_fan'][:] + Data_df_mean.loc[Data_df_mean['month'] == 12]['heating'][:] + Data_df_mean.loc[Data_df_mean['month'] == 12]['hotwater'][:], # + Data_df_mean.loc[Data_df_mean['month'] == 12]['heating_suppl'][:],
    label=' $d_{fh}$ ', color="firebrick");
pl.bar(x_axis, Data_df_mean.loc[Data_df_mean['month'] == 12]['electkwh'][:] + Data_df_mean.loc[Data_df_mean['month'] == 12]['heating_fan'][:] + Data_df_mean.loc[Data_df_mean['month'] == 12]['hotwater'][:],
    label=' $d_{fh}^{fan}$ ', color= "lightcoral");
pl.bar(x_axis, Data_df_mean.loc[Data_df_mean['month'] == 12]['electkwh'][:] + Data_df_mean.loc[Data_df_mean['month'] == 12]['hotwater'][:], #+ Data_df_mean.loc[Data_df_mean['month'] == 12]['hotwater_suppl'][:],
    label=' $d_{hw}$ ', color= "lightsteelblue");
pl.bar(x_axis, Data_df_mean.loc[Data_df_mean['month'] == 12]['electkwh'][:], #+ Data_df_mean.loc[Data_df_mean['month'] == 12]['hotwater_suppl'][:],
    label=' $d_e$ ', color= "orange");
#pl.bar(x_axis, Data_df_mean.loc[Data_df_mean['month'] == 12]['heating'][:] + Data_df_mean.loc[Data_df_mean['month'] == 12]['heating_suppl'][:],
#    label='heating_suppl', color= "tomato");
pl.annotate("12",xy=(.4, 6), fontsize=11, bbox={'facecolor':'white'});
#pl.title('Heating demand by month', fontsize=13, x=1.6);
pl.ylim(0, 7); pl.xlim(-1, 24);
pl.xticks(np.arange(0,25,6));
pl.ylabel('[ $kWh$ ]', fontsize=11, labelpad=3);
ax=pl.gca();
ax.axes.xaxis.set_ticklabels([]);
ax.grid(color='grey', linestyle='-', linewidth=0.5);
# Plots others in loop
for i in np.arange(2,13):
    pl.subplot(4,3,i)
    pl.bar(x_axis, Data_df_mean.loc[Data_df_mean['month'] == (i-1)]['electkwh'][:] + Data_df_mean.loc[Data_df_mean['month'] == (i-1)]['heating_fan'][:] + Data_df_mean.loc[Data_df_mean['month'] == (i-1)]['heating'][:] + Data_df_mean.loc[Data_df_mean['month'] == (i-1)]['hotwater'][:], # + Data_df_mean.loc[Data_df_mean['month'] == (i-1)]['heating_suppl'][:],
        label=' $d_{fh}$ ', color="firebrick");
    pl.bar(x_axis, Data_df_mean.loc[Data_df_mean['month'] == (i-1)]['electkwh'][:] + Data_df_mean.loc[Data_df_mean['month'] == (i-1)]['heating_fan'][:] + Data_df_mean.loc[Data_df_mean['month'] == (i-1)]['hotwater'][:],
        label=' $d_{fh}^{fan}$ ', color= "lightcoral");
    pl.bar(x_axis, Data_df_mean.loc[Data_df_mean['month'] == (i-1)]['electkwh'][:] + Data_df_mean.loc[Data_df_mean['month'] == (i-1)]['hotwater'][:], #+ Data_df_mean.loc[Data_df_mean['month'] == (i-1)]['hotwater_suppl'][:],
        label=' $d_{hw}$ ', color= "lightsteelblue");
    # pl.bar(x_axis, Data_df_mean.loc[Data_df_mean['month'] == (i-1)]['heating'][:] + Data_df_mean.loc[Data_df_mean['month'] == (i-1)]['heating_suppl'][:],
    #     label='heating_suppl', color= "tomato");
    pl.bar(x_axis, Data_df_mean.loc[Data_df_mean['month'] == (i-1)]['electkwh'][:], #+ Data_df_mean.loc[Data_df_mean['month'] == 12]['hotwater_suppl'][:],
        label=' $d_{e}$ ', color= "orange");
    pl.annotate(i-1,xy=(.4, 6), fontsize=11, bbox={'facecolor':'white'});
    pl.ylim(0, 7); pl.xlim(-1, 24);
    pl.xticks(np.arange(0,25,6));
    pl.ylabel('[ $kWh$ ]', fontsize=11, labelpad=3);
    pl.xlabel('[ $hours$ ]', fontsize=11, labelpad=1);
    ax=pl.gca();
    ax.grid(color='grey', linestyle='-', linewidth=0.5);
    # Drop redundant tick labels except on the left column / bottom row.
    if not i in [1,4,7,10]:
        ax.axes.yaxis.set_ticklabels([]);
        pl.ylabel(None);
    if not i in np.arange(10,13):
        ax.axes.xaxis.set_ticklabels([]);
        pl.xlabel(None);
    # Single shared legend, anchored next to the last panel.
    if i == 12:
        ax.legend(bbox_to_anchor=(-1.2, 1));
#pl.savefig('../latex/pics/OverallDemand.svg', bbox_inches='tight');
#pl.savefig('../latex/pics/OverallDemand.pgf', bbox_inches='tight');
#pl.savefig('../latex/pics/OverallDemand.pdf', bbox_inches='tight');
# -
# ### Illustrate electricity loads
# + jupyter={"source_hidden": true}
# Hourly mean household electricity per month, pseudo-stacked back-to-front:
# lights+appliances+ventilation+misc as the tallest bar, then successively
# smaller cumulative sums overlaid on top.
x_axis = np.arange(0,24,1);
fig = pl.subplots(figsize=(8,8));
pl.subplots_adjust(hspace=0.05, wspace=0.05);
# Plots very first upper left
pl.subplot(4,3,1)
pl.bar(x_axis, Data_df_mean.loc[Data_df_mean['month'] == 12]['lights'][:]
    + Data_df_mean.loc[Data_df_mean['month'] == 12]['lgappl'][:]
    + Data_df_mean.loc[Data_df_mean['month'] == 12]['vent_fan'][:]
    + Data_df_mean.loc[Data_df_mean['month'] == 12]['misc'][:],
    label='lights', color= "gold");
pl.bar(x_axis, Data_df_mean.loc[Data_df_mean['month'] == 12]['lgappl'][:]
    + Data_df_mean.loc[Data_df_mean['month'] == 12]['vent_fan'][:]
    + Data_df_mean.loc[Data_df_mean['month'] == 12]['misc'][:],
    label='lgappl', color= "silver");
pl.bar(x_axis, Data_df_mean.loc[Data_df_mean['month'] == 12]['vent_fan'][:]
    + Data_df_mean.loc[Data_df_mean['month'] == 12]['misc'][:],
    label='vent_fan', color="dodgerblue");
pl.bar(x_axis, Data_df_mean.loc[Data_df_mean['month'] == 12]['misc'][:],
    label='misc', color="teal");
pl.annotate("12",xy=(.4,4.6), fontsize=11, bbox={'facecolor':'white'});
pl.title('Electricity consumption by month', fontsize=13, x=1.6);
pl.ylim(0, 5.4); pl.xlim(-1, 24);
pl.xticks(np.arange(0,25,6));
pl.ylabel('[ $kwh$ ]', fontsize=11, labelpad=3);
ax=pl.gca();
ax.axes.xaxis.set_ticklabels([]);
ax.grid(color='grey', linestyle='-', linewidth=0.5);
# Plots others in loop
for i in np.arange(2,13):
    pl.subplot(4,3,i)
    pl.bar(x_axis, Data_df_mean.loc[Data_df_mean['month'] == (i-1)]['lights'][:]
        + Data_df_mean.loc[Data_df_mean['month'] == (i-1)]['lgappl'][:]
        + Data_df_mean.loc[Data_df_mean['month'] == (i-1)]['vent_fan'][:]
        + Data_df_mean.loc[Data_df_mean['month'] == (i-1)]['misc'][:],
        label='lights', color= "gold");
    pl.bar(x_axis, Data_df_mean.loc[Data_df_mean['month'] == (i-1)]['lgappl'][:]
        + Data_df_mean.loc[Data_df_mean['month'] == (i-1)]['vent_fan'][:]
        + Data_df_mean.loc[Data_df_mean['month'] == (i-1)]['misc'][:],
        label='appliances', color= "silver");
    pl.bar(x_axis, Data_df_mean.loc[Data_df_mean['month'] == (i-1)]['vent_fan'][:]
        + Data_df_mean.loc[Data_df_mean['month'] == (i-1)]['misc'][:],
        label='ventilation', color="dodgerblue");
    pl.bar(x_axis, Data_df_mean.loc[Data_df_mean['month'] == (i-1)]['misc'][:],
        label='misc. loads', color="teal");
    pl.annotate(i-1,xy=(.4,4.6), fontsize=11, bbox={'facecolor':'white'});
    pl.ylim(0, 5.4); pl.xlim(-1, 24);
    pl.xticks(np.arange(0,25,6));
    pl.ylabel('[ $kwh$ ]', fontsize=11, labelpad=3);
    pl.xlabel('[ $hours$ ]', fontsize=11, labelpad=1);
    ax=pl.gca();
    ax.grid(color='grey', linestyle='-', linewidth=0.5);
    # Drop redundant tick labels except on the left column / bottom row.
    if not i in [1,4,7,10]:
        ax.axes.yaxis.set_ticklabels([]);
        pl.ylabel(None);
    if not i in np.arange(10,13):
        ax.axes.xaxis.set_ticklabels([]);
        pl.xlabel(None);
    # Single shared legend, anchored next to the last panel.
    if i == 12:
        ax.legend(bbox_to_anchor=(1, 1));
#pl.savefig('../latex/pics/ElectricityConsumption_v2.svg', bbox_inches='tight');
# -
# ### Illustrate PV generation
Data_df_mean.loc[Data_df_mean['month'] == 2]['Temperature'][:]
# + jupyter={"source_hidden": true}
# PV generation (bars, left axis) and outdoor temperature (line, right axis)
# per month on twin axes; same 4x3 layout with December drawn first.
x_axis = np.arange(0,24,1);
fig = pl.figure(figsize=(8,8));
pl.subplots_adjust(hspace=0.12, wspace=0.05);
# Plots very first upper left
ax1 = fig.add_subplot(4,3,1);
ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis
ax1.bar(x_axis, Data_df_mean.loc[Data_df_mean['month'] == 12]['PV_generation'][:], color= "orange");
ax2.plot(x_axis, Data_df_mean.loc[Data_df_mean['month'] == 12]['Temperature'][:], color= "black");
ax1.annotate("12",xy=(.4,8.4), fontsize=11, bbox={'facecolor':'white'});
pl.title('PV generation and temperatures by month', fontsize=13, x=1.6);
ax1.set_xticks(np.arange(0,25,6));
ax1.set_ylabel('[ $kwh$ ]', fontsize=11, labelpad=3);
ax1.axes.xaxis.set_ticklabels([]);
ax1.grid(color='grey', linestyle='-', linewidth=0.5);
ax1.set_ylim([0, 10]); ax2.set_ylim([-15, 30]); ax1.set_xlim([-1, 24]);
# Plots others in loop
for i in np.arange(2,13):
    ax1=fig.add_subplot(4,3,i);
    ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis
    ax1.bar(x_axis, Data_df_mean.loc[Data_df_mean['month'] == (i-1)]['PV_generation'][:], color= "orange");
    ax2.plot(x_axis, Data_df_mean.loc[Data_df_mean['month'] == (i-1)]['Temperature'][:], color= "black");
    ax1.annotate(i-1,xy=(.4,8.4), fontsize=11, bbox={'facecolor':'white'});
    ax1.set_xticks(np.arange(0,25,6));
    ax2.set_yticks(np.arange(-10,31,10));
    ax2.set_yticklabels(np.arange(-10,31,10));
    ax1.set_ylabel('[ $kwh$ ]', fontsize=11, labelpad=3);
    ax2.set_ylabel('[ $^{\circ}C$ ]', fontsize=11, labelpad=1);
    ax1.set_xlabel('[ $hours$ ]', fontsize=11, labelpad=1);
    ax1.grid(color='grey', linestyle='-', linewidth=0.5);
    ax1.set_ylim([0, 10]); ax2.set_ylim([-15, 30]); ax1.set_xlim([-1, 24]);
    # Keep left-axis labels only in the left column, x labels only in the
    # bottom row, and right-axis (temperature) labels only in the right column.
    if not i in [1,4,7,10]:
        ax1.axes.yaxis.set_ticklabels([]);
        ax1.set_ylabel(None);
    if not i in np.arange(10,13):
        ax1.axes.xaxis.set_ticklabels([]);
        ax1.set_xlabel(None);
    if not i in [3,6,9,12]:
        ax2.axes.yaxis.set_ticklabels([]);
        ax2.set_ylabel(None);
#pl.savefig('../latex/pics/PV_generation_temp_v2.svg', bbox_inches='tight');
| single_building/data/200102_SHEMS_timestamp_visualization_input_python.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/rbqpark/tinger/blob/main/ChordAnalysis.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="Nbwsgsd9LeeU"
# #**Chord Analysis Code Process**
# 1. Identify Chinese or English Song
#
# 2. Select correct comparison language:
# ```
# if song == chinese:
# pd = playlist_english
# if song == english:
# pd = palylist_chinese
# ```
#
# 3. Use function to generate another column, this new column calculates the difference between each chord (7 element list)
#
# 4. Apply the Sequence Matcher code to calculate the similarity score to the input and every song in the database
#
# 5. Get the max five and retrieve song titles (best matches)
#
# 6. Output (and create graphs)
#
# + id="WMwE3IitiMf_" colab={"base_uri": "https://localhost:8080/"} outputId="4e941255-b230-4364-af30-3c29894aec2b"
# Mount Google Drive so the playlist CSVs under /content/drive are reachable.
# Colab-only; prompts the user interactively.
from google.colab import drive
drive.mount('/content/drive')
# + id="llzKgr3os4kH"
import gspread
import pandas as pd
from google.colab import auth
from oauth2client.client import GoogleCredentials
# Authenticate this Colab session and authorize a gspread client with the
# default application credentials (gc is available for Sheets access).
auth.authenticate_user()
gc = gspread.authorize(GoogleCredentials.get_application_default())
# + id="pZXZkzsNtyic"
import pandas as pd
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="XDIsttsWt1d9" outputId="eddfbec6-bb24-4995-8e7e-391769224642"
# Path to CSV file. May need to change depending on user.
path = '/content/drive/MyDrive/Tinger/Documentation/'
# + id="2t3vfvvG6Bbq"
def _chord_intervals(progression):
    """Turn a comma-separated chord string into its list of intervals.

    The differences between consecutive chord numbers (e.g. "1,5,6,4" ->
    [4, 1, -2]) describe the musical movement (major 7ths, perfect 5ths, ...)
    and are what we compare between songs, independent of the starting chord.
    """
    chords = [int(c) for c in progression.split(',')]
    return [b - a for a, b in zip(chords, chords[1:])]

def _load_playlist(csv_name):
    """Read a playlist CSV, keep the three columns we need, and attach the
    'Chord Difference' interval column used for similarity matching."""
    df = pd.read_csv(path + csv_name)
    df = df[['Song Title', 'Artist', 'Main Chord Progression']]
    df['Chord Difference'] = df['Main Chord Progression'].map(_chord_intervals)
    return df

# The identical preparation was previously duplicated for both playlists;
# it is now a single helper applied to each file.
english = _load_playlist('Final Playlist Data (English).csv')
mandarin = _load_playlist('Final Playlist Data (Mandarin).csv')
# + id="gSpAj0UGl7Pv"
# difflib.SequenceMatcher rates how alike two sequences are by locating the
# longest contiguous matching subsequences (skipping "junk" elements).  This
# does not yield minimal edit sequences, but the matches tend to "look right"
# to people, which is what we want for chord-progression similarity.
from difflib import SequenceMatcher

def similar(a, b):
    """Return the similarity ratio (0.0 to 1.0) between sequences a and b."""
    matcher = SequenceMatcher(None, a, b)
    return matcher.ratio()
# + id="oc4PleeZK9Ls" colab={"base_uri": "https://localhost:8080/"} outputId="cc019e73-1d04-4cb8-d306-e0efbc0677e9"
#HERE IS WHERE WE PUT THE INPUT
#STITCHING ALL PIECES TOGETHER TO EXPORT THE 10 MOST SIMILAR SONGS AS A LIST
import re

# Title of the query song (renamed from `input`, which shadowed the builtin).
song_title = 'Unpack Your Heart'

# A CJK character in the first position marks the title as Chinese; a Chinese
# query is matched against the English playlist and vice versa.  (The former
# `for x in input[0]:` looped over a single one-character string — a
# one-iteration no-op wrapper, now removed.)
if re.search(u'[\u4e00-\u9fff]', song_title[0]):
    # Chinese query: look up its chord intervals, score every English song.
    diff = mandarin.loc[mandarin['Song Title'] == song_title]['Chord Difference'].values[0]
    english['score'] = english['Chord Difference'].map(lambda y: similar(y, diff))
    english = english.sort_values('score', ascending=False)
    print(english[['score', 'Song Title']].values[:10])
else:
    # English query: look up its chord intervals, score every Mandarin song.
    diff = english.loc[english['Song Title'] == song_title]['Chord Difference'].values[0]
    mandarin['score'] = mandarin['Chord Difference'].map(lambda y: similar(y, diff))
    mandarin = mandarin.sort_values('score', ascending=False)
    print(mandarin[['score', 'Song Title']].values[:10])
# + colab={"base_uri": "https://localhost:8080/", "height": 367} id="RkqBLabrGzAa" outputId="c34eab57-2825-475c-c637-2816f495765e"
## Test graphs
import re
#input = "We Don't Talk Anymore"
# Renamed from `input` (shadowed the builtin); the surrounding
# `for x in input[0]:` loop ran exactly once over a single character and
# never used `x`, so it was pure cruft and is removed.
song_title = 'You Are The Reason'
# Retrieve the query's chord intervals and score every English song.
diff = english.loc[english['Song Title'] == song_title]['Chord Difference'].values[0]
english['score'] = english['Chord Difference'].map(lambda y: similar(y, diff))
english = english.sort_values('score', ascending=False)
# Bar chart of the five best matches.
for_graph = english[['score', 'Song Title']].head(5)
for_graph.plot.bar(x='Song Title')
# + id="D43TMO1mm42p" colab={"base_uri": "https://localhost:8080/", "height": 244} outputId="488314ae-d32e-405e-ca30-fbf0eb772b2b"
import re
# Renamed from `input` (shadowed the builtin); the one-iteration
# `for x in input[0]:` wrapper has been removed — it never used `x`.
song_title = '因为你所以我'
# Retrieve the query's chord intervals and score every Mandarin song.
diff = mandarin.loc[mandarin['Song Title'] == song_title]['Chord Difference'].values[0]
mandarin['score'] = mandarin['Chord Difference'].map(lambda y: similar(y, diff))
mandarin = mandarin.sort_values('score', ascending=False)
# Bar chart plus printed table of the ten best matches.
for_graph = mandarin[['score', 'Song Title']].head(10)
for_graph.plot.bar()
print(for_graph)
# + id="wE-Om4GKsgfa"
| ChordAnalysis.ipynb |